| @@ -1,550 +1,550 @@ | | | @@ -1,550 +1,550 @@ |
1 | /* $NetBSD: powerpc_machdep.c,v 1.59 2011/06/29 06:00:17 matt Exp $ */ | | 1 | /* $NetBSD: powerpc_machdep.c,v 1.60 2011/07/31 10:00:52 kiyohara Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (C) 1995, 1996 Wolfgang Solfrank. | | 4 | * Copyright (C) 1995, 1996 Wolfgang Solfrank. |
5 | * Copyright (C) 1995, 1996 TooLs GmbH. | | 5 | * Copyright (C) 1995, 1996 TooLs GmbH. |
6 | * All rights reserved. | | 6 | * All rights reserved. |
7 | * | | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | | 8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions | | 9 | * modification, are permitted provided that the following conditions |
10 | * are met: | | 10 | * are met: |
11 | * 1. Redistributions of source code must retain the above copyright | | 11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. | | 12 | * notice, this list of conditions and the following disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright | | 13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the | | 14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. | | 15 | * documentation and/or other materials provided with the distribution. |
16 | * 3. All advertising materials mentioning features or use of this software | | 16 | * 3. All advertising materials mentioning features or use of this software |
17 | * must display the following acknowledgement: | | 17 | * must display the following acknowledgement: |
18 | * This product includes software developed by TooLs GmbH. | | 18 | * This product includes software developed by TooLs GmbH. |
19 | * 4. The name of TooLs GmbH may not be used to endorse or promote products | | 19 | * 4. The name of TooLs GmbH may not be used to endorse or promote products |
20 | * derived from this software without specific prior written permission. | | 20 | * derived from this software without specific prior written permission. |
21 | * | | 21 | * |
22 | * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR | | 22 | * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR |
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
24 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 24 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
25 | * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | | 25 | * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
26 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | | 26 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
27 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | | 27 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; |
28 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | | 28 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
29 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | | 29 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR |
30 | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | | 30 | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF |
31 | * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 31 | * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
32 | */ | | 32 | */ |
33 | | | 33 | |
34 | #include <sys/cdefs.h> | | 34 | #include <sys/cdefs.h> |
35 | __KERNEL_RCSID(0, "$NetBSD: powerpc_machdep.c,v 1.59 2011/06/29 06:00:17 matt Exp $"); | | 35 | __KERNEL_RCSID(0, "$NetBSD: powerpc_machdep.c,v 1.60 2011/07/31 10:00:52 kiyohara Exp $"); |
36 | | | 36 | |
37 | #include "opt_altivec.h" | | 37 | #include "opt_altivec.h" |
38 | #include "opt_modular.h" | | 38 | #include "opt_modular.h" |
39 | #include "opt_multiprocessor.h" | | 39 | #include "opt_multiprocessor.h" |
40 | #include "opt_ppcarch.h" | | 40 | #include "opt_ppcarch.h" |
41 | | | 41 | |
42 | #include <sys/param.h> | | 42 | #include <sys/param.h> |
43 | #include <sys/conf.h> | | 43 | #include <sys/conf.h> |
44 | #include <sys/disklabel.h> | | 44 | #include <sys/disklabel.h> |
45 | #include <sys/exec.h> | | 45 | #include <sys/exec.h> |
46 | #include <sys/kauth.h> | | 46 | #include <sys/kauth.h> |
47 | #include <sys/pool.h> | | 47 | #include <sys/pool.h> |
48 | #include <sys/proc.h> | | 48 | #include <sys/proc.h> |
49 | #include <sys/sa.h> | | 49 | #include <sys/sa.h> |
50 | #include <sys/savar.h> | | 50 | #include <sys/savar.h> |
51 | #include <sys/signal.h> | | 51 | #include <sys/signal.h> |
52 | #include <sys/sysctl.h> | | 52 | #include <sys/sysctl.h> |
53 | #include <sys/ucontext.h> | | 53 | #include <sys/ucontext.h> |
54 | #include <sys/cpu.h> | | 54 | #include <sys/cpu.h> |
55 | #include <sys/module.h> | | 55 | #include <sys/module.h> |
56 | #include <sys/device.h> | | 56 | #include <sys/device.h> |
57 | #include <sys/pcu.h> | | 57 | #include <sys/pcu.h> |
58 | #include <sys/atomic.h> | | 58 | #include <sys/atomic.h> |
59 | #include <sys/kmem.h> | | 59 | #include <sys/kmem.h> |
60 | #include <sys/xcall.h> | | 60 | #include <sys/xcall.h> |
61 | | | 61 | |
62 | #include <dev/mm.h> | | 62 | #include <dev/mm.h> |
63 | | | 63 | |
64 | #include <powerpc/fpu.h> | | 64 | #include <powerpc/fpu.h> |
65 | #include <powerpc/pcb.h> | | 65 | #include <powerpc/pcb.h> |
66 | #include <powerpc/psl.h> | | 66 | #include <powerpc/psl.h> |
67 | #include <powerpc/userret.h> | | 67 | #include <powerpc/userret.h> |
68 | #if defined(ALTIVEC) || defined(PPC_HAVE_SPE) | | 68 | #if defined(ALTIVEC) || defined(PPC_HAVE_SPE) |
69 | #include <powerpc/altivec.h> | | 69 | #include <powerpc/altivec.h> |
70 | #endif | | 70 | #endif |
71 | | | 71 | |
72 | #ifdef MULTIPROCESOR | | 72 | #ifdef MULTIPROCESSOR |
73 | #include <powerpc/pic/ipivar.h> | | 73 | #include <powerpc/pic/ipivar.h> |
74 | #endif | | 74 | #endif |
75 | | | 75 | |
76 | int cpu_timebase; | | 76 | int cpu_timebase; |
77 | int cpu_printfataltraps = 1; | | 77 | int cpu_printfataltraps = 1; |
78 | #if !defined(PPC_IBM4XX) | | 78 | #if !defined(PPC_IBM4XX) |
79 | extern int powersave; | | 79 | extern int powersave; |
80 | #endif | | 80 | #endif |
81 | | | 81 | |
82 | /* exported variable to be filled in by the bootloaders */ | | 82 | /* exported variable to be filled in by the bootloaders */ |
83 | char *booted_kernel; | | 83 | char *booted_kernel; |
84 | | | 84 | |
85 | const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = { | | 85 | const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = { |
86 | #if defined(PPC_HAVE_FPU) | | 86 | #if defined(PPC_HAVE_FPU) |
87 | [PCU_FPU] = &fpu_ops, | | 87 | [PCU_FPU] = &fpu_ops, |
88 | #endif | | 88 | #endif |
89 | #if defined(ALTIVEC) || defined(PPC_HAVE_SPE) | | 89 | #if defined(ALTIVEC) || defined(PPC_HAVE_SPE) |
90 | [PCU_VEC] = &vec_ops, | | 90 | [PCU_VEC] = &vec_ops, |
91 | #endif | | 91 | #endif |
92 | }; | | 92 | }; |
93 | | | 93 | |
94 | #ifdef MULTIPROCESSOR | | 94 | #ifdef MULTIPROCESSOR |
95 | volatile struct cpuset_info cpuset_info; | | 95 | volatile struct cpuset_info cpuset_info; |
96 | #endif | | 96 | #endif |
97 | | | 97 | |
98 | /* | | 98 | /* |
99 | * Set up registers on exec. | | 99 | * Set up registers on exec. |
100 | */ | | 100 | */ |
101 | void | | 101 | void |
102 | setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack) | | 102 | setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack) |
103 | { | | 103 | { |
104 | struct proc * const p = l->l_proc; | | 104 | struct proc * const p = l->l_proc; |
105 | struct trapframe * const tf = l->l_md.md_utf; | | 105 | struct trapframe * const tf = l->l_md.md_utf; |
106 | struct pcb * const pcb = lwp_getpcb(l); | | 106 | struct pcb * const pcb = lwp_getpcb(l); |
107 | struct ps_strings arginfo; | | 107 | struct ps_strings arginfo; |
108 | | | 108 | |
109 | memset(tf, 0, sizeof *tf); | | 109 | memset(tf, 0, sizeof *tf); |
110 | tf->tf_fixreg[1] = -roundup(-stack + 8, 16); | | 110 | tf->tf_fixreg[1] = -roundup(-stack + 8, 16); |
111 | | | 111 | |
112 | /* | | 112 | /* |
113 | * XXX Machine-independent code has already copied arguments and | | 113 | * XXX Machine-independent code has already copied arguments and |
114 | * XXX environment to userland. Get them back here. | | 114 | * XXX environment to userland. Get them back here. |
115 | */ | | 115 | */ |
116 | (void)copyin_psstrings(p, &arginfo); | | 116 | (void)copyin_psstrings(p, &arginfo); |
117 | | | 117 | |
118 | /* | | 118 | /* |
119 | * Set up arguments for _start(): | | 119 | * Set up arguments for _start(): |
120 | * _start(argc, argv, envp, obj, cleanup, ps_strings); | | 120 | * _start(argc, argv, envp, obj, cleanup, ps_strings); |
121 | * | | 121 | * |
122 | * Notes: | | 122 | * Notes: |
123 | * - obj and cleanup are the auxiliary and termination | | 123 | * - obj and cleanup are the auxiliary and termination |
124 | * vectors. They are fixed up by ld.elf_so. | | 124 | * vectors. They are fixed up by ld.elf_so. |
125 | * - ps_strings is a NetBSD extension, and will be | | 125 | * - ps_strings is a NetBSD extension, and will be |
126 | * ignored by executables which are strictly | | 126 | * ignored by executables which are strictly |
127 | * compliant with the SVR4 ABI. | | 127 | * compliant with the SVR4 ABI. |
128 | * | | 128 | * |
129 | * XXX We have to set both regs and retval here due to different | | 129 | * XXX We have to set both regs and retval here due to different |
130 | * XXX calling convention in trap.c and init_main.c. | | 130 | * XXX calling convention in trap.c and init_main.c. |
131 | */ | | 131 | */ |
132 | tf->tf_fixreg[3] = arginfo.ps_nargvstr; | | 132 | tf->tf_fixreg[3] = arginfo.ps_nargvstr; |
133 | tf->tf_fixreg[4] = (register_t)arginfo.ps_argvstr; | | 133 | tf->tf_fixreg[4] = (register_t)arginfo.ps_argvstr; |
134 | tf->tf_fixreg[5] = (register_t)arginfo.ps_envstr; | | 134 | tf->tf_fixreg[5] = (register_t)arginfo.ps_envstr; |
135 | tf->tf_fixreg[6] = 0; /* auxiliary vector */ | | 135 | tf->tf_fixreg[6] = 0; /* auxiliary vector */ |
136 | tf->tf_fixreg[7] = 0; /* termination vector */ | | 136 | tf->tf_fixreg[7] = 0; /* termination vector */ |
137 | tf->tf_fixreg[8] = p->p_psstrp; /* NetBSD extension */ | | 137 | tf->tf_fixreg[8] = p->p_psstrp; /* NetBSD extension */ |
138 | | | 138 | |
139 | tf->tf_srr0 = pack->ep_entry; | | 139 | tf->tf_srr0 = pack->ep_entry; |
140 | tf->tf_srr1 = PSL_MBO | PSL_USERSET; | | 140 | tf->tf_srr1 = PSL_MBO | PSL_USERSET; |
141 | #ifdef ALTIVEC | | 141 | #ifdef ALTIVEC |
142 | tf->tf_vrsave = 0; | | 142 | tf->tf_vrsave = 0; |
143 | #endif | | 143 | #endif |
144 | pcb->pcb_flags = PSL_FE_DFLT; | | 144 | pcb->pcb_flags = PSL_FE_DFLT; |
145 | } | | 145 | } |
146 | | | 146 | |
147 | /* | | 147 | /* |
148 | * Machine dependent system variables. | | 148 | * Machine dependent system variables. |
149 | */ | | 149 | */ |
150 | static int | | 150 | static int |
151 | sysctl_machdep_cacheinfo(SYSCTLFN_ARGS) | | 151 | sysctl_machdep_cacheinfo(SYSCTLFN_ARGS) |
152 | { | | 152 | { |
153 | struct sysctlnode node = *rnode; | | 153 | struct sysctlnode node = *rnode; |
154 | | | 154 | |
155 | node.sysctl_data = &curcpu()->ci_ci; | | 155 | node.sysctl_data = &curcpu()->ci_ci; |
156 | node.sysctl_size = sizeof(curcpu()->ci_ci); | | 156 | node.sysctl_size = sizeof(curcpu()->ci_ci); |
157 | return (sysctl_lookup(SYSCTLFN_CALL(&node))); | | 157 | return (sysctl_lookup(SYSCTLFN_CALL(&node))); |
158 | } | | 158 | } |
159 | | | 159 | |
160 | #if !defined (PPC_IBM4XX) | | 160 | #if !defined (PPC_IBM4XX) |
161 | static int | | 161 | static int |
162 | sysctl_machdep_powersave(SYSCTLFN_ARGS) | | 162 | sysctl_machdep_powersave(SYSCTLFN_ARGS) |
163 | { | | 163 | { |
164 | struct sysctlnode node = *rnode; | | 164 | struct sysctlnode node = *rnode; |
165 | | | 165 | |
166 | if (powersave < 0) | | 166 | if (powersave < 0) |
167 | node.sysctl_flags &= ~CTLFLAG_READWRITE; | | 167 | node.sysctl_flags &= ~CTLFLAG_READWRITE; |
168 | return (sysctl_lookup(SYSCTLFN_CALL(&node))); | | 168 | return (sysctl_lookup(SYSCTLFN_CALL(&node))); |
169 | } | | 169 | } |
170 | #endif | | 170 | #endif |
171 | | | 171 | |
172 | static int | | 172 | static int |
173 | sysctl_machdep_booted_device(SYSCTLFN_ARGS) | | 173 | sysctl_machdep_booted_device(SYSCTLFN_ARGS) |
174 | { | | 174 | { |
175 | struct sysctlnode node; | | 175 | struct sysctlnode node; |
176 | | | 176 | |
177 | if (booted_device == NULL) | | 177 | if (booted_device == NULL) |
178 | return (EOPNOTSUPP); | | 178 | return (EOPNOTSUPP); |
179 | | | 179 | |
180 | const char * const xname = device_xname(booted_device); | | 180 | const char * const xname = device_xname(booted_device); |
181 | | | 181 | |
182 | node = *rnode; | | 182 | node = *rnode; |
183 | node.sysctl_data = __UNCONST(xname); | | 183 | node.sysctl_data = __UNCONST(xname); |
184 | node.sysctl_size = strlen(xname) + 1; | | 184 | node.sysctl_size = strlen(xname) + 1; |
185 | return (sysctl_lookup(SYSCTLFN_CALL(&node))); | | 185 | return (sysctl_lookup(SYSCTLFN_CALL(&node))); |
186 | } | | 186 | } |
187 | | | 187 | |
188 | static int | | 188 | static int |
189 | sysctl_machdep_booted_kernel(SYSCTLFN_ARGS) | | 189 | sysctl_machdep_booted_kernel(SYSCTLFN_ARGS) |
190 | { | | 190 | { |
191 | struct sysctlnode node; | | 191 | struct sysctlnode node; |
192 | | | 192 | |
193 | if (booted_kernel == NULL || booted_kernel[0] == '\0') | | 193 | if (booted_kernel == NULL || booted_kernel[0] == '\0') |
194 | return (EOPNOTSUPP); | | 194 | return (EOPNOTSUPP); |
195 | | | 195 | |
196 | node = *rnode; | | 196 | node = *rnode; |
197 | node.sysctl_data = booted_kernel; | | 197 | node.sysctl_data = booted_kernel; |
198 | node.sysctl_size = strlen(booted_kernel) + 1; | | 198 | node.sysctl_size = strlen(booted_kernel) + 1; |
199 | return (sysctl_lookup(SYSCTLFN_CALL(&node))); | | 199 | return (sysctl_lookup(SYSCTLFN_CALL(&node))); |
200 | } | | 200 | } |
201 | | | 201 | |
202 | SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup") | | 202 | SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup") |
203 | { | | 203 | { |
204 | | | 204 | |
205 | sysctl_createv(clog, 0, NULL, NULL, | | 205 | sysctl_createv(clog, 0, NULL, NULL, |
206 | CTLFLAG_PERMANENT, | | 206 | CTLFLAG_PERMANENT, |
207 | CTLTYPE_NODE, "machdep", NULL, | | 207 | CTLTYPE_NODE, "machdep", NULL, |
208 | NULL, 0, NULL, 0, | | 208 | NULL, 0, NULL, 0, |
209 | CTL_MACHDEP, CTL_EOL); | | 209 | CTL_MACHDEP, CTL_EOL); |
210 | | | 210 | |
211 | /* Deprecated */ | | 211 | /* Deprecated */ |
212 | sysctl_createv(clog, 0, NULL, NULL, | | 212 | sysctl_createv(clog, 0, NULL, NULL, |
213 | CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE, | | 213 | CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE, |
214 | CTLTYPE_INT, "cachelinesize", NULL, | | 214 | CTLTYPE_INT, "cachelinesize", NULL, |
215 | NULL, curcpu()->ci_ci.dcache_line_size, NULL, 0, | | 215 | NULL, curcpu()->ci_ci.dcache_line_size, NULL, 0, |
216 | CTL_MACHDEP, CPU_CACHELINE, CTL_EOL); | | 216 | CTL_MACHDEP, CPU_CACHELINE, CTL_EOL); |
217 | sysctl_createv(clog, 0, NULL, NULL, | | 217 | sysctl_createv(clog, 0, NULL, NULL, |
218 | CTLFLAG_PERMANENT, | | 218 | CTLFLAG_PERMANENT, |
219 | CTLTYPE_INT, "timebase", NULL, | | 219 | CTLTYPE_INT, "timebase", NULL, |
220 | NULL, 0, &cpu_timebase, 0, | | 220 | NULL, 0, &cpu_timebase, 0, |
221 | CTL_MACHDEP, CPU_TIMEBASE, CTL_EOL); | | 221 | CTL_MACHDEP, CPU_TIMEBASE, CTL_EOL); |
222 | sysctl_createv(clog, 0, NULL, NULL, | | 222 | sysctl_createv(clog, 0, NULL, NULL, |
223 | CTLFLAG_PERMANENT|CTLFLAG_READWRITE, | | 223 | CTLFLAG_PERMANENT|CTLFLAG_READWRITE, |
224 | CTLTYPE_INT, "printfataltraps", NULL, | | 224 | CTLTYPE_INT, "printfataltraps", NULL, |
225 | NULL, 0, &cpu_printfataltraps, 0, | | 225 | NULL, 0, &cpu_printfataltraps, 0, |
226 | CTL_MACHDEP, CPU_PRINTFATALTRAPS, CTL_EOL); | | 226 | CTL_MACHDEP, CPU_PRINTFATALTRAPS, CTL_EOL); |
227 | /* Use this instead of CPU_CACHELINE */ | | 227 | /* Use this instead of CPU_CACHELINE */ |
228 | sysctl_createv(clog, 0, NULL, NULL, | | 228 | sysctl_createv(clog, 0, NULL, NULL, |
229 | CTLFLAG_PERMANENT, | | 229 | CTLFLAG_PERMANENT, |
230 | CTLTYPE_STRUCT, "cacheinfo", NULL, | | 230 | CTLTYPE_STRUCT, "cacheinfo", NULL, |
231 | sysctl_machdep_cacheinfo, 0, NULL, 0, | | 231 | sysctl_machdep_cacheinfo, 0, NULL, 0, |
232 | CTL_MACHDEP, CPU_CACHEINFO, CTL_EOL); | | 232 | CTL_MACHDEP, CPU_CACHEINFO, CTL_EOL); |
233 | #if !defined (PPC_IBM4XX) | | 233 | #if !defined (PPC_IBM4XX) |
234 | sysctl_createv(clog, 0, NULL, NULL, | | 234 | sysctl_createv(clog, 0, NULL, NULL, |
235 | CTLFLAG_PERMANENT|CTLFLAG_READWRITE, | | 235 | CTLFLAG_PERMANENT|CTLFLAG_READWRITE, |
236 | CTLTYPE_INT, "powersave", NULL, | | 236 | CTLTYPE_INT, "powersave", NULL, |
237 | sysctl_machdep_powersave, 0, &powersave, 0, | | 237 | sysctl_machdep_powersave, 0, &powersave, 0, |
238 | CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL); | | 238 | CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL); |
239 | #endif | | 239 | #endif |
240 | #if defined(PPC_IBM4XX) || defined(PPC_BOOKE) | | 240 | #if defined(PPC_IBM4XX) || defined(PPC_BOOKE) |
241 | sysctl_createv(clog, 0, NULL, NULL, | | 241 | sysctl_createv(clog, 0, NULL, NULL, |
242 | CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE, | | 242 | CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE, |
243 | CTLTYPE_INT, "altivec", NULL, | | 243 | CTLTYPE_INT, "altivec", NULL, |
244 | NULL, 0, NULL, 0, | | 244 | NULL, 0, NULL, 0, |
245 | CTL_MACHDEP, CPU_ALTIVEC, CTL_EOL); | | 245 | CTL_MACHDEP, CPU_ALTIVEC, CTL_EOL); |
246 | #else | | 246 | #else |
247 | sysctl_createv(clog, 0, NULL, NULL, | | 247 | sysctl_createv(clog, 0, NULL, NULL, |
248 | CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE, | | 248 | CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE, |
249 | CTLTYPE_INT, "altivec", NULL, | | 249 | CTLTYPE_INT, "altivec", NULL, |
250 | NULL, cpu_altivec, NULL, 0, | | 250 | NULL, cpu_altivec, NULL, 0, |
251 | CTL_MACHDEP, CPU_ALTIVEC, CTL_EOL); | | 251 | CTL_MACHDEP, CPU_ALTIVEC, CTL_EOL); |
252 | #endif | | 252 | #endif |
253 | sysctl_createv(clog, 0, NULL, NULL, | | 253 | sysctl_createv(clog, 0, NULL, NULL, |
254 | CTLFLAG_PERMANENT, | | 254 | CTLFLAG_PERMANENT, |
255 | CTLTYPE_STRING, "model", NULL, | | 255 | CTLTYPE_STRING, "model", NULL, |
256 | NULL, 0, cpu_model, 0, | | 256 | NULL, 0, cpu_model, 0, |
257 | CTL_MACHDEP, CPU_MODEL, CTL_EOL); | | 257 | CTL_MACHDEP, CPU_MODEL, CTL_EOL); |
258 | sysctl_createv(clog, 0, NULL, NULL, | | 258 | sysctl_createv(clog, 0, NULL, NULL, |
259 | CTLFLAG_PERMANENT, | | 259 | CTLFLAG_PERMANENT, |
260 | CTLTYPE_STRING, "booted_device", NULL, | | 260 | CTLTYPE_STRING, "booted_device", NULL, |
261 | sysctl_machdep_booted_device, 0, NULL, 0, | | 261 | sysctl_machdep_booted_device, 0, NULL, 0, |
262 | CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL); | | 262 | CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL); |
263 | sysctl_createv(clog, 0, NULL, NULL, | | 263 | sysctl_createv(clog, 0, NULL, NULL, |
264 | CTLFLAG_PERMANENT, | | 264 | CTLFLAG_PERMANENT, |
265 | CTLTYPE_STRING, "booted_kernel", NULL, | | 265 | CTLTYPE_STRING, "booted_kernel", NULL, |
266 | sysctl_machdep_booted_kernel, 0, NULL, 0, | | 266 | sysctl_machdep_booted_kernel, 0, NULL, 0, |
267 | CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL); | | 267 | CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL); |
268 | } | | 268 | } |
269 | | | 269 | |
270 | /* | | 270 | /* |
271 | * Crash dump handling. | | 271 | * Crash dump handling. |
272 | */ | | 272 | */ |
273 | u_int32_t dumpmag = 0x8fca0101; /* magic number */ | | 273 | u_int32_t dumpmag = 0x8fca0101; /* magic number */ |
274 | int dumpsize = 0; /* size of dump in pages */ | | 274 | int dumpsize = 0; /* size of dump in pages */ |
275 | long dumplo = -1; /* blocks */ | | 275 | long dumplo = -1; /* blocks */ |
276 | | | 276 | |
277 | /* | | 277 | /* |
278 | * This is called by main to set dumplo and dumpsize. | | 278 | * This is called by main to set dumplo and dumpsize. |
279 | */ | | 279 | */ |
280 | void | | 280 | void |
281 | cpu_dumpconf(void) | | 281 | cpu_dumpconf(void) |
282 | { | | 282 | { |
283 | const struct bdevsw *bdev; | | 283 | const struct bdevsw *bdev; |
284 | int nblks; /* size of dump device */ | | 284 | int nblks; /* size of dump device */ |
285 | int skip; | | 285 | int skip; |
286 | | | 286 | |
287 | if (dumpdev == NODEV) | | 287 | if (dumpdev == NODEV) |
288 | return; | | 288 | return; |
289 | bdev = bdevsw_lookup(dumpdev); | | 289 | bdev = bdevsw_lookup(dumpdev); |
290 | if (bdev == NULL) { | | 290 | if (bdev == NULL) { |
291 | dumpdev = NODEV; | | 291 | dumpdev = NODEV; |
292 | return; | | 292 | return; |
293 | } | | 293 | } |
294 | if (bdev->d_psize == NULL) | | 294 | if (bdev->d_psize == NULL) |
295 | return; | | 295 | return; |
296 | nblks = (*bdev->d_psize)(dumpdev); | | 296 | nblks = (*bdev->d_psize)(dumpdev); |
297 | if (nblks <= ctod(1)) | | 297 | if (nblks <= ctod(1)) |
298 | return; | | 298 | return; |
299 | | | 299 | |
300 | dumpsize = physmem; | | 300 | dumpsize = physmem; |
301 | | | 301 | |
302 | /* Skip enough blocks at start of disk to preserve an eventual disklabel. */ | | 302 | /* Skip enough blocks at start of disk to preserve an eventual disklabel. */ |
303 | skip = LABELSECTOR + 1; | | 303 | skip = LABELSECTOR + 1; |
304 | skip += ctod(1) - 1; | | 304 | skip += ctod(1) - 1; |
305 | skip = ctod(dtoc(skip)); | | 305 | skip = ctod(dtoc(skip)); |
306 | if (dumplo < skip) | | 306 | if (dumplo < skip) |
307 | dumplo = skip; | | 307 | dumplo = skip; |
308 | | | 308 | |
309 | /* Put dump at end of partition */ | | 309 | /* Put dump at end of partition */ |
310 | if (dumpsize > dtoc(nblks - dumplo)) | | 310 | if (dumpsize > dtoc(nblks - dumplo)) |
311 | dumpsize = dtoc(nblks - dumplo); | | 311 | dumpsize = dtoc(nblks - dumplo); |
312 | if (dumplo < nblks - ctod(dumpsize)) | | 312 | if (dumplo < nblks - ctod(dumpsize)) |
313 | dumplo = nblks - ctod(dumpsize); | | 313 | dumplo = nblks - ctod(dumpsize); |
314 | } | | 314 | } |
315 | | | 315 | |
316 | /* | | 316 | /* |
317 | * Start a new LWP | | 317 | * Start a new LWP |
318 | */ | | 318 | */ |
319 | void | | 319 | void |
320 | startlwp(void *arg) | | 320 | startlwp(void *arg) |
321 | { | | 321 | { |
322 | ucontext_t * const uc = arg; | | 322 | ucontext_t * const uc = arg; |
323 | lwp_t * const l = curlwp; | | 323 | lwp_t * const l = curlwp; |
324 | struct trapframe * const tf = l->l_md.md_utf; | | 324 | struct trapframe * const tf = l->l_md.md_utf; |
325 | int error; | | 325 | int error; |
326 | | | 326 | |
327 | error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags); | | 327 | error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags); |
328 | KASSERT(error == 0); | | 328 | KASSERT(error == 0); |
329 | | | 329 | |
330 | kmem_free(uc, sizeof(ucontext_t)); | | 330 | kmem_free(uc, sizeof(ucontext_t)); |
331 | userret(l, tf); | | 331 | userret(l, tf); |
332 | } | | 332 | } |
333 | | | 333 | |
334 | void | | 334 | void |
335 | upcallret(struct lwp *l) | | 335 | upcallret(struct lwp *l) |
336 | { | | 336 | { |
337 | struct trapframe * const tf = l->l_md.md_utf; | | 337 | struct trapframe * const tf = l->l_md.md_utf; |
338 | | | 338 | |
339 | KERNEL_UNLOCK_LAST(l); | | 339 | KERNEL_UNLOCK_LAST(l); |
340 | userret(l, tf); | | 340 | userret(l, tf); |
341 | } | | 341 | } |
342 | | | 342 | |
343 | void | | 343 | void |
344 | cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted, | | 344 | cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted, |
345 | void *sas, void *ap, void *sp, sa_upcall_t upcall) | | 345 | void *sas, void *ap, void *sp, sa_upcall_t upcall) |
346 | { | | 346 | { |
347 | struct trapframe * const tf = l->l_md.md_utf; | | 347 | struct trapframe * const tf = l->l_md.md_utf; |
348 | | | 348 | |
349 | /* | | 349 | /* |
350 | * Build context to run handler in. | | 350 | * Build context to run handler in. |
351 | */ | | 351 | */ |
352 | tf->tf_fixreg[1] = (register_t)((struct saframe *)sp - 1); | | 352 | tf->tf_fixreg[1] = (register_t)((struct saframe *)sp - 1); |
353 | tf->tf_lr = 0; | | 353 | tf->tf_lr = 0; |
354 | tf->tf_fixreg[3] = (register_t)type; | | 354 | tf->tf_fixreg[3] = (register_t)type; |
355 | tf->tf_fixreg[4] = (register_t)sas; | | 355 | tf->tf_fixreg[4] = (register_t)sas; |
356 | tf->tf_fixreg[5] = (register_t)nevents; | | 356 | tf->tf_fixreg[5] = (register_t)nevents; |
357 | tf->tf_fixreg[6] = (register_t)ninterrupted; | | 357 | tf->tf_fixreg[6] = (register_t)ninterrupted; |
358 | tf->tf_fixreg[7] = (register_t)ap; | | 358 | tf->tf_fixreg[7] = (register_t)ap; |
359 | tf->tf_srr0 = (register_t)upcall; | | 359 | tf->tf_srr0 = (register_t)upcall; |
360 | tf->tf_srr1 &= ~PSL_SE; | | 360 | tf->tf_srr1 &= ~PSL_SE; |
361 | } | | 361 | } |
362 | | | 362 | |
363 | bool | | 363 | bool |
364 | cpu_intr_p(void) | | 364 | cpu_intr_p(void) |
365 | { | | 365 | { |
366 | | | 366 | |
367 | return curcpu()->ci_idepth >= 0; | | 367 | return curcpu()->ci_idepth >= 0; |
368 | } | | 368 | } |
369 | | | 369 | |
370 | void | | 370 | void |
371 | cpu_idle(void) | | 371 | cpu_idle(void) |
372 | { | | 372 | { |
373 | KASSERT(mfmsr() & PSL_EE); | | 373 | KASSERT(mfmsr() & PSL_EE); |
374 | KASSERT(curcpu()->ci_cpl == IPL_NONE); | | 374 | KASSERT(curcpu()->ci_cpl == IPL_NONE); |
375 | (*curcpu()->ci_idlespin)(); | | 375 | (*curcpu()->ci_idlespin)(); |
376 | } | | 376 | } |
377 | | | 377 | |
378 | void | | 378 | void |
379 | cpu_ast(struct lwp *l, struct cpu_info *ci) | | 379 | cpu_ast(struct lwp *l, struct cpu_info *ci) |
380 | { | | 380 | { |
381 | l->l_md.md_astpending = 0; /* we are about to do it */ | | 381 | l->l_md.md_astpending = 0; /* we are about to do it */ |
382 | | | 382 | |
383 | if (l->l_pflag & LP_OWEUPC) { | | 383 | if (l->l_pflag & LP_OWEUPC) { |
384 | l->l_pflag &= ~LP_OWEUPC; | | 384 | l->l_pflag &= ~LP_OWEUPC; |
385 | ADDUPROF(l); | | 385 | ADDUPROF(l); |
386 | } | | 386 | } |
387 | | | 387 | |
388 | /* Check whether we are being preempted. */ | | 388 | /* Check whether we are being preempted. */ |
389 | if (ci->ci_want_resched) { | | 389 | if (ci->ci_want_resched) { |
390 | preempt(); | | 390 | preempt(); |
391 | } | | 391 | } |
392 | } | | 392 | } |
393 | | | 393 | |
394 | void | | 394 | void |
395 | cpu_need_resched(struct cpu_info *ci, int flags) | | 395 | cpu_need_resched(struct cpu_info *ci, int flags) |
396 | { | | 396 | { |
397 | struct lwp * const l = ci->ci_data.cpu_onproc; | | 397 | struct lwp * const l = ci->ci_data.cpu_onproc; |
398 | #if defined(MULTIPROCESSOR) | | 398 | #if defined(MULTIPROCESSOR) |
399 | struct cpu_info * const cur_ci = curcpu(); | | 399 | struct cpu_info * const cur_ci = curcpu(); |
400 | #endif | | 400 | #endif |
401 | | | 401 | |
402 | KASSERT(kpreempt_disabled()); | | 402 | KASSERT(kpreempt_disabled()); |
403 | | | 403 | |
404 | #ifdef MULTIPROCESSOR | | 404 | #ifdef MULTIPROCESSOR |
405 | atomic_or_uint(&ci->ci_want_resched, flags); | | 405 | atomic_or_uint(&ci->ci_want_resched, flags); |
406 | #else | | 406 | #else |
407 | ci->ci_want_resched |= flags; | | 407 | ci->ci_want_resched |= flags; |
408 | #endif | | 408 | #endif |
409 | | | 409 | |
410 | if (__predict_false((l->l_pflag & LP_INTR) != 0)) { | | 410 | if (__predict_false((l->l_pflag & LP_INTR) != 0)) { |
411 | /* | | 411 | /* |
412 | * No point doing anything, it will switch soon. | | 412 | * No point doing anything, it will switch soon. |
413 | * Also here to prevent an assertion failure in | | 413 | * Also here to prevent an assertion failure in |
414 | * kpreempt() due to preemption being set on a | | 414 | * kpreempt() due to preemption being set on a |
415 | * soft interrupt LWP. | | 415 | * soft interrupt LWP. |
416 | */ | | 416 | */ |
417 | return; | | 417 | return; |
418 | } | | 418 | } |
419 | | | 419 | |
420 | if (__predict_false(l == ci->ci_data.cpu_idlelwp)) { | | 420 | if (__predict_false(l == ci->ci_data.cpu_idlelwp)) { |
421 | #if defined(MULTIPROCESSOR) | | 421 | #if defined(MULTIPROCESSOR) |
422 | /* | | 422 | /* |
423 | * If the other CPU is idling, it must be waiting for an | | 423 | * If the other CPU is idling, it must be waiting for an |
424 | * interrupt. So give it one. | | 424 | * interrupt. So give it one. |
425 | */ | | 425 | */ |
426 | if (__predict_false(ci != cur_ci)) | | 426 | if (__predict_false(ci != cur_ci)) |
427 | cpu_send_ipi(cpu_index(ci), IPI_NOMESG); | | 427 | cpu_send_ipi(cpu_index(ci), IPI_NOMESG); |
428 | #endif | | 428 | #endif |
429 | return; | | 429 | return; |
430 | } | | 430 | } |
431 | | | 431 | |
432 | #ifdef __HAVE_PREEMPTION | | 432 | #ifdef __HAVE_PREEMPTION |
433 | if (flags & RESCHED_KPREEMPT) { | | 433 | if (flags & RESCHED_KPREEMPT) { |
434 | atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE); | | 434 | atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE); |
435 | if (ci == cur_ci) { | | 435 | if (ci == cur_ci) { |
436 | softint_trigger(SOFTINT_KPREEMPT); | | 436 | softint_trigger(SOFTINT_KPREEMPT); |
437 | } else { | | 437 | } else { |
438 | cpu_send_ipi(cpu_index(ci), IPI_KPREEMPT); | | 438 | cpu_send_ipi(cpu_index(ci), IPI_KPREEMPT); |
439 | } | | 439 | } |
440 | return; | | 440 | return; |
441 | } | | 441 | } |
442 | #endif | | 442 | #endif |
443 | l->l_md.md_astpending = 1; /* force call to ast() */ | | 443 | l->l_md.md_astpending = 1; /* force call to ast() */ |
444 | #if defined(MULTIPROCESSOR) | | 444 | #if defined(MULTIPROCESSOR) |
445 | if (ci != cur_ci && (flags & RESCHED_IMMED)) { | | 445 | if (ci != cur_ci && (flags & RESCHED_IMMED)) { |
446 | cpu_send_ipi(cpu_index(ci), IPI_NOMESG); | | 446 | cpu_send_ipi(cpu_index(ci), IPI_NOMESG); |
447 | } | | 447 | } |
448 | #endif | | 448 | #endif |
449 | } | | 449 | } |
450 | | | 450 | |
451 | void | | 451 | void |
452 | cpu_need_proftick(lwp_t *l) | | 452 | cpu_need_proftick(lwp_t *l) |
453 | { | | 453 | { |
454 | l->l_pflag |= LP_OWEUPC; | | 454 | l->l_pflag |= LP_OWEUPC; |
455 | l->l_md.md_astpending = 1; | | 455 | l->l_md.md_astpending = 1; |
456 | } | | 456 | } |
457 | | | 457 | |
458 | void | | 458 | void |
459 | cpu_signotify(lwp_t *l) | | 459 | cpu_signotify(lwp_t *l) |
460 | { | | 460 | { |
461 | l->l_md.md_astpending = 1; | | 461 | l->l_md.md_astpending = 1; |
462 | } | | 462 | } |
463 | | | 463 | |
464 | vaddr_t | | 464 | vaddr_t |
465 | cpu_lwp_pc(lwp_t *l) | | 465 | cpu_lwp_pc(lwp_t *l) |
466 | { | | 466 | { |
467 | return l->l_md.md_utf->tf_srr0; | | 467 | return l->l_md.md_utf->tf_srr0; |
468 | } | | 468 | } |
469 | | | 469 | |
470 | bool | | 470 | bool |
471 | cpu_clkf_usermode(const struct clockframe *cf) | | 471 | cpu_clkf_usermode(const struct clockframe *cf) |
472 | { | | 472 | { |
473 | return (cf->cf_srr1 & PSL_PR) != 0; | | 473 | return (cf->cf_srr1 & PSL_PR) != 0; |
474 | } | | 474 | } |
475 | | | 475 | |
476 | vaddr_t | | 476 | vaddr_t |
477 | cpu_clkf_pc(const struct clockframe *cf) | | 477 | cpu_clkf_pc(const struct clockframe *cf) |
478 | { | | 478 | { |
479 | return cf->cf_srr0; | | 479 | return cf->cf_srr0; |
480 | } | | 480 | } |
481 | | | 481 | |
482 | bool | | 482 | bool |
483 | cpu_clkf_intr(const struct clockframe *cf) | | 483 | cpu_clkf_intr(const struct clockframe *cf) |
484 | { | | 484 | { |
485 | return cf->cf_idepth > 0; | | 485 | return cf->cf_idepth > 0; |
486 | } | | 486 | } |
487 | | | 487 | |
488 | #ifdef MULTIPROCESSOR | | 488 | #ifdef MULTIPROCESSOR |
489 | /* | | 489 | /* |
490 | * MD support for xcall(9) interface. | | 490 | * MD support for xcall(9) interface. |
491 | */ | | 491 | */ |
492 | | | 492 | |
493 | void | | 493 | void |
494 | xc_send_ipi(struct cpu_info *ci) | | 494 | xc_send_ipi(struct cpu_info *ci) |
495 | { | | 495 | { |
496 | KASSERT(kpreempt_disabled()); | | 496 | KASSERT(kpreempt_disabled()); |
497 | KASSERT(curcpu() != ci); | | 497 | KASSERT(curcpu() != ci); |
498 | | | 498 | |
499 | cpuid_t target = (ci != NULL ? cpu_index(ci) : IPI_DST_NOTME); | | 499 | cpuid_t target = (ci != NULL ? cpu_index(ci) : IPI_DST_NOTME); |
500 | | | 500 | |
501 | /* Unicast: remote CPU. */ | | 501 | /* Unicast: remote CPU. */ |
502 | /* Broadcast: all, but local CPU (caller will handle it). */ | | 502 | /* Broadcast: all, but local CPU (caller will handle it). */ |
503 | cpu_send_ipi(target, IPI_XCALL); | | 503 | cpu_send_ipi(target, IPI_XCALL); |
504 | } | | 504 | } |
505 | #endif /* MULTIPROCESSOR */ | | 505 | #endif /* MULTIPROCESSOR */ |
506 | | | 506 | |
507 | #ifdef MODULAR | | 507 | #ifdef MODULAR |
508 | /* | | 508 | /* |
509 | * Push any modules loaded by the boot loader. | | 509 | * Push any modules loaded by the boot loader. |
510 | */ | | 510 | */ |
511 | void | | 511 | void |
512 | module_init_md(void) | | 512 | module_init_md(void) |
513 | { | | 513 | { |
514 | } | | 514 | } |
515 | #endif /* MODULAR */ | | 515 | #endif /* MODULAR */ |
516 | | | 516 | |
517 | bool | | 517 | bool |
518 | mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap) | | 518 | mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap) |
519 | { | | 519 | { |
520 | if (atop(pa) < physmem) { | | 520 | if (atop(pa) < physmem) { |
521 | *vap = pa; | | 521 | *vap = pa; |
522 | return true; | | 522 | return true; |
523 | } | | 523 | } |
524 | | | 524 | |
525 | return false; | | 525 | return false; |
526 | } | | 526 | } |
527 | | | 527 | |
528 | int | | 528 | int |
529 | mm_md_physacc(paddr_t pa, vm_prot_t prot) | | 529 | mm_md_physacc(paddr_t pa, vm_prot_t prot) |
530 | { | | 530 | { |
531 | | | 531 | |
532 | return (atop(pa) < physmem) ? 0 : EFAULT; | | 532 | return (atop(pa) < physmem) ? 0 : EFAULT; |
533 | } | | 533 | } |
534 | | | 534 | |
535 | int | | 535 | int |
536 | mm_md_kernacc(void *va, vm_prot_t prot, bool *handled) | | 536 | mm_md_kernacc(void *va, vm_prot_t prot, bool *handled) |
537 | { | | 537 | { |
538 | if (atop((paddr_t)va) < physmem) { | | 538 | if (atop((paddr_t)va) < physmem) { |
539 | *handled = true; | | 539 | *handled = true; |
540 | return 0; | | 540 | return 0; |
541 | } | | 541 | } |
542 | | | 542 | |
543 | if ((vaddr_t)va < VM_MIN_KERNEL_ADDRESS | | 543 | if ((vaddr_t)va < VM_MIN_KERNEL_ADDRESS |
544 | || (vaddr_t)va >= VM_MAX_KERNEL_ADDRESS) | | 544 | || (vaddr_t)va >= VM_MAX_KERNEL_ADDRESS) |
545 | return EFAULT; | | 545 | return EFAULT; |
546 | | | 546 | |
547 | *handled = false; | | 547 | *handled = false; |
548 | return 0; | | 548 | return 0; |
549 | } | | 549 | } |
550 | | | 550 | |