Fri Sep 21 09:05:09 2012 UTC
adapt for _UC_TLSBASE.

Approved by manu@ and msaitoh@


(ryo)
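
For context: _UC_TLSBASE marks an mcontext whose _REG_GBR slot carries a valid TLS base; on sh3 the userland thread pointer is kept in GBR, so this commit makes the sh3 get/set-mcontext code honor that flag. Below is a minimal userland sketch (illustrative only, not part of the commit; it assumes _UC_TLSBASE and _REG_GBR are exposed to userland through <ucontext.h> on this port) of what the flag now conveys:

/*
 * Sketch: getcontext(2) fills uc_flags through cpu_getmcontext(), which
 * after this change advertises _UC_TLSBASE next to _UC_CPU, with the
 * TLS base saved in the _REG_GBR slot.
 */
#include <stdint.h>
#include <stdio.h>
#include <ucontext.h>

int
main(void)
{
	ucontext_t uc;

	if (getcontext(&uc) == -1)
		return 1;

	if (uc.uc_flags & _UC_TLSBASE)
		printf("TLS base (GBR) = %p\n",
		    (void *)(uintptr_t)uc.uc_mcontext.__gregs[_REG_GBR]);

	return 0;
}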
diff -r1.100 -r1.101 src/sys/arch/sh3/sh3/sh3_machdep.c

--- src/sys/arch/sh3/sh3/sh3_machdep.c 2012/07/08 20:14:12 1.100
+++ src/sys/arch/sh3/sh3/sh3_machdep.c 2012/09/21 09:05:08 1.101
@@ -1,572 +1,574 @@
-/* $NetBSD: sh3_machdep.c,v 1.100 2012/07/08 20:14:12 dsl Exp $ */
+/* $NetBSD: sh3_machdep.c,v 1.101 2012/09/21 09:05:08 ryo Exp $ */
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2002 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
  * Simulation Facility, NASA Ames Research Center.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*-
  * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * William Jolitz.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * @(#)machdep.c 7.4 (Berkeley) 6/3/91
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sh3_machdep.c,v 1.100 2012/07/08 20:14:12 dsl Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sh3_machdep.c,v 1.101 2012/09/21 09:05:08 ryo Exp $");
 
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
 #include "opt_memsize.h"
 #include "opt_kstack_debug.h"
 #include "opt_ptrace.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 
 #include <sys/buf.h>
 #include <sys/exec.h>
 #include <sys/kernel.h>
 #include <sys/malloc.h>
 #include <sys/mount.h>
 #include <sys/proc.h>
 #include <sys/signalvar.h>
 #include <sys/ras.h>
 #include <sys/syscallargs.h>
 #include <sys/ucontext.h>
 #include <sys/cpu.h>
 #include <sys/bus.h>
 
 #ifdef KGDB
 #include <sys/kgdb.h>
 #ifndef KGDB_DEVNAME
 #define KGDB_DEVNAME "nodev"
 #endif
 const char kgdb_devname[] = KGDB_DEVNAME;
 #endif /* KGDB */
 
 #include <uvm/uvm.h>
 
 #include <sh3/cache.h>
 #include <sh3/clock.h>
 #include <sh3/exception.h>
 #include <sh3/locore.h>
 #include <sh3/mmu.h>
 #include <sh3/pcb.h>
 #include <sh3/intr.h>
 #include <sh3/ubcreg.h>
 
 /* Our exported CPU info; we can have only one. */
 struct cpu_info cpu_info_store;
 int cpu_arch;
 int cpu_product;
 char cpu_model[120];
 
 struct vm_map *phys_map;
 
 struct pcb *curpcb;
 
 #if !defined(IOM_RAM_BEGIN)
 #error "define IOM_RAM_BEGIN"
 #elif (IOM_RAM_BEGIN & SH3_P1SEG_BASE) != 0
 #error "IOM_RAM_BEGIN is physical address. not P1 address."
 #endif
 
 #define VBR (uint8_t *)SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN)
 vaddr_t ram_start = SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN);
 /* exception handler holder (sh3/sh3/exception_vector.S) */
 extern char sh_vector_generic[], sh_vector_generic_end[];
 extern char sh_vector_interrupt[], sh_vector_interrupt_end[];
 #ifdef SH3
 extern char sh3_vector_tlbmiss[], sh3_vector_tlbmiss_end[];
 #endif
 #ifdef SH4
 extern char sh4_vector_tlbmiss[], sh4_vector_tlbmiss_end[];
 #endif
 /*
  * These variables are needed by /sbin/savecore
  */
 uint32_t dumpmag = 0x8fca0101; /* magic number */
 int dumpsize; /* pages */
 long dumplo; /* blocks */
 
 
 void
 sh_cpu_init(int arch, int product)
 {
 	/* CPU type */
 	cpu_arch = arch;
 	cpu_product = product;
 
 #if defined(SH3) && defined(SH4)
 	/* Set register addresses */
 	sh_devreg_init();
 #endif
 	/* Cache access ops. */
 	sh_cache_init();
 
 	/* MMU access ops. */
 	sh_mmu_init();
 
 	/* Hardclock, RTC initialize. */
 	machine_clock_init();
 
 	/* ICU initiailze. */
 	curcpu()->ci_idepth = -1;
 	intc_init();
 
 	/* Exception vector. */
 	memcpy(VBR + 0x100, sh_vector_generic,
 	    sh_vector_generic_end - sh_vector_generic);
 #ifdef SH3
 	if (CPU_IS_SH3)
 		memcpy(VBR + 0x400, sh3_vector_tlbmiss,
 		    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
 #endif
 #ifdef SH4
 	if (CPU_IS_SH4)
 		memcpy(VBR + 0x400, sh4_vector_tlbmiss,
 		    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss);
 #endif
 	memcpy(VBR + 0x600, sh_vector_interrupt,
 	    sh_vector_interrupt_end - sh_vector_interrupt);
 
 	if (!SH_HAS_UNIFIED_CACHE)
 		sh_icache_sync_all();
 
 	__asm volatile("ldc %0, vbr" :: "r"(VBR));
 
 	/* kernel stack setup */
 	__sh_switch_resume = CPU_IS_SH3 ? sh3_switch_resume : sh4_switch_resume;
 
 	/* Set page size (4KB) */
 	uvm_setpagesize();
 
 	/* setup UBC channel A for single-stepping */
 #if defined(PTRACE) || defined(DDB)
 	_reg_write_2(SH_(BBRA), 0); /* disable channel A */
 	_reg_write_2(SH_(BBRB), 0); /* disable channel B */
 
 #ifdef SH3
 	if (CPU_IS_SH3) {
 		/* A: break after execution, ignore ASID */
 		_reg_write_4(SH3_BRCR, (UBC_CTL_A_AFTER_INSN
 		    | SH3_UBC_CTL_A_MASK_ASID));
 
 		/* A: compare all address bits */
 		_reg_write_4(SH3_BAMRA, 0x00000000);
 	}
 #endif /* SH3 */
 
 #ifdef SH4
 	if (CPU_IS_SH4) {
 		/* A: break after execution */
 		_reg_write_2(SH4_BRCR, UBC_CTL_A_AFTER_INSN);
 
 		/* A: compare all address bits, ignore ASID */
 		_reg_write_1(SH4_BAMRA, SH4_UBC_MASK_NONE | SH4_UBC_MASK_ASID);
 	}
 #endif /* SH4 */
 #endif
 }
 
 
 /*
  * void sh_proc0_init(void):
  * Setup proc0 u-area.
  */
 void
 sh_proc0_init(void)
 {
 	struct switchframe *sf;
 	vaddr_t u;
 
 	/* Steal process0 u-area */
 	u = uvm_pageboot_alloc(USPACE);
 	memset((void *)u, 0, USPACE);
 
 	/* Setup uarea for lwp0 */
 	uvm_lwp_setuarea(&lwp0, u);
 
 	/*
 	 * u-area map:
 	 * |pcb| .... | .................. |
 	 * | PAGE_SIZE | USPACE - PAGE_SIZE |
 	 * frame bot stack bot
 	 * current frame ... r6_bank
 	 * stack bottom ... r7_bank
 	 * current stack ... r15
 	 */
 	curpcb = lwp_getpcb(&lwp0);
 	lwp0.l_md.md_pcb = curpcb;
 
 	sf = &curpcb->pcb_sf;
 
 #ifdef KSTACK_DEBUG
 	memset((char *)(u + sizeof(struct pcb)), 0x5a,
 	    PAGE_SIZE - sizeof(struct pcb));
 	memset((char *)(u + PAGE_SIZE), 0xa5, USPACE - PAGE_SIZE);
 	memset(sf, 0xb4, sizeof(struct switchframe));
 #endif /* KSTACK_DEBUG */
 
 	sf->sf_r6_bank = u + PAGE_SIZE;
 	sf->sf_r7_bank = sf->sf_r15 = u + USPACE;
 	__asm volatile("ldc %0, r6_bank" :: "r"(sf->sf_r6_bank));
 	__asm volatile("ldc %0, r7_bank" :: "r"(sf->sf_r7_bank));
 
 	lwp0.l_md.md_regs = (struct trapframe *)sf->sf_r6_bank - 1;
 }
 
 void
 sh_startup(void)
 {
 	vaddr_t minaddr, maxaddr;
 	char pbuf[9];
 
 	printf("%s%s", copyright, version);
 	if (*cpu_model != '\0')
 		printf("%s", cpu_model);
 #ifdef DEBUG
 	printf("general exception handler:\t%d byte\n",
 	    sh_vector_generic_end - sh_vector_generic);
 	printf("TLB miss exception handler:\t%d byte\n",
 #if defined(SH3) && defined(SH4)
 	    CPU_IS_SH3 ? sh3_vector_tlbmiss_end - sh3_vector_tlbmiss :
 	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
 #elif defined(SH3)
 	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss
 #elif defined(SH4)
 	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
 #endif
 	    );
 	printf("interrupt exception handler:\t%d byte\n",
 	    sh_vector_interrupt_end - sh_vector_interrupt);
 #endif /* DEBUG */
 
 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
 	printf("total memory = %s\n", pbuf);
 
 	minaddr = 0;
 
 	/*
 	 * Allocate a submap for physio
 	 */
 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 	    VM_PHYS_SIZE, 0, false, NULL);
 
 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
 	printf("avail memory = %s\n", pbuf);
 }
 
 /*
  * This is called by main to set dumplo and dumpsize.
  * Dumps always skip the first CLBYTES of disk space
  * in case there might be a disk label stored there.
  * If there is extra space, put dump at the end to
  * reduce the chance that swapping trashes it.
  */
 void
 cpu_dumpconf(void)
 {
 }
 
 void
 dumpsys(void)
 {
 }
 
 /*
  * Get the base address of the signal frame either on the lwp's stack
  * or on the signal stack and set *onstack accordingly. Caller then
  * just subtracts the size of appropriate struct sigframe_foo.
  */
 void *
 getframe(const struct lwp *l, int sig, int *onstack)
 {
 	const struct proc *p = l->l_proc;
 	const struct sigaltstack *sigstk= &l->l_sigstk;
 
 	/* Do we need to jump onto the signal stack? */
 	*onstack = (sigstk->ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0
 	    && (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
 
 	if (*onstack)
 		return ((char *)sigstk->ss_sp + sigstk->ss_size);
 	else
 		return ((void *)l->l_md.md_regs->tf_r15);
 }
 
 void
 sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
 {
 	struct lwp *l = curlwp;
 	struct proc *p = l->l_proc;
 	struct sigacts *ps = p->p_sigacts;
 	struct trapframe *tf = l->l_md.md_regs;
 	int sig = ksi->ksi_signo, error;
 	sig_t catcher = SIGACTION(p, sig).sa_handler;
 	struct sigframe_siginfo *fp, frame;
 	int onstack;
 
 	fp = getframe(l, sig, &onstack);
 	--fp;
 
 	frame.sf_si._info = ksi->ksi_info;
 	frame.sf_uc.uc_link = l->l_ctxlink;
 	frame.sf_uc.uc_sigmask = *mask;
 	frame.sf_uc.uc_flags = _UC_SIGMASK;
 	frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK)
 	    ? _UC_SETSTACK : _UC_CLRSTACK;
 	memset(&frame.sf_uc.uc_stack, 0, sizeof(frame.sf_uc.uc_stack));
 	sendsig_reset(l, sig);
 	mutex_exit(p->p_lock);
 	cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
 	error = copyout(&frame, fp, sizeof(frame));
 	mutex_enter(p->p_lock);
 
 	if (error != 0) {
 		/*
 		 * Process has trashed its stack; give it an illegal
 		 * instruction to halt it in its tracks.
 		 */
 		sigexit(l, SIGILL);
 		/* NOTREACHED */
 	}
 
 	tf->tf_r4 = sig; /* "signum" argument for handler */
 	tf->tf_r5 = (int)&fp->sf_si; /* "sip" argument for handler */
 	tf->tf_r6 = (int)&fp->sf_uc; /* "ucp" argument for handler */
 	tf->tf_spc = (int)catcher;
 	tf->tf_r15 = (int)fp;
 	tf->tf_pr = (int)ps->sa_sigdesc[sig].sd_tramp;
 
 	/* Remember if we're now on the signal stack. */
 	if (onstack)
 		l->l_sigstk.ss_flags |= SS_ONSTACK;
 }
 
 void
 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
 {
 	const struct trapframe *tf = l->l_md.md_regs;
 	__greg_t *gr = mcp->__gregs;
 	__greg_t ras_pc;
 
 	/* Save register context. */
 	gr[_REG_GBR] = tf->tf_gbr;
 	gr[_REG_PC] = tf->tf_spc;
 	gr[_REG_SR] = tf->tf_ssr;
 	gr[_REG_MACL] = tf->tf_macl;
 	gr[_REG_MACH] = tf->tf_mach;
 	gr[_REG_PR] = tf->tf_pr;
 	gr[_REG_R14] = tf->tf_r14;
 	gr[_REG_R13] = tf->tf_r13;
 	gr[_REG_R12] = tf->tf_r12;
 	gr[_REG_R11] = tf->tf_r11;
 	gr[_REG_R10] = tf->tf_r10;
 	gr[_REG_R9] = tf->tf_r9;
 	gr[_REG_R8] = tf->tf_r8;
 	gr[_REG_R7] = tf->tf_r7;
 	gr[_REG_R6] = tf->tf_r6;
 	gr[_REG_R5] = tf->tf_r5;
 	gr[_REG_R4] = tf->tf_r4;
 	gr[_REG_R3] = tf->tf_r3;
 	gr[_REG_R2] = tf->tf_r2;
 	gr[_REG_R1] = tf->tf_r1;
 	gr[_REG_R0] = tf->tf_r0;
 	gr[_REG_R15] = tf->tf_r15;
 
 	if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
 	    (void *) gr[_REG_PC])) != -1)
 		gr[_REG_PC] = ras_pc;
 
-	*flags |= _UC_CPU;
+	*flags |= (_UC_CPU|_UC_TLSBASE);
 
 	/* FPU context is currently not handled by the kernel. */
 	memset(&mcp->__fpregs, 0, sizeof (mcp->__fpregs));
 }
 
 int
 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
 {
 	struct trapframe *tf = l->l_md.md_regs;
 	const __greg_t *gr = mcp->__gregs;
 
 	if (((tf->tf_ssr ^ gr[_REG_SR]) & PSL_USERSTATIC) != 0)
 		return EINVAL;
 
 	return 0;
 }
 
 int
 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
 {
 	struct trapframe *tf = l->l_md.md_regs;
 	const __greg_t *gr = mcp->__gregs;
 	struct proc *p = l->l_proc;
 	int error;
 
 	/* Restore register context, if any. */
 	if ((flags & _UC_CPU) != 0) {
 		/* Check for security violations. */
 		error = cpu_mcontext_validate(l, mcp);
 		if (error)
 			return error;
 
-		tf->tf_gbr = gr[_REG_GBR];
+		/* done in lwp_setprivate */
+		/* tf->tf_gbr = gr[_REG_GBR]; */
 		tf->tf_spc = gr[_REG_PC];
 		tf->tf_ssr = gr[_REG_SR];
 		tf->tf_macl = gr[_REG_MACL];
 		tf->tf_mach = gr[_REG_MACH];
 		tf->tf_pr = gr[_REG_PR];
 		tf->tf_r14 = gr[_REG_R14];
 		tf->tf_r13 = gr[_REG_R13];
 		tf->tf_r12 = gr[_REG_R12];
 		tf->tf_r11 = gr[_REG_R11];
 		tf->tf_r10 = gr[_REG_R10];
 		tf->tf_r9 = gr[_REG_R9];
 		tf->tf_r8 = gr[_REG_R8];
 		tf->tf_r7 = gr[_REG_R7];
 		tf->tf_r6 = gr[_REG_R6];
 		tf->tf_r5 = gr[_REG_R5];
 		tf->tf_r4 = gr[_REG_R4];
 		tf->tf_r3 = gr[_REG_R3];
 		tf->tf_r2 = gr[_REG_R2];
 		tf->tf_r1 = gr[_REG_R1];
 		tf->tf_r0 = gr[_REG_R0];
 		tf->tf_r15 = gr[_REG_R15];
 
-		lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_GBR]);
+		if (flags & _UC_TLSBASE)
+			lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_GBR]);
 	}
 
 #if 0
 	/* XXX: FPU context is currently not handled by the kernel. */
 	if (flags & _UC_FPU) {
 		/* TODO */;
 	}
 #endif
 
 	mutex_enter(p->p_lock);
 	if (flags & _UC_SETSTACK)
 		l->l_sigstk.ss_flags |= SS_ONSTACK;
 	if (flags & _UC_CLRSTACK)
 		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
 	mutex_exit(p->p_lock);
 
 	return (0);
 }
 
 /*
  * Clear registers on exec
  */
 void
 setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
 {
 	struct trapframe *tf;
 
 	l->l_md.md_flags &= ~(MDL_USEDFPU | MDL_SSTEP);
 
 	tf = l->l_md.md_regs;
 
 	tf->tf_ssr = PSL_USERSET;
 	tf->tf_spc = pack->ep_entry;
 	tf->tf_pr = 0;
 
 	tf->tf_gbr = 0;
 	tf->tf_macl = 0;
 	tf->tf_mach = 0;
 
 	tf->tf_r0 = 0;
 	tf->tf_r1 = 0;
 	tf->tf_r2 = 0;
 	tf->tf_r3 = 0;
 	tf->tf_r4 = fuword((void *)stack); /* argc */
 	tf->tf_r5 = stack + 4; /* argv */
 	tf->tf_r6 = stack + 4 * tf->tf_r4 + 8; /* envp */
 	tf->tf_r7 = 0;
 	tf->tf_r8 = 0;
 	tf->tf_r9 = l->l_proc->p_psstrp;
 	tf->tf_r10 = 0;
 	tf->tf_r11 = 0;
 	tf->tf_r12 = 0;
 	tf->tf_r13 = 0;
 	tf->tf_r14 = 0;
 	tf->tf_r15 = stack;
 }
 
 /*
  * Jump to reset vector.
  */
 void
 cpu_reset(void)
 {
 
 	_cpu_exception_suspend();
 	_reg_write_4(SH_(EXPEVT), EXPEVT_RESET_MANUAL);
 
 #ifndef __lint__
 	goto *(void *)0xa0000000;
 #endif
 	/* NOTREACHED */
 }
 
 int
 cpu_lwp_setprivate(lwp_t *l, void *addr)
 {
 
 	l->l_md.md_regs->tf_gbr = (int)addr;
 	return 0;
 }
 
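
A note on the cpu_setmcontext() side of the change: the TLS base is now installed through lwp_setprivate() only when the caller passes _UC_TLSBASE, and tf_gbr is no longer written directly from _REG_GBR. A hedged sketch of a caller that uses this to restore CPU state while leaving the current thread pointer alone (illustrative only; the helper name restore_regs_only is made up, and the same userland visibility of _UC_TLSBASE is assumed):

/*
 * Sketch: restore the saved registers but keep the calling lwp's TLS
 * base (GBR).  Clearing _UC_TLSBASE makes the kernel skip the
 * lwp_setprivate() call gated above.
 */
#include <ucontext.h>

void
restore_regs_only(ucontext_t *uc)
{
	uc->uc_flags &= ~_UC_TLSBASE;
	setcontext(uc);		/* does not return on success */
}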