Mon Jul 20 05:10:49 2009 UTC
Fix a few things, and add a few files.
  For the current status, see the following thread:
    http://mail-index.netbsd.org/port-ia64/2009/06/18/msg000102.html
Also call banner().


(kiyohara)
diff -r1.15 -r1.16 src/sys/arch/ia64/ia64/machdep.c
diff -r1.4 -r1.5 src/sys/arch/ia64/ia64/mainbus.c
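
The banner() call mentioned above replaces the port-local "avail memory"
report in cpu_startup().  A minimal before/after sketch, taken from the
removed and added lines in the machdep.c diff below (banner() is the
machine-independent helper; its exact output is not shown by this diff
and is assumed to cover the same information):

	/* Before: cpu_startup() formatted the free-memory figure itself. */
	char pbuf[9];

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	/* After: a single call to the MI helper does the reporting. */
	banner();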

cvs diff -r1.15 -r1.16 src/sys/arch/ia64/ia64/machdep.c

--- src/sys/arch/ia64/ia64/machdep.c 2009/03/18 16:00:12 1.15
+++ src/sys/arch/ia64/ia64/machdep.c 2009/07/20 05:10:49 1.16
@@ -1,14 +1,14 @@
1/* $NetBSD: machdep.c,v 1.15 2009/03/18 16:00:12 cegger Exp $ */ 1/* $NetBSD: machdep.c,v 1.16 2009/07/20 05:10:49 kiyohara Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2003,2004 Marcel Moolenaar 4 * Copyright (c) 2003,2004 Marcel Moolenaar
5 * Copyright (c) 2000,2001 Doug Rabson 5 * Copyright (c) 2000,2001 Doug Rabson
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
@@ -52,54 +52,54 @@
52 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
53 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
54 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
55 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
56 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 56 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
57 * POSSIBILITY OF SUCH DAMAGE. 57 * POSSIBILITY OF SUCH DAMAGE.
58 */ 58 */
59 59
60/* 60/*
61 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. 61 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
62 * All rights reserved. 62 * All rights reserved.
63 * 63 *
64 * Author: Chris G. Demetriou 64 * Author: Chris G. Demetriou
65 *  65 *
66 * Permission to use, copy, modify and distribute this software and 66 * Permission to use, copy, modify and distribute this software and
67 * its documentation is hereby granted, provided that both the copyright 67 * its documentation is hereby granted, provided that both the copyright
68 * notice and this permission notice appear in all copies of the 68 * notice and this permission notice appear in all copies of the
69 * software, derivative works or modified versions, and any portions 69 * software, derivative works or modified versions, and any portions
70 * thereof, and that both notices appear in supporting documentation. 70 * thereof, and that both notices appear in supporting documentation.
71 *  71 *
72 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"  72 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
73 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND  73 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
74 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 74 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
75 *  75 *
76 * Carnegie Mellon requests users of this software to return to 76 * Carnegie Mellon requests users of this software to return to
77 * 77 *
78 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 78 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
79 * School of Computer Science 79 * School of Computer Science
80 * Carnegie Mellon University 80 * Carnegie Mellon University
81 * Pittsburgh PA 15213-3890 81 * Pittsburgh PA 15213-3890
82 * 82 *
83 * any improvements or extensions that they make and grant Carnegie the 83 * any improvements or extensions that they make and grant Carnegie the
84 * rights to redistribute these changes. 84 * rights to redistribute these changes.
85 */ 85 */
86 86
87#include <sys/cdefs.h> 87#include <sys/cdefs.h>
88/*__FBSDID("$FreeBSD: src/sys/ia64/ia64/machdep.c,v 1.203 2005/10/14 12:43:45 davidxu Exp $"); */ 88/*__FBSDID("$FreeBSD: src/sys/ia64/ia64/machdep.c,v 1.203 2005/10/14 12:43:45 davidxu Exp $"); */
89 89
90#include "opt_modular.h" 90#include "opt_modular.h"
91 91
92#include <sys/param.h>  92#include <sys/param.h>
93#include <sys/cpu.h> 93#include <sys/cpu.h>
94#include <sys/exec.h> 94#include <sys/exec.h>
95#include <sys/ksyms.h> 95#include <sys/ksyms.h>
96#include <sys/sa.h> 96#include <sys/sa.h>
97#include <sys/savar.h> 97#include <sys/savar.h>
98#include <sys/msgbuf.h> 98#include <sys/msgbuf.h>
99#include <sys/mutex.h> 99#include <sys/mutex.h>
100#include <sys/proc.h> 100#include <sys/proc.h>
101#include <sys/reboot.h> 101#include <sys/reboot.h>
102#include <sys/systm.h> 102#include <sys/systm.h>
103#include <sys/user.h> 103#include <sys/user.h>
104 104
105#include <machine/ia64_cpu.h> 105#include <machine/ia64_cpu.h>
@@ -141,331 +141,323 @@ char cpu_model[64];
141char cpu_family[64]; 141char cpu_family[64];
142 142
143vaddr_t kernstart, kernend; 143vaddr_t kernstart, kernend;
144 144
145/* XXX: Move this stuff to cpu_info */ 145/* XXX: Move this stuff to cpu_info */
146 146
147uint64_t processor_frequency; 147uint64_t processor_frequency;
148uint64_t bus_frequency; 148uint64_t bus_frequency;
149uint64_t itc_frequency; 149uint64_t itc_frequency;
150uint64_t ia64_pal_base; 150uint64_t ia64_pal_base;
151uint64_t ia64_port_base; 151uint64_t ia64_port_base;
152 152
153 153
154extern u_int64_t ia64_gateway_page[]; 154extern uint64_t ia64_gateway_page[];
155 155
156u_int64_t pa_bootinfo; 156uint64_t pa_bootinfo;
157struct bootinfo bootinfo; 157struct bootinfo bootinfo;
158 158
159 159
160extern vaddr_t kernel_text, end; 160extern vaddr_t kernel_text, end;
161 161
162struct fpswa_iface *fpswa_iface; 162struct fpswa_iface *fpswa_iface;
163 163
164struct user *proc0paddr; /* XXX: See: kern/kern_proc.c:proc0_init() */ 164struct user *proc0paddr; /* XXX: See: kern/kern_proc.c:proc0_init() */
165 165
166#define Mhz 1000000L 166#define Mhz 1000000L
167#define Ghz (1000L*Mhz) 167#define Ghz (1000L*Mhz)
168 168
169static void 169static void
170identifycpu(void) 170identifycpu(void)
171{  171{
172 u_int64_t vendor[3]; 172 uint64_t vendor[3];
173 const char *family_name, *model_name; 173 const char *family_name, *model_name;
174 u_int64_t features, tmp; 174 uint64_t features, tmp;
175 int number, revision, model, family, archrev; 175 int number, revision, model, family, archrev;
176  176
177 /*  177 /*
178 * Assumes little-endian. 178 * Assumes little-endian.
179 */ 179 */
180 vendor[0] = ia64_get_cpuid(0); 180 vendor[0] = ia64_get_cpuid(0);
181 vendor[1] = ia64_get_cpuid(1);  181 vendor[1] = ia64_get_cpuid(1);
182 vendor[2] = '\0';  182 vendor[2] = '\0';
183  183
184 tmp = ia64_get_cpuid(3); 184 tmp = ia64_get_cpuid(3);
185 number = (tmp >> 0) & 0xff; 185 number = (tmp >> 0) & 0xff;
186 revision = (tmp >> 8) & 0xff; 186 revision = (tmp >> 8) & 0xff;
187 model = (tmp >> 16) & 0xff; 187 model = (tmp >> 16) & 0xff;
188 family = (tmp >> 24) & 0xff; 188 family = (tmp >> 24) & 0xff;
189 archrev = (tmp >> 32) & 0xff; 189 archrev = (tmp >> 32) & 0xff;
190  190
191 family_name = model_name = "unknown"; 191 family_name = model_name = "unknown";
192 switch (family) { 192 switch (family) {
193 case 0x07: 193 case 0x07:
194 family_name = "Itanium"; 194 family_name = "Itanium";
195 model_name = "Merced"; 195 model_name = "Merced";
196 break; 196 break;
197 case 0x1f: 197 case 0x1f:
198 family_name = "Itanium 2"; 198 family_name = "Itanium 2";
199 switch (model) { 199 switch (model) {
200 case 0x00: 200 case 0x00:
201 model_name = "McKinley"; 201 model_name = "McKinley";
202 break; 202 break;
203 case 0x01: 203 case 0x01:
204 /* 204 /*
205 * Deerfield is a low-voltage variant based on the 205 * Deerfield is a low-voltage variant based on the
206 * Madison core. We need circumstantial evidence 206 * Madison core. We need circumstantial evidence
207 * (i.e. the clock frequency) to identify those. 207 * (i.e. the clock frequency) to identify those.
208 * Allow for roughly 1% error margin. 208 * Allow for roughly 1% error margin.
209 */  209 */
210 tmp = processor_frequency >> 7;  210 tmp = processor_frequency >> 7;
211 if ((processor_frequency - tmp) < 1*Ghz && 211 if ((processor_frequency - tmp) < 1*Ghz &&
212 (processor_frequency + tmp) >= 1*Ghz) 212 (processor_frequency + tmp) >= 1*Ghz)
213 model_name = "Deerfield"; 213 model_name = "Deerfield";
214 else  214 else
215 model_name = "Madison"; 215 model_name = "Madison";
216 break; 216 break;
217 case 0x02: 217 case 0x02:
218 model_name = "Madison II"; 218 model_name = "Madison II";
219 break; 219 break;
220 } 220 }
221 break; 221 break;
222 } 222 }
223 snprintf(cpu_family, sizeof(cpu_family), "%s", family_name); 223 snprintf(cpu_family, sizeof(cpu_family), "%s", family_name);
224 snprintf(cpu_model, sizeof(cpu_model), "%s", model_name); 224 snprintf(cpu_model, sizeof(cpu_model), "%s", model_name);
225  225
226 features = ia64_get_cpuid(4); 226 features = ia64_get_cpuid(4);
227  227
228 printf("CPU: %s (", model_name); 228 printf("CPU: %s (", model_name);
229 if (processor_frequency) { 229 if (processor_frequency) {
230 printf("%ld.%02ld-Mhz ",  230 printf("%ld.%02ld-Mhz ", (processor_frequency + 4999) / Mhz,
231 (processor_frequency + 4999) / Mhz, 231 ((processor_frequency + 4999) / (Mhz/100)) % 100);
232 ((processor_frequency + 4999) / (Mhz/100)) % 100); 232 }
233 } 233 printf("%s)\n", family_name);
234 printf("%s)\n", family_name); 234 printf(" Origin = \"%s\" Revision = %d\n", (char *) vendor, revision);
235 printf(" Origin = \"%s\" Revision = %d\n", (char *) vendor, revision); 235 printf(" Features = 0x%x\n", (uint32_t) features);
236 printf(" Features = 0x%x\n", (u_int32_t) features); 
237 236
238} 237}
239 238
240/* 239/*
241 * Machine-dependent startup code 240 * Machine-dependent startup code
242 */ 241 */
243void 242void
244cpu_startup(void) 243cpu_startup(void)
245{ 244{
246 vaddr_t minaddr, maxaddr; 245 vaddr_t minaddr, maxaddr;
247  
248 char pbuf[9]; 
249 246
250 /* 247 /*
251 * Good {morning,afternoon,evening,night}. 248 * Good {morning,afternoon,evening,night}.
252 */ 249 */
253 identifycpu(); 250 identifycpu();
254 251
255 /* XXX: startrtclock(); */ 252 /* XXX: startrtclock(); */
256#ifdef PERFMON 253#ifdef PERFMON
257 perfmon_init(); 254 perfmon_init();
258#endif 255#endif
259 printf("Detected memory = %ld (%ld MB)\n", ia64_ptob(physmem), 256 printf("Detected memory = %ld (%ld MB)\n", ia64_ptob(physmem),
260 ptoa(physmem) / 1048576); 257 ptoa(physmem) / 1048576);
 258
 259 /*
 260 * Display any holes after the first chunk of extended memory.
 261 */
 262 if (bootverbose) {
 263 int lcv, sizetmp;
261 264
262 /* 265 printf("Physical memory chunk(s):\n");
263 * Display any holes after the first chunk of extended memory. 266 for (lcv = 0;
264 */ 267 lcv < vm_nphysseg || vm_physmem[lcv].avail_end != 0;
265 if (bootverbose) { 268 lcv++) {
266 int lcv, sizetmp; 269 sizetmp = vm_physmem[lcv].avail_end -
267 270 vm_physmem[lcv].avail_start;
268 printf("Physical memory chunk(s):\n"); 271
269 for (lcv = 0; lcv < vm_nphysseg || vm_physmem[lcv].avail_end != 0; lcv++) { 272 printf("0x%016lx - 0x%016lx, %ld bytes (%d pages)\n",
270 sizetmp = vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start; 273 ptoa(vm_physmem[lcv].avail_start),
271 274 ptoa(vm_physmem[lcv].avail_end) - 1,
272 printf("0x%016lx - 0x%016lx, %ld bytes (%d pages)\n", ptoa(vm_physmem[lcv].avail_start), 275 ptoa(sizetmp), sizetmp);
273 ptoa(vm_physmem[lcv].avail_end) - 1, ptoa(sizetmp), sizetmp); 276 }
274 } 277 printf("Total number of segments: vm_nphysseg = %d \n",
275 printf("Total number of segments: vm_nphysseg = %d \n", vm_nphysseg); 278 vm_nphysseg);
276 } 279 }
277 280
278 minaddr = 0; 281 minaddr = 0;
279 282
280 /* 283 /*
281 * Allocate a submap for physio 284 * Allocate a submap for physio
282 */ 285 */
283 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 286 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
284 VM_PHYS_SIZE, 0, false, NULL); 287 VM_PHYS_SIZE, 0, false, NULL);
285 288
286 /* 289 /*
287 * No need to allocate an mbuf cluster submap. Mbuf clusters 290 * No need to allocate an mbuf cluster submap. Mbuf clusters
288 * are allocated via the pool allocator, and we use RR7 to 291 * are allocated via the pool allocator, and we use RR7 to
289 * map those pages. 292 * map those pages.
290 */ 293 */
291 294
292 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free)); 295 banner();
293 printf("avail memory = %s\n", pbuf); 
294 296
295 if (fpswa_iface == NULL) 297 if (fpswa_iface == NULL)
296 printf("Warning: no FPSWA package supplied\n"); 298 printf("Warning: no FPSWA package supplied\n");
297 else 299 else
298 printf("FPSWA Revision = 0x%lx, Entry = %p\n", 300 printf("FPSWA Revision = 0x%lx, Entry = %p\n",
299 (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa); 301 (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa);
300 302
301 303
302 /* XXX: TODO this stuff when we start the platform port. 304 /*
303 * Traverse the MADT to discover IOSAPIC and Local SAPIC 305 * Traverse the MADT to discover IOSAPIC and Local SAPIC
304 * information. 306 * information.
305 */ 307 */
306 /*XXX: ia64_probe_sapics();*/ 308 ia64_probe_sapics();
307 /*XXX: ia64_mca_init();*/ 309 /*XXX: ia64_mca_init();*/
308} 310}
309 311
310void 312void
311cpu_reboot(int howto, char *bootstr) 313cpu_reboot(int howto, char *bootstr)
312{ 314{
313 315
314 efi_reset_system(); 316 efi_reset_system();
315 317
316 panic("XXX: Reset didn't work ? \n"); 318 panic("XXX: Reset didn't work ? \n");
317 /*NOTREACHED*/ 319 /*NOTREACHED*/
318} 320}
319 321
320lwp_t * 
321cpu_switchto(lwp_t *cur, lwp_t *new, bool b) 
322{ 
323 return new; 
324} 
325 
326bool 322bool
327cpu_intr_p(void) 323cpu_intr_p(void)
328{ 324{
329 return 0; 325 return 0;
330} 326}
331 327
332/* 328/*
333 * This is called by main to set dumplo and dumpsize. 329 * This is called by main to set dumplo and dumpsize.
334 * Dumps always skip the first PAGE_SIZE of disk space 330 * Dumps always skip the first PAGE_SIZE of disk space
335 * in case there might be a disk label stored there. 331 * in case there might be a disk label stored there.
336 * If there is extra space, put dump at the end to 332 * If there is extra space, put dump at the end to
337 * reduce the chance that swapping trashes it. 333 * reduce the chance that swapping trashes it.
338 */ 334 */
339void 335void
340cpu_dumpconf(void) 336cpu_dumpconf(void)
341{ 337{
342 return; 338 return;
343} 339}
344 340
345void 341void
346consinit(void) 
347{ 
348 cninit(); 
349} 
350 
351void 
352map_pal_code(void) 342map_pal_code(void)
353{ 343{
354 pt_entry_t pte; 344 pt_entry_t pte;
355 u_int64_t psr; 345 uint64_t psr;
356 346
357 if (ia64_pal_base == 0) 347 if (ia64_pal_base == 0)
358 return; 348 return;
359  349
360 pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY | 350 pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
361 PTE_PL_KERN | PTE_AR_RWX; 351 PTE_PL_KERN | PTE_AR_RWX;
362 pte |= ia64_pal_base & PTE_PPN_MASK; 352 pte |= ia64_pal_base & PTE_PPN_MASK;
363 353
364 __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" :: 354 __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
365 "r"(IA64_PHYS_TO_RR7(ia64_pal_base)), "r"(IA64_ID_PAGE_SHIFT<<2)); 355 "r"(IA64_PHYS_TO_RR7(ia64_pal_base)),
 356 "r"(IA64_ID_PAGE_SHIFT<<2));
366 357
367 __asm __volatile("mov %0=psr" : "=r"(psr)); 358 __asm __volatile("mov %0=psr" : "=r"(psr));
368 __asm __volatile("rsm psr.ic|psr.i"); 359 __asm __volatile("rsm psr.ic|psr.i");
369 __asm __volatile("srlz.i"); 360 ia64_srlz_i();
370 __asm __volatile("mov cr.ifa=%0" :: 361 ia64_set_ifa(IA64_PHYS_TO_RR7(ia64_pal_base));
371 "r"(IA64_PHYS_TO_RR7(ia64_pal_base))); 362 ia64_set_itir(IA64_ID_PAGE_SHIFT << 2);
372 __asm __volatile("mov cr.itir=%0" :: "r"(IA64_ID_PAGE_SHIFT << 2)); 363 ia64_srlz_d();
373 __asm __volatile("itr.d dtr[%0]=%1" :: "r"(1), "r"(*(pt_entry_t *)&pte));  364 __asm __volatile("itr.d dtr[%0]=%1" ::
374 __asm __volatile("srlz.d");  365 "r"(1), "r"(*(pt_entry_t *)&pte));
375 __asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(*(pt_entry_t *)&pte));  366 ia64_srlz_d();
 367 __asm __volatile("itr.i itr[%0]=%1" ::
 368 "r"(1), "r"(*(pt_entry_t *)&pte));
376 __asm __volatile("mov psr.l=%0" :: "r" (psr)); 369 __asm __volatile("mov psr.l=%0" :: "r" (psr));
377 __asm __volatile("srlz.i"); 370 ia64_srlz_i();
378} 371}
379 372
380void 373void
381map_gateway_page(void) 374map_gateway_page(void)
382{ 375{
383 pt_entry_t pte; 376 pt_entry_t pte;
384 u_int64_t psr; 377 uint64_t psr;
385 378
386 pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY | 379 pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
387 PTE_PL_KERN | PTE_AR_X_RX; 380 PTE_PL_KERN | PTE_AR_X_RX;
388 pte |= (uint64_t)ia64_gateway_page & PTE_PPN_MASK; 381 pte |= (uint64_t)ia64_gateway_page & PTE_PPN_MASK;
389 382
390 __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" :: 383 __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
391 "r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2)); 384 "r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2));
392 385
393 __asm __volatile("mov %0=psr" : "=r"(psr)); 386 __asm __volatile("mov %0=psr" : "=r"(psr));
394 __asm __volatile("rsm psr.ic|psr.i"); 387 __asm __volatile("rsm psr.ic|psr.i");
395 __asm __volatile("srlz.i"); 388 ia64_srlz_i();
396 __asm __volatile("mov cr.ifa=%0" :: "r"(VM_MAX_ADDRESS)); 389 ia64_set_ifa(VM_MAX_ADDRESS);
397 __asm __volatile("mov cr.itir=%0" :: "r"(PAGE_SHIFT << 2)); 390 ia64_set_itir(PAGE_SHIFT << 2);
 391 ia64_srlz_d();
398 __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(*(pt_entry_t*)&pte)); 392 __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(*(pt_entry_t*)&pte));
399 __asm __volatile("srlz.d");  393 ia64_srlz_d();
400 __asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(*(pt_entry_t*)&pte)); 394 __asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(*(pt_entry_t*)&pte));
401 __asm __volatile("mov psr.l=%0" :: "r" (psr)); 395 __asm __volatile("mov psr.l=%0" :: "r" (psr));
402 __asm __volatile("srlz.i"); 396 ia64_srlz_i();
403 397
404 /* Expose the mapping to userland in ar.k5 */ 398 /* Expose the mapping to userland in ar.k5 */
405 ia64_set_k5(VM_MAX_ADDRESS); 399 ia64_set_k5(VM_MAX_ADDRESS);
406} 400}
407 401
408static void 402static void
409calculate_frequencies(void) 403calculate_frequencies(void)
410{ 404{
411 struct ia64_sal_result sal; 405 struct ia64_sal_result sal;
412 struct ia64_pal_result pal; 406 struct ia64_pal_result pal;
413 407
414 sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0); 408 sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
415 pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0); 409 pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);
416 if (sal.sal_status == 0 && pal.pal_status == 0) { 410 if (sal.sal_status == 0 && pal.pal_status == 0) {
417 if (bootverbose) { 411 if (bootverbose) {
418 printf("Platform clock frequency %ld Hz\n", 412 printf("Platform clock frequency %ld Hz\n",
419 sal.sal_result[0]); 413 sal.sal_result[0]);
420 printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, " 414 printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
421 "ITC ratio %ld/%ld\n", 415 "ITC ratio %ld/%ld\n",
422 pal.pal_result[0] >> 32, 416 pal.pal_result[0] >> 32,
423 pal.pal_result[0] & ((1L << 32) - 1), 417 pal.pal_result[0] & ((1L << 32) - 1),
424 pal.pal_result[1] >> 32, 418 pal.pal_result[1] >> 32,
425 pal.pal_result[1] & ((1L << 32) - 1), 419 pal.pal_result[1] & ((1L << 32) - 1),
426 pal.pal_result[2] >> 32, 420 pal.pal_result[2] >> 32,
427 pal.pal_result[2] & ((1L << 32) - 1)); 421 pal.pal_result[2] & ((1L << 32) - 1));
428 } 422 }
429 processor_frequency = 423 processor_frequency =
430 sal.sal_result[0] * (pal.pal_result[0] >> 32) 424 sal.sal_result[0] * (pal.pal_result[0] >> 32)
431 / (pal.pal_result[0] & ((1L << 32) - 1)); 425 / (pal.pal_result[0] & ((1L << 32) - 1));
432 bus_frequency = 426 bus_frequency =
433 sal.sal_result[0] * (pal.pal_result[1] >> 32) 427 sal.sal_result[0] * (pal.pal_result[1] >> 32)
434 / (pal.pal_result[1] & ((1L << 32) - 1)); 428 / (pal.pal_result[1] & ((1L << 32) - 1));
435 itc_frequency = 429 itc_frequency =
436 sal.sal_result[0] * (pal.pal_result[2] >> 32) 430 sal.sal_result[0] * (pal.pal_result[2] >> 32)
437 / (pal.pal_result[2] & ((1L << 32) - 1)); 431 / (pal.pal_result[2] & ((1L << 32) - 1));
438 } 432 }
439 433
440} 434}
441 435
442 436
 437/* XXXX: Don't allocate 'ci' on stack. */
 438register struct cpu_info *ci __asm__("r13");
443void 439void
444ia64_init() 440ia64_init(void)
445{ 441{
446 
447 paddr_t kernstartpfn, kernendpfn, pfn0, pfn1; 442 paddr_t kernstartpfn, kernendpfn, pfn0, pfn1;
448 
449 struct efi_md *md; 443 struct efi_md *md;
450 444
451 register struct cpu_info *ci __asm__("r13"); 
452 
453 /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */ 445 /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */
454 446
455 /* 447 /*
456 * TODO: Disable interrupts, floating point etc. 448 * TODO: Disable interrupts, floating point etc.
457 * Maybe flush cache and tlb 449 * Maybe flush cache and tlb
458 */ 450 */
459 451
460 ia64_set_fpsr(IA64_FPSR_DEFAULT); 452 ia64_set_fpsr(IA64_FPSR_DEFAULT);
461 453
462 454
463 /* 455 /*
464 * TODO: Get critical system information (if possible, from the 456 * TODO: Get critical system information (if possible, from the
465 * information provided by the boot program). 457 * information provided by the boot program).
466 */ 458 */
467 459
468 /* 460 /*
469 * pa_bootinfo is the physical address of the bootinfo block as 461 * pa_bootinfo is the physical address of the bootinfo block as
470 * passed to us by the loader and set in locore.s. 462 * passed to us by the loader and set in locore.s.
471 */ 463 */
@@ -483,38 +475,38 @@ ia64_init()
483 */ 475 */
484 for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) { 476 for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
485 switch (md->md_type) { 477 switch (md->md_type) {
486 case EFI_MD_TYPE_IOPORT: 478 case EFI_MD_TYPE_IOPORT:
487 ia64_port_base = IA64_PHYS_TO_RR6(md->md_phys); 479 ia64_port_base = IA64_PHYS_TO_RR6(md->md_phys);
488 break; 480 break;
489 case EFI_MD_TYPE_PALCODE: 481 case EFI_MD_TYPE_PALCODE:
490 ia64_pal_base = md->md_phys; 482 ia64_pal_base = md->md_phys;
491 break; 483 break;
492 } 484 }
493 } 485 }
494 486
495 487
496 /* XXX: We need to figure out whether/how much of the FreeBSD  488 /* XXX: We need to figure out whether/how much of the FreeBSD
497 * getenv/setenv stuff we need. The info we get from ski 489 * getenv/setenv stuff we need. The info we get from ski
498 * is too trivial to go to the hassle of importing the  490 * is too trivial to go to the hassle of importing the
499 * FreeBSD environment stuff. 491 * FreeBSD environment stuff.
500 */ 492 */
501 493
502 /* 494 /*
503 * Look at arguments passed to us and compute boothowto. 495 * Look at arguments passed to us and compute boothowto.
504 */ 496 */
505 boothowto = bootinfo.bi_boothowto; 497 boothowto = bootinfo.bi_boothowto;
506 498
507 /* XXX: Debug: Override to verbose */ 499 /* XXX: Debug: Override to verbose */
508 boothowto |= AB_VERBOSE; 500 boothowto |= AB_VERBOSE;
509 501
510 502
511 /* 503 /*
512 * Initialize the console before we print anything out. 504 * Initialize the console before we print anything out.
513 */ 505 */
514 cninit(); 506 cninit();
515 507
516 /* OUTPUT NOW ALLOWED */ 508 /* OUTPUT NOW ALLOWED */
517 509
518 if (ia64_pal_base != 0) { 510 if (ia64_pal_base != 0) {
519 ia64_pal_base &= ~IA64_ID_PAGE_MASK; 511 ia64_pal_base &= ~IA64_ID_PAGE_MASK;
520 /* 512 /*
@@ -585,27 +577,27 @@ ia64_init()
585 /* 577 /*
586 * Wimp out for now since we do not DTRT here with 578 * Wimp out for now since we do not DTRT here with
587 * pci bus mastering (no bounce buffering, for example). 579 * pci bus mastering (no bounce buffering, for example).
588 */ 580 */
589 if (pfn0 >= ia64_btop(0x100000000UL)) { 581 if (pfn0 >= ia64_btop(0x100000000UL)) {
590 printf("Skipping memory chunk start 0x%lx\n", 582 printf("Skipping memory chunk start 0x%lx\n",
591 md->md_phys); 583 md->md_phys);
592 continue; 584 continue;
593 } 585 }
594 if (pfn1 >= ia64_btop(0x100000000UL)) { 586 if (pfn1 >= ia64_btop(0x100000000UL)) {
595 printf("Skipping memory chunk end 0x%lx\n", 587 printf("Skipping memory chunk end 0x%lx\n",
596 md->md_phys + md->md_pages * 4096); 588 md->md_phys + md->md_pages * 4096);
597 continue; 589 continue;
598 }  590 }
599 591
600 /* 592 /*
601 * We have a memory descriptor that describes conventional 593 * We have a memory descriptor that describes conventional
602 * memory that is for general use. We must determine if the 594 * memory that is for general use. We must determine if the
603 * loader has put the kernel in this region. 595 * loader has put the kernel in this region.
604 */ 596 */
605 physmem += (pfn1 - pfn0); 597 physmem += (pfn1 - pfn0);
606 if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) { 598 if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
607 /* 599 /*
608 * Must compute the location of the kernel 600 * Must compute the location of the kernel
609 * within the segment. 601 * within the segment.
610 */ 602 */
611#ifdef DEBUG 603#ifdef DEBUG
@@ -624,35 +616,35 @@ ia64_init()
624 616
625 } 617 }
626 if (kernendpfn < pfn1) { 618 if (kernendpfn < pfn1) {
627 /* 619 /*
628 * There is a chunk after the kernel. 620 * There is a chunk after the kernel.
629 */ 621 */
630#ifdef DEBUG 622#ifdef DEBUG
631 printf("Loading chunk after kernel: " 623 printf("Loading chunk after kernel: "
632 "0x%lx / 0x%lx\n", kernendpfn, pfn1); 624 "0x%lx / 0x%lx\n", kernendpfn, pfn1);
633#endif 625#endif
634 626
635 uvm_page_physload(kernendpfn, pfn1, 627 uvm_page_physload(kernendpfn, pfn1,
636 kernendpfn, pfn1, VM_FREELIST_DEFAULT); 628 kernendpfn, pfn1, VM_FREELIST_DEFAULT);
637  629
638 } 630 }
639 } else { 631 } else {
640 /* 632 /*
641 * Just load this cluster as one chunk. 633 * Just load this cluster as one chunk.
642 */ 634 */
643#ifdef DEBUG 635#ifdef DEBUG
644 printf("Loading descriptor %p: 0x%lx / 0x%lx\n", md, 636 printf("Loading descriptor %p: 0x%lx / 0x%lx\n",
645 pfn0, pfn1); 637 md, pfn0, pfn1);
646#endif 638#endif
647 639
648 uvm_page_physload(pfn0, pfn1, pfn0, pfn1, 640 uvm_page_physload(pfn0, pfn1, pfn0, pfn1,
649 VM_FREELIST_DEFAULT); 641 VM_FREELIST_DEFAULT);
650 642
651 } 643 }
652 } 644 }
653 645
654 if (physmem == 0) 646 if (physmem == 0)
655 panic("can't happen: system seems to have no memory!"); 647 panic("can't happen: system seems to have no memory!");
656 648
657 /* 649 /*
658 * Initialize the virtual memory system. 650 * Initialize the virtual memory system.
@@ -670,126 +662,138 @@ ia64_init()
670 * Init mapping for u page(s) for proc 0 662 * Init mapping for u page(s) for proc 0
671 */ 663 */
672 lwp0.l_addr = proc0paddr = 664 lwp0.l_addr = proc0paddr =
673 (struct user *)uvm_pageboot_alloc(UPAGES * PAGE_SIZE); 665 (struct user *)uvm_pageboot_alloc(UPAGES * PAGE_SIZE);
674 666
675 667
676 /* 668 /*
677 * Set the kernel sp, reserving space for an (empty) trapframe, 669 * Set the kernel sp, reserving space for an (empty) trapframe,
678 * and make proc0's trapframe pointer point to it for sanity. 670 * and make proc0's trapframe pointer point to it for sanity.
679 */ 671 */
680 672
681 /* 673 /*
682 * Process u-area is organised as follows: 674 * Process u-area is organised as follows:
683 *  675 *
684 * ----------------------------------------------------------- 676 * -----------------------------------------------------------
685 * | P | | | 16Bytes | T | 677 * | P | | | 16Bytes | T |
686 * | C | Register Stack | Memory Stack | <-----> | F | 678 * | C | Register Stack | Memory Stack | <-----> | F |
687 * | B | -------------> | <---------- | | | 679 * | B | -------------> | <---------- | | |
688 * ----------------------------------------------------------- 680 * -----------------------------------------------------------
689 * ^ ^ 681 * ^ ^
690 * |___ bspstore |___ sp 682 * |___ bspstore |___ sp
691 * 683 *
692 * ---------------------------> 684 * --------------------------->
693 * Higher Addresses 685 * Higher Addresses
694 * 686 *
695 * PCB: struct user; TF: struct trapframe;  687 * PCB: struct user; TF: struct trapframe;
696 */ 688 */
697 689
698 690
699 lwp0.l_md.md_tf = (struct trapframe *)  691 lwp0.l_md.md_tf = (struct trapframe *)((uint64_t)proc0paddr +
700 ((u_int64_t)proc0paddr + USPACE - sizeof(struct trapframe)); 692 USPACE - sizeof(struct trapframe));
701 693
702 proc0paddr->u_pcb.pcb_special.sp =  694 proc0paddr->u_pcb.pcb_special.sp =
703 (u_int64_t)lwp0.l_md.md_tf - 16; /* 16 bytes is the  695 (uint64_t)lwp0.l_md.md_tf - 16; /* 16 bytes is the
704 * scratch area defined  696 * scratch area defined
705 * by the ia64 ABI  697 * by the ia64 ABI
706 */ 698 */
707 699
708 proc0paddr->u_pcb.pcb_special.bspstore =  700 proc0paddr->u_pcb.pcb_special.bspstore =
709 (u_int64_t) proc0paddr + sizeof(struct user); 701 (uint64_t) proc0paddr + sizeof(struct user);
710 702
711 mutex_init(&proc0paddr->u_pcb.pcb_fpcpu_slock, MUTEX_SPIN, 0); 703 mutex_init(&proc0paddr->u_pcb.pcb_fpcpu_slock, MUTEX_DEFAULT, 0);
712 704
713 705
714 /* 706 /*
715 * Setup global data for the bootstrap cpu. 707 * Setup global data for the bootstrap cpu.
716 */ 708 */
717 709
718 710
719 ci = curcpu(); 711 ci = curcpu();
720 712
721 /* ar.k4 contains the cpu_info pointer to the  713 /* ar.k4 contains the cpu_info pointer to the
722 * current cpu. 714 * current cpu.
723 */ 715 */
724 ia64_set_k4((u_int64_t) ci);  716 ia64_set_k4((uint64_t) ci);
725 ci->ci_cpuid = cpu_number(); 717 ci->ci_cpuid = cpu_number();
726 718
727 719
728 /* Initialise process context. XXX: This should really be in cpu_switch*/ 720 /*
 721 * Initialise process context. XXX: This should really be in cpu_switch
 722 */
729 ci->ci_curlwp = &lwp0; 723 ci->ci_curlwp = &lwp0;
730 724
731 /* 725 /*
732 * Initialize the primary CPU's idle PCB to proc0's. In a 726 * Initialize the primary CPU's idle PCB to proc0's. In a
733 * MULTIPROCESSOR configuration, each CPU will later get 727 * MULTIPROCESSOR configuration, each CPU will later get
734 * its own idle PCB when autoconfiguration runs. 728 * its own idle PCB when autoconfiguration runs.
735 */ 729 */
736 ci->ci_idle_pcb = &proc0paddr->u_pcb; 730 ci->ci_idle_pcb = &proc0paddr->u_pcb;
737 731
738 /* Indicate that proc0 has a CPU. */ 732 /* Indicate that proc0 has a CPU. */
739 lwp0.l_cpu = ci; 733 lwp0.l_cpu = ci;
740 734
741 735
742 ia64_set_tpr(0); 736 ia64_set_tpr(0);
 737 ia64_srlz_d();
743 738
744 /* 739 /*
745 * Save our current context so that we have a known (maybe even 740 * Save our current context so that we have a known (maybe even
746 * sane) context as the initial context for new threads that are 741 * sane) context as the initial context for new threads that are
747 * forked from us.  742 * forked from us.
748 */ 743 */
749 if (savectx(&lwp0.l_addr->u_pcb)) panic("savectx failed"); 744 if (savectx(&lwp0.l_addr->u_pcb)) panic("savectx failed");
750 745
751 /* 746 /*
752 * Initialize debuggers, and break into them if appropriate. 747 * Initialize debuggers, and break into them if appropriate.
753 */ 748 */
754#if NKSYMS || defined(DDB) || defined(MODULAR) 749#if NKSYMS || defined(DDB) || defined(MODULAR)
755 ksyms_addsyms_elf((int)((u_int64_t)ksym_end - (u_int64_t)ksym_start), 750 ksyms_addsyms_elf((int)((uint64_t)ksym_end - (uint64_t)ksym_start),
756 ksym_start, ksym_end); 751 ksym_start, ksym_end);
757#endif 752#endif
758 753
759#if defined(DDB) 754#ifdef DDB
760 Debugger();  755 if (boothowto & RB_KDB)
761#endif  756 Debugger();
 757#endif
762 758
763 extern void main(void); 759 extern void main(void);
764 main(); 760 main();
765 761
766 panic("Wheeee!!! main() returned!!! \n"); 762 panic("Wheeee!!! main() returned!!! \n");
767} 763}
768 764
 765uint64_t
 766ia64_get_hcdp(void)
 767{
 768
 769 return bootinfo.bi_hcdp;
 770}
 771
769/* 772/*
770 * Set registers on exec. 773 * Set registers on exec.
771 */ 774 */
772void 775void
773setregs(register struct lwp *l, struct exec_package *pack, u_long stack) 776setregs(register struct lwp *l, struct exec_package *pack, u_long stack)
774{ 777{
775 struct trapframe *tf; 778 struct trapframe *tf;
776 uint64_t *ksttop, *kst, regstkp; 779 uint64_t *ksttop, *kst, regstkp;
777 780
778 tf = l->l_md.md_tf; 781 tf = l->l_md.md_tf;
779 regstkp = (uint64_t) (l->l_addr) + sizeof(struct user); 782 regstkp = (uint64_t) (l->l_addr) + sizeof(struct user);
780 783
781 ksttop = (uint64_t*) (regstkp + tf->tf_special.ndirty +  784 ksttop =
782 (tf->tf_special.bspstore & 0x1ffUL));  785 (uint64_t*)(regstkp + tf->tf_special.ndirty +
 786 (tf->tf_special.bspstore & 0x1ffUL));
783 787
784 /* XXX: tf_special.ndirty on a new stack frame ??? */ 788 /* XXX: tf_special.ndirty on a new stack frame ??? */
785 789
786 /* 790 /*
787 * We can ignore up to 8KB of dirty registers by masking off the 791 * We can ignore up to 8KB of dirty registers by masking off the
788 * lower 13 bits in exception_restore() or epc_syscall(). This 792 * lower 13 bits in exception_restore() or epc_syscall(). This
789 * should be enough for a couple of years, but if there are more 793 * should be enough for a couple of years, but if there are more
790 * than 8KB of dirty registers, we lose track of the bottom of 794 * than 8KB of dirty registers, we lose track of the bottom of
791 * the kernel stack. The solution is to copy the active part of 795 * the kernel stack. The solution is to copy the active part of
792 * the kernel stack down 1 page (or 2, but not more than that) 796 * the kernel stack down 1 page (or 2, but not more than that)
793 * so that we always have less than 8KB of dirty registers. 797 * so that we always have less than 8KB of dirty registers.
794 */ 798 */
795 KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0); 799 KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0);
@@ -798,79 +802,79 @@ setregs(register struct lwp *l, struct e
798 if ((tf->tf_flags & FRAME_SYSCALL) == 0) { /* break syscalls. */ 802 if ((tf->tf_flags & FRAME_SYSCALL) == 0) { /* break syscalls. */
799 memset(&tf->tf_scratch, 0, sizeof(tf->tf_scratch)); 803 memset(&tf->tf_scratch, 0, sizeof(tf->tf_scratch));
800 memset(&tf->tf_scratch_fp, 0, sizeof(tf->tf_scratch_fp)); 804 memset(&tf->tf_scratch_fp, 0, sizeof(tf->tf_scratch_fp));
801 tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL; 805 tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
802 tf->tf_special.bspstore = IA64_BACKINGSTORE; 806 tf->tf_special.bspstore = IA64_BACKINGSTORE;
803 /* 807 /*
804 * Copy the arguments onto the kernel register stack so that 808 * Copy the arguments onto the kernel register stack so that
805 * they get loaded by the loadrs instruction. Skip over the 809 * they get loaded by the loadrs instruction. Skip over the
806 * NaT collection points. 810 * NaT collection points.
807 */ 811 */
808 kst = ksttop - 1; 812 kst = ksttop - 1;
809 if (((uintptr_t)kst & 0x1ff) == 0x1f8) 813 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
810 *kst-- = 0; 814 *kst-- = 0;
811 *kst-- = (u_int64_t)l->l_proc->p_psstr; /* in3 = ps_strings */ 815 *kst-- = (uint64_t)l->l_proc->p_psstr; /* in3 = ps_strings */
812 if (((uintptr_t)kst & 0x1ff) == 0x1f8) 816 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
813 *kst-- = 0; 817 *kst-- = 0;
814 *kst-- = 0; /* in2 = *obj */ 818 *kst-- = 0; /* in2 = *obj */
815 if (((uintptr_t)kst & 0x1ff) == 0x1f8) 819 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
816 *kst-- = 0; 820 *kst-- = 0;
817 *kst-- = 0; /* in1 = *cleanup */ 821 *kst-- = 0; /* in1 = *cleanup */
818 if (((uintptr_t)kst & 0x1ff) == 0x1f8) 822 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
819 *kst-- = 0; 823 *kst-- = 0;
820 *kst = stack; /* in0 = sp */ 824 *kst = stack; /* in0 = sp */
821 tf->tf_special.ndirty = (ksttop - kst) << 3; 825 tf->tf_special.ndirty = (ksttop - kst) << 3;
822 } else { /* epc syscalls (default). */ 826 } else { /* epc syscalls (default). */
823 tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL; 827 tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
824 tf->tf_special.bspstore = IA64_BACKINGSTORE + 24; 828 tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
825 /* 829 /*
826 * Write values for out0, out1, out2 and out3 to the user's backing 830 * Write values for out0, out1, out2 and out3 to the user's
827 * store and arrange for them to be restored into the user's 831 * backing store and arrange for them to be restored into
828 * initial register frame. 832 * the user's initial register frame.
829 * Assumes that (bspstore & 0x1f8) < 0x1e0. 833 * Assumes that (bspstore & 0x1f8) < 0x1e0.
830 */ 834 */
831 835
832 /* in0 = sp */ 836 /* in0 = sp */
833 suword((char *)tf->tf_special.bspstore - 32, stack); 837 suword((char *)tf->tf_special.bspstore - 32, stack);
834 838
835 /* in1 == *cleanup */ 839 /* in1 == *cleanup */
836 suword((char *)tf->tf_special.bspstore - 24, 0); 840 suword((char *)tf->tf_special.bspstore - 24, 0);
837 841
838 /* in2 == *obj */ 842 /* in2 == *obj */
839 suword((char *)tf->tf_special.bspstore - 16, 0); 843 suword((char *)tf->tf_special.bspstore - 16, 0);
840 844
841 /* in3 = ps_strings */  845 /* in3 = ps_strings */
842 suword((char *)tf->tf_special.bspstore - 8,  846 suword((char *)tf->tf_special.bspstore - 8,
843 (u_int64_t)l->l_proc->p_psstr);  847 (uint64_t)l->l_proc->p_psstr);
844 848
845 } 849 }
846 850
847 tf->tf_special.iip = pack->ep_entry; 851 tf->tf_special.iip = pack->ep_entry;
848 tf->tf_special.sp = (stack & ~15) - 16; 852 tf->tf_special.sp = (stack & ~15) - 16;
849 tf->tf_special.rsc = 0xf; 853 tf->tf_special.rsc = 0xf;
850 tf->tf_special.fpsr = IA64_FPSR_DEFAULT; 854 tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
851 tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT | 855 tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
852 IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN | 856 IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
853 IA64_PSR_CPL_USER; 857 IA64_PSR_CPL_USER;
854 return; 858 return;
855} 859}
856 860
857void 861void
858sendsig(const ksiginfo_t *ksi, const sigset_t *mask) 862sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
859{ 863{
860 return; 864 return;
861} 865}
862  866
863void  867void
864cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted, void *sas, void *ap, void *sp, sa_upcall_t upcall) 868cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted, void *sas, void *ap, void *sp, sa_upcall_t upcall)
865{ 869{
866 return; 870 return;
867} 871}
868 872
869void 873void
870cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags) 874cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
871{ 875{
872 return; 876 return;
873} 877}
874 878
875int 879int
876cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags) 880cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
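
In the map_pal_code() and map_gateway_page() hunks above, the raw
"srlz.i"/"srlz.d" and "mov cr.ifa"/"mov cr.itir" asm statements are
replaced by ia64_srlz_*() and ia64_set_*() calls.  A hedged sketch of
what such wrappers typically look like; they are assumed to be inline
helpers in <machine/ia64_cpu.h>, which is not part of this diff:

	static __inline void
	ia64_srlz_d(void)
	{
		/* serialize data references after control-register writes */
		__asm __volatile("srlz.d");
	}

	static __inline void
	ia64_srlz_i(void)
	{
		/* serialize instruction fetches; ";;" closes the instruction group */
		__asm __volatile("srlz.i;;");
	}

	static __inline void
	ia64_set_ifa(uint64_t v)
	{
		/* faulting address consumed by the following itr insert */
		__asm __volatile("mov cr.ifa=%0" :: "r"(v));
	}

	static __inline void
	ia64_set_itir(uint64_t v)
	{
		/* page size (and protection key) for the translation insert */
		__asm __volatile("mov cr.itir=%0" :: "r"(v));
	}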

cvs diff -r1.4 -r1.5 src/sys/arch/ia64/ia64/mainbus.c

--- src/sys/arch/ia64/ia64/mainbus.c 2009/03/14 21:04:10 1.4
+++ src/sys/arch/ia64/ia64/mainbus.c 2009/07/20 05:10:49 1.5
@@ -1,63 +1,90 @@
1/* $NetBSD: mainbus.c,v 1.4 2009/03/14 21:04:10 dsl Exp $ */ 1/* $NetBSD: mainbus.c,v 1.5 2009/07/20 05:10:49 kiyohara Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2006 The NetBSD Foundation, Inc. 4 * Copyright (c) 2006 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Author:  7 * Author:
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE. 28 * POSSIBILITY OF SUCH DAMAGE.
29 */ 29 */
30 30
31#include <sys/cdefs.h> 31#include <sys/cdefs.h>
32__KERNEL_RCSID(0, "$NetBSD: mainbus.c,v 1.4 2009/03/14 21:04:10 dsl Exp $"); 32__KERNEL_RCSID(0, "$NetBSD: mainbus.c,v 1.5 2009/07/20 05:10:49 kiyohara Exp $");
33 33
 34#include "acpi.h"
34 35
35#include <sys/param.h> 36#include <sys/param.h>
36#include <sys/systm.h> 
37#include <sys/device.h> 37#include <sys/device.h>
 38#include <sys/errno.h>
38 39
39int mainbus_match(struct device *, struct cfdata *, void *); 40#include <machine/bus.h>
40void mainbus_attach(struct device *, struct device *, void *); 
41 41
42CFATTACH_DECL(mainbus, sizeof(struct device), 42#include <dev/acpi/acpivar.h>
 43
 44
 45static int mainbus_match(struct device *, struct cfdata *, void *);
 46static void mainbus_attach(struct device *, struct device *, void *);
 47
 48CFATTACH_DECL_NEW(mainbus, sizeof(struct device),
43 mainbus_match, mainbus_attach, NULL, NULL); 49 mainbus_match, mainbus_attach, NULL, NULL);
44 50
45 51
46/* 52/*
47 * Probe for the mainbus; always succeeds. 53 * Probe for the mainbus; always succeeds.
48 */ 54 */
49int 55static int
50mainbus_match(struct device *parent, struct cfdata *match, void *aux) 56mainbus_match(device_t parent, struct cfdata *match, void *aux)
51{ 57{
52 58
53 return 1; 59 return 1;
54} 60}
55 61
56/* 62/*
57 * Attach the mainbus. 63 * Attach the mainbus.
58 */ 64 */
59void 65static void
60mainbus_attach(struct device *parent, struct device *self, void *aux) 66mainbus_attach(device_t parent, device_t self, void *aux)
61{ 67{
 68#if NACPI > 0
 69 struct acpibus_attach_args aaa;
 70#endif
 71
 72 aprint_naive("\n");
 73 aprint_normal("\n");
 74
 75#if NACPI > 0
 76 acpi_probe();
 77
 78 aaa.aa_iot = IA64_BUS_SPACE_IO;
 79 aaa.aa_memt = IA64_BUS_SPACE_MEM;
 80 aaa.aa_pc = 0;
 81 aaa.aa_pciflags =
 82 PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED |
 83 PCI_FLAGS_MRL_OKAY | PCI_FLAGS_MRM_OKAY |
 84 PCI_FLAGS_MWI_OKAY;
 85 aaa.aa_ic = 0;
 86 config_found_ia(self, "acpibus", &aaa, 0);
 87#endif
 88
62 return; 89 return;
63} 90}
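
The mainbus.c diff also converts the autoconf glue from CFATTACH_DECL to
CFATTACH_DECL_NEW and the match/attach signatures from "struct device *"
to device_t.  A minimal sketch of the new-style idiom, using a
hypothetical "example" driver name (not code from this commit):

	#include <sys/param.h>
	#include <sys/device.h>

	static int  example_match(device_t, struct cfdata *, void *);
	static void example_attach(device_t, device_t, void *);

	/* New-style attachment: the softc no longer embeds struct device. */
	CFATTACH_DECL_NEW(example, 0 /* no softc */,
	    example_match, example_attach, NULL, NULL);

	static int
	example_match(device_t parent, struct cfdata *match, void *aux)
	{

		return 1;
	}

	static void
	example_attach(device_t parent, device_t self, void *aux)
	{

		aprint_naive("\n");
		aprint_normal("\n");
	}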