Sat Dec 3 01:56:56 2011 UTC ()
Rework things a bit for the XLR/XLS/XLP TLB.  Before dealing with the TLB when
MP on the XL?, disable interrupts and take out a lock to prevent concurrent
updates to the TLB.  In the TLB miss and invalid exception handlers, if the
lock is already owned by another CPU, simply return from the exception and
let it continue or restart as appropriate.  This prevents concurrent TLB
exceptions in multiple threads from possibly updating the TLB multiple times
for a single address.


(matt)
diff -r1.1.2.36 -r1.1.2.37 src/sys/arch/evbmips/rmixl/machdep.c
diff -r1.90.16.37 -r1.90.16.38 src/sys/arch/mips/include/cpu.h
diff -r1.54.26.18 -r1.54.26.19 src/sys/arch/mips/include/pmap.h
diff -r1.1.2.20 -r1.1.2.21 src/sys/arch/mips/mips/cpu_subr.c
diff -r1.44.12.29 -r1.44.12.30 src/sys/arch/mips/mips/genassym.cf
diff -r1.26.36.1.2.49 -r1.26.36.1.2.50 src/sys/arch/mips/mips/mipsX_subr.S
diff -r1.1.2.18 -r1.1.2.19 src/sys/arch/mips/mips/pmap_tlb.c
diff -r1.1.2.9 -r1.1.2.10 src/sys/arch/mips/rmi/rmixl_subr.S

cvs diff -r1.1.2.36 -r1.1.2.37 src/sys/arch/evbmips/rmixl/machdep.c (expand / switch to unified diff)

--- src/sys/arch/evbmips/rmixl/machdep.c 2011/11/29 07:48:32 1.1.2.36
+++ src/sys/arch/evbmips/rmixl/machdep.c 2011/12/03 01:56:55 1.1.2.37
@@ -243,26 +243,32 @@ static void rmixl_fixup_curcpu(void); @@ -243,26 +243,32 @@ static void rmixl_fixup_curcpu(void);
243 */ 243 */
244void 244void
245mach_init(int argc, int32_t *argv, void *envp, int64_t infop) 245mach_init(int argc, int32_t *argv, void *envp, int64_t infop)
246{ 246{
247 struct rmixl_config *rcp = &rmixl_configuration; 247 struct rmixl_config *rcp = &rmixl_configuration;
248 void *kernend; 248 void *kernend;
249 uint64_t memsize; 249 uint64_t memsize;
250 extern char edata[], end[]; 250 extern char edata[], end[];
251 size_t fl_count = 0; 251 size_t fl_count = 0;
252 struct mips_vmfreelist fl[1]; 252 struct mips_vmfreelist fl[1];
253 253
254 rmixl_pcr_init_core(); 254 rmixl_pcr_init_core();
255 255
 256#ifdef MULTIPROCESSOR
 257 __asm __volatile("dmtc0 %0,$%1,2"
 258 :: "r"(&pmap_tlb0_info.ti_hwlock->mtx_lock),
 259 "n"(MIPS_COP_0_OSSCRATCH));
 260#endif
 261
256 /* 262 /*
257 * Clear the BSS segment. 263 * Clear the BSS segment.
258 */ 264 */
259 kernend = (void *)mips_round_page(end); 265 kernend = (void *)mips_round_page(end);
260 memset(edata, 0, (char *)kernend - edata); 266 memset(edata, 0, (char *)kernend - edata);
261 267
262 /* 268 /*
263 * Set up the exception vectors and CPU-specific function 269 * Set up the exception vectors and CPU-specific function
264 * vectors early on. We need the wbflush() vector set up 270 * vectors early on. We need the wbflush() vector set up
265 * before comcnattach() is called (or at least before the 271 * before comcnattach() is called (or at least before the
266 * first printf() after that is called). 272 * first printf() after that is called).
267 * Also clears the I+D caches. 273 * Also clears the I+D caches.
268 * 274 *
@@ -364,28 +370,28 @@ mach_init(int argc, int32_t *argv, void  @@ -364,28 +370,28 @@ mach_init(int argc, int32_t *argv, void
364 370
365 /* reserve 0..start..kernend pages */ 371 /* reserve 0..start..kernend pages */
366 mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt, 372 mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt,
367 0, round_page(MIPS_KSEG0_TO_PHYS(kernend))); 373 0, round_page(MIPS_KSEG0_TO_PHYS(kernend)));
368 374
369 /* reserve reset exception vector page */ 375 /* reserve reset exception vector page */
370 /* should never be in our clusters anyway... */ 376 /* should never be in our clusters anyway... */
371 mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt, 377 mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt,
372 0x1FC00000, 0x1FC00000+NBPG); 378 0x1FC00000, 0x1FC00000+NBPG);
373 379
374#ifdef MULTIPROCESSOR 380#ifdef MULTIPROCESSOR
375 /* reserve the cpu_wakeup_info area */ 381 /* reserve the cpu_wakeup_info area */
376 mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt, 382 mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt,
377 (u_quad_t)trunc_page(rcp->rc_cpu_wakeup_info), 383 (u_quad_t)trunc_page((vaddr_t)rcp->rc_cpu_wakeup_info),
378 (u_quad_t)round_page(rcp->rc_cpu_wakeup_end)); 384 (u_quad_t)round_page((vaddr_t)rcp->rc_cpu_wakeup_end));
379#endif 385#endif
380 386
381#ifdef MEMLIMIT 387#ifdef MEMLIMIT
382 /* reserve everything >= MEMLIMIT */ 388 /* reserve everything >= MEMLIMIT */
383 mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt, 389 mem_cluster_cnt = ram_seg_resv(mem_clusters, mem_cluster_cnt,
384 (u_quad_t)MEMLIMIT, (u_quad_t)~0); 390 (u_quad_t)MEMLIMIT, (u_quad_t)~0);
385#endif 391#endif
386 392
387#ifdef ENABLE_MIPS_KSEGX 393#ifdef ENABLE_MIPS_KSEGX
388 /* 394 /*
389 * Now we need to reserve an aligned block of memory for pre-init 395 * Now we need to reserve an aligned block of memory for pre-init
390 * allocations so we don't deplete KSEG0. 396 * allocations so we don't deplete KSEG0.
391 */ 397 */
@@ -445,29 +451,26 @@ mach_init(int argc, int32_t *argv, void  @@ -445,29 +451,26 @@ mach_init(int argc, int32_t *argv, void
445#endif 451#endif
446 452
447#if defined(DDB) 453#if defined(DDB)
448 if (boothowto & RB_KDB) 454 if (boothowto & RB_KDB)
449 Debugger(); 455 Debugger();
450#endif 456#endif
451 /* 457 /*
452 * store (cpu#0) curcpu in COP0 OSSCRATCH0 458 * store (cpu#0) curcpu in COP0 OSSCRATCH0
453 * used in exception vector 459 * used in exception vector
454 */ 460 */
455 __asm __volatile("dmtc0 %0,$%1" 461 __asm __volatile("dmtc0 %0,$%1"
456 :: "r"(&cpu_info_store), "n"(MIPS_COP_0_OSSCRATCH)); 462 :: "r"(&cpu_info_store), "n"(MIPS_COP_0_OSSCRATCH));
457#ifdef MULTIPROCESSOR 463#ifdef MULTIPROCESSOR
458 __asm __volatile("dmtc0 %0,$%1,2" 
459 :: "r"(&pmap_tlb0_info.ti_lock->mtx_lock), 
460 "n"(MIPS_COP_0_OSSCRATCH)); 
461 mips_fixup_exceptions(rmixl_fixup_cop0_oscratch); 464 mips_fixup_exceptions(rmixl_fixup_cop0_oscratch);
462#endif 465#endif
463 rmixl_fixup_curcpu(); 466 rmixl_fixup_curcpu();
464} 467}
465 468
466/* 469/*
467 * set up Processor Control Regs for this core 470 * set up Processor Control Regs for this core
468 */ 471 */
469void 472void
470rmixl_pcr_init_core() 473rmixl_pcr_init_core()
471{ 474{
472 uint32_t r; 475 uint32_t r;
473 476

cvs diff -r1.90.16.37 -r1.90.16.38 src/sys/arch/mips/include/cpu.h (expand / switch to unified diff)

--- src/sys/arch/mips/include/cpu.h 2011/05/26 19:21:55 1.90.16.37
+++ src/sys/arch/mips/include/cpu.h 2011/12/03 01:56:55 1.90.16.38
@@ -97,27 +97,29 @@ struct cpu_info { @@ -97,27 +97,29 @@ struct cpu_info {
97#endif 97#endif
98 volatile int ci_want_resched; /* user preemption pending */ 98 volatile int ci_want_resched; /* user preemption pending */
99 int ci_mtx_count; /* negative count of held mutexes */ 99 int ci_mtx_count; /* negative count of held mutexes */
100 int ci_mtx_oldspl; /* saved SPL value */ 100 int ci_mtx_oldspl; /* saved SPL value */
101 int ci_idepth; /* hardware interrupt depth */ 101 int ci_idepth; /* hardware interrupt depth */
102 int ci_cpl; /* current [interrupt] priority level */ 102 int ci_cpl; /* current [interrupt] priority level */
103 uint32_t ci_next_cp0_clk_intr; /* for hard clock intr scheduling */ 103 uint32_t ci_next_cp0_clk_intr; /* for hard clock intr scheduling */
104 struct evcnt ci_ev_count_compare; /* hard clock intr counter */ 104 struct evcnt ci_ev_count_compare; /* hard clock intr counter */
105 struct evcnt ci_ev_count_compare_missed; /* hard clock miss counter */ 105 struct evcnt ci_ev_count_compare_missed; /* hard clock miss counter */
106 struct lwp *ci_softlwps[SOFTINT_COUNT]; 106 struct lwp *ci_softlwps[SOFTINT_COUNT];
107 volatile u_int ci_softints; 107 volatile u_int ci_softints;
108 struct evcnt ci_ev_fpu_loads; /* fpu load counter */ 108 struct evcnt ci_ev_fpu_loads; /* fpu load counter */
109 struct evcnt ci_ev_fpu_saves; /* fpu save counter */ 109 struct evcnt ci_ev_fpu_saves; /* fpu save counter */
110 struct evcnt ci_ev_tlbmisses; 110 struct evcnt ci_ev_kern_tlbmisses;
 111 struct evcnt ci_ev_user_tlbmisses;
 112 struct evcnt ci_ev_tlblocked;
111 113
112 /* 114 /*
113 * Per-cpu pmap information 115 * Per-cpu pmap information
114 */ 116 */
115 int ci_tlb_slot; /* reserved tlb entry for cpu_info */ 117 int ci_tlb_slot; /* reserved tlb entry for cpu_info */
116 u_int ci_pmap_asid_cur; /* current ASID */ 118 u_int ci_pmap_asid_cur; /* current ASID */
117 struct pmap_tlb_info *ci_tlb_info; /* tlb information for this cpu */ 119 struct pmap_tlb_info *ci_tlb_info; /* tlb information for this cpu */
118 union segtab *ci_pmap_seg0tab; 120 union segtab *ci_pmap_seg0tab;
119#ifdef _LP64 121#ifdef _LP64
120 union segtab *ci_pmap_segtab; 122 union segtab *ci_pmap_segtab;
121#else 123#else
122 vaddr_t ci_pmap_srcbase; /* starting VA of ephemeral src space */ 124 vaddr_t ci_pmap_srcbase; /* starting VA of ephemeral src space */
123 vaddr_t ci_pmap_dstbase; /* starting VA of ephemeral dst space */ 125 vaddr_t ci_pmap_dstbase; /* starting VA of ephemeral dst space */

cvs diff -r1.54.26.18 -r1.54.26.19 src/sys/arch/mips/include/pmap.h (expand / switch to unified diff)

--- src/sys/arch/mips/include/pmap.h 2011/04/29 08:26:21 1.54.26.18
+++ src/sys/arch/mips/include/pmap.h 2011/12/03 01:56:55 1.54.26.19
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.h,v 1.54.26.18 2011/04/29 08:26:21 matt Exp $ */ 1/* $NetBSD: pmap.h,v 1.54.26.19 2011/12/03 01:56:55 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * Ralph Campbell. 8 * Ralph Campbell.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -192,26 +192,27 @@ enum tlb_invalidate_op { @@ -192,26 +192,27 @@ enum tlb_invalidate_op {
192}; 192};
193 193
194struct pmap_tlb_info { 194struct pmap_tlb_info {
195 char ti_name[8]; 195 char ti_name[8];
196 uint32_t ti_asid_hint; /* probable next ASID to use */ 196 uint32_t ti_asid_hint; /* probable next ASID to use */
197 uint32_t ti_asids_free; /* # of ASIDs free */ 197 uint32_t ti_asids_free; /* # of ASIDs free */
198#define tlbinfo_noasids_p(ti) ((ti)->ti_asids_free == 0) 198#define tlbinfo_noasids_p(ti) ((ti)->ti_asids_free == 0)
199 kmutex_t *ti_lock; 199 kmutex_t *ti_lock;
200 u_int ti_wired; /* # of wired TLB entries */ 200 u_int ti_wired; /* # of wired TLB entries */
201 uint32_t ti_asid_mask; 201 uint32_t ti_asid_mask;
202 uint32_t ti_asid_max; 202 uint32_t ti_asid_max;
203 LIST_HEAD(, pmap_asid_info) ti_pais; /* list of active ASIDs */ 203 LIST_HEAD(, pmap_asid_info) ti_pais; /* list of active ASIDs */
204#ifdef MULTIPROCESSOR 204#ifdef MULTIPROCESSOR
 205 kmutex_t *ti_hwlock;
205 pmap_t ti_victim; 206 pmap_t ti_victim;
206 uint32_t ti_synci_page_bitmap; /* page indices needing a syncicache */ 207 uint32_t ti_synci_page_bitmap; /* page indices needing a syncicache */
207 uint32_t ti_cpu_mask; /* bitmask of CPUs sharing this TLB */ 208 uint32_t ti_cpu_mask; /* bitmask of CPUs sharing this TLB */
208 enum tlb_invalidate_op ti_tlbinvop; 209 enum tlb_invalidate_op ti_tlbinvop;
209 u_int ti_index; 210 u_int ti_index;
210#define tlbinfo_index(ti) ((ti)->ti_index) 211#define tlbinfo_index(ti) ((ti)->ti_index)
211 struct evcnt ti_evcnt_synci_asts; 212 struct evcnt ti_evcnt_synci_asts;
212 struct evcnt ti_evcnt_synci_all; 213 struct evcnt ti_evcnt_synci_all;
213 struct evcnt ti_evcnt_synci_pages; 214 struct evcnt ti_evcnt_synci_pages;
214 struct evcnt ti_evcnt_synci_deferred; 215 struct evcnt ti_evcnt_synci_deferred;
215 struct evcnt ti_evcnt_synci_desired; 216 struct evcnt ti_evcnt_synci_desired;
216 struct evcnt ti_evcnt_synci_duplicate; 217 struct evcnt ti_evcnt_synci_duplicate;
217#else 218#else

cvs diff -r1.1.2.20 -r1.1.2.21 src/sys/arch/mips/mips/cpu_subr.c (expand / switch to unified diff)

--- src/sys/arch/mips/mips/cpu_subr.c 2011/11/29 07:48:31 1.1.2.20
+++ src/sys/arch/mips/mips/cpu_subr.c 2011/12/03 01:56:55 1.1.2.21
@@ -232,29 +232,35 @@ cpu_attach_common(device_t self, struct  @@ -232,29 +232,35 @@ cpu_attach_common(device_t self, struct
232 232
233 evcnt_attach_dynamic(&ci->ci_ev_count_compare, 233 evcnt_attach_dynamic(&ci->ci_ev_count_compare,
234 EVCNT_TYPE_INTR, NULL, xname, 234 EVCNT_TYPE_INTR, NULL, xname,
235 "intr 5 (clock)"); 235 "intr 5 (clock)");
236 evcnt_attach_dynamic(&ci->ci_ev_count_compare_missed, 236 evcnt_attach_dynamic(&ci->ci_ev_count_compare_missed,
237 EVCNT_TYPE_INTR, NULL, xname, 237 EVCNT_TYPE_INTR, NULL, xname,
238 "intr 5 (clock) missed"); 238 "intr 5 (clock) missed");
239 evcnt_attach_dynamic(&ci->ci_ev_fpu_loads, 239 evcnt_attach_dynamic(&ci->ci_ev_fpu_loads,
240 EVCNT_TYPE_MISC, NULL, xname, 240 EVCNT_TYPE_MISC, NULL, xname,
241 "fpu loads"); 241 "fpu loads");
242 evcnt_attach_dynamic(&ci->ci_ev_fpu_saves, 242 evcnt_attach_dynamic(&ci->ci_ev_fpu_saves,
243 EVCNT_TYPE_MISC, NULL, xname, 243 EVCNT_TYPE_MISC, NULL, xname,
244 "fpu saves"); 244 "fpu saves");
245 evcnt_attach_dynamic(&ci->ci_ev_tlbmisses, 245 evcnt_attach_dynamic(&ci->ci_ev_user_tlbmisses,
246 EVCNT_TYPE_TRAP, NULL, xname, 246 EVCNT_TYPE_TRAP, NULL, xname,
247 "tlb misses"); 247 "user tlb misses");
 248 evcnt_attach_dynamic(&ci->ci_ev_kern_tlbmisses,
 249 EVCNT_TYPE_TRAP, NULL, xname,
 250 "kern tlb misses");
 251 evcnt_attach_dynamic(&ci->ci_ev_tlblocked,
 252 EVCNT_TYPE_MISC, NULL, xname,
 253 "tlb locked");
248 254
249 if (ci == &cpu_info_store) 255 if (ci == &cpu_info_store)
250 pmap_tlb_info_evcnt_attach(ci->ci_tlb_info); 256 pmap_tlb_info_evcnt_attach(ci->ci_tlb_info);
251 257
252#ifdef MULTIPROCESSOR 258#ifdef MULTIPROCESSOR
253 if (ci != &cpu_info_store) { 259 if (ci != &cpu_info_store) {
254 /* 260 /*
255 * Tail insert this onto the list of cpu_info's. 261 * Tail insert this onto the list of cpu_info's.
256 */ 262 */
257 KASSERT(ci->ci_next == NULL); 263 KASSERT(ci->ci_next == NULL);
258 KASSERT(cpu_info_last->ci_next == NULL); 264 KASSERT(cpu_info_last->ci_next == NULL);
259 cpu_info_last->ci_next = ci; 265 cpu_info_last->ci_next = ci;
260 cpu_info_last = ci; 266 cpu_info_last = ci;

cvs diff -r1.44.12.29 -r1.44.12.30 src/sys/arch/mips/mips/genassym.cf (expand / switch to unified diff)

--- src/sys/arch/mips/mips/genassym.cf 2011/12/02 00:01:37 1.44.12.29
+++ src/sys/arch/mips/mips/genassym.cf 2011/12/03 01:56:55 1.44.12.30
@@ -274,34 +274,38 @@ define SF_REG_S5 offsetof(label_t, val[_ @@ -274,34 +274,38 @@ define SF_REG_S5 offsetof(label_t, val[_
274define SF_REG_S6 offsetof(label_t, val[_L_S6]) 274define SF_REG_S6 offsetof(label_t, val[_L_S6])
275define SF_REG_S7 offsetof(label_t, val[_L_S7]) 275define SF_REG_S7 offsetof(label_t, val[_L_S7])
276define SF_REG_T8 offsetof(label_t, val[_L_T8]) 276define SF_REG_T8 offsetof(label_t, val[_L_T8])
277define SF_REG_GP offsetof(label_t, val[_L_GP]) 277define SF_REG_GP offsetof(label_t, val[_L_GP])
278define SF_REG_SP offsetof(label_t, val[_L_SP]) 278define SF_REG_SP offsetof(label_t, val[_L_SP])
279define SF_REG_S8 offsetof(label_t, val[_L_S8]) 279define SF_REG_S8 offsetof(label_t, val[_L_S8])
280define SF_REG_RA offsetof(label_t, val[_L_RA]) 280define SF_REG_RA offsetof(label_t, val[_L_RA])
281define SF_REG_SR offsetof(label_t, val[_L_SR]) 281define SF_REG_SR offsetof(label_t, val[_L_SR])
282 282
283define MTX_OWNER offsetof(struct kmutex, mtx_owner) 283define MTX_OWNER offsetof(struct kmutex, mtx_owner)
284define MTX_LOCK offsetof(struct kmutex, mtx_lock) 284define MTX_LOCK offsetof(struct kmutex, mtx_lock)
285define MTX_IPL offsetof(struct kmutex, mtx_ipl) 285define MTX_IPL offsetof(struct kmutex, mtx_ipl)
286 286
287define TI_LOCK offsetof(struct pmap_tlb_info, ti_lock) 287ifdef MULTIPROCESSOR
 288define TI_HWLOCK offsetof(struct pmap_tlb_info, ti_hwlock)
 289endif
288 290
289# CPU info 291# CPU info
290define CPU_INFO_CPL offsetof(struct cpu_info, ci_cpl) 292define CPU_INFO_CPL offsetof(struct cpu_info, ci_cpl)
291define CPU_INFO_IDEPTH offsetof(struct cpu_info, ci_idepth) 293define CPU_INFO_IDEPTH offsetof(struct cpu_info, ci_idepth)
292define CPU_INFO_CURLWP offsetof(struct cpu_info, ci_curlwp) 294define CPU_INFO_CURLWP offsetof(struct cpu_info, ci_curlwp)
293define CPU_INFO_IDLELWP offsetof(struct cpu_info, ci_data.cpu_idlelwp) 295define CPU_INFO_IDLELWP offsetof(struct cpu_info, ci_data.cpu_idlelwp)
294define CPU_INFO_EV_TLBMISSES offsetof(struct cpu_info, ci_ev_tlbmisses.ev_count) 296define CPU_INFO_EV_USER_TLBMISSES offsetof(struct cpu_info, ci_ev_user_tlbmisses.ev_count)
 297define CPU_INFO_EV_KERN_TLBMISSES offsetof(struct cpu_info, ci_ev_kern_tlbmisses.ev_count)
 298define CPU_INFO_EV_TLBLOCKED offsetof(struct cpu_info, ci_ev_tlblocked.ev_count)
295define CPU_INFO_PMAP_SEG0TAB offsetof(struct cpu_info, ci_pmap_seg0tab) 299define CPU_INFO_PMAP_SEG0TAB offsetof(struct cpu_info, ci_pmap_seg0tab)
296ifdef _LP64 300ifdef _LP64
297define CPU_INFO_PMAP_SEGTAB offsetof(struct cpu_info, ci_pmap_segtab) 301define CPU_INFO_PMAP_SEGTAB offsetof(struct cpu_info, ci_pmap_segtab)
298endif 302endif
299define CPU_INFO_DIVISOR_DELAY offsetof(struct cpu_info, ci_divisor_delay) 303define CPU_INFO_DIVISOR_DELAY offsetof(struct cpu_info, ci_divisor_delay)
300define CPU_INFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count) 304define CPU_INFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count)
301define CPU_INFO_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl) 305define CPU_INFO_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl)
302ifdef MULTIPROCESSOR 306ifdef MULTIPROCESSOR
303define CPU_INFO_KSP_TLB_SLOT offsetof(struct cpu_info, ci_ksp_tlb_slot) 307define CPU_INFO_KSP_TLB_SLOT offsetof(struct cpu_info, ci_ksp_tlb_slot)
304endif 308endif
305define CPU_INFO_TLB_INFO offsetof(struct cpu_info, ci_tlb_info) 309define CPU_INFO_TLB_INFO offsetof(struct cpu_info, ci_tlb_info)
306 310
307define IPL_NONE IPL_NONE 311define IPL_NONE IPL_NONE

cvs diff -r1.26.36.1.2.49 -r1.26.36.1.2.50 src/sys/arch/mips/mips/mipsX_subr.S (expand / switch to unified diff)

--- src/sys/arch/mips/mips/mipsX_subr.S 2011/12/02 00:01:37 1.26.36.1.2.49
+++ src/sys/arch/mips/mips/mipsX_subr.S 2011/12/03 01:56:55 1.26.36.1.2.50
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: mipsX_subr.S,v 1.26.36.1.2.49 2011/12/02 00:01:37 matt Exp $ */ 1/* $NetBSD: mipsX_subr.S,v 1.26.36.1.2.50 2011/12/03 01:56:55 matt Exp $ */
2 2
3/* 3/*
4 * Copyright 2002 Wasabi Systems, Inc. 4 * Copyright 2002 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Simon Burge for Wasabi Systems, Inc. 7 * Written by Simon Burge for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -335,27 +335,27 @@ @@ -335,27 +335,27 @@
335 * on an r4000. 335 * on an r4000.
336 * 336 *
337 * This code is copied to the TLB exception vector address to 337 * This code is copied to the TLB exception vector address to
338 * handle TLB translation misses. 338 * handle TLB translation misses.
339 * NOTE: This code should be relocatable and max 32 instructions!!! 339 * NOTE: This code should be relocatable and max 32 instructions!!!
340 * 340 *
341 * Don't check for invalid pte's here. We load them as well and 341 * Don't check for invalid pte's here. We load them as well and
342 * let the processor trap to load the correct value after service. 342 * let the processor trap to load the correct value after service.
343 *---------------------------------------------------------------------------- 343 *----------------------------------------------------------------------------
344 */ 344 */
345VECTOR(MIPSX(tlb_miss), unknown) 345VECTOR(MIPSX(tlb_miss), unknown)
346 .set noat 346 .set noat
347#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0 347#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
348 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 #00: get tlbinfo lock addr 348 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 #00: get tlbinfo hwlock addr
349 li k0, __SIMPLELOCK_LOCKED #01: lock value 349 li k0, __SIMPLELOCK_LOCKED #01: lock value
350 swapw k0, k1 #02: swap it in place 350 swapw k0, k1 #02: swap it in place
351 bnez k0, MIPSX(tlblocked) #03: a lie 351 bnez k0, MIPSX(tlblocked) #03: a lie
352 # lui in delay slot 352 # lui in delay slot
353#endif 353#endif
354 lui k1, %hi(CPUVAR(PMAP_SEG0TAB)) #00: k1=hi of seg0tab 354 lui k1, %hi(CPUVAR(PMAP_SEG0TAB)) #00: k1=hi of seg0tab
355 _MFC0 k0, MIPS_COP_0_BAD_VADDR #01: k0=bad address 355 _MFC0 k0, MIPS_COP_0_BAD_VADDR #01: k0=bad address
356 bltz k0, MIPSX(kernelfault) #02: k0<0 -> kernel fault 356 bltz k0, MIPSX(kernelfault) #02: k0<0 -> kernel fault
357 PTR_SRL k0, 1*(PGSHIFT-PTR_SCALESHIFT)+(PGSHIFT-2)#03: k0=seg offset (almost) 357 PTR_SRL k0, 1*(PGSHIFT-PTR_SCALESHIFT)+(PGSHIFT-2)#03: k0=seg offset (almost)
358 PTR_L k1, %lo(CPUVAR(PMAP_SEG0TAB))(k1)#04: k1=seg0tab 358 PTR_L k1, %lo(CPUVAR(PMAP_SEG0TAB))(k1)#04: k1=seg0tab
359MIPSX(tlb_miss_common): 359MIPSX(tlb_miss_common):
360#ifdef _LP64 360#ifdef _LP64
361 beqz k1, MIPSX(nopagetable) #05: is there a pagetable? 361 beqz k1, MIPSX(nopagetable) #05: is there a pagetable?
@@ -422,30 +422,30 @@ MIPSX(tlb_miss_common): @@ -422,30 +422,30 @@ MIPSX(tlb_miss_common):
422#endif 422#endif
423 _MTC0 k0, MIPS_COP_0_TLB_LO0 #14: lo0 is loaded 423 _MTC0 k0, MIPS_COP_0_TLB_LO0 #14: lo0 is loaded
424 _MTC0 k1, MIPS_COP_0_TLB_LO1 #15: lo1 is loaded 424 _MTC0 k1, MIPS_COP_0_TLB_LO1 #15: lo1 is loaded
425 sll $0, $0, 3 #16: standard nop (ehb) 425 sll $0, $0, 3 #16: standard nop (ehb)
426#ifdef MIPS3 426#ifdef MIPS3
427 nop #17: extra nop for QED5230 427 nop #17: extra nop for QED5230
428#endif 428#endif
429 tlbwr #18: write to tlb 429 tlbwr #18: write to tlb
430 sll $0, $0, 3 #19: standard nop (ehb) 430 sll $0, $0, 3 #19: standard nop (ehb)
431#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0 431#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
432 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 #1a get tlbinfo lock addr 432 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 #1a get tlbinfo lock addr
433 INT_S zero, 0(k1) #1b clear lock 433 INT_S zero, 0(k1) #1b clear lock
434#elif (MIPS3 + MIPS64 + MIPS64R2 + MIPS64_RMIXL + MIPS64R2_RMIXL) > 0 434#elif (MIPS3 + MIPS64 + MIPS64R2 + MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
435 lui k1, %hi(CPUVAR(EV_TLBMISSES)) #1a: k1=hi of tlbmisses 435 lui k1, %hi(CPUVAR(EV_USER_TLBMISSES)) #1a: k1=hi of tlbmisses
436 REG_L k0, %lo(CPUVAR(EV_TLBMISSES))(k1) #1b 436 REG_L k0, %lo(CPUVAR(EV_USER_TLBMISSES))(k1) #1b
437 REG_ADDU k0, 1 #1c 437 REG_ADDU k0, 1 #1c
438 REG_S k0, %lo(CPUVAR(EV_TLBMISSES))(k1) #1d 438 REG_S k0, %lo(CPUVAR(EV_USER_TLBMISSES))(k1) #1d
439#endif 439#endif
440 eret #1e: return from exception 440 eret #1e: return from exception
441 .set at 441 .set at
442_VECTOR_END(MIPSX(tlb_miss)) 442_VECTOR_END(MIPSX(tlb_miss))
443 443
444#if defined(USE_64BIT_CP0_FUNCTIONS) 444#if defined(USE_64BIT_CP0_FUNCTIONS)
445/* 445/*
446 * mipsN_xtlb_miss routine 446 * mipsN_xtlb_miss routine
447 * 447 *
448 * Vector code for the XTLB-miss exception vector 0x80000080 on an r4000. 448 * Vector code for the XTLB-miss exception vector 0x80000080 on an r4000.
449 * 449 *
450 * This code is copied to the XTLB exception vector address to 450 * This code is copied to the XTLB exception vector address to
451 * handle TLB translation misses while in 64-bit mode. 451 * handle TLB translation misses while in 64-bit mode.
@@ -556,38 +556,48 @@ VECTOR(MIPSX(exception), unknown) @@ -556,38 +556,48 @@ VECTOR(MIPSX(exception), unknown)
556 nop #0d 556 nop #0d
557 nop #0e 557 nop #0e
558#ifndef _LP64 558#ifndef _LP64
559 nop #0f 559 nop #0f
560#endif 560#endif
561 .p2align 4 561 .p2align 4
562MIPSX(kernelfault): 562MIPSX(kernelfault):
563 j _C_LABEL(MIPSX(kern_tlb_miss)) #10: kernel exception 563 j _C_LABEL(MIPSX(kern_tlb_miss)) #10: kernel exception
564 nop #11: branch delay slot 564 nop #11: branch delay slot
565 nop 565 nop
566 nop 566 nop
567MIPSX(nopagetable): 567MIPSX(nopagetable):
568#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0 568#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
569 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 #14: get tlbinfo lock addr 569 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 #14: get tlbinfo hwlock addr
570 INT_S zero, 0(k1) #15: clear lock 570 INT_S zero, 0(k1) #15: clear lock
571#endif 571#endif
572 lui k1, %hi(CPUVAR(CURLWP)) #16: k1=hi of curlwp 572 lui k1, %hi(CPUVAR(CURLWP)) #16: k1=hi of curlwp
573 j MIPSX(slowfault) #17: no page table present 573 j MIPSX(slowfault) #17: no page table present
574 PTR_L k1, %lo(CPUVAR(CURLWP))(k1) #18: k1=lo of curlwp 574 PTR_L k1, %lo(CPUVAR(CURLWP))(k1) #18: k1=lo of curlwp
575 nop #19: branch delay slot 575 nop #19: branch delay slot
576#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0 576#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 577/*
 578 * If the TLB was locked, then it must have been locked by another thread
 579 * context. If so, that thread is updating the TLB and may be updating the
 580 * address we are concerned with. So the best thing we can do is just return
 581 * from the exception and hope the other thread has fixed the reason for this
 582 * exception. If not, another exception will be raised and hopefully then
 583 * we'll get the TLB hwlock.
 584 */
577MIPSX(tlblocked): 585MIPSX(tlblocked):
 578 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 0 #1a: k1=hi of curlwp 586 lui k1, %hi(CPUVAR(EV_TLBLOCKED)) #1a: k1=hi of tlblocked
579 j MIPSX(slowfault) #1b: no page table present 587 REG_L k0, %lo(CPUVAR(EV_TLBLOCKED))(k1) #1b
580 PTR_L k1, CPU_INFO_CURLWP(k1) #1c: k1=lo of curlwp 588 REG_ADDU k0, 1 #1c
 589 REG_S k0, %lo(CPUVAR(EV_TLBLOCKED))(k1) #1d
 590 eret #1e
581#endif 591#endif
582 .set at 592 .set at
583_VECTOR_END(MIPSX(exception)) 593_VECTOR_END(MIPSX(exception))
584 594
585/* 595/*
586 * Handle MIPS32/MIPS64 style interrupt exception vector. 596 * Handle MIPS32/MIPS64 style interrupt exception vector.
587 */ 597 */
588VECTOR(MIPSX(intr), unknown) 598VECTOR(MIPSX(intr), unknown)
589 .set noat 599 .set noat
590 mfc0 k1, MIPS_COP_0_STATUS #00: get the status register 600 mfc0 k1, MIPS_COP_0_STATUS #00: get the status register
591 nop #01: stall 601 nop #01: stall
592 and k1, k1, MIPS3_SR_KSU_USER #02: test for user mode 602 and k1, k1, MIPS3_SR_KSU_USER #02: test for user mode
593 PTR_LA k0, MIPSX(user_intr) #03: assume user mode 603 PTR_LA k0, MIPSX(user_intr) #03: assume user mode
@@ -1640,67 +1650,73 @@ LEAF_NOPROFILE(MIPSX(kern_tlb_miss)) @@ -1640,67 +1650,73 @@ LEAF_NOPROFILE(MIPSX(kern_tlb_miss))
1640 _SRL k1, k1, WIRED_SHIFT 1650 _SRL k1, k1, WIRED_SHIFT
1641#endif 1651#endif
1642#else 1652#else
1643 INT_ADDU k1, k0, MIPS3_PG_NEXT # point to next page 1653 INT_ADDU k1, k0, MIPS3_PG_NEXT # point to next page
1644#endif /* PGSHIFT & 1) == 0 */ 1654#endif /* PGSHIFT & 1) == 0 */
1645 _MTC0 k1, MIPS_COP_0_TLB_LO1 # load PTE entry 1655 _MTC0 k1, MIPS_COP_0_TLB_LO1 # load PTE entry
1646 COP0_SYNC 1656 COP0_SYNC
1647 tlbwr # write random TLB 1657 tlbwr # write random TLB
1648 COP0_SYNC 1658 COP0_SYNC
1649#ifdef MIPS3 1659#ifdef MIPS3
1650 nop 1660 nop
1651 nop 1661 nop
1652#endif 1662#endif
 1663#if (MIPS3 + MIPS64 + MIPS64R2 + MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 1664 lui k1, %hi(CPUVAR(EV_KERN_TLBMISSES))
 1665 REG_L k0, %lo(CPUVAR(EV_KERN_TLBMISSES))(k1)
 1666 REG_ADDU k0, 1
 1667 REG_S k0, %lo(CPUVAR(EV_KERN_TLBMISSES))(k1)
 1668#endif
1653#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0 1669#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
1654 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 # get tlbinfo lock addr 1670 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 # get tlbinfo hwlock addr
1655 INT_S zero, 0(k1) # clear lock 1671 INT_S zero, 0(k1) # clear lock
1656#endif 1672#endif
1657 eret 1673 eret
1658 .set at 1674 .set at
1659END(MIPSX(kern_tlb_miss)) 1675END(MIPSX(kern_tlb_miss))
1660 1676
1661/*---------------------------------------------------------------------------- 1677/*----------------------------------------------------------------------------
1662 * 1678 *
1663 * mipsN_tlb_invalid_exception -- 1679 * mipsN_kern_tlb_invalid_exception --
1664 * 1680 *
1665 * Handle a TLB invalid exception from kernel mode in kernel space. 1681 * Handle a TLB invalid exception from kernel mode in kernel space.
 1666 * The BadVAddr, Context, and EntryHi registers contain the failed 1682 * The BadVAddr, Context, and EntryHi registers contain the failed
1667 * virtual address. 1683 * virtual address.
1668 * 1684 *
1669 * The case of wired TLB entries is special. The wired TLB entries 1685 * The case of wired TLB entries is special. The wired TLB entries
1670 * are used to keep the u area TLB's valid. The PTE entries for these 1686 * are used to keep the u area TLB's valid. The PTE entries for these
1671 * do not have MIPS3_PG_G set; the kernel instead relies 1687 * do not have MIPS3_PG_G set; the kernel instead relies
1672 * on the switch_resume function to set these bits. 1688 * on the switch_resume function to set these bits.
1673 * 1689 *
1674 * To preserve this situation, we set PG_G bits on the "other" TLB entries 1690 * To preserve this situation, we set PG_G bits on the "other" TLB entries
1675 * when they are wired. 1691 * when they are wired.
1676 * 1692 *
1677 * Results: 1693 * Results:
1678 * None. 1694 * None.
1679 * 1695 *
1680 * Side effects: 1696 * Side effects:
1681 * None. 1697 * None.
1682 * 1698 *
1683 *---------------------------------------------------------------------------- 1699 *----------------------------------------------------------------------------
1684 */ 1700 */
1685LEAF_NOPROFILE(MIPSX(tlb_invalid_exception)) 1701LEAF_NOPROFILE(MIPSX(kern_tlb_invalid_exception))
1686 .set noat 1702 .set noat
1687#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0 1703#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
1688#define TLB_INVALID_EXCEPTION_EXIT _C_LABEL(MIPSX(tlbunlock_kern_gen_exception)) 1704#define TLB_INVALID_EXCEPTION_EXIT _C_LABEL(MIPSX(tlbunlock_kern_gen_exception))
1689 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 # get tlblock addr 1705 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 # get tlblock addr
1690 li k0, __SIMPLELOCK_LOCKED 1706 li k0, __SIMPLELOCK_LOCKED
16911: swapw k0, k1 # set it to locked 1707 swapw k0, k1 # set it to locked
1692 bnez k0, 1b # was it locked? 1708 bnez k0, 99f # was it locked?
1693 nop # if it was, try again 1709 nop # if it was, do an eret
1694#else 1710#else
1695#define TLB_INVALID_EXCEPTION_EXIT _C_LABEL(MIPSX(kern_gen_exception)) 1711#define TLB_INVALID_EXCEPTION_EXIT _C_LABEL(MIPSX(kern_gen_exception))
1696#endif 1712#endif
1697 _MFC0 k0, MIPS_COP_0_BAD_VADDR # get the fault address 1713 _MFC0 k0, MIPS_COP_0_BAD_VADDR # get the fault address
1698#if VM_MIN_KERNEL_ADDRESS == MIPS_KSEG2_START 1714#if VM_MIN_KERNEL_ADDRESS == MIPS_KSEG2_START
1699 li k1, VM_MIN_KERNEL_ADDRESS # compute index 1715 li k1, VM_MIN_KERNEL_ADDRESS # compute index
1700#else 1716#else
1701 li k1, VM_MIN_KERNEL_ADDRESS>>32 # compute index 1717 li k1, VM_MIN_KERNEL_ADDRESS>>32 # compute index
1702 dsll32 k1, k1, 0 1718 dsll32 k1, k1, 0
1703#endif 1719#endif
1704 bgez k0, TLB_INVALID_EXCEPTION_EXIT # full trap processing 1720 bgez k0, TLB_INVALID_EXCEPTION_EXIT # full trap processing
1705 nop # - delay slot - 1721 nop # - delay slot -
1706 PTR_SUBU k0, k1 1722 PTR_SUBU k0, k1
@@ -1764,26 +1780,27 @@ LEAF_NOPROFILE(MIPSX(tlb_invalid_excepti @@ -1764,26 +1780,27 @@ LEAF_NOPROFILE(MIPSX(tlb_invalid_excepti
1764 or k1, k1, k0 1780 or k1, k1, k0
1765 _MTC0 k0, MIPS_COP_0_TLB_LO1 # load PTE entry 1781 _MTC0 k0, MIPS_COP_0_TLB_LO1 # load PTE entry
1766 COP0_SYNC 1782 COP0_SYNC
1767#endif /* (PGSHIFT & 1) == 0 */ 1783#endif /* (PGSHIFT & 1) == 0 */
1768 tlbwi # write TLB 1784 tlbwi # write TLB
1769 COP0_SYNC 1785 COP0_SYNC
1770#ifdef MIPS3 1786#ifdef MIPS3
1771 nop 1787 nop
1772 nop 1788 nop
1773#endif 1789#endif
1774#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0 1790#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
1775 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 # get tlblock addr 1791 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 # get tlblock addr
1776 INT_S zero, 0(k1) # clear lock 1792 INT_S zero, 0(k1) # clear lock
 179399:
1777#endif 1794#endif
1778 eret 1795 eret
1779 1796
1780#if (PGSHIFT & 1) == 0 1797#if (PGSHIFT & 1) == 0
1781MIPSX(kern_tlbi_odd): 1798MIPSX(kern_tlbi_odd):
1782 INT_L k0, 0(k1) # get PTE entry 1799 INT_L k0, 0(k1) # get PTE entry
1783#if (MIPS32R2 + MIPS64R2 + MIPS64R2_RMIXL) > 0 1800#if (MIPS32R2 + MIPS64R2 + MIPS64R2_RMIXL) > 0
1784 _EXT k0, k0, 0, WIRED_POS 1801 _EXT k0, k0, 0, WIRED_POS
1785#else 1802#else
1786 _SLL k0, k0, WIRED_SHIFT # get rid of wired bit 1803 _SLL k0, k0, WIRED_SHIFT # get rid of wired bit
1787 _SRL k0, k0, WIRED_SHIFT 1804 _SRL k0, k0, WIRED_SHIFT
1788#endif 1805#endif
1789 _MTC0 k0, MIPS_COP_0_TLB_LO1 # save PTE entry 1806 _MTC0 k0, MIPS_COP_0_TLB_LO1 # save PTE entry
@@ -1816,32 +1833,32 @@ MIPSX(kern_tlbi_odd): @@ -1816,32 +1833,32 @@ MIPSX(kern_tlbi_odd):
1816#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0 1833#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
1817 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 # get tlblock addr 1834 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 # get tlblock addr
1818 INT_S zero, 0(k1) # clear lock 1835 INT_S zero, 0(k1) # clear lock
1819#endif 1836#endif
1820 eret 1837 eret
1821#endif /* (PGSHIFT & 1) == 0 */ 1838#endif /* (PGSHIFT & 1) == 0 */
1822 1839
1823#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0 1840#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
1824/* 1841/*
1825 * Before entering kern_gen_exception we need to clear the tlb lock that 1842 * Before entering kern_gen_exception we need to clear the tlb lock that
1826 * we locked. 1843 * we locked.
1827 */ 1844 */
1828MIPSX(tlbunlock_kern_gen_exception): 1845MIPSX(tlbunlock_kern_gen_exception):
1829 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 # get tlblock addr 1846 _MFC0 k1, MIPS_COP_0_OSSCRATCH, 2 # get tlb hwlock addr
1830 b _C_LABEL(MIPSX(kern_gen_exception)) 1847 b _C_LABEL(MIPSX(kern_gen_exception))
1831 INT_S zero, 0(k1) # clear lock 1848 INT_S zero, 0(k1) # clear lock
1832#endif 1849#endif
1833 1850
1834END(MIPSX(tlb_invalid_exception)) 1851END(MIPSX(kern_tlb_invalid_exception))
1835 1852
1836/* 1853/*
1837 * Mark where code entered from exception hander jumptable 1854 * Mark where code entered from exception hander jumptable
1838 * ends, for stack traceback code. 1855 * ends, for stack traceback code.
1839 */ 1856 */
1840 1857
1841 .globl _C_LABEL(MIPSX(exceptionentry_end)) 1858 .globl _C_LABEL(MIPSX(exceptionentry_end))
1842_C_LABEL(MIPSX(exceptionentry_end)): 1859_C_LABEL(MIPSX(exceptionentry_end)):
1843 1860
1844/*-------------------------------------------------------------------------- 1861/*--------------------------------------------------------------------------
1845 * 1862 *
1846 * mipsN_tlb_set_asid -- 1863 * mipsN_tlb_set_asid --
1847 * 1864 *
@@ -1872,26 +1889,33 @@ END(MIPSX(tlb_set_asid)) @@ -1872,26 +1889,33 @@ END(MIPSX(tlb_set_asid))
1872 * 1889 *
1873 * Results: 1890 * Results:
1874 * < 0 if skipped, >= 0 if updated. 1891 * < 0 if skipped, >= 0 if updated.
1875 * 1892 *
1876 * Side effects: 1893 * Side effects:
1877 * None. 1894 * None.
1878 * 1895 *
1879 *-------------------------------------------------------------------------- 1896 *--------------------------------------------------------------------------
1880 */ 1897 */
1881LEAF(MIPSX(tlb_update_addr)) 1898LEAF(MIPSX(tlb_update_addr))
1882 mfc0 v1, MIPS_COP_0_STATUS # Save the status register. 1899 mfc0 v1, MIPS_COP_0_STATUS # Save the status register.
1883 mtc0 zero, MIPS_COP_0_STATUS # Disable interrupts 1900 mtc0 zero, MIPS_COP_0_STATUS # Disable interrupts
1884 COP0_SYNC 1901 COP0_SYNC
 1902#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 1903 _MFC0 ta3, MIPS_COP_0_OSSCRATCH, 2
 19041: li v0, __SIMPLELOCK_LOCKED
 1905 swapw v0, ta3
 1906 bnez v0, 1b
 1907 nop
 1908#endif
1885#if (PGSHIFT & 1) == 0 1909#if (PGSHIFT & 1) == 0
1886 and t1, a0, MIPS3_PG_ODDPG # t1 = Even/Odd flag 1910 and t1, a0, MIPS3_PG_ODDPG # t1 = Even/Odd flag
1887#endif 1911#endif
1888 li v0, (MIPS3_PG_HVPN | MIPS3_PG_ASID) 1912 li v0, (MIPS3_PG_HVPN | MIPS3_PG_ASID)
1889 and a0, a0, v0 1913 and a0, a0, v0
1890 _MFC0 t0, MIPS_COP_0_TLB_HI # Save current PID 1914 _MFC0 t0, MIPS_COP_0_TLB_HI # Save current PID
1891 _MTC0 a0, MIPS_COP_0_TLB_HI # Init high reg 1915 _MTC0 a0, MIPS_COP_0_TLB_HI # Init high reg
1892 COP0_SYNC 1916 COP0_SYNC
1893 and a2, a1, MIPS3_PG_G # Copy global bit 1917 and a2, a1, MIPS3_PG_G # Copy global bit
1894 tlbp # Probe for the entry. 1918 tlbp # Probe for the entry.
1895 COP0_SYNC 1919 COP0_SYNC
1896#if (MIPS32R2 + MIPS64R2 + MIPS64R2_RMIXL) > 0 1920#if (MIPS32R2 + MIPS64R2 + MIPS64R2_RMIXL) > 0
1897 _EXT a1, a1, 0, WIRED_POS 1921 _EXT a1, a1, 0, WIRED_POS
@@ -1936,87 +1960,107 @@ LEAF(MIPSX(tlb_update_addr)) @@ -1936,87 +1960,107 @@ LEAF(MIPSX(tlb_update_addr))
1936 COP0_SYNC 1960 COP0_SYNC
1937 tlbwi # update slot found 1961 tlbwi # update slot found
1938 COP0_SYNC 1962 COP0_SYNC
1939#endif /* (PGSHIFT & 1) == 0 */ 1963#endif /* (PGSHIFT & 1) == 0 */
19404: 19644:
1941#ifdef MIPS3 1965#ifdef MIPS3
1942 nop # Make sure pipeline 1966 nop # Make sure pipeline
1943 nop # advances before we 1967 nop # advances before we
1944 nop # use the TLB. 1968 nop # use the TLB.
1945 nop 1969 nop
1946#endif 1970#endif
1947 _MTC0 t0, MIPS_COP_0_TLB_HI # restore PID 1971 _MTC0 t0, MIPS_COP_0_TLB_HI # restore PID
1948 COP0_SYNC 1972 COP0_SYNC
 1973#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 1974 INT_S zero, 0(ta3)
 1975#endif
1949 mtc0 v1, MIPS_COP_0_STATUS # Restore the status register 1976 mtc0 v1, MIPS_COP_0_STATUS # Restore the status register
1950 JR_HB_RA 1977 JR_HB_RA
1951END(MIPSX(tlb_update_addr)) 1978END(MIPSX(tlb_update_addr))
1952 1979
1953/*-------------------------------------------------------------------------- 1980/*--------------------------------------------------------------------------
1954 * 1981 *
1955 * mipsN_tlb_read_indexed -- 1982 * mipsN_tlb_read_indexed --
1956 * 1983 *
1957 * Read the TLB entry. 1984 * Read the TLB entry.
1958 * 1985 *
1959 * void mipsN_tlb_read_indexed(size_t tlb_index, struct tlbmask *tlb); 1986 * void mipsN_tlb_read_indexed(size_t tlb_index, struct tlbmask *tlb);
1960 * 1987 *
1961 * Results: 1988 * Results:
1962 * None. 1989 * None.
1963 * 1990 *
1964 * Side effects: 1991 * Side effects:
1965 * tlb will contain the TLB entry found. 1992 * tlb will contain the TLB entry found.
1966 * 1993 *
1967 *-------------------------------------------------------------------------- 1994 *--------------------------------------------------------------------------
1968 */ 1995 */
1969LEAF(MIPSX(tlb_read_indexed)) 1996LEAF(MIPSX(tlb_read_indexed))
1970 mfc0 v1, MIPS_COP_0_STATUS # Save the status register. 1997 mfc0 v1, MIPS_COP_0_STATUS # Save the status register.
1971 mtc0 zero, MIPS_COP_0_STATUS # Disable interrupts 1998 mtc0 zero, MIPS_COP_0_STATUS # Disable interrupts
1972 COP0_SYNC 1999 COP0_SYNC
 2000#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2001 _MFC0 ta3, MIPS_COP_0_OSSCRATCH, 2
 20021: li v0, __SIMPLELOCK_LOCKED
 2003 swapw v0, ta3
 2004 bnez v0, 1b
 2005 nop
 2006#endif
1973 mfc0 ta2, MIPS_COP_0_TLB_PG_MASK # save current pgMask 2007 mfc0 ta2, MIPS_COP_0_TLB_PG_MASK # save current pgMask
1974#ifdef MIPS3 2008#ifdef MIPS3
1975 nop 2009 nop
1976#endif 2010#endif
1977 _MFC0 t0, MIPS_COP_0_TLB_HI # Get current PID 2011 _MFC0 t0, MIPS_COP_0_TLB_HI # Get current PID
1978 2012
1979 mtc0 a0, MIPS_COP_0_TLB_INDEX # Set the index register 2013 mtc0 a0, MIPS_COP_0_TLB_INDEX # Set the index register
1980 COP0_SYNC 2014 COP0_SYNC
1981 tlbr # Read from the TLB 2015 tlbr # Read from the TLB
1982 COP0_SYNC 2016 COP0_SYNC
1983 mfc0 t2, MIPS_COP_0_TLB_PG_MASK # fetch the pgMask 2017 mfc0 t2, MIPS_COP_0_TLB_PG_MASK # fetch the pgMask
1984 _MFC0 t3, MIPS_COP_0_TLB_HI # fetch the hi entry 2018 _MFC0 t3, MIPS_COP_0_TLB_HI # fetch the hi entry
1985 _MFC0 ta0, MIPS_COP_0_TLB_LO0 # See what we got 2019 _MFC0 ta0, MIPS_COP_0_TLB_LO0 # See what we got
1986 _MFC0 ta1, MIPS_COP_0_TLB_LO1 # See what we got 2020 _MFC0 ta1, MIPS_COP_0_TLB_LO1 # See what we got
1987 _MTC0 t0, MIPS_COP_0_TLB_HI # restore PID 2021 _MTC0 t0, MIPS_COP_0_TLB_HI # restore PID
1988 mtc0 ta2, MIPS_COP_0_TLB_PG_MASK # restore pgMask 2022 mtc0 ta2, MIPS_COP_0_TLB_PG_MASK # restore pgMask
1989 COP0_SYNC 2023 COP0_SYNC
 2024#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2025 INT_S zero, 0(ta3) # unlock the tlb
 2026#endif
1990 mtc0 v1, MIPS_COP_0_STATUS # Restore the status register 2027 mtc0 v1, MIPS_COP_0_STATUS # Restore the status register
1991 COP0_SYNC 2028 COP0_SYNC
1992 PTR_S t3, TLBMASK_HI(a1) 2029 PTR_S t3, TLBMASK_HI(a1)
1993 INT_S ta0, TLBMASK_LO0(a1) 2030 INT_S ta0, TLBMASK_LO0(a1)
1994 INT_S ta1, TLBMASK_LO1(a1) 2031 INT_S ta1, TLBMASK_LO1(a1)
1995 j ra 2032 j ra
1996 INT_S t2, TLBMASK_MASK(a1) 2033 INT_S t2, TLBMASK_MASK(a1)
1997END(MIPSX(tlb_read_indexed)) 2034END(MIPSX(tlb_read_indexed))
1998 2035
1999/*-------------------------------------------------------------------------- 2036/*--------------------------------------------------------------------------
2000 * 2037 *
2001 * void mipsN_tlb_invalidate_addr(vaddr_t va) 2038 * void mipsN_tlb_invalidate_addr(vaddr_t va)
2002 * 2039 *
2003 * Invalidate a TLB entry which has the given vaddr and ASID if found. 2040 * Invalidate a TLB entry which has the given vaddr and ASID if found.
2004 *-------------------------------------------------------------------------- 2041 *--------------------------------------------------------------------------
2005 */ 2042 */
2006LEAF_NOPROFILE(MIPSX(tlb_invalidate_addr)) 2043LEAF_NOPROFILE(MIPSX(tlb_invalidate_addr))
2007 mfc0 v1, MIPS_COP_0_STATUS # save status register 2044 mfc0 v1, MIPS_COP_0_STATUS # save status register
2008 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts 2045 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts
2009 COP0_SYNC 2046 COP0_SYNC
 2047#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2048 _MFC0 ta3, MIPS_COP_0_OSSCRATCH, 2
 20491: li v0, __SIMPLELOCK_LOCKED
 2050 swapw v0, ta3
 2051 bnez v0, 1b
 2052 nop
 2053#endif
2010 2054
2011 li v0, (MIPS3_PG_HVPN | MIPS3_PG_ASID) 2055 li v0, (MIPS3_PG_HVPN | MIPS3_PG_ASID)
2012 _MFC0 t0, MIPS_COP_0_TLB_HI # save current ASID 2056 _MFC0 t0, MIPS_COP_0_TLB_HI # save current ASID
2013 mfc0 t3, MIPS_COP_0_TLB_PG_MASK # save current pgMask 2057 mfc0 t3, MIPS_COP_0_TLB_PG_MASK # save current pgMask
2014 and a0, v0 # make sure valid entryHi 2058 and a0, v0 # make sure valid entryHi
2015 _MTC0 a0, MIPS_COP_0_TLB_HI # look for the vaddr & ASID 2059 _MTC0 a0, MIPS_COP_0_TLB_HI # look for the vaddr & ASID
2016 COP0_SYNC 2060 COP0_SYNC
2017 tlbp # probe the entry in question 2061 tlbp # probe the entry in question
2018 COP0_SYNC 2062 COP0_SYNC
2019 mfc0 v0, MIPS_COP_0_TLB_INDEX # see what we got 2063 mfc0 v0, MIPS_COP_0_TLB_INDEX # see what we got
2020 bltz v0, 1f # index < 0 then skip 2064 bltz v0, 1f # index < 0 then skip
2021 li t1, MIPS_KSEG0_START # invalid address 2065 li t1, MIPS_KSEG0_START # invalid address
2022 PTR_SLL v0, (PGSHIFT | 1) # PAGE_SHIFT | 1 2066 PTR_SLL v0, (PGSHIFT | 1) # PAGE_SHIFT | 1
@@ -2025,41 +2069,51 @@ LEAF_NOPROFILE(MIPSX(tlb_invalidate_addr @@ -2025,41 +2069,51 @@ LEAF_NOPROFILE(MIPSX(tlb_invalidate_addr
2025 _MTC0 zero, MIPS_COP_0_TLB_LO0 # zero out entryLo0 2069 _MTC0 zero, MIPS_COP_0_TLB_LO0 # zero out entryLo0
2026 _MTC0 zero, MIPS_COP_0_TLB_LO1 # zero out entryLo1 2070 _MTC0 zero, MIPS_COP_0_TLB_LO1 # zero out entryLo1
2027#if 0 2071#if 0
2028 mtc0 zero, MIPS_COP_0_TLB_PG_MASK # zero out pageMask 2072 mtc0 zero, MIPS_COP_0_TLB_PG_MASK # zero out pageMask
2029#endif 2073#endif
2030 COP0_SYNC 2074 COP0_SYNC
2031 2075
2032 tlbwi 2076 tlbwi
2033 COP0_SYNC 2077 COP0_SYNC
20341: 20781:
2035 _MTC0 t0, MIPS_COP_0_TLB_HI # restore current ASID 2079 _MTC0 t0, MIPS_COP_0_TLB_HI # restore current ASID
2036 mtc0 t3, MIPS_COP_0_TLB_PG_MASK # restore pgMask 2080 mtc0 t3, MIPS_COP_0_TLB_PG_MASK # restore pgMask
2037 COP0_SYNC 2081 COP0_SYNC
 2082#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2083 INT_S zero, 0(ta3) # unlock the tlb
 2084#endif
2038 mtc0 v1, MIPS_COP_0_STATUS # restore status register 2085 mtc0 v1, MIPS_COP_0_STATUS # restore status register
2039 JR_HB_RA 2086 JR_HB_RA
2040END(MIPSX(tlb_invalidate_addr)) 2087END(MIPSX(tlb_invalidate_addr))
2041 2088
2042/* 2089/*
2043 * void mipsN_tlb_invalidate_asids(uint32_t base, uint32_t limit); 2090 * void mipsN_tlb_invalidate_asids(uint32_t base, uint32_t limit);
2044 * 2091 *
2045 * Invalidate TLB entries belong to per process user spaces with 2092 * Invalidate TLB entries belong to per process user spaces with
2046 * base <= ASIDs < limit while leaving entries for kernel space 2093 * base <= ASIDs < limit while leaving entries for kernel space
2047 * marked global intact. 2094 * marked global intact.
2048 */ 2095 */
2049LEAF_NOPROFILE(MIPSX(tlb_invalidate_asids)) 2096LEAF_NOPROFILE(MIPSX(tlb_invalidate_asids))
2050 mfc0 v1, MIPS_COP_0_STATUS # save status register 2097 mfc0 v1, MIPS_COP_0_STATUS # save status register
2051 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts 2098 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts
2052 COP0_SYNC 2099 COP0_SYNC
 2100#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2101 _MFC0 ta3, MIPS_COP_0_OSSCRATCH, 2
 21021: li v0, __SIMPLELOCK_LOCKED
 2103 swapw v0, ta3
 2104 bnez v0, 1b
 2105 nop
 2106#endif
2053 2107
2054 _MFC0 t0, MIPS_COP_0_TLB_HI # Save the current PID. 2108 _MFC0 t0, MIPS_COP_0_TLB_HI # Save the current PID.
2055 mfc0 t1, MIPS_COP_0_TLB_WIRED 2109 mfc0 t1, MIPS_COP_0_TLB_WIRED
2056 li v0, MIPS_KSEG0_START # invalid address 2110 li v0, MIPS_KSEG0_START # invalid address
2057 INT_L t2, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES 2111 INT_L t2, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES
2058 mfc0 t3, MIPS_COP_0_TLB_PG_MASK # save current pgMask 2112 mfc0 t3, MIPS_COP_0_TLB_PG_MASK # save current pgMask
2059 2113
2060 # do {} while (t1 < t2) 2114 # do {} while (t1 < t2)
20611: 21151:
2062 mtc0 t1, MIPS_COP_0_TLB_INDEX # set index 2116 mtc0 t1, MIPS_COP_0_TLB_INDEX # set index
2063 COP0_SYNC 2117 COP0_SYNC
2064 sll ta0, t1, PGSHIFT | 1 # PAGE_SHIFT | 1 2118 sll ta0, t1, PGSHIFT | 1 # PAGE_SHIFT | 1
2065 tlbr # obtain an entry 2119 tlbr # obtain an entry
@@ -2083,40 +2137,50 @@ LEAF_NOPROFILE(MIPSX(tlb_invalidate_asid @@ -2083,40 +2137,50 @@ LEAF_NOPROFILE(MIPSX(tlb_invalidate_asid
2083 _MTC0 zero, MIPS_COP_0_TLB_LO1 # zero out entryLo1 2137 _MTC0 zero, MIPS_COP_0_TLB_LO1 # zero out entryLo1
2084 mtc0 zero, MIPS_COP_0_TLB_PG_MASK # zero out mask entry 2138 mtc0 zero, MIPS_COP_0_TLB_PG_MASK # zero out mask entry
2085 COP0_SYNC 2139 COP0_SYNC
2086 tlbwi # invalidate the TLB entry 2140 tlbwi # invalidate the TLB entry
2087 COP0_SYNC 2141 COP0_SYNC
20882: 21422:
2089 addu t1, 1 2143 addu t1, 1
2090 bne t1, t2, 1b 2144 bne t1, t2, 1b
2091 nop 2145 nop
2092 2146
2093 _MTC0 t0, MIPS_COP_0_TLB_HI # restore PID. 2147 _MTC0 t0, MIPS_COP_0_TLB_HI # restore PID.
2094 mtc0 t3, MIPS_COP_0_TLB_PG_MASK # restore pgMask 2148 mtc0 t3, MIPS_COP_0_TLB_PG_MASK # restore pgMask
2095 COP0_SYNC 2149 COP0_SYNC
 2150#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2151 INT_S zero, 0(ta3) # unlock the tlb
 2152#endif
2096 mtc0 v1, MIPS_COP_0_STATUS # restore status register 2153 mtc0 v1, MIPS_COP_0_STATUS # restore status register
2097 JR_HB_RA # new ASID will be set soon 2154 JR_HB_RA # new ASID will be set soon
2098END(MIPSX(tlb_invalidate_asids)) 2155END(MIPSX(tlb_invalidate_asids))
2099 2156
2100/* 2157/*
2101 * void mipsN_tlb_invalidate_globals(void); 2158 * void mipsN_tlb_invalidate_globals(void);
2102 * 2159 *
2103 * Invalidate the non-wired TLB entries belonging to kernel space while 2160 * Invalidate the non-wired TLB entries belonging to kernel space while
2104 * leaving entries for user space (not marked global) intact. 2161 * leaving entries for user space (not marked global) intact.
2105 */ 2162 */
2106LEAF_NOPROFILE(MIPSX(tlb_invalidate_globals)) 2163LEAF_NOPROFILE(MIPSX(tlb_invalidate_globals))
2107 mfc0 v1, MIPS_COP_0_STATUS # save status register 2164 mfc0 v1, MIPS_COP_0_STATUS # save status register
2108 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts 2165 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts
2109 COP0_SYNC 2166 COP0_SYNC
 2167#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2168 _MFC0 ta3, MIPS_COP_0_OSSCRATCH, 2
 21691: li v0, __SIMPLELOCK_LOCKED
 2170 swapw v0, ta3
 2171 bnez v0, 1b
 2172 nop
 2173#endif
2110 2174
2111 _MFC0 t0, MIPS_COP_0_TLB_HI # save current ASID 2175 _MFC0 t0, MIPS_COP_0_TLB_HI # save current ASID
2112 mfc0 t1, MIPS_COP_0_TLB_WIRED 2176 mfc0 t1, MIPS_COP_0_TLB_WIRED
2113 li v0, MIPS_KSEG0_START # invalid address 2177 li v0, MIPS_KSEG0_START # invalid address
2114 INT_L t2, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES 2178 INT_L t2, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES
2115 mfc0 t3, MIPS_COP_0_TLB_PG_MASK # save current pgMask 2179 mfc0 t3, MIPS_COP_0_TLB_PG_MASK # save current pgMask
2116 2180
2117 # do {} while (t1 < t2) 2181 # do {} while (t1 < t2)
21181: 21821:
2119 mtc0 t1, MIPS_COP_0_TLB_INDEX # set index 2183 mtc0 t1, MIPS_COP_0_TLB_INDEX # set index
2120 COP0_SYNC 2184 COP0_SYNC
2121 sll ta0, t1, PGSHIFT | 1 # PAGE_SHIFT | 1 2185 sll ta0, t1, PGSHIFT | 1 # PAGE_SHIFT | 1
2122 tlbr # obtain an entry 2186 tlbr # obtain an entry
@@ -2132,39 +2196,49 @@ LEAF_NOPROFILE(MIPSX(tlb_invalidate_glob @@ -2132,39 +2196,49 @@ LEAF_NOPROFILE(MIPSX(tlb_invalidate_glob
2132 _MTC0 zero, MIPS_COP_0_TLB_LO1 # zero out entryLo1 2196 _MTC0 zero, MIPS_COP_0_TLB_LO1 # zero out entryLo1
2133 mtc0 zero, MIPS_COP_0_TLB_PG_MASK # zero out mask entry 2197 mtc0 zero, MIPS_COP_0_TLB_PG_MASK # zero out mask entry
2134 COP0_SYNC 2198 COP0_SYNC
2135 tlbwi # invalidate the TLB entry 2199 tlbwi # invalidate the TLB entry
2136 COP0_SYNC 2200 COP0_SYNC
21372: 22012:
2138 addu t1, 1 2202 addu t1, 1
2139 bne t1, t2, 1b 2203 bne t1, t2, 1b
2140 nop 2204 nop
2141 2205
2142 _MTC0 t0, MIPS_COP_0_TLB_HI # restore current ASID 2206 _MTC0 t0, MIPS_COP_0_TLB_HI # restore current ASID
2143 mtc0 t3, MIPS_COP_0_TLB_PG_MASK # restore pgMask 2207 mtc0 t3, MIPS_COP_0_TLB_PG_MASK # restore pgMask
2144 COP0_SYNC 2208 COP0_SYNC
 2209#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2210 INT_S zero, 0(ta3) # unlock the tlb
 2211#endif
2145 mtc0 v1, MIPS_COP_0_STATUS # restore status register 2212 mtc0 v1, MIPS_COP_0_STATUS # restore status register
2146 JR_HB_RA 2213 JR_HB_RA
2147END(MIPSX(tlb_invalidate_globals)) 2214END(MIPSX(tlb_invalidate_globals))
2148 2215
2149/* 2216/*
2150 * void mipsN_tlb_invalidate_all(void); 2217 * void mipsN_tlb_invalidate_all(void);
2151 * 2218 *
2152 * Invalidate all of non-wired TLB entries. 2219 * Invalidate all of non-wired TLB entries.
2153 */ 2220 */
2154LEAF_NOPROFILE(MIPSX(tlb_invalidate_all)) 2221LEAF_NOPROFILE(MIPSX(tlb_invalidate_all))
2155 mfc0 v1, MIPS_COP_0_STATUS # save status register 2222 mfc0 v1, MIPS_COP_0_STATUS # save status register
2156 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts 2223 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts
2157 COP0_SYNC 2224 COP0_SYNC
 2225#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2226 _MFC0 ta3, MIPS_COP_0_OSSCRATCH, 2
 22271: li v0, __SIMPLELOCK_LOCKED
 2228 swapw v0, ta3
 2229 bnez v0, 1b
 2230 nop
 2231#endif
2158 2232
2159 INT_L a0, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES 2233 INT_L a0, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES
2160 2234
2161 li v0, MIPS_KSEG0_START # invalid address 2235 li v0, MIPS_KSEG0_START # invalid address
2162 _MFC0 t0, MIPS_COP_0_TLB_HI # save current ASID 2236 _MFC0 t0, MIPS_COP_0_TLB_HI # save current ASID
2163 mfc0 t1, MIPS_COP_0_TLB_WIRED 2237 mfc0 t1, MIPS_COP_0_TLB_WIRED
2164 mfc0 t2, MIPS_COP_0_TLB_PG_MASK # save current pgMask 2238 mfc0 t2, MIPS_COP_0_TLB_PG_MASK # save current pgMask
2165 2239
2166 _MTC0 zero, MIPS_COP_0_TLB_LO0 # zero out entryLo0 2240 _MTC0 zero, MIPS_COP_0_TLB_LO0 # zero out entryLo0
2167 _MTC0 zero, MIPS_COP_0_TLB_LO1 # zero out entryLo1 2241 _MTC0 zero, MIPS_COP_0_TLB_LO1 # zero out entryLo1
2168 mtc0 zero, MIPS_COP_0_TLB_PG_MASK # zero out pageMask 2242 mtc0 zero, MIPS_COP_0_TLB_PG_MASK # zero out pageMask
2169 2243
2170 # do {} while (t1 < a0) 2244 # do {} while (t1 < a0)
@@ -2174,109 +2248,132 @@ LEAF_NOPROFILE(MIPSX(tlb_invalidate_all) @@ -2174,109 +2248,132 @@ LEAF_NOPROFILE(MIPSX(tlb_invalidate_all)
2174 sll ta0, t1, PGSHIFT | 1 # PAGE_SHIFT | 1 2248 sll ta0, t1, PGSHIFT | 1 # PAGE_SHIFT | 1
2175 PTR_ADDU ta0, v0 2249 PTR_ADDU ta0, v0
2176 _MTC0 ta0, MIPS_COP_0_TLB_HI # make entryHi invalid 2250 _MTC0 ta0, MIPS_COP_0_TLB_HI # make entryHi invalid
2177 COP0_SYNC 2251 COP0_SYNC
2178 tlbwi # clear the entry 2252 tlbwi # clear the entry
2179 COP0_SYNC 2253 COP0_SYNC
2180 addu t1, 1 # increment index 2254 addu t1, 1 # increment index
2181 bne t1, a0, 1b 2255 bne t1, a0, 1b
2182 nop 2256 nop
2183 2257
2184 _MTC0 t0, MIPS_COP_0_TLB_HI # restore ASID 2258 _MTC0 t0, MIPS_COP_0_TLB_HI # restore ASID
2185 mtc0 t2, MIPS_COP_0_TLB_PG_MASK # restore pgMask 2259 mtc0 t2, MIPS_COP_0_TLB_PG_MASK # restore pgMask
2186 COP0_SYNC 2260 COP0_SYNC
 2261#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2262 INT_S zero, 0(ta3) # unlock the tlb
 2263#endif
2187 mtc0 v1, MIPS_COP_0_STATUS # restore status register 2264 mtc0 v1, MIPS_COP_0_STATUS # restore status register
2188 JR_HB_RA 2265 JR_HB_RA
2189END(MIPSX(tlb_invalidate_all)) 2266END(MIPSX(tlb_invalidate_all))
2190 2267
2191/* 2268/*
2192 * u_int mipsN_tlb_record_asids(u_long *bitmap, uint32_t asid_mask); 2269 * u_int mipsN_tlb_record_asids(u_long *bitmap, uint32_t asid_mask);
2193 * 2270 *
2194 * Record all the ASIDs in use in the TLB and return the number of different 2271 * Record all the ASIDs in use in the TLB and return the number of different
2195 * ASIDs present. 2272 * ASIDs present.
2196 */ 2273 */
2197LEAF_NOPROFILE(MIPSX(tlb_record_asids)) 2274LEAF_NOPROFILE(MIPSX(tlb_record_asids))
2198 2275
2199 _MFC0 a3, MIPS_COP_0_TLB_HI # Save the current PID. 2276 _MFC0 a3, MIPS_COP_0_TLB_HI # Save the current PID.
2200 mfc0 ta0, MIPS_COP_0_TLB_WIRED 2277 mfc0 ta0, MIPS_COP_0_TLB_WIRED
2201 INT_L ta1, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES 2278 INT_L ta1, _C_LABEL(mips_options) + MO_NUM_TLB_ENTRIES
2202 move ta2, zero 2279 move ta2, zero
2203 li ta3, 1 2280 li t3, 1
2204 move v0, zero 
2205 2281
2206 mfc0 v1, MIPS_COP_0_STATUS # save status register 2282 mfc0 v1, MIPS_COP_0_STATUS # save status register
2207#ifdef _LP64 2283#ifdef _LP64
2208 and t0, v1, MIPS_SR_INT_IE 2284 and t0, v1, MIPS_SR_INT_IE
2209 xor t0, v1 2285 xor t0, v1
2210 mtc0 t0, MIPS_COP_0_STATUS # disable interrupts 2286 mtc0 t0, MIPS_COP_0_STATUS # disable interrupts
2211#else 2287#else
2212 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts 2288 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts
2213#endif 2289#endif
2214 COP0_SYNC 2290 COP0_SYNC
 2291#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2292 _MFC0 ta3, MIPS_COP_0_OSSCRATCH, 2
 22931: li v0, __SIMPLELOCK_LOCKED
 2294 swapw v0, ta3
 2295 bnez v0, 1b
 2296 nop
 2297#else
 2298 move v0, zero
 2299#endif
2215 2300
2216 # do {} while (ta0 < ta1) 2301 # do {} while (ta0 < ta1)
22171: 23021:
2218 mtc0 ta0, MIPS_COP_0_TLB_INDEX # set index 2303 mtc0 ta0, MIPS_COP_0_TLB_INDEX # set index
2219 COP0_SYNC 2304 COP0_SYNC
2220 tlbr # obtain an entry 2305 tlbr # obtain an entry
2221 COP0_SYNC 2306 COP0_SYNC
2222 _MFC0 t0, MIPS_COP_0_TLB_LO1 2307 _MFC0 t0, MIPS_COP_0_TLB_LO1
2223 and t0, MIPS3_PG_G # check to see it has G bit 2308 and t0, MIPS3_PG_G # check to see it has G bit
2224 bnez t0, 4f # yep, skip this one. 2309 bnez t0, 4f # yep, skip this one.
2225 nop 2310 nop
2226 _MFC0 t0, MIPS_COP_0_TLB_HI # get VA and ASID 2311 _MFC0 t0, MIPS_COP_0_TLB_HI # get VA and ASID
2227 and t0, a1 # focus on ASID 2312 and t0, a1 # focus on ASID
2228 2313
2229 srl a2, t0, 3 + LONG_SCALESHIFT # drop low 5 or 6 bits 2314 srl a2, t0, 3 + LONG_SCALESHIFT # drop low 5 or 6 bits
2230 sll a2, LONG_SCALESHIFT # make an index for the bitmap 2315 sll a2, LONG_SCALESHIFT # make an index for the bitmap
2231 _SLLV t0, ta3, t0 # t0 is mask (ta3 == 1) 2316 _SLLV t0, t3, t0 # t0 is mask (t3 == 1)
2232 2317
2233 PTR_ADDU a2, a0 # index into the bitmap  2318 PTR_ADDU a2, a0 # index into the bitmap
2234 beq a2, ta2, 3f # is the desired cell loaded? 2319 beq a2, ta2, 3f # is the desired cell loaded?
2235 nop # yes, don't reload it 2320 nop # yes, don't reload it
2236 beqz ta2, 2f # have we ever loaded it? 2321 beqz ta2, 2f # have we ever loaded it?
2237 nop # nope, so don't save it. 2322 nop # nope, so don't save it.
2238 2323
2239 LONG_S t2, 0(ta2) # save the updated value. 2324 LONG_S t2, 0(ta2) # save the updated value.
22402: 23252:
2241 move ta2, a2 # remember the new cell's addr 2326 move ta2, a2 # remember the new cell's addr
2242 LONG_L t2, 0(ta2) # and load it 2327 LONG_L t2, 0(ta2) # and load it
22433: 23283:
2244 and t1, t2, t0 # t1 = t2 & t0 2329 and t1, t2, t0 # t1 = t2 & t0
2245 sltu t1, t1, ta3 # t1 = t1 < 1 (aka t1 == 0) 2330 sltu t1, t1, t3 # t1 = t1 < 1 (aka t1 == 0)
2246 addu v0, t1 # v0 += t1 2331 addu v0, t1 # v0 += t1
2247 or t2, t0 # or in the new ASID bits 2332 or t2, t0 # or in the new ASID bits
22484: 23334:
2249 addu ta0, 1 # increment TLB entry # 2334 addu ta0, 1 # increment TLB entry #
2250 bne ta0, ta1, 1b # keep lookup if not limit 2335 bne ta0, ta1, 1b # keep lookup if not limit
2251 nop 2336 nop
2252 2337
2253 beqz ta2, 5f # do we have a cell to write? 2338 beqz ta2, 5f # do we have a cell to write?
2254 nop # nope. nothing 2339 nop # nope. nothing
2255 2340
2256 LONG_S t2, 0(ta2) # save the updated value. 2341 LONG_S t2, 0(ta2) # save the updated value.
22575: 23425:
2258 _MTC0 a3, MIPS_COP_0_TLB_HI # restore ASID 2343 _MTC0 a3, MIPS_COP_0_TLB_HI # restore ASID
2259 COP0_SYNC 2344 COP0_SYNC
2260 2345
 2346#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2347 INT_S zero, 0(ta3) # unlock the tlb
 2348#endif
2261 mtc0 v1, MIPS_COP_0_STATUS # restore status register 2349 mtc0 v1, MIPS_COP_0_STATUS # restore status register
2262 JR_HB_RA 2350 JR_HB_RA
2263END(MIPSX(tlb_record_asids)) 2351END(MIPSX(tlb_record_asids))
2264 2352
2265/* 2353/*
2266 * mipsN_tlb_enter(size_t tlb_index, vaddr_t va, uint32_t pte); 2354 * mipsN_tlb_enter(size_t tlb_index, vaddr_t va, uint32_t pte);
2267 */ 2355 */
2268LEAF(MIPSX(tlb_enter)) 2356LEAF(MIPSX(tlb_enter))
2269 .set noat 2357 .set noat
 2358 mfc0 v1, MIPS_COP_0_STATUS # save status
 2359 mtc0 zero, MIPS_COP_0_STATUS # disable interupts
 2360#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2361 _MFC0 ta3, MIPS_COP_0_OSSCRATCH, 2
 23621: li v0, __SIMPLELOCK_LOCKED
 2363 swapw v0, ta3
 2364 bnez v0, 1b
 2365 nop
 2366#endif
2270 _MFC0 ta0, MIPS_COP_0_TLB_HI # save EntryHi 2367 _MFC0 ta0, MIPS_COP_0_TLB_HI # save EntryHi
2271 2368
2272#if (PGSHIFT & 1) == 0 2369#if (PGSHIFT & 1) == 0
2273 and a3, a1, MIPS3_PG_ODDPG # select odd page bit 2370 and a3, a1, MIPS3_PG_ODDPG # select odd page bit
2274 xor a3, a1 # clear it. 2371 xor a3, a1 # clear it.
2275#endif 2372#endif
2276 _MTC0 a3, MIPS_COP_0_TLB_HI # set the VA for tlbp 2373 _MTC0 a3, MIPS_COP_0_TLB_HI # set the VA for tlbp
2277 COP0_SYNC 2374 COP0_SYNC
2278 2375
2279#if (PGSHIFT & 1) == 0 2376#if (PGSHIFT & 1) == 0
2280 and t2, a2, MIPS3_PG_G # make prototype tlb_lo0 2377 and t2, a2, MIPS3_PG_G # make prototype tlb_lo0
2281 and t3, a2, MIPS3_PG_G # make prototype tlb_lo1 2378 and t3, a2, MIPS3_PG_G # make prototype tlb_lo1
2282#endif 2379#endif
@@ -2296,76 +2393,140 @@ LEAF(MIPSX(tlb_enter)) @@ -2296,76 +2393,140 @@ LEAF(MIPSX(tlb_enter))
2296 mfc0 t3, MIPS_COP_0_TLB_LO1 # save for update 2393 mfc0 t3, MIPS_COP_0_TLB_LO1 # save for update
2297#endif 2394#endif
2298 2395
2299 /* 2396 /*
2300 * If it's already where we want, no reason to invalidate it. 2397 * If it's already where we want, no reason to invalidate it.
2301 */ 2398 */
2302 beq v0, a0, 2f # already where we want it? 2399 beq v0, a0, 2f # already where we want it?
2303 nop  2400 nop
2304 2401
2305 /* 2402 /*
2306 * Clear the existing TLB entry for it. 2403 * Clear the existing TLB entry for it.
2307 */ 2404 */
2308 sll t1, v0, (1 | PGSHIFT) # make a fake addr for the entry 2405 sll t1, v0, (1 | PGSHIFT) # make a fake addr for the entry
2309 lui v1, %hi(MIPS_KSEG0_START) 2406 lui t3, %hi(MIPS_KSEG0_START)
2310 or t1, v1 2407 or t1, t3
2311 _MTC0 t1, MIPS_COP_0_TLB_HI 2408 _MTC0 t1, MIPS_COP_0_TLB_HI
2312 COP0_SYNC 2409 COP0_SYNC
2313 2410
2314 and t0, a2, MIPS3_PG_G # make prototype tlb_lo 2411 and t0, a2, MIPS3_PG_G # make prototype tlb_lo
2315 mtc0 t0, MIPS_COP_0_TLB_LO0 # use an invalid tlb_lo0 2412 mtc0 t0, MIPS_COP_0_TLB_LO0 # use an invalid tlb_lo0
2316 mtc0 t0, MIPS_COP_0_TLB_LO1 # use an invalid tlb_lo1 2413 mtc0 t0, MIPS_COP_0_TLB_LO1 # use an invalid tlb_lo1
2317 COP0_SYNC 2414 COP0_SYNC
2318 2415
2319 tlbwi # now write the invalid TLB 2416 tlbwi # now write the invalid TLB
2320 COP0_SYNC 2417 COP0_SYNC
2321 2418
2322 _MTC0 a3, MIPS_COP_0_TLB_HI # restore the addr for new TLB 2419 _MTC0 a3, MIPS_COP_0_TLB_HI # restore the addr for new TLB
2323 COP0_SYNC 2420 COP0_SYNC
23241: 24211:
2325 mtc0 a0, MIPS_COP_0_TLB_INDEX # set the index 2422 mtc0 a0, MIPS_COP_0_TLB_INDEX # set the index
2326 COP0_SYNC 2423 COP0_SYNC
2327 2424
23282: 24252:
2329#if (PGSHIFT & 1) == 0 2426#if (PGSHIFT & 1) == 0
2330 and v1, a1, MIPS3_PG_ODDPG # odd or even page 2427 and t3, a1, MIPS3_PG_ODDPG # odd or even page
2331 sll v1, 31 - PGSHIFT # move to MSB 2428 sll t3, 31 - PGSHIFT # move to MSB
2332 sra v1, 31 # v1 a mask (0/~0 = even/odd) 2429 sra t3, 31 # t3 a mask (0/~0 = even/odd)
2333 not v0, v1 # v0 a mask (~0/0 = even/odd) 2430 not v0, t3 # v0 a mask (~0/0 = even/odd)
2334 2431
2335 and ta2, t2, v1 2432 and ta1, t2, t3
2336 and ta3, a2, v0 2433 and ta2, a2, v0
2337 or t2, ta2, ta3 # t2 = (v1 & t2) | (~v1 & a2) 2434 or t2, ta1, ta2 # t2 = (t3 & t2) | (~t3 & a2)
2338 and ta2, t3, v0 2435 and ta1, t3, v0
2339 and ta3, a2, v1 2436 and ta2, a2, t3
2340 or t3, ta2, ta3 # t3 = (~v1 & t3) | (v1 & a2) 2437 or t3, ta1, ta2 # t3 = (~t3 & t3) | (t3 & a2)
2341 2438
2342 mtc0 t2, MIPS_COP_0_TLB_LO0 # set tlb_lo0 (even) 2439 mtc0 t2, MIPS_COP_0_TLB_LO0 # set tlb_lo0 (even)
2343 mtc0 t3, MIPS_COP_0_TLB_LO1 # set tlb_lo1 (odd) 2440 mtc0 t3, MIPS_COP_0_TLB_LO1 # set tlb_lo1 (odd)
2344#else 2441#else
2345 mtc0 a2, MIPS_COP_0_TLB_LO0 # set tlb_lo1 (lower half) 2442 mtc0 a2, MIPS_COP_0_TLB_LO0 # set tlb_lo1 (lower half)
2346 INT_ADDU a2, MIPS3_PG_NEXT 2443 INT_ADDU a2, MIPS3_PG_NEXT
2347 mtc0 a2, MIPS_COP_0_TLB_LO1 # set tlb_lo1 (upper half) 2444 mtc0 a2, MIPS_COP_0_TLB_LO1 # set tlb_lo1 (upper half)
2348#endif 2445#endif
2349 COP0_SYNC 2446 COP0_SYNC
2350 2447
2351 tlbwi # enter it into the TLB 2448 tlbwi # enter it into the TLB
2352 COP0_SYNC 2449 COP0_SYNC
2353 2450
2354 _MTC0 ta1, MIPS_COP_0_TLB_HI # restore EntryHi 2451 _MTC0 ta0, MIPS_COP_0_TLB_HI # restore EntryHi
 2452 COP0_SYNC
 2453
 2454#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2455 INT_S zero, 0(ta3) # unlock the tlb
 2456#endif
 2457 mtc0 v1, MIPS_COP_0_STATUS # restore status register
2355 JR_HB_RA 2458 JR_HB_RA
2356 .set at 2459 .set at
2357END(MIPSX(tlb_enter)) 2460END(MIPSX(tlb_enter))
2358 2461
 2462/*--------------------------------------------------------------------------
 2463 *
 2464 * mipsN_tlb_write_indexed --
 2465 *
 2466 * Write the given entry into the TLB at the given index.
 2467 * Pass full R4000 style TLB info including variable page size mask.
 2468 *
 2469 * mipsN_tlb_write_indexed(size_t tlb_index, const struct tlbmask *tlb)
 2470 *
 2471 * Results:
 2472 * None.
 2473 *
 2474 * Side effects:
 2475 * TLB entry set.
 2476 *
 2477 *--------------------------------------------------------------------------
 2478 */
 2479LEAF(MIPSX(tlb_write_indexed))
 2480 /*
 2481 * Fetch the arguments first so we don't need to worry about KX/UX/PX
 2482 */
 2483 INT_L t0, TLBMASK_LO0(a1) # fetch tlb->tlb_lo0
 2484 INT_L t1, TLBMASK_LO1(a1) # fetch tlb->tlb_lo1
 2485 INT_L t2, TLBMASK_MASK(a1) # fetch tlb->tlb_mask
 2486 PTR_L t3, TLBMASK_HI(a1) # fetch tlb->tlb_hi
 2487 mfc0 v1, MIPS_COP_0_STATUS # save status
 2488 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts
 2489 COP0_SYNC
 2490#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2491 _MFC0 ta3, MIPS_COP_0_OSSCRATCH, 2
 24921: li v0, __SIMPLELOCK_LOCKED
 2493 swapw v0, ta3
 2494 bnez v0, 1b
 2495 nop
 2496#endif
 2497 mfc0 ta1, MIPS_COP_0_TLB_PG_MASK # Save current page mask.
 2498 _MFC0 ta0, MIPS_COP_0_TLB_HI # Save the current PID.
 2499
 2500 _MTC0 t0, MIPS_COP_0_TLB_LO0 # Set up entry lo0.
 2501 _MTC0 t1, MIPS_COP_0_TLB_LO1 # Set up entry lo1.
 2502 COP0_SYNC
 2503 mtc0 a0, MIPS_COP_0_TLB_INDEX # Set the index.
 2504 mtc0 t2, MIPS_COP_0_TLB_PG_MASK # Set up entry pagemask.
 2505 _MTC0 t3, MIPS_COP_0_TLB_HI # Set up entry high.
 2506 COP0_SYNC
 2507 tlbwi # Write the TLB
 2508 COP0_SYNC
 2509
 2510 _MTC0 ta0, MIPS_COP_0_TLB_HI # Restore the PID.
 2511 mtc0 ta1, MIPS_COP_0_TLB_PG_MASK # Restore page mask.
 2512 COP0_SYNC
 2513#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
 2514 INT_S zero, 0(ta3) # unlock the tlb
 2515#endif
 2516 mtc0 v1, MIPS_COP_0_STATUS # Restore the status register
 2517 JR_HB_RA
 2518END(MIPSX(tlb_write_indexed))
 2519
2359/* 2520/*
2360 * mipsN_lwp_trampoline() 2521 * mipsN_lwp_trampoline()
2361 * 2522 *
2362 * Arrange for a function to be invoked neatly, after a cpu_switch(). 2523 * Arrange for a function to be invoked neatly, after a cpu_switch().
2363 * Call the service function with one argument, specified by the s0 2524 * Call the service function with one argument, specified by the s0
2364 * and s1 respectively. There is no need register save operation. 2525 * and s1 respectively. There is no need register save operation.
2365 */ 2526 */
2366LEAF(MIPSX(lwp_trampoline)) 2527LEAF(MIPSX(lwp_trampoline))
2367 PTR_ADDU sp, -CALLFRAME_SIZ 2528 PTR_ADDU sp, -CALLFRAME_SIZ
2368 2529
2369 # Call lwp_startup(), with args from cpu_switchto()/cpu_setfunc() 2530 # Call lwp_startup(), with args from cpu_switchto()/cpu_setfunc()
2370 move a0, v0 2531 move a0, v0
2371 jal _C_LABEL(lwp_startup) 2532 jal _C_LABEL(lwp_startup)
@@ -2460,166 +2621,130 @@ END(MIPSX(setfunc_trampoline)) @@ -2460,166 +2621,130 @@ END(MIPSX(setfunc_trampoline))
2460 * 2621 *
2461 * Wiredown the USPACE of newproc in TLB entry#0. Check whether target 2622 * Wiredown the USPACE of newproc in TLB entry#0. Check whether target
2462 * USPACE is already in another place of TLB before that, and make 2623 * USPACE is already in another place of TLB before that, and make
2463 * sure TBIS(it) in the case. 2624 * sure TBIS(it) in the case.
2464 */ 2625 */
2465LEAF_NOPROFILE(MIPSX(cpu_switch_resume)) 2626LEAF_NOPROFILE(MIPSX(cpu_switch_resume))
2466#if PAGE_SIZE < USPACE || 1 2627#if PAGE_SIZE < USPACE || 1
2467 INT_L a1, L_MD_UPTE_0(a0) # a1 = upte[0] 2628 INT_L a1, L_MD_UPTE_0(a0) # a1 = upte[0]
2468#if (PGSHIFT & 1) == 0 2629#if (PGSHIFT & 1) == 0
2469 INT_L a2, L_MD_UPTE_1(a0) # a2 = upte[1] 2630 INT_L a2, L_MD_UPTE_1(a0) # a2 = upte[1]
2470#else 2631#else
2471 INT_ADDU a2, a1, MIPS3_PG_NEXT # a2 = page following upte[0] 2632 INT_ADDU a2, a1, MIPS3_PG_NEXT # a2 = page following upte[0]
2472#endif 2633#endif
2473 PTR_L v0, L_PCB(a0) # va = l->l_addr 2634 PTR_L a3, L_PCB(a0) # va = l->l_addr
2474#if VM_MIN_KERNEL_ADDRESS == MIPS_KSEG2_START 2635#if VM_MIN_KERNEL_ADDRESS == MIPS_KSEG2_START
2475 li t0, VM_MIN_KERNEL_ADDRESS # compute index 2636 li t0, VM_MIN_KERNEL_ADDRESS # compute index
2476 blt v0, t0, MIPSX(resume) 2637 blt a3, t0, MIPSX(resume)
2477 nop 2638 nop
2478#if defined(ENABLE_MIPS_KSEGX) 2639#if defined(ENABLE_MIPS_KSEGX)
2479 li t0, VM_KSEGX_ADDRESS # below KSEGX? 2640 li t0, VM_KSEGX_ADDRESS # below KSEGX?
2480 blt v0, t0, 1f 2641 blt a3, t0, 1f
2481 nop 2642 nop
2482 li t0, VM_KSEGX_ADDRESS+VM_KSEGX_SIZE # within KSEGX? 2643 li t0, VM_KSEGX_ADDRESS+VM_KSEGX_SIZE # within KSEGX?
2483 blt v0, t0, MIPSX(resume) 2644 blt a3, t0, MIPSX(resume)
2484 nop 2645 nop
24851: 26461:
2486#endif 2647#endif
2487#else 2648#else
2488 li t0, MIPS_KSEG0_START # above XKSEG? 2649 li t0, MIPS_KSEG0_START # above XKSEG?
2489 blt t0, v0, MIPSX(resume) 2650 blt t0, a3, MIPSX(resume)
2490 nop 2651 nop
2491 li t0, VM_MIN_KERNEL_ADDRESS>>32 # below XKSEG? 2652 li t0, VM_MIN_KERNEL_ADDRESS>>32 # below XKSEG?
2492 dsll32 t0, t0, 0 2653 dsll32 t0, t0, 0
2493 blt v0, t0, MIPSX(resume) 2654 blt a3, t0, MIPSX(resume)
2494 nop 2655 nop
2495#endif 2656#endif
2496 2657
2497#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0 2658#ifdef MULTIPROCESSOR
2498 /* 2659 /*
2499 * Grab the TLB lock (we could use LL/SC but this is shorter) 2660 * Fetch TLB slot before zeroing status.
2500 */ 2661 */
2501 _MFC0 a3, MIPS_COP_0_OSSCRATCH, 2 2662 PTR_L t0, L_CPU(a0) # get cpu_info
2502 li v1, __SIMPLELOCK_LOCKED 2663 INT_L t1, CPU_INFO_KSP_TLB_SLOT(t0) # get TLB# for KSP
25031: swapw v1, a3 
2504 bnez v1, 1b 
2505 nop 
2506#endif 2664#endif
2507 2665
2508#if (PGSHIFT & 1) == 0 2666#if (PGSHIFT & 1) == 0
2509 and t0, v0, MIPS3_PG_ODDPG 2667 and v0, a3, MIPS3_PG_ODDPG
2510 beqz t0, MIPSX(entry0) 2668 beqz v0, MIPSX(entry0)
2511 nop 2669 nop
2512 2670
2513 PANIC("USPACE sat on odd page boundary") 2671 PANIC("USPACE sat on odd page boundary")
2514#endif 2672#endif
2515 2673
2516MIPSX(entry0): 2674MIPSX(entry0):
2517 _MFC0 t3, MIPS_COP_0_TLB_HI # save TLB_HI 2675#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
2518 _MTC0 v0, MIPS_COP_0_TLB_HI # VPN = va 2676 mfc0 v1, MIPS_COP_0_STATUS # save status
 2677 mtc0 zero, MIPS_COP_0_STATUS # disable interrupts
 2678 /*
 2679 * Grab the TLB lock (we could use LL/SC but this is shorter)
 2680 */
 2681 _MFC0 ta3, MIPS_COP_0_OSSCRATCH, 2
 2682 li v0, __SIMPLELOCK_LOCKED
 26831: swapw v0, ta3
 2684 bnez v0, 1b
 2685 nop
 2686#endif
 2687
 2688 _MFC0 ta0, MIPS_COP_0_TLB_HI # save TLB_HI
 2689 _MTC0 a3, MIPS_COP_0_TLB_HI # VPN = va
2519 COP0_SYNC 2690 COP0_SYNC
2520 tlbp # probe VPN 2691 tlbp # probe VPN
2521 COP0_SYNC 2692 COP0_SYNC
2522 mfc0 t0, MIPS_COP_0_TLB_INDEX 2693 mfc0 t0, MIPS_COP_0_TLB_INDEX
2523#ifdef MIPS3 2694#ifdef MIPS3
2524 nop 2695 nop
2525#endif 2696#endif
2526 bltz t0, MIPSX(entry0set) 2697 bltz t0, MIPSX(entry0set)
2527 sll t0, t0, (PGSHIFT | 1) # (PAGE_SHIFT | 1) 2698 sll t0, (PGSHIFT | 1) # (PAGE_SHIFT | 1)
2528 PTR_LA t0, MIPS_KSEG0_START(t0) 2699 PTR_LA t0, MIPS_KSEG0_START(t0)
2529 _MTC0 t0, MIPS_COP_0_TLB_HI 2700 _MTC0 t0, MIPS_COP_0_TLB_HI
2530 _MTC0 zero, MIPS_COP_0_TLB_LO0 2701 _MTC0 zero, MIPS_COP_0_TLB_LO0
2531 _MTC0 zero, MIPS_COP_0_TLB_LO1 2702 _MTC0 zero, MIPS_COP_0_TLB_LO1
2532 COP0_SYNC 2703 COP0_SYNC
2533 tlbwi 2704 tlbwi
2534 COP0_SYNC 2705 COP0_SYNC
2535 _MTC0 v0, MIPS_COP_0_TLB_HI # set VPN again 2706 _MTC0 a3, MIPS_COP_0_TLB_HI # set VPN again
2536 COP0_SYNC 2707 COP0_SYNC
2537MIPSX(entry0set): 2708MIPSX(entry0set):
2538#ifdef MULTIPROCESSOR 2709#ifdef MULTIPROCESSOR
2539 PTR_L t0, L_CPU(a0) # get cpu_info 
2540 INT_L t1, CPU_INFO_KSP_TLB_SLOT(t0) # get TLB# for KSP 
2541 mtc0 t1, MIPS_COP_0_TLB_INDEX # TLB entry (virtual) 2710 mtc0 t1, MIPS_COP_0_TLB_INDEX # TLB entry (virtual)
2542#else 2711#else
2543 mtc0 zero, MIPS_COP_0_TLB_INDEX # TLB entry #0 (virtual) 2712 mtc0 zero, MIPS_COP_0_TLB_INDEX # TLB entry #0 (virtual)
2544#endif 2713#endif
2545 COP0_SYNC 2714 COP0_SYNC
2546 or a1, MIPS3_PG_G 2715 or a1, MIPS3_PG_G
2547 _MTC0 a1, MIPS_COP_0_TLB_LO0 # upte[0] | PG_G 2716 _MTC0 a1, MIPS_COP_0_TLB_LO0 # upte[0] | PG_G
2548 or a2, MIPS3_PG_G 2717 or a2, MIPS3_PG_G
2549 _MTC0 a2, MIPS_COP_0_TLB_LO1 # upte[1] | PG_G 2718 _MTC0 a2, MIPS_COP_0_TLB_LO1 # upte[1] | PG_G
2550 COP0_SYNC 2719 COP0_SYNC
2551 tlbwi # set TLB entry #0 2720 tlbwi # set TLB entry #0
2552 COP0_SYNC 2721 COP0_SYNC
2553 _MTC0 t3, MIPS_COP_0_TLB_HI # restore TLB_HI 2722 _MTC0 ta0, MIPS_COP_0_TLB_HI # restore TLB_HI
2554 COP0_SYNC 2723 COP0_SYNC
2555#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0 2724#if defined(MULTIPROCESSOR) && (MIPS64_RMIXL + MIPS64R2_RMIXL) > 0
2556 INT_S zero, 0(a3) # clear tlb lock 2725 mtc0 v1, MIPS_COP_0_STATUS # restore Status register
 2726 INT_S zero, 0(ta3) # clear tlb lock
2557#endif 2727#endif
2558MIPSX(resume): 2728MIPSX(resume):
2559#endif /* PAGE_SIZE < USPACE */ 2729#endif /* PAGE_SIZE < USPACE */
2560#if (MIPS32R2 + MIPS64R2 + MIPS64R2_RMIXL) > 0 2730#if (MIPS32R2 + MIPS64R2 + MIPS64R2_RMIXL) > 0
2561 PTR_L v0, L_PRIVATE(a0) # get lwp private 2731 PTR_L v0, L_PRIVATE(a0) # get lwp private
2562 _MTC0 v0, MIPS_COP_0_TLB_CONTEXT, 4 # make available for rdhwr 2732 _MTC0 v0, MIPS_COP_0_TLB_CONTEXT, 4 # make available for rdhwr
2563#endif 2733#endif
2564 j ra 2734 j ra
2565 nop 2735 nop
2566END(MIPSX(cpu_switch_resume)) 2736END(MIPSX(cpu_switch_resume))
2567 2737
2568/*-------------------------------------------------------------------------- 
2569 * 
2570 * mipsN_tlb_write_indexed -- 
2571 * 
2572 * Write the given entry into the TLB at the given index. 
2573 * Pass full R4000 style TLB info including variable page size mask. 
2574 * 
2575 * mipsN_tlb_write_indexed(size_t tlb_index, const struct tlbmask *tlb) 
2576 * 
2577 * Results: 
2578 * None. 
2579 * 
2580 * Side effects: 
2581 * TLB entry set. 
2582 * 
2583 *-------------------------------------------------------------------------- 
2584 */ 
2585LEAF(MIPSX(tlb_write_indexed)) 
2586 mfc0 v1, MIPS_COP_0_STATUS # Save the status register. 
2587 RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0) 
2588 COP0_SYNC 
2589 INT_L a2, TLBMASK_LO0(a1) # fetch tlb->tlb_lo0 
2590 INT_L a3, TLBMASK_LO1(a1) # fetch tlb->tlb_lo1 
2591 mfc0 v0, MIPS_COP_0_TLB_PG_MASK # Save current page mask. 
2592 _MFC0 t0, MIPS_COP_0_TLB_HI # Save the current PID. 
2593 
2594 _MTC0 a2, MIPS_COP_0_TLB_LO0 # Set up entry low0. 
2595 _MTC0 a3, MIPS_COP_0_TLB_LO1 # Set up entry low1. 
2596 COP0_SYNC 
2597 INT_L a2, TLBMASK_MASK(a1) # fetch tlb->tlb_mask 
2598 PTR_L a3, TLBMASK_HI(a1) # fetch tlb->tlb_hi 
2599 mtc0 a0, MIPS_COP_0_TLB_INDEX # Set the index. 
2600 mtc0 a2, MIPS_COP_0_TLB_PG_MASK # Set up entry pagemask. 
2601 _MTC0 a3, MIPS_COP_0_TLB_HI # Set up entry high. 
2602 COP0_SYNC 
2603 tlbwi # Write the TLB 
2604 COP0_SYNC 
2605 
2606 _MTC0 t0, MIPS_COP_0_TLB_HI # Restore the PID. 
2607 mtc0 v0, MIPS_COP_0_TLB_PG_MASK # Restore page mask. 
2608 COP0_SYNC 
2609 mtc0 v1, MIPS_COP_0_STATUS # Restore the status register 
2610 JR_HB_RA 
2611END(MIPSX(tlb_write_indexed)) 
2612 
2613#if defined(MIPS3) 2738#if defined(MIPS3)
2614/*---------------------------------------------------------------------------- 2739/*----------------------------------------------------------------------------
2615 * 2740 *
2616 * mipsN_VCED -- 2741 * mipsN_VCED --
2617 * 2742 *
2618 * Handle virtual coherency exceptions. 2743 * Handle virtual coherency exceptions.
2619 * Called directly from the mips3 exception-table code. 2744 * Called directly from the mips3 exception-table code.
2620 * only k0, k1 are available on entry 2745 * only k0, k1 are available on entry
2621 * 2746 *
2622 * Results: 2747 * Results:
2623 * None. 2748 * None.
2624 * 2749 *
2625 * Side effects: 2750 * Side effects:
@@ -2749,28 +2874,28 @@ _C_LABEL(MIPSX(locoresw)): @@ -2749,28 +2874,28 @@ _C_LABEL(MIPSX(locoresw)):
2749 PTR_WORD _C_LABEL(nullop) # lsw_cpu_idle 2874 PTR_WORD _C_LABEL(nullop) # lsw_cpu_idle
2750 PTR_WORD _C_LABEL(nullop) # lsw_send_ipi 2875 PTR_WORD _C_LABEL(nullop) # lsw_send_ipi
2751 PTR_WORD _C_LABEL(nullop) # lsw_cpu_offline_md 2876 PTR_WORD _C_LABEL(nullop) # lsw_cpu_offline_md
2752 PTR_WORD _C_LABEL(nullop) # lsw_cpu_init 2877 PTR_WORD _C_LABEL(nullop) # lsw_cpu_init
2753 PTR_WORD _C_LABEL(nullop) # lsw_cpu_run 2878 PTR_WORD _C_LABEL(nullop) # lsw_cpu_run
2754 PTR_WORD _C_LABEL(nullop) # lsw_bus_error 2879 PTR_WORD _C_LABEL(nullop) # lsw_bus_error
2755 2880
2756MIPSX(excpt_sw): 2881MIPSX(excpt_sw):
2757 #### 2882 ####
2758 #### The kernel exception handlers. 2883 #### The kernel exception handlers.
2759 #### 2884 ####
2760 PTR_WORD _C_LABEL(MIPSX(kern_intr)) # 0 external interrupt 2885 PTR_WORD _C_LABEL(MIPSX(kern_intr)) # 0 external interrupt
2761 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 1 TLB modification 2886 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 1 TLB modification
2762 PTR_WORD _C_LABEL(MIPSX(tlb_invalid_exception)) # 2 TLB miss (LW/I-fetch) 2887 PTR_WORD _C_LABEL(MIPSX(kern_tlb_invalid_exception)) # 2 TLB miss (LW/I-fetch)
2763 PTR_WORD _C_LABEL(MIPSX(tlb_invalid_exception)) # 3 TLB miss (SW) 2888 PTR_WORD _C_LABEL(MIPSX(kern_tlb_invalid_exception)) # 3 TLB miss (SW)
2764 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 4 address error (LW/I-fetch) 2889 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 4 address error (LW/I-fetch)
2765 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 5 address error (SW) 2890 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 5 address error (SW)
2766 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 6 bus error (I-fetch) 2891 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 6 bus error (I-fetch)
2767 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 7 bus error (load or store) 2892 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 7 bus error (load or store)
2768 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 8 system call 2893 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 8 system call
2769 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 9 breakpoint 2894 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 9 breakpoint
2770 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 10 reserved instruction 2895 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 10 reserved instruction
2771 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 11 coprocessor unusable 2896 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 11 coprocessor unusable
2772 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 12 arithmetic overflow 2897 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 12 arithmetic overflow
2773 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 13 r4k trap exception 2898 PTR_WORD _C_LABEL(MIPSX(kern_gen_exception)) # 13 r4k trap exception
2774#if defined(MIPS3) 2899#if defined(MIPS3)
2775 PTR_WORD _C_LABEL(MIPSX(VCEI)) # 14 r4k virt coherence 2900 PTR_WORD _C_LABEL(MIPSX(VCEI)) # 14 r4k virt coherence
2776#else 2901#else

cvs diff -r1.1.2.18 -r1.1.2.19 src/sys/arch/mips/mips/Attic/pmap_tlb.c (expand / switch to unified diff)

--- src/sys/arch/mips/mips/Attic/pmap_tlb.c 2011/05/13 17:36:39 1.1.2.18
+++ src/sys/arch/mips/mips/Attic/pmap_tlb.c 2011/12/03 01:56:55 1.1.2.19
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap_tlb.c,v 1.1.2.18 2011/05/13 17:36:39 matt Exp $ */ 1/* $NetBSD: pmap_tlb.c,v 1.1.2.19 2011/12/03 01:56:55 matt Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2010 The NetBSD Foundation, Inc. 4 * Copyright (c) 2010 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas at 3am Software Foundry. 8 * by Matt Thomas at 3am Software Foundry.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -21,27 +21,27 @@ @@ -21,27 +21,27 @@
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33 33
34__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.1.2.18 2011/05/13 17:36:39 matt Exp $"); 34__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.1.2.19 2011/12/03 01:56:55 matt Exp $");
35 35
36/* 36/*
37 * Manages address spaces in a TLB. 37 * Manages address spaces in a TLB.
38 * 38 *
39 * Normally there is a 1:1 mapping between a TLB and a CPU. However, some 39 * Normally there is a 1:1 mapping between a TLB and a CPU. However, some
40 * implementations may share a TLB between multiple CPUs (really CPU thread 40 * implementations may share a TLB between multiple CPUs (really CPU thread
41 * contexts). This requires the TLB abstraction to be separated from the 41 * contexts). This requires the TLB abstraction to be separated from the
42 * CPU abstraction. It also requires that the TLB be locked while doing 42 * CPU abstraction. It also requires that the TLB be locked while doing
43 * TLB activities. 43 * TLB activities.
44 * 44 *
45 * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps 45 * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps
46 * that have a valid ASID. 46 * that have a valid ASID.
47 * 47 *
@@ -133,39 +133,43 @@ __KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v @@ -133,39 +133,43 @@ __KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v
133#include <sys/proc.h> 133#include <sys/proc.h>
134#include <sys/mutex.h> 134#include <sys/mutex.h>
135#include <sys/atomic.h> 135#include <sys/atomic.h>
136#include <sys/kernel.h> /* for cold */ 136#include <sys/kernel.h> /* for cold */
137#include <sys/cpu.h> 137#include <sys/cpu.h>
138 138
139#include <uvm/uvm.h> 139#include <uvm/uvm.h>
140 140
141#include <mips/cache.h> 141#include <mips/cache.h>
142#include <mips/cpuregs.h> 142#include <mips/cpuregs.h>
143#include <mips/locore.h> 143#include <mips/locore.h>
144#include <mips/pte.h> 144#include <mips/pte.h>
145 145
146static kmutex_t pmap_tlb0_mutex __aligned(32); 146static kmutex_t pmap_tlb0_mutex __cacheline_aligned;
 147#ifdef MULTIPROCESSOR
 148static kmutex_t pmap_tlb0_hwmutex __cacheline_aligned;
 149#endif
147 150
148struct pmap_tlb_info pmap_tlb0_info = { 151struct pmap_tlb_info pmap_tlb0_info = {
149 .ti_name = "tlb0", 152 .ti_name = "tlb0",
150 .ti_asid_hint = 1, 153 .ti_asid_hint = 1,
151 .ti_asid_mask = __builtin_constant_p(MIPS_TLB_NUM_PIDS) ? MIPS_TLB_NUM_PIDS - 1 : 0, 154 .ti_asid_mask = __builtin_constant_p(MIPS_TLB_NUM_PIDS) ? MIPS_TLB_NUM_PIDS - 1 : 0,
152 .ti_asid_max = __builtin_constant_p(MIPS_TLB_NUM_PIDS) ? MIPS_TLB_NUM_PIDS - 1 : 0, 155 .ti_asid_max = __builtin_constant_p(MIPS_TLB_NUM_PIDS) ? MIPS_TLB_NUM_PIDS - 1 : 0,
153 .ti_asids_free = __builtin_constant_p(MIPS_TLB_NUM_PIDS) ? MIPS_TLB_NUM_PIDS - 1 : 0, 156 .ti_asids_free = __builtin_constant_p(MIPS_TLB_NUM_PIDS) ? MIPS_TLB_NUM_PIDS - 1 : 0,
154 .ti_asid_bitmap[0] = 1, 157 .ti_asid_bitmap[0] = 1,
155 .ti_wired = MIPS3_TLB_WIRED_UPAGES, 158 .ti_wired = MIPS3_TLB_WIRED_UPAGES,
156 .ti_lock = &pmap_tlb0_mutex, 159 .ti_lock = &pmap_tlb0_mutex,
157 .ti_pais = LIST_HEAD_INITIALIZER(pmap_tlb_info.ti_pais), 160 .ti_pais = LIST_HEAD_INITIALIZER(pmap_tlb_info.ti_pais),
158#ifdef MULTIPROCESSOR 161#ifdef MULTIPROCESSOR
 162 .ti_hwlock = &pmap_tlb0_hwmutex,
159 .ti_cpu_mask = 1, 163 .ti_cpu_mask = 1,
160 .ti_tlbinvop = TLBINV_NOBODY, 164 .ti_tlbinvop = TLBINV_NOBODY,
161#endif 165#endif
162}; 166};
163 167
164#ifdef MULTIPROCESSOR 168#ifdef MULTIPROCESSOR
165struct pmap_tlb_info *pmap_tlbs[MAXCPUS] = { 169struct pmap_tlb_info *pmap_tlbs[MAXCPUS] = {
166 [0] = &pmap_tlb0_info, 170 [0] = &pmap_tlb0_info,
167}; 171};
168u_int pmap_ntlbs = 1; 172u_int pmap_ntlbs = 1;
169u_int pmap_tlb_synci_page_mask; 173u_int pmap_tlb_synci_page_mask;
170u_int pmap_tlb_synci_map_mask; 174u_int pmap_tlb_synci_map_mask;
171#endif 175#endif
@@ -242,26 +246,29 @@ pmap_tlb_info_evcnt_attach(struct pmap_t @@ -242,26 +246,29 @@ pmap_tlb_info_evcnt_attach(struct pmap_t
242 evcnt_attach_dynamic(&ti->ti_evcnt_asid_reinits, 246 evcnt_attach_dynamic(&ti->ti_evcnt_asid_reinits,
243 EVCNT_TYPE_MISC, NULL, 247 EVCNT_TYPE_MISC, NULL,
244 ti->ti_name, "asid pool reinit"); 248 ti->ti_name, "asid pool reinit");
245} 249}
246 250
247void 251void
248pmap_tlb_info_init(struct pmap_tlb_info *ti) 252pmap_tlb_info_init(struct pmap_tlb_info *ti)
249{ 253{
250#ifdef MULTIPROCESSOR 254#ifdef MULTIPROCESSOR
251 if (ti == &pmap_tlb0_info) { 255 if (ti == &pmap_tlb0_info) {
252#endif /* MULTIPROCESSOR */ 256#endif /* MULTIPROCESSOR */
253 KASSERT(ti == &pmap_tlb0_info); 257 KASSERT(ti == &pmap_tlb0_info);
254 mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED); 258 mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
 259#ifdef MULTIPROCESSOR
 260 mutex_init(ti->ti_hwlock, MUTEX_DEFAULT, IPL_SCHED);
 261#endif
255 if (!CPUISMIPSNN || !__builtin_constant_p(MIPS_TLB_NUM_PIDS)) { 262 if (!CPUISMIPSNN || !__builtin_constant_p(MIPS_TLB_NUM_PIDS)) {
256 ti->ti_asid_max = mips_options.mips_num_tlb_entries - 1; 263 ti->ti_asid_max = mips_options.mips_num_tlb_entries - 1;
257 ti->ti_asids_free = ti->ti_asid_max; 264 ti->ti_asids_free = ti->ti_asid_max;
258 ti->ti_asid_mask = ti->ti_asid_max; 265 ti->ti_asid_mask = ti->ti_asid_max;
259 /* 266 /*
260 * Now figure out what mask we need to focus on 267 * Now figure out what mask we need to focus on
261 * asid_max. 268 * asid_max.
262 */ 269 */
263 while ((ti->ti_asid_mask + 1) & ti->ti_asid_mask) { 270 while ((ti->ti_asid_mask + 1) & ti->ti_asid_mask) {
264 ti->ti_asid_mask |= ti->ti_asid_mask >> 1; 271 ti->ti_asid_mask |= ti->ti_asid_mask >> 1;
265 } 272 }
266 } 273 }
267#ifdef MULTIPROCESSOR 274#ifdef MULTIPROCESSOR
@@ -271,26 +278,27 @@ pmap_tlb_info_init(struct pmap_tlb_info  @@ -271,26 +278,27 @@ pmap_tlb_info_init(struct pmap_tlb_info
271 pmap_tlb_synci_page_mask = icache_way_pages - 1; 278 pmap_tlb_synci_page_mask = icache_way_pages - 1;
272 pmap_tlb_synci_map_mask = ~(~0 << icache_way_pages); 279 pmap_tlb_synci_map_mask = ~(~0 << icache_way_pages);
273 printf("tlb0: synci page mask %#x and map mask %#x used for %u pages\n", 280 printf("tlb0: synci page mask %#x and map mask %#x used for %u pages\n",
274 pmap_tlb_synci_page_mask, pmap_tlb_synci_map_mask, 281 pmap_tlb_synci_page_mask, pmap_tlb_synci_map_mask,
275 icache_way_pages); 282 icache_way_pages);
276#endif 283#endif
277 return; 284 return;
278#ifdef MULTIPROCESSOR 285#ifdef MULTIPROCESSOR
279 } 286 }
280 287
281 KASSERT(pmap_tlbs[pmap_ntlbs] == NULL); 288 KASSERT(pmap_tlbs[pmap_ntlbs] == NULL);
282 289
283 ti->ti_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED); 290 ti->ti_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
 291 ti->ti_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
284 ti->ti_asid_bitmap[0] = 1; 292 ti->ti_asid_bitmap[0] = 1;
285 ti->ti_asid_hint = 1; 293 ti->ti_asid_hint = 1;
286 ti->ti_asid_max = pmap_tlb0_info.ti_asid_max; 294 ti->ti_asid_max = pmap_tlb0_info.ti_asid_max;
287 ti->ti_asid_mask = pmap_tlb0_info.ti_asid_mask; 295 ti->ti_asid_mask = pmap_tlb0_info.ti_asid_mask;
288 ti->ti_asids_free = ti->ti_asid_max; 296 ti->ti_asids_free = ti->ti_asid_max;
289 ti->ti_tlbinvop = TLBINV_NOBODY, 297 ti->ti_tlbinvop = TLBINV_NOBODY,
290 ti->ti_victim = NULL; 298 ti->ti_victim = NULL;
291 ti->ti_cpu_mask = 0; 299 ti->ti_cpu_mask = 0;
292 ti->ti_index = pmap_ntlbs++; 300 ti->ti_index = pmap_ntlbs++;
293 snprintf(ti->ti_name, sizeof(ti->ti_name), "tlb%u", ti->ti_index); 301 snprintf(ti->ti_name, sizeof(ti->ti_name), "tlb%u", ti->ti_index);
294 302
295 KASSERT(ti != &pmap_tlb0_info); 303 KASSERT(ti != &pmap_tlb0_info);
296 pmap_tlb_info_evcnt_attach(ti); 304 pmap_tlb_info_evcnt_attach(ti);

cvs diff -r1.1.2.9 -r1.1.2.10 src/sys/arch/mips/rmi/rmixl_subr.S (expand / switch to unified diff)

--- src/sys/arch/mips/rmi/rmixl_subr.S 2011/05/26 19:21:57 1.1.2.9
+++ src/sys/arch/mips/rmi/rmixl_subr.S 2011/12/03 01:56:56 1.1.2.10
@@ -161,24 +161,24 @@ NESTED(rmixl_cpu_trampoline, CALLFRAME_S @@ -161,24 +161,24 @@ NESTED(rmixl_cpu_trampoline, CALLFRAME_S
161 /* 161 /*
162 * load our (idle) lwp from trampoline args 162 * load our (idle) lwp from trampoline args
163 * save in t8 reg dedicated as 'mips_curlwp' 163 * save in t8 reg dedicated as 'mips_curlwp'
164 */ 164 */
165 REG_L t8, 1*SZREG(s0) /* XXX ta_lwp */ 165 REG_L t8, 1*SZREG(s0) /* XXX ta_lwp */
166 166
167 /* 167 /*
168 * load our ta_cpuinfo from trampoline args and pass in a1 168 * load our ta_cpuinfo from trampoline args and pass in a1
169 * jump to common mips cpu_trampoline 169 * jump to common mips cpu_trampoline
170 */ 170 */
171 REG_L a1, 2*SZREG(s0) /* XXX ta_cpuinfo */ 171 REG_L a1, 2*SZREG(s0) /* XXX ta_cpuinfo */
172 dmtc0 a1, $22, 0 /* MIPS_COP_0_OSSCRATCH */ 172 dmtc0 a1, $22, 0 /* MIPS_COP_0_OSSCRATCH */
173 PTR_L v1, CPU_INFO_TLB_INFO(a1) 173 PTR_L v1, CPU_INFO_TLB_INFO(a1)
174 PTR_L v1, TI_LOCK(v1) 174 PTR_L v1, TI_HWLOCK(v1)
175 PTR_ADDU v1, MTX_LOCK 175 PTR_ADDU v1, MTX_LOCK
176 dmtc0 v1, $22, 2 176 dmtc0 v1, $22, 2
177 j cpu_trampoline 177 j cpu_trampoline
178 nop 178 nop
179 179
180 /* NOTREACHED */ 180 /* NOTREACHED */
181 181
182END(rmixl_cpu_trampoline) 182END(rmixl_cpu_trampoline)
183 183
184#endif /* MULTIPROCESSOR */ 184#endif /* MULTIPROCESSOR */