Thu Feb 16 23:02:22 2012 UTC
Move the ksegx tlb init code into its own function.
Fix a problem with concurrent shootdowns by tracking which cpus want a
shootdown for a pmap; if another cpu wants a shootdown, perform the
shootdown on ourselves.


(matt)
diff -r1.54.26.23 -r1.54.26.24 src/sys/arch/mips/include/pmap.h
diff -r1.179.16.42 -r1.179.16.43 src/sys/arch/mips/mips/pmap.c
diff -r1.1.2.22 -r1.1.2.23 src/sys/arch/mips/mips/pmap_tlb.c
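
The shootdown fix replaces the old boolean pm_shootdown_pending with a
per-cpu bitmask: a cpu that changes a mapping which may be live elsewhere
ORs in its own bit instead of storing 1.  The following is a stand-alone
C11 sketch of that producer side, not the kernel code; NCPU, the trimmed
struct pmap, and note_shootdown_wanted() are hypothetical stand-ins (the
real code uses atomic_or_uint() on pm_shootdown_pending while holding the
TLB lock, as the pmap_tlb.c diff below shows).

#include <stdatomic.h>
#include <stdint.h>

#define NCPU 32				/* illustrative cpu count only */

struct pmap {
	_Atomic uint32_t pm_shootdown_pending;	/* bit N: cpu N wants a shootdown */
	uint32_t pm_active;			/* cpus on which this pmap is active */
};

/* Called after updating or invalidating a PTE, with the TLB lock held. */
static void
note_shootdown_wanted(struct pmap *pm, unsigned cpu, uint32_t my_tlb_cpus)
{
	/* Only record a request if the pmap is active outside our TLB group. */
	if ((pm->pm_active & ~my_tlb_cpus) != 0)
		atomic_fetch_or(&pm->pm_shootdown_pending, UINT32_C(1) << cpu);
}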

cvs diff -r1.54.26.23 -r1.54.26.24 src/sys/arch/mips/include/pmap.h

--- src/sys/arch/mips/include/pmap.h 2012/01/19 08:28:48 1.54.26.23
+++ src/sys/arch/mips/include/pmap.h 2012/02/16 23:02:21 1.54.26.24
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.h,v 1.54.26.23 2012/01/19 08:28:48 matt Exp $ */ 1/* pmap.h,v 1.54.26.23 2012/01/19 08:28:48 matt Exp */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * Ralph Campbell. 8 * Ralph Campbell.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -170,27 +170,27 @@ struct pmap_asid_info { @@ -170,27 +170,27 @@ struct pmap_asid_info {
170#define PMAP_PAI_ASIDVALID_P(pai, ti) ((pai)->pai_asid != 0) 170#define PMAP_PAI_ASIDVALID_P(pai, ti) ((pai)->pai_asid != 0)
171#define PMAP_PAI(pmap, ti) (&(pmap)->pm_pai[tlbinfo_index(ti)]) 171#define PMAP_PAI(pmap, ti) (&(pmap)->pm_pai[tlbinfo_index(ti)])
172#define PAI_PMAP(pai, ti) \ 172#define PAI_PMAP(pai, ti) \
173 ((pmap_t)((intptr_t)(pai) \ 173 ((pmap_t)((intptr_t)(pai) \
174 - offsetof(struct pmap, pm_pai[tlbinfo_index(ti)]))) 174 - offsetof(struct pmap, pm_pai[tlbinfo_index(ti)])))
175 175
176/* 176/*
177 * Machine dependent pmap structure. 177 * Machine dependent pmap structure.
178 */ 178 */
179typedef struct pmap { 179typedef struct pmap {
180#ifdef MULTIPROCESSOR 180#ifdef MULTIPROCESSOR
181 volatile uint32_t pm_active; /* pmap was active on ... */ 181 volatile uint32_t pm_active; /* pmap was active on ... */
182 volatile uint32_t pm_onproc; /* pmap is active on ... */ 182 volatile uint32_t pm_onproc; /* pmap is active on ... */
183 volatile u_int pm_shootdown_pending; 183 volatile uint32_t pm_shootdown_pending;
184#endif 184#endif
185 union segtab *pm_segtab; /* pointers to pages of PTEs */ 185 union segtab *pm_segtab; /* pointers to pages of PTEs */
186 u_int pm_count; /* pmap reference count */ 186 u_int pm_count; /* pmap reference count */
187 u_int pm_flags; 187 u_int pm_flags;
188#define PMAP_DEFERRED_ACTIVATE 0x0001 188#define PMAP_DEFERRED_ACTIVATE 0x0001
189 struct pmap_statistics pm_stats; /* pmap statistics */ 189 struct pmap_statistics pm_stats; /* pmap statistics */
190 struct pmap_asid_info pm_pai[1]; 190 struct pmap_asid_info pm_pai[1];
191} *pmap_t; 191} *pmap_t;
192 192
193enum tlb_invalidate_op { 193enum tlb_invalidate_op {
194 TLBINV_NOBODY=0, 194 TLBINV_NOBODY=0,
195 TLBINV_ONE=1, 195 TLBINV_ONE=1,
196 TLBINV_ALLUSER=2, 196 TLBINV_ALLUSER=2,
@@ -244,35 +244,36 @@ extern paddr_t mips_avail_start; @@ -244,35 +244,36 @@ extern paddr_t mips_avail_start;
244extern paddr_t mips_avail_end; 244extern paddr_t mips_avail_end;
245extern vaddr_t mips_virtual_end; 245extern vaddr_t mips_virtual_end;
246 246
247#define pmap_kernel() (&kernel_pmap_store.kernel_pmap) 247#define pmap_kernel() (&kernel_pmap_store.kernel_pmap)
248#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count) 248#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
249#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) 249#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
250 250
251#define pmap_phys_address(x) mips_ptob(x) 251#define pmap_phys_address(x) mips_ptob(x)
252 252
253/* 253/*
254 * Bootstrap the system enough to run with virtual memory. 254 * Bootstrap the system enough to run with virtual memory.
255 */ 255 */
256void pmap_bootstrap(void); 256void pmap_bootstrap(void);
 257void pmap_ksegx_bootstrap(void);
257 258
258void pmap_remove_all(pmap_t); 259void pmap_remove_all(pmap_t);
259void pmap_set_modified(paddr_t); 260void pmap_set_modified(paddr_t);
260void pmap_procwr(struct proc *, vaddr_t, size_t); 261void pmap_procwr(struct proc *, vaddr_t, size_t);
261#define PMAP_NEED_PROCWR 262#define PMAP_NEED_PROCWR
262 263
263#ifdef MULTIPROCESSOR 264#ifdef MULTIPROCESSOR
264void pmap_tlb_shootdown_process(void); 265void pmap_tlb_shootdown_process(void);
265bool pmap_tlb_shootdown_bystanders(pmap_t pmap); 266bool pmap_tlb_shootdown_bystanders(pmap_t pmap, uint32_t);
266void pmap_tlb_info_attach(struct pmap_tlb_info *, struct cpu_info *); 267void pmap_tlb_info_attach(struct pmap_tlb_info *, struct cpu_info *);
267#endif 268#endif
268void pmap_syncicache_page(struct vm_page *, uint32_t); 269void pmap_syncicache_page(struct vm_page *, uint32_t);
269void pmap_tlb_info_init(struct pmap_tlb_info *); 270void pmap_tlb_info_init(struct pmap_tlb_info *);
270void pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *); 271void pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *);
271void pmap_tlb_asid_acquire(pmap_t pmap, struct lwp *l); 272void pmap_tlb_asid_acquire(pmap_t pmap, struct lwp *l);
272void pmap_tlb_asid_deactivate(pmap_t pmap); 273void pmap_tlb_asid_deactivate(pmap_t pmap);
273void pmap_tlb_asid_check(void); 274void pmap_tlb_asid_check(void);
274void pmap_tlb_asid_release_all(pmap_t pmap); 275void pmap_tlb_asid_release_all(pmap_t pmap);
275int pmap_tlb_update_addr(pmap_t pmap, vaddr_t, uint32_t, bool); 276int pmap_tlb_update_addr(pmap_t pmap, vaddr_t, uint32_t, bool);
276void pmap_tlb_invalidate_addr(pmap_t pmap, vaddr_t); 277void pmap_tlb_invalidate_addr(pmap_t pmap, vaddr_t);
277 278
278/* 279/*

cvs diff -r1.179.16.42 -r1.179.16.43 src/sys/arch/mips/mips/Attic/pmap.c

--- src/sys/arch/mips/mips/Attic/pmap.c 2012/02/14 01:51:11 1.179.16.42
+++ src/sys/arch/mips/mips/Attic/pmap.c 2012/02/16 23:02:22 1.179.16.43
@@ -433,59 +433,75 @@ pmap_unmap_ephemeral_page(struct vm_page @@ -433,59 +433,75 @@ pmap_unmap_ephemeral_page(struct vm_page
433 433
434 if (va >= VM_MIN_KERNEL_ADDRESS) { 434 if (va >= VM_MIN_KERNEL_ADDRESS) {
435 pmap_kremove(va, PAGE_SIZE); 435 pmap_kremove(va, PAGE_SIZE);
436 if (mips_pg_v(old_pt_entry.pt_entry)) { 436 if (mips_pg_v(old_pt_entry.pt_entry)) {
437 *kvtopte(va) = old_pt_entry; 437 *kvtopte(va) = old_pt_entry;
438 pmap_tlb_update_addr(pmap_kernel(), va, 438 pmap_tlb_update_addr(pmap_kernel(), va,
439 old_pt_entry.pt_entry, false); 439 old_pt_entry.pt_entry, false);
440 } 440 }
441 kpreempt_enable(); 441 kpreempt_enable();
442 } 442 }
443#endif 443#endif
444} 444}
445 445
 446#ifdef ENABLE_MIPS_KSEGX
 447void
 448pmap_ksegx_bootstrap(void)
 449{
 450 const vaddr_t kva_inc = 1 << ((VM_KSEGX_SHIFT - 1) & ~1);
 451 const uint32_t tlb_mask = (2 * kva_inc - 1) & 0x1ffffc00;
 452
 453 if (mips_ksegx_tlb_slot < 0) {
 454 mips_ksegx_tlb_slot = pmap_tlb0_info.ti_wired;
 455 pmap_tlb0_info.ti_wired += VM_KSEGX_SIZE / (2 * kva_inc);
 456 mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
 457 }
 458
 459 u_int tlb_slot = mips_ksegx_tlb_slot;
 460 for (vaddr_t kva = 0;
 461 kva < VM_KSEGX_SIZE;
 462 kva += 2 * kva_inc, tlb_slot++) {
 463 extern pt_entry_t mips_ksegx_pte;
 464 struct tlbmask tlb = {
 465 .tlb_hi = VM_KSEGX_ADDRESS + kva,
 466 .tlb_lo0 = mips_ksegx_pte.pt_entry
 467 + mips_paddr_to_tlbpfn(kva),
 468 .tlb_lo1 = mips_ksegx_pte.pt_entry
 469 + mips_paddr_to_tlbpfn(kva + kva_inc),
 470 .tlb_mask = tlb_mask,
 471 };
 472 tlb_write_indexed(tlb_slot, &tlb);
 473 }
 474}
 475#endif
 476
446/* 477/*
447 * Bootstrap the system enough to run with virtual memory. 478 * Bootstrap the system enough to run with virtual memory.
448 * firstaddr is the first unused kseg0 address (not page aligned). 479 * firstaddr is the first unused kseg0 address (not page aligned).
449 */ 480 */
450void 481void
451pmap_bootstrap(void) 482pmap_bootstrap(void)
452{ 483{
453 vsize_t bufsz; 484 vsize_t bufsz;
454 485
455 if (MIPS_CACHE_VIRTUAL_ALIAS && uvmexp.ncolors) 486 if (MIPS_CACHE_VIRTUAL_ALIAS && uvmexp.ncolors)
456 pmap_page_colormask = (uvmexp.ncolors - 1) << PAGE_SHIFT; 487 pmap_page_colormask = (uvmexp.ncolors - 1) << PAGE_SHIFT;
457 488
458 KASSERT(uvmexp.ncolors <= 16 - PG_MD_EXECPAGE_SHIFT); 489 KASSERT(uvmexp.ncolors <= 16 - PG_MD_EXECPAGE_SHIFT);
459 490
460 pmap_tlb_info_init(&pmap_tlb0_info); /* init the lock */ 491 pmap_tlb_info_init(&pmap_tlb0_info); /* init the lock */
461 492
462#ifdef ENABLE_MIPS_KSEGX 493#ifdef ENABLE_MIPS_KSEGX
463 const vaddr_t kva_inc = 1 << ((VM_KSEGX_SHIFT - 1) & ~1); 494 pmap_ksegx_bootstrap();
464 const uint32_t tlb_mask = (2 * kva_inc - 1) & 0x1ffffc00; 
465 for (vaddr_t kva = 0; kva < VM_KSEGX_SIZE; kva += 2 * kva_inc) { 
466 extern pt_entry_t mips_ksegx_pte; 
467 struct tlbmask tlb = { 
468 .tlb_hi = VM_KSEGX_ADDRESS + kva, 
469 .tlb_lo0 = mips_ksegx_pte.pt_entry 
470 + mips_paddr_to_tlbpfn(kva), 
471 .tlb_lo1 = mips_ksegx_pte.pt_entry 
472 + mips_paddr_to_tlbpfn(kva + kva_inc), 
473 .tlb_mask = tlb_mask, 
474 }; 
475 tlb_write_indexed(pmap_tlb0_info.ti_wired, &tlb); 
476 pmap_tlb0_info.ti_wired++; 
477 } 
478 mips3_cp0_wired_write(pmap_tlb0_info.ti_wired); 
479#endif 495#endif
480 496
481 /* 497 /*
482 * Compute the number of pages kmem_map will have. 498 * Compute the number of pages kmem_map will have.
483 */ 499 */
484 kmeminit_nkmempages(); 500 kmeminit_nkmempages();
485 501
486 /* 502 /*
487 * Figure out how many PTE's are necessary to map the kernel. 503 * Figure out how many PTE's are necessary to map the kernel.
488 * We also reserve space for kmem_alloc_pageable() for vm_fork(). 504 * We also reserve space for kmem_alloc_pageable() for vm_fork().
489 */ 505 */
490 506
491 /* Get size of buffer cache and set an upper limit */ 507 /* Get size of buffer cache and set an upper limit */
@@ -908,27 +924,27 @@ pmap_deactivate(struct lwp *l) @@ -908,27 +924,27 @@ pmap_deactivate(struct lwp *l)
908#endif 924#endif
909 pmap_tlb_asid_deactivate(l->l_proc->p_vmspace->vm_map.pmap); 925 pmap_tlb_asid_deactivate(l->l_proc->p_vmspace->vm_map.pmap);
910 kpreempt_enable(); 926 kpreempt_enable();
911} 927}
912 928
913void 929void
914pmap_update(struct pmap *pm) 930pmap_update(struct pmap *pm)
915{ 931{
916 PMAP_COUNT(update); 932 PMAP_COUNT(update);
917 933
918 kpreempt_disable(); 934 kpreempt_disable();
919#ifdef MULTIPROCESSOR 935#ifdef MULTIPROCESSOR
920 u_int pending = atomic_swap_uint(&pm->pm_shootdown_pending, 0); 936 u_int pending = atomic_swap_uint(&pm->pm_shootdown_pending, 0);
921 if (pending && pmap_tlb_shootdown_bystanders(pm)) 937 if (pending && pmap_tlb_shootdown_bystanders(pm, pending))
922 PMAP_COUNT(shootdown_ipis); 938 PMAP_COUNT(shootdown_ipis);
923#endif 939#endif
924 /* 940 /*
925 * If pmap_remove_all was called, we deactivated ourselves and nuked 941 * If pmap_remove_all was called, we deactivated ourselves and nuked
926 * our ASID. Now we have to reactivate ourselves. 942 * our ASID. Now we have to reactivate ourselves.
927 */ 943 */
928 if (__predict_false(pm->pm_flags & PMAP_DEFERRED_ACTIVATE)) { 944 if (__predict_false(pm->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
929 pm->pm_flags ^= PMAP_DEFERRED_ACTIVATE; 945 pm->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
930 pmap_tlb_asid_acquire(pm, curlwp); 946 pmap_tlb_asid_acquire(pm, curlwp);
931 pmap_segtab_activate(pm, curlwp); 947 pmap_segtab_activate(pm, curlwp);
932 } 948 }
933 kpreempt_enable(); 949 kpreempt_enable();
934} 950}
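
On the consumer side, pmap_update() (above) now drains the whole mask with
atomic_swap_uint() and hands it to pmap_tlb_shootdown_bystanders().  Since
clearing the mask hides the request from the cpu that set it, a bit
belonging to a cpu outside the caller's TLB group means the caller must
run the shootdown itself.  Below is a stand-alone C11 model of that logic,
with hypothetical stubs standing in for pmap_tlb_shootdown_process() and
the bystander IPI loop; it is a sketch of the idea, not the kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct pmap {
	_Atomic uint32_t pm_shootdown_pending;	/* bit N: cpu N wants a shootdown */
};

/* Hypothetical stand-ins for pmap_tlb_shootdown_process() and the IPI loop. */
static void shootdown_locally(void) { }
static bool ipi_bystanders(struct pmap *pm) { (void)pm; return false; }

static bool
drain_pending_shootdowns(struct pmap *pm, uint32_t my_tlb_cpus)
{
	/* Take ownership of every outstanding request in one atomic step. */
	uint32_t pending = atomic_exchange(&pm->pm_shootdown_pending, 0);

	if (pending == 0)
		return false;
	/*
	 * A request from a cpu outside our TLB group was aimed at us; we
	 * just cleared its bit, so it will never send the IPI itself.
	 * Do the invalidation locally instead.
	 */
	if ((pending & ~my_tlb_cpus) != 0)
		shootdown_locally();
	return ipi_bystanders(pm);	/* IPI the cpus that still need it */
}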

cvs diff -r1.1.2.22 -r1.1.2.23 src/sys/arch/mips/mips/Attic/pmap_tlb.c

--- src/sys/arch/mips/mips/Attic/pmap_tlb.c 2012/01/19 08:28:50 1.1.2.22
+++ src/sys/arch/mips/mips/Attic/pmap_tlb.c 2012/02/16 23:02:22 1.1.2.23
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap_tlb.c,v 1.1.2.22 2012/01/19 08:28:50 matt Exp $ */ 1/* pmap_tlb.c,v 1.1.2.22 2012/01/19 08:28:50 matt Exp */
2 2
3/*- 3/*-
4 * Copyright (c) 2010 The NetBSD Foundation, Inc. 4 * Copyright (c) 2010 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas at 3am Software Foundry. 8 * by Matt Thomas at 3am Software Foundry.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -21,27 +21,27 @@ @@ -21,27 +21,27 @@
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33 33
34__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.1.2.22 2012/01/19 08:28:50 matt Exp $"); 34__KERNEL_RCSID(0, "pmap_tlb.c,v 1.1.2.22 2012/01/19 08:28:50 matt Exp");
35 35
36/* 36/*
37 * Manages address spaces in a TLB. 37 * Manages address spaces in a TLB.
38 * 38 *
39 * Normally there is a 1:1 mapping between a TLB and a CPU. However, some 39 * Normally there is a 1:1 mapping between a TLB and a CPU. However, some
40 * implementations may share a TLB between multiple CPUs (really CPU thread 40 * implementations may share a TLB between multiple CPUs (really CPU thread
41 * contexts). This requires the TLB abstraction to be separated from the 41 * contexts). This requires the TLB abstraction to be separated from the
42 * CPU abstraction. It also requires that the TLB be locked while doing 42 * CPU abstraction. It also requires that the TLB be locked while doing
43 * TLB activities. 43 * TLB activities.
44 * 44 *
45 * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps 45 * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps
46 * that have a valid ASID. 46 * that have a valid ASID.
47 * 47 *
@@ -211,26 +211,29 @@ pmap_pai_reset(struct pmap_tlb_info *ti, @@ -211,26 +211,29 @@ pmap_pai_reset(struct pmap_tlb_info *ti,
211 211
212#ifdef MULTIPROCESSOR 212#ifdef MULTIPROCESSOR
213 /* 213 /*
214 * The bits in pm_active belonging to this TLB can only be changed 214 * The bits in pm_active belonging to this TLB can only be changed
215 * while this TLB's lock is held. 215 * while this TLB's lock is held.
216 */ 216 */
217 atomic_and_32(&pm->pm_active, ~ti->ti_cpu_mask); 217 atomic_and_32(&pm->pm_active, ~ti->ti_cpu_mask);
218#endif /* MULTIPROCESSOR */ 218#endif /* MULTIPROCESSOR */
219} 219}
220 220
221void 221void
222pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *ti) 222pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *ti)
223{ 223{
 224 KDASSERT(ti->ti_name[0] == 't');
 225 KDASSERT(ti->ti_name[1] == 'l');
 226 KDASSERT(ti->ti_name[2] == 'b');
224 evcnt_attach_dynamic(&ti->ti_evcnt_asid_reinits, 227 evcnt_attach_dynamic(&ti->ti_evcnt_asid_reinits,
225 EVCNT_TYPE_MISC, NULL, 228 EVCNT_TYPE_MISC, NULL,
226 ti->ti_name, "asid pool reinit"); 229 ti->ti_name, "asid pool reinit");
227 evcnt_attach_dynamic(&ti->ti_evcnt_asid_reclaims, 230 evcnt_attach_dynamic(&ti->ti_evcnt_asid_reclaims,
228 EVCNT_TYPE_MISC, NULL, 231 EVCNT_TYPE_MISC, NULL,
229 ti->ti_name, "asid pool reclaims"); 232 ti->ti_name, "asid pool reclaims");
230} 233}
231 234
232void 235void
233pmap_tlb_info_init(struct pmap_tlb_info *ti) 236pmap_tlb_info_init(struct pmap_tlb_info *ti)
234{ 237{
235 const struct mips_options * const opts = &mips_options; 238 const struct mips_options * const opts = &mips_options;
236#ifdef MULTIPROCESSOR 239#ifdef MULTIPROCESSOR
@@ -280,37 +283,44 @@ pmap_tlb_info_init(struct pmap_tlb_info  @@ -280,37 +283,44 @@ pmap_tlb_info_init(struct pmap_tlb_info
280 ti->ti_cpu_mask = 0; 283 ti->ti_cpu_mask = 0;
281 ti->ti_index = pmap_ntlbs++; 284 ti->ti_index = pmap_ntlbs++;
282 snprintf(ti->ti_name, sizeof(ti->ti_name), "tlb%u", ti->ti_index); 285 snprintf(ti->ti_name, sizeof(ti->ti_name), "tlb%u", ti->ti_index);
283 286
284 KASSERT(ti != &pmap_tlb0_info); 287 KASSERT(ti != &pmap_tlb0_info);
285 pmap_tlb_info_evcnt_attach(ti); 288 pmap_tlb_info_evcnt_attach(ti);
286 289
287 /* 290 /*
288 * If we are reserving a tlb slot for mapping cpu_info, 291 * If we are reserving a tlb slot for mapping cpu_info,
289 * allocate it now. 292 * allocate it now.
290 */ 293 */
291 ti->ti_wired = (cpu_info_store.ci_tlb_slot >= 0); 294 ti->ti_wired = (cpu_info_store.ci_tlb_slot >= 0);
292 pmap_tlbs[ti->ti_index] = ti; 295 pmap_tlbs[ti->ti_index] = ti;
 296 KDASSERT(ti->ti_name[0] == 't');
 297 KDASSERT(ti->ti_name[1] == 'l');
 298 KDASSERT(ti->ti_name[2] == 'b');
293#endif /* MULTIPROCESSOR */ 299#endif /* MULTIPROCESSOR */
294} 300}
295 301
296#ifdef MULTIPROCESSOR 302#ifdef MULTIPROCESSOR
297void 303void
298pmap_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci) 304pmap_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
299{ 305{
300 KASSERT(!CPU_IS_PRIMARY(ci)); 306 KASSERT(!CPU_IS_PRIMARY(ci));
301 KASSERT(ci->ci_data.cpu_idlelwp != NULL); 307 KASSERT(ci->ci_data.cpu_idlelwp != NULL);
302 KASSERT(cold); 308 KASSERT(cold);
303 309
 310 KDASSERT(ti->ti_name[0] == 't');
 311 KDASSERT(ti->ti_name[1] == 'l');
 312 KDASSERT(ti->ti_name[2] == 'b');
 313
304 TLBINFO_LOCK(ti); 314 TLBINFO_LOCK(ti);
305 uint32_t cpu_mask = 1 << cpu_index(ci); 315 uint32_t cpu_mask = 1 << cpu_index(ci);
306 ti->ti_cpu_mask |= cpu_mask; 316 ti->ti_cpu_mask |= cpu_mask;
307 ci->ci_tlb_info = ti; 317 ci->ci_tlb_info = ti;
308 ci->ci_ksp_tlb_slot = ti->ti_wired++; 318 ci->ci_ksp_tlb_slot = ti->ti_wired++;
309 /* 319 /*
310 * If we need a tlb slot for mapping cpu_info, use 0. If we don't 320 * If we need a tlb slot for mapping cpu_info, use 0. If we don't
311 * need one then ci_tlb_slot will be -1, and so will ci->ci_tlb_slot 321 * need one then ci_tlb_slot will be -1, and so will ci->ci_tlb_slot
312 */ 322 */
313 ci->ci_tlb_slot = -(cpu_info_store.ci_tlb_slot < 0); 323 ci->ci_tlb_slot = -(cpu_info_store.ci_tlb_slot < 0);
314 /* 324 /*
315 * Mark the kernel as active and "onproc" for this cpu. We assume 325 * Mark the kernel as active and "onproc" for this cpu. We assume
316 * we are the only CPU running so atomic ops are not needed. 326 * we are the only CPU running so atomic ops are not needed.
@@ -426,30 +436,36 @@ pmap_tlb_asid_reinitialize(struct pmap_t @@ -426,30 +436,36 @@ pmap_tlb_asid_reinitialize(struct pmap_t
426#endif 436#endif
427} 437}
428 438
429#ifdef MULTIPROCESSOR 439#ifdef MULTIPROCESSOR
430void 440void
431pmap_tlb_shootdown_process(void) 441pmap_tlb_shootdown_process(void)
432{ 442{
433 struct cpu_info * const ci = curcpu(); 443 struct cpu_info * const ci = curcpu();
434 struct pmap_tlb_info * const ti = ci->ci_tlb_info; 444 struct pmap_tlb_info * const ti = ci->ci_tlb_info;
435#ifdef DIAGNOSTIC 445#ifdef DIAGNOSTIC
436 struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap; 446 struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
437#endif 447#endif
438 448
 449 KDASSERT(ti->ti_name[0] == 't');
 450 KDASSERT(ti->ti_name[1] == 'l');
 451 KDASSERT(ti->ti_name[2] == 'b');
 452
 453#if 0
439 KASSERT(cpu_intr_p()); 454 KASSERT(cpu_intr_p());
440 KASSERTMSG(ci->ci_cpl >= IPL_SCHED, 455 KASSERTMSG(ci->ci_cpl >= IPL_SCHED,
441 ("%s: cpl (%d) < IPL_SCHED (%d)", 456 ("%s: cpl (%d) < IPL_SCHED (%d)",
442 __func__, ci->ci_cpl, IPL_SCHED)); 457 __func__, ci->ci_cpl, IPL_SCHED));
 458#endif
443 TLBINFO_LOCK(ti); 459 TLBINFO_LOCK(ti);
444 460
445 switch (ti->ti_tlbinvop) { 461 switch (ti->ti_tlbinvop) {
446 case TLBINV_ONE: { 462 case TLBINV_ONE: {
447 /* 463 /*
448 * We only need to invalidate one user ASID. 464 * We only need to invalidate one user ASID.
449 */ 465 */
450 struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti); 466 struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti);
451 KASSERT(ti->ti_victim != pmap_kernel()); 467 KASSERT(ti->ti_victim != pmap_kernel());
452 if (ti->ti_victim->pm_onproc & ti->ti_cpu_mask) { 468 if (ti->ti_victim->pm_onproc & ti->ti_cpu_mask) {
453 /* 469 /*
454 * The victim is an active pmap so we will just 470 * The victim is an active pmap so we will just
455 * invalidate its TLB entries. 471 * invalidate its TLB entries.
@@ -516,151 +532,189 @@ pmap_tlb_shootdown_process(void) @@ -516,151 +532,189 @@ pmap_tlb_shootdown_process(void)
516 | ( (one) << 3*TLBINV_ONE) \ 532 | ( (one) << 3*TLBINV_ONE) \
517 | ( (alluser) << 3*TLBINV_ALLUSER) \ 533 | ( (alluser) << 3*TLBINV_ALLUSER) \
518 | ((allkernel) << 3*TLBINV_ALLKERNEL) \ 534 | ((allkernel) << 3*TLBINV_ALLKERNEL) \
519 | ( (all) << 3*TLBINV_ALL)) >> 3*(op)) & 7) 535 | ( (all) << 3*TLBINV_ALL)) >> 3*(op)) & 7)
520 536
521#define TLBINV_USER_MAP(op) \ 537#define TLBINV_USER_MAP(op) \
522 TLBINV_MAP(op, TLBINV_ONE, TLBINV_ALLUSER, TLBINV_ALLUSER, \ 538 TLBINV_MAP(op, TLBINV_ONE, TLBINV_ALLUSER, TLBINV_ALLUSER, \
523 TLBINV_ALL, TLBINV_ALL) 539 TLBINV_ALL, TLBINV_ALL)
524 540
525#define TLBINV_KERNEL_MAP(op) \ 541#define TLBINV_KERNEL_MAP(op) \
526 TLBINV_MAP(op, TLBINV_ALLKERNEL, TLBINV_ALL, TLBINV_ALL, \ 542 TLBINV_MAP(op, TLBINV_ALLKERNEL, TLBINV_ALL, TLBINV_ALL, \
527 TLBINV_ALLKERNEL, TLBINV_ALL) 543 TLBINV_ALLKERNEL, TLBINV_ALL)
528 544
529bool 545static struct cpu_info *
530pmap_tlb_shootdown_bystanders(pmap_t pm) 546pmap_tlb_target_bystander(struct pmap_tlb_info *ti, struct pmap *pm,
 547 bool kernel_p)
531{ 548{
 549 struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
 550 TLBINFO_LOCK(ti);
 551 const uint32_t onproc = (pm->pm_onproc & ti->ti_cpu_mask);
 552 if (onproc == 0) {
 553 if (pm->pm_active & ti->ti_cpu_mask) {
 554 /*
 555 * If this pmap has an ASID assigned but it's not
 556 * currently running, nuke its ASID. Next time the
 557 * pmap is activated, it will allocate a new ASID.
 558 * And best of all, we avoid an IPI.
 559 */
 560 KASSERT(!kernel_p);
 561 pmap_pai_reset(ti, pai, pm);
 562 //ti->ti_evcnt_lazy_shots.ev_count++;
 563 }
 564 TLBINFO_UNLOCK(ti);
 565 return NULL;
 566 }
 567 if (kernel_p) {
 568 ti->ti_tlbinvop = TLBINV_KERNEL_MAP(ti->ti_tlbinvop);
 569 ti->ti_victim = NULL;
 570 } else {
 571 KASSERT(pai->pai_asid);
 572 if (__predict_false(ti->ti_victim == pm)) {
 573 KASSERT(ti->ti_tlbinvop == TLBINV_ONE);
 574 /*
 575 * We still need to invalidate this one
 576 * ASID so there's nothing to change.
 577 */
 578 } else {
 579 ti->ti_tlbinvop = TLBINV_USER_MAP(ti->ti_tlbinvop);
 580 if (ti->ti_tlbinvop == TLBINV_ONE)
 581 ti->ti_victim = pm;
 582 else
 583 ti->ti_victim = NULL;
 584 }
 585 }
 586 TLBINFO_UNLOCK(ti);
532 /* 587 /*
533 * We don't need to deal our own TLB. 588 * Return a pointer to the cpu_info of one of the tlb_info's cpus
534 */ 589 */
535 uint32_t pm_active = pm->pm_active & ~curcpu()->ci_tlb_info->ti_cpu_mask; 590 const u_int j = ffs(onproc) - 1;
 591 return cpu_lookup(j);
 592}
 593
 594bool
 595pmap_tlb_shootdown_bystanders(pmap_t pm, uint32_t pending)
 596{
 597 struct cpu_info * const ci = curcpu();
 598 struct pmap_tlb_info * const curti = ci->ci_tlb_info;
 599 uint32_t pm_active = pm->pm_active & ~curti->ti_cpu_mask;
536 const bool kernel_p = (pm == pmap_kernel()); 600 const bool kernel_p = (pm == pmap_kernel());
537 bool ipi_sent = false; 601 bool ipi_sent = false;
538 602
 603 KDASSERT(curti->ti_name[0] == 't');
 604 KDASSERT(curti->ti_name[1] == 'l');
 605 KDASSERT(curti->ti_name[2] == 'b');
 606
 607 if (__predict_false(pending & ~curti->ti_cpu_mask) != 0) {
 608 /*
 609 * Now if another cpu (not sharing this tlb_info) wants a
 610 * shootdown, then they must mean us since this pmap is
 611 * obviously active. But since we cleared their bit, they
 612 * won't know they need to do it. So we do it ourselves
 613 * and save them from sending an IPI.
 614 */
 615 if (pmap_tlb_target_bystander(curti, pm, kernel_p) != NULL)
 616 pmap_tlb_shootdown_process();
 617 }
 618
539 /* 619 /*
540 * If pm_active gets more bits set, then it's after all our changes 620 * If pm_active gets more bits set, then it's after all our changes
541 * have been made so they will already be cognizant of them. 621 * have been made so they will already be cognizant of them.
542 */ 622 */
543 
544 for (size_t i = 0; pm_active != 0; i++) { 623 for (size_t i = 0; pm_active != 0; i++) {
545 KASSERT(i < pmap_ntlbs); 624 KASSERT(i < pmap_ntlbs);
546 struct pmap_tlb_info * const ti = pmap_tlbs[i]; 625 struct pmap_tlb_info * const ti = pmap_tlbs[i];
547 KASSERT(tlbinfo_index(ti) == i); 626 KASSERT(tlbinfo_index(ti) == i);
548 /* 627 /*
549 * Skip this TLB if there are no active mappings for it. 628 * Skip this TLB if there are no active mappings for it.
550 */ 629 */
551 if ((pm_active & ti->ti_cpu_mask) == 0) 630 if ((pm_active & ti->ti_cpu_mask) == 0) {
552 continue; 631 continue;
553 struct pmap_asid_info * const pai = PMAP_PAI(pm, ti); 632 }
554 pm_active &= ~ti->ti_cpu_mask; 633 pm_active &= ~ti->ti_cpu_mask;
555 TLBINFO_LOCK(ti); 634 struct cpu_info * const ipi_ci =
556 const uint32_t onproc = (pm->pm_onproc & ti->ti_cpu_mask); 635 pmap_tlb_target_bystander(ti, pm, kernel_p);
557 if (onproc != 0) { 636 if (ipi_ci != NULL) {
558 if (kernel_p) { 
559 ti->ti_tlbinvop = 
560 TLBINV_KERNEL_MAP(ti->ti_tlbinvop); 
561 ti->ti_victim = NULL; 
562 } else { 
563 KASSERT(pai->pai_asid); 
564 if (__predict_false(ti->ti_victim == pm)) { 
565 KASSERT(ti->ti_tlbinvop == TLBINV_ONE); 
566 /* 
567 * We still need to invalidate this one 
568 * ASID so there's nothing to change. 
569 */ 
570 } else { 
571 ti->ti_tlbinvop = 
572 TLBINV_USER_MAP(ti->ti_tlbinvop); 
573 if (ti->ti_tlbinvop == TLBINV_ONE) 
574 ti->ti_victim = pm; 
575 else 
576 ti->ti_victim = NULL; 
577 } 
578 } 
579 TLBINFO_UNLOCK(ti); 
580 /* 637 /*
581 * Now we can send out the shootdown IPIs to a CPU 638 * Now we can send out the shootdown IPIs to a CPU
582 * that shares this TLB and is currently using this 639 * that shares this TLB and is currently using this
583 * pmap. That CPU will process the IPI and do the 640 * pmap. That CPU will process the IPI and do the
584 * all the work. Any other CPUs sharing that TLB 641 * all the work. Any other CPUs sharing that TLB
585 * will take advantage of that work. pm_onproc might 642 * will take advantage of that work. pm_onproc might
586 * change now that we have released the lock but we 643 * change now that we have released the lock but we
587 * can tolerate spurious shootdowns. 644 * can tolerate spurious shootdowns.
588 */ 645 */
589 KASSERT(onproc != 0); 646 cpu_send_ipi(ipi_ci, IPI_SHOOTDOWN);
590 u_int j = ffs(onproc) - 1; 
591 cpu_send_ipi(cpu_lookup(j), IPI_SHOOTDOWN); 
592 ipi_sent = true; 647 ipi_sent = true;
593 continue; 
594 } 
595 if (pm->pm_active & ti->ti_cpu_mask) { 
596 /* 
597 * If this pmap has an ASID assigned but it's not 
598 * currently running, nuke its ASID. Next time the 
599 * pmap is activated, it will allocate a new ASID. 
600 * And best of all, we avoid an IPI. 
601 */ 
602 KASSERT(!kernel_p); 
603 pmap_pai_reset(ti, pai, pm); 
604 //ti->ti_evcnt_lazy_shots.ev_count++; 
605 } 648 }
606 TLBINFO_UNLOCK(ti); 
607 } 649 }
608 650
609 return ipi_sent; 651 return ipi_sent;
610} 652}
611#endif /* MULTIPROCESSOR */ 653#endif /* MULTIPROCESSOR */
612 654
613int 655int
614pmap_tlb_update_addr(pmap_t pm, vaddr_t va, uint32_t pt_entry, bool need_ipi) 656pmap_tlb_update_addr(pmap_t pm, vaddr_t va, uint32_t pt_entry, bool need_ipi)
615{ 657{
616 struct pmap_tlb_info * const ti = curcpu()->ci_tlb_info; 658 struct pmap_tlb_info * const ti = curcpu()->ci_tlb_info;
617 struct pmap_asid_info * const pai = PMAP_PAI(pm, ti); 659 struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
618 int rv = -1; 660 int rv = -1;
619 661
 662 KDASSERT(ti->ti_name[0] == 't');
 663 KDASSERT(ti->ti_name[1] == 'l');
 664 KDASSERT(ti->ti_name[2] == 'b');
 665
620 KASSERT(kpreempt_disabled()); 666 KASSERT(kpreempt_disabled());
621 667
622 TLBINFO_LOCK(ti); 668 TLBINFO_LOCK(ti);
623 if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) { 669 if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
624 va |= pai->pai_asid << MIPS_TLB_PID_SHIFT; 670 va |= pai->pai_asid << MIPS_TLB_PID_SHIFT;
625 pmap_tlb_asid_check(); 671 pmap_tlb_asid_check();
626 rv = tlb_update(va, pt_entry); 672 rv = tlb_update(va, pt_entry);
627 pmap_tlb_asid_check(); 673 pmap_tlb_asid_check();
628 } 674 }
629#ifdef MULTIPROCESSOR 675#ifdef MULTIPROCESSOR
630 atomic_or_uint(&pm->pm_shootdown_pending, need_ipi); 676 if (need_ipi && (pm->pm_active & ~ti->ti_cpu_mask) != 0) {
 677 atomic_or_uint(&pm->pm_shootdown_pending, 1 << cpu_number());
 678 }
631#endif 679#endif
632 TLBINFO_UNLOCK(ti); 680 TLBINFO_UNLOCK(ti);
633 681
634 return rv; 682 return rv;
635} 683}
636 684
637void 685void
638pmap_tlb_invalidate_addr(pmap_t pm, vaddr_t va) 686pmap_tlb_invalidate_addr(pmap_t pm, vaddr_t va)
639{ 687{
640 struct pmap_tlb_info * const ti = curcpu()->ci_tlb_info; 688 struct pmap_tlb_info * const ti = curcpu()->ci_tlb_info;
641 struct pmap_asid_info * const pai = PMAP_PAI(pm, ti); 689 struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
642 690
 691 KDASSERT(ti->ti_name[0] == 't');
 692 KDASSERT(ti->ti_name[1] == 'l');
 693 KDASSERT(ti->ti_name[2] == 'b');
 694
643 KASSERT(kpreempt_disabled()); 695 KASSERT(kpreempt_disabled());
644 696
645 TLBINFO_LOCK(ti); 697 TLBINFO_LOCK(ti);
646 if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) { 698 if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
647 va |= pai->pai_asid << MIPS_TLB_PID_SHIFT; 699 va |= pai->pai_asid << MIPS_TLB_PID_SHIFT;
648 pmap_tlb_asid_check(); 700 pmap_tlb_asid_check();
649 tlb_invalidate_addr(va); 701 tlb_invalidate_addr(va);
650 pmap_tlb_asid_check(); 702 pmap_tlb_asid_check();
651 } 703 }
652#ifdef MULTIPROCESSOR 704#ifdef MULTIPROCESSOR
653 (void) atomic_swap_uint(&pm->pm_shootdown_pending, 1); 705 if ((pm->pm_active & ~ti->ti_cpu_mask) != 0) {
 706 atomic_or_uint(&pm->pm_shootdown_pending, 1 << cpu_number());
 707 }
654#endif 708#endif
655 TLBINFO_UNLOCK(ti); 709 TLBINFO_UNLOCK(ti);
656} 710}
657 711
658static inline void 712static inline void
659pmap_tlb_asid_alloc(struct pmap_tlb_info *ti, pmap_t pm, 713pmap_tlb_asid_alloc(struct pmap_tlb_info *ti, pmap_t pm,
660 struct pmap_asid_info *pai) 714 struct pmap_asid_info *pai)
661{ 715{
662 /* 716 /*
663 * We shouldn't have an ASID assigned, and thusly must not be onproc 717 * We shouldn't have an ASID assigned, and thusly must not be onproc
664 * nor active. 718 * nor active.
665 */ 719 */
666 KASSERT(pai->pai_asid == 0); 720 KASSERT(pai->pai_asid == 0);
@@ -727,26 +781,30 @@ pmap_tlb_asid_alloc(struct pmap_tlb_info @@ -727,26 +781,30 @@ pmap_tlb_asid_alloc(struct pmap_tlb_info
727} 781}
728 782
729/* 783/*
730 * Acquire a TLB address space tag (called ASID or TLBPID) and return it. 784 * Acquire a TLB address space tag (called ASID or TLBPID) and return it.
731 * ASID might have already been previously acquired. 785 * ASID might have already been previously acquired.
732 */ 786 */
733void 787void
734pmap_tlb_asid_acquire(pmap_t pm, struct lwp *l) 788pmap_tlb_asid_acquire(pmap_t pm, struct lwp *l)
735{ 789{
736 struct cpu_info * const ci = l->l_cpu; 790 struct cpu_info * const ci = l->l_cpu;
737 struct pmap_tlb_info * const ti = ci->ci_tlb_info; 791 struct pmap_tlb_info * const ti = ci->ci_tlb_info;
738 struct pmap_asid_info * const pai = PMAP_PAI(pm, ti); 792 struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
739 793
 794 KDASSERT(ti->ti_name[0] == 't');
 795 KDASSERT(ti->ti_name[1] == 'l');
 796 KDASSERT(ti->ti_name[2] == 'b');
 797
740 KASSERT(kpreempt_disabled()); 798 KASSERT(kpreempt_disabled());
741 799
742 /* 800 /*
743 * Kernels use a fixed ASID of 0 and don't need to acquire one. 801 * Kernels use a fixed ASID of 0 and don't need to acquire one.
744 */ 802 */
745 if (pm == pmap_kernel()) 803 if (pm == pmap_kernel())
746 return; 804 return;
747 805
748 TLBINFO_LOCK(ti); 806 TLBINFO_LOCK(ti);
749 if (__predict_false(!PMAP_PAI_ASIDVALID_P(pai, ti))) { 807 if (__predict_false(!PMAP_PAI_ASIDVALID_P(pai, ti))) {
750 /* 808 /*
751 * If we've run out ASIDs, reinitialize the ASID space. 809 * If we've run out ASIDs, reinitialize the ASID space.
752 */ 810 */
@@ -821,41 +879,48 @@ pmap_tlb_asid_deactivate(pmap_t pm) @@ -821,41 +879,48 @@ pmap_tlb_asid_deactivate(pmap_t pm)
821#endif 879#endif
822} 880}
823 881
824void 882void
825pmap_tlb_asid_release_all(struct pmap *pm) 883pmap_tlb_asid_release_all(struct pmap *pm)
826{ 884{
827 KASSERT(pm != pmap_kernel()); 885 KASSERT(pm != pmap_kernel());
828 KASSERT(kpreempt_disabled()); 886 KASSERT(kpreempt_disabled());
829#ifdef MULTIPROCESSOR 887#ifdef MULTIPROCESSOR
830 KASSERT(pm->pm_onproc == 0); 888 KASSERT(pm->pm_onproc == 0);
831 for (u_int i = 0; pm->pm_active != 0; i++) { 889 for (u_int i = 0; pm->pm_active != 0; i++) {
832 KASSERT(i < pmap_ntlbs); 890 KASSERT(i < pmap_ntlbs);
833 struct pmap_tlb_info * const ti = pmap_tlbs[i]; 891 struct pmap_tlb_info * const ti = pmap_tlbs[i];
 892 KDASSERT(ti->ti_name[0] == 't');
 893 KDASSERT(ti->ti_name[1] == 'l');
 894 KDASSERT(ti->ti_name[2] == 'b');
 895
834 if (pm->pm_active & ti->ti_cpu_mask) { 896 if (pm->pm_active & ti->ti_cpu_mask) {
835 struct pmap_asid_info * const pai = PMAP_PAI(pm, ti); 897 struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
836 TLBINFO_LOCK(ti); 898 TLBINFO_LOCK(ti);
837 KASSERT(ti->ti_victim != pm); 899 KASSERT(ti->ti_victim != pm);
838 pmap_pai_reset(ti, pai, pm); 900 pmap_pai_reset(ti, pai, pm);
839 TLBINFO_UNLOCK(ti); 901 TLBINFO_UNLOCK(ti);
840 } 902 }
841 } 903 }
842#else 904#else
843 /* 905 /*
844 * Handle the case of an UP kernel which only has, at most, one ASID. 906 * Handle the case of an UP kernel which only has, at most, one ASID.
845 * If the pmap has an ASID allocated, free it. 907 * If the pmap has an ASID allocated, free it.
846 */ 908 */
847 struct pmap_tlb_info * const ti = curcpu()->ci_tlb_info; 909 struct pmap_tlb_info * const ti = curcpu()->ci_tlb_info;
848 struct pmap_asid_info * const pai = PMAP_PAI(pm, ti); 910 struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
 911 KDASSERT(ti->ti_name[0] == 't');
 912 KDASSERT(ti->ti_name[1] == 'l');
 913 KDASSERT(ti->ti_name[2] == 'b');
849 TLBINFO_LOCK(ti); 914 TLBINFO_LOCK(ti);
850 if (pai->pai_asid) { 915 if (pai->pai_asid) {
851 pmap_pai_reset(ti, pai, pm); 916 pmap_pai_reset(ti, pai, pm);
852 } 917 }
853 TLBINFO_UNLOCK(ti); 918 TLBINFO_UNLOCK(ti);
854#endif /* MULTIPROCESSOR */ 919#endif /* MULTIPROCESSOR */
855} 920}
856 921
857void 922void
858pmap_tlb_asid_check(void) 923pmap_tlb_asid_check(void)
859{ 924{
860#ifdef DEBUG 925#ifdef DEBUG
861 kpreempt_disable(); 926 kpreempt_disable();