| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: pmap_tlb.c,v 1.9 2011/06/23 08:11:56 matt Exp $ */ | | 1 | /* $NetBSD: pmap_tlb.c,v 1.10 2011/06/29 05:53:44 matt Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2010 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2010 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Matt Thomas at 3am Software Foundry. | | 8 | * by Matt Thomas at 3am Software Foundry. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
| @@ -21,27 +21,27 @@ | | | @@ -21,27 +21,27 @@ |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | #include <sys/cdefs.h> | | 32 | #include <sys/cdefs.h> |
33 | | | 33 | |
34 | __KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.9 2011/06/23 08:11:56 matt Exp $"); | | 34 | __KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.10 2011/06/29 05:53:44 matt Exp $"); |
35 | | | 35 | |
36 | /* | | 36 | /* |
37 | * Manages address spaces in a TLB. | | 37 | * Manages address spaces in a TLB. |
38 | * | | 38 | * |
39 | * Normally there is a 1:1 mapping between a TLB and a CPU. However, some | | 39 | * Normally there is a 1:1 mapping between a TLB and a CPU. However, some |
40 | * implementations may share a TLB between multiple CPUs (really CPU thread | | 40 | * implementations may share a TLB between multiple CPUs (really CPU thread |
41 | * contexts). This requires the TLB abstraction to be separated from the | | 41 | * contexts). This requires the TLB abstraction to be separated from the |
42 | * CPU abstraction. It also requires that the TLB be locked while doing | | 42 | * CPU abstraction. It also requires that the TLB be locked while doing |
43 | * TLB activities. | | 43 | * TLB activities. |
44 | * | | 44 | * |
45 | * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps | | 45 | * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps |
46 | * that have a valid ASID. | | 46 | * that have a valid ASID. |
47 | * | | 47 | * |
| @@ -152,30 +152,30 @@ struct pmap_tlb_info pmap_tlb0_info = { | | | @@ -152,30 +152,30 @@ struct pmap_tlb_info pmap_tlb0_info = { |
152 | .ti_wired = PMAP_TLB_WIRED_UPAGES, | | 152 | .ti_wired = PMAP_TLB_WIRED_UPAGES, |
153 | #endif | | 153 | #endif |
154 | .ti_lock = &pmap_tlb0_mutex, | | 154 | .ti_lock = &pmap_tlb0_mutex, |
155 | .ti_pais = LIST_HEAD_INITIALIZER(pmap_tlb0_info.ti_pais), | | 155 | .ti_pais = LIST_HEAD_INITIALIZER(pmap_tlb0_info.ti_pais), |
156 | #if defined(MULTIPROCESSOR) | | 156 | #if defined(MULTIPROCESSOR) |
157 | .ti_cpu_mask = 1, | | 157 | .ti_cpu_mask = 1, |
158 | .ti_tlbinvop = TLBINV_NOBODY, | | 158 | .ti_tlbinvop = TLBINV_NOBODY, |
159 | #endif | | 159 | #endif |
160 | }; | | 160 | }; |
161 | | | 161 | |
162 | #undef IFCONSTANT | | 162 | #undef IFCONSTANT |
163 | | | 163 | |
164 | #if defined(MULTIPROCESSOR) | | 164 | #if defined(MULTIPROCESSOR) |
165 | static struct pmap_tlb_info *pmap_tlbs[MAXCPUS] = { | | 165 | struct pmap_tlb_info *pmap_tlbs[MAXCPUS] = { |
166 | [0] = &pmap_tlb0_info, | | 166 | [0] = &pmap_tlb0_info, |
167 | }; | | 167 | }; |
168 | static u_int pmap_ntlbs = 1; | | 168 | u_int pmap_ntlbs = 1; |
169 | #endif | | 169 | #endif |
170 | | | 170 | |
171 | #define __BITMAP_SET(bm, n) \ | | 171 | #define __BITMAP_SET(bm, n) \ |
172 | ((bm)[(n) / (8*sizeof(bm[0]))] |= 1LU << ((n) % (8*sizeof(bm[0])))) | | 172 | ((bm)[(n) / (8*sizeof(bm[0]))] |= 1LU << ((n) % (8*sizeof(bm[0])))) |
173 | #define __BITMAP_CLR(bm, n) \ | | 173 | #define __BITMAP_CLR(bm, n) \ |
174 | ((bm)[(n) / (8*sizeof(bm[0]))] &= ~(1LU << ((n) % (8*sizeof(bm[0]))))) | | 174 | ((bm)[(n) / (8*sizeof(bm[0]))] &= ~(1LU << ((n) % (8*sizeof(bm[0]))))) |
175 | #define __BITMAP_ISSET_P(bm, n) \ | | 175 | #define __BITMAP_ISSET_P(bm, n) \ |
176 | (((bm)[(n) / (8*sizeof(bm[0]))] & (1LU << ((n) % (8*sizeof(bm[0]))))) != 0) | | 176 | (((bm)[(n) / (8*sizeof(bm[0]))] & (1LU << ((n) % (8*sizeof(bm[0]))))) != 0) |
177 | | | 177 | |
178 | #define TLBINFO_ASID_MARK_USED(ti, asid) \ | | 178 | #define TLBINFO_ASID_MARK_USED(ti, asid) \ |
179 | __BITMAP_SET((ti)->ti_asid_bitmap, (asid)) | | 179 | __BITMAP_SET((ti)->ti_asid_bitmap, (asid)) |
180 | #define TLBINFO_ASID_INUSE_P(ti, asid) \ | | 180 | #define TLBINFO_ASID_INUSE_P(ti, asid) \ |
181 | __BITMAP_ISSET_P((ti)->ti_asid_bitmap, (asid)) | | 181 | __BITMAP_ISSET_P((ti)->ti_asid_bitmap, (asid)) |
| @@ -453,27 +453,27 @@ pmap_tlb_shootdown_process(void) | | | @@ -453,27 +453,27 @@ pmap_tlb_shootdown_process(void) |
453 | KASSERTMSG(ci->ci_cpl >= IPL_SCHED, | | 453 | KASSERTMSG(ci->ci_cpl >= IPL_SCHED, |
454 | ("%s: cpl (%d) < IPL_SCHED (%d)", | | 454 | ("%s: cpl (%d) < IPL_SCHED (%d)", |
455 | __func__, ci->ci_cpl, IPL_SCHED)); | | 455 | __func__, ci->ci_cpl, IPL_SCHED)); |
456 | | | 456 | |
457 | TLBINFO_LOCK(ti); | | 457 | TLBINFO_LOCK(ti); |
458 | | | 458 | |
459 | switch (ti->ti_tlbinvop) { | | 459 | switch (ti->ti_tlbinvop) { |
460 | case TLBINV_ONE: { | | 460 | case TLBINV_ONE: { |
461 | /* | | 461 | /* |
462 | * We only need to invalidate one user ASID. | | 462 | * We only need to invalidate one user ASID. |
463 | */ | | 463 | */ |
464 | struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti); | | 464 | struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti); |
465 | KASSERT(ti->ti_victim != pmap_kernel()); | | 465 | KASSERT(ti->ti_victim != pmap_kernel()); |
466 | if (!CPU_EMPTY_P(CPUSET_SUBSET(ti->ti_victim->pm_onproc, ti->ti_cpu_mask))) { | | 466 | if (!CPUSET_EMPTY_P(CPUSET_SUBSET(ti->ti_victim->pm_onproc, ti->ti_cpu_mask))) { |
467 | /* | | 467 | /* |
468 | * The victim is an active pmap so we will just | | 468 | * The victim is an active pmap so we will just |
469 | * invalidate its TLB entries. | | 469 | * invalidate its TLB entries. |
470 | */ | | 470 | */ |
471 | KASSERT(pai->pai_asid > KERNEL_PID); | | 471 | KASSERT(pai->pai_asid > KERNEL_PID); |
472 | pmap_tlb_asid_check(); | | 472 | pmap_tlb_asid_check(); |
473 | tlb_invalidate_asids(pai->pai_asid, pai->pai_asid); | | 473 | tlb_invalidate_asids(pai->pai_asid, pai->pai_asid); |
474 | pmap_tlb_asid_check(); | | 474 | pmap_tlb_asid_check(); |
475 | } else if (pai->pai_asid) { | | 475 | } else if (pai->pai_asid) { |
476 | /* | | 476 | /* |
477 | * The victim is no longer an active pmap for this TLB. | | 477 | * The victim is no longer an active pmap for this TLB. |
478 | * So simply clear its ASID and when pmap_activate is | | 478 | * So simply clear its ASID and when pmap_activate is |
479 | * next called for this pmap, it will allocate a new | | 479 | * next called for this pmap, it will allocate a new |
| @@ -592,27 +592,27 @@ pmap_tlb_shootdown_bystanders(pmap_t pm) | | | @@ -592,27 +592,27 @@ pmap_tlb_shootdown_bystanders(pmap_t pm) |
592 | ti->ti_victim = NULL; | | 592 | ti->ti_victim = NULL; |
593 | } | | 593 | } |
594 | } | | 594 | } |
595 | TLBINFO_UNLOCK(ti); | | 595 | TLBINFO_UNLOCK(ti); |
596 | /* | | 596 | /* |
597 | * Now we can send out the shootdown IPIs to a CPU | | 597 | * Now we can send out the shootdown IPIs to a CPU |
598 | * that shares this TLB and is currently using this | | 598 | * that shares this TLB and is currently using this |
599 | * pmap. That CPU will process the IPI and do the | | 599 | * pmap. That CPU will process the IPI and do the |
600 | * all the work. Any other CPUs sharing that TLB | | 600 | * all the work. Any other CPUs sharing that TLB |
601 | * will take advantage of that work. pm_onproc might | | 601 | * will take advantage of that work. pm_onproc might |
602 | * change now that we have released the lock but we | | 602 | * change now that we have released the lock but we |
603 | * can tolerate spurious shootdowns. | | 603 | * can tolerate spurious shootdowns. |
604 | */ | | 604 | */ |
605 | KASSERT(!CPU_EMPTY_P(onproc)); | | 605 | KASSERT(!CPUSET_EMPTY_P(onproc)); |
606 | u_int j = CPUSET_NEXT(onproc); | | 606 | u_int j = CPUSET_NEXT(onproc); |
607 | cpu_send_ipi(cpu_lookup(j), IPI_SHOOTDOWN); | | 607 | cpu_send_ipi(cpu_lookup(j), IPI_SHOOTDOWN); |
608 | ipi_sent = true; | | 608 | ipi_sent = true; |
609 | continue; | | 609 | continue; |
610 | } | | 610 | } |
611 | if (!CPUSET_EMPTY_P(CPUSET_SUBSET(pm->pm_active, ti->ti_cpu_mask))) { | | 611 | if (!CPUSET_EMPTY_P(CPUSET_SUBSET(pm->pm_active, ti->ti_cpu_mask))) { |
612 | /* | | 612 | /* |
613 | * If this pmap has an ASID assigned but it's not | | 613 | * If this pmap has an ASID assigned but it's not |
614 | * currently running, nuke its ASID. Next time the | | 614 | * currently running, nuke its ASID. Next time the |
615 | * pmap is activated, it will allocate a new ASID. | | 615 | * pmap is activated, it will allocate a new ASID. |
616 | * And best of all, we avoid an IPI. | | 616 | * And best of all, we avoid an IPI. |
617 | */ | | 617 | */ |
618 | KASSERT(!kernel_p); | | 618 | KASSERT(!kernel_p); |
| @@ -672,28 +672,28 @@ pmap_tlb_invalidate_addr(pmap_t pm, vadd | | | @@ -672,28 +672,28 @@ pmap_tlb_invalidate_addr(pmap_t pm, vadd |
672 | | | 672 | |
673 | static inline void | | 673 | static inline void |
674 | pmap_tlb_asid_alloc(struct pmap_tlb_info *ti, pmap_t pm, | | 674 | pmap_tlb_asid_alloc(struct pmap_tlb_info *ti, pmap_t pm, |
675 | struct pmap_asid_info *pai) | | 675 | struct pmap_asid_info *pai) |
676 | { | | 676 | { |
677 | /* | | 677 | /* |
678 | * We shouldn't have an ASID assigned, and thusly must not be onproc | | 678 | * We shouldn't have an ASID assigned, and thusly must not be onproc |
679 | * nor active. | | 679 | * nor active. |
680 | */ | | 680 | */ |
681 | KASSERT(pm != pmap_kernel()); | | 681 | KASSERT(pm != pmap_kernel()); |
682 | KASSERT(pai->pai_asid == 0); | | 682 | KASSERT(pai->pai_asid == 0); |
683 | KASSERT(pai->pai_link.le_prev == NULL); | | 683 | KASSERT(pai->pai_link.le_prev == NULL); |
684 | #if defined(MULTIPROCESSOR) | | 684 | #if defined(MULTIPROCESSOR) |
685 | KASSERT(CPU_EMPTY_P(CPUSET_SUBSET(pm->pm_onproc, ti->ti_cpu_mask))); | | 685 | KASSERT(CPUSET_EMPTY_P(CPUSET_SUBSET(pm->pm_onproc, ti->ti_cpu_mask))); |
686 | KASSERT(CPU_EMPTY_P(CPUSET_SUBSET(pm->pm_active, ti->ti_cpu_mask))); | | 686 | KASSERT(CPUSET_EMPTY_P(CPUSET_SUBSET(pm->pm_active, ti->ti_cpu_mask))); |
687 | #endif | | 687 | #endif |
688 | KASSERT(ti->ti_asids_free > 0); | | 688 | KASSERT(ti->ti_asids_free > 0); |
689 | KASSERT(ti->ti_asid_hint <= ti->ti_asid_max); | | 689 | KASSERT(ti->ti_asid_hint <= ti->ti_asid_max); |
690 | | | 690 | |
691 | /* | | 691 | /* |
692 | * Let's see if the hinted ASID is free. If not search for | | 692 | * Let's see if the hinted ASID is free. If not search for |
693 | * a new one. | | 693 | * a new one. |
694 | */ | | 694 | */ |
695 | if (__predict_false(TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint))) { | | 695 | if (__predict_false(TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint))) { |
696 | #ifdef DIAGNOSTIC | | 696 | #ifdef DIAGNOSTIC |
697 | const size_t words = __arraycount(ti->ti_asid_bitmap); | | 697 | const size_t words = __arraycount(ti->ti_asid_bitmap); |
698 | #endif | | 698 | #endif |
699 | const size_t nbpw = 8 * sizeof(ti->ti_asid_bitmap[0]); | | 699 | const size_t nbpw = 8 * sizeof(ti->ti_asid_bitmap[0]); |
| @@ -780,27 +780,27 @@ pmap_tlb_asid_acquire(pmap_t pm, struct | | | @@ -780,27 +780,27 @@ pmap_tlb_asid_acquire(pmap_t pm, struct |
780 | /* | | 780 | /* |
781 | * Get an ASID. | | 781 | * Get an ASID. |
782 | */ | | 782 | */ |
783 | pmap_tlb_asid_alloc(ti, pm, pai); | | 783 | pmap_tlb_asid_alloc(ti, pm, pai); |
784 | } | | 784 | } |
785 | | | 785 | |
786 | if (l == curlwp) { | | 786 | if (l == curlwp) { |
787 | #if defined(MULTIPROCESSOR) | | 787 | #if defined(MULTIPROCESSOR) |
788 | /* | | 788 | /* |
789 | * The bits in pm_onproc belonging to this TLB can only | | 789 | * The bits in pm_onproc belonging to this TLB can only |
790 | * be changed while this TLBs lock is held unless atomic | | 790 | * be changed while this TLBs lock is held unless atomic |
791 | * operations are used. | | 791 | * operations are used. |
792 | */ | | 792 | */ |
793 | CPUSET_ADD(&pm->pm_onproc, cpu_index(ci)); | | 793 | CPUSET_ADD(pm->pm_onproc, cpu_index(ci)); |
794 | #endif | | 794 | #endif |
795 | ci->ci_pmap_asid_cur = pai->pai_asid; | | 795 | ci->ci_pmap_asid_cur = pai->pai_asid; |
796 | tlb_set_asid(pai->pai_asid); | | 796 | tlb_set_asid(pai->pai_asid); |
797 | pmap_tlb_asid_check(); | | 797 | pmap_tlb_asid_check(); |
798 | } else { | | 798 | } else { |
799 | printf("%s: l (%p) != curlwp %p\n", __func__, l, curlwp); | | 799 | printf("%s: l (%p) != curlwp %p\n", __func__, l, curlwp); |
800 | } | | 800 | } |
801 | TLBINFO_UNLOCK(ti); | | 801 | TLBINFO_UNLOCK(ti); |
802 | } | | 802 | } |
803 | | | 803 | |
804 | void | | 804 | void |
805 | pmap_tlb_asid_deactivate(pmap_t pm) | | 805 | pmap_tlb_asid_deactivate(pmap_t pm) |
806 | { | | 806 | { |
| @@ -827,31 +827,31 @@ pmap_tlb_asid_deactivate(pmap_t pm) | | | @@ -827,31 +827,31 @@ pmap_tlb_asid_deactivate(pmap_t pm) |
827 | } | | 827 | } |
828 | #elif defined(DEBUG) | | 828 | #elif defined(DEBUG) |
829 | curcpu()->ci_pmap_asid_cur = 0; | | 829 | curcpu()->ci_pmap_asid_cur = 0; |
830 | tlb_set_asid(0); | | 830 | tlb_set_asid(0); |
831 | pmap_tlb_asid_check(); | | 831 | pmap_tlb_asid_check(); |
832 | #endif | | 832 | #endif |
833 | } | | 833 | } |
834 | | | 834 | |
835 | void | | 835 | void |
836 | pmap_tlb_asid_release_all(struct pmap *pm) | | 836 | pmap_tlb_asid_release_all(struct pmap *pm) |
837 | { | | 837 | { |
838 | KASSERT(pm != pmap_kernel()); | | 838 | KASSERT(pm != pmap_kernel()); |
839 | #if defined(MULTIPROCESSOR) | | 839 | #if defined(MULTIPROCESSOR) |
840 | KASSERT(CPUSET_EMPTY(pm->pm_onproc)); | | 840 | KASSERT(CPUSET_EMPTY_P(pm->pm_onproc)); |
841 | for (u_int i = 0; !CPUSET_EMPTY(pm->pm_active); i++) { | | 841 | for (u_int i = 0; !CPUSET_EMPTY_P(pm->pm_active); i++) { |
842 | KASSERT(i < pmap_ntlbs); | | 842 | KASSERT(i < pmap_ntlbs); |
843 | struct pmap_tlb_info * const ti = pmap_tlbs[i]; | | 843 | struct pmap_tlb_info * const ti = pmap_tlbs[i]; |
844 | if (!CPU_EMPTY_P(CPUSET_SUBSET(pm->pm_active, ti->ti_cpu_mask))) { | | 844 | if (!CPUSET_EMPTY_P(CPUSET_SUBSET(pm->pm_active, ti->ti_cpu_mask))) { |
845 | struct pmap_asid_info * const pai = PMAP_PAI(pm, ti); | | 845 | struct pmap_asid_info * const pai = PMAP_PAI(pm, ti); |
846 | TLBINFO_LOCK(ti); | | 846 | TLBINFO_LOCK(ti); |
847 | KASSERT(ti->ti_victim != pm); | | 847 | KASSERT(ti->ti_victim != pm); |
848 | pmap_pai_reset(ti, pai, pm); | | 848 | pmap_pai_reset(ti, pai, pm); |
849 | TLBINFO_UNLOCK(ti); | | 849 | TLBINFO_UNLOCK(ti); |
850 | } | | 850 | } |
851 | } | | 851 | } |
852 | #else | | 852 | #else |
853 | /* | | 853 | /* |
854 | * Handle the case of an UP kernel which only has, at most, one ASID. | | 854 | * Handle the case of an UP kernel which only has, at most, one ASID. |
855 | * If the pmap has an ASID allocated, free it. | | 855 | * If the pmap has an ASID allocated, free it. |
856 | */ | | 856 | */ |
857 | struct pmap_tlb_info * const ti = &pmap_tlb0_info; | | 857 | struct pmap_tlb_info * const ti = &pmap_tlb0_info; |