| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: cpu.c,v 1.149 2018/02/22 13:27:18 maxv Exp $ */ | | 1 | /* $NetBSD: cpu.c,v 1.150 2018/03/11 13:38:02 maxv Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2000-2012 NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2000-2012 NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Bill Sommerfeld of RedBack Networks Inc, and by Andrew Doran. | | 8 | * by Bill Sommerfeld of RedBack Networks Inc, and by Andrew Doran. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
| @@ -52,27 +52,27 @@ | | | @@ -52,27 +52,27 @@ |
52 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 52 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
53 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 53 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
54 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE | | 54 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE |
55 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 55 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
56 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 56 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
57 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 57 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
58 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 58 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
59 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 59 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
60 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 60 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
61 | * SUCH DAMAGE. | | 61 | * SUCH DAMAGE. |
62 | */ | | 62 | */ |
63 | | | 63 | |
64 | #include <sys/cdefs.h> | | 64 | #include <sys/cdefs.h> |
65 | __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.149 2018/02/22 13:27:18 maxv Exp $"); | | 65 | __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.150 2018/03/11 13:38:02 maxv Exp $"); |
66 | | | 66 | |
67 | #include "opt_ddb.h" | | 67 | #include "opt_ddb.h" |
68 | #include "opt_mpbios.h" /* for MPDEBUG */ | | 68 | #include "opt_mpbios.h" /* for MPDEBUG */ |
69 | #include "opt_mtrr.h" | | 69 | #include "opt_mtrr.h" |
70 | #include "opt_multiprocessor.h" | | 70 | #include "opt_multiprocessor.h" |
71 | #include "opt_svs.h" | | 71 | #include "opt_svs.h" |
72 | | | 72 | |
73 | #include "lapic.h" | | 73 | #include "lapic.h" |
74 | #include "ioapic.h" | | 74 | #include "ioapic.h" |
75 | | | 75 | |
76 | #include <sys/param.h> | | 76 | #include <sys/param.h> |
77 | #include <sys/proc.h> | | 77 | #include <sys/proc.h> |
78 | #include <sys/systm.h> | | 78 | #include <sys/systm.h> |
| @@ -664,27 +664,27 @@ cpu_init(struct cpu_info *ci) | | | @@ -664,27 +664,27 @@ cpu_init(struct cpu_info *ci) |
664 | if (CPUID_TO_MODEL(ci->ci_signature) > 8 || | | 664 | if (CPUID_TO_MODEL(ci->ci_signature) > 8 || |
665 | (CPUID_TO_MODEL(ci->ci_signature) == 8 && | | 665 | (CPUID_TO_MODEL(ci->ci_signature) == 8 && |
666 | CPUID_TO_STEPPING(ci->ci_signature) >= 7)) { | | 666 | CPUID_TO_STEPPING(ci->ci_signature) >= 7)) { |
667 | mtrr_funcs = &k6_mtrr_funcs; | | 667 | mtrr_funcs = &k6_mtrr_funcs; |
668 | k6_mtrr_init_first(); | | 668 | k6_mtrr_init_first(); |
669 | mtrr_init_cpu(ci); | | 669 | mtrr_init_cpu(ci); |
670 | } | | 670 | } |
671 | } | | 671 | } |
672 | } | | 672 | } |
673 | #endif /* i386 */ | | 673 | #endif /* i386 */ |
674 | #endif /* MTRR */ | | 674 | #endif /* MTRR */ |
675 | | | 675 | |
676 | if (ci != &cpu_info_primary) { | | 676 | if (ci != &cpu_info_primary) { |
677 | /* Synchronize TSC again, and check for drift. */ | | 677 | /* Synchronize TSC */ |
678 | wbinvd(); | | 678 | wbinvd(); |
679 | atomic_or_32(&ci->ci_flags, CPUF_RUNNING); | | 679 | atomic_or_32(&ci->ci_flags, CPUF_RUNNING); |
680 | tsc_sync_ap(ci); | | 680 | tsc_sync_ap(ci); |
681 | } else { | | 681 | } else { |
682 | atomic_or_32(&ci->ci_flags, CPUF_RUNNING); | | 682 | atomic_or_32(&ci->ci_flags, CPUF_RUNNING); |
683 | } | | 683 | } |
684 | } | | 684 | } |
685 | | | 685 | |
686 | #ifdef MULTIPROCESSOR | | 686 | #ifdef MULTIPROCESSOR |
687 | void | | 687 | void |
688 | cpu_boot_secondary_processors(void) | | 688 | cpu_boot_secondary_processors(void) |
689 | { | | 689 | { |
690 | struct cpu_info *ci; | | 690 | struct cpu_info *ci; |
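
The hunk above shortens the cpu_init() comment: the AP-side tsc_sync_ap() call pairs with tsc_sync_bp() run by the boot processor (see the cpu_start_secondary() hunk below). As a rough illustration of that rendezvous idea only, and not of NetBSD's tsc_sync_* implementation, here is a user-space sketch in which two threads meet at a shared flag and read their time stamp counters as close to the hand-off as possible; every name in it is hypothetical.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>			/* __rdtsc(), _mm_pause(); x86 only */

static atomic_int sync_step;		/* 0 = start, 1 = AP ready, 2 = BP done */
static uint64_t tsc_bp, tsc_ap;

static void *
ap_side(void *arg)
{
	(void)arg;
	atomic_store(&sync_step, 1);	/* announce arrival at the rendezvous */
	while (atomic_load(&sync_step) != 2)
		_mm_pause();		/* spin, like the 'pause' fallback */
	tsc_ap = __rdtsc();		/* read TSC right after the hand-off */
	return NULL;
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, ap_side, NULL);
	while (atomic_load(&sync_step) != 1)
		_mm_pause();		/* wait for the "AP" to arrive */
	tsc_bp = __rdtsc();		/* "BP" reads its TSC ... */
	atomic_store(&sync_step, 2);	/* ... and releases the "AP" */
	pthread_join(t, NULL);

	/* A large delta would hint at unsynchronized or drifting TSCs. */
	printf("apparent TSC delta: %lld ticks\n", (long long)(tsc_ap - tsc_bp));
	return 0;
}
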
| @@ -776,28 +776,29 @@ cpu_start_secondary(struct cpu_info *ci) | | | @@ -776,28 +776,29 @@ cpu_start_secondary(struct cpu_info *ci) |
776 | for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i > 0; i--) { | | 776 | for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i > 0; i--) { |
777 | i8254_delay(10); | | 777 | i8254_delay(10); |
778 | } | | 778 | } |
779 | | | 779 | |
780 | if ((ci->ci_flags & CPUF_PRESENT) == 0) { | | 780 | if ((ci->ci_flags & CPUF_PRESENT) == 0) { |
781 | aprint_error_dev(ci->ci_dev, "failed to become ready\n"); | | 781 | aprint_error_dev(ci->ci_dev, "failed to become ready\n"); |
782 | #if defined(MPDEBUG) && defined(DDB) | | 782 | #if defined(MPDEBUG) && defined(DDB) |
783 | printf("dropping into debugger; continue from here to resume boot\n"); | | 783 | printf("dropping into debugger; continue from here to resume boot\n"); |
784 | Debugger(); | | 784 | Debugger(); |
785 | #endif | | 785 | #endif |
786 | } else { | | 786 | } else { |
787 | /* | | 787 | /* |
788 | * Synchronize time stamp counters. Invalidate cache and do | | 788 | * Synchronize time stamp counters. Invalidate cache and do |
789 | * twice to try and minimize possible cache effects. Disable | | 789 | * twice (in tsc_sync_bp) to minimize possible cache effects. |
790 | * interrupts to try and rule out any external interference. | | 790 | * Disable interrupts to try and rule out any external |
| | | 791 | * interference. |
791 | */ | | 792 | */ |
792 | psl = x86_read_psl(); | | 793 | psl = x86_read_psl(); |
793 | x86_disable_intr(); | | 794 | x86_disable_intr(); |
794 | wbinvd(); | | 795 | wbinvd(); |
795 | tsc_sync_bp(ci); | | 796 | tsc_sync_bp(ci); |
796 | x86_write_psl(psl); | | 797 | x86_write_psl(psl); |
797 | } | | 798 | } |
798 | | | 799 | |
799 | CPU_START_CLEANUP(ci); | | 800 | CPU_START_CLEANUP(ci); |
800 | cpu_starting = NULL; | | 801 | cpu_starting = NULL; |
801 | } | | 802 | } |
802 | | | 803 | |
803 | void | | 804 | void |
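
For scale, the wait above polls CPUF_PRESENT 100000 times with i8254_delay(10) between checks, roughly one second in total assuming the delay argument is in microseconds. A minimal user-space sketch of that bounded-poll pattern, with usleep() standing in for i8254_delay():

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/*
 * Poll a flag word until a bit appears or the budget runs out
 * (100000 iterations x 10us is about one second), mirroring the
 * CPUF_PRESENT wait in cpu_start_secondary() above.
 */
static bool
wait_for_flag(volatile const uint32_t *flags, uint32_t mask)
{
	for (int i = 100000; i > 0; i--) {
		if (*flags & mask)
			return true;
		usleep(10);		/* user-space stand-in for i8254_delay(10) */
	}
	return false;			/* timed out: "failed to become ready" */
}

On the kernel side, once the AP is present the boot processor saves the PSL, disables interrupts, and runs tsc_sync_bp() so that nothing external perturbs the measurement, then restores the saved PSL.
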
| @@ -844,39 +845,45 @@ cpu_hatch(void *v) | | | @@ -844,39 +845,45 @@ cpu_hatch(void *v) |
844 | struct cpu_info *ci = (struct cpu_info *)v; | | 845 | struct cpu_info *ci = (struct cpu_info *)v; |
845 | struct pcb *pcb; | | 846 | struct pcb *pcb; |
846 | int s, i; | | 847 | int s, i; |
847 | | | 848 | |
848 | cpu_init_msrs(ci, true); | | 849 | cpu_init_msrs(ci, true); |
849 | cpu_probe(ci); | | 850 | cpu_probe(ci); |
850 | | | 851 | |
851 | ci->ci_data.cpu_cc_freq = cpu_info_primary.ci_data.cpu_cc_freq; | | 852 | ci->ci_data.cpu_cc_freq = cpu_info_primary.ci_data.cpu_cc_freq; |
852 | /* cpu_get_tsc_freq(ci); */ | | 853 | /* cpu_get_tsc_freq(ci); */ |
853 | | | 854 | |
854 | KDASSERT((ci->ci_flags & CPUF_PRESENT) == 0); | | 855 | KDASSERT((ci->ci_flags & CPUF_PRESENT) == 0); |
855 | | | 856 | |
856 | /* | | 857 | /* |
857 | * Synchronize time stamp counters. Invalidate cache and do twice | | 858 | * Synchronize the TSC for the first time. Note that interrupts are |
858 | * to try and minimize possible cache effects. Note that interrupts | | 859 | * off at this point. |
859 | * are off at this point. | | | |
860 | */ | | 860 | */ |
861 | wbinvd(); | | 861 | wbinvd(); |
862 | atomic_or_32(&ci->ci_flags, CPUF_PRESENT); | | 862 | atomic_or_32(&ci->ci_flags, CPUF_PRESENT); |
863 | tsc_sync_ap(ci); | | 863 | tsc_sync_ap(ci); |
864 | | | 864 | |
865 | /* | | 865 | /* |
866 | * Wait to be brought online. Use 'monitor/mwait' if available, | | 866 | * Wait to be brought online. |
867 | * in order to make the TSC drift as much as possible. so that | | 867 | * |
868 | * we can detect it later. If not available, try 'pause'. | | 868 | * Use MONITOR/MWAIT if available. These instructions put the CPU in |
869 | * We'd like to use 'hlt', but we have interrupts off. | | 869 | * a low consumption mode (C-state), and if the TSC is not invariant, |
| | | 870 | * this causes the TSC to drift. We want this to happen, so that we |
| | | 871 | * can later detect (in tsc_tc_init) any abnormal drift with invariant |
| | | 872 | * TSCs. That's just for safety; by definition such drifts should |
| | | 873 | * never occur with invariant TSCs. |
| | | 874 | * |
| | | 875 | * If not available, try PAUSE. We'd like to use HLT, but we have |
| | | 876 | * interrupts off. |
870 | */ | | 877 | */ |
871 | while ((ci->ci_flags & CPUF_GO) == 0) { | | 878 | while ((ci->ci_flags & CPUF_GO) == 0) { |
872 | if ((cpu_feature[1] & CPUID2_MONITOR) != 0) { | | 879 | if ((cpu_feature[1] & CPUID2_MONITOR) != 0) { |
873 | x86_monitor(&ci->ci_flags, 0, 0); | | 880 | x86_monitor(&ci->ci_flags, 0, 0); |
874 | if ((ci->ci_flags & CPUF_GO) != 0) { | | 881 | if ((ci->ci_flags & CPUF_GO) != 0) { |
875 | continue; | | 882 | continue; |
876 | } | | 883 | } |
877 | x86_mwait(0, 0); | | 884 | x86_mwait(0, 0); |
878 | } else { | | 885 | } else { |
879 | /* | | 886 | /* |
880 | * XXX The loop repetition count could be a lot higher, but | | 887 | * XXX The loop repetition count could be a lot higher, but |
881 | * XXX currently qemu emulator takes a _very_long_time_ to | | 888 | * XXX currently qemu emulator takes a _very_long_time_ to |
882 | * XXX execute the pause instruction. So for now, use a low | | 889 | * XXX execute the pause instruction. So for now, use a low |
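
Note the re-test of CPUF_GO between x86_monitor() and x86_mwait() in the loop above: a store that lands before the monitor is armed would not wake MWAIT, so the extra check closes that window. A simplified, kernel-context-only (CPL 0) sketch of the same arm / re-check / wait pattern follows; the wrappers and names are local to this illustration, not NetBSD's.

#include <stdint.h>

static inline void
cpu_monitor(const volatile void *addr, uint32_t ecx, uint32_t edx)
{
	/* MONITOR: EAX = linear address to watch, ECX/EDX = extensions/hints */
	__asm__ __volatile__("monitor" :: "a"(addr), "c"(ecx), "d"(edx));
}

static inline void
cpu_mwait(uint32_t eax, uint32_t ecx)
{
	/* MWAIT: EAX = hints, ECX = extensions */
	__asm__ __volatile__("mwait" :: "a"(eax), "c"(ecx) : "memory");
}

#define GO	0x1u

static void
wait_for_go(volatile uint32_t *flags)
{
	while ((*flags & GO) == 0) {
		cpu_monitor(flags, 0, 0);	/* arm the monitor on the flag word */
		if ((*flags & GO) != 0)		/* a write before arming would not */
			continue;		/* wake MWAIT, so re-check here */
		cpu_mwait(0, 0);		/* doze until the monitored line is written */
	}
}
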
| @@ -912,26 +919,31 @@ cpu_hatch(void *v) | | | @@ -912,26 +919,31 @@ cpu_hatch(void *v) |
912 | | | 919 | |
913 | cpu_init_idt(); | | 920 | cpu_init_idt(); |
914 | gdt_init_cpu(ci); | | 921 | gdt_init_cpu(ci); |
915 | #if NLAPIC > 0 | | 922 | #if NLAPIC > 0 |
916 | lapic_enable(); | | 923 | lapic_enable(); |
917 | lapic_set_lvt(); | | 924 | lapic_set_lvt(); |
918 | lapic_initclocks(); | | 925 | lapic_initclocks(); |
919 | #endif | | 926 | #endif |
920 | | | 927 | |
921 | fpuinit(ci); | | 928 | fpuinit(ci); |
922 | lldt(GSYSSEL(GLDT_SEL, SEL_KPL)); | | 929 | lldt(GSYSSEL(GLDT_SEL, SEL_KPL)); |
923 | ltr(ci->ci_tss_sel); | | 930 | ltr(ci->ci_tss_sel); |
924 | | | 931 | |
| | | 932 | /* |
| | | 933 | * cpu_init will re-synchronize the TSC, and will detect any abnormal |
| | | 934 | * drift that would have been caused by the use of MONITOR/MWAIT |
| | | 935 | * above. |
| | | 936 | */ |
925 | cpu_init(ci); | | 937 | cpu_init(ci); |
926 | cpu_get_tsc_freq(ci); | | 938 | cpu_get_tsc_freq(ci); |
927 | | | 939 | |
928 | s = splhigh(); | | 940 | s = splhigh(); |
929 | lapic_write_tpri(0); | | 941 | lapic_write_tpri(0); |
930 | x86_enable_intr(); | | 942 | x86_enable_intr(); |
931 | splx(s); | | 943 | splx(s); |
932 | x86_errata(); | | 944 | x86_errata(); |
933 | | | 945 | |
934 | aprint_debug_dev(ci->ci_dev, "running\n"); | | 946 | aprint_debug_dev(ci->ci_dev, "running\n"); |
935 | | | 947 | |
936 | idle_loop(NULL); | | 948 | idle_loop(NULL); |
937 | KASSERT(false); | | 949 | KASSERT(false); |
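
As the new comments explain, the MWAIT dwell deliberately lets a non-invariant TSC drift, and the second synchronization in cpu_init() gives tsc_tc_init() data to judge that drift by. Purely as an illustration of that kind of sanity check (the threshold, names, and structure here are hypothetical, not tsc_tc_init() itself):

#include <stdbool.h>
#include <stdint.h>

#define TSC_DRIFT_LIMIT	1000		/* hypothetical limit, in TSC ticks */

/*
 * Decide whether the TSC looks trustworthy as a timecounter, given the
 * inter-CPU deltas observed at the first sync (cpu_hatch) and the second
 * sync (cpu_init). Growth between the two readings suggests real drift
 * rather than measurement noise.
 */
static bool
tsc_looks_usable(int64_t delta_first_sync, int64_t delta_second_sync)
{
	int64_t growth = delta_second_sync - delta_first_sync;

	if (growth < 0)
		growth = -growth;
	return growth <= TSC_DRIFT_LIMIT;
}
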