Mon Jul 20 14:19:41 2020 UTC
Serialize CPU hatch announcement printfs and wait for CPUs to start before
returning from cpu_boot_secondary_processors.


(jmcneill)
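
In outline, the change adds a cpu_hatch_lock mutex so that the per-CPU hatch
printfs done under the (*mips_locoresw.lsw_cpu_run)() call come out one CPU at
a time, and it makes cpu_boot_secondary_processors() spin, with a bounded
timeout, until each secondary has actually started instead of returning
immediately.  A condensed sketch of the shape of the change, simplified from
the diff below (per-CPU clock setup, assertions and the hatch-failure checks
are omitted):

static kmutex_t cpu_hatch_lock;         /* serializes hatch announcements */

void
cpu_hatch(struct cpu_info *ci)
{
        /* ... per-CPU count/compare clock setup ... */

        mutex_enter(&cpu_hatch_lock);   /* one CPU's printfs at a time */
        (*mips_locoresw.lsw_cpu_run)(ci);
        mutex_exit(&cpu_hatch_lock);

        spl0();                         /* interrupts on for this CPU */
        /* ... */
        idle_loop(NULL);                /* tail call; does not return */
}

void
cpu_boot_secondary_processors(void)
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        mutex_init(&cpu_hatch_lock, MUTEX_DEFAULT, IPL_HIGH);

        for (CPU_INFO_FOREACH(cii, ci)) {
                if (CPU_IS_PRIMARY(ci))
                        continue;
                /* ... skip CPUs that never hatched ... */
                kcpuset_set(cpus_running, cpu_index(ci));
                /* Wait up to ~10s (10000 * 1ms) for the CPU to reach idle_loop. */
                for (u_int i = 0; i < 10000; i++) {
                        if (kcpuset_isset(kcpuset_running, cpu_index(ci)))
                                break;
                        delay(1000);
                }
        }
}

The lock is created with IPL_HIGH, presumably because cpu_hatch() takes it
before spl0() has re-enabled interrupts on the newly started CPU.
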
diff -r1.52 -r1.53 src/sys/arch/mips/mips/cpu_subr.c


--- src/sys/arch/mips/mips/cpu_subr.c 2020/07/20 10:53:47 1.52
+++ src/sys/arch/mips/mips/cpu_subr.c 2020/07/20 14:19:41 1.53
@@ -1,14 +1,14 @@
-/* $NetBSD: cpu_subr.c,v 1.52 2020/07/20 10:53:47 skrll Exp $ */
+/* $NetBSD: cpu_subr.c,v 1.53 2020/07/20 14:19:41 jmcneill Exp $ */
 
 /*-
  * Copyright (c) 2010, 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matt Thomas of 3am Software Foundry.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.52 2020/07/20 10:53:47 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.53 2020/07/20 14:19:41 jmcneill Exp $");
 
 #include "opt_cputype.h"
 #include "opt_ddb.h"
 #include "opt_modular.h"
 #include "opt_multiprocessor.h"
 
 #include <sys/param.h>
 #include <sys/cpu.h>
 #include <sys/intr.h>
 #include <sys/atomic.h>
 #include <sys/device.h>
 #include <sys/lwp.h>
 #include <sys/proc.h>
@@ -65,26 +65,28 @@ __KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v
 #if defined(DDB) || defined(KGDB)
 #ifdef DDB
 #include <mips/db_machdep.h>
 #include <ddb/db_command.h>
 #include <ddb/db_output.h>
 #endif
 #endif
 
 #ifdef MIPS64_OCTEON
 #include <mips/cavium/octeonvar.h>
 extern struct cpu_softc octeon_cpu_softc[];
 #endif
 
+static kmutex_t cpu_hatch_lock;
+
 struct cpu_info cpu_info_store
 #if defined(MULTIPROCESSOR) && !defined(MIPS64_OCTEON)
         __section(".data1")
         __aligned(1LU << ilog2((2*sizeof(struct cpu_info)-1)))
 #endif
         = {
         .ci_curlwp = &lwp0,
         .ci_tlb_info = &pmap_tlb0_info,
         .ci_pmap_kern_segtab = &pmap_kern_segtab,
         .ci_pmap_user_segtab = NULL,
 #ifdef _LP64
         .ci_pmap_user_seg0tab = NULL,
 #endif
@@ -939,67 +941,71 @@ cpu_hatch(struct cpu_info *ci)
         /*
          * initialize the MIPS count/compare clock
          */
         mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
         KASSERT(ci->ci_cycles_per_hz != 0);
         ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
         mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
         ci->ci_data.cpu_cc_skew = 0;
 
         /*
          * Let this CPU do its own post-running initialization
          * (for things that have to be done on the local CPU).
          */
+        mutex_enter(&cpu_hatch_lock);
         (*mips_locoresw.lsw_cpu_run)(ci);
+        mutex_exit(&cpu_hatch_lock);
 
         /*
          * Now turn on interrupts (and verify they are on).
          */
         spl0();
         KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
         KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
 
         kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
         kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));
 
         /*
          * And do a tail call to idle_loop
          */
         idle_loop(NULL);
 }
 
 void
 cpu_boot_secondary_processors(void)
 {
         CPU_INFO_ITERATOR cii;
         struct cpu_info *ci;
 
+        mutex_init(&cpu_hatch_lock, MUTEX_DEFAULT, IPL_HIGH);
+
         for (CPU_INFO_FOREACH(cii, ci)) {
                 if (CPU_IS_PRIMARY(ci))
                         continue;
                 KASSERT(ci->ci_data.cpu_idlelwp);
 
                 /*
                  * Skip this CPU if it didn't successfully hatch.
                  */
                 if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
                         continue;
 
                 ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
                 atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
                 kcpuset_set(cpus_running, cpu_index(ci));
                 // Spin until the cpu calls idle_loop
-                for (u_int i = 0; i < 100; i++) {
-                        if (kcpuset_isset(cpus_running, cpu_index(ci)))
+                for (u_int i = 0; i < 10000; i++) {
+                        if (kcpuset_isset(kcpuset_running, cpu_index(ci)))
                                 break;
                         delay(1000);
                 }
         }
 }
 
 void
 xc_send_ipi(struct cpu_info *ci)
 {
 
         (*mips_locoresw.lsw_send_ipi)(ci, IPI_XCALL);
 }
 
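
Two details of the new wait loop are worth noting.  First, the old loop tested
cpus_running, a bit the boot CPU itself sets on the line just above, so the
loop broke out on its first iteration and never really waited; the new loop
tests kcpuset_running, which, going by the "Spin until the cpu calls idle_loop"
comment, is only expected to become set for a CPU once that CPU reaches the
idle loop.  Second, the iteration count grows from 100 to 10000, a timeout on
the order of 10 seconds at delay(1000) per pass.  A rough sketch of the step
the boot CPU is now waiting for, assumed from that comment rather than copied
from the real machine-independent idle loop:

/*
 * Assumed sketch of the idle-loop step that cpu_boot_secondary_processors()
 * waits on; not the actual kern_idle.c source.
 */
void
idle_loop(void *dummy)
{
        struct cpu_info *ci = curcpu();

        /* Advertise that this CPU is up and able to run work. */
        kcpuset_atomic_set(kcpuset_running, cpu_index(ci));

        for (;;) {
                /* ... pick up work from the scheduler, or idle ... */
        }
}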