Mon Jul 2 01:05:49 2012 UTC
In cpu_boot_secondary_processors(), wait until all the other CPUs
have registered themselves in kcpuset_running before returning.
Recent changes to the TLB invalidation xcall code assume that
any CPU which will receive a broadcast IPI is registered in
kcpuset_running, so ensure that is true by waiting here.


(chs)
diff -r1.99 -r1.100 src/sys/arch/x86/x86/cpu.c
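In outline, the change builds a local kcpuset of every CPU that has been started (including the boot CPU) and then spins until kcpuset_running matches it. A condensed sketch of the added logic, using the same kcpuset calls that appear in the diff below:

	kcpuset_t *cpus;

	kcpuset_create(&cpus, true);
	kcpuset_set(cpus, cpu_index(curcpu()));		/* the boot CPU is already up */
	for (i = 0; i < maxcpus; i++) {
		/* ... skip absent or primary CPUs, as in the diff ... */
		cpu_boot_secondary(ci);
		kcpuset_set(cpus, cpu_index(ci));	/* remember each AP we started */
	}
	while (!kcpuset_match(cpus, kcpuset_running))
		;	/* spin until every started CPU has registered itself */
	kcpuset_destroy(cpus);

A plain busy-wait is used since this is a once-per-boot path; once it completes, any broadcast IPI issued by the TLB invalidation xcall code will find all booted CPUs in kcpuset_running.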


--- src/sys/arch/x86/x86/cpu.c	2012/06/12 17:14:19	1.99
+++ src/sys/arch/x86/x86/cpu.c	2012/07/02 01:05:48	1.100
@@ -1,14 +1,14 @@
-/*	$NetBSD: cpu.c,v 1.99 2012/06/12 17:14:19 yamt Exp $	*/
+/*	$NetBSD: cpu.c,v 1.100 2012/07/02 01:05:48 chs Exp $	*/
 
 /*-
  * Copyright (c) 2000-2012 NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Bill Sommerfeld of RedBack Networks Inc, and by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -52,27 +52,27 @@
  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.99 2012/06/12 17:14:19 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.100 2012/07/02 01:05:48 chs Exp $");
 
 #include "opt_ddb.h"
 #include "opt_mpbios.h"		/* for MPDEBUG */
 #include "opt_mtrr.h"
 
 #include "lapic.h"
 #include "ioapic.h"
 
 #ifdef i386
 #include "npx.h"
 #endif
 
 #include <sys/param.h>
@@ -579,43 +579,50 @@ cpu_init(struct cpu_info *ci)
 		/* Synchronize TSC again, and check for drift. */
 		wbinvd();
 		atomic_or_32(&ci->ci_flags, CPUF_RUNNING);
 		tsc_sync_ap(ci);
 	} else {
 		atomic_or_32(&ci->ci_flags, CPUF_RUNNING);
 	}
 }
 
 void
 cpu_boot_secondary_processors(void)
 {
 	struct cpu_info *ci;
+	kcpuset_t *cpus;
 	u_long i;
 
 	/* Now that we know the number of CPUs, patch the text segment. */
 	x86_patch(false);
 
-	for (i=0; i < maxcpus; i++) {
+	kcpuset_create(&cpus, true);
+	kcpuset_set(cpus, cpu_index(curcpu()));
+	for (i = 0; i < maxcpus; i++) {
 		ci = cpu_lookup(i);
 		if (ci == NULL)
 			continue;
 		if (ci->ci_data.cpu_idlelwp == NULL)
 			continue;
 		if ((ci->ci_flags & CPUF_PRESENT) == 0)
 			continue;
 		if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY))
 			continue;
 		cpu_boot_secondary(ci);
+		kcpuset_set(cpus, cpu_index(ci));
 	}
+	while (!kcpuset_match(cpus, kcpuset_running))
+		;
+	kcpuset_destroy(cpus);
 
 	x86_mp_online = true;
 
 	/* Now that we know about the TSC, attach the timecounter. */
 	tsc_tc_init();
 
 	/* Enable zeroing of pages in the idle loop if we have SSE2. */
 	vm_page_zero_enable = ((cpu_feature[0] & CPUID_SSE2) != 0);
 }
 
 static void
 cpu_init_idle_lwp(struct cpu_info *ci)
 {