Mon Oct 31 12:49:04 2016 UTC ()
Pre-allocate some kcpuset_ts so that we don't try to allocate in the
wrong context.


(skrll)
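
In outline: the IPI and DDB pause/resume paths used to build their cpu sets
on the fly with kcpuset_create()/kcpuset_clone() and free them with
kcpuset_destroy(); those calls allocate memory, which is presumably what
made them unsafe in the contexts these paths can run in. Each CPU now gets
three scratch sets allocated once in cpu_attach_common(), and the hot paths
just kcpuset_copy() into them. A condensed sketch of the pattern, lifted
from the diffs below (unrelated lines omitted):

	/* Attach time (normal thread context, allocation is safe): */
	kcpuset_create(&ci->ci_multicastcpus, true);	/* zeroed scratch set */

	/* IPI send path: reuse the pre-allocated set instead of clone/destroy. */
	void
	cpu_multicast_ipi(const kcpuset_t *kcp, int tag)
	{
		struct cpu_info * const ci = curcpu();
		kcpuset_t *kcp2 = ci->ci_multicastcpus;	/* was: kcpuset_clone(&kcp2, kcp) */

		if (kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
			return;

		kcpuset_copy(kcp2, kcp);
		kcpuset_remove(kcp2, ci->ci_data.cpu_kcpuset);
		for (cpuid_t cii; (cii = kcpuset_ffs(kcp2)) != 0; ) {
			kcpuset_clear(kcp2, --cii);
			(void)cpu_send_ipi(cpu_lookup(cii), tag);
		}
		/* was: kcpuset_destroy(kcp2) -- the scratch set now lives in cpu_info */
	}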
diff -r1.120 -r1.121 src/sys/arch/mips/include/cpu.h
diff -r1.29 -r1.30 src/sys/arch/mips/mips/cpu_subr.c

cvs diff -r1.120 -r1.121 src/sys/arch/mips/include/cpu.h (unified diff)

--- src/sys/arch/mips/include/cpu.h 2016/07/16 01:59:05 1.120
+++ src/sys/arch/mips/include/cpu.h 2016/10/31 12:49:04 1.121
@@ -1,14 +1,14 @@
-/*	$NetBSD: cpu.h,v 1.120 2016/07/16 01:59:05 macallan Exp $	*/
+/*	$NetBSD: cpu.h,v 1.121 2016/10/31 12:49:04 skrll Exp $	*/
 
 /*-
  * Copyright (c) 1992, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  *	Ralph Campbell and Rick Macklem.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -140,26 +140,29 @@ struct cpu_info {
 	uint32_t ci_ksp_tlb_slot;	/* tlb entry for kernel stack */
 	struct evcnt ci_evcnt_all_ipis;	/* aggregated IPI counter */
 	struct evcnt ci_evcnt_per_ipi[NIPIS];	/* individual IPI counters*/
 	struct evcnt ci_evcnt_synci_activate_rqst;
 	struct evcnt ci_evcnt_synci_onproc_rqst;
 	struct evcnt ci_evcnt_synci_deferred_rqst;
 	struct evcnt ci_evcnt_synci_ipi_rqst;
 
 #define	CPUF_PRIMARY	0x01	/* CPU is primary CPU */
 #define	CPUF_PRESENT	0x02	/* CPU is present */
 #define	CPUF_RUNNING	0x04	/* CPU is running */
 #define	CPUF_PAUSED	0x08	/* CPU is paused */
 #define	CPUF_USERPMAP	0x20	/* CPU has a user pmap activated */
+	kcpuset_t *ci_multicastcpus;
+	kcpuset_t *ci_watchcpus;
+	kcpuset_t *ci_ddbcpus;
 #endif
 
 };
 
 #ifdef MULTIPROCESSOR
 #define	CPU_INFO_ITERATOR	int
 #define	CPU_INFO_FOREACH(cii, ci)	\
     cii = 0, ci = cpu_infos[0]; cii < ncpu && (ci = cpu_infos[cii]) != NULL; cii++
 #else
 #define	CPU_INFO_ITERATOR	int __unused
 #define	CPU_INFO_FOREACH(cii, ci)	\
     ci = &cpu_info_store; ci != NULL; ci = NULL
 #endif
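
Each new per-CPU scratch set in struct cpu_info backs one consumer in
cpu_subr.c (second diff, below); roughly:

	kcpuset_t *ci_multicastcpus;	/* scratch for cpu_multicast_ipi() */
	kcpuset_t *ci_watchcpus;	/* scratch for cpu_ipi_wait() */
	kcpuset_t *ci_ddbcpus;		/* scratch for cpu_pause_others()/cpu_resume()/cpu_resume_others() */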

cvs diff -r1.29 -r1.30 src/sys/arch/mips/mips/cpu_subr.c (unified diff)

--- src/sys/arch/mips/mips/cpu_subr.c 2016/08/23 07:29:46 1.29
+++ src/sys/arch/mips/mips/cpu_subr.c 2016/10/31 12:49:04 1.30
@@ -1,14 +1,14 @@
-/*	$NetBSD: cpu_subr.c,v 1.29 2016/08/23 07:29:46 skrll Exp $	*/
+/*	$NetBSD: cpu_subr.c,v 1.30 2016/10/31 12:49:04 skrll Exp $	*/
 
 /*-
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matt Thomas of 3am Software Foundry.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.29 2016/08/23 07:29:46 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.30 2016/10/31 12:49:04 skrll Exp $");
 
 #include "opt_ddb.h"
 #include "opt_cputype.h"
 #include "opt_modular.h"
 #include "opt_multiprocessor.h"
 
 #include <sys/param.h>
 #include <sys/cpu.h>
 #include <sys/intr.h>
 #include <sys/atomic.h>
 #include <sys/device.h>
 #include <sys/lwp.h>
 #include <sys/proc.h>
@@ -275,26 +275,30 @@ cpu_attach_common(device_t self, struct
 	    EVCNT_TYPE_MISC, NULL, xname,
 	    "syncicache deferred request");
 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_ipi_rqst,
 	    EVCNT_TYPE_MISC, NULL, xname,
 	    "syncicache ipi request");
 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_onproc_rqst,
 	    EVCNT_TYPE_MISC, NULL, xname,
 	    "syncicache onproc request");
 
 	/*
 	 * Initialize IPI framework for this cpu instance
 	 */
 	ipi_init(ci);
+
+	kcpuset_create(&ci->ci_multicastcpus, true);
+	kcpuset_create(&ci->ci_watchcpus, true);
+	kcpuset_create(&ci->ci_ddbcpus, true);
 #endif
 }
 
 void
 cpu_startup_common(void)
 {
 	vaddr_t minaddr, maxaddr;
 	char pbuf[9];	/* "99999 MB" */
 
 	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);
 
 #ifdef MULTIPROCESSOR
 	kcpuset_create(&cpus_halted, true);
@@ -649,77 +653,74 @@ cpu_intr_p(void)
 #ifdef MULTIPROCESSOR
 
 void
 cpu_broadcast_ipi(int tag)
 {
 	// No reason to remove ourselves since multicast_ipi will do that for us
 	cpu_multicast_ipi(cpus_running, tag);
 }
 
 void
 cpu_multicast_ipi(const kcpuset_t *kcp, int tag)
 {
 	struct cpu_info * const ci = curcpu();
-	kcpuset_t *kcp2;
+	kcpuset_t *kcp2 = ci->ci_multicastcpus;
 
 	if (kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
 		return;
 
-	kcpuset_clone(&kcp2, kcp);
+	kcpuset_copy(kcp2, kcp);
 	kcpuset_remove(kcp2, ci->ci_data.cpu_kcpuset);
 	for (cpuid_t cii; (cii = kcpuset_ffs(kcp2)) != 0; ) {
 		kcpuset_clear(kcp2, --cii);
 		(void)cpu_send_ipi(cpu_lookup(cii), tag);
 	}
-	kcpuset_destroy(kcp2);
 }
 
 int
 cpu_send_ipi(struct cpu_info *ci, int tag)
 {
 
 	return (*mips_locoresw.lsw_send_ipi)(ci, tag);
 }
 
 static void
 cpu_ipi_wait(const char *s, const kcpuset_t *watchset, const kcpuset_t *wanted)
 {
 	bool done = false;
-	kcpuset_t *kcp;
-	kcpuset_create(&kcp, false);
+	struct cpu_info * const ci = curcpu();
+	kcpuset_t *kcp = ci->ci_watchcpus;
 
 	/* some finite amount of time */
 
 	for (u_long limit = curcpu()->ci_cpu_freq/10; !done && limit--; ) {
 		kcpuset_copy(kcp, watchset);
 		kcpuset_intersect(kcp, wanted);
 		done = kcpuset_match(kcp, wanted);
 	}
 
 	if (!done) {
 		cpuid_t cii;
 		kcpuset_copy(kcp, wanted);
 		kcpuset_remove(kcp, watchset);
 		if ((cii = kcpuset_ffs(kcp)) != 0) {
 			printf("Failed to %s:", s);
 			do {
 				kcpuset_clear(kcp, --cii);
 				printf(" cpu%lu", cii);
 			} while ((cii = kcpuset_ffs(kcp)) != 0);
 			printf("\n");
 		}
 	}
-
-	kcpuset_destroy(kcp);
 }
 
 /*
  * Halt this cpu
  */
 void
 cpu_halt(void)
 {
 	cpuid_t cii = cpu_index(curcpu());
 
 	printf("cpu%lu: shutting down\n", cii);
 	kcpuset_atomic_set(cpus_halted, cii);
 	spl0();		/* allow interrupts e.g. further ipi ? */
@@ -787,81 +788,76 @@ cpu_pause(struct reg *regsp)
 #endif
 	} while (false);
 
 	splx(s);
 }
 
 /*
  * Pause all running cpus, excluding current cpu.
  */
 void
 cpu_pause_others(void)
 {
 	struct cpu_info * const ci = curcpu();
-	kcpuset_t *kcp;
-
 	if (cold || kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
 		return;
 
-	kcpuset_clone(&kcp, cpus_running);
+	kcpuset_t *kcp = ci->ci_ddbcpus;
+
+	kcpuset_copy(kcp, cpus_running);
 	kcpuset_remove(kcp, ci->ci_data.cpu_kcpuset);
 	kcpuset_remove(kcp, cpus_paused);
 
 	cpu_broadcast_ipi(IPI_SUSPEND);
 	cpu_ipi_wait("pause", cpus_paused, kcp);
-
-	kcpuset_destroy(kcp);
 }
 
 /*
  * Resume a single cpu
  */
 void
 cpu_resume(cpuid_t cii)
 {
-	kcpuset_t *kcp;
-
 	if (__predict_false(cold))
 		return;
 
-	kcpuset_create(&kcp, true);
+	struct cpu_info * const ci = curcpu();
+	kcpuset_t *kcp = ci->ci_ddbcpus;
+
 	kcpuset_set(kcp, cii);
 	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
 	kcpuset_atomic_clear(cpus_paused, cii);
 
 	cpu_ipi_wait("resume", cpus_resumed, kcp);
-
-	kcpuset_destroy(kcp);
 }
 
 /*
  * Resume all paused cpus.
  */
 void
 cpu_resume_others(void)
 {
-	kcpuset_t *kcp;
-
 	if (__predict_false(cold))
 		return;
 
+	struct cpu_info * const ci = curcpu();
+	kcpuset_t *kcp = ci->ci_ddbcpus;
+
 	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
-	kcpuset_clone(&kcp, cpus_paused);
+	kcpuset_copy(kcp, cpus_paused);
 	kcpuset_atomicly_remove(cpus_paused, cpus_paused);
 
 	/* CPUs awake on cpus_paused clear */
 	cpu_ipi_wait("resume", cpus_resumed, kcp);
-
-	kcpuset_destroy(kcp);
 }
 
 bool
 cpu_is_paused(cpuid_t cii)
 {
 
 	return !cold && kcpuset_isset(cpus_paused, cii);
 }
 
 #ifdef DDB
 void
 cpu_debug_dump(void)
 {