Wed May 6 13:43:48 2020 UTC
remove the per-channel spin lock; instead, make sure that event
add/remove is only done on the CPU the handler is bound to, with
interrupts disabled.
This should be similar to the native x86 interrupt add/remove.
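
The shape of the change, as a minimal sketch (assuming NetBSD's xcall(9)
API and the x86 psl helpers; my_remove(), my_remove_xcall() and
my_list_remove() are hypothetical stand-ins, not the committed code):
a remote add/remove is cross-called to the CPU the channel is bound to
and performed there with interrupts disabled, so the interrupt path can
traverse the handler list without taking a lock.

#include <sys/types.h>
#include <sys/cpu.h>		/* curcpu(), mp_online */
#include <sys/xcall.h>		/* xc_unicast(), xc_wait() */
#include <machine/cpufunc.h>	/* x86_read_psl(), x86_disable_intr(), x86_write_psl() */

/* hypothetical per-CPU list unlink; must run on the owning CPU */
static void my_list_remove(int evtch, void *item);

/* Runs on the CPU the channel is bound to, via xcall or directly. */
static void
my_remove_xcall(void *arg1, void *arg2)
{
	/*
	 * Blocking interrupts on this CPU is all the exclusion the
	 * interrupt path needs to walk the handler list lock-free.
	 */
	const u_long psl = x86_read_psl();
	x86_disable_intr();
	my_list_remove((int)(intptr_t)arg1, arg2);
	x86_write_psl(psl);	/* restore the previous interrupt state */
}

static void
my_remove(struct cpu_info *ci, int evtch, void *item)
{
	if (ci == curcpu() || !mp_online) {
		/* Already on the bound CPU (or still single-CPU at boot). */
		my_remove_xcall((void *)(intptr_t)evtch, item);
	} else {
		/* Ship the unlink to the bound CPU and wait for it. */
		uint64_t where = xc_unicast(0, my_remove_xcall,
		    (void *)(intptr_t)evtch, item, ci);
		xc_wait(where);
	}
}

event_remove_handler() in the diff below follows this shape, with
cpu_lock held across the cross-call to serialize concurrent add/remove.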


(bouyer)
diff -r1.92 -r1.93 src/sys/arch/xen/xen/evtchn.c


--- src/sys/arch/xen/xen/evtchn.c 2020/05/04 15:55:56 1.92
+++ src/sys/arch/xen/xen/evtchn.c 2020/05/06 13:43:48 1.93
@@ -1,14 +1,14 @@
-/* $NetBSD: evtchn.c,v 1.92 2020/05/04 15:55:56 jdolecek Exp $ */
+/* $NetBSD: evtchn.c,v 1.93 2020/05/06 13:43:48 bouyer Exp $ */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
@@ -44,66 +44,64 @@
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.92 2020/05/04 15:55:56 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.93 2020/05/06 13:43:48 bouyer Exp $");
 
 #include "opt_xen.h"
 #include "isa.h"
 #include "pci.h"
 
 #include <sys/param.h>
 #include <sys/cpu.h>
 #include <sys/kernel.h>
 #include <sys/systm.h>
 #include <sys/device.h>
 #include <sys/proc.h>
 #include <sys/kmem.h>
 #include <sys/reboot.h>
 #include <sys/mutex.h>
 #include <sys/interrupt.h>
+#include <sys/xcall.h>
 
 #include <uvm/uvm.h>
 
 #include <xen/intr.h>
 
 #include <xen/xen.h>
 #include <xen/hypervisor.h>
 #include <xen/evtchn.h>
 #include <xen/xenfunc.h>
 
 #define NR_PIRQS	NR_EVENT_CHANNELS
 
 /*
  * This lock protects updates to the following mapping and reference-count
  * arrays. The lock does not need to be acquired to read the mapping tables.
  */
 static kmutex_t evtchn_lock;
 
 /* event handlers */
 struct evtsource *evtsource[NR_EVENT_CHANNELS];
 
-/* channel locks */
-static kmutex_t evtlock[NR_EVENT_CHANNELS];
-
 /* Reference counts for bindings to event channels XXX: redo for SMP */
 static uint8_t evtch_bindcount[NR_EVENT_CHANNELS];
 
 /* event-channel <-> VCPU mapping for IPIs. XXX: redo for SMP. */
 static evtchn_port_t vcpu_ipi_to_evtch[XEN_LEGACY_MAX_VCPUS];
 
 /* event-channel <-> VCPU mapping for VIRQ_TIMER. XXX: redo for SMP. */
 static int virq_timer_to_evtch[XEN_LEGACY_MAX_VCPUS];
 
 /* event-channel <-> VIRQ mapping. */
 static int virq_to_evtch[NR_VIRQS];
 
 
@@ -352,57 +350,54 @@ evtchn_do_event(int evtch, struct intrfr
 		hypervisor_set_ipending(evtsource[evtch]->ev_imask,
 		    evtch >> LONG_SHIFT,
 		    evtch & LONG_MASK);
 
 		/* leave masked */
 
 		return 0;
 	}
 	ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
 	iplmask = evtsource[evtch]->ev_imask;
 	KASSERT(ci->ci_ilevel >= IPL_VM);
 	KASSERT(cpu_intr_p());
 	x86_enable_intr();
-	mutex_spin_enter(&evtlock[evtch]);
 	ih = evtsource[evtch]->ev_handlers;
 	while (ih != NULL) {
 		KASSERT(ih->ih_cpu == ci);
 #if 0
 		if (ih->ih_cpu != ci) {
 			hypervisor_send_event(ih->ih_cpu, evtch);
 			iplmask &= ~(1 << XEN_IPL2SIR(ih->ih_level));
 			ih = ih->ih_evt_next;
 			continue;
 		}
 #endif
 		if (ih->ih_level <= ilevel) {
 #ifdef IRQ_DEBUG
 			if (evtch == IRQ_DEBUG)
 				printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel);
 #endif
 			x86_disable_intr();
 			hypervisor_set_ipending(iplmask,
 			    evtch >> LONG_SHIFT, evtch & LONG_MASK);
 			/* leave masked */
-			mutex_spin_exit(&evtlock[evtch]);
 			goto splx;
 		}
 		iplmask &= ~(1 << XEN_IPL2SIR(ih->ih_level));
 		ci->ci_ilevel = ih->ih_level;
 		ih_fun = (void *)ih->ih_fun;
 		ih_fun(ih->ih_arg, regs);
 		ih = ih->ih_evt_next;
 	}
-	mutex_spin_exit(&evtlock[evtch]);
 	x86_disable_intr();
 	hypervisor_unmask_event(evtch);
 
 splx:
 	ci->ci_ilevel = ilevel;
 	return 0;
 }
 
 #define PRIuCPUID	"lu" /* XXX: move this somewhere more appropriate */
 
 /* PIC callbacks */
 /* pic "pin"s are conceptually mapped to event port numbers */
 static void
@@ -763,54 +758,49 @@ pirq_interrupt(void *arg)
 
 #endif /* NPCI > 0 || NISA > 0 */
 
 
 /*
  * Recalculate the interrupt from scratch for an event source.
  */
 static void
 intr_calculatemasks(struct evtsource *evts, int evtch, struct cpu_info *ci)
 {
 	struct intrhand *ih;
 	int cpu_receive = 0;
 
-#ifdef MULTIPROCESSOR
-	KASSERT(!mutex_owned(&evtlock[evtch]));
-#endif
-	mutex_spin_enter(&evtlock[evtch]);
 	evts->ev_maxlevel = IPL_NONE;
 	evts->ev_imask = 0;
 	for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) {
+		KASSERT(ih->ih_cpu == curcpu());
 		if (ih->ih_level > evts->ev_maxlevel)
 			evts->ev_maxlevel = ih->ih_level;
 		evts->ev_imask |= (1 << XEN_IPL2SIR(ih->ih_level));
 		if (ih->ih_cpu == ci)
 			cpu_receive = 1;
 	}
 	if (cpu_receive)
 		xen_atomic_set_bit(&curcpu()->ci_evtmask[0], evtch);
 	else
 		xen_atomic_clear_bit(&curcpu()->ci_evtmask[0], evtch);
-	mutex_spin_exit(&evtlock[evtch]);
 }
 
 struct intrhand *
 event_set_handler(int evtch, int (*func)(void *), void *arg, int level,
     const char *intrname, const char *xname, bool mpsafe, bool bind)
 {
 	struct cpu_info *ci = curcpu(); /* XXX: pass in ci ? */
 	struct evtsource *evts;
 	struct intrhand *ih, **ihp;
-	int s;
 	char intrstr_buf[INTRIDBUF];
 
 #ifdef IRQ_DEBUG
 	printf("event_set_handler IRQ %d handler %p\n", evtch, func);
 #endif
 
 	KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch);
 	KASSERTMSG(evtch < NR_EVENT_CHANNELS,
 	    "evtch number %d > NR_EVENT_CHANNELS", evtch);
 	KASSERT(xname != NULL);
 
 #if 0
 	printf("event_set_handler evtch %d handler %p level %d\n", evtch,
@@ -826,200 +816,228 @@ event_set_handler(int evtch, int (*func)
 	ih->ih_fun = ih->ih_realfun = func;
 	ih->ih_arg = ih->ih_realarg = arg;
 	ih->ih_evt_next = NULL;
 	ih->ih_next = NULL;
 	ih->ih_cpu = ci;
 	ih->ih_pin = evtch;
 #ifdef MULTIPROCESSOR
 	if (!mpsafe) {
 		ih->ih_fun = xen_intr_biglock_wrapper;
 		ih->ih_arg = ih;
 	}
 #endif /* MULTIPROCESSOR */
 
-	s = splhigh();
-
-	/* register per-cpu handler for spllower() */
-	event_set_iplhandler(ci, ih, level);
-
+	mutex_enter(&cpu_lock);
 	/* register handler for event channel */
 	if (evtsource[evtch] == NULL) {
 		evtchn_op_t op;
 		if (intrname == NULL)
 			intrname = intr_create_intrid(-1, &xen_pic, evtch,
 			    intrstr_buf, sizeof(intrstr_buf));
 		evts = kmem_zalloc(sizeof (struct evtsource),
 		    KM_NOSLEEP);
 		if (evts == NULL)
 			panic("can't allocate fixed interrupt source");
 
 		evts->ev_handlers = ih;
 		/*
 		 * XXX: We're assuming here that ci is the same cpu as
 		 * the one on which this event/port is bound on. The
 		 * api needs to be reshuffled so that this assumption
 		 * is more explicitly implemented.
 		 */
 		evts->ev_cpu = ci;
-		mutex_init(&evtlock[evtch], MUTEX_DEFAULT, IPL_HIGH);
 		evtsource[evtch] = evts;
 		strlcpy(evts->ev_intrname, intrname, sizeof(evts->ev_intrname));
 
 		evcnt_attach_dynamic(&evts->ev_evcnt, EVCNT_TYPE_INTR, NULL,
 		    device_xname(ci->ci_dev), evts->ev_intrname);
 		if (bind) {
 			op.cmd = EVTCHNOP_bind_vcpu;
 			op.u.bind_vcpu.port = evtch;
 			op.u.bind_vcpu.vcpu = ci->ci_vcpuid;
 			if (HYPERVISOR_event_channel_op(&op) != 0) {
 				panic("Failed to bind event %d to VCPU %s %d",
 				    evtch, device_xname(ci->ci_dev),
 				    ci->ci_vcpuid);
 			}
 		}
+#ifndef XENPV
+		evts->ev_isl = intr_allocate_io_intrsource(intrname);
+		evts->ev_isl->is_pic = &xen_pic;
+		evts->ev_isl->is_handlers = evts->ev_handlers;
+#endif
 	} else {
 		evts = evtsource[evtch];
 		/* sort by IPL order, higher first */
-		mutex_spin_enter(&evtlock[evtch]);
 		for (ihp = &evts->ev_handlers; ; ihp = &((*ihp)->ih_evt_next)) {
 			if ((*ihp)->ih_level < ih->ih_level) {
 				/* insert before *ihp */
 				ih->ih_evt_next = *ihp;
 				*ihp = ih;
 				break;
 			}
 			if ((*ihp)->ih_evt_next == NULL) {
 				(*ihp)->ih_evt_next = ih;
 				break;
 			}
 		}
-		mutex_spin_exit(&evtlock[evtch]);
 #ifndef XENPV
-		mutex_enter(&cpu_lock);
 		evts->ev_isl->is_handlers = evts->ev_handlers;
-		mutex_exit(&cpu_lock);
 #endif
 	}
-
+	const u_long psl = x86_read_psl();
+	x86_disable_intr();
+	/* register per-cpu handler for spllower() */
+	event_set_iplhandler(ci, ih, level);
+	intr_calculatemasks(evts, evtch, ci);
+	x86_write_psl(psl);
 
 	// append device name
 	if (evts->ev_xname[0] != '\0')
 		strlcat(evts->ev_xname, ", ", sizeof(evts->ev_xname));
 	strlcat(evts->ev_xname, xname, sizeof(evts->ev_xname));
 
-	intr_calculatemasks(evts, evtch, ci);
-	splx(s);
-#ifndef XENPV
-	mutex_enter(&cpu_lock);
-	if (evts->ev_isl == NULL) {
-		evts->ev_isl = intr_allocate_io_intrsource(intrname);
-		evts->ev_isl->is_pic = &xen_pic;
-	}
-	evts->ev_isl->is_handlers = evts->ev_handlers;
 	mutex_exit(&cpu_lock);
-#endif
-
-
 	return ih;
 }
 
 void
 event_set_iplhandler(struct cpu_info *ci,
     struct intrhand *ih,
     int level)
 {
 	struct intrsource *ipls;
 	int sir = XEN_IPL2SIR(level);
 	KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH);
 
 	KASSERT(ci == ih->ih_cpu);
+	KASSERT(ci == curcpu());
 	if (ci->ci_isources[sir] == NULL) {
 		ipls = kmem_zalloc(sizeof (struct intrsource),
 		    KM_NOSLEEP);
 		if (ipls == NULL)
 			panic("can't allocate fixed interrupt source");
 		ipls->is_recurse = xenev_stubs[level - IPL_VM].ist_recurse;
 		ipls->is_resume = xenev_stubs[level - IPL_VM].ist_resume;
 		ipls->is_handlers = ih;
 		ipls->is_pic = &xen_pic;
 		ci->ci_isources[sir] = ipls;
 	} else {
 		ipls = ci->ci_isources[sir];
 		ih->ih_next = ipls->is_handlers;
 		ipls->is_handlers = ih;
 	}
 	x86_intr_calculatemasks(ci);
 }
 
-int
-event_remove_handler(int evtch, int (*func)(void *), void *arg)
+/*
+ * Called on bound CPU to handle event_remove_handler()
+ * caller (on initiating CPU) holds cpu_lock on our behalf
+ * arg1: evtch
+ * arg2: struct intrhand *ih
+ */
+
+static void
+event_remove_handler_xcall(void *arg1, void *arg2)
 {
 	struct intrsource *ipls;
 	struct evtsource *evts;
-	struct intrhand *ih;
 	struct intrhand **ihp;
 	struct cpu_info *ci;
+	struct intrhand *ih = arg2;
+	int evtch = (intptr_t)(arg1);
 
 	evts = evtsource[evtch];
-	if (evts == NULL)
-		return ENOENT;
+	KASSERT(evts != NULL);
+	KASSERT(ih != NULL);
+	ci = ih->ih_cpu;
+	KASSERT(ci == curcpu());
 
-	mutex_spin_enter(&evtlock[evtch]);
-	for (ihp = &evts->ev_handlers, ih = evts->ev_handlers;
-	    ih != NULL;
-	    ihp = &ih->ih_evt_next, ih = ih->ih_evt_next) {
-		if (ih->ih_realfun == func && ih->ih_realarg == arg)
+	const u_long psl = x86_read_psl();
+	x86_disable_intr();
+
+	for (ihp = &evts->ev_handlers; *ihp != NULL;
+	    ihp = &(*ihp)->ih_evt_next) {
+		if ((*ihp) == ih)
 			break;
 	}
-	if (ih == NULL) {
-		mutex_spin_exit(&evtlock[evtch]);
-		return ENOENT;
+	if (*(ihp) == NULL) {
+		panic("event_remove_handler_xcall: not in ev_handlers");
 	}
-	ci = ih->ih_cpu;
+
 	*ihp = ih->ih_evt_next;
 
 	int sir = XEN_IPL2SIR(ih->ih_level);
 	KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH);
 	ipls = ci->ci_isources[sir];
-	for (ihp = &ipls->is_handlers, ih = ipls->is_handlers;
-	    ih != NULL;
-	    ihp = &ih->ih_next, ih = ih->ih_next) {
-		if (ih->ih_realfun == func && ih->ih_realarg == arg)
+	for (ihp = &ipls->is_handlers; *ihp != NULL; ihp = &(*ihp)->ih_next) {
+		if (*ihp == ih)
 			break;
 	}
-	if (ih == NULL)
-		panic("event_remove_handler");
+	if (*ihp == NULL)
+		panic("event_remove_handler_xcall: not in is_handlers");
 	*ihp = ih->ih_next;
-	mutex_spin_exit(&evtlock[evtch]);
+	intr_calculatemasks(evts, evtch, ci);
 #ifndef XENPV
-	mutex_enter(&cpu_lock);
 	evts->ev_isl->is_handlers = evts->ev_handlers;
-	mutex_exit(&cpu_lock);
 #endif
+	if (evts->ev_handlers == NULL)
+		xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch);
+
+	x86_write_psl(psl);
+}
+
+int
+event_remove_handler(int evtch, int (*func)(void *), void *arg)
+{
+	struct intrhand *ih;
+	struct cpu_info *ci;
+	struct evtsource *evts;
+
+	mutex_enter(&cpu_lock);
+	evts = evtsource[evtch];
+	if (evts == NULL)
+		return ENOENT;
+
+	for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) {
+		if (ih->ih_realfun == func && ih->ih_realarg == arg)
+			break;
+	}
+	if (ih == NULL) {
+		mutex_exit(&cpu_lock);
+		return ENOENT;
+	}
+	ci = ih->ih_cpu;
+
+	if (ci == curcpu() || !mp_online) {
+		event_remove_handler_xcall((void *)(intptr_t)evtch, ih);
+	} else {
+		uint64_t where = xc_unicast(0, event_remove_handler_xcall,
+		    (void *)(intptr_t)evtch, ih, ci);
+		xc_wait(where);
+	}
+
 	kmem_free(ih, sizeof (struct intrhand));
 	if (evts->ev_handlers == NULL) {
 #ifndef XENPV
 		KASSERT(evts->ev_isl->is_handlers == NULL);
-		mutex_enter(&cpu_lock);
 		intr_free_io_intrsource(evts->ev_intrname);
-		mutex_exit(&cpu_lock);
 #endif
-		xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch);
 		evcnt_detach(&evts->ev_evcnt);
 		kmem_free(evts, sizeof (struct evtsource));
 		evtsource[evtch] = NULL;
-	} else {
-		intr_calculatemasks(evts, evtch, ci);
 	}
+	mutex_exit(&cpu_lock);
 	return 0;
 }
 
 int
 xen_debug_handler(void *arg)
 {
 	struct cpu_info *ci = curcpu();
 	int i;
 	int xci_ilevel = ci->ci_ilevel;
 	int xci_ipending = ci->ci_ipending;
 	int xci_idepth = ci->ci_idepth;
 	u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending;
 	u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask;