Tue Dec 25 06:50:12 2018 UTC
Excise XEN-specific code out of x86/x86/intr.c into xen/x86/xen_intr.c

While at it, separate the source function tracking so that the interrupt
paths are truly independent.
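
Concretely, the Xen paths get their own per-CPU tracking state. A
sketch of the resulting layout (field names as in the cpu.h hunk
below; surrounding members elided):

    struct cpu_info {
            /* ... */
            struct intrsource *ci_isources[MAX_INTR_SOURCES]; /* native */
    #if defined(XEN)
            struct intrsource *ci_xsources[NIPL];  /* Xen event sources */
            uint32_t ci_xmask[NIPL];
            uint32_t ci_xunmask[NIPL];
            uint32_t ci_xpending;  /* XEN doesn't use the cmpxchg8 path */
    #endif
            /* ... */
    };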

Use weak symbol exporting to provision for future PVHVM co-existence
of both files, but with independent paths. Introduce assembler code
such that in a unified scenario, native interrupts get first priority
in spllower(), followed by XEN event callbacks. IPL management and
semantics are unchanged: native handlers and Xen callbacks are
expected to maintain their IPL-related semantics.
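
To illustrate the ordering, the dispatch step in the new
Xspllower/Xdoreti loops reduces to something like the following C
fragment (a standalone sketch of the assembler below, not kernel
code; bsr() stands in for the bsrl instruction):

    #include <stdint.h>

    /* Index of the highest set bit, as bsrl computes it. */
    static int
    bsr(uint32_t v)
    {
            return 31 - __builtin_clz(v);
    }

    /*
     * Pick the interrupt source slot to recurse into when lowering
     * to a given IPL, or return -1 if nothing is pending.  Native
     * sources (ipending/iunmask) are always checked before Xen
     * event sources (xpending/xunmask), giving them first priority.
     */
    static int
    pick_pending(uint32_t ipending, uint32_t iunmask,
        uint32_t xpending, uint32_t xunmask)
    {
            uint32_t m;

            if ((m = ipending & iunmask) != 0)
                    return bsr(m);          /* native slot */
            if ((m = xpending & xunmask) != 0)
                    return bsr(m);          /* Xen event slot */
            return -1;
    }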

In summary, after this commit, native and XEN now have completely
unrelated interrupt handling mechanisms, including
intr_establish_xname(), the assembler stubs, and interrupt handler
management.
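
The weak-symbol mechanism itself works roughly as below (an
illustrative sketch with hypothetical names, not the committed
code). The Xen side always exports its implementation under its own
name and weakly aliases the generic name to it; a strong definition
from the native intr.c then wins at link time whenever both objects
are compiled in:

    #include <sys/cdefs.h>

    /* Xen implementation, always available under its own name. */
    void
    xen_intr_example(void)
    {
            /* ... event-channel based path ... */
    }

    /*
     * The generic name is only a weak alias; a strong native
     * definition overrides it when present (e.g. a future PVHVM
     * kernel with both files compiled in).
     */
    __weak_alias(intr_example, xen_intr_example);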

Happy Christmas!


(cherry)
diff -r1.70 -r1.71 src/sys/arch/amd64/amd64/genassym.cf
diff -r1.29 -r1.30 src/sys/arch/amd64/amd64/lock_stubs.S
diff -r1.36 -r1.37 src/sys/arch/amd64/amd64/spl.S
diff -r1.64 -r1.65 src/sys/arch/amd64/amd64/vector.S
diff -r1.107 -r1.108 src/sys/arch/i386/i386/genassym.cf
diff -r1.43 -r1.44 src/sys/arch/i386/i386/spl.S
diff -r1.78 -r1.79 src/sys/arch/i386/i386/vector.S
diff -r1.100 -r1.101 src/sys/arch/x86/include/cpu.h
diff -r1.42 -r1.43 src/sys/arch/x86/isa/isa_machdep.c
diff -r1.21 -r1.22 src/sys/arch/x86/x86/i8259.c
diff -r1.140 -r1.141 src/sys/arch/x86/x86/intr.c
diff -r1.173 -r1.174 src/sys/arch/xen/conf/files.xen
diff -r1.50 -r1.51 src/sys/arch/xen/include/intr.h
diff -r1.33 -r1.34 src/sys/arch/xen/x86/hypervisor_machdep.c
diff -r1.10 -r1.11 src/sys/arch/xen/x86/xen_intr.c
diff -r1.75 -r1.76 src/sys/arch/xen/xen/clock.c
diff -r1.82 -r1.83 src/sys/arch/xen/xen/evtchn.c
diff -r1.52 -r1.53 src/sys/arch/xen/xen/xenevt.c

cvs diff -r1.70 -r1.71 src/sys/arch/amd64/amd64/genassym.cf

--- src/sys/arch/amd64/amd64/genassym.cf 2018/08/12 15:31:01 1.70
+++ src/sys/arch/amd64/amd64/genassym.cf 2018/12/25 06:50:11 1.71
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1# $NetBSD: genassym.cf,v 1.70 2018/08/12 15:31:01 maxv Exp $ 1# $NetBSD: genassym.cf,v 1.71 2018/12/25 06:50:11 cherry Exp $
2 2
3# 3#
4# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc. 4# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5# All rights reserved. 5# All rights reserved.
6# 6#
7# This code is derived from software contributed to The NetBSD Foundation 7# This code is derived from software contributed to The NetBSD Foundation
8# by Charles M. Hannum, and by Andrew Doran. 8# by Charles M. Hannum, and by Andrew Doran.
9# 9#
10# Redistribution and use in source and binary forms, with or without 10# Redistribution and use in source and binary forms, with or without
11# modification, are permitted provided that the following conditions 11# modification, are permitted provided that the following conditions
12# are met: 12# are met:
13# 1. Redistributions of source code must retain the above copyright 13# 1. Redistributions of source code must retain the above copyright
14# notice, this list of conditions and the following disclaimer. 14# notice, this list of conditions and the following disclaimer.
@@ -238,32 +238,34 @@ ifdef SVS @@ -238,32 +238,34 @@ ifdef SVS
238define CPU_INFO_UPDIRPA offsetof(struct cpu_info, ci_svs_updirpa) 238define CPU_INFO_UPDIRPA offsetof(struct cpu_info, ci_svs_updirpa)
239define CPU_INFO_KPDIRPA offsetof(struct cpu_info, ci_svs_kpdirpa) 239define CPU_INFO_KPDIRPA offsetof(struct cpu_info, ci_svs_kpdirpa)
240define CPU_INFO_RSP0 offsetof(struct cpu_info, ci_svs_rsp0) 240define CPU_INFO_RSP0 offsetof(struct cpu_info, ci_svs_rsp0)
241define CPU_INFO_URSP0 offsetof(struct cpu_info, ci_svs_ursp0) 241define CPU_INFO_URSP0 offsetof(struct cpu_info, ci_svs_ursp0)
242define CPU_INFO_KRSP0 offsetof(struct cpu_info, ci_svs_krsp0) 242define CPU_INFO_KRSP0 offsetof(struct cpu_info, ci_svs_krsp0)
243endif 243endif
244define CPU_INFO_NSYSCALL offsetof(struct cpu_info, ci_data.cpu_nsyscall) 244define CPU_INFO_NSYSCALL offsetof(struct cpu_info, ci_data.cpu_nsyscall)
245define CPU_INFO_NTRAP offsetof(struct cpu_info, ci_data.cpu_ntrap) 245define CPU_INFO_NTRAP offsetof(struct cpu_info, ci_data.cpu_ntrap)
246define CPU_INFO_NINTR offsetof(struct cpu_info, ci_data.cpu_nintr) 246define CPU_INFO_NINTR offsetof(struct cpu_info, ci_data.cpu_nintr)
247define CPU_INFO_CURPRIORITY offsetof(struct cpu_info, ci_schedstate.spc_curpriority) 247define CPU_INFO_CURPRIORITY offsetof(struct cpu_info, ci_schedstate.spc_curpriority)
248define CPU_INFO_FPCURLWP offsetof(struct cpu_info, ci_fpcurlwp) 248define CPU_INFO_FPCURLWP offsetof(struct cpu_info, ci_fpcurlwp)
249 249
250define CPU_INFO_GDT offsetof(struct cpu_info, ci_gdt) 250define CPU_INFO_GDT offsetof(struct cpu_info, ci_gdt)
 251define CPU_INFO_ILEVEL offsetof(struct cpu_info, ci_ilevel)
 252define CPU_INFO_IDEPTH offsetof(struct cpu_info, ci_idepth)
 253if !defined(XEN)
251define CPU_INFO_IPENDING offsetof(struct cpu_info, ci_ipending) 254define CPU_INFO_IPENDING offsetof(struct cpu_info, ci_ipending)
252define CPU_INFO_IMASK offsetof(struct cpu_info, ci_imask) 255define CPU_INFO_IMASK offsetof(struct cpu_info, ci_imask)
253define CPU_INFO_IUNMASK offsetof(struct cpu_info, ci_iunmask) 256define CPU_INFO_IUNMASK offsetof(struct cpu_info, ci_iunmask)
254define CPU_INFO_ILEVEL offsetof(struct cpu_info, ci_ilevel) 
255define CPU_INFO_IDEPTH offsetof(struct cpu_info, ci_idepth) 
256define CPU_INFO_ISOURCES offsetof(struct cpu_info, ci_isources) 257define CPU_INFO_ISOURCES offsetof(struct cpu_info, ci_isources)
 258endif
257define CPU_INFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count) 259define CPU_INFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count)
258define CPU_INFO_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl) 260define CPU_INFO_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl)
259define CPU_INFO_CPUID offsetof(struct cpu_info, ci_cpuid) 261define CPU_INFO_CPUID offsetof(struct cpu_info, ci_cpuid)
260define CPU_INFO_ISTATE offsetof(struct cpu_info, ci_istate) 262define CPU_INFO_ISTATE offsetof(struct cpu_info, ci_istate)
261define CPU_INFO_CC_SKEW offsetof(struct cpu_info, ci_data.cpu_cc_skew) 263define CPU_INFO_CC_SKEW offsetof(struct cpu_info, ci_data.cpu_cc_skew)
262 264
263define ACPI_SUSPEND_GDT offsetof(struct cpu_info, ci_suspend_gdt) 265define ACPI_SUSPEND_GDT offsetof(struct cpu_info, ci_suspend_gdt)
264define ACPI_SUSPEND_IDT offsetof(struct cpu_info, ci_suspend_idt) 266define ACPI_SUSPEND_IDT offsetof(struct cpu_info, ci_suspend_idt)
265define ACPI_SUSPEND_TR offsetof(struct cpu_info, ci_suspend_tr) 267define ACPI_SUSPEND_TR offsetof(struct cpu_info, ci_suspend_tr)
266define ACPI_SUSPEND_LDT offsetof(struct cpu_info, ci_suspend_ldt) 268define ACPI_SUSPEND_LDT offsetof(struct cpu_info, ci_suspend_ldt)
267define ACPI_SUSPEND_FS offsetof(struct cpu_info, ci_suspend_fs) 269define ACPI_SUSPEND_FS offsetof(struct cpu_info, ci_suspend_fs)
268define ACPI_SUSPEND_GS offsetof(struct cpu_info, ci_suspend_gs) 270define ACPI_SUSPEND_GS offsetof(struct cpu_info, ci_suspend_gs)
269define ACPI_SUSPEND_KGS offsetof(struct cpu_info, ci_suspend_kgs) 271define ACPI_SUSPEND_KGS offsetof(struct cpu_info, ci_suspend_kgs)
@@ -342,26 +344,30 @@ define RW_THREAD RW_THREAD @@ -342,26 +344,30 @@ define RW_THREAD RW_THREAD
342define RW_READER RW_READER 344define RW_READER RW_READER
343define RW_WRITER RW_WRITER 345define RW_WRITER RW_WRITER
344 346
345define EV_COUNT offsetof(struct evcnt, ev_count) 347define EV_COUNT offsetof(struct evcnt, ev_count)
346 348
347define OPTERON_MSR_PASSCODE OPTERON_MSR_PASSCODE 349define OPTERON_MSR_PASSCODE OPTERON_MSR_PASSCODE
348 350
349define X86_BUS_SPACE_IO X86_BUS_SPACE_IO 351define X86_BUS_SPACE_IO X86_BUS_SPACE_IO
350 352
351define BST_TYPE offsetof(struct bus_space_tag, bst_type) 353define BST_TYPE offsetof(struct bus_space_tag, bst_type)
352 354
353ifdef XEN 355ifdef XEN
354define CPU_INFO_VCPU offsetof(struct cpu_info, ci_vcpu) 356define CPU_INFO_VCPU offsetof(struct cpu_info, ci_vcpu)
 357define CPU_INFO_XPENDING offsetof(struct cpu_info, ci_xpending)
 358define CPU_INFO_XMASK offsetof(struct cpu_info, ci_xmask)
 359define CPU_INFO_XUNMASK offsetof(struct cpu_info, ci_xunmask)
 360define CPU_INFO_XSOURCES offsetof(struct cpu_info, ci_xsources)
355define EVTCHN_UPCALL_MASK offsetof(struct vcpu_info, evtchn_upcall_mask) 361define EVTCHN_UPCALL_MASK offsetof(struct vcpu_info, evtchn_upcall_mask)
356define XEN_PT_BASE offsetof(struct start_info, pt_base)  362define XEN_PT_BASE offsetof(struct start_info, pt_base)
357define XEN_NR_PT_FRAMES offsetof(struct start_info, nr_pt_frames) 363define XEN_NR_PT_FRAMES offsetof(struct start_info, nr_pt_frames)
358define __HYPERVISOR_iret __HYPERVISOR_iret 364define __HYPERVISOR_iret __HYPERVISOR_iret
359endif 365endif
360 366
361define NKL4_KIMG_ENTRIES NKL4_KIMG_ENTRIES 367define NKL4_KIMG_ENTRIES NKL4_KIMG_ENTRIES
362define NKL3_KIMG_ENTRIES NKL3_KIMG_ENTRIES 368define NKL3_KIMG_ENTRIES NKL3_KIMG_ENTRIES
363define NKL2_KIMG_ENTRIES NKL2_KIMG_ENTRIES 369define NKL2_KIMG_ENTRIES NKL2_KIMG_ENTRIES
364 370
365define PGOFSET PGOFSET 371define PGOFSET PGOFSET
366define PGSHIFT PGSHIFT 372define PGSHIFT PGSHIFT
367 373

cvs diff -r1.29 -r1.30 src/sys/arch/amd64/amd64/lock_stubs.S

--- src/sys/arch/amd64/amd64/lock_stubs.S 2018/07/14 14:29:40 1.29
+++ src/sys/arch/amd64/amd64/lock_stubs.S 2018/12/25 06:50:11 1.30
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: lock_stubs.S,v 1.29 2018/07/14 14:29:40 maxv Exp $ */ 1/* $NetBSD: lock_stubs.S,v 1.30 2018/12/25 06:50:11 cherry Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -126,53 +126,71 @@ END(mutex_spin_enter) @@ -126,53 +126,71 @@ END(mutex_spin_enter)
126 */ 126 */
127ENTRY(mutex_spin_exit) 127ENTRY(mutex_spin_exit)
128#ifdef DIAGNOSTIC 128#ifdef DIAGNOSTIC
129 129
130 movl $0x0001, %eax /* new + expected value */ 130 movl $0x0001, %eax /* new + expected value */
131 movq CPUVAR(SELF), %r8 131 movq CPUVAR(SELF), %r8
132 cmpxchgb %ah, MTX_LOCK(%rdi) /* unlock */ 132 cmpxchgb %ah, MTX_LOCK(%rdi) /* unlock */
133 jnz _C_LABEL(mutex_vector_exit) /* hard case if problems */ 133 jnz _C_LABEL(mutex_vector_exit) /* hard case if problems */
134 movl CPU_INFO_MTX_OLDSPL(%r8), %edi 134 movl CPU_INFO_MTX_OLDSPL(%r8), %edi
135 incl CPU_INFO_MTX_COUNT(%r8) 135 incl CPU_INFO_MTX_COUNT(%r8)
136 jnz 1f 136 jnz 1f
137 cmpl CPU_INFO_ILEVEL(%r8), %edi 137 cmpl CPU_INFO_ILEVEL(%r8), %edi
138 jae 1f 138 jae 1f
 139#if !defined(XEN)
139 movl CPU_INFO_IUNMASK(%r8,%rdi,4), %esi 140 movl CPU_INFO_IUNMASK(%r8,%rdi,4), %esi
140 CLI(ax) 141 CLI(ax)
141 testl CPU_INFO_IPENDING(%r8), %esi 142 testl CPU_INFO_IPENDING(%r8), %esi
142 jnz _C_LABEL(Xspllower) 143 jnz _C_LABEL(Xspllower)
 144#endif
 145#if defined(XEN)
 146 movl CPU_INFO_XUNMASK(%r8,%rdi,4), %esi
 147 CLI(ax)
 148 testl CPU_INFO_XPENDING(%r8), %esi
 149 jnz _C_LABEL(Xspllower)
 150#endif
143 movl %edi, CPU_INFO_ILEVEL(%r8) 151 movl %edi, CPU_INFO_ILEVEL(%r8)
144 STI(ax) 152 STI(ax)
1451: rep /* double byte ret as branch */ 1531: rep /* double byte ret as branch */
146 ret /* target: see AMD docs */ 154 ret /* target: see AMD docs */
147 155
148#else /* DIAGNOSTIC */ 156#else /* DIAGNOSTIC */
149 157
150 movq CPUVAR(SELF), %rsi 158 movq CPUVAR(SELF), %rsi
151 movb $0x00, MTX_LOCK(%rdi) 159 movb $0x00, MTX_LOCK(%rdi)
152 movl CPU_INFO_MTX_OLDSPL(%rsi), %ecx 160 movl CPU_INFO_MTX_OLDSPL(%rsi), %ecx
153 incl CPU_INFO_MTX_COUNT(%rsi) 161 incl CPU_INFO_MTX_COUNT(%rsi)
154 movl CPU_INFO_ILEVEL(%rsi),%edx 162 movl CPU_INFO_ILEVEL(%rsi),%edx
155 cmovnzl %edx,%ecx 163 cmovnzl %edx,%ecx
156 pushq %rbx 164 pushq %rbx
157 cmpl %edx,%ecx /* new level is lower? */ 165 cmpl %edx,%ecx /* new level is lower? */
158 jae 2f 166 jae 2f
1591: 1671:
 168#if !defined(XEN)
160 movl CPU_INFO_IPENDING(%rsi),%eax 169 movl CPU_INFO_IPENDING(%rsi),%eax
161 testl %eax,CPU_INFO_IUNMASK(%rsi,%rcx,4)/* deferred interrupts? */ 170 testl %eax,CPU_INFO_IUNMASK(%rsi,%rcx,4)/* deferred interrupts? */
162 jnz 3f 171 jnz 3f
163 movl %eax,%ebx 172 movl %eax,%ebx
164 cmpxchg8b CPU_INFO_ISTATE(%rsi) /* swap in new ilevel */ 173 cmpxchg8b CPU_INFO_ISTATE(%rsi) /* swap in new ilevel */
165 jnz 4f 174 jnz 4f
 175#endif
 176#if defined(XEN)
 177 movl CPU_INFO_XPENDING(%rsi),%eax
 178 testl %eax,CPU_INFO_XUNMASK(%rsi,%rcx,4)/* deferred interrupts? */
 179 jnz 3f
 180 movl %edx, %eax
 181 cmpxchgl %ecx, CPU_INFO_ILEVEL(%rsi)
 182 jnz 4f
 183#endif
1662: 1842:
167 popq %rbx 185 popq %rbx
168 ret 186 ret
1693: 1873:
170 popq %rbx 188 popq %rbx
171 movl %ecx, %edi 189 movl %ecx, %edi
172 jmp _C_LABEL(Xspllower) 190 jmp _C_LABEL(Xspllower)
1734: 1914:
174 jmp 1b 192 jmp 1b
175 193
176#endif /* DIAGNOSTIC */ 194#endif /* DIAGNOSTIC */
177 195
178END(mutex_spin_exit) 196END(mutex_spin_exit)

cvs diff -r1.36 -r1.37 src/sys/arch/amd64/amd64/spl.S

--- src/sys/arch/amd64/amd64/spl.S 2018/08/22 17:04:36 1.36
+++ src/sys/arch/amd64/amd64/spl.S 2018/12/25 06:50:11 1.37
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: spl.S,v 1.36 2018/08/22 17:04:36 maxv Exp $ */ 1/* $NetBSD: spl.S,v 1.37 2018/12/25 06:50:11 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2003 Wasabi Systems, Inc. 4 * Copyright (c) 2003 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Frank van der Linden for Wasabi Systems, Inc. 7 * Written by Frank van der Linden for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -230,27 +230,26 @@ ENTRY(spllower) @@ -230,27 +230,26 @@ ENTRY(spllower)
230 movl %edi,CPUVAR(ILEVEL) 230 movl %edi,CPUVAR(ILEVEL)
231 popf 231 popf
2321: 2321:
233 ret 233 ret
234 ret 234 ret
2352: 2352:
236 popf 236 popf
237 jmp _C_LABEL(Xspllower) 237 jmp _C_LABEL(Xspllower)
2383: 2383:
239 .space 16 239 .space 16
240 .align 16 240 .align 16
241END(spllower) 241END(spllower)
242LABEL(spllower_end) 242LABEL(spllower_end)
243#endif /* !XEN */ 
244 243
245/* 244/*
246 * void cx8_spllower(int s); 245 * void cx8_spllower(int s);
247 * 246 *
248 * For cmpxchg8b, edx/ecx are the high words and eax/ebx the low. 247 * For cmpxchg8b, edx/ecx are the high words and eax/ebx the low.
249 * 248 *
250 * edx : eax = old level / old ipending 249 * edx : eax = old level / old ipending
251 * ecx : ebx = new level / old ipending 250 * ecx : ebx = new level / old ipending
252 */ 251 */
253ENTRY(cx8_spllower) 252ENTRY(cx8_spllower)
254 movl CPUVAR(ILEVEL),%edx 253 movl CPUVAR(ILEVEL),%edx
255 movq %rbx,%r8 254 movq %rbx,%r8
256 cmpl %edx,%edi /* new level is lower? */ 255 cmpl %edx,%edi /* new level is lower? */
@@ -270,26 +269,27 @@ ENTRY(cx8_spllower) @@ -270,26 +269,27 @@ ENTRY(cx8_spllower)
2701: 2691:
271 movq %r8,%rbx 270 movq %r8,%rbx
272 ret 271 ret
2732: 2722:
274 movq %r8,%rbx 273 movq %r8,%rbx
275 .type _C_LABEL(cx8_spllower_patch), @function 274 .type _C_LABEL(cx8_spllower_patch), @function
276LABEL(cx8_spllower_patch) 275LABEL(cx8_spllower_patch)
277 jmp _C_LABEL(Xspllower) 276 jmp _C_LABEL(Xspllower)
278 277
279 .align 16 278 .align 16
280END(cx8_spllower_patch) 279END(cx8_spllower_patch)
281END(cx8_spllower) 280END(cx8_spllower)
282LABEL(cx8_spllower_end) 281LABEL(cx8_spllower_end)
 282#endif /* !XEN */
283 283
284/* 284/*
285 * void Xspllower(int s); 285 * void Xspllower(int s);
286 * 286 *
287 * Process pending interrupts. 287 * Process pending interrupts.
288 * 288 *
289 * Important registers: 289 * Important registers:
290 * ebx - cpl 290 * ebx - cpl
291 * r13 - address to resume loop at 291 * r13 - address to resume loop at
292 * 292 *
293 * It is important that the bit scan instruction is bsr, it will get 293 * It is important that the bit scan instruction is bsr, it will get
294 * the highest 2 bits (currently the IPI and clock handlers) first, 294 * the highest 2 bits (currently the IPI and clock handlers) first,
295 * to avoid deadlocks where one CPU sends an IPI, another one is at 295 * to avoid deadlocks where one CPU sends an IPI, another one is at
@@ -298,66 +298,92 @@ LABEL(cx8_spllower_end) @@ -298,66 +298,92 @@ LABEL(cx8_spllower_end)
298 * the sending CPU will never see the that CPU accept the IPI 298 * the sending CPU will never see the that CPU accept the IPI
299 * (see pmap_tlb_shootnow). 299 * (see pmap_tlb_shootnow).
300 */ 300 */
301 nop 301 nop
302 .align 4 /* Avoid confusion with cx8_spllower_end */ 302 .align 4 /* Avoid confusion with cx8_spllower_end */
303 303
304IDTVEC(spllower) 304IDTVEC(spllower)
305 pushq %rbx 305 pushq %rbx
306 pushq %r13 306 pushq %r13
307 pushq %r12 307 pushq %r12
308 movl %edi,%ebx 308 movl %edi,%ebx
309 leaq 1f(%rip),%r13 /* address to resume loop at */ 309 leaq 1f(%rip),%r13 /* address to resume loop at */
3101: movl %ebx,%eax /* get cpl */ 3101: movl %ebx,%eax /* get cpl */
 311#if !defined(XEN)
311 movl CPUVAR(IUNMASK)(,%rax,4),%eax 312 movl CPUVAR(IUNMASK)(,%rax,4),%eax
312 CLI(si) 313 CLI(si)
313 andl CPUVAR(IPENDING),%eax /* any non-masked bits left? */ 314 andl CPUVAR(IPENDING),%eax /* any non-masked bits left? */
314 jz 2f 315 jz 2f
315 bsrl %eax,%eax 316 bsrl %eax,%eax
316 btrl %eax,CPUVAR(IPENDING) 317 btrl %eax,CPUVAR(IPENDING)
317 movq CPUVAR(ISOURCES)(,%rax,8),%rax 318 movq CPUVAR(ISOURCES)(,%rax,8),%rax
318 jmp *IS_RECURSE(%rax) 319 jmp *IS_RECURSE(%rax)
 320#endif
3192: 3212:
 322#if defined(XEN)
 323 movl CPUVAR(XUNMASK)(,%rax,4),%eax
 324 CLI(si)
 325 andl CPUVAR(XPENDING),%eax /* any non-masked bits left? */
 326 jz 3f
 327 bsrl %eax,%eax
 328 btrl %eax,CPUVAR(XPENDING)
 329 movq CPUVAR(XSOURCES)(,%rax,8),%rax
 330 jmp *IS_RECURSE(%rax)
 331#endif
 3323:
320 movl %ebx,CPUVAR(ILEVEL) 333 movl %ebx,CPUVAR(ILEVEL)
321 STI(si) 334 STI(si)
322 popq %r12 335 popq %r12
323 popq %r13 336 popq %r13
324 popq %rbx 337 popq %rbx
325 ret 338 ret
326IDTVEC_END(spllower) 339IDTVEC_END(spllower)
327 340
328/* 341/*
329 * void Xdoreti(void); 342 * void Xdoreti(void);
330 * 343 *
331 * Handle return from interrupt after device handler finishes. 344 * Handle return from interrupt after device handler finishes.
332 * 345 *
333 * Important registers: 346 * Important registers:
334 * ebx - cpl to restore 347 * ebx - cpl to restore
335 * r13 - address to resume loop at 348 * r13 - address to resume loop at
336 */ 349 */
337IDTVEC(doreti) 350IDTVEC(doreti)
338 popq %rbx /* get previous priority */ 351 popq %rbx /* get previous priority */
339 decl CPUVAR(IDEPTH) 352 decl CPUVAR(IDEPTH)
340 leaq 1f(%rip),%r13 353 leaq 1f(%rip),%r13
3411: movl %ebx,%eax 3541: movl %ebx,%eax
 355#if !defined(XEN)
342 movl CPUVAR(IUNMASK)(,%rax,4),%eax 356 movl CPUVAR(IUNMASK)(,%rax,4),%eax
343 CLI(si) 357 CLI(si)
344 andl CPUVAR(IPENDING),%eax 358 andl CPUVAR(IPENDING),%eax
345 jz 2f 359 jz 2f
346 bsrl %eax,%eax /* slow, but not worth optimizing */ 360 bsrl %eax,%eax /* slow, but not worth optimizing */
347 btrl %eax,CPUVAR(IPENDING) 361 btrl %eax,CPUVAR(IPENDING)
348 movq CPUVAR(ISOURCES)(,%rax,8),%rax 362 movq CPUVAR(ISOURCES)(,%rax,8),%rax
349 jmp *IS_RESUME(%rax) 363 jmp *IS_RESUME(%rax)
3502: /* Check for ASTs on exit to user mode. */ 364#endif
 3652:
 366#if defined(XEN)
 367 movl CPUVAR(XUNMASK)(,%rax,4),%eax
 368 CLI(si)
 369 andl CPUVAR(XPENDING),%eax
 370 jz 3f
 371 bsrl %eax,%eax /* slow, but not worth optimizing */
 372 btrl %eax,CPUVAR(XPENDING)
 373 movq CPUVAR(XSOURCES)(,%rax,8),%rax
 374 jmp *IS_RESUME(%rax)
 375#endif
 3763: /* Check for ASTs on exit to user mode. */
351 movl %ebx,CPUVAR(ILEVEL) 377 movl %ebx,CPUVAR(ILEVEL)
3525: 3785:
353 testb $SEL_RPL,TF_CS(%rsp) 379 testb $SEL_RPL,TF_CS(%rsp)
354 jz 6f 380 jz 6f
355 381
356 .type _C_LABEL(doreti_checkast), @function 382 .type _C_LABEL(doreti_checkast), @function
357LABEL(doreti_checkast) 383LABEL(doreti_checkast)
358 movq CPUVAR(CURLWP),%r14 384 movq CPUVAR(CURLWP),%r14
359 CHECK_ASTPENDING(%r14) 385 CHECK_ASTPENDING(%r14)
360 je 3f 386 je 3f
361 CLEAR_ASTPENDING(%r14) 387 CLEAR_ASTPENDING(%r14)
362 STI(si) 388 STI(si)
363 movl $T_ASTFLT,TF_TRAPNO(%rsp) /* XXX undo later.. */ 389 movl $T_ASTFLT,TF_TRAPNO(%rsp) /* XXX undo later.. */

cvs diff -r1.64 -r1.65 src/sys/arch/amd64/amd64/vector.S

--- src/sys/arch/amd64/amd64/vector.S 2018/07/14 14:29:40 1.64
+++ src/sys/arch/amd64/amd64/vector.S 2018/12/25 06:50:11 1.65
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: vector.S,v 1.64 2018/07/14 14:29:40 maxv Exp $ */ 1/* $NetBSD: vector.S,v 1.65 2018/12/25 06:50:11 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum and by Andrew Doran. 8 * by Charles M. Hannum and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -646,27 +646,27 @@ INTRSTUB_ARRAY_32(x2apic_level) @@ -646,27 +646,27 @@ INTRSTUB_ARRAY_32(x2apic_level)
646#endif /* !defined(XEN) */ 646#endif /* !defined(XEN) */
647 647
648#if defined(XEN) 648#if defined(XEN)
649/* Resume/recurse procedures for spl() */ 649/* Resume/recurse procedures for spl() */
650#define XENINTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_mask) \ 650#define XENINTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_mask) \
651IDTVEC(recurse_ ## name ## num) ;\ 651IDTVEC(recurse_ ## name ## num) ;\
652 INTR_RECURSE_HWFRAME ;\ 652 INTR_RECURSE_HWFRAME ;\
653 subq $8,%rsp ;\ 653 subq $8,%rsp ;\
654 pushq $T_ASTFLT /* trap # for doing ASTs */ ;\ 654 pushq $T_ASTFLT /* trap # for doing ASTs */ ;\
655 INTR_RECURSE_ENTRY ;\ 655 INTR_RECURSE_ENTRY ;\
656IDTVEC(resume_ ## name ## num) \ 656IDTVEC(resume_ ## name ## num) \
657 movq $IREENT_MAGIC,TF_ERR(%rsp) ;\ 657 movq $IREENT_MAGIC,TF_ERR(%rsp) ;\
658 movl %ebx,%r13d ;\ 658 movl %ebx,%r13d ;\
659 movq CPUVAR(ISOURCES) + (num) * 8,%r14 ;\ 659 movq CPUVAR(XSOURCES) + (num) * 8,%r14 ;\
6601: \ 6601: \
661 pushq %r13 ;\ 661 pushq %r13 ;\
662 movl $num,CPUVAR(ILEVEL) ;\ 662 movl $num,CPUVAR(ILEVEL) ;\
663 STI(si) ;\ 663 STI(si) ;\
664 incl CPUVAR(IDEPTH) ;\ 664 incl CPUVAR(IDEPTH) ;\
665 movq IS_HANDLERS(%r14),%rbx ;\ 665 movq IS_HANDLERS(%r14),%rbx ;\
6666: \ 6666: \
667 movq IH_ARG(%rbx),%rdi ;\ 667 movq IH_ARG(%rbx),%rdi ;\
668 movq %rsp,%rsi ;\ 668 movq %rsp,%rsi ;\
669 call *IH_FUN(%rbx) /* call it */ ;\ 669 call *IH_FUN(%rbx) /* call it */ ;\
670 movq IH_NEXT(%rbx),%rbx /* next handler in chain */ ;\ 670 movq IH_NEXT(%rbx),%rbx /* next handler in chain */ ;\
671 testq %rbx,%rbx ;\ 671 testq %rbx,%rbx ;\
672 jnz 6b ;\ 672 jnz 6b ;\

cvs diff -r1.107 -r1.108 src/sys/arch/i386/i386/genassym.cf

--- src/sys/arch/i386/i386/genassym.cf 2018/01/04 14:02:23 1.107
+++ src/sys/arch/i386/i386/genassym.cf 2018/12/25 06:50:11 1.108
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1# $NetBSD: genassym.cf,v 1.107 2018/01/04 14:02:23 maxv Exp $ 1# $NetBSD: genassym.cf,v 1.108 2018/12/25 06:50:11 cherry Exp $
2 2
3# 3#
4# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc. 4# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5# All rights reserved. 5# All rights reserved.
6# 6#
7# This code is derived from software contributed to The NetBSD Foundation 7# This code is derived from software contributed to The NetBSD Foundation
8# by Charles M. Hannum, and by Andrew Doran. 8# by Charles M. Hannum, and by Andrew Doran.
9# 9#
10# Redistribution and use in source and binary forms, with or without 10# Redistribution and use in source and binary forms, with or without
11# modification, are permitted provided that the following conditions 11# modification, are permitted provided that the following conditions
12# are met: 12# are met:
13# 1. Redistributions of source code must retain the above copyright 13# 1. Redistributions of source code must retain the above copyright
14# notice, this list of conditions and the following disclaimer. 14# notice, this list of conditions and the following disclaimer.
@@ -258,32 +258,34 @@ define CPU_INFO_TSS offsetof(struct cpu @@ -258,32 +258,34 @@ define CPU_INFO_TSS offsetof(struct cpu
258define IOMAP_INVALOFF IOMAP_INVALOFF 258define IOMAP_INVALOFF IOMAP_INVALOFF
259define IOMAP_VALIDOFF IOMAP_VALIDOFF 259define IOMAP_VALIDOFF IOMAP_VALIDOFF
260define CPU_INFO_NSYSCALL offsetof(struct cpu_info, ci_data.cpu_nsyscall) 260define CPU_INFO_NSYSCALL offsetof(struct cpu_info, ci_data.cpu_nsyscall)
261define CPU_INFO_NTRAP offsetof(struct cpu_info, ci_data.cpu_ntrap) 261define CPU_INFO_NTRAP offsetof(struct cpu_info, ci_data.cpu_ntrap)
262define CPU_INFO_NINTR offsetof(struct cpu_info, ci_data.cpu_nintr) 262define CPU_INFO_NINTR offsetof(struct cpu_info, ci_data.cpu_nintr)
263define CPU_INFO_CURPRIORITY offsetof(struct cpu_info, ci_schedstate.spc_curpriority) 263define CPU_INFO_CURPRIORITY offsetof(struct cpu_info, ci_schedstate.spc_curpriority)
264define CPU_INFO_CC_SKEW offsetof(struct cpu_info, ci_data.cpu_cc_skew) 264define CPU_INFO_CC_SKEW offsetof(struct cpu_info, ci_data.cpu_cc_skew)
265 265
266 266
267define CPU_INFO_VENDOR offsetof(struct cpu_info, ci_vendor[0]) 267define CPU_INFO_VENDOR offsetof(struct cpu_info, ci_vendor[0])
268define CPU_INFO_SIGNATURE offsetof(struct cpu_info, ci_signature) 268define CPU_INFO_SIGNATURE offsetof(struct cpu_info, ci_signature)
269 269
270define CPU_INFO_GDT offsetof(struct cpu_info, ci_gdt) 270define CPU_INFO_GDT offsetof(struct cpu_info, ci_gdt)
 271if !defined(XEN)
271define CPU_INFO_IPENDING offsetof(struct cpu_info, ci_ipending) 272define CPU_INFO_IPENDING offsetof(struct cpu_info, ci_ipending)
272define CPU_INFO_IMASK offsetof(struct cpu_info, ci_imask) 273define CPU_INFO_IMASK offsetof(struct cpu_info, ci_imask)
 274define CPU_INFO_ISOURCES offsetof(struct cpu_info, ci_isources)
273define CPU_INFO_IUNMASK offsetof(struct cpu_info, ci_iunmask) 275define CPU_INFO_IUNMASK offsetof(struct cpu_info, ci_iunmask)
 276endif
274define CPU_INFO_ILEVEL offsetof(struct cpu_info, ci_ilevel) 277define CPU_INFO_ILEVEL offsetof(struct cpu_info, ci_ilevel)
275define CPU_INFO_IDEPTH offsetof(struct cpu_info, ci_idepth) 278define CPU_INFO_IDEPTH offsetof(struct cpu_info, ci_idepth)
276define CPU_INFO_ISOURCES offsetof(struct cpu_info, ci_isources) 
277define CPU_INFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count) 279define CPU_INFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count)
278define CPU_INFO_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl) 280define CPU_INFO_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl)
279define CPU_INFO_INTRSTACK offsetof(struct cpu_info, ci_intrstack) 281define CPU_INFO_INTRSTACK offsetof(struct cpu_info, ci_intrstack)
280define CPU_INFO_ISTATE offsetof(struct cpu_info, ci_istate) 282define CPU_INFO_ISTATE offsetof(struct cpu_info, ci_istate)
281 283
282define ACPI_SUSPEND_GDT offsetof(struct cpu_info, ci_suspend_gdt) 284define ACPI_SUSPEND_GDT offsetof(struct cpu_info, ci_suspend_gdt)
283define ACPI_SUSPEND_IDT offsetof(struct cpu_info, ci_suspend_idt) 285define ACPI_SUSPEND_IDT offsetof(struct cpu_info, ci_suspend_idt)
284define ACPI_SUSPEND_TR offsetof(struct cpu_info, ci_suspend_tr) 286define ACPI_SUSPEND_TR offsetof(struct cpu_info, ci_suspend_tr)
285define ACPI_SUSPEND_LDT offsetof(struct cpu_info, ci_suspend_ldt) 287define ACPI_SUSPEND_LDT offsetof(struct cpu_info, ci_suspend_ldt)
286define ACPI_SUSPEND_FS offsetof(struct cpu_info, ci_suspend_fs) 288define ACPI_SUSPEND_FS offsetof(struct cpu_info, ci_suspend_fs)
287define ACPI_SUSPEND_GS offsetof(struct cpu_info, ci_suspend_gs) 289define ACPI_SUSPEND_GS offsetof(struct cpu_info, ci_suspend_gs)
288define ACPI_SUSPEND_KGS offsetof(struct cpu_info, ci_suspend_kgs) 290define ACPI_SUSPEND_KGS offsetof(struct cpu_info, ci_suspend_kgs)
289define ACPI_SUSPEND_EFER offsetof(struct cpu_info, ci_suspend_efer) 291define ACPI_SUSPEND_EFER offsetof(struct cpu_info, ci_suspend_efer)
@@ -362,24 +364,28 @@ define X86_BUS_SPACE_IO X86_BUS_SPACE_IO @@ -362,24 +364,28 @@ define X86_BUS_SPACE_IO X86_BUS_SPACE_IO
362 364
363define BST_TYPE offsetof(struct bus_space_tag, bst_type) 365define BST_TYPE offsetof(struct bus_space_tag, bst_type)
364 366
365define L1_SHIFT L1_SHIFT 367define L1_SHIFT L1_SHIFT
366define L2_SHIFT L2_SHIFT 368define L2_SHIFT L2_SHIFT
367define L2_FRAME L2_FRAME 369define L2_FRAME L2_FRAME
368define PGOFSET PGOFSET 370define PGOFSET PGOFSET
369define PGSHIFT PGSHIFT 371define PGSHIFT PGSHIFT
370 372
371define RESCHED_KPREEMPT RESCHED_KPREEMPT 373define RESCHED_KPREEMPT RESCHED_KPREEMPT
372 374
373ifdef XEN 375ifdef XEN
374define CPU_INFO_VCPU offsetof(struct cpu_info, ci_vcpu) 376define CPU_INFO_VCPU offsetof(struct cpu_info, ci_vcpu)
 377define CPU_INFO_XPENDING offsetof(struct cpu_info, ci_xpending)
 378define CPU_INFO_XMASK offsetof(struct cpu_info, ci_xmask)
 379define CPU_INFO_XUNMASK offsetof(struct cpu_info, ci_xunmask)
 380define CPU_INFO_XSOURCES offsetof(struct cpu_info, ci_xsources)
375define START_INFO_SHARED_INFO offsetof(struct start_info, shared_info) 381define START_INFO_SHARED_INFO offsetof(struct start_info, shared_info)
376define START_INFO_FLAGS offsetof(struct start_info, flags) 382define START_INFO_FLAGS offsetof(struct start_info, flags)
377define START_INFO_CONSOLE_MFN offsetof(struct start_info, console.domU.mfn) 383define START_INFO_CONSOLE_MFN offsetof(struct start_info, console.domU.mfn)
378define START_INFO_STORE_MFN offsetof(struct start_info, store_mfn) 384define START_INFO_STORE_MFN offsetof(struct start_info, store_mfn)
379define SIF_INITDOMAIN SIF_INITDOMAIN 385define SIF_INITDOMAIN SIF_INITDOMAIN
380define EVTCHN_UPCALL_PENDING offsetof(struct vcpu_info, evtchn_upcall_pending) 386define EVTCHN_UPCALL_PENDING offsetof(struct vcpu_info, evtchn_upcall_pending)
381define EVTCHN_UPCALL_MASK offsetof(struct vcpu_info, evtchn_upcall_mask) 387define EVTCHN_UPCALL_MASK offsetof(struct vcpu_info, evtchn_upcall_mask)
382 388
383define HYPERVISOR_sched_op __HYPERVISOR_sched_op 389define HYPERVISOR_sched_op __HYPERVISOR_sched_op
384define SCHEDOP_yield SCHEDOP_yield 390define SCHEDOP_yield SCHEDOP_yield
385endif 391endif

cvs diff -r1.43 -r1.44 src/sys/arch/i386/i386/spl.S

--- src/sys/arch/i386/i386/spl.S 2018/04/04 22:52:58 1.43
+++ src/sys/arch/i386/i386/spl.S 2018/12/25 06:50:11 1.44
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: spl.S,v 1.43 2018/04/04 22:52:58 christos Exp $ */ 1/* $NetBSD: spl.S,v 1.44 2018/12/25 06:50:11 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum and Andrew Doran. 8 * by Charles M. Hannum and Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@ @@ -20,27 +20,27 @@
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <machine/asm.h> 32#include <machine/asm.h>
33__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.43 2018/04/04 22:52:58 christos Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.44 2018/12/25 06:50:11 cherry Exp $");
34 34
35#include "opt_ddb.h" 35#include "opt_ddb.h"
36#include "opt_spldebug.h" 36#include "opt_spldebug.h"
37#include "opt_xen.h" 37#include "opt_xen.h"
38 38
39#include <machine/trap.h> 39#include <machine/trap.h>
40#include <machine/segments.h> 40#include <machine/segments.h>
41#include <machine/frameasm.h> 41#include <machine/frameasm.h>
42 42
43#include "assym.h" 43#include "assym.h"
44 44
45 .text 45 .text
46 46
@@ -190,35 +190,48 @@ IDTVEC(spllower) @@ -190,35 +190,48 @@ IDTVEC(spllower)
190#if defined(DEBUG) 190#if defined(DEBUG)
191#ifndef XEN 191#ifndef XEN
192 pushf 192 pushf
193 popl %eax 193 popl %eax
194 testl $PSL_I,%eax 194 testl $PSL_I,%eax
195 jnz .Lspllower_panic 195 jnz .Lspllower_panic
196#else 196#else
197 movl CPUVAR(VCPU),%eax 197 movl CPUVAR(VCPU),%eax
198 movb EVTCHN_UPCALL_MASK(%eax),%al 198 movb EVTCHN_UPCALL_MASK(%eax),%al
199 andb %al,%al 199 andb %al,%al
200 jz .Lspllower_panic 200 jz .Lspllower_panic
201#endif /* XEN */ 201#endif /* XEN */
202#endif /* defined(DEBUG) */ 202#endif /* defined(DEBUG) */
 203#if !defined(XEN)
203 movl %ebx,%eax /* get cpl */ 204 movl %ebx,%eax /* get cpl */
204 movl CPUVAR(IUNMASK)(,%eax,4),%eax 205 movl CPUVAR(IUNMASK)(,%eax,4),%eax
205 andl CPUVAR(IPENDING),%eax /* any non-masked bits left? */ 206 andl CPUVAR(IPENDING),%eax /* any non-masked bits left? */
206 jz 2f 207 jz 2f
207 bsrl %eax,%eax 208 bsrl %eax,%eax
208 btrl %eax,CPUVAR(IPENDING) 209 btrl %eax,CPUVAR(IPENDING)
209 movl CPUVAR(ISOURCES)(,%eax,4),%eax 210 movl CPUVAR(ISOURCES)(,%eax,4),%eax
210 jmp *IS_RECURSE(%eax) 211 jmp *IS_RECURSE(%eax)
 212#endif
2112: 2132:
 214#if defined(XEN)
 215 movl %ebx,%eax /* get cpl */
 216 movl CPUVAR(XUNMASK)(,%eax,4),%eax
 217 andl CPUVAR(XPENDING),%eax /* any non-masked bits left? */
 218 jz 3f
 219 bsrl %eax,%eax
 220 btrl %eax,CPUVAR(XPENDING)
 221 movl CPUVAR(XSOURCES)(,%eax,4),%eax
 222 jmp *IS_RECURSE(%eax)
 223#endif
 2243:
212 movl %ebx,CPUVAR(ILEVEL) 225 movl %ebx,CPUVAR(ILEVEL)
213#ifdef XEN 226#ifdef XEN
214 STIC(%eax) 227 STIC(%eax)
215 jz 4f 228 jz 4f
216 call _C_LABEL(stipending) 229 call _C_LABEL(stipending)
217 testl %eax,%eax 230 testl %eax,%eax
218 jnz 1b 231 jnz 1b
2194: 2324:
220#else 233#else
221 STI(%eax) 234 STI(%eax)
222#endif 235#endif
223 popl %edi 236 popl %edi
224 popl %esi 237 popl %esi
@@ -254,35 +267,48 @@ IDTVEC(doreti) @@ -254,35 +267,48 @@ IDTVEC(doreti)
254#if defined(DEBUG) 267#if defined(DEBUG)
255#ifndef XEN 268#ifndef XEN
256 pushf 269 pushf
257 popl %eax 270 popl %eax
258 testl $PSL_I,%eax 271 testl $PSL_I,%eax
259 jnz .Ldoreti_panic 272 jnz .Ldoreti_panic
260#else 273#else
261 movl CPUVAR(VCPU),%eax 274 movl CPUVAR(VCPU),%eax
262 movb EVTCHN_UPCALL_MASK(%eax),%al 275 movb EVTCHN_UPCALL_MASK(%eax),%al
263 andb %al,%al 276 andb %al,%al
264 jz .Ldoreti_panic 277 jz .Ldoreti_panic
265#endif /* XEN */ 278#endif /* XEN */
266#endif /* defined(DEBUG) */ 279#endif /* defined(DEBUG) */
 280#if !defined(XEN)
267 movl %ebx,%eax 281 movl %ebx,%eax
268 movl CPUVAR(IUNMASK)(,%eax,4),%eax 282 movl CPUVAR(IUNMASK)(,%eax,4),%eax
269 andl CPUVAR(IPENDING),%eax 283 andl CPUVAR(IPENDING),%eax
270 jz 2f 284 jz 2f
271 bsrl %eax,%eax /* slow, but not worth optimizing */ 285 bsrl %eax,%eax /* slow, but not worth optimizing */
272 btrl %eax,CPUVAR(IPENDING) 286 btrl %eax,CPUVAR(IPENDING)
273 movl CPUVAR(ISOURCES)(,%eax, 4),%eax 287 movl CPUVAR(ISOURCES)(,%eax, 4),%eax
274 jmp *IS_RESUME(%eax) 288 jmp *IS_RESUME(%eax)
 289#endif
2752: /* Check for ASTs on exit to user mode. */ 2902: /* Check for ASTs on exit to user mode. */
 291#if defined(XEN)
 292 movl %ebx,%eax
 293 movl CPUVAR(IUNMASK)(,%eax,4),%eax
 294 andl CPUVAR(IPENDING),%eax
 295 jz 3f
 296 bsrl %eax,%eax /* slow, but not worth optimizing */
 297 btrl %eax,CPUVAR(IPENDING)
 298 movl CPUVAR(ISOURCES)(,%eax, 4),%eax
 299 jmp *IS_RESUME(%eax)
 300#endif
 3013:
276 movl %ebx,CPUVAR(ILEVEL) 302 movl %ebx,CPUVAR(ILEVEL)
2775: 3035:
278 testb $CHK_UPL,TF_CS(%esp) 304 testb $CHK_UPL,TF_CS(%esp)
279 jnz doreti_checkast 305 jnz doreti_checkast
280 jmp 6f 306 jmp 6f
281 .type _C_LABEL(doreti_checkast), @function 307 .type _C_LABEL(doreti_checkast), @function
282LABEL(doreti_checkast) 308LABEL(doreti_checkast)
283 CHECK_ASTPENDING(%eax) 309 CHECK_ASTPENDING(%eax)
284 jz 3f 310 jz 3f
285 CLEAR_ASTPENDING(%eax) 311 CLEAR_ASTPENDING(%eax)
286 STI(%eax) 312 STI(%eax)
287 movl $T_ASTFLT,TF_TRAPNO(%esp) /* XXX undo later.. */ 313 movl $T_ASTFLT,TF_TRAPNO(%esp) /* XXX undo later.. */
288 /* Pushed T_ASTFLT into tf_trapno on entry. */ 314 /* Pushed T_ASTFLT into tf_trapno on entry. */

cvs diff -r1.78 -r1.79 src/sys/arch/i386/i386/vector.S

--- src/sys/arch/i386/i386/vector.S 2018/07/14 14:29:40 1.78
+++ src/sys/arch/i386/i386/vector.S 2018/12/25 06:50:11 1.79
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: vector.S,v 1.78 2018/07/14 14:29:40 maxv Exp $ */ 1/* $NetBSD: vector.S,v 1.79 2018/12/25 06:50:11 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright 2002 (c) Wasabi Systems, Inc. 4 * Copyright 2002 (c) Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Frank van der Linden for Wasabi Systems, Inc. 7 * Written by Frank van der Linden for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -55,27 +55,27 @@ @@ -55,27 +55,27 @@
55 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 55 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 57 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64 * POSSIBILITY OF SUCH DAMAGE. 64 * POSSIBILITY OF SUCH DAMAGE.
65 */ 65 */
66 66
67#include <machine/asm.h> 67#include <machine/asm.h>
68__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.78 2018/07/14 14:29:40 maxv Exp $"); 68__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.79 2018/12/25 06:50:11 cherry Exp $");
69 69
70#include "opt_ddb.h" 70#include "opt_ddb.h"
71#include "opt_multiprocessor.h" 71#include "opt_multiprocessor.h"
72#include "opt_xen.h" 72#include "opt_xen.h"
73#include "opt_dtrace.h" 73#include "opt_dtrace.h"
74 74
75#include <machine/i8259.h> 75#include <machine/i8259.h>
76#include <machine/i82093reg.h> 76#include <machine/i82093reg.h>
77#include <machine/i82489reg.h> 77#include <machine/i82489reg.h>
78#include <machine/frameasm.h> 78#include <machine/frameasm.h>
79#include <machine/segments.h> 79#include <machine/segments.h>
80#include <machine/specialreg.h> 80#include <machine/specialreg.h>
81#include <machine/trap.h> 81#include <machine/trap.h>
@@ -889,27 +889,27 @@ END(x2apic_level_stubs) @@ -889,27 +889,27 @@ END(x2apic_level_stubs)
889#if defined(XEN) 889#if defined(XEN)
890#define voidop(num) 890#define voidop(num)
891 891
892#define XENINTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_mask) \ 892#define XENINTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_mask) \
893IDTVEC(recurse_ ## name ## num) ;\ 893IDTVEC(recurse_ ## name ## num) ;\
894 INTR_RECURSE_HWFRAME ;\ 894 INTR_RECURSE_HWFRAME ;\
895 subl $4,%esp ;\ 895 subl $4,%esp ;\
896 pushl $T_ASTFLT /* trap # for doing ASTs */ ;\ 896 pushl $T_ASTFLT /* trap # for doing ASTs */ ;\
897 INTRENTRY ;\ 897 INTRENTRY ;\
898 movl $_C_LABEL(Xdoreti), %esi; /* we now have a trap frame, so loop using doreti instead */ ;\ 898 movl $_C_LABEL(Xdoreti), %esi; /* we now have a trap frame, so loop using doreti instead */ ;\
899IDTVEC(resume_ ## name ## num) \ 899IDTVEC(resume_ ## name ## num) \
900 movl $IREENT_MAGIC,TF_ERR(%esp) ;\ 900 movl $IREENT_MAGIC,TF_ERR(%esp) ;\
901 pushl %ebx ;\ 901 pushl %ebx ;\
902 movl CPUVAR(ISOURCES) + (num) * 4,%ebp ;\ 902 movl CPUVAR(XSOURCES) + (num) * 4,%ebp ;\
903 movl $num,CPUVAR(ILEVEL) ;\ 903 movl $num,CPUVAR(ILEVEL) ;\
904 IDEPTH_INCR /* leaves old %esp on stack */ ;\ 904 IDEPTH_INCR /* leaves old %esp on stack */ ;\
905 STI(%eax) ;\ 905 STI(%eax) ;\
906 movl IS_HANDLERS(%ebp),%ebx ;\ 906 movl IS_HANDLERS(%ebp),%ebx ;\
9076: \ 9076: \
908 pushl IH_ARG(%ebx) ;\ 908 pushl IH_ARG(%ebx) ;\
909 call *IH_FUN(%ebx) /* call it */ ;\ 909 call *IH_FUN(%ebx) /* call it */ ;\
910 addl $4,%esp /* toss the arg */ ;\ 910 addl $4,%esp /* toss the arg */ ;\
911 movl IH_NEXT(%ebx),%ebx /* next handler in chain */ ;\ 911 movl IH_NEXT(%ebx),%ebx /* next handler in chain */ ;\
912 testl %ebx,%ebx ;\ 912 testl %ebx,%ebx ;\
913 jnz 6b ;\ 913 jnz 6b ;\
914 \ 914 \
915 CLI(%eax) ;\ 915 CLI(%eax) ;\

cvs diff -r1.100 -r1.101 src/sys/arch/x86/include/cpu.h

--- src/sys/arch/x86/include/cpu.h 2018/11/18 23:50:48 1.100
+++ src/sys/arch/x86/include/cpu.h 2018/12/25 06:50:11 1.101
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.h,v 1.100 2018/11/18 23:50:48 cherry Exp $ */ 1/* $NetBSD: cpu.h,v 1.101 2018/12/25 06:50:11 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1990 The Regents of the University of California. 4 * Copyright (c) 1990 The Regents of the University of California.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz. 8 * William Jolitz.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -127,38 +127,43 @@ struct cpu_info { @@ -127,38 +127,43 @@ struct cpu_info {
127 */ 127 */
128 struct pmap *ci_pmap; /* current pmap */ 128 struct pmap *ci_pmap; /* current pmap */
129 int ci_want_pmapload; /* pmap_load() is needed */ 129 int ci_want_pmapload; /* pmap_load() is needed */
130 volatile int ci_tlbstate; /* one of TLBSTATE_ states. see below */ 130 volatile int ci_tlbstate; /* one of TLBSTATE_ states. see below */
131#define TLBSTATE_VALID 0 /* all user tlbs are valid */ 131#define TLBSTATE_VALID 0 /* all user tlbs are valid */
132#define TLBSTATE_LAZY 1 /* tlbs are valid but won't be kept uptodate */ 132#define TLBSTATE_LAZY 1 /* tlbs are valid but won't be kept uptodate */
133#define TLBSTATE_STALE 2 /* we might have stale user tlbs */ 133#define TLBSTATE_STALE 2 /* we might have stale user tlbs */
134 int ci_curldt; /* current LDT descriptor */ 134 int ci_curldt; /* current LDT descriptor */
135 int ci_nintrhand; /* number of H/W interrupt handlers */ 135 int ci_nintrhand; /* number of H/W interrupt handlers */
136 uint64_t ci_scratch; 136 uint64_t ci_scratch;
137 uintptr_t ci_pmap_data[128 / sizeof(uintptr_t)]; 137 uintptr_t ci_pmap_data[128 / sizeof(uintptr_t)];
138 138
139 struct intrsource *ci_isources[MAX_INTR_SOURCES]; 139 struct intrsource *ci_isources[MAX_INTR_SOURCES];
140 140#if defined(XEN)
 141 struct intrsource *ci_xsources[NIPL];
 142 uint32_t ci_xmask[NIPL];
 143 uint32_t ci_xunmask[NIPL];
 144 uint32_t ci_xpending; /* XEN doesn't use the cmpxchg8 path */
 145#endif
 146
141 volatile int ci_mtx_count; /* Negative count of spin mutexes */ 147 volatile int ci_mtx_count; /* Negative count of spin mutexes */
142 volatile int ci_mtx_oldspl; /* Old SPL at this ci_idepth */ 148 volatile int ci_mtx_oldspl; /* Old SPL at this ci_idepth */
143 149
144 /* The following must be aligned for cmpxchg8b. */ 150 /* The following must be aligned for cmpxchg8b. */
145 struct { 151 struct {
146 uint32_t ipending; 152 uint32_t ipending;
147 int ilevel; 153 int ilevel;
148 } ci_istate __aligned(8); 154 } ci_istate __aligned(8);
149#define ci_ipending ci_istate.ipending 155#define ci_ipending ci_istate.ipending
150#define ci_ilevel ci_istate.ilevel 156#define ci_ilevel ci_istate.ilevel
151 
152 int ci_idepth; 157 int ci_idepth;
153 void * ci_intrstack; 158 void * ci_intrstack;
154 uint32_t ci_imask[NIPL]; 159 uint32_t ci_imask[NIPL];
155 uint32_t ci_iunmask[NIPL]; 160 uint32_t ci_iunmask[NIPL];
156 161
157 uint32_t ci_flags; /* flags; see below */ 162 uint32_t ci_flags; /* flags; see below */
158 uint32_t ci_ipis; /* interprocessor interrupts pending */ 163 uint32_t ci_ipis; /* interprocessor interrupts pending */
159 164
160 uint32_t ci_signature; /* X86 cpuid type (cpuid.1.%eax) */ 165 uint32_t ci_signature; /* X86 cpuid type (cpuid.1.%eax) */
161 uint32_t ci_vendor[4]; /* vendor string */ 166 uint32_t ci_vendor[4]; /* vendor string */
162 uint32_t ci_max_cpuid; /* cpuid.0:%eax */ 167 uint32_t ci_max_cpuid; /* cpuid.0:%eax */
163 uint32_t ci_max_ext_cpuid; /* cpuid.80000000:%eax */ 168 uint32_t ci_max_ext_cpuid; /* cpuid.80000000:%eax */
164 volatile uint32_t ci_lapic_counter; 169 volatile uint32_t ci_lapic_counter;

cvs diff -r1.42 -r1.43 src/sys/arch/x86/isa/isa_machdep.c

--- src/sys/arch/x86/isa/isa_machdep.c 2018/12/10 15:08:23 1.42
+++ src/sys/arch/x86/isa/isa_machdep.c 2018/12/25 06:50:12 1.43
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: isa_machdep.c,v 1.42 2018/12/10 15:08:23 maxv Exp $ */ 1/* $NetBSD: isa_machdep.c,v 1.43 2018/12/25 06:50:12 cherry Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. 4 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace 8 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center. 9 * Simulation Facility, NASA Ames Research Center.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -55,27 +55,27 @@ @@ -55,27 +55,27 @@
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE. 62 * SUCH DAMAGE.
63 * 63 *
64 * @(#)isa.c 7.2 (Berkeley) 5/13/91 64 * @(#)isa.c 7.2 (Berkeley) 5/13/91
65 */ 65 */
66 66
67#include <sys/cdefs.h> 67#include <sys/cdefs.h>
68__KERNEL_RCSID(0, "$NetBSD: isa_machdep.c,v 1.42 2018/12/10 15:08:23 maxv Exp $"); 68__KERNEL_RCSID(0, "$NetBSD: isa_machdep.c,v 1.43 2018/12/25 06:50:12 cherry Exp $");
69 69
70#include <sys/param.h> 70#include <sys/param.h>
71#include <sys/systm.h> 71#include <sys/systm.h>
72#include <sys/kernel.h> 72#include <sys/kernel.h>
73#include <sys/syslog.h> 73#include <sys/syslog.h>
74#include <sys/device.h> 74#include <sys/device.h>
75#include <sys/proc.h> 75#include <sys/proc.h>
76#include <sys/bus.h> 76#include <sys/bus.h>
77#include <sys/cpu.h> 77#include <sys/cpu.h>
78 78
79#include <machine/bus_private.h> 79#include <machine/bus_private.h>
80#include <machine/pio.h> 80#include <machine/pio.h>
81#include <machine/cpufunc.h> 81#include <machine/cpufunc.h>
@@ -132,27 +132,31 @@ isa_intr_alloc(isa_chipset_tag_t ic, int @@ -132,27 +132,31 @@ isa_intr_alloc(isa_chipset_tag_t ic, int
132 mask &= 0xdef8; 132 mask &= 0xdef8;
133 133
134 /* 134 /*
135 * XXX some interrupts will be used later (6 for fdc, 12 for pms). 135 * XXX some interrupts will be used later (6 for fdc, 12 for pms).
136 * the right answer is to do "breadth-first" searching of devices. 136 * the right answer is to do "breadth-first" searching of devices.
137 */ 137 */
138 mask &= 0xefbf; 138 mask &= 0xefbf;
139 139
140 mutex_enter(&cpu_lock); 140 mutex_enter(&cpu_lock);
141 141
142 for (i = 0; i < NUM_LEGACY_IRQS; i++) { 142 for (i = 0; i < NUM_LEGACY_IRQS; i++) {
143 if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0) 143 if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0)
144 continue; 144 continue;
 145#if !defined(XEN)
145 isp = ci->ci_isources[i]; 146 isp = ci->ci_isources[i];
 147#else
 148 isp = ci->ci_xsources[i];
 149#endif
146 if (isp == NULL) { 150 if (isp == NULL) {
147 /* if nothing's using the irq, just return it */ 151 /* if nothing's using the irq, just return it */
148 *irq = i; 152 *irq = i;
149 mutex_exit(&cpu_lock); 153 mutex_exit(&cpu_lock);
150 return 0; 154 return 0;
151 } 155 }
152 156
153 switch(isp->is_type) { 157 switch(isp->is_type) {
154 case IST_EDGE: 158 case IST_EDGE:
155 case IST_LEVEL: 159 case IST_LEVEL:
156 if (type != isp->is_type) 160 if (type != isp->is_type)
157 continue; 161 continue;
158 /* 162 /*

cvs diff -r1.21 -r1.22 src/sys/arch/x86/x86/i8259.c

--- src/sys/arch/x86/x86/i8259.c 2018/10/08 08:05:08 1.21
+++ src/sys/arch/x86/x86/i8259.c 2018/12/25 06:50:12 1.22
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: i8259.c,v 1.21 2018/10/08 08:05:08 cherry Exp $ */ 1/* $NetBSD: i8259.c,v 1.22 2018/12/25 06:50:12 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright 2002 (c) Wasabi Systems, Inc. 4 * Copyright 2002 (c) Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Frank van der Linden for Wasabi Systems, Inc. 7 * Written by Frank van der Linden for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -60,27 +60,27 @@ @@ -60,27 +60,27 @@
60 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 60 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
61 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 61 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
62 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 62 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
63 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 63 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
65 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 65 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
66 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 66 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
67 * SUCH DAMAGE. 67 * SUCH DAMAGE.
68 * 68 *
69 * @(#)isa.c 7.2 (Berkeley) 5/13/91 69 * @(#)isa.c 7.2 (Berkeley) 5/13/91
70 */ 70 */
71 71
72#include <sys/cdefs.h> 72#include <sys/cdefs.h>
73__KERNEL_RCSID(0, "$NetBSD: i8259.c,v 1.21 2018/10/08 08:05:08 cherry Exp $"); 73__KERNEL_RCSID(0, "$NetBSD: i8259.c,v 1.22 2018/12/25 06:50:12 cherry Exp $");
74 74
75#include <sys/param.h>  75#include <sys/param.h>
76#include <sys/systm.h> 76#include <sys/systm.h>
77#include <sys/kernel.h> 77#include <sys/kernel.h>
78#include <sys/syslog.h> 78#include <sys/syslog.h>
79#include <sys/device.h> 79#include <sys/device.h>
80#include <sys/proc.h> 80#include <sys/proc.h>
81 81
82#include <dev/isa/isareg.h> 82#include <dev/isa/isareg.h>
83#include <dev/ic/i8259reg.h> 83#include <dev/ic/i8259reg.h>
84 84
85#include <machine/pio.h> 85#include <machine/pio.h>
86#include <machine/cpufunc.h>  86#include <machine/cpufunc.h>
@@ -223,32 +223,41 @@ i8259_hwunmask(struct pic *pic, int pin) @@ -223,32 +223,41 @@ i8259_hwunmask(struct pic *pic, int pin)
223 } else { 223 } else {
224 port = IO_ICU1 + PIC_OCW1; 224 port = IO_ICU1 + PIC_OCW1;
225 byte = i8259_imen & 0xff; 225 byte = i8259_imen & 0xff;
226 } 226 }
227 outb(port, byte); 227 outb(port, byte);
228 x86_enable_intr(); 228 x86_enable_intr();
229} 229}
230 230
231static void 231static void
232i8259_reinit_irqs(void) 232i8259_reinit_irqs(void)
233{ 233{
234 int irqs, irq; 234 int irqs, irq;
235 struct cpu_info *ci = &cpu_info_primary; 235 struct cpu_info *ci = &cpu_info_primary;
236 const size_t array_len = MIN(__arraycount(ci->ci_isources), 236#if !defined(XEN)
 237 const size_t array_count = __arraycount(ci->ci_isources);
 238#else
 239 const size_t array_count = __arraycount(ci->ci_xsources);
 240#endif
 241 const size_t array_len = MIN(array_count,
237 NUM_LEGACY_IRQS); 242 NUM_LEGACY_IRQS);
238 243
239 irqs = 0; 244 irqs = 0;
240 for (irq = 0; irq < array_len; irq++) 245 for (irq = 0; irq < array_len; irq++)
 246#if !defined(XEN)
241 if (ci->ci_isources[irq] != NULL) 247 if (ci->ci_isources[irq] != NULL)
 248#else
 249 if (ci->ci_xsources[irq] != NULL)
 250#endif
242 irqs |= 1 << irq; 251 irqs |= 1 << irq;
243 if (irqs >= 0x100) /* any IRQs >= 8 in use */ 252 if (irqs >= 0x100) /* any IRQs >= 8 in use */
244 irqs |= 1 << IRQ_SLAVE; 253 irqs |= 1 << IRQ_SLAVE;
245 i8259_imen = ~irqs; 254 i8259_imen = ~irqs;
246 255
247 outb(IO_ICU1 + PIC_OCW1, i8259_imen); 256 outb(IO_ICU1 + PIC_OCW1, i8259_imen);
248 outb(IO_ICU2 + PIC_OCW1, i8259_imen >> 8); 257 outb(IO_ICU2 + PIC_OCW1, i8259_imen >> 8);
249} 258}
250 259
251static void 260static void
252i8259_setup(struct pic *pic, struct cpu_info *ci, 261i8259_setup(struct pic *pic, struct cpu_info *ci,
253 int pin, int idtvec, int type) 262 int pin, int idtvec, int type)
254{ 263{

cvs diff -r1.140 -r1.141 src/sys/arch/x86/x86/intr.c

--- src/sys/arch/x86/x86/intr.c 2018/12/24 22:05:45 1.140
+++ src/sys/arch/x86/x86/intr.c 2018/12/25 06:50:12 1.141
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: intr.c,v 1.140 2018/12/24 22:05:45 cherry Exp $ */ 1/* $NetBSD: intr.c,v 1.141 2018/12/25 06:50:12 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -123,27 +123,27 @@ @@ -123,27 +123,27 @@
123 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 123 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
124 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 124 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
125 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 125 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
126 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 126 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
127 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 127 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
128 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 128 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
129 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 129 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
130 * SUCH DAMAGE. 130 * SUCH DAMAGE.
131 * 131 *
132 * @(#)isa.c 7.2 (Berkeley) 5/13/91 132 * @(#)isa.c 7.2 (Berkeley) 5/13/91
133 */ 133 */
134 134
135#include <sys/cdefs.h> 135#include <sys/cdefs.h>
136__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.140 2018/12/24 22:05:45 cherry Exp $"); 136__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.141 2018/12/25 06:50:12 cherry Exp $");
137 137
138#include "opt_intrdebug.h" 138#include "opt_intrdebug.h"
139#include "opt_multiprocessor.h" 139#include "opt_multiprocessor.h"
140#include "opt_acpi.h" 140#include "opt_acpi.h"
141 141
142#include <sys/param.h> 142#include <sys/param.h>
143#include <sys/systm.h> 143#include <sys/systm.h>
144#include <sys/kernel.h> 144#include <sys/kernel.h>
145#include <sys/syslog.h> 145#include <sys/syslog.h>
146#include <sys/device.h> 146#include <sys/device.h>
147#include <sys/kmem.h> 147#include <sys/kmem.h>
148#include <sys/proc.h> 148#include <sys/proc.h>
149#include <sys/errno.h> 149#include <sys/errno.h>
@@ -178,123 +178,104 @@ __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.1 @@ -178,123 +178,104 @@ __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.1
178#endif 178#endif
179 179
180#if NPCI > 0 180#if NPCI > 0
181#include <dev/pci/ppbreg.h> 181#include <dev/pci/ppbreg.h>
182#endif 182#endif
183 183
184#include <x86/pci/msipic.h> 184#include <x86/pci/msipic.h>
185#include <x86/pci/pci_msi_machdep.h> 185#include <x86/pci/pci_msi_machdep.h>
186 186
187#if NPCI == 0 || !defined(__HAVE_PCI_MSI_MSIX) 187#if NPCI == 0 || !defined(__HAVE_PCI_MSI_MSIX)
188#define msipic_is_msi_pic(PIC) (false) 188#define msipic_is_msi_pic(PIC) (false)
189#endif 189#endif
190 190
191#if defined(XEN) /* XXX: Cleanup */ 
192#include <xen/xen.h> 
193#include <xen/hypervisor.h> 
194#include <xen/evtchn.h> 
195#include <xen/xenfunc.h> 
196#endif /* XEN */ 
197 
198#ifdef DDB 191#ifdef DDB
199#include <ddb/db_output.h> 192#include <ddb/db_output.h>
200#endif 193#endif
201 194
202#ifdef INTRDEBUG 195#ifdef INTRDEBUG
203#define DPRINTF(msg) printf msg 196#define DPRINTF(msg) printf msg
204#else 197#else
205#define DPRINTF(msg) 198#define DPRINTF(msg)
206#endif 199#endif
207 200
208struct pic softintr_pic = { 201struct pic softintr_pic = {
209 .pic_name = "softintr_fakepic", 202 .pic_name = "softintr_fakepic",
210 .pic_type = PIC_SOFT, 203 .pic_type = PIC_SOFT,
211 .pic_vecbase = 0, 204 .pic_vecbase = 0,
212 .pic_apicid = 0, 205 .pic_apicid = 0,
213 .pic_lock = __SIMPLELOCK_UNLOCKED, 206 .pic_lock = __SIMPLELOCK_UNLOCKED,
214}; 207};
215 208
216static void intr_calculatemasks(struct cpu_info *); 209static void intr_calculatemasks(struct cpu_info *);
217 210
218static SIMPLEQ_HEAD(, intrsource) io_interrupt_sources = 211static SIMPLEQ_HEAD(, intrsource) io_interrupt_sources =
219 SIMPLEQ_HEAD_INITIALIZER(io_interrupt_sources); 212 SIMPLEQ_HEAD_INITIALIZER(io_interrupt_sources);
220 213
221static kmutex_t intr_distribute_lock; 214static kmutex_t intr_distribute_lock;
222 215
223#if !defined(XEN) 
224static int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *, 216static int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *,
225 struct intrsource *); 217 struct intrsource *);
226static int __noinline intr_allocate_slot(struct pic *, int, int, 218static int __noinline intr_allocate_slot(struct pic *, int, int,
227 struct cpu_info **, int *, int *, 219 struct cpu_info **, int *, int *,
228 struct intrsource *); 220 struct intrsource *);
229 221
230static void intr_source_free(struct cpu_info *, int, struct pic *, int); 222static void intr_source_free(struct cpu_info *, int, struct pic *, int);
231 223
232static void intr_establish_xcall(void *, void *); 224static void intr_establish_xcall(void *, void *);
233static void intr_disestablish_xcall(void *, void *); 225static void intr_disestablish_xcall(void *, void *);
234#endif 
235 226
236static const char *legacy_intr_string(int, char *, size_t, struct pic *); 227static const char *legacy_intr_string(int, char *, size_t, struct pic *);
237 228
238#if defined(XEN) /* XXX: nuke conditional after integration */ 
239static const char *xen_intr_string(int, char *, size_t, struct pic *); 229static const char *xen_intr_string(int, char *, size_t, struct pic *);
240#endif /* XXX: XEN */ 
241 230
242#if defined(INTRSTACKSIZE) 231#if defined(INTRSTACKSIZE)
243static inline bool redzone_const_or_false(bool); 232static inline bool redzone_const_or_false(bool);
244static inline int redzone_const_or_zero(int); 233static inline int redzone_const_or_zero(int);
245#endif 234#endif
246 235
247static void intr_redistribute_xc_t(void *, void *); 236static void intr_redistribute_xc_t(void *, void *);
248static void intr_redistribute_xc_s1(void *, void *); 237static void intr_redistribute_xc_s1(void *, void *);
249static void intr_redistribute_xc_s2(void *, void *); 238static void intr_redistribute_xc_s2(void *, void *);
250static bool intr_redistribute(struct cpu_info *); 239static bool intr_redistribute(struct cpu_info *);
251 
252static struct intrsource *intr_get_io_intrsource(const char *); 240static struct intrsource *intr_get_io_intrsource(const char *);
253static void intr_free_io_intrsource_direct(struct intrsource *); 241static void intr_free_io_intrsource_direct(struct intrsource *);
254#if !defined(XEN) 
255static int intr_num_handlers(struct intrsource *); 242static int intr_num_handlers(struct intrsource *);
256 
257static int intr_find_unused_slot(struct cpu_info *, int *); 243static int intr_find_unused_slot(struct cpu_info *, int *);
258static void intr_activate_xcall(void *, void *); 244static void intr_activate_xcall(void *, void *);
259static void intr_deactivate_xcall(void *, void *); 245static void intr_deactivate_xcall(void *, void *);
260static void intr_get_affinity(struct intrsource *, kcpuset_t *); 246static void intr_get_affinity(struct intrsource *, kcpuset_t *);
261static int intr_set_affinity(struct intrsource *, const kcpuset_t *); 247static int intr_set_affinity(struct intrsource *, const kcpuset_t *);
262#endif /* XEN */ 
263 248
264/* 249/*
265 * Fill in default interrupt table (in case of spurious interrupt 250 * Fill in default interrupt table (in case of spurious interrupt
266 * during configuration of kernel), setup interrupt control unit 251 * during configuration of kernel), setup interrupt control unit
267 */ 252 */
268void 253void
269intr_default_setup(void) 254intr_default_setup(void)
270{ 255{
271#if !defined(XEN) 
272 int i; 256 int i;
273 257
274 /* icu vectors */ 258 /* icu vectors */
275 for (i = 0; i < NUM_LEGACY_IRQS; i++) { 259 for (i = 0; i < NUM_LEGACY_IRQS; i++) {
276 idt_vec_reserve(ICU_OFFSET + i); 260 idt_vec_reserve(ICU_OFFSET + i);
277 idt_vec_set(ICU_OFFSET + i, legacy_stubs[i].ist_entry); 261 idt_vec_set(ICU_OFFSET + i, legacy_stubs[i].ist_entry);
278 } 262 }
279 263
280 /* 264 /*
281 * Eventually might want to check if it's actually there. 265 * Eventually might want to check if it's actually there.
282 */ 266 */
283 i8259_default_setup(); 267 i8259_default_setup();
284 268
285#else 
286 events_default_setup(); 
287#endif /* !XEN */ 
288 mutex_init(&intr_distribute_lock, MUTEX_DEFAULT, IPL_NONE); 269 mutex_init(&intr_distribute_lock, MUTEX_DEFAULT, IPL_NONE);
289} 270}
290 271
291/* 272/*
292 * Handle an NMI, possibly a machine check. 273 * Handle an NMI, possibly a machine check.
293 * Return true to panic the system, false to ignore. 274 * Return true to panic the system, false to ignore.
294 */ 275 */
295void 276void
296x86_nmi(void) 277x86_nmi(void)
297{ 278{
298 279
299 log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70)); 280 log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70));
300} 281}
@@ -386,34 +367,30 @@ intr_create_intrid(int legacy_irq, struc @@ -386,34 +367,30 @@ intr_create_intrid(int legacy_irq, struc
386 pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK) 367 pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
387 | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK) 368 | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
388 | APIC_INT_VIA_MSI; 369 | APIC_INT_VIA_MSI;
389 if (pic->pic_type == PIC_MSI) 370 if (pic->pic_type == PIC_MSI)
390 MSI_INT_MAKE_MSI(pih); 371 MSI_INT_MAKE_MSI(pih);
391 else if (pic->pic_type == PIC_MSIX) 372 else if (pic->pic_type == PIC_MSIX)
392 MSI_INT_MAKE_MSIX(pih); 373 MSI_INT_MAKE_MSIX(pih);
393 374
394 return x86_pci_msi_string(NULL, pih, buf, len); 375 return x86_pci_msi_string(NULL, pih, buf, len);
395 } 376 }
396#endif /* __HAVE_PCI_MSI_MSIX */  377#endif /* __HAVE_PCI_MSI_MSIX */
397#endif 378#endif
398 379
399#if defined(XEN) 
400 evtchn_port_t port = pin; /* Port number */ 
401 
402 if (pic->pic_type == PIC_XEN) { 380 if (pic->pic_type == PIC_XEN) {
403 ih = pin; 381 ih = pin; /* Port == pin */
404 return xen_intr_string(port, buf, len, pic); 382 return xen_intr_string(pin, buf, len, pic);
405 } 383 }
406#endif 
407 384
408 /* 385 /*
409 * If the device is pci, "legacy_irq" is always -1. The least 8 bits of "ih" 386 * If the device is pci, "legacy_irq" is always -1. The least 8 bits of "ih"
410 * are only used in intr_string() to show the irq number. 387 * are only used in intr_string() to show the irq number.
411 * If the device is "legacy"(such as floppy), it should not use 388 * If the device is "legacy"(such as floppy), it should not use
412 * intr_string(). 389 * intr_string().
413 */ 390 */
414 if (pic->pic_type == PIC_I8259) { 391 if (pic->pic_type == PIC_I8259) {
415 ih = legacy_irq; 392 ih = legacy_irq;
416 return legacy_intr_string(ih, buf, len, pic); 393 return legacy_intr_string(ih, buf, len, pic);
417 } 394 }
418 395
419#if NIOAPIC > 0 || NACPICA > 0 396#if NIOAPIC > 0 || NACPICA > 0
@@ -513,27 +490,26 @@ intr_free_io_intrsource(const char *intr @@ -513,27 +490,26 @@ intr_free_io_intrsource(const char *intr
513 490
514 if ((isp = intr_get_io_intrsource(intrid)) == NULL) { 491 if ((isp = intr_get_io_intrsource(intrid)) == NULL) {
515 return; 492 return;
516 } 493 }
517 494
518 /* If the interrupt uses shared IRQ, don't free yet. */ 495 /* If the interrupt uses shared IRQ, don't free yet. */
519 if (isp->is_handlers != NULL) { 496 if (isp->is_handlers != NULL) {
520 return; 497 return;
521 } 498 }
522 499
523 intr_free_io_intrsource_direct(isp); 500 intr_free_io_intrsource_direct(isp);
524} 501}
525 502
526#if !defined(XEN) 
527static int 503static int
528intr_allocate_slot_cpu(struct cpu_info *ci, struct pic *pic, int pin, 504intr_allocate_slot_cpu(struct cpu_info *ci, struct pic *pic, int pin,
529 int *index, struct intrsource *chained) 505 int *index, struct intrsource *chained)
530{ 506{
531 int slot, i; 507 int slot, i;
532 struct intrsource *isp; 508 struct intrsource *isp;
533 509
534 KASSERT(mutex_owned(&cpu_lock)); 510 KASSERT(mutex_owned(&cpu_lock));
535 511
536 if (pic == &i8259_pic) { 512 if (pic == &i8259_pic) {
537 KASSERT(CPU_IS_PRIMARY(ci)); 513 KASSERT(CPU_IS_PRIMARY(ci));
538 slot = pin; 514 slot = pin;
539 } else { 515 } else {
@@ -716,30 +692,27 @@ intr_biglock_wrapper(void *vp) @@ -716,30 +692,27 @@ intr_biglock_wrapper(void *vp)
716{ 692{
717 struct intrhand *ih = vp; 693 struct intrhand *ih = vp;
718 int ret; 694 int ret;
719 695
720 KERNEL_LOCK(1, NULL); 696 KERNEL_LOCK(1, NULL);
721 697
722 ret = (*ih->ih_realfun)(ih->ih_realarg); 698 ret = (*ih->ih_realfun)(ih->ih_realarg);
723 699
724 KERNEL_UNLOCK_ONE(NULL); 700 KERNEL_UNLOCK_ONE(NULL);
725 701
726 return ret; 702 return ret;
727} 703}
728#endif /* MULTIPROCESSOR */ 704#endif /* MULTIPROCESSOR */
729#endif /* XEN */ 
730 705
731 
732#if !defined(XEN) 
733/* 706/*
734 * Append device name to intrsource. If device A and device B share IRQ number, 707 * Append device name to intrsource. If device A and device B share IRQ number,
735 * the device name of the interrupt id is "device A, device B". 708 * the device name of the interrupt id is "device A, device B".
736 */ 709 */
737static void 710static void
738intr_append_intrsource_xname(struct intrsource *isp, const char *xname) 711intr_append_intrsource_xname(struct intrsource *isp, const char *xname)
739{ 712{
740 713
741 if (isp->is_xname[0] != '\0') 714 if (isp->is_xname[0] != '\0')
742 strlcat(isp->is_xname, ", ", sizeof(isp->is_xname)); 715 strlcat(isp->is_xname, ", ", sizeof(isp->is_xname));
743 strlcat(isp->is_xname, xname, sizeof(isp->is_xname)); 716 strlcat(isp->is_xname, xname, sizeof(isp->is_xname));
744} 717}
745 718
@@ -1089,42 +1062,37 @@ intr_disestablish(struct intrhand *ih) @@ -1089,42 +1062,37 @@ intr_disestablish(struct intrhand *ih)
1089 if (ci == curcpu() || !mp_online) { 1062 if (ci == curcpu() || !mp_online) {
1090 intr_disestablish_xcall(ih, NULL); 1063 intr_disestablish_xcall(ih, NULL);
1091 } else { 1064 } else {
1092 where = xc_unicast(0, intr_disestablish_xcall, ih, NULL, ci); 1065 where = xc_unicast(0, intr_disestablish_xcall, ih, NULL, ci);
1093 xc_wait(where); 1066 xc_wait(where);
1094 } 1067 }
1095 if (!msipic_is_msi_pic(isp->is_pic) && intr_num_handlers(isp) < 1) { 1068 if (!msipic_is_msi_pic(isp->is_pic) && intr_num_handlers(isp) < 1) {
1096 intr_free_io_intrsource_direct(isp); 1069 intr_free_io_intrsource_direct(isp);
1097 } 1070 }
1098 mutex_exit(&cpu_lock); 1071 mutex_exit(&cpu_lock);
1099 kmem_free(ih, sizeof(*ih)); 1072 kmem_free(ih, sizeof(*ih));
1100} 1073}
1101 1074
1102#endif /* !XEN */ 
1103 
1104#if defined(XEN) /* nuke conditional post integration */ 
1105static const char * 1075static const char *
1106xen_intr_string(int port, char *buf, size_t len, struct pic *pic) 1076xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
1107{ 1077{
1108 KASSERT(pic->pic_type == PIC_XEN); 1078 KASSERT(pic->pic_type == PIC_XEN);
1109 1079
1110 KASSERT(port >= 0); 1080 KASSERT(port >= 0);
1111 KASSERT(port < NR_EVENT_CHANNELS); 
1112 1081
1113 snprintf(buf, len, "%s channel %d", pic->pic_name, port); 1082 snprintf(buf, len, "%s channel %d", pic->pic_name, port);
1114 1083
1115 return buf; 1084 return buf;
1116} 1085}
1117#endif /* XEN */ 
1118 1086
1119static const char * 1087static const char *
1120legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic) 1088legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
1121{ 1089{
1122 int legacy_irq; 1090 int legacy_irq;
1123 1091
1124 KASSERT(pic->pic_type == PIC_I8259); 1092 KASSERT(pic->pic_type == PIC_I8259);
1125#if NLAPIC > 0 1093#if NLAPIC > 0
1126 KASSERT(APIC_IRQ_ISLEGACY(ih)); 1094 KASSERT(APIC_IRQ_ISLEGACY(ih));
1127 1095
1128 legacy_irq = APIC_IRQ_LEGACY_IRQ(ih); 1096 legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
1129#else 1097#else
1130 legacy_irq = ih; 1098 legacy_irq = ih;
@@ -1202,27 +1170,26 @@ static inline int @@ -1202,27 +1170,26 @@ static inline int
1202redzone_const_or_zero(int x) 1170redzone_const_or_zero(int x)
1203{ 1171{
1204 return redzone_const_or_false(true) ? x : 0; 1172 return redzone_const_or_false(true) ? x : 0;
1205} 1173}
1206#endif 1174#endif
1207 1175
1208/* 1176/*
1209 * Initialize all handlers that aren't dynamically allocated, and exist 1177 * Initialize all handlers that aren't dynamically allocated, and exist
1210 * for each CPU. 1178 * for each CPU.
1211 */ 1179 */
1212void 1180void
1213cpu_intr_init(struct cpu_info *ci) 1181cpu_intr_init(struct cpu_info *ci)
1214{ 1182{
1215#if !defined(XEN) 
1216#if (NLAPIC > 0) || defined(MULTIPROCESSOR) || defined(__HAVE_PREEMPTION) 1183#if (NLAPIC > 0) || defined(MULTIPROCESSOR) || defined(__HAVE_PREEMPTION)
1217 struct intrsource *isp; 1184 struct intrsource *isp;
1218#endif 1185#endif
1219#if NLAPIC > 0 1186#if NLAPIC > 0
1220 static int first = 1; 1187 static int first = 1;
1221#if defined(MULTIPROCESSOR) 1188#if defined(MULTIPROCESSOR)
1222 int i; 1189 int i;
1223#endif 1190#endif
1224#endif 1191#endif
1225 1192
1226#if NLAPIC > 0 1193#if NLAPIC > 0
1227 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); 1194 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
1228 isp->is_recurse = Xrecurse_lapic_ltimer; 1195 isp->is_recurse = Xrecurse_lapic_ltimer;
@@ -1253,33 +1220,26 @@ cpu_intr_init(struct cpu_info *ci) @@ -1253,33 +1220,26 @@ cpu_intr_init(struct cpu_info *ci)
1253 1220
1254#if defined(__HAVE_PREEMPTION) 1221#if defined(__HAVE_PREEMPTION)
1255 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); 1222 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
1256 isp->is_recurse = Xrecurse_preempt; 1223 isp->is_recurse = Xrecurse_preempt;
1257 isp->is_resume = Xresume_preempt; 1224 isp->is_resume = Xresume_preempt;
1258 fake_preempt_intrhand.ih_level = IPL_PREEMPT; 1225 fake_preempt_intrhand.ih_level = IPL_PREEMPT;
1259 isp->is_handlers = &fake_preempt_intrhand; 1226 isp->is_handlers = &fake_preempt_intrhand;
1260 isp->is_pic = &softintr_pic; 1227 isp->is_pic = &softintr_pic;
1261 ci->ci_isources[SIR_PREEMPT] = isp; 1228 ci->ci_isources[SIR_PREEMPT] = isp;
1262 1229
1263#endif 1230#endif
1264 intr_calculatemasks(ci); 1231 intr_calculatemasks(ci);
1265 1232
1266#else /* XEN */ 
1267 int i; /* XXX: duplicate */ 
1268 ci->ci_iunmask[0] = 0xfffffffe; 
1269 for (i = 1; i < NIPL; i++) 
1270 ci->ci_iunmask[i] = ci->ci_iunmask[i - 1] & ~(1 << i); 
1271#endif /* XEN */ 
1272 
1273#if defined(INTRSTACKSIZE) 1233#if defined(INTRSTACKSIZE)
1274 vaddr_t istack; 1234 vaddr_t istack;
1275 1235
1276 /* 1236 /*
1277 * If the red zone is activated, protect both the top and 1237 * If the red zone is activated, protect both the top and
1278 * the bottom of the stack with an unmapped page. 1238 * the bottom of the stack with an unmapped page.
1279 */ 1239 */
1280 istack = uvm_km_alloc(kernel_map, 1240 istack = uvm_km_alloc(kernel_map,
1281 INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0, 1241 INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
1282 UVM_KMF_WIRED|UVM_KMF_ZERO); 1242 UVM_KMF_WIRED|UVM_KMF_ZERO);
1283 if (redzone_const_or_false(true)) { 1243 if (redzone_const_or_false(true)) {
1284 pmap_kremove(istack, PAGE_SIZE); 1244 pmap_kremove(istack, PAGE_SIZE);
1285 pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE); 1245 pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
@@ -1617,52 +1577,48 @@ intr_redistribute(struct cpu_info *oci) @@ -1617,52 +1577,48 @@ intr_redistribute(struct cpu_info *oci)
1617 1577
1618 return true; 1578 return true;
1619} 1579}
1620 1580
1621void 1581void
1622cpu_intr_redistribute(void) 1582cpu_intr_redistribute(void)
1623{ 1583{
1624 CPU_INFO_ITERATOR cii; 1584 CPU_INFO_ITERATOR cii;
1625 struct cpu_info *ci; 1585 struct cpu_info *ci;
1626 1586
1627 KASSERT(mutex_owned(&cpu_lock)); 1587 KASSERT(mutex_owned(&cpu_lock));
1628 KASSERT(mp_online); 1588 KASSERT(mp_online);
1629 1589
1630#if defined(XEN) /* XXX: remove */ 
1631 return; 
1632#endif 
1633 /* Direct interrupts away from shielded CPUs. */ 1590 /* Direct interrupts away from shielded CPUs. */
1634 for (CPU_INFO_FOREACH(cii, ci)) { 1591 for (CPU_INFO_FOREACH(cii, ci)) {
1635 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) { 1592 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) {
1636 continue; 1593 continue;
1637 } 1594 }
1638 while (intr_redistribute(ci)) { 1595 while (intr_redistribute(ci)) {
1639 /* nothing */ 1596 /* nothing */
1640 } 1597 }
1641 } 1598 }
1642 1599
1643 /* XXX should now re-balance */ 1600 /* XXX should now re-balance */
1644} 1601}
1645 1602
1646u_int 1603u_int
1647cpu_intr_count(struct cpu_info *ci) 1604cpu_intr_count(struct cpu_info *ci)
1648{ 1605{
1649 1606
1650 KASSERT(ci->ci_nintrhand >= 0); 1607 KASSERT(ci->ci_nintrhand >= 0);
1651 1608
1652 return ci->ci_nintrhand; 1609 return ci->ci_nintrhand;
1653} 1610}
1654 1611
1655#if !defined(XEN) 
1656static int 1612static int
1657intr_find_unused_slot(struct cpu_info *ci, int *index) 1613intr_find_unused_slot(struct cpu_info *ci, int *index)
1658{ 1614{
1659 int slot, i; 1615 int slot, i;
1660 1616
1661 KASSERT(mutex_owned(&cpu_lock)); 1617 KASSERT(mutex_owned(&cpu_lock));
1662 1618
1663 slot = -1; 1619 slot = -1;
1664 for (i = 0; i < MAX_INTR_SOURCES ; i++) { 1620 for (i = 0; i < MAX_INTR_SOURCES ; i++) {
1665 if (ci->ci_isources[i] == NULL) { 1621 if (ci->ci_isources[i] == NULL) {
1666 slot = i; 1622 slot = i;
1667 break; 1623 break;
1668 } 1624 }
@@ -1983,50 +1939,46 @@ interrupt_get_assigned(const char *intri @@ -1983,50 +1939,46 @@ interrupt_get_assigned(const char *intri
1983 mutex_enter(&cpu_lock); 1939 mutex_enter(&cpu_lock);
1984 1940
1985 ih = intr_get_handler(intrid); 1941 ih = intr_get_handler(intrid);
1986 if (ih == NULL) 1942 if (ih == NULL)
1987 goto out; 1943 goto out;
1988 1944
1989 ci = ih->ih_cpu; 1945 ci = ih->ih_cpu;
1990 kcpuset_set(cpuset, cpu_index(ci)); 1946 kcpuset_set(cpuset, cpu_index(ci));
1991 1947
1992 out: 1948 out:
1993 mutex_exit(&cpu_lock); 1949 mutex_exit(&cpu_lock);
1994} 1950}
1995 1951
1996#endif /* XEN */ 
1997 
1998/* 1952/*
1999 * MI interface for subr_interrupt.c 1953 * MI interface for subr_interrupt.c
2000 */ 1954 */
2001void 1955void
2002interrupt_get_available(kcpuset_t *cpuset) 1956interrupt_get_available(kcpuset_t *cpuset)
2003{ 1957{
2004 CPU_INFO_ITERATOR cii; 1958 CPU_INFO_ITERATOR cii;
2005 struct cpu_info *ci; 1959 struct cpu_info *ci;
2006 1960
2007 kcpuset_zero(cpuset); 1961 kcpuset_zero(cpuset);
2008 1962
2009 mutex_enter(&cpu_lock); 1963 mutex_enter(&cpu_lock);
2010 for (CPU_INFO_FOREACH(cii, ci)) { 1964 for (CPU_INFO_FOREACH(cii, ci)) {
2011 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) { 1965 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) {
2012 kcpuset_set(cpuset, cpu_index(ci)); 1966 kcpuset_set(cpuset, cpu_index(ci));
2013 } 1967 }
2014 } 1968 }
2015 mutex_exit(&cpu_lock); 1969 mutex_exit(&cpu_lock);
2016} 1970}
2017 1971
2018#if !defined(XEN) 
2019 
2020/* 1972/*
2021 * MI interface for subr_interrupt.c 1973 * MI interface for subr_interrupt.c
2022 */ 1974 */
2023void 1975void
2024interrupt_get_devname(const char *intrid, char *buf, size_t len) 1976interrupt_get_devname(const char *intrid, char *buf, size_t len)
2025{ 1977{
2026 struct intrsource *isp; 1978 struct intrsource *isp;
2027 struct intrhand *ih; 1979 struct intrhand *ih;
2028 int slot; 1980 int slot;
2029 1981
2030 mutex_enter(&cpu_lock); 1982 mutex_enter(&cpu_lock);
2031 1983
2032 ih = intr_get_handler(intrid); 1984 ih = intr_get_handler(intrid);
@@ -2152,27 +2104,26 @@ interrupt_construct_intrids(const kcpuse @@ -2152,27 +2104,26 @@ interrupt_construct_intrids(const kcpuse
2152 break; 2104 break;
2153 } 2105 }
2154 2106
2155 if (!intr_is_affinity_intrsource(isp, cpuset)) 2107 if (!intr_is_affinity_intrsource(isp, cpuset))
2156 continue; 2108 continue;
2157 2109
2158 strncpy(ids[i], isp->is_intrid, sizeof(intrid_t)); 2110 strncpy(ids[i], isp->is_intrid, sizeof(intrid_t));
2159 i++; 2111 i++;
2160 } 2112 }
2161 mutex_exit(&cpu_lock); 2113 mutex_exit(&cpu_lock);
2162 2114
2163 return ii_handler; 2115 return ii_handler;
2164} 2116}
2165#endif /* !XEN */ 
2166 2117
2167/* 2118/*
2168 * MI interface for subr_interrupt.c 2119 * MI interface for subr_interrupt.c
2169 */ 2120 */
2170void 2121void
2171interrupt_destruct_intrids(struct intrids_handler *ii_handler) 2122interrupt_destruct_intrids(struct intrids_handler *ii_handler)
2172{ 2123{
2173 size_t iih_size; 2124 size_t iih_size;
2174 2125
2175 if (ii_handler == NULL) 2126 if (ii_handler == NULL)
2176 return; 2127 return;
2177 2128
2178 iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids; 2129 iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
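
Note that with the #if defined(XEN) guards removed, the PIC_XEN branch in
intr_create_intrid() is compiled unconditionally, and the event-channel
port is simply the pin. A runnable sketch of the id string it produces
(the pic name "xenev0" is an illustrative assumption, not taken from the
commit):

	#include <stdio.h>

	int
	main(void)
	{
		char buf[64];
		const char *pic_name = "xenev0";	/* assumed pic name */
		int port = 5;				/* PIC_XEN: port == pin */

		/* same format string as xen_intr_string() */
		snprintf(buf, sizeof(buf), "%s channel %d", pic_name, port);
		printf("%s\n", buf);			/* -> "xenev0 channel 5" */
		return 0;
	}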

cvs diff -r1.173 -r1.174 src/sys/arch/xen/conf/files.xen

--- src/sys/arch/xen/conf/files.xen 2018/12/24 21:15:59 1.173
+++ src/sys/arch/xen/conf/files.xen 2018/12/25 06:50:12 1.174
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1# $NetBSD: files.xen,v 1.173 2018/12/24 21:15:59 cherry Exp $ 1# $NetBSD: files.xen,v 1.174 2018/12/25 06:50:12 cherry Exp $
2# NetBSD: files.x86,v 1.10 2003/10/08 17:30:00 bouyer Exp  2# NetBSD: files.x86,v 1.10 2003/10/08 17:30:00 bouyer Exp
3# NetBSD: files.i386,v 1.254 2004/03/25 23:32:10 jmc Exp  3# NetBSD: files.i386,v 1.254 2004/03/25 23:32:10 jmc Exp
4 4
5ifdef i386 5ifdef i386
6maxpartitions 8 6maxpartitions 8
7else 7else
8maxpartitions 16 8maxpartitions 16
9endif 9endif
10 10
11maxusers 2 16 128 11maxusers 2 16 128
12 12
13defparam opt_kernbase.h KERNBASE 13defparam opt_kernbase.h KERNBASE
14 14
@@ -129,27 +129,26 @@ include "dev/scsipi/files.scsipi" @@ -129,27 +129,26 @@ include "dev/scsipi/files.scsipi"
129include "dev/ata/files.ata" 129include "dev/ata/files.ata"
130 130
131# Memory Disk for install floppy 131# Memory Disk for install floppy
132file dev/md_root.c memory_disk_hooks 132file dev/md_root.c memory_disk_hooks
133 133
134file arch/x86/x86/bus_dma.c machdep 134file arch/x86/x86/bus_dma.c machdep
135file arch/x86/x86/core_machdep.c coredump 135file arch/x86/x86/core_machdep.c coredump
136file arch/xen/x86/xen_bus_dma.c machdep 136file arch/xen/x86/xen_bus_dma.c machdep
137file arch/x86/x86/bus_space.c machdep 137file arch/x86/x86/bus_space.c machdep
138file arch/xen/x86/consinit.c machdep 138file arch/xen/x86/consinit.c machdep
139file arch/x86/x86/identcpu.c machdep 139file arch/x86/x86/identcpu.c machdep
140file arch/xen/x86/pintr.c machdep & dom0ops 140file arch/xen/x86/pintr.c machdep & dom0ops
141file arch/xen/x86/xen_ipi.c multiprocessor 141file arch/xen/x86/xen_ipi.c multiprocessor
142file arch/x86/x86/intr.c machdep 
143file arch/x86/x86/idt.c machdep 142file arch/x86/x86/idt.c machdep
144file arch/x86/x86/pmap.c machdep 143file arch/x86/x86/pmap.c machdep
145file arch/x86/x86/x86_tlb.c machdep 144file arch/x86/x86/x86_tlb.c machdep
146file arch/x86/x86/procfs_machdep.c procfs 145file arch/x86/x86/procfs_machdep.c procfs
147file arch/x86/x86/sys_machdep.c machdep 146file arch/x86/x86/sys_machdep.c machdep
148file arch/x86/x86/tsc.c machdep 147file arch/x86/x86/tsc.c machdep
149file arch/x86/x86/vm_machdep.c machdep 148file arch/x86/x86/vm_machdep.c machdep
150file arch/x86/x86/x86_machdep.c machdep 149file arch/x86/x86/x86_machdep.c machdep
151file arch/x86/x86/cpu_topology.c machdep 150file arch/x86/x86/cpu_topology.c machdep
152file arch/x86/x86/platform.c machdep 151file arch/x86/x86/platform.c machdep
153 152
154include "arch/xen/conf/files.compat" 153include "arch/xen/conf/files.compat"
155 154

cvs diff -r1.50 -r1.51 src/sys/arch/xen/include/intr.h

--- src/sys/arch/xen/include/intr.h 2018/12/24 14:55:42 1.50
+++ src/sys/arch/xen/include/intr.h 2018/12/25 06:50:12 1.51
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: intr.h,v 1.50 2018/12/24 14:55:42 cherry Exp $ */ 1/* $NetBSD: intr.h,v 1.51 2018/12/25 06:50:12 cherry Exp $ */
2/* NetBSD intr.h,v 1.15 2004/10/31 10:39:34 yamt Exp */ 2/* NetBSD intr.h,v 1.15 2004/10/31 10:39:34 yamt Exp */
3 3
4/*- 4/*-
5 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. 5 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Charles M. Hannum, and by Jason R. Thorpe. 9 * by Charles M. Hannum, and by Jason R. Thorpe.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -51,26 +51,29 @@ struct cpu_info; @@ -51,26 +51,29 @@ struct cpu_info;
51 * Struct describing an event channel.  51 * Struct describing an event channel.
52 */ 52 */
53 53
54struct evtsource { 54struct evtsource {
55 int ev_maxlevel; /* max. IPL for this source */ 55 int ev_maxlevel; /* max. IPL for this source */
56 uint32_t ev_imask; /* interrupt mask */ 56 uint32_t ev_imask; /* interrupt mask */
57 struct intrhand *ev_handlers; /* handler chain */ 57 struct intrhand *ev_handlers; /* handler chain */
58 struct evcnt ev_evcnt; /* interrupt counter */ 58 struct evcnt ev_evcnt; /* interrupt counter */
59 struct cpu_info *ev_cpu; /* cpu on which this event is bound */ 59 struct cpu_info *ev_cpu; /* cpu on which this event is bound */
60 char ev_intrname[32]; /* interrupt string */ 60 char ev_intrname[32]; /* interrupt string */
61 char ev_xname[64]; /* handler device list */ 61 char ev_xname[64]; /* handler device list */
62}; 62};
63 63
 64#define XMASK(ci,level) (ci)->ci_xmask[(level)]
 65#define XUNMASK(ci,level) (ci)->ci_xunmask[(level)]
 66
64extern struct intrstub xenev_stubs[]; 67extern struct intrstub xenev_stubs[];
65extern int irq2port[NR_EVENT_CHANNELS]; /* actually port + 1, so that 0 is invalid */ 68extern int irq2port[NR_EVENT_CHANNELS]; /* actually port + 1, so that 0 is invalid */
66 69
67#ifdef MULTIPROCESSOR 70#ifdef MULTIPROCESSOR
68int xen_intr_biglock_wrapper(void *); 71int xen_intr_biglock_wrapper(void *);
69#endif 72#endif
70 73
71#if defined(DOM0OPS) || NPCI > 0 74#if defined(DOM0OPS) || NPCI > 0
72int xen_vec_alloc(int); 75int xen_vec_alloc(int);
73int xen_pic_to_gsi(struct pic *, int); 76int xen_pic_to_gsi(struct pic *, int);
74#endif /* defined(DOM0OPS) || NPCI > 0 */ 77#endif /* defined(DOM0OPS) || NPCI > 0 */
75 78
76#ifdef MULTIPROCESSOR 79#ifdef MULTIPROCESSOR
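
XMASK()/XUNMASK() mirror the native IMASK()/IUNMASK() macros but read the
new ci_xmask/ci_xunmask arrays, so XEN event IPL state no longer shares
storage with the native interrupt masks. A runnable userland sketch of the
unmask table that cpu_intr_init() builds for XUNMASK() (the NIPL value is
an assumption; the kernel headers define the real one):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NIPL 8	/* illustrative level count */

	int
	main(void)
	{
		uint32_t xunmask[NIPL];
		int i;

		/* as in the diff: at IPL 0, everything but bit 0 is unmasked */
		xunmask[0] = 0xfffffffe;
		for (i = 1; i < NIPL; i++)
			xunmask[i] = xunmask[i - 1] & ~(1U << i);

		for (i = 0; i < NIPL; i++)
			printf("XUNMASK(ci, %d) = %08" PRIx32 "\n", i, xunmask[i]);
		return 0;
	}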

cvs diff -r1.33 -r1.34 src/sys/arch/xen/x86/hypervisor_machdep.c

--- src/sys/arch/xen/x86/hypervisor_machdep.c 2018/11/19 10:05:09 1.33
+++ src/sys/arch/xen/x86/hypervisor_machdep.c 2018/12/25 06:50:12 1.34
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: hypervisor_machdep.c,v 1.33 2018/11/19 10:05:09 kre Exp $ */ 1/* $NetBSD: hypervisor_machdep.c,v 1.34 2018/12/25 06:50:12 cherry Exp $ */
2 2
3/* 3/*
4 * 4 *
5 * Copyright (c) 2004 Christian Limpach. 5 * Copyright (c) 2004 Christian Limpach.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
@@ -44,27 +44,27 @@ @@ -44,27 +44,27 @@
44 * all copies or substantial portions of the Software. 44 * all copies or substantial portions of the Software.
45 *  45 *
46 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR  46 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
47 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,  47 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
48 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE  48 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
49 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER  49 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
50 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING  50 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
51 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER  51 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
52 * DEALINGS IN THE SOFTWARE. 52 * DEALINGS IN THE SOFTWARE.
53 */ 53 */
54 54
55 55
56#include <sys/cdefs.h> 56#include <sys/cdefs.h>
57__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.33 2018/11/19 10:05:09 kre Exp $"); 57__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.34 2018/12/25 06:50:12 cherry Exp $");
58 58
59#include <sys/param.h> 59#include <sys/param.h>
60#include <sys/systm.h> 60#include <sys/systm.h>
61#include <sys/kmem.h> 61#include <sys/kmem.h>
62 62
63#include <uvm/uvm_extern.h> 63#include <uvm/uvm_extern.h>
64 64
65#include <machine/vmparam.h> 65#include <machine/vmparam.h>
66#include <machine/pmap.h> 66#include <machine/pmap.h>
67 67
68#include <xen/xen.h> 68#include <xen/xen.h>
69#include <xen/hypervisor.h> 69#include <xen/hypervisor.h>
70#include <xen/evtchn.h> 70#include <xen/evtchn.h>
@@ -191,31 +191,31 @@ stipending(void) @@ -191,31 +191,31 @@ stipending(void)
191 while (vci->evtchn_upcall_pending) { 191 while (vci->evtchn_upcall_pending) {
192 cli(); 192 cli();
193 193
194 vci->evtchn_upcall_pending = 0; 194 vci->evtchn_upcall_pending = 0;
195 195
196 evt_iterate_bits(&vci->evtchn_pending_sel, 196 evt_iterate_bits(&vci->evtchn_pending_sel,
197 s->evtchn_pending, s->evtchn_mask, 197 s->evtchn_pending, s->evtchn_mask,
198 evt_set_pending, &ret); 198 evt_set_pending, &ret);
199 199
200 sti(); 200 sti();
201 } 201 }
202 202
203#if 0 203#if 0
204 if (ci->ci_ipending & 0x1) 204 if (ci->ci_xpending & 0x1)
205 printf("stipending events %08lx mask %08lx ilevel %d ipending %08x\n", 205 printf("stipending events %08lx mask %08lx ilevel %d ipending %08x\n",
206 HYPERVISOR_shared_info->events, 206 HYPERVISOR_shared_info->events,
207 HYPERVISOR_shared_info->events_mask, ci->ci_ilevel, 207 HYPERVISOR_shared_info->events_mask, ci->ci_ilevel,
208 ci->ci_ipending); 208 ci->ci_xpending);
209#endif 209#endif
210 210
211 return (ret); 211 return (ret);
212} 212}
213 213
214/* Iterate through pending events and call the event handler */ 214/* Iterate through pending events and call the event handler */
215 215
216static inline void 216static inline void
217evt_do_hypervisor_callback(unsigned int port, unsigned int l1i, 217evt_do_hypervisor_callback(unsigned int port, unsigned int l1i,
218 unsigned int l2i, void *args) 218 unsigned int l2i, void *args)
219{ 219{
220 KASSERT(args != NULL); 220 KASSERT(args != NULL);
221 221
@@ -277,27 +277,27 @@ do_hypervisor_callback(struct intrframe  @@ -277,27 +277,27 @@ do_hypervisor_callback(struct intrframe
277 277
278 while (vci->evtchn_upcall_pending) { 278 while (vci->evtchn_upcall_pending) {
279 vci->evtchn_upcall_pending = 0; 279 vci->evtchn_upcall_pending = 0;
280 280
281 evt_iterate_bits(&vci->evtchn_pending_sel, 281 evt_iterate_bits(&vci->evtchn_pending_sel,
282 s->evtchn_pending, s->evtchn_mask, 282 s->evtchn_pending, s->evtchn_mask,
283 evt_do_hypervisor_callback, regs); 283 evt_do_hypervisor_callback, regs);
284 } 284 }
285 285
286#ifdef DIAGNOSTIC 286#ifdef DIAGNOSTIC
287 if (level != ci->ci_ilevel) 287 if (level != ci->ci_ilevel)
288 printf("hypervisor done %08x level %d/%d ipending %08x\n", 288 printf("hypervisor done %08x level %d/%d ipending %08x\n",
289 (uint)vci->evtchn_pending_sel, 289 (uint)vci->evtchn_pending_sel,
290 level, ci->ci_ilevel, ci->ci_ipending); 290 level, ci->ci_ilevel, ci->ci_xpending);
291#endif 291#endif
292} 292}
293 293
294void 294void
295hypervisor_send_event(struct cpu_info *ci, unsigned int ev) 295hypervisor_send_event(struct cpu_info *ci, unsigned int ev)
296{ 296{
297 KASSERT(ci != NULL); 297 KASSERT(ci != NULL);
298 298
299 volatile shared_info_t *s = HYPERVISOR_shared_info; 299 volatile shared_info_t *s = HYPERVISOR_shared_info;
300 volatile struct vcpu_info *vci = ci->ci_vcpu; 300 volatile struct vcpu_info *vci = ci->ci_vcpu;
301 301
302#ifdef PORT_DEBUG 302#ifdef PORT_DEBUG
303 if (ev == PORT_DEBUG) 303 if (ev == PORT_DEBUG)
@@ -381,57 +381,57 @@ evt_enable_event(unsigned int port, unsi @@ -381,57 +381,57 @@ evt_enable_event(unsigned int port, unsi
381} 381}
382 382
383void 383void
384hypervisor_enable_ipl(unsigned int ipl) 384hypervisor_enable_ipl(unsigned int ipl)
385{ 385{
386 struct cpu_info *ci = curcpu(); 386 struct cpu_info *ci = curcpu();
387 387
388 /* 388 /*
389 * enable all events for ipl. As we only set an event in ipl_evt_mask 389 * enable all events for ipl. As we only set an event in ipl_evt_mask
390 * for its lowest IPL, and pending IPLs are processed high to low, 390 * for its lowest IPL, and pending IPLs are processed high to low,
391 * we know that all callbacks for this event have been processed. 391 * we know that all callbacks for this event have been processed.
392 */ 392 */
393 393
394 evt_iterate_bits(&ci->ci_isources[ipl]->ipl_evt_mask1, 394 evt_iterate_bits(&ci->ci_xsources[ipl]->ipl_evt_mask1,
395 ci->ci_isources[ipl]->ipl_evt_mask2, NULL,  395 ci->ci_xsources[ipl]->ipl_evt_mask2, NULL,
396 evt_enable_event, NULL); 396 evt_enable_event, NULL);
397 397
398} 398}
399 399
400void 400void
401hypervisor_set_ipending(uint32_t iplmask, int l1, int l2) 401hypervisor_set_ipending(uint32_t iplmask, int l1, int l2)
402{ 402{
403 403
404 /* This function is not re-entrant */ 404 /* This function is not re-entrant */
405 KASSERT(x86_read_psl() != 0); 405 KASSERT(x86_read_psl() != 0);
406 406
407 int ipl; 407 int ipl;
408 struct cpu_info *ci = curcpu(); 408 struct cpu_info *ci = curcpu();
409 409
410 /* set pending bit for the appropriate IPLs */  410 /* set pending bit for the appropriate IPLs */
411 ci->ci_ipending |= iplmask; 411 ci->ci_xpending |= iplmask;
412 412
413 /* 413 /*
413 * And set the event pending bit for the lowest IPL. As IPLs are handled 413 * And set the event pending bit for the lowest IPL. As IPLs are handled
414 * from high to low, this ensures that all callbacks will have been 414 * from high to low, this ensures that all callbacks will have been
416 * called when we ack the event 416 * called when we ack the event
417 */ 417 */
418 ipl = ffs(iplmask); 418 ipl = ffs(iplmask);
419 KASSERT(ipl > 0); 419 KASSERT(ipl > 0);
420 ipl--; 420 ipl--;
421 KASSERT(ipl < NIPL); 421 KASSERT(ipl < NIPL);
422 KASSERT(ci->ci_isources[ipl] != NULL); 422 KASSERT(ci->ci_xsources[ipl] != NULL);
423 ci->ci_isources[ipl]->ipl_evt_mask1 |= 1UL << l1; 423 ci->ci_xsources[ipl]->ipl_evt_mask1 |= 1UL << l1;
424 ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2; 424 ci->ci_xsources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
425 if (__predict_false(ci != curcpu())) { 425 if (__predict_false(ci != curcpu())) {
426 if (xen_send_ipi(ci, XEN_IPI_HVCB)) { 426 if (xen_send_ipi(ci, XEN_IPI_HVCB)) {
427 panic("hypervisor_set_ipending: " 427 panic("hypervisor_set_ipending: "
428 "xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n", 428 "xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n",
429 (int) ci->ci_cpuid); 429 (int) ci->ci_cpuid);
430 } 430 }
431 } 431 }
432} 432}
433 433
434void 434void
435hypervisor_machdep_attach(void) 435hypervisor_machdep_attach(void)
436{ 436{
437 /* dom0 does not require the arch-dependent P2M translation table */ 437 /* dom0 does not require the arch-dependent P2M translation table */
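
hypervisor_set_ipending() (above) records a pending event only against the
lowest IPL in iplmask, relying on pending IPLs being processed from high to
low so that every callback has run by the time the event is acked. The
ffs() arithmetic that picks that lowest level, as a runnable sketch:

	#include <stdio.h>
	#include <strings.h>

	int
	main(void)
	{
		unsigned iplmask = (1U << 3) | (1U << 6); /* IPLs 3 and 6 pending */
		int ipl = ffs(iplmask) - 1;	/* ffs() is 1-based, 0 if no bit set */

		printf("lowest pending IPL = %d\n", ipl);	/* prints 3 */
		return 0;
	}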

cvs diff -r1.10 -r1.11 src/sys/arch/xen/x86/xen_intr.c

--- src/sys/arch/xen/x86/xen_intr.c 2018/12/24 14:55:42 1.10
+++ src/sys/arch/xen/x86/xen_intr.c 2018/12/25 06:50:12 1.11
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xen_intr.c,v 1.10 2018/12/24 14:55:42 cherry Exp $ */ 1/* $NetBSD: xen_intr.c,v 1.11 2018/12/25 06:50:12 cherry Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Jason R. Thorpe. 8 * by Charles M. Hannum, and by Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -20,103 +20,131 @@ @@ -20,103 +20,131 @@
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.10 2018/12/24 14:55:42 cherry Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.11 2018/12/25 06:50:12 cherry Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36#include <sys/kernel.h> 36#include <sys/kernel.h>
37#include <sys/kmem.h> 37#include <sys/kmem.h>
38 38
 39#include <sys/cpu.h>
 40
39#include <xen/evtchn.h> 41#include <xen/evtchn.h>
40 42
41#include <machine/cpu.h> 43#include <machine/cpu.h>
42#include <machine/intr.h> 44#include <machine/intr.h>
43 45
 46#include "acpica.h"
 47#include "ioapic.h"
 48#include "lapic.h"
 49#include "pci.h"
 50
 51#if NACPICA > 0
 52#include <dev/acpi/acpivar.h>
 53#endif
 54
 55#if NIOAPIC > 0 || NACPICA > 0
 56#include <machine/i82093var.h>
 57#endif
 58
 59#if NLAPIC > 0
 60#include <machine/i82489var.h>
 61#endif
 62
 63#if NPCI > 0
 64#include <dev/pci/ppbreg.h>
 65#endif
 66
 67void xen_disable_intr(void);
 68void xen_enable_intr(void);
 69u_long xen_read_psl(void);
 70void xen_write_psl(u_long);
 71
44/* 72/*
45 * Add a mask to cpl, and return the old value of cpl. 73 * Add a mask to cpl, and return the old value of cpl.
46 */ 74 */
47int 75int
48splraise(int nlevel) 76splraise(int nlevel)
49{ 77{
50 int olevel; 78 int olevel;
51 struct cpu_info *ci = curcpu(); 79 struct cpu_info *ci = curcpu();
52 80
53 olevel = ci->ci_ilevel; 81 olevel = ci->ci_ilevel;
54 if (nlevel > olevel) 82 if (nlevel > olevel)
55 ci->ci_ilevel = nlevel; 83 ci->ci_ilevel = nlevel;
56 __insn_barrier(); 84 __insn_barrier();
57 return (olevel); 85 return (olevel);
58} 86}
59 87
60/* 88/*
61 * Restore a value to cpl (unmasking interrupts). If any unmasked 89 * Restore a value to cpl (unmasking interrupts). If any unmasked
62 * interrupts are pending, call Xspllower() to process them. 90 * interrupts are pending, call Xspllower() to process them.
63 */ 91 */
64void 92void
65spllower(int nlevel) 93spllower(int nlevel)
66{ 94{
67 struct cpu_info *ci = curcpu(); 95 struct cpu_info *ci = curcpu();
68 uint32_t imask; 96 uint32_t xmask;
69 u_long psl; 97 u_long psl;
70 98
71 if (ci->ci_ilevel <= nlevel) 99 if (ci->ci_ilevel <= nlevel)
72 return; 100 return;
73 101
74 __insn_barrier(); 102 __insn_barrier();
75 103
76 imask = IUNMASK(ci, nlevel); 104 xmask = XUNMASK(ci, nlevel);
77 psl = x86_read_psl(); 105 psl = xen_read_psl();
78 x86_disable_intr(); 106 xen_disable_intr();
79 if (ci->ci_ipending & imask) { 107 if (ci->ci_xpending & xmask) {
80 KASSERT(psl == 0); 108 KASSERT(psl == 0);
81 Xspllower(nlevel); 109 Xspllower(nlevel);
82 /* Xspllower does enable_intr() */ 110 /* Xspllower does enable_intr() */
83 } else { 111 } else {
84 ci->ci_ilevel = nlevel; 112 ci->ci_ilevel = nlevel;
85 x86_write_psl(psl); 113 xen_write_psl(psl);
86 } 114 }
87} 115}
88 116
89void 117void
90x86_disable_intr(void) 118xen_disable_intr(void)
91{ 119{
92 __cli(); 120 __cli();
93} 121}
94 122
95void 123void
96x86_enable_intr(void) 124xen_enable_intr(void)
97{ 125{
98 __sti(); 126 __sti();
99} 127}
100 128
101u_long 129u_long
102x86_read_psl(void) 130xen_read_psl(void)
103{ 131{
104 132
105 return (curcpu()->ci_vcpu->evtchn_upcall_mask); 133 return (curcpu()->ci_vcpu->evtchn_upcall_mask);
106} 134}
107 135
108void 136void
109x86_write_psl(u_long psl) 137xen_write_psl(u_long psl)
110{ 138{
111 struct cpu_info *ci = curcpu(); 139 struct cpu_info *ci = curcpu();
112 140
113 ci->ci_vcpu->evtchn_upcall_mask = psl; 141 ci->ci_vcpu->evtchn_upcall_mask = psl;
114 xen_rmb(); 142 xen_rmb();
115 if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) { 143 if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
116 hypervisor_force_callback(); 144 hypervisor_force_callback();
117 } 145 }
118} 146}
119 147
120void * 148void *
121xen_intr_establish(int legacy_irq, struct pic *pic, int pin, 149xen_intr_establish(int legacy_irq, struct pic *pic, int pin,
122 int type, int level, int (*handler)(void *), void *arg, 150 int type, int level, int (*handler)(void *), void *arg,
@@ -251,16 +279,218 @@ xen_intr_disestablish(struct intrhand *i @@ -251,16 +279,218 @@ xen_intr_disestablish(struct intrhand *i
251 * event_remove_handler(). 279 * event_remove_handler().
252 * 280 *
253 * We can safely unbind the pirq now. 281 * We can safely unbind the pirq now.
254 */ 282 */
255 283
256 port = unbind_pirq_from_evtch(pirq); 284 port = unbind_pirq_from_evtch(pirq);
257 KASSERT(port == pih->evtch); 285 KASSERT(port == pih->evtch);
258 irq2port[pirq] = 0; 286 irq2port[pirq] = 0;
259 } 287 }
260#endif 288#endif
261 return; 289 return;
262} 290}
263 291
 292/* MI interface for kern_cpu.c */
 293void xen_cpu_intr_redistribute(void);
 294
 295void
 296xen_cpu_intr_redistribute(void)
 297{
 298 KASSERT(mutex_owned(&cpu_lock));
 299 KASSERT(mp_online);
 300
 301 return;
 302}
 303
 304/* MD - called by x86/cpu.c */
 305void
 306cpu_intr_init(struct cpu_info *ci)
 307{
 308 int i; /* XXX: duplicate */
 309
 310 ci->ci_xunmask[0] = 0xfffffffe;
 311 for (i = 1; i < NIPL; i++)
 312 ci->ci_xunmask[i] = ci->ci_xunmask[i - 1] & ~(1 << i);
 313
 314#if defined(INTRSTACKSIZE)
 315 vaddr_t istack;
 316
 317 /*
 318 * If the red zone is activated, protect both the top and
 319 * the bottom of the stack with an unmapped page.
 320 */
 321 istack = uvm_km_alloc(kernel_map,
 322 INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
 323 UVM_KMF_WIRED|UVM_KMF_ZERO);
 324 if (redzone_const_or_false(true)) {
 325 pmap_kremove(istack, PAGE_SIZE);
 326 pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
 327 pmap_update(pmap_kernel());
 328 }
 329
 330 /*
 331 * 33 used to be 1. Arbitrarily reserve 32 more register_t's
 332 * of space for ddb(4) to examine some subroutine arguments
 333 * and to hunt for the next stack frame.
 334 */
 335 ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
 336 INTRSTACKSIZE - 33 * sizeof(register_t);
 337#endif
 338
 339 ci->ci_idepth = -1;
 340}
 341
 342/*
 343 * Everything from here down is duplicated from x86/intr.c.
 344 * When intr.c and xen_intr.c are unified, these will need to be
 345 * merged.
 346 */
 347
 348u_int xen_cpu_intr_count(struct cpu_info *ci);
 349
 350u_int
 351xen_cpu_intr_count(struct cpu_info *ci)
 352{
 353
 354 KASSERT(ci->ci_nintrhand >= 0);
 355
 356 return ci->ci_nintrhand;
 357}
 358
 359static const char *
 360xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
 361{
 362 KASSERT(pic->pic_type == PIC_XEN);
 363
 364 KASSERT(port >= 0);
 365 KASSERT(port < NR_EVENT_CHANNELS);
 366
 367 snprintf(buf, len, "%s channel %d", pic->pic_name, port);
 368
 369 return buf;
 370}
 371
 372static const char *
 373legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
 374{
 375 int legacy_irq;
 376
 377 KASSERT(pic->pic_type == PIC_I8259);
 378#if NLAPIC > 0
 379 KASSERT(APIC_IRQ_ISLEGACY(ih));
 380
 381 legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
 382#else
 383 legacy_irq = ih;
 384#endif
 385 KASSERT(legacy_irq >= 0 && legacy_irq < 16);
 386
 387 snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq);
 388
 389 return buf;
 390}
 391
 392const char *
 393intr_string(intr_handle_t ih, char *buf, size_t len)
 394{
 395#if NIOAPIC > 0
 396 struct ioapic_softc *pic;
 397#endif
 398
 399 if (ih == 0)
 400 panic("%s: bogus handle 0x%" PRIx64, __func__, ih);
 401
 402#if NIOAPIC > 0
 403 if (ih & APIC_INT_VIA_APIC) {
 404 pic = ioapic_find(APIC_IRQ_APIC(ih));
 405 if (pic != NULL) {
 406 snprintf(buf, len, "%s pin %d",
 407 device_xname(pic->sc_dev), APIC_IRQ_PIN(ih));
 408 } else {
 409 snprintf(buf, len,
 410 "apic %d int %d (irq %d)",
 411 APIC_IRQ_APIC(ih),
 412 APIC_IRQ_PIN(ih),
 413 APIC_IRQ_LEGACY_IRQ(ih));
 414 }
 415 } else
 416 snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
 417
 418#elif NLAPIC > 0
 419 snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
 420#else
 421 snprintf(buf, len, "irq %d", (int) ih);
 422#endif
 423 return buf;
 424
 425}
 426
 427/*
 428 * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used
 429 * by MI code and intrctl(8).
 430 */
 431const char *
 432intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, size_t len)
 433{
 434 int ih = 0;
 435
 436#if NPCI > 0
 437#if defined(__HAVE_PCI_MSI_MSIX)
 438 if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) {
 439 uint64_t pih;
 440 int dev, vec;
 441
 442 dev = msipic_get_devid(pic);
 443 vec = pin;
 444 pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
 445 | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
 446 | APIC_INT_VIA_MSI;
 447 if (pic->pic_type == PIC_MSI)
 448 MSI_INT_MAKE_MSI(pih);
 449 else if (pic->pic_type == PIC_MSIX)
 450 MSI_INT_MAKE_MSIX(pih);
 451
 452 return x86_pci_msi_string(NULL, pih, buf, len);
 453 }
 454#endif /* __HAVE_PCI_MSI_MSIX */
 455#endif
 456
 457 if (pic->pic_type == PIC_XEN) {
 458 ih = pin; /* Port == pin */
 459 return xen_intr_string(pin, buf, len, pic);
 460 }
 461
 462 /*
 463 * If the device is pci, "legacy_irq" is always -1. The least
 464 * significant 8 bits of "ih" are only used in intr_string() to
 465 * show the irq number. If the device is "legacy" (such as a
 466 * floppy), it should not use intr_string().
 467 */
 468 if (pic->pic_type == PIC_I8259) {
 469 ih = legacy_irq;
 470 return legacy_intr_string(ih, buf, len, pic);
 471 }
 472
 473#if NIOAPIC > 0 || NACPICA > 0
 474 ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK)
 475 | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK);
 476 if (pic->pic_type == PIC_IOAPIC) {
 477 ih |= APIC_INT_VIA_APIC;
 478 }
 479 ih |= pin;
 480 return intr_string(ih, buf, len);
 481#endif
 482
 483 return NULL; /* No pic found! */
 484}
 485
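
The MSI branch of intr_create_intrid() above leans on __SHIFTIN() to pack
the device id and vector into the 64-bit handle. A minimal sketch of that
packing, assuming __SHIFTIN()/__BITS() from <sys/param.h>; the DEMO_*
masks are made up for illustration and are not the real MSI_INT_* values:

	#include <sys/param.h>	/* __SHIFTIN(), __BITS() */

	#define DEMO_DEV_MASK	__BITS(8, 15)	/* hypothetical field */
	#define DEMO_VEC_MASK	__BITS(0, 7)	/* hypothetical field */

	static inline uint64_t
	demo_pack(uint64_t dev, uint64_t vec)
	{
		/* place each value in its field, then OR them together */
		return __SHIFTIN(dev, DEMO_DEV_MASK) |
		    __SHIFTIN(vec, DEMO_VEC_MASK);
	}

For example, demo_pack(2, 1) yields 0x0201.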
 486__weak_alias(x86_disable_intr, xen_disable_intr);
 487__weak_alias(x86_enable_intr, xen_enable_intr);
 488__weak_alias(x86_read_psl, xen_read_psl);
 489__weak_alias(x86_write_psl, xen_write_psl);
 490
264__weak_alias(intr_establish, xen_intr_establish); 491__weak_alias(intr_establish, xen_intr_establish);
265__weak_alias(intr_establish_xname, xen_intr_establish_xname); 492__weak_alias(intr_establish_xname, xen_intr_establish_xname);
266__weak_alias(intr_disestablish, xen_intr_disestablish); 493__weak_alias(intr_disestablish, xen_intr_disestablish);
 494__weak_alias(cpu_intr_redistribute, xen_cpu_intr_redistribute);
 495__weak_alias(cpu_intr_count, xen_cpu_intr_count);
 496
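
The __weak_alias() block above is the provisioning hook: each MI entry
point binds to its xen_* implementation only when no strong native
definition is present at link time, so a unified kernel can override them
file by file. A minimal sketch of the mechanism, assuming __weak_alias()
from <sys/cdefs.h> (foo/xen_foo are illustrative names, not kernel
symbols):

	#include <sys/cdefs.h>

	int xen_foo(void);

	int
	xen_foo(void)
	{
		return 0;	/* Xen fallback implementation */
	}

	/*
	 * Callers of foo() bind to xen_foo() in a Xen-only kernel; if
	 * another object supplies a strong foo(), the linker prefers it.
	 */
	__weak_alias(foo, xen_foo);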

cvs diff -r1.75 -r1.76 src/sys/arch/xen/xen/Attic/clock.c

--- src/sys/arch/xen/xen/Attic/clock.c 2018/12/24 14:55:42 1.75
+++ src/sys/arch/xen/xen/Attic/clock.c 2018/12/25 06:50:12 1.76
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: clock.c,v 1.75 2018/12/24 14:55:42 cherry Exp $ */ 1/* $NetBSD: clock.c,v 1.76 2018/12/25 06:50:12 cherry Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2017, 2018 The NetBSD Foundation, Inc. 4 * Copyright (c) 2017, 2018 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell. 8 * by Taylor R. Campbell.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -26,27 +26,27 @@ @@ -26,27 +26,27 @@
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include "opt_xen.h" 32#include "opt_xen.h"
33 33
34#ifndef XEN_CLOCK_DEBUG 34#ifndef XEN_CLOCK_DEBUG
35#define XEN_CLOCK_DEBUG 0 35#define XEN_CLOCK_DEBUG 0
36#endif 36#endif
37 37
38#include <sys/cdefs.h> 38#include <sys/cdefs.h>
39__KERNEL_RCSID(0, "$NetBSD: clock.c,v 1.75 2018/12/24 14:55:42 cherry Exp $"); 39__KERNEL_RCSID(0, "$NetBSD: clock.c,v 1.76 2018/12/25 06:50:12 cherry Exp $");
40 40
41#include <sys/param.h> 41#include <sys/param.h>
42#include <sys/types.h> 42#include <sys/types.h>
43#include <sys/atomic.h> 43#include <sys/atomic.h>
44#include <sys/callout.h> 44#include <sys/callout.h>
45#include <sys/cpu.h> 45#include <sys/cpu.h>
46#include <sys/device.h> 46#include <sys/device.h>
47#include <sys/evcnt.h> 47#include <sys/evcnt.h>
48#include <sys/intr.h> 48#include <sys/intr.h>
49#include <sys/kernel.h> 49#include <sys/kernel.h>
50#include <sys/lwp.h> 50#include <sys/lwp.h>
51#include <sys/proc.h> 51#include <sys/proc.h>
52#include <sys/sysctl.h> 52#include <sys/sysctl.h>
@@ -152,27 +152,27 @@ setstatclockrate(int rate) @@ -152,27 +152,27 @@ setstatclockrate(int rate)
152{ 152{
153} 153}
154 154
155/* 155/*
156 * idle_block() 156 * idle_block()
157 * 157 *
158 * Called from the idle loop when we have nothing to do but wait 158 * Called from the idle loop when we have nothing to do but wait
159 * for an interrupt. 159 * for an interrupt.
160 */ 160 */
161void 161void
162idle_block(void) 162idle_block(void)
163{ 163{
164 164
165 KASSERT(curcpu()->ci_ipending == 0); 165 KASSERT(curcpu()->ci_xpending == 0);
166 HYPERVISOR_block(); 166 HYPERVISOR_block();
167} 167}
168 168
169/* 169/*
170 * xen_rdtsc() 170 * xen_rdtsc()
171 * 171 *
172 * Read the local pCPU's tsc. 172 * Read the local pCPU's tsc.
173 */ 173 */
174static inline uint64_t 174static inline uint64_t
175xen_rdtsc(void) 175xen_rdtsc(void)
176{ 176{
177 uint32_t lo, hi; 177 uint32_t lo, hi;
178 178
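
The hunks in clock.c and below track the same rename throughout: Xen event
state moves from ci_ipending/ci_isources to its own ci_xpending/ci_xsources,
so the two interrupt paths no longer share bookkeeping. A rough sketch of
the split on struct cpu_info (field names as in the diffs; the framing
struct and array bounds are illustrative):

	struct cpu_info_sketch {
		/* native interrupt path */
		uint32_t           ci_ipending;       /* one bit per IPL */
		struct intrsource *ci_isources[NIPL];

		/* Xen event path, independent after this commit */
		uint32_t           ci_xpending;       /* one bit per IPL */
		struct intrsource *ci_xsources[NIPL];
	};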

cvs diff -r1.82 -r1.83 src/sys/arch/xen/xen/evtchn.c

--- src/sys/arch/xen/xen/evtchn.c 2018/10/26 05:33:21 1.82
+++ src/sys/arch/xen/xen/evtchn.c 2018/12/25 06:50:12 1.83
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: evtchn.c,v 1.82 2018/10/26 05:33:21 cherry Exp $ */ 1/* $NetBSD: evtchn.c,v 1.83 2018/12/25 06:50:12 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Manuel Bouyer. 4 * Copyright (c) 2006 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
@@ -44,27 +44,27 @@ @@ -44,27 +44,27 @@
44 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 44 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
46 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 46 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
47 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 47 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
48 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 48 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
52 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 52 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 */ 53 */
54 54
55 55
56#include <sys/cdefs.h> 56#include <sys/cdefs.h>
57__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.82 2018/10/26 05:33:21 cherry Exp $"); 57__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.83 2018/12/25 06:50:12 cherry Exp $");
58 58
59#include "opt_xen.h" 59#include "opt_xen.h"
60#include "isa.h" 60#include "isa.h"
61#include "pci.h" 61#include "pci.h"
62 62
63#include <sys/param.h> 63#include <sys/param.h>
64#include <sys/cpu.h> 64#include <sys/cpu.h>
65#include <sys/kernel.h> 65#include <sys/kernel.h>
66#include <sys/systm.h> 66#include <sys/systm.h>
67#include <sys/device.h> 67#include <sys/device.h>
68#include <sys/proc.h> 68#include <sys/proc.h>
69#include <sys/kmem.h> 69#include <sys/kmem.h>
70#include <sys/reboot.h> 70#include <sys/reboot.h>
@@ -362,80 +362,80 @@ evtchn_do_event(int evtch, struct intrfr @@ -362,80 +362,80 @@ evtchn_do_event(int evtch, struct intrfr
362 362
363 /* leave masked */ 363 /* leave masked */
364 364
365 return 0; 365 return 0;
366 } 366 }
367 ci->ci_ilevel = evtsource[evtch]->ev_maxlevel; 367 ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
368 iplmask = evtsource[evtch]->ev_imask; 368 iplmask = evtsource[evtch]->ev_imask;
369 sti(); 369 sti();
370 mutex_spin_enter(&evtlock[evtch]); 370 mutex_spin_enter(&evtlock[evtch]);
371 ih = evtsource[evtch]->ev_handlers; 371 ih = evtsource[evtch]->ev_handlers;
372 while (ih != NULL) { 372 while (ih != NULL) {
373 if (ih->ih_cpu != ci) { 373 if (ih->ih_cpu != ci) {
374 hypervisor_send_event(ih->ih_cpu, evtch); 374 hypervisor_send_event(ih->ih_cpu, evtch);
375 iplmask &= ~IUNMASK(ci, ih->ih_level); 375 iplmask &= ~XUNMASK(ci, ih->ih_level);
376 ih = ih->ih_evt_next; 376 ih = ih->ih_evt_next;
377 continue; 377 continue;
378 } 378 }
379 if (ih->ih_level <= ilevel) { 379 if (ih->ih_level <= ilevel) {
380#ifdef IRQ_DEBUG 380#ifdef IRQ_DEBUG
381 if (evtch == IRQ_DEBUG) 381 if (evtch == IRQ_DEBUG)
382 printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel); 382 printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel);
383#endif 383#endif
384 cli(); 384 cli();
385 hypervisor_set_ipending(iplmask, 385 hypervisor_set_ipending(iplmask,
386 evtch >> LONG_SHIFT, evtch & LONG_MASK); 386 evtch >> LONG_SHIFT, evtch & LONG_MASK);
387 /* leave masked */ 387 /* leave masked */
388 mutex_spin_exit(&evtlock[evtch]); 388 mutex_spin_exit(&evtlock[evtch]);
389 goto splx; 389 goto splx;
390 } 390 }
391 iplmask &= ~IUNMASK(ci, ih->ih_level); 391 iplmask &= ~XUNMASK(ci, ih->ih_level);
392 ci->ci_ilevel = ih->ih_level; 392 ci->ci_ilevel = ih->ih_level;
393 ih_fun = (void *)ih->ih_fun; 393 ih_fun = (void *)ih->ih_fun;
394 ih_fun(ih->ih_arg, regs); 394 ih_fun(ih->ih_arg, regs);
395 ih = ih->ih_evt_next; 395 ih = ih->ih_evt_next;
396 } 396 }
397 mutex_spin_exit(&evtlock[evtch]); 397 mutex_spin_exit(&evtlock[evtch]);
398 cli(); 398 cli();
399 hypervisor_unmask_event(evtch); 399 hypervisor_unmask_event(evtch);
400#if NPCI > 0 || NISA > 0 400#if NPCI > 0 || NISA > 0
401 hypervisor_ack_pirq_event(evtch); 401 hypervisor_ack_pirq_event(evtch);
402#endif /* NPCI > 0 || NISA > 0 */  402#endif /* NPCI > 0 || NISA > 0 */
403 403
404splx: 404splx:
405 /* 405 /*
406 * C version of spllower(). ASTs will be checked when 406 * C version of spllower(). ASTs will be checked when
 407 * hypervisor_callback() exits, so no need to check here. 407 * hypervisor_callback() exits, so no need to check here.
408 */ 408 */
409 iplmask = (IUNMASK(ci, ilevel) & ci->ci_ipending); 409 iplmask = (XUNMASK(ci, ilevel) & ci->ci_xpending);
410 while (iplmask != 0) { 410 while (iplmask != 0) {
411 iplbit = 1 << (NIPL - 1); 411 iplbit = 1 << (NIPL - 1);
412 i = (NIPL - 1); 412 i = (NIPL - 1);
413 while (iplmask != 0 && i > ilevel) { 413 while (iplmask != 0 && i > ilevel) {
414 while (iplmask & iplbit) { 414 while (iplmask & iplbit) {
415 ci->ci_ipending &= ~iplbit; 415 ci->ci_xpending &= ~iplbit;
416 ci->ci_ilevel = i; 416 ci->ci_ilevel = i;
417 for (ih = ci->ci_isources[i]->is_handlers; 417 for (ih = ci->ci_xsources[i]->is_handlers;
418 ih != NULL; ih = ih->ih_next) { 418 ih != NULL; ih = ih->ih_next) {
419 KASSERT(ih->ih_cpu == ci); 419 KASSERT(ih->ih_cpu == ci);
420 sti(); 420 sti();
421 ih_fun = (void *)ih->ih_fun; 421 ih_fun = (void *)ih->ih_fun;
422 ih_fun(ih->ih_arg, regs); 422 ih_fun(ih->ih_arg, regs);
423 cli(); 423 cli();
424 } 424 }
425 hypervisor_enable_ipl(i); 425 hypervisor_enable_ipl(i);
426 /* more pending IPLs may have been registered */ 426 /* more pending IPLs may have been registered */
427 iplmask = 427 iplmask =
428 (IUNMASK(ci, ilevel) & ci->ci_ipending); 428 (XUNMASK(ci, ilevel) & ci->ci_xpending);
429 } 429 }
430 i--; 430 i--;
431 iplbit >>= 1; 431 iplbit >>= 1;
432 } 432 }
433 } 433 }
434 ci->ci_ilevel = ilevel; 434 ci->ci_ilevel = ilevel;
435 return 0; 435 return 0;
436} 436}
437 437
438#define PRIuCPUID "lu" /* XXX: move this somewhere more appropriate */ 438#define PRIuCPUID "lu" /* XXX: move this somewhere more appropriate */
439 439
440/* PIC callbacks */ 440/* PIC callbacks */
441/* pic "pin"s are conceptually mapped to event port numbers */ 441/* pic "pin"s are conceptually mapped to event port numbers */
@@ -928,37 +928,37 @@ event_set_handler(int evtch, int (*func) @@ -928,37 +928,37 @@ event_set_handler(int evtch, int (*func)
928 splx(s); 928 splx(s);
929 929
930 return 0; 930 return 0;
931} 931}
932 932
933void 933void
934event_set_iplhandler(struct cpu_info *ci, 934event_set_iplhandler(struct cpu_info *ci,
935 struct intrhand *ih, 935 struct intrhand *ih,
936 int level) 936 int level)
937{ 937{
938 struct intrsource *ipls; 938 struct intrsource *ipls;
939 939
940 KASSERT(ci == ih->ih_cpu); 940 KASSERT(ci == ih->ih_cpu);
941 if (ci->ci_isources[level] == NULL) { 941 if (ci->ci_xsources[level] == NULL) {
942 ipls = kmem_zalloc(sizeof (struct intrsource), 942 ipls = kmem_zalloc(sizeof (struct intrsource),
943 KM_NOSLEEP); 943 KM_NOSLEEP);
944 if (ipls == NULL) 944 if (ipls == NULL)
945 panic("can't allocate fixed interrupt source"); 945 panic("can't allocate fixed interrupt source");
946 ipls->is_recurse = xenev_stubs[level].ist_recurse; 946 ipls->is_recurse = xenev_stubs[level].ist_recurse;
947 ipls->is_resume = xenev_stubs[level].ist_resume; 947 ipls->is_resume = xenev_stubs[level].ist_resume;
948 ipls->is_handlers = ih; 948 ipls->is_handlers = ih;
949 ci->ci_isources[level] = ipls; 949 ci->ci_xsources[level] = ipls;
950 } else { 950 } else {
951 ipls = ci->ci_isources[level]; 951 ipls = ci->ci_xsources[level];
952 ih->ih_next = ipls->is_handlers; 952 ih->ih_next = ipls->is_handlers;
953 ipls->is_handlers = ih; 953 ipls->is_handlers = ih;
954 } 954 }
955} 955}
956 956
957int 957int
958event_remove_handler(int evtch, int (*func)(void *), void *arg) 958event_remove_handler(int evtch, int (*func)(void *), void *arg)
959{ 959{
960 struct intrsource *ipls; 960 struct intrsource *ipls;
961 struct evtsource *evts; 961 struct evtsource *evts;
962 struct intrhand *ih; 962 struct intrhand *ih;
963 struct intrhand **ihp; 963 struct intrhand **ihp;
964 struct cpu_info *ci; 964 struct cpu_info *ci;
@@ -971,27 +971,27 @@ event_remove_handler(int evtch, int (*fu @@ -971,27 +971,27 @@ event_remove_handler(int evtch, int (*fu
971 for (ihp = &evts->ev_handlers, ih = evts->ev_handlers; 971 for (ihp = &evts->ev_handlers, ih = evts->ev_handlers;
972 ih != NULL; 972 ih != NULL;
973 ihp = &ih->ih_evt_next, ih = ih->ih_evt_next) { 973 ihp = &ih->ih_evt_next, ih = ih->ih_evt_next) {
974 if (ih->ih_realfun == func && ih->ih_realarg == arg) 974 if (ih->ih_realfun == func && ih->ih_realarg == arg)
975 break; 975 break;
976 } 976 }
977 if (ih == NULL) { 977 if (ih == NULL) {
978 mutex_spin_exit(&evtlock[evtch]); 978 mutex_spin_exit(&evtlock[evtch]);
979 return ENOENT; 979 return ENOENT;
980 } 980 }
981 ci = ih->ih_cpu; 981 ci = ih->ih_cpu;
982 *ihp = ih->ih_evt_next; 982 *ihp = ih->ih_evt_next;
983 983
984 ipls = ci->ci_isources[ih->ih_level]; 984 ipls = ci->ci_xsources[ih->ih_level];
985 for (ihp = &ipls->is_handlers, ih = ipls->is_handlers; 985 for (ihp = &ipls->is_handlers, ih = ipls->is_handlers;
986 ih != NULL; 986 ih != NULL;
987 ihp = &ih->ih_next, ih = ih->ih_next) { 987 ihp = &ih->ih_next, ih = ih->ih_next) {
988 if (ih->ih_realfun == func && ih->ih_realarg == arg) 988 if (ih->ih_realfun == func && ih->ih_realarg == arg)
989 break; 989 break;
990 } 990 }
991 if (ih == NULL) 991 if (ih == NULL)
992 panic("event_remove_handler"); 992 panic("event_remove_handler");
993 *ihp = ih->ih_next; 993 *ihp = ih->ih_next;
994 mutex_spin_exit(&evtlock[evtch]); 994 mutex_spin_exit(&evtlock[evtch]);
995 kmem_free(ih, sizeof (struct intrhand)); 995 kmem_free(ih, sizeof (struct intrhand));
996 if (evts->ev_handlers == NULL) { 996 if (evts->ev_handlers == NULL) {
997 xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch); 997 xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch);
@@ -1036,45 +1036,45 @@ hypervisor_ack_pirq_event(unsigned int e @@ -1036,45 +1036,45 @@ hypervisor_ack_pirq_event(unsigned int e
1036 printf("pirq_notify(%d)\n", evtch); 1036 printf("pirq_notify(%d)\n", evtch);
1037#endif 1037#endif
1038 (void)HYPERVISOR_physdev_op(&physdev_op_notify); 1038 (void)HYPERVISOR_physdev_op(&physdev_op_notify);
1039 } 1039 }
1040} 1040}
1041#endif /* NPCI > 0 || NISA > 0 */ 1041#endif /* NPCI > 0 || NISA > 0 */
1042 1042
1043int 1043int
1044xen_debug_handler(void *arg) 1044xen_debug_handler(void *arg)
1045{ 1045{
1046 struct cpu_info *ci = curcpu(); 1046 struct cpu_info *ci = curcpu();
1047 int i; 1047 int i;
1048 int xci_ilevel = ci->ci_ilevel; 1048 int xci_ilevel = ci->ci_ilevel;
1049 int xci_ipending = ci->ci_ipending; 1049 int xci_xpending = ci->ci_xpending;
1050 int xci_idepth = ci->ci_idepth; 1050 int xci_idepth = ci->ci_idepth;
1051 u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending; 1051 u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending;
1052 u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask; 1052 u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask;
1053 u_long pending_sel = ci->ci_vcpu->evtchn_pending_sel; 1053 u_long pending_sel = ci->ci_vcpu->evtchn_pending_sel;
1054 unsigned long evtchn_mask[sizeof(unsigned long) * 8]; 1054 unsigned long evtchn_mask[sizeof(unsigned long) * 8];
1055 unsigned long evtchn_pending[sizeof(unsigned long) * 8]; 1055 unsigned long evtchn_pending[sizeof(unsigned long) * 8];
1056 1056
1057 u_long p; 1057 u_long p;
1058 1058
1059 p = (u_long)&HYPERVISOR_shared_info->evtchn_mask[0]; 1059 p = (u_long)&HYPERVISOR_shared_info->evtchn_mask[0];
1060 memcpy(evtchn_mask, (void *)p, sizeof(evtchn_mask)); 1060 memcpy(evtchn_mask, (void *)p, sizeof(evtchn_mask));
1061 p = (u_long)&HYPERVISOR_shared_info->evtchn_pending[0]; 1061 p = (u_long)&HYPERVISOR_shared_info->evtchn_pending[0];
1062 memcpy(evtchn_pending, (void *)p, sizeof(evtchn_pending)); 1062 memcpy(evtchn_pending, (void *)p, sizeof(evtchn_pending));
1063 1063
1064 __insn_barrier(); 1064 __insn_barrier();
1065 printf("debug event\n"); 1065 printf("debug event\n");
1066 printf("ci_ilevel 0x%x ci_ipending 0x%x ci_idepth %d\n", 1066 printf("ci_ilevel 0x%x ci_xpending 0x%x ci_idepth %d\n",
1067 xci_ilevel, xci_ipending, xci_idepth); 1067 xci_ilevel, xci_xpending, xci_idepth);
1068 printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld" 1068 printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld"
1069 " evtchn_pending_sel 0x%lx\n", 1069 " evtchn_pending_sel 0x%lx\n",
1070 upcall_pending, upcall_mask, pending_sel); 1070 upcall_pending, upcall_mask, pending_sel);
1071 printf("evtchn_mask"); 1071 printf("evtchn_mask");
1072 for (i = 0 ; i <= LONG_MASK; i++) 1072 for (i = 0 ; i <= LONG_MASK; i++)
1073 printf(" %lx", (u_long)evtchn_mask[i]); 1073 printf(" %lx", (u_long)evtchn_mask[i]);
1074 printf("\n"); 1074 printf("\n");
1075 printf("evtchn_pending"); 1075 printf("evtchn_pending");
1076 for (i = 0 ; i <= LONG_MASK; i++) 1076 for (i = 0 ; i <= LONG_MASK; i++)
1077 printf(" %lx", (u_long)evtchn_pending[i]); 1077 printf(" %lx", (u_long)evtchn_pending[i]);
1078 printf("\n"); 1078 printf("\n");
1079 return 0; 1079 return 0;
1080} 1080}
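
The tail of evtchn_do_event() above is a C rendering of spllower():
XUNMASK(ci, l) selects the IPL bits unmasked at level l, and the loop
drains ci_xpending from the highest level down, re-reading the mask after
each source because handlers can post new pending bits. A condensed
sketch of the same shape (xspllower_sketch is an illustrative name; event
unmasking, interrupt gating and the register frame are elided):

	static void
	xspllower_sketch(struct cpu_info *ci, int ilevel)
	{
		struct intrhand *ih;
		uint32_t iplmask, iplbit;
		int i;

		iplmask = XUNMASK(ci, ilevel) & ci->ci_xpending;
		while (iplmask != 0) {
			iplbit = 1 << (NIPL - 1);
			i = NIPL - 1;
			while (iplmask != 0 && i > ilevel) {
				while (iplmask & iplbit) {
					ci->ci_xpending &= ~iplbit;
					ci->ci_ilevel = i;
					for (ih = ci->ci_xsources[i]->is_handlers;
					    ih != NULL; ih = ih->ih_next) {
						int (*fn)(void *, void *) =
						    (int (*)(void *, void *))ih->ih_fun;
						(*fn)(ih->ih_arg, NULL);
					}
					/* handlers may post new bits */
					iplmask = XUNMASK(ci, ilevel) &
					    ci->ci_xpending;
				}
				i--;
				iplbit >>= 1;
			}
		}
		ci->ci_ilevel = ilevel;
	}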

cvs diff -r1.52 -r1.53 src/sys/arch/xen/xen/xenevt.c

--- src/sys/arch/xen/xen/xenevt.c 2018/12/24 14:55:42 1.52
+++ src/sys/arch/xen/xen/xenevt.c 2018/12/25 06:50:12 1.53
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xenevt.c,v 1.52 2018/12/24 14:55:42 cherry Exp $ */ 1/* $NetBSD: xenevt.c,v 1.53 2018/12/25 06:50:12 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2005 Manuel Bouyer. 4 * Copyright (c) 2005 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
@@ -16,27 +16,27 @@ @@ -16,27 +16,27 @@
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * 25 *
26 */ 26 */
27 27
28#include <sys/cdefs.h> 28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: xenevt.c,v 1.52 2018/12/24 14:55:42 cherry Exp $"); 29__KERNEL_RCSID(0, "$NetBSD: xenevt.c,v 1.53 2018/12/25 06:50:12 cherry Exp $");
30 30
31#include "opt_xen.h" 31#include "opt_xen.h"
32#include <sys/param.h> 32#include <sys/param.h>
33#include <sys/kernel.h> 33#include <sys/kernel.h>
34#include <sys/malloc.h> 34#include <sys/malloc.h>
35#include <sys/mutex.h> 35#include <sys/mutex.h>
36#include <sys/systm.h> 36#include <sys/systm.h>
37#include <sys/device.h> 37#include <sys/device.h>
38#include <sys/file.h> 38#include <sys/file.h>
39#include <sys/filedesc.h> 39#include <sys/filedesc.h>
40#include <sys/poll.h> 40#include <sys/poll.h>
41#include <sys/select.h> 41#include <sys/select.h>
42#include <sys/proc.h> 42#include <sys/proc.h>
@@ -185,27 +185,27 @@ xenevtattach(int n) @@ -185,27 +185,27 @@ xenevtattach(int n)
185 /* The real objective here is to wiggle into the ih callchain for IPL level */ 185 /* The real objective here is to wiggle into the ih callchain for IPL level */
186 ih = xen_intr_establish_xname(-1, &xen_pic, evtchn, IST_LEVEL, level, 186 ih = xen_intr_establish_xname(-1, &xen_pic, evtchn, IST_LEVEL, level,
187 xenevt_processevt, NULL, mpsafe, "xenevt"); 187 xenevt_processevt, NULL, mpsafe, "xenevt");
188 188
189 KASSERT(ih != NULL); 189 KASSERT(ih != NULL);
190} 190}
191 191
192/* register pending event - always called with interrupt disabled */ 192/* register pending event - always called with interrupt disabled */
193void 193void
194xenevt_setipending(int l1, int l2) 194xenevt_setipending(int l1, int l2)
195{ 195{
196 atomic_or_ulong(&xenevt_ev1, 1UL << l1); 196 atomic_or_ulong(&xenevt_ev1, 1UL << l1);
197 atomic_or_ulong(&xenevt_ev2[l1], 1UL << l2); 197 atomic_or_ulong(&xenevt_ev2[l1], 1UL << l2);
198 atomic_or_32(&cpu_info_primary.ci_ipending, 1 << IPL_HIGH); 198 atomic_or_32(&cpu_info_primary.ci_xpending, 1 << IPL_HIGH);
199} 199}
200 200
201/* process pending events */ 201/* process pending events */
202static int 202static int
203xenevt_processevt(void *v) 203xenevt_processevt(void *v)
204{ 204{
205 long l1, l2; 205 long l1, l2;
206 int l1i, l2i; 206 int l1i, l2i;
207 int port; 207 int port;
208 208
209 l1 = xen_atomic_xchg(&xenevt_ev1, 0); 209 l1 = xen_atomic_xchg(&xenevt_ev1, 0);
210 while ((l1i = xen_ffs(l1)) != 0) { 210 while ((l1i = xen_ffs(l1)) != 0) {
211 l1i--; 211 l1i--;