Sat Dec 22 21:27:22 2018 UTC ()
Introduce a weak alias method of exporting different implementations
of the same API.
For example, the amd64 native implementation of invlpg() now becomes
amd64_invlpg(), with a weak symbol export of invlpg(), while the XEN
implementation becomes xen_invlpg(), also weakly exported as invlpg().
Note that linking in both together without having an override function
named invlpg() would be a mistake, as we have limited control over
which of the two options would emerge as the finally exported invlpg(),
resulting in a potential situation where the wrong function is
exported. This change avoids that situation.
We should, however, include an override function invlpg() in that case,
such that it is able to then pass on the call to the appropriate
backing function (amd64_invlpg() in the case of native, and
xen_invlpg() in the case of running under XEN virtualisation) at runtime.
This change does not introduce such a function and therefore does not
alter builds to include native as well as XEN implementations in the
same binary. This will be done later, with the introduction of XEN
PVHVM mode, where precisely such a runtime switch is required.
There are no operational changes introduced by this change.
(cherry)
diff -r1.33 -r1.34 src/sys/arch/amd64/amd64/cpufunc.S
diff -r1.25 -r1.26 src/sys/arch/i386/i386/cpufunc.S
diff -r1.18 -r1.19 src/sys/arch/i386/i386/i386func.S
diff -r1.22 -r1.23 src/sys/arch/xen/x86/xenfunc.c
--- src/sys/arch/amd64/amd64/cpufunc.S 2018/07/21 06:09:13 1.33
+++ src/sys/arch/amd64/amd64/cpufunc.S 2018/12/22 21:27:22 1.34
@@ -1,4 +1,4 @@
-/* $NetBSD: cpufunc.S,v 1.33 2018/07/21 06:09:13 maxv Exp $ */
+/* $NetBSD: cpufunc.S,v 1.34 2018/12/22 21:27:22 cherry Exp $ */
/*
* Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -63,18 +63,53 @@
ret
END(x86_mfence)
+/*
+ * These functions below should always be accessed via the corresponding wrapper
+ * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS()
+ *
+ * We use this rather roundabout method so that a runtime wrapper function may
+ * be made available for PVHVM, which could override both native and PV aliases
+ * and decide which to invoke at run time.
+ */
+
+WEAK_ALIAS(invlpg, amd64_invlpg)
+WEAK_ALIAS(lidt, amd64_lidt)
+WEAK_ALIAS(lldt, amd64_lldt)
+WEAK_ALIAS(ltr, amd64_ltr)
+WEAK_ALIAS(lcr0, amd64_lcr0)
+WEAK_ALIAS(rcr0, amd64_rcr0)
+WEAK_ALIAS(rcr2, amd64_rcr2)
+WEAK_ALIAS(lcr2, amd64_lcr2)
+WEAK_ALIAS(rcr3, amd64_rcr3)
+WEAK_ALIAS(lcr3, amd64_lcr3)
+WEAK_ALIAS(tlbflush, amd64_tlbflush)
+WEAK_ALIAS(tlbflushg, amd64_tlbflushg)
+WEAK_ALIAS(rdr0, amd64_rdr0)
+WEAK_ALIAS(ldr0, amd64_ldr0)
+WEAK_ALIAS(rdr1, amd64_rdr1)
+WEAK_ALIAS(ldr1, amd64_ldr1)
+WEAK_ALIAS(rdr2, amd64_rdr2)
+WEAK_ALIAS(ldr2, amd64_ldr2)
+WEAK_ALIAS(rdr3, amd64_rdr3)
+WEAK_ALIAS(ldr3, amd64_ldr3)
+WEAK_ALIAS(rdr6, amd64_rdr6)
+WEAK_ALIAS(ldr6, amd64_ldr6)
+WEAK_ALIAS(rdr7, amd64_rdr7)
+WEAK_ALIAS(ldr7, amd64_ldr7)
+WEAK_ALIAS(wbinvd, amd64_wbinvd)
+
#ifndef XEN
-ENTRY(invlpg)
+ENTRY(amd64_invlpg)
invlpg (%rdi)
ret
-END(invlpg)
+END(amd64_invlpg)
-ENTRY(lidt)
+ENTRY(amd64_lidt)
lidt (%rdi)
ret
-END(lidt)
+END(amd64_lidt)
-ENTRY(lldt)
+ENTRY(amd64_lldt)
cmpl %edi, CPUVAR(CURLDT)
jne 1f
ret
@@ -82,42 +117,42 @@
movl %edi, CPUVAR(CURLDT)
lldt %di
ret
-END(lldt)
+END(amd64_lldt)
-ENTRY(ltr)
+ENTRY(amd64_ltr)
ltr %di
ret
-END(ltr)
+END(amd64_ltr)
-ENTRY(lcr0)
+ENTRY(amd64_lcr0)
movq %rdi, %cr0
ret
-END(lcr0)
+END(amd64_lcr0)
-ENTRY(rcr0)
+ENTRY(amd64_rcr0)
movq %cr0, %rax
ret
-END(rcr0)
+END(amd64_rcr0)
-ENTRY(lcr2)
+ENTRY(amd64_lcr2)
movq %rdi, %cr2
ret
-END(lcr2)
+END(amd64_lcr2)
-ENTRY(rcr2)
+ENTRY(amd64_rcr2)
movq %cr2, %rax
ret
-END(rcr2)
+END(amd64_rcr2)
-ENTRY(lcr3)
+ENTRY(amd64_lcr3)
movq %rdi, %cr3
ret
-END(lcr3)
+END(amd64_lcr3)
-ENTRY(rcr3)
+ENTRY(amd64_rcr3)
movq %cr3, %rax
ret
-END(rcr3)
+END(amd64_rcr3)
#endif
ENTRY(lcr4)
@@ -159,7 +194,7 @@
* If PGE is not in use, we reload CR3.
*/
#ifndef XEN
-ENTRY(tlbflushg)
+ENTRY(amd64_tlbflushg)
movq %cr4, %rax
testq $CR4_PGE, %rax
jz 1f
@@ -168,74 +203,74 @@
movq %rdx, %cr4
movq %rax, %cr4
ret
-END(tlbflushg)
+END(amd64_tlbflushg)
-ENTRY(tlbflush)
+ENTRY(amd64_tlbflush)
1:
movq %cr3, %rax
movq %rax, %cr3
ret
-END(tlbflush)
+END(amd64_tlbflush)
-ENTRY(ldr0)
+ENTRY(amd64_ldr0)
movq %rdi, %dr0
ret
-END(ldr0)
+END(amd64_ldr0)
-ENTRY(rdr0)
+ENTRY(amd64_rdr0)
movq %dr0, %rax
ret
-END(rdr0)
+END(amd64_rdr0)
-ENTRY(ldr1)
+ENTRY(amd64_ldr1)
movq %rdi, %dr1
ret
-END(ldr1)
+END(amd64_ldr1)
-ENTRY(rdr1)
+ENTRY(amd64_rdr1)
movq %dr1, %rax
ret
-END(rdr1)
+END(amd64_rdr1)
-ENTRY(ldr2)
+ENTRY(amd64_ldr2)
movq %rdi, %dr2
ret
-END(ldr2)
+END(amd64_ldr2)
-ENTRY(rdr2)
+ENTRY(amd64_rdr2)
movq %dr2, %rax
ret
-END(rdr2)
+END(amd64_rdr2)
-ENTRY(ldr3)
+ENTRY(amd64_ldr3)
movq %rdi, %dr3
ret
-END(ldr3)
+END(amd64_ldr3)
-ENTRY(rdr3)
+ENTRY(amd64_rdr3)
movq %dr3, %rax
ret
-END(rdr3)
+END(amd64_rdr3)
-ENTRY(ldr6)
+ENTRY(amd64_ldr6)
movq %rdi, %dr6
ret
-END(ldr6)
+END(amd64_ldr6)
-ENTRY(rdr6)
+ENTRY(amd64_rdr6)
movq %dr6, %rax
ret
-END(rdr6)
+END(amd64_rdr6)
-ENTRY(ldr7)
+ENTRY(amd64_ldr7)
movq %rdi, %dr7
ret
-END(ldr7)
+END(amd64_ldr7)
-ENTRY(rdr7)
+ENTRY(amd64_rdr7)
movq %dr7, %rax
ret
-END(rdr7)
+END(amd64_rdr7)
ENTRY(x86_disable_intr)
cli
--- src/sys/arch/i386/i386/cpufunc.S 2018/10/18 04:11:14 1.25
+++ src/sys/arch/i386/i386/cpufunc.S 2018/12/22 21:27:22 1.26
@@ -1,4 +1,4 @@
-/* $NetBSD: cpufunc.S,v 1.25 2018/10/18 04:11:14 cherry Exp $ */
+/* $NetBSD: cpufunc.S,v 1.26 2018/12/22 21:27:22 cherry Exp $ */
/*-
* Copyright (c) 1998, 2007 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
#include <sys/errno.h>
#include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.25 2018/10/18 04:11:14 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.26 2018/12/22 21:27:22 cherry Exp $");
#include "opt_xen.h"
@@ -47,6 +47,18 @@
#include "assym.h"
+/*
+ * These functions below should always be accessed via the corresponding wrapper
+ * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS()
+ *
+ * We use this rather roundabout method so that a runtime wrapper function may
+ * be made available for PVHVM, which could override both native and PV aliases
+ * and decide which to invoke at run time.
+ */
+
+WEAK_ALIAS(lidt, i386_lidt)
+WEAK_ALIAS(rcr3, i386_rcr3)
+
ENTRY(x86_lfence)
lock
addl $0, -4(%esp)
@@ -66,17 +78,17 @@
END(x86_mfence)
#ifndef XEN
-ENTRY(lidt)
+ENTRY(i386_lidt)
movl 4(%esp), %eax
lidt (%eax)
ret
-END(lidt)
+END(i386_lidt)
#endif /* XEN */
-ENTRY(rcr3)
+ENTRY(i386_rcr3)
movl %cr3, %eax
ret
-END(rcr3)
+END(i386_rcr3)
ENTRY(lcr4)
movl 4(%esp), %eax
--- src/sys/arch/i386/i386/i386func.S 2016/11/27 14:49:21 1.18
+++ src/sys/arch/i386/i386/i386func.S 2018/12/22 21:27:22 1.19
@@ -1,4 +1,4 @@
-/* $NetBSD: i386func.S,v 1.18 2016/11/27 14:49:21 kamil Exp $ */
+/* $NetBSD: i386func.S,v 1.19 2018/12/22 21:27:22 cherry Exp $ */
/*-
* Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -36,20 +36,53 @@
*/
#include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.18 2016/11/27 14:49:21 kamil Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.19 2018/12/22 21:27:22 cherry Exp $");
#include <machine/specialreg.h>
#include <machine/segments.h>
#include "assym.h"
-ENTRY(invlpg)
+/*
+ * These functions below should always be accessed via the corresponding wrapper
+ * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS()
+ *
+ * We use this rather roundabout method so that a runtime wrapper function may
+ * be made available for PVHVM, which could override both native and PV aliases
+ * and decide which to invoke at run time.
+ */
+
+WEAK_ALIAS(invlpg, i386_invlpg)
+WEAK_ALIAS(lldt, i386_lldt)
+WEAK_ALIAS(ltr, i386_ltr)
+WEAK_ALIAS(lcr0, i386_lcr0)
+WEAK_ALIAS(rcr0, i386_rcr0)
+WEAK_ALIAS(lcr3, i386_lcr3)
+WEAK_ALIAS(tlbflush, i386_tlbflush)
+WEAK_ALIAS(tlbflushg, i386_tlbflushg)
+WEAK_ALIAS(rdr0, i386_rdr0)
+WEAK_ALIAS(ldr0, i386_ldr0)
+WEAK_ALIAS(rdr1, i386_rdr1)
+WEAK_ALIAS(ldr1, i386_ldr1)
+WEAK_ALIAS(rdr2, i386_rdr2)
+WEAK_ALIAS(ldr2, i386_ldr2)
+WEAK_ALIAS(rdr3, i386_rdr3)
+WEAK_ALIAS(ldr3, i386_ldr3)
+WEAK_ALIAS(rdr6, i386_rdr6)
+WEAK_ALIAS(ldr6, i386_ldr6)
+WEAK_ALIAS(rdr7, i386_rdr7)
+WEAK_ALIAS(ldr7, i386_ldr7)
+WEAK_ALIAS(rcr2, i386_rcr2)
+WEAK_ALIAS(lcr2, i386_lcr2)
+WEAK_ALIAS(wbinvd, i386_wbinvd)
+
+ENTRY(i386_invlpg)
movl 4(%esp), %eax
invlpg (%eax)
ret
-END(invlpg)
+END(i386_invlpg)
-ENTRY(lldt)
+ENTRY(i386_lldt)
movl 4(%esp), %eax
cmpl %eax, CPUVAR(CURLDT)
jne 1f
@@ -58,30 +91,30 @@
movl %eax, CPUVAR(CURLDT)
lldt %ax
ret
-END(lldt)
+END(i386_lldt)
-ENTRY(ltr)
+ENTRY(i386_ltr)
movl 4(%esp), %eax
ltr %ax
ret
-END(ltr)
+END(i386_ltr)
-ENTRY(lcr0)
+ENTRY(i386_lcr0)
movl 4(%esp), %eax
movl %eax, %cr0
ret
-END(lcr0)
+END(i386_lcr0)
-ENTRY(rcr0)
+ENTRY(i386_rcr0)
movl %cr0, %eax
ret
-END(rcr0)
+END(i386_rcr0)
-ENTRY(lcr3)
+ENTRY(i386_lcr3)
movl 4(%esp), %eax
movl %eax, %cr3
ret
-END(lcr3)
+END(i386_lcr3)
/*
* Big hammer: flush all TLB entries, including ones from PTE's
@@ -103,7 +136,7 @@
* first since i486 does not have CR4. Note: the feature flag may
* be present while the actual PGE functionality not yet enabled.
*/
-ENTRY(tlbflushg)
+ENTRY(i386_tlbflushg)
testl $CPUID_PGE, _C_LABEL(cpu_feature)
jz 1f
movl %cr4, %eax
@@ -114,96 +147,96 @@
movl %edx, %cr4
movl %eax, %cr4
ret
-END(tlbflushg)
+END(i386_tlbflushg)
-ENTRY(tlbflush)
+ENTRY(i386_tlbflush)
1:
movl %cr3, %eax
movl %eax, %cr3
ret
-END(tlbflush)
+END(i386_tlbflush)
-ENTRY(ldr0)
+ENTRY(i386_ldr0)
movl 4(%esp), %eax
movl %eax, %dr0
ret
-END(ldr0)
+END(i386_ldr0)
-ENTRY(rdr0)
+ENTRY(i386_rdr0)
movl %dr0, %eax
ret
-END(rdr0)
+END(i386_rdr0)
-ENTRY(ldr1)
+ENTRY(i386_ldr1)
movl 4(%esp), %eax
movl %eax, %dr1
ret
-END(ldr1)
+END(i386_ldr1)
-ENTRY(rdr1)
+ENTRY(i386_rdr1)
movl %dr1, %eax
ret
-END(rdr1)
+END(i386_rdr1)
-ENTRY(ldr2)
+ENTRY(i386_ldr2)
movl 4(%esp), %eax
movl %eax, %dr2
ret
-END(ldr2)
+END(i386_ldr2)
-ENTRY(rdr2)
+ENTRY(i386_rdr2)
movl %dr2, %eax
ret
-END(rdr2)
+END(i386_rdr2)
-ENTRY(ldr3)
+ENTRY(i386_ldr3)
movl 4(%esp), %eax
movl %eax, %dr3
ret
-END(ldr3)
+END(i386_ldr3)
-ENTRY(rdr3)
+ENTRY(i386_rdr3)
movl %dr3, %eax
ret
-END(rdr3)
+END(i386_rdr3)
-ENTRY(ldr6)
+ENTRY(i386_ldr6)
movl 4(%esp), %eax
movl %eax, %dr6
ret
-END(ldr6)
+END(i386_ldr6)
-ENTRY(rdr6)
+ENTRY(i386_rdr6)
movl %dr6, %eax
ret
-END(rdr6)
+END(i386_rdr6)
-ENTRY(ldr7)
+ENTRY(i386_ldr7)
movl 4(%esp), %eax
movl %eax, %dr7
ret
-END(ldr7)
+END(i386_ldr7)
-ENTRY(rdr7)
+ENTRY(i386_rdr7)
movl %dr7, %eax
ret
-END(rdr7)
+END(i386_rdr7)
-ENTRY(rcr2)
+ENTRY(i386_rcr2)
movl %cr2, %eax
ret
-END(rcr2)
+END(i386_rcr2)
-ENTRY(lcr2)
+ENTRY(i386_lcr2)
movl 4(%esp), %eax
movl %eax, %cr2
ret
-END(lcr2)
+END(i386_lcr2)
-ENTRY(wbinvd)
+ENTRY(i386_wbinvd)
wbinvd
ret
-END(wbinvd)
+END(i386_wbinvd)
ENTRY(x86_disable_intr)
cli
--- src/sys/arch/xen/x86/xenfunc.c 2018/10/18 04:17:18 1.22
+++ src/sys/arch/xen/x86/xenfunc.c 2018/12/22 21:27:22 1.23
@@ -1,4 +1,4 @@
-/* $NetBSD: xenfunc.c,v 1.22 2018/10/18 04:17:18 cherry Exp $ */
+/* $NetBSD: xenfunc.c,v 1.23 2018/12/22 21:27:22 cherry Exp $ */
/*
* Copyright (c) 2004 Christian Limpach.
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.22 2018/10/18 04:17:18 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.23 2018/12/22 21:27:22 cherry Exp $");
#include <sys/param.h>
@@ -45,8 +45,74 @@
void xen_set_ldt(vaddr_t, uint32_t);
+/*
+ * We don't need to export these declarations, since they are used via
+ * linker aliasing. They should always be accessed via the
+ * corresponding wrapper function names defined in
+ * x86/include/cpufunc.h and exported as __weak_alias()
+ *
+ * We use this rather roundabout method so that a runtime wrapper
+ * function may be made available for PVHVM, which could override both
+ * native and PV aliases and decide which to invoke at run time.
+ */
+
+void xen_invlpg(vaddr_t);
+void xen_lidt(struct region_descriptor *);
+void xen_lldt(u_short);
+void xen_ltr(u_short);
+void xen_lcr0(u_long);
+u_long xen_rcr0(void);
+void xen_tlbflush(void);
+void xen_tlbflushg(void);
+register_t xen_rdr0(void);
+void xen_ldr0(register_t);
+register_t xen_rdr1(void);
+void xen_ldr1(register_t);
+register_t xen_rdr2(void);
+void xen_ldr2(register_t);
+register_t xen_rdr3(void);
+void xen_ldr3(register_t);
+register_t xen_rdr6(void);
+void xen_ldr6(register_t);
+register_t xen_rdr7(void);
+void xen_ldr7(register_t);
+void xen_wbinvd(void);
+vaddr_t xen_rcr2(void);
+
+__weak_alias(invlpg, xen_invlpg);
+__weak_alias(lidt, xen_lidt);
+__weak_alias(lldt, xen_lldt);
+__weak_alias(ltr, xen_ltr);
+__weak_alias(lcr0, xen_lcr0);
+__weak_alias(rcr0, xen_rcr0);
+__weak_alias(tlbflush, xen_tlbflush);
+__weak_alias(tlbflushg, xen_tlbflushg);
+__weak_alias(rdr0, xen_rdr0);
+__weak_alias(ldr0, xen_ldr0);
+__weak_alias(rdr1, xen_rdr1);
+__weak_alias(ldr1, xen_ldr1);
+__weak_alias(rdr2, xen_rdr2);
+__weak_alias(ldr2, xen_ldr2);
+__weak_alias(rdr3, xen_rdr3);
+__weak_alias(ldr3, xen_ldr3);
+__weak_alias(rdr6, xen_rdr6);
+__weak_alias(ldr6, xen_ldr6);
+__weak_alias(rdr7, xen_rdr7);
+__weak_alias(ldr7, xen_ldr7);
+__weak_alias(wbinvd, xen_wbinvd);
+__weak_alias(rcr2, xen_rcr2);
+
+#ifdef __x86_64__
+void xen_setusergs(int);
+__weak_alias(setusergs, xen_setusergs);
+#else
+void xen_lcr3(vaddr_t);
+__weak_alias(lcr3, xen_lcr3);
+
+#endif
+
void
-invlpg(vaddr_t addr)
+xen_invlpg(vaddr_t addr)
{
int s = splvm(); /* XXXSMP */
xpq_queue_invlpg(addr);
@@ -54,7 +120,7 @@
}
void
-lidt(struct region_descriptor *rd)
+xen_lidt(struct region_descriptor *rd)
{
/*
* We need to do this because we can't assume kmem_alloc(9)
@@ -99,7 +165,7 @@
}
void
-lldt(u_short sel)
+xen_lldt(u_short sel)
{
#ifndef __x86_64__
struct cpu_info *ci;
@@ -118,19 +184,19 @@
}
void
-ltr(u_short sel)
+xen_ltr(u_short sel)
{
panic("XXX ltr not supported\n");
}
void
-lcr0(u_long val)
+xen_lcr0(u_long val)
{
panic("XXX lcr0 not supported\n");
}
u_long
-rcr0(void)
+xen_rcr0(void)
{
/* XXX: handle X86_CR0_TS ? */
return 0;
@@ -138,7 +204,7 @@
#ifndef __x86_64__
void
-lcr3(vaddr_t val)
+xen_lcr3(vaddr_t val)
{
int s = splvm(); /* XXXSMP */
xpq_queue_pt_switch(xpmap_ptom_masked(val));
@@ -147,7 +213,7 @@
#endif
void
-tlbflush(void)
+xen_tlbflush(void)
{
int s = splvm(); /* XXXSMP */
xpq_queue_tlb_flush();
@@ -155,110 +221,110 @@
}
void
-tlbflushg(void)
+xen_tlbflushg(void)
{
tlbflush();
}
register_t
-rdr0(void)
+xen_rdr0(void)
{
return HYPERVISOR_get_debugreg(0);
}
void
-ldr0(register_t val)
+xen_ldr0(register_t val)
{
HYPERVISOR_set_debugreg(0, val);
}
register_t
-rdr1(void)
+xen_rdr1(void)
{
return HYPERVISOR_get_debugreg(1);
}
void
-ldr1(register_t val)
+xen_ldr1(register_t val)
{
HYPERVISOR_set_debugreg(1, val);
}
register_t
-rdr2(void)
+xen_rdr2(void)
{
return HYPERVISOR_get_debugreg(2);
}
void
-ldr2(register_t val)
+xen_ldr2(register_t val)
{
HYPERVISOR_set_debugreg(2, val);
}
register_t
-rdr3(void)
+xen_rdr3(void)
{
return HYPERVISOR_get_debugreg(3);
}
void
-ldr3(register_t val)
+xen_ldr3(register_t val)
{
HYPERVISOR_set_debugreg(3, val);
}
register_t
-rdr6(void)
+xen_rdr6(void)
{
return HYPERVISOR_get_debugreg(6);
}
void
-ldr6(register_t val)
+xen_ldr6(register_t val)
{
HYPERVISOR_set_debugreg(6, val);
}
register_t
-rdr7(void)
+xen_rdr7(void)
{
return HYPERVISOR_get_debugreg(7);
}
void
-ldr7(register_t val)
+xen_ldr7(register_t val)
{
HYPERVISOR_set_debugreg(7, val);
}
void
-wbinvd(void)
+xen_wbinvd(void)
{
xpq_flush_cache();
}
vaddr_t
-rcr2(void)
+xen_rcr2(void)
{
return curcpu()->ci_vcpu->arch.cr2;
}
#ifdef __x86_64__
void
-setusergs(int gssel)
+xen_setusergs(int gssel)
{
HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel);
}