untrusted comment: verify with openbsd-64-base.pub
RWQq6XmS4eDAcSxxCoQNfqniJc/p5M+mTCVZN61KQ02HV1vyUMCikT6G69Oz0T0ToHjawmB3UHoKA/yBRjg2ZUp8WaLxR/vMmAE=
OpenBSD 6.4 errata 016, March 27, 2019:
GDT and IDT limits were improperly restored during VMM context switches.
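Background, for illustration only: a VM exit restores the GDTR and IDTR base addresses from the VMCS host-state area but sets both limits to 0xFFFF and leaves the LDT selector cleared, so the host values must be saved before vmx_enter_guest() and reloaded afterwards, which is what the sgdt/sidt/sldt and bare_lgdt/lidt/lldt calls added in the diff below do. The following hypothetical userland sketch (not part of this patch, and not kernel code) simply reads the current GDTR and IDTR with SGDT/SIDT and prints their 16-bit limits; it assumes an amd64 machine where SGDT/SIDT are permitted from user mode (CR4.UMIP clear), and its region_descriptor merely mirrors the layout of the kernel structure of the same name.

/*
 * Hypothetical userland sketch: dump the GDTR/IDTR limit fields that a
 * VM exit would otherwise clobber.  Not part of this errata; build with
 * something like "cc -O2 dtr.c" on amd64.
 */
#include <stdint.h>
#include <stdio.h>

/* Mirrors the 10-byte memory image written by SGDT/SIDT on amd64. */
struct region_descriptor {
	uint16_t	rd_limit;	/* table limit (last valid byte) */
	uint64_t	rd_base;	/* table linear base address */
} __attribute__((__packed__));

static __inline void
sgdt(struct region_descriptor *p)
{
	__asm volatile("sgdt (%0)" : : "r" (p) : "memory");
}

static __inline void
sidt(struct region_descriptor *p)
{
	__asm volatile("sidt (%0)" : : "r" (p) : "memory");
}

int
main(void)
{
	struct region_descriptor gdtr, idtr;

	sgdt(&gdtr);
	sidt(&idtr);

	/* After a VM exit, both limits would read 0xffff until reloaded. */
	printf("GDTR: base 0x%016llx limit 0x%04x\n",
	    (unsigned long long)gdtr.rd_base, gdtr.rd_limit);
	printf("IDTR: base 0x%016llx limit 0x%04x\n",
	    (unsigned long long)idtr.rd_base, idtr.rd_limit);
	return 0;
}
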
Apply by doing:
    signify -Vep /etc/signify/openbsd-64-base.pub -x 016_vmmints.patch.sig \
        -m - | (cd /usr/src && patch -p0)

And then rebuild and install a new kernel:
    KK=`sysctl -n kern.osversion | cut -d# -f1`
    cd /usr/src/sys/arch/`machine`/compile/$KK
    make obj
    make config
    make
    make install

Index: sys/arch/amd64/amd64/vmm.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/vmm.c,v
diff -u -p -u -r1.221 vmm.c
--- sys/arch/amd64/amd64/vmm.c 7 Oct 2018 22:43:06 -0000 1.221
+++ sys/arch/amd64/amd64/vmm.c 26 Mar 2019 04:49:51 -0000
@@ -294,6 +294,30 @@ extern struct gate_descriptor *idt;
#define CR_CLTS 2
#define CR_LMSW 3
+static __inline void
+sidt(void *p)
+{
+ __asm volatile("sidt (%0)" : : "r" (p) : "memory");
+}
+
+static __inline void
+sgdt(void *p)
+{
+ __asm volatile("sgdt (%0)" : : "r" (p) : "memory");
+}
+
+static __inline void
+bare_lgdt(struct region_descriptor *p)
+{
+ __asm volatile("lgdt (%0)" : : "r" (p) : "memory");
+}
+
+static __inline void
+sldt(u_short *sel)
+{
+ __asm volatile("sldt (%0)" : : "r" (sel) : "memory");
+}
+
/*
* vmm_enabled
*
@@ -3919,8 +3943,9 @@ vcpu_run_vmx(struct vcpu *vcpu, struct v
struct schedstate_percpu *spc;
struct vmx_invvpid_descriptor vid;
uint64_t eii, procbased, int_st;
- uint16_t irq;
+ uint16_t irq, ldt_sel;
u_long s;
+ struct region_descriptor gdtr, idtr;
resume = 0;
irq = vrp->vrp_irq;
@@ -4121,10 +4146,18 @@ vcpu_run_vmx(struct vcpu *vcpu, struct v
break;
}
+ sgdt(&gdtr);
+ sidt(&idtr);
+ sldt(&ldt_sel);
+
KERNEL_UNLOCK();
ret = vmx_enter_guest(&vcpu->vc_control_pa,
&vcpu->vc_gueststate, resume,
curcpu()->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr);
+
+ bare_lgdt(&gdtr);
+ lidt(&idtr);
+ lldt(ldt_sel);
/*
* On exit, interrupts are disabled, and we are running with
Index: sys/arch/i386/i386/vmm.c
===================================================================
RCS file: /cvs/src/sys/arch/i386/i386/Attic/vmm.c,v
diff -u -p -u -r1.42 vmm.c
--- sys/arch/i386/i386/vmm.c 29 Aug 2018 04:51:12 -0000 1.42
+++ sys/arch/i386/i386/vmm.c 26 Mar 2019 05:25:04 -0000
@@ -275,6 +275,30 @@ extern int cpu_pae;
#define CR_CLTS 2
#define CR_LMSW 3
+static __inline void
+bare_lgdt(struct region_descriptor *p)
+{
+ __asm volatile("lgdt (%0)" : : "r" (p) : "memory");
+}
+
+static __inline void
+sidt(void *p)
+{
+ __asm volatile("sidt (%0)" : : "r" (p) : "memory");
+}
+
+static __inline void
+sgdt(void *p)
+{
+ __asm volatile("sgdt (%0)" : : "r" (p) : "memory");
+}
+
+static __inline void
+sldt(u_short *sel)
+{
+ __asm volatile("sldt (%0)" : : "r" (sel) : "memory");
+}
+
/*
* vmm_enabled
*
@@ -3598,7 +3622,8 @@ vcpu_run_vmx(struct vcpu *vcpu, struct v
struct schedstate_percpu *spc;
struct vmx_invvpid_descriptor vid;
uint32_t eii, procbased, int_st;
- uint16_t irq;
+ uint16_t irq, ldt_sel;
+ struct region_descriptor gdtr, idtr;
resume = 0;
irq = vrp->vrp_irq;
@@ -3788,6 +3813,10 @@ vcpu_run_vmx(struct vcpu *vcpu, struct v
invvpid(IA32_VMX_INVVPID_SINGLE_CTX_GLB, &vid);
}
+ sgdt(&gdtr);
+ sidt(&idtr);
+ sldt(&ldt_sel);
+
/* Start / resume the VCPU */
#ifdef VMM_DEBUG
KERNEL_ASSERT_LOCKED();
@@ -3795,6 +3824,10 @@ vcpu_run_vmx(struct vcpu *vcpu, struct v
KERNEL_UNLOCK();
ret = vmx_enter_guest(&vcpu->vc_control_pa,
&vcpu->vc_gueststate, resume, gdt.rd_base);
+
+ bare_lgdt(&gdtr);
+ lidt(&idtr);
+ lldt(ldt_sel);
exit_reason = VM_EXIT_NONE;
if (ret == 0) {