summaryrefslogtreecommitdiff
path: root/usr
diff options
context:
space:
mode:
authorPatrick Mooney <pmooney@pfmooney.com>2022-05-13 23:42:57 +0000
committerPatrick Mooney <pmooney@oxide.computer>2022-05-26 01:55:28 +0000
commit3d097f7d985318f4f57189c9c47f3b19f1721f89 (patch)
tree02acd7783b46b86d6b431c4467d2d169ed132e24 /usr
parent662993c9b2fd7e3e0a5c2e535902c2360206095a (diff)
downloadillumos-joyent-3d097f7d985318f4f57189c9c47f3b19f1721f89.tar.gz
14692 consolidate bhyve exception injection
Reviewed by: Dan Cross <cross@oxidecomputer.com>
Reviewed by: Jordan Paige Hendricks <jordan@oxidecomputer.com>
Approved by: Gordon Ross <gordon.w.ross@gmail.com>
Diffstat (limited to 'usr')
-rw-r--r--usr/src/cmd/bhyvectl/bhyvectl.c31
-rw-r--r--usr/src/uts/intel/io/vmm/amd/svm.c126
-rw-r--r--usr/src/uts/intel/io/vmm/amd/vmcb.h5
-rw-r--r--usr/src/uts/intel/io/vmm/intel/vmcs.h21
-rw-r--r--usr/src/uts/intel/io/vmm/intel/vmx.c149
-rw-r--r--usr/src/uts/intel/io/vmm/sys/vmm_kernel.h23
-rw-r--r--usr/src/uts/intel/io/vmm/vmm.c320
-rw-r--r--usr/src/uts/intel/io/vmm/vmm_sol_dev.c6
-rw-r--r--usr/src/uts/intel/sys/vmm.h28
9 files changed, 349 insertions, 360 deletions
diff --git a/usr/src/cmd/bhyvectl/bhyvectl.c b/usr/src/cmd/bhyvectl/bhyvectl.c
index cbe779a4ea..a6c86fd5fc 100644
--- a/usr/src/cmd/bhyvectl/bhyvectl.c
+++ b/usr/src/cmd/bhyvectl/bhyvectl.c
@@ -686,6 +686,7 @@ print_cpus(const char *banner, const cpuset_t *cpus)
printf("\n");
}
+#ifdef __FreeBSD__
static void
print_intinfo(const char *banner, uint64_t info)
{
@@ -716,6 +717,36 @@ print_intinfo(const char *banner, uint64_t info)
}
printf("\n");
}
+#else /* __FreeBSD__ */
+static void
+print_intinfo(const char *banner, uint64_t info)
+{
+ printf("%s:\t", banner);
+ if (VM_INTINFO_PENDING(info)) {
+ switch (VM_INTINFO_TYPE(info)) {
+ case VM_INTINFO_HWINTR:
+ printf("extint");
+ break;
+ case VM_INTINFO_NMI:
+ printf("nmi");
+ break;
+ case VM_INTINFO_SWINTR:
+ printf("swint");
+ break;
+ default:
+ printf("exception");
+ break;
+ }
+ printf(" vector %hhd", VM_INTINFO_VECTOR(info));
+ if (VM_INTINFO_HAS_ERRCODE(info)) {
+ printf(" errcode %#x", VM_INTINFO_ERRCODE(info));
+ }
+ } else {
+ printf("n/a");
+ }
+ printf("\n");
+}
+#endif /* __FreeBSD__ */
static bool
cpu_vendor_intel(void)
diff --git a/usr/src/uts/intel/io/vmm/amd/svm.c b/usr/src/uts/intel/io/vmm/amd/svm.c
index b679f8c9c0..993e759fc6 100644
--- a/usr/src/uts/intel/io/vmm/amd/svm.c
+++ b/usr/src/uts/intel/io/vmm/amd/svm.c
@@ -37,7 +37,7 @@
* http://www.illumos.org/license/CDDL.
*
* Copyright 2018 Joyent, Inc.
- * Copyright 2021 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
*/
#include <sys/cdefs.h>
@@ -920,6 +920,13 @@ svm_update_virqinfo(struct svm_softc *sc, int vcpu)
"v_intr_vector %d", __func__, ctrl->v_intr_vector));
}
+CTASSERT(VMCB_EVENTINJ_TYPE_INTR == VM_INTINFO_HWINTR);
+CTASSERT(VMCB_EVENTINJ_TYPE_NMI == VM_INTINFO_NMI);
+CTASSERT(VMCB_EVENTINJ_TYPE_EXCEPTION == VM_INTINFO_HWEXCP);
+CTASSERT(VMCB_EVENTINJ_TYPE_INTn == VM_INTINFO_SWINTR);
+CTASSERT(VMCB_EVENTINJ_EC_VALID == VM_INTINFO_DEL_ERRCODE);
+CTASSERT(VMCB_EVENTINJ_VALID == VM_INTINFO_VALID);
+
static void
svm_save_exitintinfo(struct svm_softc *svm_sc, int vcpu)
{
@@ -941,6 +948,10 @@ svm_save_exitintinfo(struct svm_softc *svm_sc, int vcpu)
VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
+ /*
+ * Relies on match between VMCB exitintinfo format and bhyve-generic
+ * format, which is ensured by CTASSERTs above.
+ */
err = vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
VERIFY0(err);
}
@@ -1050,39 +1061,51 @@ svm_clear_nmi_blocking(struct svm_softc *sc, int vcpu)
}
static void
-svm_inject_event(struct svm_softc *sc, int vcpu, uint64_t intinfo)
+svm_inject_event(struct vmcb_ctrl *ctrl, uint64_t info)
{
- struct vmcb_ctrl *ctrl;
- uint8_t vector;
- uint32_t evtype;
-
- ASSERT(VMCB_EXITINTINFO_VALID(intinfo));
+ ASSERT(VM_INTINFO_PENDING(info));
- ctrl = svm_get_vmcb_ctrl(sc, vcpu);
- vector = VMCB_EXITINTINFO_VECTOR(intinfo);
- evtype = VMCB_EXITINTINFO_TYPE(intinfo);
+ uint8_t vector = VM_INTINFO_VECTOR(info);
+ uint32_t type = VM_INTINFO_TYPE(info);
- switch (evtype) {
- case VMCB_EVENTINJ_TYPE_INTR:
- case VMCB_EVENTINJ_TYPE_NMI:
- case VMCB_EVENTINJ_TYPE_INTn:
+ /*
+ * Correct behavior depends on bhyve intinfo event types lining up with
+ * those defined by AMD for event injection in the VMCB. The CTASSERTs
+ * above svm_save_exitintinfo() ensure it.
+ */
+ switch (type) {
+ case VM_INTINFO_NMI:
+ /* Ensure vector for injected event matches its type (NMI) */
+ vector = IDT_NMI;
break;
- case VMCB_EVENTINJ_TYPE_EXCEPTION:
- VERIFY(vector <= 31);
+ case VM_INTINFO_HWINTR:
+ case VM_INTINFO_SWINTR:
+ break;
+ case VM_INTINFO_HWEXCP:
+ if (vector == IDT_NMI) {
+ /*
+ * NMIs are expected to be injected with
+ * VMCB_EVENTINJ_TYPE_NMI, rather than as an exception
+ * with the NMI vector.
+ */
+ type = VM_INTINFO_NMI;
+ }
+ VERIFY(vector < 32);
+ break;
+ default:
/*
- * NMIs are expected to be injected with VMCB_EVENTINJ_TYPE_NMI,
- * rather than as an exception with the NMI vector.
+ * Since there is not strong validation for injected event types
+ * at this point, fall back to software interrupt for those we
+ * do not recognize.
*/
- VERIFY(vector != 2);
+ type = VM_INTINFO_SWINTR;
break;
- default:
- panic("unexpected event type %x", evtype);
}
- ctrl->eventinj = VMCB_EVENTINJ_VALID | evtype | vector;
- if (VMCB_EXITINTINFO_EC_VALID(intinfo)) {
+ ctrl->eventinj = VMCB_EVENTINJ_VALID | type | vector;
+ if (VM_INTINFO_HAS_ERRCODE(info)) {
ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
- ctrl->eventinj |= (uint64_t)VMCB_EXITINTINFO_EC(intinfo) << 32;
+ ctrl->eventinj |= (uint64_t)VM_INTINFO_ERRCODE(info) << 32;
}
}
@@ -1266,7 +1289,7 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
struct vmcb_ctrl *ctrl;
struct svm_regctx *ctx;
uint64_t code, info1, info2;
- int error, errcode_valid = 0, handled, idtvec, reflect;
+ int handled;
ctx = svm_get_guest_regctx(svm_sc, vcpu);
vmcb = svm_get_vmcb(svm_sc, vcpu);
@@ -1367,37 +1390,35 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
*/
handled = 1;
break;
- case 0x40 ... 0x5F:
+ case VMCB_EXIT_EXCP0 ... VMCB_EXIT_EXCP31: {
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
- reflect = 1;
- idtvec = code - 0x40;
+
+ const uint8_t idtvec = code - VMCB_EXIT_EXCP0;
+ uint32_t errcode = 0;
+ bool reflect = true;
+ bool errcode_valid = false;
+
switch (idtvec) {
case IDT_MC:
- /*
- * Call the machine check handler by hand. Also don't
- * reflect the machine check back into the guest.
- */
- reflect = 0;
- VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
+ /* The host will handle the MCE itself. */
+ reflect = false;
vmm_call_trap(T_MCE);
break;
case IDT_PF:
- error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
- info2);
- KASSERT(error == 0, ("%s: error %d updating cr2",
- __func__, error));
+ VERIFY0(svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
+ info2));
/* fallthru */
case IDT_NP:
case IDT_SS:
case IDT_GP:
case IDT_AC:
case IDT_TS:
- errcode_valid = 1;
+ errcode_valid = true;
+ errcode = info1;
break;
case IDT_DF:
- errcode_valid = 1;
- info1 = 0;
+ errcode_valid = true;
break;
case IDT_BP:
@@ -1412,31 +1433,22 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
* event injection is identical to what it was when
* the exception originally happened.
*/
- VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
- "to zero before injecting exception %d",
- vmexit->inst_length, idtvec);
vmexit->inst_length = 0;
/* fallthru */
default:
- errcode_valid = 0;
- info1 = 0;
+ errcode_valid = false;
break;
}
- KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
- "when reflecting exception %d into guest",
- vmexit->inst_length, idtvec));
+ VERIFY0(vmexit->inst_length);
if (reflect) {
/* Reflect the exception back into the guest */
- VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
- "%d/%x into the guest", idtvec, (int)info1);
- error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
- errcode_valid, info1, 0);
- KASSERT(error == 0, ("%s: vm_inject_exception error %d",
- __func__, error));
+ VERIFY0(vm_inject_exception(svm_sc->vm, vcpu, idtvec,
+ errcode_valid, errcode, false));
}
handled = 1;
break;
+ }
case VMCB_EXIT_MSR:
handled = svm_handle_msr(svm_sc, vcpu, vmexit, info1 != 0);
break;
@@ -1586,9 +1598,7 @@ svm_inject_events(struct svm_softc *sc, int vcpu)
* by the hypervisor (e.g. #PF during instruction emulation).
*/
if (vm_entry_intinfo(sc->vm, vcpu, &intinfo)) {
- ASSERT(VMCB_EXITINTINFO_VALID(intinfo));
-
- svm_inject_event(sc, vcpu, intinfo);
+ svm_inject_event(ctrl, intinfo);
vmm_stat_incr(sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
ev_state = EIS_EV_INJECTED;
}
diff --git a/usr/src/uts/intel/io/vmm/amd/vmcb.h b/usr/src/uts/intel/io/vmm/amd/vmcb.h
index 15b076b5bb..da0f08445c 100644
--- a/usr/src/uts/intel/io/vmm/amd/vmcb.h
+++ b/usr/src/uts/intel/io/vmm/amd/vmcb.h
@@ -38,7 +38,7 @@
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
- * Copyright 2021 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
*/
#ifndef _VMCB_H_
@@ -145,7 +145,8 @@ struct svm_softc;
#define VMCB_EXIT_CR15_READ 0x0f
#define VMCB_EXIT_CR0_WRITE 0x10
#define VMCB_EXIT_CR15_WRITE 0x1f
-#define VMCB_EXIT_MC 0x52
+#define VMCB_EXIT_EXCP0 0x40
+#define VMCB_EXIT_EXCP31 0x5f
#define VMCB_EXIT_INTR 0x60
#define VMCB_EXIT_NMI 0x61
#define VMCB_EXIT_SMI 0x62
diff --git a/usr/src/uts/intel/io/vmm/intel/vmcs.h b/usr/src/uts/intel/io/vmm/intel/vmcs.h
index d61244baee..24dc2dd574 100644
--- a/usr/src/uts/intel/io/vmm/intel/vmcs.h
+++ b/usr/src/uts/intel/io/vmm/intel/vmcs.h
@@ -30,7 +30,7 @@
/*
* Copyright 2017 Joyent, Inc.
- * Copyright 2020 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
*/
#ifndef _VMCS_H_
@@ -57,17 +57,6 @@ void vmcs_clear(uintptr_t vmcs_pa);
uint64_t vmcs_read(uint32_t encoding);
void vmcs_write(uint32_t encoding, uint64_t val);
-#define vmexit_instruction_length() vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH)
-#define vmcs_guest_rip() vmcs_read(VMCS_GUEST_RIP)
-#define vmcs_instruction_error() vmcs_read(VMCS_INSTRUCTION_ERROR)
-#define vmcs_exit_reason() (vmcs_read(VMCS_EXIT_REASON) & 0xffff)
-#define vmcs_exit_qualification() vmcs_read(VMCS_EXIT_QUALIFICATION)
-#define vmcs_guest_cr3() vmcs_read(VMCS_GUEST_CR3)
-#define vmcs_gpa() vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS)
-#define vmcs_gla() vmcs_read(VMCS_GUEST_LINEAR_ADDRESS)
-#define vmcs_idt_vectoring_info() vmcs_read(VMCS_IDT_VECTORING_INFO)
-#define vmcs_idt_vectoring_err() vmcs_read(VMCS_IDT_VECTORING_ERROR)
-
#endif /* _ASM */
#endif /* _KERNEL */
@@ -258,7 +247,13 @@ void vmcs_write(uint32_t encoding, uint64_t val);
#define VMRESUME_WITH_NON_LAUNCHED_VMCS 5
/*
- * VMCS exit reasons
+ * Bits 15:0 of VMCS_EXIT_REASON field represent the "basic exit reason", as
+ * detailed below.
+ */
+#define BASIC_EXIT_REASON_MASK 0xffff
+
+/*
+ * VMCS (basic) exit reasons
*/
#define EXIT_REASON_EXCEPTION 0
#define EXIT_REASON_EXT_INTR 1
diff --git a/usr/src/uts/intel/io/vmm/intel/vmx.c b/usr/src/uts/intel/io/vmm/intel/vmx.c
index 283eab984f..58d1c8361e 100644
--- a/usr/src/uts/intel/io/vmm/intel/vmx.c
+++ b/usr/src/uts/intel/io/vmm/intel/vmx.c
@@ -896,22 +896,6 @@ vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
return (handled);
}
-static __inline void
-vmx_run_trace(struct vmx *vmx, int vcpu)
-{
-#ifdef KTR
- VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %lx", vmcs_guest_rip());
-#endif
-}
-
-static __inline void
-vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
-{
-#ifdef KTR
- VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
-#endif
-}
-
static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
@@ -1131,6 +1115,76 @@ vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
}
}
+CTASSERT(VMCS_INTR_T_HWINTR == VM_INTINFO_HWINTR);
+CTASSERT(VMCS_INTR_T_NMI == VM_INTINFO_NMI);
+CTASSERT(VMCS_INTR_T_HWEXCEPTION == VM_INTINFO_HWEXCP);
+CTASSERT(VMCS_INTR_T_SWINTR == VM_INTINFO_SWINTR);
+CTASSERT(VMCS_INTR_T_PRIV_SWEXCEPTION == VM_INTINFO_RESV5);
+CTASSERT(VMCS_INTR_T_SWEXCEPTION == VM_INTINFO_RESV6);
+CTASSERT(VMCS_IDT_VEC_ERRCODE_VALID == VM_INTINFO_DEL_ERRCODE);
+CTASSERT(VMCS_INTR_T_MASK == VM_INTINFO_MASK_TYPE);
+
+static uint64_t
+vmx_idtvec_to_intinfo(uint32_t info)
+{
+ ASSERT(info & VMCS_IDT_VEC_VALID);
+
+ const uint32_t type = info & VMCS_INTR_T_MASK;
+ const uint8_t vec = info & 0xff;
+
+ switch (type) {
+ case VMCS_INTR_T_HWINTR:
+ case VMCS_INTR_T_NMI:
+ case VMCS_INTR_T_HWEXCEPTION:
+ case VMCS_INTR_T_SWINTR:
+ case VMCS_INTR_T_PRIV_SWEXCEPTION:
+ case VMCS_INTR_T_SWEXCEPTION:
+ break;
+ default:
+ panic("unexpected event type 0x%03x", type);
+ }
+
+ uint64_t intinfo = VM_INTINFO_VALID | type | vec;
+ if (info & VMCS_IDT_VEC_ERRCODE_VALID) {
+ const uint32_t errcode = vmcs_read(VMCS_IDT_VECTORING_ERROR);
+ intinfo |= (uint64_t)errcode << 32;
+ }
+
+ return (intinfo);
+}
+
+static void
+vmx_inject_intinfo(uint64_t info)
+{
+ ASSERT(VM_INTINFO_PENDING(info));
+ ASSERT0(info & VM_INTINFO_MASK_RSVD);
+
+ /*
+ * The bhyve format matches that of the VMCS, which is ensured by the
+ * CTASSERTs above.
+ */
+ uint32_t inject = info;
+ switch (VM_INTINFO_VECTOR(info)) {
+ case IDT_BP:
+ case IDT_OF:
+ /*
+ * VT-x requires #BP and #OF to be injected as software
+ * exceptions.
+ */
+ inject &= ~VMCS_INTR_T_MASK;
+ inject |= VMCS_INTR_T_SWEXCEPTION;
+ break;
+ default:
+ break;
+ }
+
+ if (VM_INTINFO_HAS_ERRCODE(info)) {
+ vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
+ VM_INTINFO_ERRCODE(info));
+ }
+ vmcs_write(VMCS_ENTRY_INTR_INFO, inject);
+}
+
#define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \
@@ -1190,24 +1244,7 @@ vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip)
}
if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
- ASSERT(entryinfo & VMCS_INTR_VALID);
-
- info = entryinfo;
- vector = info & 0xff;
- if (vector == IDT_BP || vector == IDT_OF) {
- /*
- * VT-x requires #BP and #OF to be injected as software
- * exceptions.
- */
- info &= ~VMCS_INTR_T_MASK;
- info |= VMCS_INTR_T_SWEXCEPTION;
- }
-
- if (info & VMCS_INTR_DEL_ERRCODE) {
- vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
- }
-
- vmcs_write(VMCS_ENTRY_INTR_INFO, info);
+ vmx_inject_intinfo(entryinfo);
state = EIS_EV_INJECTED;
}
@@ -1744,7 +1781,7 @@ vmx_paging_mode(void)
static void
vmx_paging_info(struct vm_guest_paging *paging)
{
- paging->cr3 = vmcs_guest_cr3();
+ paging->cr3 = vmcs_read(VMCS_GUEST_CR3);
paging->cpl = vmx_cpl();
paging->cpu_mode = vmx_cpu_mode();
paging->paging_mode = vmx_paging_mode();
@@ -2121,9 +2158,9 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
struct vie *vie;
struct vlapic *vlapic;
struct vm_task_switch *ts;
- uint32_t idtvec_info, idtvec_err, intr_info;
+ uint32_t idtvec_info, intr_info;
uint32_t intr_type, intr_vec, reason;
- uint64_t exitintinfo, qual, gpa;
+ uint64_t qual, gpa;
CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
@@ -2158,17 +2195,11 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
* See "Information for VM Exits During Event Delivery" in Intel SDM
* for details.
*/
- idtvec_info = vmcs_idt_vectoring_info();
+ idtvec_info = vmcs_read(VMCS_IDT_VECTORING_INFO);
if (idtvec_info & VMCS_IDT_VEC_VALID) {
- idtvec_info &= ~(1 << 12); /* clear undefined bit */
- exitintinfo = idtvec_info;
- if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
- idtvec_err = vmcs_idt_vectoring_err();
- exitintinfo |= (uint64_t)idtvec_err << 32;
- }
- error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
- KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
- __func__, error));
+ /* Record exit intinfo */
+ VERIFY0(vm_exit_intinfo(vmx->vm, vcpu,
+ vmx_idtvec_to_intinfo(idtvec_info)));
/*
* If 'virtual NMIs' are being used and the VM-exit
@@ -2238,7 +2269,8 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
vmexit->inst_length = 0;
if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
ts->errcode_valid = 1;
- ts->errcode = vmcs_idt_vectoring_err();
+ ts->errcode =
+ vmcs_read(VMCS_IDT_VECTORING_ERROR);
}
}
}
@@ -2428,7 +2460,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
* memory then this must be a nested page fault otherwise
* this must be an instruction that accesses MMIO space.
*/
- gpa = vmcs_gpa();
+ gpa = vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS);
if (vm_mem_allocated(vmx->vm, vcpu, gpa) ||
apic_access_fault(vmx, vcpu, gpa)) {
vmexit->exitcode = VM_EXITCODE_PAGING;
@@ -2440,7 +2472,8 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
vmx, vcpu, vmexit, gpa, qual);
} else if (ept_emulation_fault(qual)) {
vie = vm_vie_ctx(vmx->vm, vcpu);
- vmexit_mmio_emul(vmexit, vie, gpa, vmcs_gla());
+ vmexit_mmio_emul(vmexit, vie, gpa,
+ vmcs_read(VMCS_GUEST_LINEAR_ADDRESS));
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1);
SDT_PROBE4(vmm, vmx, exit, mmiofault,
vmx, vcpu, vmexit, gpa);
@@ -2564,7 +2597,7 @@ vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
vmexit->inst_length = 0;
vmexit->exitcode = VM_EXITCODE_VMX;
vmexit->u.vmx.status = vmxctx->inst_fail_status;
- vmexit->u.vmx.inst_error = vmcs_instruction_error();
+ vmexit->u.vmx.inst_error = vmcs_read(VMCS_INSTRUCTION_ERROR);
vmexit->u.vmx.exit_reason = ~0;
vmexit->u.vmx.exit_qualification = ~0;
@@ -2720,8 +2753,7 @@ vmx_run(void *arg, int vcpu, uint64_t rip)
enum event_inject_state inject_state;
uint64_t eptgen;
- KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
- "%lx/%lx", __func__, vmcs_guest_rip(), rip));
+ ASSERT3U(vmcs_read(VMCS_GUEST_RIP), ==, rip);
handled = UNHANDLED;
@@ -2844,7 +2876,6 @@ vmx_run(void *arg, int vcpu, uint64_t rip)
vmx->eptgen[curcpu] = eptgen;
}
- vmx_run_trace(vmx, vcpu);
vcpu_ustate_change(vm, vcpu, VU_RUN);
vmx_dr_enter_guest(vmxctx);
@@ -2862,10 +2893,12 @@ vmx_run(void *arg, int vcpu, uint64_t rip)
}
/* Collect some information for VM exit processing */
- vmexit->rip = rip = vmcs_guest_rip();
- vmexit->inst_length = vmexit_instruction_length();
- vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
- vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
+ vmexit->rip = rip = vmcs_read(VMCS_GUEST_RIP);
+ vmexit->inst_length = vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH);
+ vmexit->u.vmx.exit_reason = exit_reason =
+ (vmcs_read(VMCS_EXIT_REASON) & BASIC_EXIT_REASON_MASK);
+ vmexit->u.vmx.exit_qualification =
+ vmcs_read(VMCS_EXIT_QUALIFICATION);
/* Update 'nextrip' */
vmx->state[vcpu].nextrip = rip;
diff --git a/usr/src/uts/intel/io/vmm/sys/vmm_kernel.h b/usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
index c84b33dc2e..ed6fe1c814 100644
--- a/usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
+++ b/usr/src/uts/intel/io/vmm/sys/vmm_kernel.h
@@ -166,10 +166,10 @@ int vm_set_fpu(struct vm *vm, int vcpuid, void *buf, size_t len);
int vm_run(struct vm *vm, int vcpuid, const struct vm_entry *);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_inject_nmi(struct vm *vm, int vcpu);
-int vm_nmi_pending(struct vm *vm, int vcpuid);
+bool vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
-int vm_extint_pending(struct vm *vm, int vcpuid);
+bool vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
int vm_inject_init(struct vm *vm, int vcpuid);
int vm_inject_sipi(struct vm *vm, int vcpuid, uint8_t vec);
@@ -278,8 +278,8 @@ struct vrtc *vm_rtc(struct vm *vm);
* This function should only be called in the context of the thread that is
* executing this vcpu.
*/
-int vm_inject_exception(struct vm *vm, int vcpuid, int vector, int err_valid,
- uint32_t errcode, int restart_instruction);
+int vm_inject_exception(struct vm *vm, int vcpuid, uint8_t vector,
+ bool err_valid, uint32_t errcode, bool restart_instruction);
/*
* This function is called after a VM-exit that occurred during exception or
@@ -301,10 +301,9 @@ int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);
* event that should be injected into the guest. This function combines
* nested events into a double or triple fault.
*
- * Returns 0 if there are no events that need to be injected into the guest
- * and non-zero otherwise.
+ * Returns false if there are no events that need to be injected into the guest.
*/
-int vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);
+bool vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);
int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);
@@ -344,15 +343,11 @@ void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
-/* APIs to inject faults into the guest */
-void vm_inject_fault(struct vm *vm, int vcpuid, int vector, int errcode_valid,
- int errcode);
-
void vm_inject_ud(struct vm *vm, int vcpuid);
void vm_inject_gp(struct vm *vm, int vcpuid);
-void vm_inject_ac(struct vm *vm, int vcpuid, int errcode);
-void vm_inject_ss(struct vm *vm, int vcpuid, int errcode);
-void vm_inject_pf(struct vm *vm, int vcpuid, int errcode, uint64_t cr2);
+void vm_inject_ac(struct vm *vm, int vcpuid, uint32_t errcode);
+void vm_inject_ss(struct vm *vm, int vcpuid, uint32_t errcode);
+void vm_inject_pf(struct vm *vm, int vcpuid, uint32_t errcode, uint64_t cr2);
/*
* Both SVM and VMX have complex logic for injecting events such as exceptions
diff --git a/usr/src/uts/intel/io/vmm/vmm.c b/usr/src/uts/intel/io/vmm/vmm.c
index 8699810c2c..db87dfbbf4 100644
--- a/usr/src/uts/intel/io/vmm/vmm.c
+++ b/usr/src/uts/intel/io/vmm/vmm.c
@@ -39,7 +39,7 @@
*
* Copyright 2015 Pluribus Networks Inc.
* Copyright 2018 Joyent, Inc.
- * Copyright 2021 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
* Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
*/
@@ -139,13 +139,11 @@ struct vcpu {
int reqidle; /* (i) request vcpu to idle */
struct vlapic *vlapic; /* (i) APIC device model */
enum x2apic_state x2apic_state; /* (i) APIC mode */
- uint64_t exitintinfo; /* (i) events pending at VM exit */
- int nmi_pending; /* (i) NMI pending */
- int extint_pending; /* (i) INTR pending */
- int exception_pending; /* (i) exception pending */
- int exc_vector; /* (x) exception collateral */
- int exc_errcode_valid;
- uint32_t exc_errcode;
+ uint64_t exit_intinfo; /* (i) events pending at VM exit */
+ uint64_t exc_pending; /* (i) exception pending */
+ bool nmi_pending; /* (i) NMI pending */
+ bool extint_pending; /* (i) INTR pending */
+
uint8_t sipi_vector; /* (i) SIPI vector */
hma_fpu_t *guestfpu; /* (a,i) guest fpu state */
uint64_t guest_xcr0; /* (i) guest %xcr0 register */
@@ -386,10 +384,10 @@ vcpu_init(struct vm *vm, int vcpu_id, bool create)
vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
(void) vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
vcpu->reqidle = 0;
- vcpu->exitintinfo = 0;
- vcpu->nmi_pending = 0;
- vcpu->extint_pending = 0;
- vcpu->exception_pending = 0;
+ vcpu->exit_intinfo = 0;
+ vcpu->nmi_pending = false;
+ vcpu->extint_pending = false;
+ vcpu->exc_pending = 0;
vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
(void) hma_fpu_init(vcpu->guestfpu);
vmm_stat_init(vcpu->stats);
@@ -2490,27 +2488,26 @@ int
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
{
struct vcpu *vcpu;
- int type, vector;
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
return (EINVAL);
vcpu = &vm->vcpu[vcpuid];
- if (info & VM_INTINFO_VALID) {
- type = info & VM_INTINFO_TYPE;
- vector = info & 0xff;
+ if (VM_INTINFO_PENDING(info)) {
+ const uint32_t type = VM_INTINFO_TYPE(info);
+ const uint8_t vector = VM_INTINFO_VECTOR(info);
+
if (type == VM_INTINFO_NMI && vector != IDT_NMI)
return (EINVAL);
- if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
+ if (type == VM_INTINFO_HWEXCP && vector >= 32)
return (EINVAL);
- if (info & VM_INTINFO_RSVD)
+ if (info & VM_INTINFO_MASK_RSVD)
return (EINVAL);
} else {
info = 0;
}
- VCPU_CTR2(vm, vcpuid, "%s: info1(%lx)", __func__, info);
- vcpu->exitintinfo = info;
+ vcpu->exit_intinfo = info;
return (0);
}
@@ -2525,14 +2522,10 @@ enum exc_class {
static enum exc_class
exception_class(uint64_t info)
{
- int type, vector;
-
- KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %lx", info));
- type = info & VM_INTINFO_TYPE;
- vector = info & 0xff;
+ ASSERT(VM_INTINFO_PENDING(info));
/* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
- switch (type) {
+ switch (VM_INTINFO_TYPE(info)) {
case VM_INTINFO_HWINTR:
case VM_INTINFO_SWINTR:
case VM_INTINFO_NMI:
@@ -2553,7 +2546,7 @@ exception_class(uint64_t info)
break;
}
- switch (vector) {
+ switch (VM_INTINFO_VECTOR(info)) {
case IDT_PF:
case IDT_VE:
return (EXC_PAGEFAULT);
@@ -2568,105 +2561,61 @@ exception_class(uint64_t info)
}
}
-static int
-nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
- uint64_t *retinfo)
-{
- enum exc_class exc1, exc2;
- int type1, vector1;
-
- KASSERT(info1 & VM_INTINFO_VALID, ("info1 %lx is not valid", info1));
- KASSERT(info2 & VM_INTINFO_VALID, ("info2 %lx is not valid", info2));
-
- /*
- * If an exception occurs while attempting to call the double-fault
- * handler the processor enters shutdown mode (aka triple fault).
- */
- type1 = info1 & VM_INTINFO_TYPE;
- vector1 = info1 & 0xff;
- if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
- VCPU_CTR2(vm, vcpuid, "triple fault: info1(%lx), info2(%lx)",
- info1, info2);
- (void) vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
- *retinfo = 0;
- return (0);
- }
-
- /*
- * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
- */
- exc1 = exception_class(info1);
- exc2 = exception_class(info2);
- if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
- (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
- /* Convert nested fault into a double fault. */
- *retinfo = IDT_DF;
- *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
- *retinfo |= VM_INTINFO_DEL_ERRCODE;
- } else {
- /* Handle exceptions serially */
- *retinfo = info2;
- }
- return (1);
-}
-
-static uint64_t
-vcpu_exception_intinfo(struct vcpu *vcpu)
-{
- uint64_t info = 0;
-
- if (vcpu->exception_pending) {
- info = vcpu->exc_vector & 0xff;
- info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
- if (vcpu->exc_errcode_valid) {
- info |= VM_INTINFO_DEL_ERRCODE;
- info |= (uint64_t)vcpu->exc_errcode << 32;
- }
- }
- return (info);
-}
-
-int
+/*
+ * Fetch event pending injection into the guest, if one exists.
+ *
+ * Returns true if an event is to be injected (which is placed in `retinfo`).
+ */
+bool
vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
{
- struct vcpu *vcpu;
- uint64_t info1, info2;
- int valid;
-
- KASSERT(vcpuid >= 0 &&
- vcpuid < vm->maxcpus, ("invalid vcpu %d", vcpuid));
-
- vcpu = &vm->vcpu[vcpuid];
-
- info1 = vcpu->exitintinfo;
- vcpu->exitintinfo = 0;
-
- info2 = 0;
- if (vcpu->exception_pending) {
- info2 = vcpu_exception_intinfo(vcpu);
- vcpu->exception_pending = 0;
- VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %lx",
- vcpu->exc_vector, info2);
- }
+ struct vcpu *vcpu = &vm->vcpu[vcpuid];
+ const uint64_t info1 = vcpu->exit_intinfo;
+ vcpu->exit_intinfo = 0;
+ const uint64_t info2 = vcpu->exc_pending;
+ vcpu->exc_pending = 0;
- if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
- valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
- } else if (info1 & VM_INTINFO_VALID) {
+ if (VM_INTINFO_PENDING(info1) && VM_INTINFO_PENDING(info2)) {
+ /*
+ * If an exception occurs while attempting to call the
+ * double-fault handler the processor enters shutdown mode
+ * (aka triple fault).
+ */
+ if (VM_INTINFO_TYPE(info1) == VM_INTINFO_HWEXCP &&
+ VM_INTINFO_VECTOR(info1) == IDT_DF) {
+ (void) vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
+ *retinfo = 0;
+ return (false);
+ }
+ /*
+ * "Conditions for Generating a Double Fault"
+ * Intel SDM, Vol3, Table 6-5
+ */
+ const enum exc_class exc1 = exception_class(info1);
+ const enum exc_class exc2 = exception_class(info2);
+ if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
+ (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
+ /* Convert nested fault into a double fault. */
+ *retinfo =
+ VM_INTINFO_VALID |
+ VM_INTINFO_DEL_ERRCODE |
+ VM_INTINFO_HWEXCP |
+ IDT_DF;
+ } else {
+ /* Handle exceptions serially */
+ vcpu->exit_intinfo = info1;
+ *retinfo = info2;
+ }
+ return (true);
+ } else if (VM_INTINFO_PENDING(info1)) {
*retinfo = info1;
- valid = 1;
- } else if (info2 & VM_INTINFO_VALID) {
+ return (true);
+ } else if (VM_INTINFO_PENDING(info2)) {
*retinfo = info2;
- valid = 1;
- } else {
- valid = 0;
- }
-
- if (valid) {
- VCPU_CTR4(vm, vcpuid, "%s: info1(%lx), info2(%lx), "
- "retinfo(%lx)", __func__, info1, info2, *retinfo);
+ return (true);
}
- return (valid);
+ return (false);
}
int
@@ -2678,14 +2627,14 @@ vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
return (EINVAL);
vcpu = &vm->vcpu[vcpuid];
- *info1 = vcpu->exitintinfo;
- *info2 = vcpu_exception_intinfo(vcpu);
+ *info1 = vcpu->exit_intinfo;
+ *info2 = vcpu->exc_pending;
return (0);
}
int
-vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
- uint32_t errcode, int restart_instruction)
+vm_inject_exception(struct vm *vm, int vcpuid, uint8_t vector,
+ bool errcode_valid, uint32_t errcode, bool restart_instruction)
{
struct vcpu *vcpu;
uint64_t regval;
@@ -2694,14 +2643,14 @@ vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
return (EINVAL);
- if (vector < 0 || vector >= 32)
+ if (vector >= 32)
return (EINVAL);
/*
- * NMIs (which bear an exception vector of 2) are to be injected via
- * their own specialized path using vm_inject_nmi().
+ * NMIs are to be injected via their own specialized path using
+ * vm_inject_nmi().
*/
- if (vector == 2) {
+ if (vector == IDT_NMI) {
return (EINVAL);
}
@@ -2710,14 +2659,14 @@ vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
* the guest. It is a derived exception that results from specific
* combinations of nested faults.
*/
- if (vector == IDT_DF)
+ if (vector == IDT_DF) {
return (EINVAL);
+ }
vcpu = &vm->vcpu[vcpuid];
- if (vcpu->exception_pending) {
- VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
- "pending exception %d", vector, vcpu->exc_vector);
+ if (VM_INTINFO_PENDING(vcpu->exc_pending)) {
+ /* Unable to inject exception due to one already pending */
return (EBUSY);
}
@@ -2726,9 +2675,10 @@ vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
* Exceptions don't deliver an error code in real mode.
*/
error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
- KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
- if (!(regval & CR0_PE))
- errcode_valid = 0;
+ VERIFY0(error);
+ if ((regval & CR0_PE) == 0) {
+ errcode_valid = false;
+ }
}
/*
@@ -2738,68 +2688,50 @@ vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
* one instruction or incurs an exception.
*/
error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
- KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
- __func__, error));
+ VERIFY0(error);
if (restart_instruction) {
VERIFY0(vm_restart_instruction(vm, vcpuid));
}
- vcpu->exception_pending = 1;
- vcpu->exc_vector = vector;
- vcpu->exc_errcode = errcode;
- vcpu->exc_errcode_valid = errcode_valid;
- VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
+ uint64_t val = VM_INTINFO_VALID | VM_INTINFO_HWEXCP | vector;
+ if (errcode_valid) {
+ val |= VM_INTINFO_DEL_ERRCODE;
+ val |= (uint64_t)errcode << VM_INTINFO_SHIFT_ERRCODE;
+ }
+ vcpu->exc_pending = val;
return (0);
}
void
-vm_inject_fault(struct vm *vm, int vcpuid, int vector, int errcode_valid,
- int errcode)
-{
- int error;
-
- error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
- errcode, 1);
- KASSERT(error == 0, ("vm_inject_exception error %d", error));
-}
-
-void
vm_inject_ud(struct vm *vm, int vcpuid)
{
- vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
+ VERIFY0(vm_inject_exception(vm, vcpuid, IDT_UD, false, 0, true));
}
void
vm_inject_gp(struct vm *vm, int vcpuid)
{
- vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
+ VERIFY0(vm_inject_exception(vm, vcpuid, IDT_GP, true, 0, true));
}
void
-vm_inject_ac(struct vm *vm, int vcpuid, int errcode)
+vm_inject_ac(struct vm *vm, int vcpuid, uint32_t errcode)
{
- vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
+ VERIFY0(vm_inject_exception(vm, vcpuid, IDT_AC, true, errcode, true));
}
void
-vm_inject_ss(struct vm *vm, int vcpuid, int errcode)
+vm_inject_ss(struct vm *vm, int vcpuid, uint32_t errcode)
{
- vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
+ VERIFY0(vm_inject_exception(vm, vcpuid, IDT_SS, true, errcode, true));
}
void
-vm_inject_pf(struct vm *vm, int vcpuid, int error_code, uint64_t cr2)
+vm_inject_pf(struct vm *vm, int vcpuid, uint32_t errcode, uint64_t cr2)
{
- int error;
-
- VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %x, cr2 %lx",
- error_code, cr2);
-
- error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
- KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
-
- vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
+ VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2));
+ VERIFY0(vm_inject_exception(vm, vcpuid, IDT_PF, true, errcode, true));
}
static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
@@ -2814,20 +2746,15 @@ vm_inject_nmi(struct vm *vm, int vcpuid)
vcpu = &vm->vcpu[vcpuid];
- vcpu->nmi_pending = 1;
+ vcpu->nmi_pending = true;
vcpu_notify_event(vm, vcpuid);
return (0);
}
-int
+bool
vm_nmi_pending(struct vm *vm, int vcpuid)
{
- struct vcpu *vcpu;
-
- if (vcpuid < 0 || vcpuid >= vm->maxcpus)
- panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
-
- vcpu = &vm->vcpu[vcpuid];
+ struct vcpu *vcpu = &vm->vcpu[vcpuid];
return (vcpu->nmi_pending);
}
@@ -2835,17 +2762,11 @@ vm_nmi_pending(struct vm *vm, int vcpuid)
void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
- struct vcpu *vcpu;
-
- if (vcpuid < 0 || vcpuid >= vm->maxcpus)
- panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
-
- vcpu = &vm->vcpu[vcpuid];
+ struct vcpu *vcpu = &vm->vcpu[vcpuid];
- if (vcpu->nmi_pending == 0)
- panic("vm_nmi_clear: inconsistent nmi_pending state");
+ ASSERT(vcpu->nmi_pending);
- vcpu->nmi_pending = 0;
+ vcpu->nmi_pending = false;
vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}
@@ -2861,20 +2782,15 @@ vm_inject_extint(struct vm *vm, int vcpuid)
vcpu = &vm->vcpu[vcpuid];
- vcpu->extint_pending = 1;
+ vcpu->extint_pending = true;
vcpu_notify_event(vm, vcpuid);
return (0);
}
-int
+bool
vm_extint_pending(struct vm *vm, int vcpuid)
{
- struct vcpu *vcpu;
-
- if (vcpuid < 0 || vcpuid >= vm->maxcpus)
- panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
-
- vcpu = &vm->vcpu[vcpuid];
+ struct vcpu *vcpu = &vm->vcpu[vcpuid];
return (vcpu->extint_pending);
}
@@ -2882,17 +2798,11 @@ vm_extint_pending(struct vm *vm, int vcpuid)
void
vm_extint_clear(struct vm *vm, int vcpuid)
{
- struct vcpu *vcpu;
-
- if (vcpuid < 0 || vcpuid >= vm->maxcpus)
- panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
-
- vcpu = &vm->vcpu[vcpuid];
+ struct vcpu *vcpu = &vm->vcpu[vcpuid];
- if (vcpu->extint_pending == 0)
- panic("vm_extint_clear: inconsistent extint_pending state");
+ ASSERT(vcpu->extint_pending);
- vcpu->extint_pending = 0;
+ vcpu->extint_pending = false;
vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
}
@@ -3054,9 +2964,9 @@ vcpu_arch_reset(struct vm *vm, int vcpuid, bool init_only)
VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0));
- vcpu->exitintinfo = 0;
- vcpu->exception_pending = 0;
- vcpu->nmi_pending = 0;
+ vcpu->exit_intinfo = 0;
+ vcpu->exc_pending = 0;
+ vcpu->nmi_pending = false;
vcpu->extint_pending = 0;
/*
diff --git a/usr/src/uts/intel/io/vmm/vmm_sol_dev.c b/usr/src/uts/intel/io/vmm/vmm_sol_dev.c
index 625c71c618..c8fd8b9aa4 100644
--- a/usr/src/uts/intel/io/vmm/vmm_sol_dev.c
+++ b/usr/src/uts/intel/io/vmm/vmm_sol_dev.c
@@ -14,7 +14,7 @@
* Copyright 2015 Pluribus Networks Inc.
* Copyright 2019 Joyent, Inc.
* Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
- * Copyright 2021 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
*/
#include <sys/types.h>
@@ -681,8 +681,8 @@ vmmdev_do_ioctl(vmm_softc_t *sc, int cmd, intptr_t arg, int md,
break;
}
error = vm_inject_exception(sc->vmm_vm, vcpu, vmexc.vector,
- vmexc.error_code_valid, vmexc.error_code,
- vmexc.restart_instruction);
+ vmexc.error_code_valid != 0, vmexc.error_code,
+ vmexc.restart_instruction != 0);
break;
}
case VM_INJECT_NMI: {
diff --git a/usr/src/uts/intel/sys/vmm.h b/usr/src/uts/intel/sys/vmm.h
index edafabaf15..268b2e82ce 100644
--- a/usr/src/uts/intel/sys/vmm.h
+++ b/usr/src/uts/intel/sys/vmm.h
@@ -39,7 +39,7 @@
*
* Copyright 2015 Pluribus Networks Inc.
* Copyright 2019 Joyent, Inc.
- * Copyright 2021 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
*/
#ifndef _VMM_H_
@@ -112,15 +112,29 @@ enum x2apic_state {
X2APIC_STATE_LAST
};
-#define VM_INTINFO_VECTOR(info) ((info) & 0xff)
-#define VM_INTINFO_DEL_ERRCODE 0x800
-#define VM_INTINFO_RSVD 0x7ffff000
-#define VM_INTINFO_VALID 0x80000000
-#define VM_INTINFO_TYPE 0x700
+#define VM_INTINFO_MASK_VECTOR 0xffUL
+#define VM_INTINFO_MASK_TYPE 0x700UL
+#define VM_INTINFO_MASK_RSVD 0x7ffff000UL
+#define VM_INTINFO_SHIFT_ERRCODE 32
+
+#define VM_INTINFO_VECTOR(val) ((val) & VM_INTINFO_MASK_VECTOR)
+#define VM_INTINFO_TYPE(val) ((val) & VM_INTINFO_MASK_TYPE)
+#define VM_INTINFO_ERRCODE(val) ((val) >> VM_INTINFO_SHIFT_ERRCODE)
+#define VM_INTINFO_PENDING(val) (((val) & VM_INTINFO_VALID) != 0)
+#define VM_INTINFO_HAS_ERRCODE(val) (((val) & VM_INTINFO_DEL_ERRCODE) != 0)
+
+#define VM_INTINFO_VALID (1UL << 31)
+#define VM_INTINFO_DEL_ERRCODE (1UL << 11)
+
#define VM_INTINFO_HWINTR (0 << 8)
#define VM_INTINFO_NMI (2 << 8)
-#define VM_INTINFO_HWEXCEPTION (3 << 8)
+#define VM_INTINFO_HWEXCP (3 << 8)
#define VM_INTINFO_SWINTR (4 << 8)
+/* Reserved for CPU (read: Intel) specific types */
+#define VM_INTINFO_RESV1 (1 << 8)
+#define VM_INTINFO_RESV5 (5 << 8)
+#define VM_INTINFO_RESV6 (6 << 8)
+#define VM_INTINFO_RESV7 (7 << 8)
/*
* illumos doesn't have a limitation based on SPECNAMELEN like FreeBSD does.