author     Dan McDonald <danmcd@joyent.com>   2020-12-01 10:17:05 -0500
committer  Dan McDonald <danmcd@joyent.com>   2020-12-01 10:17:11 -0500
commit     99f76d09704e2a55d2d5fda310f907fdfbdf0684 (patch)
tree       d6fa1dcf0e2517db2fdbe0730b976a62514354d3
parent     ae5074194468cd0ea2063a873fe20f520850f371 (diff)
parent     166994016fed8bc2ed47612b34708b33007a891b (diff)
download   illumos-joyent-99f76d09704e2a55d2d5fda310f907fdfbdf0684.tar.gz
[illumos-gate merge]
commit 166994016fed8bc2ed47612b34708b33007a891b
    13331 loader: term_drawrect and putimage coordinates fix
commit 2699b94cd4d1e9baf6bfcbe579328b398a9736e6
    13050 bhyve kernel should be cstyle clean

Conflicts:
    usr/src/uts/i86pc/io/vmm/vmm.c
-rw-r--r--  exception_lists/cstyle                           |  20
-rw-r--r--  usr/src/boot/Makefile.version                    |   2
-rw-r--r--  usr/src/boot/sys/boot/common/gfx_fb.c            |  19
-rw-r--r--  usr/src/compat/bhyve/sys/time.h                  |  12
-rw-r--r--  usr/src/uts/i86pc/io/vmm/amd/npt.c               |   8
-rw-r--r--  usr/src/uts/i86pc/io/vmm/amd/npt.h               |   4
-rw-r--r--  usr/src/uts/i86pc/io/vmm/amd/svm.c               | 158
-rw-r--r--  usr/src/uts/i86pc/io/vmm/amd/svm.h               |   4
-rw-r--r--  usr/src/uts/i86pc/io/vmm/amd/svm_msr.c           |   5
-rw-r--r--  usr/src/uts/i86pc/io/vmm/amd/svm_msr.h           |   4
-rw-r--r--  usr/src/uts/i86pc/io/vmm/amd/svm_softc.h         |  19
-rw-r--r--  usr/src/uts/i86pc/io/vmm/amd/vmcb.h              |  20
-rw-r--r--  usr/src/uts/i86pc/io/vmm/intel/ept.c             |  39
-rw-r--r--  usr/src/uts/i86pc/io/vmm/intel/ept.h             |   2
-rw-r--r--  usr/src/uts/i86pc/io/vmm/intel/vmcs.h            | 108
-rw-r--r--  usr/src/uts/i86pc/io/vmm/intel/vmx.c             | 323
-rw-r--r--  usr/src/uts/i86pc/io/vmm/intel/vmx.h             |   4
-rw-r--r--  usr/src/uts/i86pc/io/vmm/intel/vmx_controls.h    |   2
-rw-r--r--  usr/src/uts/i86pc/io/vmm/intel/vmx_msr.c         |  43
-rw-r--r--  usr/src/uts/i86pc/io/vmm/intel/vmx_msr.h         |   8
-rw-r--r--  usr/src/uts/i86pc/io/vmm/intel/vmx_support.s     |   2
-rw-r--r--  usr/src/uts/i86pc/io/vmm/intel/vtd.c             |  66
-rw-r--r--  usr/src/uts/i86pc/io/vmm/io/iommu.c              |  10
-rw-r--r--  usr/src/uts/i86pc/io/vmm/io/iommu.h              |   6
-rw-r--r--  usr/src/uts/i86pc/io/vmm/io/vatpic.c             |   6
-rw-r--r--  usr/src/uts/i86pc/io/vmm/io/vatpic.h             |   3
-rw-r--r--  usr/src/uts/i86pc/io/vmm/io/vatpit.c             |   8
-rw-r--r--  usr/src/uts/i86pc/io/vmm/io/vhpet.c              |   5
-rw-r--r--  usr/src/uts/i86pc/io/vmm/io/vioapic.c            |   2
-rw-r--r--  usr/src/uts/i86pc/io/vmm/io/vlapic.c             |  42
-rw-r--r--  usr/src/uts/i86pc/io/vmm/io/vlapic_priv.h        |  98
-rw-r--r--  usr/src/uts/i86pc/io/vmm/io/vpmtmr.c             |   4
-rw-r--r--  usr/src/uts/i86pc/io/vmm/io/vrtc.c               |  17
-rw-r--r--  usr/src/uts/i86pc/io/vmm/sys/vmm_kernel.h        |  17
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm.c                   |  62
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm_host.h              |   7
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm_instruction_emul.c  | 115
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm_ioport.h            |   3
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm_ktr.h               |  23
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm_lapic.c             |  16
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm_lapic.h             |   6
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm_mem.c               |   6
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm_mem.h               |   2
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm_sol_glue.c          |  24
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm_stat.c              |   4
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm_stat.h              |   4
-rw-r--r--  usr/src/uts/i86pc/io/vmm/x86.c                   |  16
-rw-r--r--  usr/src/uts/i86pc/io/vmm/x86.h                   |  40
48 files changed, 557 insertions, 861 deletions
diff --git a/exception_lists/cstyle b/exception_lists/cstyle
index c1c3e6182a..68a26baf1a 100644
--- a/exception_lists/cstyle
+++ b/exception_lists/cstyle
@@ -1438,23 +1438,7 @@ usr/src/compat/bhyve/*
usr/src/contrib/bhyve/*
usr/src/lib/libvmmapi/common/vmmapi.[ch]
usr/src/test/libc-tests/tests/qsort/*.c
-usr/src/uts/i86pc/io/vmm/amd/*.[ch]
-usr/src/uts/i86pc/io/vmm/intel/*.[chs]
-usr/src/uts/i86pc/io/vmm/io/*.[ch]
-usr/src/uts/i86pc/io/vmm/sys/vmm_kernel.h
-usr/src/uts/i86pc/io/vmm/sys/vmm_instruction_emul.h
-usr/src/uts/i86pc/io/vmm/vmm.c
-usr/src/uts/i86pc/io/vmm/vmm_host.[ch]
-usr/src/uts/i86pc/io/vmm/vmm_instruction_emul.c
-usr/src/uts/i86pc/io/vmm/vmm_ioport.[ch]
-usr/src/uts/i86pc/io/vmm/vmm_ipi.h
-usr/src/uts/i86pc/io/vmm/vmm_ktr.h
-usr/src/uts/i86pc/io/vmm/vmm_lapic.[ch]
-usr/src/uts/i86pc/io/vmm/vmm_mem.[ch]
-usr/src/uts/i86pc/io/vmm/vmm_sol_glue.c
-usr/src/uts/i86pc/io/vmm/vmm_stat.[ch]
-usr/src/uts/i86pc/io/vmm/vmm_util.[ch]
-usr/src/uts/i86pc/io/vmm/vmx_assym.s
-usr/src/uts/i86pc/io/vmm/x86.[ch]
+usr/src/uts/i86pc/io/vmm/amd/amdvi_*.[ch]
+usr/src/uts/i86pc/io/vmm/amd/ivrs_*.c
usr/src/uts/i86pc/sys/vmm.h
usr/src/uts/i86pc/sys/vmm_dev.h
diff --git a/usr/src/boot/Makefile.version b/usr/src/boot/Makefile.version
index 1d116dda12..d4000c0baf 100644
--- a/usr/src/boot/Makefile.version
+++ b/usr/src/boot/Makefile.version
@@ -34,4 +34,4 @@ LOADER_VERSION = 1.1
# Use date like formatting here, YYYY.MM.DD.XX, without leading zeroes.
# The version is processed from left to right, the version number can only
# be increased.
-BOOT_VERSION = $(LOADER_VERSION)-2020.11.25.1
+BOOT_VERSION = $(LOADER_VERSION)-2020.11.27.1
diff --git a/usr/src/boot/sys/boot/common/gfx_fb.c b/usr/src/boot/sys/boot/common/gfx_fb.c
index 56314566d6..94b9c71bef 100644
--- a/usr/src/boot/sys/boot/common/gfx_fb.c
+++ b/usr/src/boot/sys/boot/common/gfx_fb.c
@@ -1121,14 +1121,17 @@ gfx_term_drawrect(uint32_t ux1, uint32_t uy1, uint32_t ux2, uint32_t uy2)
width = vf_width / 4; /* line width */
xshift = (vf_width - width) / 2;
yshift = (vf_height - width) / 2;
- /* Terminal coordinates start from (1,1) */
- ux1--;
- uy1--;
+
+ /* Shift coordinates */
+ if (ux1 != 0)
+ ux1--;
+ if (uy1 != 0)
+ uy1--;
ux2--;
uy2--;
/* mark area used in tem */
- tem_image_display(tems.ts_active, uy1 - 1, ux1 - 1, uy2, ux2);
+ tem_image_display(tems.ts_active, uy1, ux1, uy2 + 1, ux2 + 1);
/*
* Draw horizontal lines width points thick, shifted from outer edge.
@@ -1320,10 +1323,10 @@ gfx_fb_putimage(png_t *png, uint32_t ux1, uint32_t uy1, uint32_t ux2,
*/
if (!(flags & FL_PUTIMAGE_NOSCROLL)) {
tem_image_display(tems.ts_active,
- da.row / tems.ts_font.vf_height - 1,
- da.col / tems.ts_font.vf_width - 1,
- (da.row + da.height) / tems.ts_font.vf_height - 1,
- (da.col + da.width) / tems.ts_font.vf_width - 1);
+ da.row / tems.ts_font.vf_height,
+ da.col / tems.ts_font.vf_width,
+ (da.row + da.height) / tems.ts_font.vf_height,
+ (da.col + da.width) / tems.ts_font.vf_width);
}
if ((flags & FL_PUTIMAGE_BORDER))
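
Both gfx_fb.c hunks above hinge on the same convention: terminal coordinates are 1-based while tem rows and columns are 0-based, and the old unconditional decrement could wrap an unsigned coordinate below zero. A minimal standalone sketch of the conversion the fix enforces (the function name is illustrative only, not part of the commit):

#include <stdint.h>

/* Terminal coordinates start at (1,1); tem rows/columns start at (0,0). */
static uint32_t
term_to_tem(uint32_t c)
{
	/* Only shift nonzero values so an unsigned coordinate never wraps. */
	return (c != 0 ? c - 1 : 0);
}
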
diff --git a/usr/src/compat/bhyve/sys/time.h b/usr/src/compat/bhyve/sys/time.h
index 4e0fbfc02c..48bdcc304e 100644
--- a/usr/src/compat/bhyve/sys/time.h
+++ b/usr/src/compat/bhyve/sys/time.h
@@ -11,6 +11,7 @@
/*
* Copyright 2013 Pluribus Networks Inc.
+ * Copyright 2020 Oxide Computer Company
*/
#ifndef _COMPAT_FREEBSD_SYS_TIME_H_
@@ -50,6 +51,17 @@ binuptime(struct bintime *bt)
((a)->frac cmp (b)->frac) : \
((a)->sec cmp (b)->sec))
+/*
+ * The bintime_cmp() macro is problematic for a couple reasons:
+ * 1. Bearing a lowercase name suggests it is a function rather than a macro.
+ * 2. Placing the comparison operator as the last argument runs afoul of our
+ * cstyle rules, unlike cases such as VERIFY3*().
+ *
+ * To remedy these issues in illumos bhyve, we provide a slightly modified
+ * version which addresses both problems.
+ */
+#define BINTIME_CMP(a, cmp, b) bintime_cmp((a), (b), cmp)
+
#define SBT_1S ((sbintime_t)1 << 32)
#define SBT_1M (SBT_1S * 60)
#define SBT_1MS (SBT_1S / 1000)
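
A short usage sketch for the macro added above, assuming the compat <sys/time.h> shown here is on the include path; only BINTIME_CMP and bintime_cmp come from this header, the function wrapper is illustrative:

#include <sys/time.h>

static int
deadline_passed(const struct bintime *now, const struct bintime *deadline)
{
	/* Expands to bintime_cmp((now), (deadline), >=). */
	return (BINTIME_CMP(now, >=, deadline));
}
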
diff --git a/usr/src/uts/i86pc/io/vmm/amd/npt.c b/usr/src/uts/i86pc/io/vmm/amd/npt.c
index 3f143a5d8f..d532b7b6ec 100644
--- a/usr/src/uts/i86pc/io/vmm/amd/npt.c
+++ b/usr/src/uts/i86pc/io/vmm/amd/npt.c
@@ -40,15 +40,9 @@ __FBSDID("$FreeBSD$");
#include "npt.h"
-SYSCTL_DECL(_hw_vmm);
-SYSCTL_NODE(_hw_vmm, OID_AUTO, npt, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
- NULL);
-
static int npt_flags;
-SYSCTL_INT(_hw_vmm_npt, OID_AUTO, pmap_flags, CTLFLAG_RD,
- &npt_flags, 0, NULL);
-#define NPT_IPIMASK 0xFF
+#define NPT_IPIMASK 0xFF
/*
* AMD nested page table init.
diff --git a/usr/src/uts/i86pc/io/vmm/amd/npt.h b/usr/src/uts/i86pc/io/vmm/amd/npt.h
index d90a1b14b2..95f3fbab9e 100644
--- a/usr/src/uts/i86pc/io/vmm/amd/npt.h
+++ b/usr/src/uts/i86pc/io/vmm/amd/npt.h
@@ -28,8 +28,8 @@
* $FreeBSD$
*/
-#ifndef _SVM_NPT_H_
-#define _SVM_NPT_H_
+#ifndef _SVM_NPT_H_
+#define _SVM_NPT_H_
int svm_npt_init(int ipinum);
struct vmspace *svm_npt_alloc(vm_offset_t min, vm_offset_t max);
diff --git a/usr/src/uts/i86pc/io/vmm/amd/svm.c b/usr/src/uts/i86pc/io/vmm/amd/svm.c
index 8c12f4ba04..aaa19d4bab 100644
--- a/usr/src/uts/i86pc/io/vmm/amd/svm.c
+++ b/usr/src/uts/i86pc/io/vmm/amd/svm.c
@@ -92,16 +92,16 @@ SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
/*
* SVM CPUID function 0x8000_000A, edx bit decoding.
*/
-#define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */
-#define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */
-#define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */
-#define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */
-#define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */
-#define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */
-#define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */
-#define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */
-#define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */
-#define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */
+#define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */
+#define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */
+#define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */
+#define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */
+#define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */
+#define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */
+#define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */
+#define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */
+#define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */
+#define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */
#define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */
#define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \
@@ -122,28 +122,10 @@ SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
static MALLOC_DEFINE(M_SVM, "svm", "svm");
static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
+/* SVM features advertised by CPUID.8000000AH:EDX */
static uint32_t svm_feature = ~0U; /* AMD SVM features. */
-SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
- "SVM features advertised by CPUID.8000000AH:EDX");
static int disable_npf_assist;
-SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
- &disable_npf_assist, 0, NULL);
-
-#ifdef __FreeBSD__
-/* Maximum ASIDs supported by the processor */
-static uint32_t nasid;
-SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
- "Number of ASIDs supported by this processor");
-
-/* Current ASID generation for each host cpu */
-static struct asid asid[MAXCPU];
-
-/*
- * SVM host state saved area of size 4KB for each core.
- */
-static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
-#endif /* __FreeBSD__ */
static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
@@ -191,7 +173,7 @@ svm_cleanup(void)
static int
check_svm_features(void)
{
- u_int regs[4];
+ uint_t regs[4];
/* CPUID Fn8000_000A is for SVM */
do_cpuid(0x8000000A, regs);
@@ -333,14 +315,14 @@ svm_restore(void)
#endif /* __FreeBSD__ */
/* Pentium compatible MSRs */
-#define MSR_PENTIUM_START 0
-#define MSR_PENTIUM_END 0x1FFF
+#define MSR_PENTIUM_START 0
+#define MSR_PENTIUM_END 0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
-#define MSR_AMD6TH_START 0xC0000000UL
-#define MSR_AMD6TH_END 0xC0001FFFUL
+#define MSR_AMD6TH_START 0xC0000000UL
+#define MSR_AMD6TH_END 0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
-#define MSR_AMD7TH_START 0xC0010000UL
-#define MSR_AMD7TH_END 0xC0011FFFUL
+#define MSR_AMD7TH_START 0xC0010000UL
+#define MSR_AMD7TH_END 0xC0011FFFUL
/*
* Get the index and bit position for a MSR in permission bitmap.
@@ -418,7 +400,7 @@ svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
struct vmcb_ctrl *ctrl;
- KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));
+ KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
return (ctrl->intercept[idx] & bitmask ? 1 : 0);
@@ -431,7 +413,7 @@ svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
struct vmcb_ctrl *ctrl;
uint32_t oldval;
- KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));
+ KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
oldval = ctrl->intercept[idx];
@@ -940,7 +922,7 @@ svm_save_exitintinfo(struct svm_softc *svm_sc, int vcpu)
* that was being delivered.
*/
VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
- intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
+ intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}
@@ -1186,7 +1168,7 @@ gpf:
}
static int
-emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val)
+emulate_wrmsr(struct svm_softc *sc, int vcpu, uint_t num, uint64_t val)
{
int error;
@@ -1201,7 +1183,7 @@ emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val)
}
static int
-emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num)
+emulate_rdmsr(struct svm_softc *sc, int vcpu, uint_t num)
{
struct vmcb_state *state;
struct svm_regctx *ctx;
@@ -1223,50 +1205,6 @@ emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num)
return (error);
}
-#ifdef KTR
-static const char *
-exit_reason_to_str(uint64_t reason)
-{
- static char reasonbuf[32];
-
- switch (reason) {
- case VMCB_EXIT_INVALID:
- return ("invalvmcb");
- case VMCB_EXIT_SHUTDOWN:
- return ("shutdown");
- case VMCB_EXIT_NPF:
- return ("nptfault");
- case VMCB_EXIT_PAUSE:
- return ("pause");
- case VMCB_EXIT_HLT:
- return ("hlt");
- case VMCB_EXIT_CPUID:
- return ("cpuid");
- case VMCB_EXIT_IO:
- return ("inout");
- case VMCB_EXIT_MC:
- return ("mchk");
- case VMCB_EXIT_INTR:
- return ("extintr");
- case VMCB_EXIT_NMI:
- return ("nmi");
- case VMCB_EXIT_VINTR:
- return ("vintr");
- case VMCB_EXIT_MSR:
- return ("msr");
- case VMCB_EXIT_IRET:
- return ("iret");
- case VMCB_EXIT_MONITOR:
- return ("monitor");
- case VMCB_EXIT_MWAIT:
- return ("mwait");
- default:
- snprintf(reasonbuf, sizeof(reasonbuf), "%lx", reason);
- return (reasonbuf);
- }
-}
-#endif /* KTR */
-
/*
* From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
* that are due to instruction intercepts as well as MSR and IOIO intercepts
@@ -1551,10 +1489,6 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
break;
}
- VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %lx/%d",
- handled ? "handled" : "unhandled", exit_reason_to_str(code),
- vmexit->rip, vmexit->inst_length);
-
DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, vmexit->rip, uint32_t,
code);
@@ -1790,7 +1724,7 @@ svm_inject_recheck(struct svm_softc *sc, int vcpu,
#ifdef __FreeBSD__
static void
-check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
+check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, uint_t thiscpu)
{
struct svm_vcpu *vcpustate;
struct vmcb_ctrl *ctrl;
@@ -1816,26 +1750,26 @@ check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
* the 'vcpustate'. This happens when the host invalidates pages
* belonging to the guest.
*
- * asidgen eptgen Action
+ * asidgen eptgen Action
* mismatch mismatch
- * 0 0 (a)
- * 0 1 (b1) or (b2)
- * 1 0 (c)
- * 1 1 (d)
+ * 0 0 (a)
+ * 0 1 (b1) or (b2)
+ * 1 0 (c)
+ * 1 1 (d)
*
- * (a) There is no mismatch in eptgen or ASID generation and therefore
- * no further action is needed.
+ * (a) There is no mismatch in eptgen or ASID generation and therefore
+ * no further action is needed.
*
- * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
- * retained and the TLB entries associated with this ASID
- * are flushed by VMRUN.
+ * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
+ * retained and the TLB entries associated with this ASID
+ * are flushed by VMRUN.
*
- * (b2) If the cpu does not support FlushByAsid then a new ASID is
- * allocated.
+ * (b2) If the cpu does not support FlushByAsid then a new ASID is
+ * allocated.
*
- * (c) A new ASID is allocated.
+ * (c) A new ASID is allocated.
*
- * (d) A new ASID is allocated.
+ * (d) A new ASID is allocated.
*/
alloc_asid = false;
@@ -1893,7 +1827,7 @@ check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
}
#else /* __FreeBSD__ */
static void
-check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
+check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, uint_t thiscpu)
{
struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpuid);
struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
@@ -1916,15 +1850,13 @@ check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
static __inline void
disable_gintr(void)
{
-
__asm __volatile("clgi");
}
static __inline void
enable_gintr(void)
{
-
- __asm __volatile("stgi");
+ __asm __volatile("stgi");
}
static __inline void
@@ -1986,7 +1918,7 @@ svm_dr_leave_guest(struct svm_regctx *gctx)
*/
static int
svm_vmrun(void *arg, int vcpu, uint64_t rip, pmap_t pmap,
- struct vm_eventinfo *evinfo)
+ struct vm_eventinfo *evinfo)
{
struct svm_regctx *gctx;
struct svm_softc *svm_sc;
@@ -2538,7 +2470,8 @@ svm_vlapic_init(void *arg, int vcpuid)
struct vlapic *vlapic;
svm_sc = arg;
- vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
+ vlapic = malloc(sizeof (struct vlapic), M_SVM_VLAPIC,
+ M_WAITOK | M_ZERO);
vlapic->vm = svm_sc->vm;
vlapic->vcpuid = vcpuid;
vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];
@@ -2551,9 +2484,8 @@ svm_vlapic_init(void *arg, int vcpuid)
static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{
-
- vlapic_cleanup(vlapic);
- free(vlapic, M_SVM_VLAPIC);
+ vlapic_cleanup(vlapic);
+ free(vlapic, M_SVM_VLAPIC);
}
#ifndef __FreeBSD__
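
For reference back to the (a)-(d) table in check_asid() above: it reduces to "always reallocate on an ASID-generation mismatch; on an eptgen-only mismatch keep the ASID when FlushByAsid is available, otherwise reallocate". A condensed sketch under that reading (the names here are illustrative, not the identifiers used in svm.c):

#include <stdbool.h>

static bool
need_new_asid(bool asidgen_mismatch, bool eptgen_mismatch, bool flush_by_asid)
{
	if (asidgen_mismatch)
		return (true);			/* cases (c) and (d) */
	if (eptgen_mismatch)
		return (!flush_by_asid);	/* (b1) keep ASID, (b2) realloc */
	return (false);				/* case (a): nothing to do */
}
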
diff --git a/usr/src/uts/i86pc/io/vmm/amd/svm.h b/usr/src/uts/i86pc/io/vmm/amd/svm.h
index 19739884c2..a3a83dba19 100644
--- a/usr/src/uts/i86pc/io/vmm/amd/svm.h
+++ b/usr/src/uts/i86pc/io/vmm/amd/svm.h
@@ -28,8 +28,8 @@
* $FreeBSD$
*/
-#ifndef _SVM_H_
-#define _SVM_H_
+#ifndef _SVM_H_
+#define _SVM_H_
/*
* Guest register state that is saved outside the VMCB.
diff --git a/usr/src/uts/i86pc/io/vmm/amd/svm_msr.c b/usr/src/uts/i86pc/io/vmm/amd/svm_msr.c
index abea850244..234631def4 100644
--- a/usr/src/uts/i86pc/io/vmm/amd/svm_msr.c
+++ b/usr/src/uts/i86pc/io/vmm/amd/svm_msr.c
@@ -106,7 +106,6 @@ svm_msr_guest_init(struct svm_softc *sc, int vcpu)
* There are no guest MSRs that are saved/restored "by hand" so nothing
* more to do here.
*/
- return;
}
void
@@ -144,7 +143,7 @@ svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
}
int
-svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result)
+svm_rdmsr(struct svm_softc *sc, int vcpu, uint_t num, uint64_t *result)
{
int error = 0;
@@ -180,7 +179,7 @@ svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result)
}
int
-svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val)
+svm_wrmsr(struct svm_softc *sc, int vcpu, uint_t num, uint64_t val)
{
int error = 0;
diff --git a/usr/src/uts/i86pc/io/vmm/amd/svm_msr.h b/usr/src/uts/i86pc/io/vmm/amd/svm_msr.h
index e47b0548ac..03ef2acfe7 100644
--- a/usr/src/uts/i86pc/io/vmm/amd/svm_msr.h
+++ b/usr/src/uts/i86pc/io/vmm/amd/svm_msr.h
@@ -38,7 +38,7 @@ void svm_msr_guest_init(struct svm_softc *sc, int vcpu);
void svm_msr_guest_enter(struct svm_softc *sc, int vcpu);
void svm_msr_guest_exit(struct svm_softc *sc, int vcpu);
-int svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val);
-int svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result);
+int svm_wrmsr(struct svm_softc *sc, int vcpu, uint_t num, uint64_t val);
+int svm_rdmsr(struct svm_softc *sc, int vcpu, uint_t num, uint64_t *result);
#endif /* _SVM_MSR_H_ */
diff --git a/usr/src/uts/i86pc/io/vmm/amd/svm_softc.h b/usr/src/uts/i86pc/io/vmm/amd/svm_softc.h
index 0b996d0ab4..61f8d38898 100644
--- a/usr/src/uts/i86pc/io/vmm/amd/svm_softc.h
+++ b/usr/src/uts/i86pc/io/vmm/amd/svm_softc.h
@@ -28,11 +28,11 @@
* $FreeBSD$
*/
-#ifndef _SVM_SOFTC_H_
-#define _SVM_SOFTC_H_
+#ifndef _SVM_SOFTC_H_
+#define _SVM_SOFTC_H_
-#define SVM_IO_BITMAP_SIZE (3 * PAGE_SIZE)
-#define SVM_MSR_BITMAP_SIZE (2 * PAGE_SIZE)
+#define SVM_IO_BITMAP_SIZE (3 * PAGE_SIZE)
+#define SVM_MSR_BITMAP_SIZE (2 * PAGE_SIZE)
#ifdef __FreeBSD__
struct asid {
@@ -43,7 +43,7 @@ struct asid {
#include <sys/hma.h>
/* This must match HOST_MSR_NUM in svm_msr.c (where it is CTASSERTed) */
-#define SVM_HOST_MSR_NUM 4
+#define SVM_HOST_MSR_NUM 4
#endif /* __FreeBSD__ */
/*
@@ -55,7 +55,7 @@ struct svm_vcpu {
struct svm_regctx swctx; /* software saved vcpu context */
uint64_t vmcb_pa; /* VMCB physical address */
uint64_t nextrip; /* next instruction to be executed by guest */
- int lastcpu; /* host cpu that the vcpu last ran on */
+ int lastcpu; /* host cpu that the vcpu last ran on */
uint32_t dirty; /* state cache bits that must be cleared */
long eptgen; /* pmap->pm_eptgen when the vcpu last ran */
#ifdef __FreeBSD__
@@ -121,11 +121,10 @@ svm_get_guest_regctx(struct svm_softc *sc, int vcpu)
static __inline void
svm_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
{
- struct svm_vcpu *vcpustate;
+ struct svm_vcpu *vcpustate;
- vcpustate = svm_get_vcpu(sc, vcpu);
-
- vcpustate->dirty |= dirtybits;
+ vcpustate = svm_get_vcpu(sc, vcpu);
+ vcpustate->dirty |= dirtybits;
}
#endif /* _SVM_SOFTC_H_ */
diff --git a/usr/src/uts/i86pc/io/vmm/amd/vmcb.h b/usr/src/uts/i86pc/io/vmm/amd/vmcb.h
index 1c002aee7b..41bbf98097 100644
--- a/usr/src/uts/i86pc/io/vmm/amd/vmcb.h
+++ b/usr/src/uts/i86pc/io/vmm/amd/vmcb.h
@@ -46,7 +46,7 @@
struct svm_softc;
-#define BIT(n) (1ULL << n)
+#define BIT(n) (1ULL << n)
/*
* Secure Virtual Machine: AMD64 Programmer's Manual Vol2, Chapter 15
@@ -186,11 +186,11 @@ struct svm_softc;
* EXITINTINFO, Interrupt exit info for all intrecepts.
* Section 15.7.2, Intercepts during IDT Interrupt Delivery.
*/
-#define VMCB_EXITINTINFO_VECTOR(x) ((x) & 0xFF)
-#define VMCB_EXITINTINFO_TYPE(x) ((x) & (0x7 << 8))
-#define VMCB_EXITINTINFO_EC_VALID(x) (((x) & BIT(11)) != 0)
-#define VMCB_EXITINTINFO_VALID(x) (((x) & BIT(31)) != 0)
-#define VMCB_EXITINTINFO_EC(x) (((x) >> 32) & 0xFFFFFFFF)
+#define VMCB_EXITINTINFO_VECTOR(x) ((x) & 0xFF)
+#define VMCB_EXITINTINFO_TYPE(x) ((x) & (0x7 << 8))
+#define VMCB_EXITINTINFO_EC_VALID(x) (((x) & BIT(11)) != 0)
+#define VMCB_EXITINTINFO_VALID(x) (((x) & BIT(31)) != 0)
+#define VMCB_EXITINTINFO_EC(x) (((x) >> 32) & 0xFFFFFFFF)
/* Offset of various VMCB fields. */
#define VMCB_OFF_CTRL(x) (x)
@@ -229,7 +229,7 @@ struct vmcb_segment {
uint32_t limit;
uint64_t base;
};
-CTASSERT(sizeof(struct vmcb_segment) == 16);
+CTASSERT(sizeof (struct vmcb_segment) == 16);
/* Convert to/from vmcb segment access to generic (VMX) access */
#define VMCB_ATTR2ACCESS(attr) ((((attr) & 0xf00) << 4) | ((attr) & 0xff))
@@ -313,7 +313,7 @@ struct vmcb_ctrl {
uint64_t vmsa_pa; /* 0x108: VMSA pointer */
uint64_t _pad8[94]; /* 0x110-0x3FF: Reserved */
};
-CTASSERT(sizeof(struct vmcb_ctrl) == 1024);
+CTASSERT(sizeof (struct vmcb_ctrl) == 1024);
CTASSERT(offsetof(struct vmcb_ctrl, vmsa_pa) == 0x108);
struct vmcb_state {
@@ -361,7 +361,7 @@ struct vmcb_state {
uint64_t int_to; /* 0x290 */
uint64_t _pad7[301]; /* Reserved up to end of VMCB */
};
-CTASSERT(sizeof(struct vmcb_state) == 0xC00);
+CTASSERT(sizeof (struct vmcb_state) == 0xC00);
CTASSERT(offsetof(struct vmcb_state, int_to) == 0x290);
/*
@@ -377,7 +377,7 @@ struct vmcb {
struct vmcb_ctrl ctrl;
struct vmcb_state state;
};
-CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
+CTASSERT(sizeof (struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);
struct vmcb_segment *vmcb_segptr(struct vmcb *vmcb, int type);
diff --git a/usr/src/uts/i86pc/io/vmm/intel/ept.c b/usr/src/uts/i86pc/io/vmm/intel/ept.c
index 5e3bd6d309..672a452111 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/ept.c
+++ b/usr/src/uts/i86pc/io/vmm/intel/ept.c
@@ -88,8 +88,6 @@ SYSCTL_NODE(_hw_vmm, OID_AUTO, ept, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
static int ept_enable_ad_bits;
static int ept_pmap_flags;
-SYSCTL_INT(_hw_vmm_ept, OID_AUTO, pmap_flags, CTLFLAG_RD,
- &ept_pmap_flags, 0, NULL);
int
ept_init(int ipinum)
@@ -136,43 +134,8 @@ ept_init(int ipinum)
return (0);
}
-#if 0
-static void
-ept_dump(uint64_t *ptp, int nlevels)
-{
- int i, t, tabs;
- uint64_t *ptpnext, ptpval;
-
- if (--nlevels < 0)
- return;
-
- tabs = 3 - nlevels;
- for (t = 0; t < tabs; t++)
- printf("\t");
- printf("PTP = %p\n", ptp);
-
- for (i = 0; i < 512; i++) {
- ptpval = ptp[i];
-
- if (ptpval == 0)
- continue;
-
- for (t = 0; t < tabs; t++)
- printf("\t");
- printf("%3d 0x%016lx\n", i, ptpval);
-
- if (nlevels != 0 && (ptpval & EPT_PG_SUPERPAGE) == 0) {
- ptpnext = (uint64_t *)
- PHYS_TO_DMAP(ptpval & EPT_ADDR_MASK);
- ept_dump(ptpnext, nlevels);
- }
- }
-}
-#endif
-
-
void
-ept_invalidate_mappings(u_long eptp)
+ept_invalidate_mappings(ulong_t eptp)
{
hma_vmx_invept_allcpus((uintptr_t)eptp);
}
diff --git a/usr/src/uts/i86pc/io/vmm/intel/ept.h b/usr/src/uts/i86pc/io/vmm/intel/ept.h
index 4a029e8b22..e4a6d6c959 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/ept.h
+++ b/usr/src/uts/i86pc/io/vmm/intel/ept.h
@@ -34,7 +34,7 @@
struct vmx;
int ept_init(int ipinum);
-void ept_invalidate_mappings(u_long eptp);
+void ept_invalidate_mappings(ulong_t eptp);
struct vmspace *ept_vmspace_alloc(vm_offset_t min, vm_offset_t max);
void ept_vmspace_free(struct vmspace *vmspace);
uint64_t eptp(uint64_t pml4);
diff --git a/usr/src/uts/i86pc/io/vmm/intel/vmcs.h b/usr/src/uts/i86pc/io/vmm/intel/vmcs.h
index 1713872556..d61244baee 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/vmcs.h
+++ b/usr/src/uts/i86pc/io/vmm/intel/vmcs.h
@@ -41,7 +41,7 @@
struct vmcs {
uint32_t identifier;
uint32_t abort_code;
- char _impl_specific[PAGE_SIZE - sizeof(uint32_t) * 2];
+ char _impl_specific[PAGE_SIZE - sizeof (uint32_t) * 2];
};
CTASSERT(sizeof (struct vmcs) == PAGE_SIZE);
@@ -191,7 +191,7 @@ void vmcs_write(uint32_t encoding, uint64_t val);
#define VMCS_GUEST_TR_ACCESS_RIGHTS 0x00004822
#define VMCS_GUEST_INTERRUPTIBILITY 0x00004824
#define VMCS_GUEST_ACTIVITY 0x00004826
-#define VMCS_GUEST_SMBASE 0x00004828
+#define VMCS_GUEST_SMBASE 0x00004828
#define VMCS_GUEST_IA32_SYSENTER_CS 0x0000482A
#define VMCS_PREEMPTION_TIMER_VALUE 0x0000482E
@@ -260,59 +260,59 @@ void vmcs_write(uint32_t encoding, uint64_t val);
/*
* VMCS exit reasons
*/
-#define EXIT_REASON_EXCEPTION 0
-#define EXIT_REASON_EXT_INTR 1
-#define EXIT_REASON_TRIPLE_FAULT 2
-#define EXIT_REASON_INIT 3
-#define EXIT_REASON_SIPI 4
-#define EXIT_REASON_IO_SMI 5
-#define EXIT_REASON_SMI 6
-#define EXIT_REASON_INTR_WINDOW 7
-#define EXIT_REASON_NMI_WINDOW 8
-#define EXIT_REASON_TASK_SWITCH 9
-#define EXIT_REASON_CPUID 10
-#define EXIT_REASON_GETSEC 11
-#define EXIT_REASON_HLT 12
-#define EXIT_REASON_INVD 13
-#define EXIT_REASON_INVLPG 14
-#define EXIT_REASON_RDPMC 15
-#define EXIT_REASON_RDTSC 16
-#define EXIT_REASON_RSM 17
-#define EXIT_REASON_VMCALL 18
-#define EXIT_REASON_VMCLEAR 19
-#define EXIT_REASON_VMLAUNCH 20
-#define EXIT_REASON_VMPTRLD 21
-#define EXIT_REASON_VMPTRST 22
-#define EXIT_REASON_VMREAD 23
-#define EXIT_REASON_VMRESUME 24
-#define EXIT_REASON_VMWRITE 25
-#define EXIT_REASON_VMXOFF 26
-#define EXIT_REASON_VMXON 27
-#define EXIT_REASON_CR_ACCESS 28
-#define EXIT_REASON_DR_ACCESS 29
-#define EXIT_REASON_INOUT 30
-#define EXIT_REASON_RDMSR 31
-#define EXIT_REASON_WRMSR 32
-#define EXIT_REASON_INVAL_VMCS 33
-#define EXIT_REASON_INVAL_MSR 34
-#define EXIT_REASON_MWAIT 36
-#define EXIT_REASON_MTF 37
-#define EXIT_REASON_MONITOR 39
-#define EXIT_REASON_PAUSE 40
-#define EXIT_REASON_MCE_DURING_ENTRY 41
-#define EXIT_REASON_TPR 43
-#define EXIT_REASON_APIC_ACCESS 44
+#define EXIT_REASON_EXCEPTION 0
+#define EXIT_REASON_EXT_INTR 1
+#define EXIT_REASON_TRIPLE_FAULT 2
+#define EXIT_REASON_INIT 3
+#define EXIT_REASON_SIPI 4
+#define EXIT_REASON_IO_SMI 5
+#define EXIT_REASON_SMI 6
+#define EXIT_REASON_INTR_WINDOW 7
+#define EXIT_REASON_NMI_WINDOW 8
+#define EXIT_REASON_TASK_SWITCH 9
+#define EXIT_REASON_CPUID 10
+#define EXIT_REASON_GETSEC 11
+#define EXIT_REASON_HLT 12
+#define EXIT_REASON_INVD 13
+#define EXIT_REASON_INVLPG 14
+#define EXIT_REASON_RDPMC 15
+#define EXIT_REASON_RDTSC 16
+#define EXIT_REASON_RSM 17
+#define EXIT_REASON_VMCALL 18
+#define EXIT_REASON_VMCLEAR 19
+#define EXIT_REASON_VMLAUNCH 20
+#define EXIT_REASON_VMPTRLD 21
+#define EXIT_REASON_VMPTRST 22
+#define EXIT_REASON_VMREAD 23
+#define EXIT_REASON_VMRESUME 24
+#define EXIT_REASON_VMWRITE 25
+#define EXIT_REASON_VMXOFF 26
+#define EXIT_REASON_VMXON 27
+#define EXIT_REASON_CR_ACCESS 28
+#define EXIT_REASON_DR_ACCESS 29
+#define EXIT_REASON_INOUT 30
+#define EXIT_REASON_RDMSR 31
+#define EXIT_REASON_WRMSR 32
+#define EXIT_REASON_INVAL_VMCS 33
+#define EXIT_REASON_INVAL_MSR 34
+#define EXIT_REASON_MWAIT 36
+#define EXIT_REASON_MTF 37
+#define EXIT_REASON_MONITOR 39
+#define EXIT_REASON_PAUSE 40
+#define EXIT_REASON_MCE_DURING_ENTRY 41
+#define EXIT_REASON_TPR 43
+#define EXIT_REASON_APIC_ACCESS 44
#define EXIT_REASON_VIRTUALIZED_EOI 45
-#define EXIT_REASON_GDTR_IDTR 46
-#define EXIT_REASON_LDTR_TR 47
-#define EXIT_REASON_EPT_FAULT 48
-#define EXIT_REASON_EPT_MISCONFIG 49
-#define EXIT_REASON_INVEPT 50
-#define EXIT_REASON_RDTSCP 51
-#define EXIT_REASON_VMX_PREEMPT 52
-#define EXIT_REASON_INVVPID 53
-#define EXIT_REASON_WBINVD 54
-#define EXIT_REASON_XSETBV 55
+#define EXIT_REASON_GDTR_IDTR 46
+#define EXIT_REASON_LDTR_TR 47
+#define EXIT_REASON_EPT_FAULT 48
+#define EXIT_REASON_EPT_MISCONFIG 49
+#define EXIT_REASON_INVEPT 50
+#define EXIT_REASON_RDTSCP 51
+#define EXIT_REASON_VMX_PREEMPT 52
+#define EXIT_REASON_INVVPID 53
+#define EXIT_REASON_WBINVD 54
+#define EXIT_REASON_XSETBV 55
#define EXIT_REASON_APIC_WRITE 56
#define EXIT_REASON_RDRAND 57
#define EXIT_REASON_INVPCID 58
diff --git a/usr/src/uts/i86pc/io/vmm/intel/vmx.c b/usr/src/uts/i86pc/io/vmm/intel/vmx.c
index 1f670ef3b3..54a03b1d3e 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/vmx.c
+++ b/usr/src/uts/i86pc/io/vmm/intel/vmx.c
@@ -96,36 +96,36 @@ __FBSDID("$FreeBSD$");
#define PINBASED_CTLS_ONE_SETTING \
(PINBASED_EXTINT_EXITING | \
- PINBASED_NMI_EXITING | \
- PINBASED_VIRTUAL_NMI)
+ PINBASED_NMI_EXITING | \
+ PINBASED_VIRTUAL_NMI)
#define PINBASED_CTLS_ZERO_SETTING 0
-#define PROCBASED_CTLS_WINDOW_SETTING \
+#define PROCBASED_CTLS_WINDOW_SETTING \
(PROCBASED_INT_WINDOW_EXITING | \
- PROCBASED_NMI_WINDOW_EXITING)
+ PROCBASED_NMI_WINDOW_EXITING)
#ifdef __FreeBSD__
#define PROCBASED_CTLS_ONE_SETTING \
(PROCBASED_SECONDARY_CONTROLS | \
- PROCBASED_MWAIT_EXITING | \
- PROCBASED_MONITOR_EXITING | \
- PROCBASED_IO_EXITING | \
- PROCBASED_MSR_BITMAPS | \
- PROCBASED_CTLS_WINDOW_SETTING | \
- PROCBASED_CR8_LOAD_EXITING | \
- PROCBASED_CR8_STORE_EXITING)
+ PROCBASED_MWAIT_EXITING | \
+ PROCBASED_MONITOR_EXITING | \
+ PROCBASED_IO_EXITING | \
+ PROCBASED_MSR_BITMAPS | \
+ PROCBASED_CTLS_WINDOW_SETTING | \
+ PROCBASED_CR8_LOAD_EXITING | \
+ PROCBASED_CR8_STORE_EXITING)
#else
/* We consider TSC offset a necessity for unsynched TSC handling */
#define PROCBASED_CTLS_ONE_SETTING \
(PROCBASED_SECONDARY_CONTROLS | \
- PROCBASED_TSC_OFFSET | \
- PROCBASED_MWAIT_EXITING | \
- PROCBASED_MONITOR_EXITING | \
- PROCBASED_IO_EXITING | \
- PROCBASED_MSR_BITMAPS | \
- PROCBASED_CTLS_WINDOW_SETTING | \
- PROCBASED_CR8_LOAD_EXITING | \
- PROCBASED_CR8_STORE_EXITING)
+ PROCBASED_TSC_OFFSET | \
+ PROCBASED_MWAIT_EXITING | \
+ PROCBASED_MONITOR_EXITING | \
+ PROCBASED_IO_EXITING | \
+ PROCBASED_MSR_BITMAPS | \
+ PROCBASED_CTLS_WINDOW_SETTING | \
+ PROCBASED_CR8_LOAD_EXITING | \
+ PROCBASED_CR8_STORE_EXITING)
#endif /* __FreeBSD__ */
#define PROCBASED_CTLS_ZERO_SETTING \
@@ -175,24 +175,13 @@ static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;
static uint64_t cr0_ones_mask, cr0_zeros_mask;
-SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
- &cr0_ones_mask, 0, NULL);
-SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
- &cr0_zeros_mask, 0, NULL);
static uint64_t cr4_ones_mask, cr4_zeros_mask;
-SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
- &cr4_ones_mask, 0, NULL);
-SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
- &cr4_zeros_mask, 0, NULL);
static int vmx_initialized;
-SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
- &vmx_initialized, 0, "Intel VMX initialized");
+/* Do not flush RSB upon vmexit */
static int no_flush_rsb;
-SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, no_flush_rsb, CTLFLAG_RW,
- &no_flush_rsb, 0, "Do not flush RSB upon vmexit");
/*
* Optional capabilities
@@ -204,42 +193,31 @@ static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap,
NULL);
#endif
+/* HLT triggers a VM-exit */
static int cap_halt_exit;
-SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
- "HLT triggers a VM-exit");
+/* PAUSE triggers a VM-exit */
static int cap_pause_exit;
-SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
- 0, "PAUSE triggers a VM-exit");
+/* Monitor trap flag */
static int cap_monitor_trap;
-SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
- &cap_monitor_trap, 0, "Monitor trap flag");
+/* Guests are allowed to use INVPCID */
static int cap_invpcid;
-SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
- 0, "Guests are allowed to use INVPCID");
/* Extra capabilities (VMX_CAP_*) beyond the minimum */
static enum vmx_caps vmx_capabilities;
+/* APICv posted interrupt vector */
static int pirvec = -1;
-SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
- &pirvec, 0, "APICv posted interrupt vector");
#ifdef __FreeBSD__
static struct unrhdr *vpid_unr;
-#endif /* __FreeBSD__*/
-static u_int vpid_alloc_failed;
-SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
- &vpid_alloc_failed, 0, NULL);
+#endif /* __FreeBSD__ */
+static uint_t vpid_alloc_failed;
int guest_l1d_flush;
-SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RD,
- &guest_l1d_flush, 0, NULL);
int guest_l1d_flush_sw;
-SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RD,
- &guest_l1d_flush_sw, 0, NULL);
/* MSR save region is composed of an array of 'struct msr_entry' */
struct msr_entry {
@@ -254,6 +232,7 @@ static struct msr_entry msr_load_list[1] __aligned(16);
* The definitions of SDT probes for VMX.
*/
+/* BEGIN CSTYLED */
SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
"struct vmx *", "int", "struct vm_exit *");
@@ -328,6 +307,7 @@ SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
"struct vmx *", "int", "struct vm_exit *", "int");
+/* END CSTYLED */
/*
* Use the last page below 4GB as the APIC access address. This address is
@@ -343,126 +323,6 @@ static void vmx_apicv_sync_tmr(struct vlapic *vlapic);
static void vmx_tpr_shadow_enter(struct vlapic *vlapic);
static void vmx_tpr_shadow_exit(struct vlapic *vlapic);
-#ifdef KTR
-static const char *
-exit_reason_to_str(int reason)
-{
- static char reasonbuf[32];
-
- switch (reason) {
- case EXIT_REASON_EXCEPTION:
- return "exception";
- case EXIT_REASON_EXT_INTR:
- return "extint";
- case EXIT_REASON_TRIPLE_FAULT:
- return "triplefault";
- case EXIT_REASON_INIT:
- return "init";
- case EXIT_REASON_SIPI:
- return "sipi";
- case EXIT_REASON_IO_SMI:
- return "iosmi";
- case EXIT_REASON_SMI:
- return "smi";
- case EXIT_REASON_INTR_WINDOW:
- return "intrwindow";
- case EXIT_REASON_NMI_WINDOW:
- return "nmiwindow";
- case EXIT_REASON_TASK_SWITCH:
- return "taskswitch";
- case EXIT_REASON_CPUID:
- return "cpuid";
- case EXIT_REASON_GETSEC:
- return "getsec";
- case EXIT_REASON_HLT:
- return "hlt";
- case EXIT_REASON_INVD:
- return "invd";
- case EXIT_REASON_INVLPG:
- return "invlpg";
- case EXIT_REASON_RDPMC:
- return "rdpmc";
- case EXIT_REASON_RDTSC:
- return "rdtsc";
- case EXIT_REASON_RSM:
- return "rsm";
- case EXIT_REASON_VMCALL:
- return "vmcall";
- case EXIT_REASON_VMCLEAR:
- return "vmclear";
- case EXIT_REASON_VMLAUNCH:
- return "vmlaunch";
- case EXIT_REASON_VMPTRLD:
- return "vmptrld";
- case EXIT_REASON_VMPTRST:
- return "vmptrst";
- case EXIT_REASON_VMREAD:
- return "vmread";
- case EXIT_REASON_VMRESUME:
- return "vmresume";
- case EXIT_REASON_VMWRITE:
- return "vmwrite";
- case EXIT_REASON_VMXOFF:
- return "vmxoff";
- case EXIT_REASON_VMXON:
- return "vmxon";
- case EXIT_REASON_CR_ACCESS:
- return "craccess";
- case EXIT_REASON_DR_ACCESS:
- return "draccess";
- case EXIT_REASON_INOUT:
- return "inout";
- case EXIT_REASON_RDMSR:
- return "rdmsr";
- case EXIT_REASON_WRMSR:
- return "wrmsr";
- case EXIT_REASON_INVAL_VMCS:
- return "invalvmcs";
- case EXIT_REASON_INVAL_MSR:
- return "invalmsr";
- case EXIT_REASON_MWAIT:
- return "mwait";
- case EXIT_REASON_MTF:
- return "mtf";
- case EXIT_REASON_MONITOR:
- return "monitor";
- case EXIT_REASON_PAUSE:
- return "pause";
- case EXIT_REASON_MCE_DURING_ENTRY:
- return "mce-during-entry";
- case EXIT_REASON_TPR:
- return "tpr";
- case EXIT_REASON_APIC_ACCESS:
- return "apic-access";
- case EXIT_REASON_GDTR_IDTR:
- return "gdtridtr";
- case EXIT_REASON_LDTR_TR:
- return "ldtrtr";
- case EXIT_REASON_EPT_FAULT:
- return "eptfault";
- case EXIT_REASON_EPT_MISCONFIG:
- return "eptmisconfig";
- case EXIT_REASON_INVEPT:
- return "invept";
- case EXIT_REASON_RDTSCP:
- return "rdtscp";
- case EXIT_REASON_VMX_PREEMPT:
- return "vmxpreempt";
- case EXIT_REASON_INVVPID:
- return "invvpid";
- case EXIT_REASON_WBINVD:
- return "wbinvd";
- case EXIT_REASON_XSETBV:
- return "xsetbv";
- case EXIT_REASON_APIC_WRITE:
- return "apic-write";
- default:
- snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
- return (reasonbuf);
- }
-}
-#endif /* KTR */
-
static int
vmx_allow_x2apic_msrs(struct vmx *vmx)
{
@@ -511,14 +371,14 @@ vmx_allow_x2apic_msrs(struct vmx *vmx)
return (error);
}
-static u_long
-vmx_fix_cr0(u_long cr0)
+static ulong_t
+vmx_fix_cr0(ulong_t cr0)
{
return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}
-static u_long
-vmx_fix_cr4(u_long cr4)
+static ulong_t
+vmx_fix_cr4(ulong_t cr4)
{
return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}
@@ -623,12 +483,12 @@ vmx_init(int ipinum)
/* Check support for primary processor-based VM-execution controls */
error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
- MSR_VMX_TRUE_PROCBASED_CTLS,
- PROCBASED_CTLS_ONE_SETTING,
- PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
+ MSR_VMX_TRUE_PROCBASED_CTLS,
+ PROCBASED_CTLS_ONE_SETTING,
+ PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
if (error) {
printf("vmx_init: processor does not support desired primary "
- "processor-based controls\n");
+ "processor-based controls\n");
return (error);
}
@@ -637,37 +497,39 @@ vmx_init(int ipinum)
/* Check support for secondary processor-based VM-execution controls */
error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
- MSR_VMX_PROCBASED_CTLS2,
- PROCBASED_CTLS2_ONE_SETTING,
- PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
+ MSR_VMX_PROCBASED_CTLS2,
+ PROCBASED_CTLS2_ONE_SETTING,
+ PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
if (error) {
printf("vmx_init: processor does not support desired secondary "
- "processor-based controls\n");
+ "processor-based controls\n");
return (error);
}
/* Check support for VPID */
- error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
- PROCBASED2_ENABLE_VPID, 0, &tmp);
+ error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
+ MSR_VMX_PROCBASED_CTLS2,
+ PROCBASED2_ENABLE_VPID,
+ 0, &tmp);
if (error == 0)
procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
/* Check support for pin-based VM-execution controls */
error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
- MSR_VMX_TRUE_PINBASED_CTLS,
- PINBASED_CTLS_ONE_SETTING,
- PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
+ MSR_VMX_TRUE_PINBASED_CTLS,
+ PINBASED_CTLS_ONE_SETTING,
+ PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
if (error) {
printf("vmx_init: processor does not support desired "
- "pin-based controls\n");
+ "pin-based controls\n");
return (error);
}
/* Check support for VM-exit controls */
error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
- VM_EXIT_CTLS_ONE_SETTING,
- VM_EXIT_CTLS_ZERO_SETTING,
- &exit_ctls);
+ VM_EXIT_CTLS_ONE_SETTING,
+ VM_EXIT_CTLS_ZERO_SETTING,
+ &exit_ctls);
if (error) {
printf("vmx_init: processor does not support desired "
"exit controls\n");
@@ -689,25 +551,26 @@ vmx_init(int ipinum)
* as individual bits
*/
cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
- MSR_VMX_TRUE_PROCBASED_CTLS,
- PROCBASED_HLT_EXITING, 0,
- &tmp) == 0);
+ MSR_VMX_TRUE_PROCBASED_CTLS,
+ PROCBASED_HLT_EXITING, 0,
+ &tmp) == 0);
cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
- MSR_VMX_PROCBASED_CTLS,
- PROCBASED_MTF, 0,
- &tmp) == 0);
+ MSR_VMX_PROCBASED_CTLS,
+ PROCBASED_MTF, 0,
+ &tmp) == 0);
cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
- MSR_VMX_TRUE_PROCBASED_CTLS,
- PROCBASED_PAUSE_EXITING, 0,
- &tmp) == 0);
+ MSR_VMX_TRUE_PROCBASED_CTLS,
+ PROCBASED_PAUSE_EXITING, 0,
+ &tmp) == 0);
cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
&tmp) == 0);
- /* Check for APIC virtualization capabilities:
+ /*
+ * Check for APIC virtualization capabilities:
* - TPR shadowing
* - Full APICv (with or without x2APIC support)
* - Posted interrupt handling
@@ -860,10 +723,10 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
uint16_t maxcpus;
uint32_t proc_ctls, proc2_ctls, pin_ctls;
- vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
+ vmx = malloc(sizeof (struct vmx), M_VMX, M_WAITOK | M_ZERO);
if ((uintptr_t)vmx & PAGE_MASK) {
panic("malloc of struct vmx not aligned on %d byte boundary",
- PAGE_SIZE);
+ PAGE_SIZE);
}
vmx->vm = vm;
@@ -982,7 +845,8 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
/*
* Configure host sysenter MSRs to be restored on VM exit.
- * The thread-specific MSR_INTC_SEP_ESP value is loaded in vmx_run.
+ * The thread-specific MSR_INTC_SEP_ESP value is loaded in
+ * vmx_run.
*/
vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL);
vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP,
@@ -1098,19 +962,6 @@ vmx_run_trace(struct vmx *vmx, int vcpu)
}
static __inline void
-vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
- int handled)
-{
-#ifdef KTR
- VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
- handled ? "handled" : "unhandled",
- exit_reason_to_str(exit_reason), rip);
-#endif
- DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip,
- uint32_t, exit_reason);
-}
-
-static __inline void
vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
{
#ifdef KTR
@@ -1131,7 +982,7 @@ struct invvpid_desc {
uint32_t _res2;
uint64_t linear_addr;
};
-CTASSERT(sizeof(struct invvpid_desc) == 16);
+CTASSERT(sizeof (struct invvpid_desc) == 16);
static __inline void
invvpid(uint64_t type, struct invvpid_desc desc)
@@ -1318,9 +1169,9 @@ vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
}
#define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \
- VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
+ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \
- VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
+ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
@@ -1475,7 +1326,7 @@ vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
* Hardware Interrupts":
* - maskable interrupt vectors [16,255] can be delivered
* through the local APIC.
- */
+ */
KASSERT(vector >= 16 && vector <= 255,
("invalid vector %d from local APIC", vector));
@@ -1632,7 +1483,8 @@ vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
}
/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
- if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
+ if (!limits->xsave_enabled ||
+ !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
vm_inject_ud(vmx->vm, vcpu);
return (HANDLED);
}
@@ -2024,7 +1876,7 @@ ept_fault_type(uint64_t ept_qual)
else if (ept_qual & EPT_VIOLATION_INST_FETCH)
fault_type = VM_PROT_EXECUTE;
else
- fault_type= VM_PROT_READ;
+ fault_type = VM_PROT_READ;
return (fault_type);
}
@@ -2241,7 +2093,7 @@ vmx_task_switch_reason(uint64_t qual)
}
static int
-emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val)
+emulate_wrmsr(struct vmx *vmx, int vcpuid, uint_t num, uint64_t val)
{
int error;
@@ -2254,7 +2106,7 @@ emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val)
}
static int
-emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num)
+emulate_rdmsr(struct vmx *vmx, int vcpuid, uint_t num)
{
uint64_t result;
int error;
@@ -2586,7 +2438,8 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
* If the hypervisor has requested user exits for
* debug exceptions, bounce them out to userland.
*/
- if (intr_type == VMCS_INTR_T_SWEXCEPTION && intr_vec == IDT_BP &&
+ if (intr_type == VMCS_INTR_T_SWEXCEPTION &&
+ intr_vec == IDT_BP &&
(vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) {
vmexit->exitcode = VM_EXITCODE_BPT;
vmexit->u.bpt.inst_length = vmexit->inst_length;
@@ -3127,7 +2980,8 @@ vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap,
#ifdef __FreeBSD__
launched = 1;
#endif
- vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
+ DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip,
+ uint32_t, exit_reason);
rip = vmexit->rip;
} while (handled);
@@ -3138,7 +2992,7 @@ vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap,
if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
(!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
panic("Mismatch between handled (%d) and exitcode (%d)",
- handled, vmexit->exitcode);
+ handled, vmexit->exitcode);
}
if (!handled)
@@ -3173,8 +3027,6 @@ vmx_vmcleanup(void *arg)
vpid_free(vmx->state[i].vpid);
free(vmx, M_VMX);
-
- return;
}
static uint64_t *
@@ -3583,13 +3435,13 @@ struct vlapic_vtx {
struct pir_desc *pir_desc;
struct vmx *vmx;
- u_int pending_prio;
+ uint_t pending_prio;
boolean_t tmr_sync;
};
-CTASSERT((offsetof (struct vlapic_vtx, tmr_active) & 63) == 0);
+CTASSERT((offsetof(struct vlapic_vtx, tmr_active) & 63) == 0);
-#define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4))
+#define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4))
static vcpu_notify_t
vmx_apicv_set_ready(struct vlapic *vlapic, int vector, bool level)
@@ -3662,8 +3514,8 @@ vmx_apicv_set_ready(struct vlapic *vlapic, int vector, bool level)
notify = VCPU_NOTIFY_APIC;
vlapic_vtx->pending_prio = 0;
} else {
- const u_int old_prio = vlapic_vtx->pending_prio;
- const u_int prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT);
+ const uint_t old_prio = vlapic_vtx->pending_prio;
+ const uint_t prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT);
if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) {
atomic_set_int(&vlapic_vtx->pending_prio, prio_bit);
@@ -3874,7 +3726,8 @@ vmx_vlapic_init(void *arg, int vcpuid)
vmx = arg;
- vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
+ vlapic = malloc(sizeof (struct vlapic_vtx), M_VLAPIC,
+ M_WAITOK | M_ZERO);
vlapic->vm = vmx->vm;
vlapic->vcpuid = vcpuid;
vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
diff --git a/usr/src/uts/i86pc/io/vmm/intel/vmx.h b/usr/src/uts/i86pc/io/vmm/intel/vmx.h
index b78f146755..6d17bdaadd 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/vmx.h
+++ b/usr/src/uts/i86pc/io/vmm/intel/vmx.h
@@ -106,7 +106,7 @@ struct vmxstate {
struct apic_page {
uint32_t reg[PAGE_SIZE / 4];
};
-CTASSERT(sizeof(struct apic_page) == PAGE_SIZE);
+CTASSERT(sizeof (struct apic_page) == PAGE_SIZE);
/* Posted Interrupt Descriptor (described in section 29.6 of the Intel SDM) */
struct pir_desc {
@@ -114,7 +114,7 @@ struct pir_desc {
uint64_t pending;
uint64_t unused[3];
} __aligned(64);
-CTASSERT(sizeof(struct pir_desc) == 64);
+CTASSERT(sizeof (struct pir_desc) == 64);
/* Index into the 'guest_msrs[]' array */
enum {
diff --git a/usr/src/uts/i86pc/io/vmm/intel/vmx_controls.h b/usr/src/uts/i86pc/io/vmm/intel/vmx_controls.h
index 5408d129ad..ae6ff9b5aa 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/vmx_controls.h
+++ b/usr/src/uts/i86pc/io/vmm/intel/vmx_controls.h
@@ -52,7 +52,7 @@
#define PROCBASED_CR8_STORE_EXITING (1 << 20)
#define PROCBASED_USE_TPR_SHADOW (1 << 21)
#define PROCBASED_NMI_WINDOW_EXITING (1 << 22)
-#define PROCBASED_MOV_DR_EXITING (1 << 23)
+#define PROCBASED_MOV_DR_EXITING (1 << 23)
#define PROCBASED_IO_EXITING (1 << 24)
#define PROCBASED_IO_BITMAPS (1 << 25)
#define PROCBASED_MTF (1 << 27)
diff --git a/usr/src/uts/i86pc/io/vmm/intel/vmx_msr.c b/usr/src/uts/i86pc/io/vmm/intel/vmx_msr.c
index e2ae7ae8e2..5e404e5ebd 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/vmx_msr.c
+++ b/usr/src/uts/i86pc/io/vmm/intel/vmx_msr.c
@@ -75,7 +75,7 @@ vmx_ctl_allows_zero_setting(uint64_t msr_val, int bitpos)
*/
int
vmx_set_ctlreg(int ctl_reg, int true_ctl_reg, uint32_t ones_mask,
- uint32_t zeros_mask, uint32_t *retval)
+ uint32_t zeros_mask, uint32_t *retval)
{
int i;
uint64_t val, trueval;
@@ -98,8 +98,8 @@ vmx_set_ctlreg(int ctl_reg, int true_ctl_reg, uint32_t ones_mask,
zero_allowed = vmx_ctl_allows_zero_setting(trueval, i);
KASSERT(one_allowed || zero_allowed,
- ("invalid zero/one setting for bit %d of ctl 0x%0x, "
- "truectl 0x%0x\n", i, ctl_reg, true_ctl_reg));
+ ("invalid zero/one setting for bit %d of ctl 0x%0x, "
+ "truectl 0x%0x\n", i, ctl_reg, true_ctl_reg));
if (zero_allowed && !one_allowed) { /* b(i),c(i) */
if (ones_mask & (1 << i))
@@ -110,21 +110,26 @@ vmx_set_ctlreg(int ctl_reg, int true_ctl_reg, uint32_t ones_mask,
return (EINVAL);
*retval |= 1 << i;
} else {
- if (zeros_mask & (1 << i)) /* b(ii),c(ii) */
+ if (zeros_mask & (1 << i)) {
+ /* b(ii),c(ii) */
*retval &= ~(1 << i);
- else if (ones_mask & (1 << i)) /* b(ii), c(ii) */
+ } else if (ones_mask & (1 << i)) {
+ /* b(ii), c(ii) */
*retval |= 1 << i;
- else if (!true_ctls_avail)
- *retval &= ~(1 << i); /* b(iii) */
- else if (vmx_ctl_allows_zero_setting(val, i))/* c(iii)*/
+ } else if (!true_ctls_avail) {
+ /* b(iii) */
*retval &= ~(1 << i);
- else if (vmx_ctl_allows_one_setting(val, i)) /* c(iv) */
+ } else if (vmx_ctl_allows_zero_setting(val, i)) {
+ /* c(iii) */
+ *retval &= ~(1 << i);
+ } else if (vmx_ctl_allows_one_setting(val, i)) {
+ /* c(iv) */
*retval |= 1 << i;
- else {
+ } else {
panic("vmx_set_ctlreg: unable to determine "
- "correct value of ctl bit %d for msr "
- "0x%0x and true msr 0x%0x", i, ctl_reg,
- true_ctl_reg);
+ "correct value of ctl bit %d for msr "
+ "0x%0x and true msr 0x%0x", i, ctl_reg,
+ true_ctl_reg);
}
}
}
@@ -140,7 +145,7 @@ msr_bitmap_initialize(char *bitmap)
}
int
-msr_bitmap_change_access(char *bitmap, u_int msr, int access)
+msr_bitmap_change_access(char *bitmap, uint_t msr, int access)
{
int byte, bit;
@@ -177,7 +182,7 @@ static uint64_t host_msrs[GUEST_MSR_NUM];
static bool
nehalem_cpu(void)
{
- u_int family, model;
+ uint_t family, model;
/*
* The family:model numbers belonging to the Nehalem microarchitecture
@@ -202,7 +207,7 @@ nehalem_cpu(void)
static bool
westmere_cpu(void)
{
- u_int family, model;
+ uint_t family, model;
/*
* The family:model numbers belonging to the Westmere microarchitecture
@@ -343,8 +348,6 @@ vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
PAT_VALUE(5, PAT_WRITE_THROUGH) |
PAT_VALUE(6, PAT_UNCACHED) |
PAT_VALUE(7, PAT_UNCACHEABLE);
-
- return;
}
void
@@ -398,7 +401,7 @@ vmx_msr_guest_exit(struct vmx *vmx, int vcpuid)
}
int
-vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val)
+vmx_rdmsr(struct vmx *vmx, int vcpuid, uint_t num, uint64_t *val)
{
const uint64_t *guest_msrs;
int error;
@@ -447,7 +450,7 @@ vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val)
}
int
-vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val)
+vmx_wrmsr(struct vmx *vmx, int vcpuid, uint_t num, uint64_t val)
{
uint64_t *guest_msrs;
uint64_t changed;
diff --git a/usr/src/uts/i86pc/io/vmm/intel/vmx_msr.h b/usr/src/uts/i86pc/io/vmm/intel/vmx_msr.h
index 04583336e2..4bc43d74f9 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/vmx_msr.h
+++ b/usr/src/uts/i86pc/io/vmm/intel/vmx_msr.h
@@ -37,11 +37,11 @@ void vmx_msr_init(void);
void vmx_msr_guest_init(struct vmx *vmx, int vcpuid);
void vmx_msr_guest_enter(struct vmx *vmx, int vcpuid);
void vmx_msr_guest_exit(struct vmx *vmx, int vcpuid);
-int vmx_rdmsr(struct vmx *, int vcpuid, u_int num, uint64_t *val);
-int vmx_wrmsr(struct vmx *, int vcpuid, u_int num, uint64_t val);
+int vmx_rdmsr(struct vmx *, int vcpuid, uint_t num, uint64_t *val);
+int vmx_wrmsr(struct vmx *, int vcpuid, uint_t num, uint64_t val);
int vmx_set_ctlreg(int ctl_reg, int true_ctl_reg, uint32_t ones_mask,
- uint32_t zeros_mask, uint32_t *retval);
+ uint32_t zeros_mask, uint32_t *retval);
/*
* According to Section 21.10.4 "Software Access to Related Structures",
@@ -59,7 +59,7 @@ int vmx_set_ctlreg(int ctl_reg, int true_ctl_reg, uint32_t ones_mask,
#define MSR_BITMAP_ACCESS_WRITE 0x2
#define MSR_BITMAP_ACCESS_RW (MSR_BITMAP_ACCESS_READ|MSR_BITMAP_ACCESS_WRITE)
void msr_bitmap_initialize(char *bitmap);
-int msr_bitmap_change_access(char *bitmap, u_int msr, int access);
+int msr_bitmap_change_access(char *bitmap, uint_t msr, int access);
#define guest_msr_rw(vmx, msr) \
msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
diff --git a/usr/src/uts/i86pc/io/vmm/intel/vmx_support.s b/usr/src/uts/i86pc/io/vmm/intel/vmx_support.s
index f719e31e30..aba844e8c3 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/vmx_support.s
+++ b/usr/src/uts/i86pc/io/vmm/intel/vmx_support.s
@@ -58,6 +58,7 @@
* We modify %rsp to point to the 'vmxctx' so we can use it to restore
* host context in case of an error with 'vmlaunch' or 'vmresume'.
*/
+/* BEGIN CSTYLED */
#define VMX_GUEST_RESTORE \
movq VMXCTX_GUEST_CR2(%rdi),%rsi; \
movq %rsi,%cr2; \
@@ -99,6 +100,7 @@
movq %rbx, VMXCTX_GUEST_CR2(%rdi); \
movq VMXSTK_TMPRDI(%rsp), %rdx; \
movq %rdx, VMXCTX_GUEST_RDI(%rdi);
+/* END CSTYLED */
/*
diff --git a/usr/src/uts/i86pc/io/vmm/intel/vtd.c b/usr/src/uts/i86pc/io/vmm/intel/vtd.c
index 79524220b5..e75764acf3 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/vtd.c
+++ b/usr/src/uts/i86pc/io/vmm/intel/vtd.c
@@ -53,7 +53,7 @@ __FBSDID("$FreeBSD$");
* Architecture Spec, September 2008.
*/
-#define VTD_DRHD_INCLUDE_PCI_ALL(Flags) (((Flags) >> 0) & 0x1)
+#define VTD_DRHD_INCLUDE_PCI_ALL(Flags) (((Flags) >> 0) & 0x1)
/* Section 10.4 "Register Descriptions" */
struct vtdmap {
@@ -105,14 +105,14 @@ struct vtdmap {
#define VTD_PTE_SUPERPAGE (1UL << 7)
#define VTD_PTE_ADDR_M (0x000FFFFFFFFFF000UL)
-#define VTD_RID2IDX(rid) (((rid) & 0xff) * 2)
+#define VTD_RID2IDX(rid) (((rid) & 0xff) * 2)
struct domain {
uint64_t *ptp; /* first level page table page */
int pt_levels; /* number of page table levels */
int addrwidth; /* 'AW' field in context entry */
int spsmask; /* supported super page sizes */
- u_int id; /* domain id */
+ uint_t id; /* domain id */
vm_paddr_t maxaddr; /* highest address to be mapped */
SLIST_ENTRY(domain) next;
};
@@ -129,8 +129,8 @@ typedef int (*drhd_ident_func_t)(void);
static dev_info_t *vtddips[DRHD_MAX_UNITS];
#endif
-static uint64_t root_table[PAGE_SIZE / sizeof(uint64_t)] __aligned(4096);
-static uint64_t ctx_tables[256][PAGE_SIZE / sizeof(uint64_t)] __aligned(4096);
+static uint64_t root_table[PAGE_SIZE / sizeof (uint64_t)] __aligned(4096);
+static uint64_t ctx_tables[256][PAGE_SIZE / sizeof (uint64_t)] __aligned(4096);
static MALLOC_DEFINE(M_VTD, "vtd", "vtd");
@@ -161,10 +161,10 @@ vtd_max_domains(struct vtdmap *vtdmap)
}
}
-static u_int
+static uint_t
domain_id(void)
{
- u_int id;
+ uint_t id;
struct domain *dom;
/* Skip domain id 0 - it is reserved when Caching Mode field is set */
@@ -186,7 +186,7 @@ domain_id(void)
static struct vtdmap *
vtd_device_scope(uint16_t rid)
{
- int i, remaining, pathremaining;
+ int i, remaining, pathrem;
char *end, *pathend;
struct vtdmap *vtdmap;
ACPI_DMAR_HARDWARE_UNIT *drhd;
@@ -199,21 +199,23 @@ vtd_device_scope(uint16_t rid)
if (VTD_DRHD_INCLUDE_PCI_ALL(drhd->Flags)) {
/*
* From Intel VT-d arch spec, version 3.0:
- * If a DRHD structure with INCLUDE_PCI_ALL flag Set is reported
- * for a Segment, it must be enumerated by BIOS after all other
- * DRHD structures for the same Segment.
+ * If a DRHD structure with INCLUDE_PCI_ALL flag Set is
+ * reported for a Segment, it must be enumerated by BIOS
+ * after all other DRHD structures for the same Segment.
*/
vtdmap = vtdmaps[i];
- return(vtdmap);
+ return (vtdmap);
}
end = (char *)drhd + drhd->Header.Length;
- remaining = drhd->Header.Length - sizeof(ACPI_DMAR_HARDWARE_UNIT);
- while (remaining > sizeof(ACPI_DMAR_DEVICE_SCOPE)) {
- device_scope = (ACPI_DMAR_DEVICE_SCOPE *)(end - remaining);
+ remaining = drhd->Header.Length -
+ sizeof (ACPI_DMAR_HARDWARE_UNIT);
+ while (remaining > sizeof (ACPI_DMAR_DEVICE_SCOPE)) {
+ device_scope =
+ (ACPI_DMAR_DEVICE_SCOPE *)(end - remaining);
remaining -= device_scope->Length;
- switch (device_scope->EntryType){
+ switch (device_scope->EntryType) {
/* 0x01 and 0x02 are PCI device entries */
case 0x01:
case 0x02:
@@ -226,10 +228,12 @@ vtd_device_scope(uint16_t rid)
continue;
pathend = (char *)device_scope + device_scope->Length;
- pathremaining = device_scope->Length - sizeof(ACPI_DMAR_DEVICE_SCOPE);
- while (pathremaining >= sizeof(ACPI_DMAR_PCI_PATH)) {
- path = (ACPI_DMAR_PCI_PATH *)(pathend - pathremaining);
- pathremaining -= sizeof(ACPI_DMAR_PCI_PATH);
+ pathrem = device_scope->Length -
+ sizeof (ACPI_DMAR_DEVICE_SCOPE);
+ while (pathrem >= sizeof (ACPI_DMAR_PCI_PATH)) {
+ path = (ACPI_DMAR_PCI_PATH *)
+ (pathend - pathrem);
+ pathrem -= sizeof (ACPI_DMAR_PCI_PATH);
if (PCI_RID2SLOT(rid) != path->Device)
continue;
@@ -281,7 +285,7 @@ vtd_iotlb_global_invalidate(struct vtdmap *vtdmap)
iotlb_reg = (volatile uint64_t *)((caddr_t)vtdmap + offset + 8);
*iotlb_reg = VTD_IIR_IVT | VTD_IIR_IIRG_GLOBAL |
- VTD_IIR_DRAIN_READS | VTD_IIR_DRAIN_WRITES;
+ VTD_IIR_DRAIN_READS | VTD_IIR_DRAIN_WRITES;
while (1) {
val = *iotlb_reg;
@@ -375,7 +379,8 @@ vtd_init(void)
* set vtd.regmap.1.addr=0xfeda0000
*/
for (units = 0; units < DRHD_MAX_UNITS; units++) {
- snprintf(envname, sizeof(envname), "vtd.regmap.%d.addr", units);
+ snprintf(envname, sizeof (envname), "vtd.regmap.%d.addr",
+ units);
if (getenv_ulong(envname, &mapaddr) == 0)
break;
vtdmaps[units] = (struct vtdmap *)PHYS_TO_DMAP(mapaddr);
@@ -392,8 +397,8 @@ vtd_init(void)
return (ENXIO);
end = (char *)dmar + dmar->Header.Length;
- remaining = dmar->Header.Length - sizeof(ACPI_TABLE_DMAR);
- while (remaining > sizeof(ACPI_DMAR_HEADER)) {
+ remaining = dmar->Header.Length - sizeof (ACPI_TABLE_DMAR);
+ while (remaining > sizeof (ACPI_DMAR_HEADER)) {
hdr = (ACPI_DMAR_HEADER *)(end - remaining);
if (hdr->Length > remaining)
break;
@@ -431,7 +436,7 @@ skip_dmar:
drhd_num = units;
max_domains = 64 * 1024; /* maximum valid value */
- for (i = 0; i < drhd_num; i++){
+ for (i = 0; i < drhd_num; i++) {
vtdmap = vtdmaps[i];
if (VTD_CAP_CM(vtdmap->cap) != 0)
@@ -538,13 +543,12 @@ vtd_add_device(void *arg, uint16_t rid)
if (ctxp[idx] & VTD_CTX_PRESENT) {
panic("vtd_add_device: device %x is already owned by "
- "domain %d", rid,
- (uint16_t)(ctxp[idx + 1] >> 8));
+ "domain %d", rid, (uint16_t)(ctxp[idx + 1] >> 8));
}
if ((vtdmap = vtd_device_scope(rid)) == NULL)
panic("vtd_add_device: device %x is not in scope for "
- "any DMA remapping unit", rid);
+ "any DMA remapping unit", rid);
/*
* Order is important. The 'present' bit is set only after all fields
@@ -601,7 +605,7 @@ vtd_remove_device(void *arg, uint16_t rid)
static uint64_t
vtd_update_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t len,
- int remove)
+ int remove)
{
struct domain *dom;
int i, spshift, ptpshift, ptpindex, nlevels;
@@ -773,10 +777,10 @@ vtd_create_domain(vm_paddr_t maxaddr)
if (i >= 5) {
panic("vtd_create_domain: SAGAW 0x%x does not support AGAW %d",
- tmp, agaw);
+ tmp, agaw);
}
- dom = malloc(sizeof(struct domain), M_VTD, M_ZERO | M_WAITOK);
+ dom = malloc(sizeof (struct domain), M_VTD, M_ZERO | M_WAITOK);
dom->pt_levels = pt_levels;
dom->addrwidth = addrwidth;
dom->id = domain_id();
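
Note on VTD_RID2IDX above: the context tables are declared as arrays of 64-bit words, but each VT-d context entry is 128 bits wide, which is why vtd_add_device touches both ctxp[idx] and ctxp[idx + 1] and why the macro doubles the low byte of the PCI requester ID. A minimal standalone sketch of the index calculation, using only what the macro itself shows (the example rid is illustrative):

#include <stdio.h>
#include <stdint.h>

/*
 * Low byte of the requester ID (device/function) selects the context
 * entry; each entry spans two 64-bit words, hence the factor of two.
 */
#define	VTD_RID2IDX(rid)	(((rid) & 0xff) * 2)

int
main(void)
{
	uint16_t rid = 0x00a8;	/* bus 0, device 0x15, function 0 */

	printf("context index for rid %#x: %u\n", rid, VTD_RID2IDX(rid));
	return (0);
}
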
diff --git a/usr/src/uts/i86pc/io/vmm/io/iommu.c b/usr/src/uts/i86pc/io/vmm/io/iommu.c
index 2e5fc9df32..3630c36680 100644
--- a/usr/src/uts/i86pc/io/vmm/io/iommu.c
+++ b/usr/src/uts/i86pc/io/vmm/io/iommu.c
@@ -51,17 +51,9 @@ __FBSDID("$FreeBSD$");
#include "vmm_mem.h"
#include "iommu.h"
-SYSCTL_DECL(_hw_vmm);
-SYSCTL_NODE(_hw_vmm, OID_AUTO, iommu, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
- "bhyve iommu parameters");
-
static int iommu_avail;
-SYSCTL_INT(_hw_vmm_iommu, OID_AUTO, initialized, CTLFLAG_RD, &iommu_avail,
- 0, "bhyve iommu initialized?");
static int iommu_enable = 1;
-SYSCTL_INT(_hw_vmm_iommu, OID_AUTO, enable, CTLFLAG_RDTUN, &iommu_enable, 0,
- "Enable use of I/O MMU (required for PCI passthrough).");
static struct iommu_ops *ops;
static void *host_domain;
@@ -70,7 +62,7 @@ static eventhandler_tag add_tag, delete_tag;
#endif
#ifndef __FreeBSD__
-static volatile u_int iommu_initted;
+static volatile uint_t iommu_initted;
#endif
static __inline int
diff --git a/usr/src/uts/i86pc/io/vmm/io/iommu.h b/usr/src/uts/i86pc/io/vmm/io/iommu.h
index f8003a5d45..3c79709183 100644
--- a/usr/src/uts/i86pc/io/vmm/io/iommu.h
+++ b/usr/src/uts/i86pc/io/vmm/io/iommu.h
@@ -38,9 +38,9 @@ typedef void (*iommu_disable_func_t)(void);
typedef void *(*iommu_create_domain_t)(vm_paddr_t maxaddr);
typedef void (*iommu_destroy_domain_t)(void *domain);
typedef uint64_t (*iommu_create_mapping_t)(void *domain, vm_paddr_t gpa,
- vm_paddr_t hpa, uint64_t len);
+ vm_paddr_t hpa, uint64_t len);
typedef uint64_t (*iommu_remove_mapping_t)(void *domain, vm_paddr_t gpa,
- uint64_t len);
+ uint64_t len);
typedef void (*iommu_add_device_t)(void *domain, uint16_t rid);
typedef void (*iommu_remove_device_t)(void *dom, uint16_t rid);
typedef void (*iommu_invalidate_tlb_t)(void *dom);
@@ -68,7 +68,7 @@ void *iommu_host_domain(void);
void *iommu_create_domain(vm_paddr_t maxaddr);
void iommu_destroy_domain(void *dom);
void iommu_create_mapping(void *dom, vm_paddr_t gpa, vm_paddr_t hpa,
- size_t len);
+ size_t len);
void iommu_remove_mapping(void *dom, vm_paddr_t gpa, size_t len);
void iommu_add_device(void *dom, uint16_t rid);
void iommu_remove_device(void *dom, uint16_t rid);
diff --git a/usr/src/uts/i86pc/io/vmm/io/vatpic.c b/usr/src/uts/i86pc/io/vmm/io/vatpic.c
index 627f4ed14e..de93246166 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vatpic.c
+++ b/usr/src/uts/i86pc/io/vmm/io/vatpic.c
@@ -131,7 +131,7 @@ vatpic_get_highest_isrpin(struct atpic *atpic)
int i;
ATPIC_PIN_FOREACH(pin, atpic, i) {
- bit = (1 << pin);
+ bit = (1 << pin);
if (atpic->service & bit) {
/*
@@ -625,7 +625,7 @@ vatpic_intr_accepted(struct vm *vm, int vector)
static int
vatpic_read(struct vatpic *vatpic, struct atpic *atpic, bool in, int port,
- int bytes, uint32_t *eax)
+ int bytes, uint32_t *eax)
{
int pin;
@@ -788,7 +788,7 @@ vatpic_init(struct vm *vm)
{
struct vatpic *vatpic;
- vatpic = malloc(sizeof(struct vatpic), M_VATPIC, M_WAITOK | M_ZERO);
+ vatpic = malloc(sizeof (struct vatpic), M_VATPIC, M_WAITOK | M_ZERO);
vatpic->vm = vm;
mtx_init(&vatpic->mtx, "vatpic lock", NULL, MTX_SPIN);
diff --git a/usr/src/uts/i86pc/io/vmm/io/vatpic.h b/usr/src/uts/i86pc/io/vmm/io/vatpic.h
index 4518df34a5..4ed51a06ed 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vatpic.h
+++ b/usr/src/uts/i86pc/io/vmm/io/vatpic.h
@@ -49,7 +49,8 @@ int vatpic_elc_handler(void *arg, bool in, uint16_t port, uint8_t bytes,
int vatpic_assert_irq(struct vm *vm, int irq);
int vatpic_deassert_irq(struct vm *vm, int irq);
int vatpic_pulse_irq(struct vm *vm, int irq);
-int vatpic_set_irq_trigger(struct vm *vm, int irq, enum vm_intr_trigger trigger);
+int vatpic_set_irq_trigger(struct vm *vm, int irq,
+ enum vm_intr_trigger trigger);
void vatpic_pending_intr(struct vm *vm, int *vecptr);
void vatpic_intr_accepted(struct vm *vm, int vector);
diff --git a/usr/src/uts/i86pc/io/vmm/io/vatpit.c b/usr/src/uts/i86pc/io/vmm/io/vatpit.c
index 7d68bc659d..51ee4923d0 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vatpit.c
+++ b/usr/src/uts/i86pc/io/vmm/io/vatpit.c
@@ -174,7 +174,6 @@ vatpit_callout_handler(void *a)
done:
VATPIT_UNLOCK(vatpit);
- return;
}
static void
@@ -197,7 +196,7 @@ pit_timer_start_cntr0(struct vatpit *vatpit)
* ticks in the past.
*/
binuptime(&now);
- if (bintime_cmp(&c->callout_bt, &now, <)) {
+ if (BINTIME_CMP(&c->callout_bt, <, &now)) {
c->callout_bt = now;
bintime_add(&c->callout_bt, &delta);
}
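
The bintime_cmp()/BINTIME_CMP() swap here (and in vlapic.c further down) changes the calling convention: the comparison operator moves from the last argument to an infix position. The sketch below shows how such an infix-style comparator is typically written and how the call site above reads; the macro body is an assumption for illustration, not the actual compat-header definition.

#include <stdio.h>
#include <stdint.h>

struct bintime {
	int64_t		sec;
	uint64_t	frac;
};

/*
 * Compare two bintimes with an infix operator, e.g.
 * BINTIME_CMP(&a, <, &b): seconds decide unless equal, in which case
 * the fractional parts are compared.
 */
#define	BINTIME_CMP(a, cmp, b)						\
	(((a)->sec == (b)->sec) ?					\
	    ((a)->frac cmp (b)->frac) :					\
	    ((a)->sec cmp (b)->sec))

int
main(void)
{
	struct bintime now = { 100, 0 };
	struct bintime fire = { 99, 1ULL << 63 };	/* 99.5 s */

	/* A callout target already in the past compares as "less". */
	printf("callout in the past: %d\n", BINTIME_CMP(&fire, <, &now));
	return (0);
}
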
@@ -391,8 +390,9 @@ vatpit_handler(void *arg, bool in, uint16_t port, uint8_t bytes, uint32_t *eax)
tmp &= 0xff;
*eax = tmp;
c->frbyte ^= 1;
- } else
+ } else {
*eax = c->ol[--c->olbyte];
+ }
} else {
c->cr[c->crbyte++] = *eax;
if (c->crbyte == 2) {
@@ -441,7 +441,7 @@ vatpit_init(struct vm *vm)
struct vatpit_callout_arg *arg;
int i;
- vatpit = malloc(sizeof(struct vatpit), M_VATPIT, M_WAITOK | M_ZERO);
+ vatpit = malloc(sizeof (struct vatpit), M_VATPIT, M_WAITOK | M_ZERO);
vatpit->vm = vm;
mtx_init(&vatpit->mtx, "vatpit lock", NULL, MTX_SPIN);
diff --git a/usr/src/uts/i86pc/io/vmm/io/vhpet.c b/usr/src/uts/i86pc/io/vmm/io/vhpet.c
index 34004d482a..edeee6c128 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vhpet.c
+++ b/usr/src/uts/i86pc/io/vmm/io/vhpet.c
@@ -319,7 +319,6 @@ vhpet_handler(void *a)
vhpet_timer_interrupt(vhpet, n);
done:
VHPET_UNLOCK(vhpet);
- return;
}
static void
@@ -718,8 +717,8 @@ vhpet_init(struct vm *vm)
struct vhpet_callout_arg *arg;
struct bintime bt;
- vhpet = malloc(sizeof(struct vhpet), M_VHPET, M_WAITOK | M_ZERO);
- vhpet->vm = vm;
+ vhpet = malloc(sizeof (struct vhpet), M_VHPET, M_WAITOK | M_ZERO);
+ vhpet->vm = vm;
mtx_init(&vhpet->mtx, "vhpet lock", NULL, MTX_DEF);
FREQ2BT(HPET_FREQ, &bt);
diff --git a/usr/src/uts/i86pc/io/vmm/io/vioapic.c b/usr/src/uts/i86pc/io/vmm/io/vioapic.c
index 89d3bf79df..8b259000c6 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vioapic.c
+++ b/usr/src/uts/i86pc/io/vmm/io/vioapic.c
@@ -424,7 +424,7 @@ vioapic_init(struct vm *vm)
int i;
struct vioapic *vioapic;
- vioapic = malloc(sizeof(struct vioapic), M_VIOAPIC, M_WAITOK | M_ZERO);
+ vioapic = malloc(sizeof (struct vioapic), M_VIOAPIC, M_WAITOK | M_ZERO);
vioapic->vm = vm;
mtx_init(&vioapic->mtx, "vioapic lock", NULL, MTX_SPIN);
diff --git a/usr/src/uts/i86pc/io/vmm/io/vlapic.c b/usr/src/uts/i86pc/io/vmm/io/vlapic.c
index 8af77a387b..e84520de46 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vlapic.c
+++ b/usr/src/uts/i86pc/io/vmm/io/vlapic.c
@@ -78,7 +78,7 @@ __FBSDID("$FreeBSD$");
*/
#define PRIO(x) ((x) & 0xf0)
-#define VLAPIC_VERSION (16)
+#define VLAPIC_VERSION (16)
#define x2apic(vlapic) (((vlapic)->msr_apicbase & APICBASE_X2APIC) ? 1 : 0)
@@ -97,7 +97,7 @@ __FBSDID("$FreeBSD$");
* - arbitrary but chosen to be in the ballpark of contemporary hardware.
* - power-of-two to avoid loss of precision when converted to a bintime.
*/
-#define VLAPIC_BUS_FREQ (128 * 1024 * 1024)
+#define VLAPIC_BUS_FREQ (128 * 1024 * 1024)
static void vlapic_set_error(struct vlapic *, uint32_t, bool);
@@ -238,7 +238,7 @@ vlapic_get_ccr(struct vlapic *vlapic)
* compute the value of 'ccr' based on the remaining time.
*/
binuptime(&bt_now);
- if (bintime_cmp(&vlapic->timer_fire_bt, &bt_now, >)) {
+ if (BINTIME_CMP(&vlapic->timer_fire_bt, >, &bt_now)) {
bt_rem = vlapic->timer_fire_bt;
bintime_sub(&bt_rem, &bt_now);
ccr += bt_rem.sec * BT2FREQ(&vlapic->timer_freq_bt);
@@ -482,7 +482,7 @@ vlapic_mask_lvts(struct vlapic *vlapic)
}
static int
-vlapic_fire_lvt(struct vlapic *vlapic, u_int lvt)
+vlapic_fire_lvt(struct vlapic *vlapic, uint_t lvt)
{
uint32_t mode, reg, vec;
vcpu_notify_t notify;
@@ -691,7 +691,7 @@ vlapic_fire_timer(struct vlapic *vlapic)
}
static VMM_STAT(VLAPIC_INTR_CMC,
- "corrected machine check interrupts generated by vlapic");
+ "corrected machine check interrupts generated by vlapic");
void
vlapic_fire_cmci(struct vlapic *vlapic)
@@ -703,7 +703,7 @@ vlapic_fire_cmci(struct vlapic *vlapic)
}
static VMM_STAT_ARRAY(LVTS_TRIGGERRED, VLAPIC_MAXLVT_INDEX + 1,
- "lvts triggered");
+ "lvts triggered");
int
vlapic_trigger_lvt(struct vlapic *vlapic, int vector)
@@ -714,7 +714,7 @@ vlapic_trigger_lvt(struct vlapic *vlapic, int vector)
* When the local APIC is global/hardware disabled,
* LINT[1:0] pins are configured as INTR and NMI pins,
* respectively.
- */
+ */
switch (vector) {
case APIC_LVT_LINT0:
vm_inject_extint(vlapic->vm, vlapic->vcpuid);
@@ -769,17 +769,11 @@ vlapic_callout_handler(void *arg)
if (vlapic_periodic_timer(vlapic)) {
binuptime(&btnow);
-#ifdef __FreeBSD__
- KASSERT(bintime_cmp(&btnow, &vlapic->timer_fire_bt, >=),
- ("vlapic callout at %#lx.%#lx, expected at %#lx.#%lx",
- btnow.sec, btnow.frac, vlapic->timer_fire_bt.sec,
- vlapic->timer_fire_bt.frac));
-#else
- KASSERT(bintime_cmp(&btnow, &vlapic->timer_fire_bt, >=),
+
+ KASSERT(BINTIME_CMP(&btnow, >=, &vlapic->timer_fire_bt),
("vlapic callout at %lx.%lx, expected at %lx.%lx",
btnow.sec, btnow.frac, vlapic->timer_fire_bt.sec,
vlapic->timer_fire_bt.frac));
-#endif
/*
* Compute the delta between when the timer was supposed to
@@ -789,7 +783,7 @@ vlapic_callout_handler(void *arg)
bintime_sub(&bt, &vlapic->timer_fire_bt);
rem_sbt = bttosbt(vlapic->timer_period_bt);
- if (bintime_cmp(&bt, &vlapic->timer_period_bt, <)) {
+ if (BINTIME_CMP(&bt, <, &vlapic->timer_period_bt)) {
/*
* Adjust the time until the next countdown downward
* to account for the lost time.
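
The hunk above keeps a periodic timer from drifting when its callout runs late: the time already lost (now minus the scheduled fire time) is subtracted from the period before rearming. A small sketch of that compensation in plain sbintime-style arithmetic; the function and variable names here are illustrative, not taken from the vlapic code.

#include <stdio.h>
#include <stdint.h>

typedef int64_t sbintime_t;		/* fixed-point seconds, 2^-32 units */

#define	SBT_1S	(1LL << 32)

/*
 * Given when a periodic timer was supposed to fire and when the handler
 * actually ran, return how long to wait before the next firing so the
 * period stays anchored to the original schedule.
 */
static sbintime_t
next_delay(sbintime_t scheduled, sbintime_t now, sbintime_t period)
{
	sbintime_t late = now - scheduled;

	if (late < period)
		return (period - late);	/* absorb the lost time */
	return (0);			/* more than a full period late */
}

int
main(void)
{
	sbintime_t period = SBT_1S / 100;		/* 10 ms */
	sbintime_t delay = next_delay(0, SBT_1S / 400, period);

	printf("rearm in %lld of %lld units\n",
	    (long long)delay, (long long)period);
	return (0);
}
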
@@ -1250,14 +1244,13 @@ vlapic_read(struct vlapic *vlapic, int mmio_access, uint64_t offset,
goto done;
}
- if (offset > sizeof(*lapic)) {
+ if (offset > sizeof (*lapic)) {
*data = 0;
goto done;
}
offset &= ~3;
- switch(offset)
- {
+ switch (offset) {
case APIC_OFFSET_ID:
*data = lapic->id;
break;
@@ -1342,7 +1335,7 @@ vlapic_read(struct vlapic *vlapic, int mmio_access, uint64_t offset,
}
done:
VLAPIC_CTR2(vlapic, "vlapic read offset %#x, data %#lx", offset, *data);
- return 0;
+ return (0);
}
int
@@ -1364,7 +1357,7 @@ vlapic_write(struct vlapic *vlapic, int mmio_access, uint64_t offset,
VLAPIC_CTR2(vlapic, "vlapic write offset %#lx, data %#lx",
offset, data);
- if (offset > sizeof(*lapic))
+ if (offset > sizeof (*lapic))
return (0);
/* Ignore MMIO accesses in x2APIC mode */
@@ -1384,8 +1377,7 @@ vlapic_write(struct vlapic *vlapic, int mmio_access, uint64_t offset,
}
retval = 0;
- switch(offset)
- {
+ switch (offset) {
case APIC_OFFSET_ID:
lapic->id = data;
vlapic_id_write_handler(vlapic);
@@ -1464,7 +1456,7 @@ vlapic_reset(struct vlapic *vlapic)
struct LAPIC *lapic;
lapic = vlapic->apic_page;
- bzero(lapic, sizeof(struct LAPIC));
+ bzero(lapic, sizeof (struct LAPIC));
lapic->id = vlapic_get_id(vlapic);
lapic->version = VLAPIC_VERSION;
@@ -1652,7 +1644,7 @@ vlapic_isrstk_eoi(struct vlapic *vlapic, int vector)
{
if (vlapic->isrvec_stk_top <= 0) {
panic("invalid vlapic isrvec_stk_top %d",
- vlapic->isrvec_stk_top);
+ vlapic->isrvec_stk_top);
}
vlapic->isrvec_stk_top--;
vlapic_isrstk_verify(vlapic);
diff --git a/usr/src/uts/i86pc/io/vmm/io/vlapic_priv.h b/usr/src/uts/i86pc/io/vmm/io/vlapic_priv.h
index 1329ab5b36..69daf3652c 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vlapic_priv.h
+++ b/usr/src/uts/i86pc/io/vmm/io/vlapic_priv.h
@@ -48,53 +48,53 @@
/*
* APIC Register: Offset Description
*/
-#define APIC_OFFSET_ID 0x20 /* Local APIC ID */
-#define APIC_OFFSET_VER 0x30 /* Local APIC Version */
-#define APIC_OFFSET_TPR 0x80 /* Task Priority Register */
-#define APIC_OFFSET_APR 0x90 /* Arbitration Priority */
-#define APIC_OFFSET_PPR 0xA0 /* Processor Priority Register */
-#define APIC_OFFSET_EOI 0xB0 /* EOI Register */
-#define APIC_OFFSET_RRR 0xC0 /* Remote read */
-#define APIC_OFFSET_LDR 0xD0 /* Logical Destination */
-#define APIC_OFFSET_DFR 0xE0 /* Destination Format Register */
-#define APIC_OFFSET_SVR 0xF0 /* Spurious Vector Register */
-#define APIC_OFFSET_ISR0 0x100 /* In Service Register */
-#define APIC_OFFSET_ISR1 0x110
-#define APIC_OFFSET_ISR2 0x120
-#define APIC_OFFSET_ISR3 0x130
-#define APIC_OFFSET_ISR4 0x140
-#define APIC_OFFSET_ISR5 0x150
-#define APIC_OFFSET_ISR6 0x160
-#define APIC_OFFSET_ISR7 0x170
-#define APIC_OFFSET_TMR0 0x180 /* Trigger Mode Register */
-#define APIC_OFFSET_TMR1 0x190
-#define APIC_OFFSET_TMR2 0x1A0
-#define APIC_OFFSET_TMR3 0x1B0
-#define APIC_OFFSET_TMR4 0x1C0
-#define APIC_OFFSET_TMR5 0x1D0
-#define APIC_OFFSET_TMR6 0x1E0
-#define APIC_OFFSET_TMR7 0x1F0
-#define APIC_OFFSET_IRR0 0x200 /* Interrupt Request Register */
-#define APIC_OFFSET_IRR1 0x210
-#define APIC_OFFSET_IRR2 0x220
-#define APIC_OFFSET_IRR3 0x230
-#define APIC_OFFSET_IRR4 0x240
-#define APIC_OFFSET_IRR5 0x250
-#define APIC_OFFSET_IRR6 0x260
-#define APIC_OFFSET_IRR7 0x270
-#define APIC_OFFSET_ESR 0x280 /* Error Status Register */
-#define APIC_OFFSET_CMCI_LVT 0x2F0 /* Local Vector Table (CMCI) */
-#define APIC_OFFSET_ICR_LOW 0x300 /* Interrupt Command Register */
-#define APIC_OFFSET_ICR_HI 0x310
-#define APIC_OFFSET_TIMER_LVT 0x320 /* Local Vector Table (Timer) */
-#define APIC_OFFSET_THERM_LVT 0x330 /* Local Vector Table (Thermal) */
-#define APIC_OFFSET_PERF_LVT 0x340 /* Local Vector Table (PMC) */
-#define APIC_OFFSET_LINT0_LVT 0x350 /* Local Vector Table (LINT0) */
-#define APIC_OFFSET_LINT1_LVT 0x360 /* Local Vector Table (LINT1) */
-#define APIC_OFFSET_ERROR_LVT 0x370 /* Local Vector Table (ERROR) */
-#define APIC_OFFSET_TIMER_ICR 0x380 /* Timer's Initial Count */
-#define APIC_OFFSET_TIMER_CCR 0x390 /* Timer's Current Count */
-#define APIC_OFFSET_TIMER_DCR 0x3E0 /* Timer's Divide Configuration */
+#define APIC_OFFSET_ID 0x20 /* Local APIC ID */
+#define APIC_OFFSET_VER 0x30 /* Local APIC Version */
+#define APIC_OFFSET_TPR 0x80 /* Task Priority Register */
+#define APIC_OFFSET_APR 0x90 /* Arbitration Priority */
+#define APIC_OFFSET_PPR 0xA0 /* Processor Priority Register */
+#define APIC_OFFSET_EOI 0xB0 /* EOI Register */
+#define APIC_OFFSET_RRR 0xC0 /* Remote read */
+#define APIC_OFFSET_LDR 0xD0 /* Logical Destination */
+#define APIC_OFFSET_DFR 0xE0 /* Destination Format Register */
+#define APIC_OFFSET_SVR 0xF0 /* Spurious Vector Register */
+#define APIC_OFFSET_ISR0 0x100 /* In Service Register */
+#define APIC_OFFSET_ISR1 0x110
+#define APIC_OFFSET_ISR2 0x120
+#define APIC_OFFSET_ISR3 0x130
+#define APIC_OFFSET_ISR4 0x140
+#define APIC_OFFSET_ISR5 0x150
+#define APIC_OFFSET_ISR6 0x160
+#define APIC_OFFSET_ISR7 0x170
+#define APIC_OFFSET_TMR0 0x180 /* Trigger Mode Register */
+#define APIC_OFFSET_TMR1 0x190
+#define APIC_OFFSET_TMR2 0x1A0
+#define APIC_OFFSET_TMR3 0x1B0
+#define APIC_OFFSET_TMR4 0x1C0
+#define APIC_OFFSET_TMR5 0x1D0
+#define APIC_OFFSET_TMR6 0x1E0
+#define APIC_OFFSET_TMR7 0x1F0
+#define APIC_OFFSET_IRR0 0x200 /* Interrupt Request Register */
+#define APIC_OFFSET_IRR1 0x210
+#define APIC_OFFSET_IRR2 0x220
+#define APIC_OFFSET_IRR3 0x230
+#define APIC_OFFSET_IRR4 0x240
+#define APIC_OFFSET_IRR5 0x250
+#define APIC_OFFSET_IRR6 0x260
+#define APIC_OFFSET_IRR7 0x270
+#define APIC_OFFSET_ESR 0x280 /* Error Status Register */
+#define APIC_OFFSET_CMCI_LVT 0x2F0 /* Local Vector Table (CMCI) */
+#define APIC_OFFSET_ICR_LOW 0x300 /* Interrupt Command Register */
+#define APIC_OFFSET_ICR_HI 0x310
+#define APIC_OFFSET_TIMER_LVT 0x320 /* Local Vector Table (Timer) */
+#define APIC_OFFSET_THERM_LVT 0x330 /* Local Vector Table (Thermal) */
+#define APIC_OFFSET_PERF_LVT 0x340 /* Local Vector Table (PMC) */
+#define APIC_OFFSET_LINT0_LVT 0x350 /* Local Vector Table (LINT0) */
+#define APIC_OFFSET_LINT1_LVT 0x360 /* Local Vector Table (LINT1) */
+#define APIC_OFFSET_ERROR_LVT 0x370 /* Local Vector Table (ERROR) */
+#define APIC_OFFSET_TIMER_ICR 0x380 /* Timer's Initial Count */
+#define APIC_OFFSET_TIMER_CCR 0x390 /* Timer's Current Count */
+#define APIC_OFFSET_TIMER_DCR 0x3E0 /* Timer's Divide Configuration */
#define APIC_OFFSET_SELF_IPI 0x3F0 /* Self IPI register */
#define VLAPIC_CTR0(vlapic, format) \
@@ -148,9 +148,9 @@ enum boot_state {
*/
#define ISRVEC_STK_SIZE (16 + 1)
-#define VLAPIC_MAXLVT_INDEX APIC_LVT_CMCI
+#define VLAPIC_MAXLVT_INDEX APIC_LVT_CMCI
-#define VLAPIC_TMR_CNT 8
+#define VLAPIC_TMR_CNT 8
#ifdef DEBUG
#define __ISRVEC_DEBUG
diff --git a/usr/src/uts/i86pc/io/vmm/io/vpmtmr.c b/usr/src/uts/i86pc/io/vmm/io/vpmtmr.c
index 61a50b418c..2644ee61d6 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vpmtmr.c
+++ b/usr/src/uts/i86pc/io/vmm/io/vpmtmr.c
@@ -58,7 +58,7 @@ __FBSDID("$FreeBSD$");
* This implementation will be 32-bits
*/
-#define PMTMR_FREQ 3579545 /* 3.579545MHz */
+#define PMTMR_FREQ 3579545 /* 3.579545MHz */
struct vpmtmr {
struct vm *vm;
@@ -77,7 +77,7 @@ vpmtmr_init(struct vm *vm)
struct vpmtmr *vpmtmr;
struct bintime bt;
- vpmtmr = malloc(sizeof(struct vpmtmr), M_VPMTMR, M_WAITOK | M_ZERO);
+ vpmtmr = malloc(sizeof (struct vpmtmr), M_VPMTMR, M_WAITOK | M_ZERO);
vpmtmr->vm = vm;
vpmtmr->baseuptime = sbinuptime();
vpmtmr->baseval = 0;
diff --git a/usr/src/uts/i86pc/io/vmm/io/vrtc.c b/usr/src/uts/i86pc/io/vmm/io/vrtc.c
index 2f87446bdc..4b53883a79 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vrtc.c
+++ b/usr/src/uts/i86pc/io/vmm/io/vrtc.c
@@ -72,14 +72,14 @@ struct rtcdev {
uint8_t century;
uint8_t nvram2[128 - 51];
} __packed;
-CTASSERT(sizeof(struct rtcdev) == 128);
+CTASSERT(sizeof (struct rtcdev) == 128);
CTASSERT(offsetof(struct rtcdev, century) == RTC_CENTURY);
struct vrtc {
struct vm *vm;
struct mtx mtx;
struct callout callout;
- u_int addr; /* RTC register to read or write */
+ uint_t addr; /* RTC register to read or write */
sbintime_t base_uptime;
time_t base_rtctime;
struct rtcdev rtcdev;
@@ -113,9 +113,8 @@ SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vrtc, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
NULL);
+/* Stop guest when invalid RTC time is detected */
static int rtc_flag_broken_time = 1;
-SYSCTL_INT(_hw_vmm_vrtc, OID_AUTO, flag_broken_time, CTLFLAG_RDTUN,
- &rtc_flag_broken_time, 0, "Stop guest when invalid RTC time is detected");
static __inline bool
divider_enabled(int reg_a)
@@ -292,7 +291,7 @@ rtc_to_secs(struct vrtc *vrtc)
rtc = &vrtc->rtcdev;
- bzero(&ct, sizeof(struct clocktime));
+ bzero(&ct, sizeof (struct clocktime));
error = rtcget(rtc, rtc->sec, &ct.sec);
if (error || ct.sec < 0 || ct.sec > 59) {
@@ -785,7 +784,7 @@ vrtc_nvram_write(struct vm *vm, int offset, uint8_t value)
* Don't allow writes to RTC control registers or the date/time fields.
*/
if (offset < offsetof(struct rtcdev, nvram[0]) ||
- offset == RTC_CENTURY || offset >= sizeof(struct rtcdev)) {
+ offset == RTC_CENTURY || offset >= sizeof (struct rtcdev)) {
VM_CTR1(vrtc->vm, "RTC nvram write to invalid offset %d",
offset);
return (EINVAL);
@@ -811,7 +810,7 @@ vrtc_nvram_read(struct vm *vm, int offset, uint8_t *retval)
/*
* Allow all offsets in the RTC to be read.
*/
- if (offset < 0 || offset >= sizeof(struct rtcdev))
+ if (offset < 0 || offset >= sizeof (struct rtcdev))
return (EINVAL);
vrtc = vm_rtc(vm);
@@ -868,7 +867,7 @@ vrtc_data_handler(void *arg, bool in, uint16_t port, uint8_t bytes,
VRTC_LOCK(vrtc);
offset = vrtc->addr;
- if (offset >= sizeof(struct rtcdev)) {
+ if (offset >= sizeof (struct rtcdev)) {
VRTC_UNLOCK(vrtc);
return (-1);
}
@@ -969,7 +968,7 @@ vrtc_init(struct vm *vm)
struct rtcdev *rtc;
time_t curtime;
- vrtc = malloc(sizeof(struct vrtc), M_VRTC, M_WAITOK | M_ZERO);
+ vrtc = malloc(sizeof (struct vrtc), M_VRTC, M_WAITOK | M_ZERO);
vrtc->vm = vm;
mtx_init(&vrtc->mtx, "vrtc lock", NULL, MTX_DEF);
callout_init(&vrtc->callout, 1);
diff --git a/usr/src/uts/i86pc/io/vmm/sys/vmm_kernel.h b/usr/src/uts/i86pc/io/vmm/sys/vmm_kernel.h
index b566e503e0..863d31cfeb 100644
--- a/usr/src/uts/i86pc/io/vmm/sys/vmm_kernel.h
+++ b/usr/src/uts/i86pc/io/vmm/sys/vmm_kernel.h
@@ -65,7 +65,7 @@ struct vm_guest_paging;
struct pmap;
struct vm_eventinfo {
- u_int *rptr; /* runblock cookie */
+ uint_t *rptr; /* runblock cookie */
int *sptr; /* suspend cookie */
int *iptr; /* reqidle cookie */
};
@@ -75,21 +75,21 @@ typedef int (*vmm_cleanup_func_t)(void);
typedef void (*vmm_resume_func_t)(void);
typedef void * (*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
typedef int (*vmi_run_func_t)(void *vmi, int vcpu, uint64_t rip,
- struct pmap *pmap, struct vm_eventinfo *info);
+ struct pmap *pmap, struct vm_eventinfo *info);
typedef void (*vmi_cleanup_func_t)(void *vmi);
typedef int (*vmi_get_register_t)(void *vmi, int vcpu, int num,
- uint64_t *retval);
+ uint64_t *retval);
typedef int (*vmi_set_register_t)(void *vmi, int vcpu, int num,
- uint64_t val);
+ uint64_t val);
typedef int (*vmi_get_desc_t)(void *vmi, int vcpu, int num,
- struct seg_desc *desc);
+ struct seg_desc *desc);
typedef int (*vmi_set_desc_t)(void *vmi, int vcpu, int num,
- struct seg_desc *desc);
+ struct seg_desc *desc);
typedef int (*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int (*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
-typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
+typedef struct vmspace *(*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
typedef void (*vmi_vmspace_free)(struct vmspace *vmspace);
-typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, int vcpu);
+typedef struct vlapic *(*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void (*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
#ifndef __FreeBSD__
typedef void (*vmi_savectx)(void *vmi, int vcpu);
@@ -430,4 +430,3 @@ void vm_ioport_unhook(struct vm *, void **);
#endif /* __FreeBSD */
#endif /* _VMM_KERNEL_H_ */
-
diff --git a/usr/src/uts/i86pc/io/vmm/vmm.c b/usr/src/uts/i86pc/io/vmm/vmm.c
index 3cd89f9fe6..ed7a97921a 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm.c
+++ b/usr/src/uts/i86pc/io/vmm/vmm.c
@@ -119,7 +119,7 @@ struct vcpu {
#ifndef __FreeBSD__
int lastloccpu; /* (o) last host cpu localized to */
#endif
- u_int runblock; /* (i) block vcpu from run state */
+ uint_t runblock; /* (i) block vcpu from run state */
int reqidle; /* (i) request vcpu to idle */
struct vlapic *vlapic; /* (i) APIC device model */
enum x2apic_state x2apic_state; /* (i) APIC mode */
@@ -184,7 +184,7 @@ struct vm {
struct vpmtmr *vpmtmr; /* (i) virtual ACPI PM timer */
struct vrtc *vrtc; /* (o) virtual RTC */
volatile cpuset_t active_cpus; /* (i) active vcpus */
- volatile cpuset_t debug_cpus; /* (i) vcpus stopped for debug */
+ volatile cpuset_t debug_cpus; /* (i) vcpus stopped for dbg */
int suspend; /* (i) stop VM execution */
volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
@@ -250,8 +250,8 @@ static struct vmm_ops *ops = &vmm_ops_null;
#define VMINIT(vm, pmap) ((*ops->vminit)(vm, pmap))
#define VMRUN(vmi, vcpu, rip, pmap, evinfo) \
- ((*ops->vmrun)(vmi, vcpu, rip, pmap, evinfo) )
-#define VMCLEANUP(vmi) ((*ops->vmcleanup)(vmi) )
+ ((*ops->vmrun)(vmi, vcpu, rip, pmap, evinfo))
+#define VMCLEANUP(vmi) ((*ops->vmcleanup)(vmi))
#define VMSPACE_ALLOC(min, max) ((*ops->vmspace_alloc)(min, max))
#define VMSPACE_FREE(vmspace) ((*ops->vmspace_free)(vmspace))
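
The macros in this hunk only lose a stray space before the closing parenthesis; they dispatch through the vmm_ops function-pointer table so the generic vmm.c code can drive whichever backend registered itself. A stripped-down sketch of that dispatch pattern, with made-up backend names and a reduced argument list, showing what the wrappers expand to:

#include <stdio.h>

/* Minimal stand-in for a per-backend operations table. */
struct demo_ops {
	int	(*vmrun)(void *vmi, int vcpu);
	void	(*vmcleanup)(void *vmi);
};

static int
demo_vmrun(void *vmi, int vcpu)
{
	printf("running vcpu %d of %s\n", vcpu, (const char *)vmi);
	return (0);
}

static void
demo_vmcleanup(void *vmi)
{
	printf("cleaning up %s\n", (const char *)vmi);
}

static struct demo_ops demo = { demo_vmrun, demo_vmcleanup };
static struct demo_ops *ops = &demo;

/* Same shape as the VMRUN/VMCLEANUP wrappers above. */
#define	VMRUN(vmi, vcpu)	((*ops->vmrun)(vmi, vcpu))
#define	VMCLEANUP(vmi)		((*ops->vmcleanup)(vmi))

int
main(void)
{
	char name[] = "demo-vm";

	(void) VMRUN(name, 0);
	VMCLEANUP(name);
	return (0);
}
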
@@ -282,18 +282,12 @@ SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
* interrupts disabled.
*/
static int halt_detection_enabled = 1;
-SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
- &halt_detection_enabled, 0,
- "Halt VM if all vcpus execute HLT with interrupts disabled");
+/* IPI vector used for vcpu notifications */
static int vmm_ipinum;
-SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
- "IPI vector used for vcpu notifications");
+/* Trap into hypervisor on all guest exceptions and reflect them back */
static int trace_guest_exceptions;
-SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
- &trace_guest_exceptions, 0,
- "Trap into hypervisor on all guest exceptions and reflect them back");
static void vm_free_memmap(struct vm *vm, int ident);
static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
@@ -523,8 +517,8 @@ vm_init(struct vm *vm, bool create)
/*
* The default CPU topology is a single thread per package.
*/
-u_int cores_per_package = 1;
-u_int threads_per_core = 1;
+uint_t cores_per_package = 1;
+uint_t threads_per_core = 1;
int
vm_create(const char *name, struct vm **retvm)
@@ -546,7 +540,7 @@ vm_create(const char *name, struct vm **retvm)
if (vmspace == NULL)
return (ENOMEM);
- vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
+ vm = malloc(sizeof (struct vm), M_VM, M_WAITOK | M_ZERO);
strcpy(vm->name, name);
vm->vmspace = vmspace;
@@ -590,7 +584,7 @@ vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
vm->cores = cores;
vm->threads = threads;
vm->maxcpus = VM_MAXCPU; /* XXX temp to keep code working */
- return(0);
+ return (0);
}
static void
@@ -837,7 +831,7 @@ vm_free_memseg(struct vm *vm, int ident)
seg = &vm->mem_segs[ident];
if (seg->object != NULL) {
vm_object_deallocate(seg->object);
- bzero(seg, sizeof(struct mem_seg));
+ bzero(seg, sizeof (struct mem_seg));
}
}
@@ -954,7 +948,7 @@ vm_free_memmap(struct vm *vm, int ident)
mm->gpa + mm->len);
KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
__func__, error));
- bzero(mm, sizeof(struct mem_map));
+ bzero(mm, sizeof (struct mem_map));
}
}
@@ -1025,7 +1019,7 @@ vm_iommu_modify(struct vm *vm, bool map)
gpa = mm->gpa;
while (gpa < mm->gpa + mm->len) {
vp = vm_gpa_hold(vm, -1, gpa, PAGE_SIZE, VM_PROT_WRITE,
- &cookie);
+ &cookie);
KASSERT(vp != NULL, ("vm(%s) could not map gpa %lx",
vm_name(vm), gpa));
@@ -1065,21 +1059,12 @@ vm_iommu_modify(struct vm *vm, bool map)
#define vm_iommu_unmap(vm) vm_iommu_modify((vm), false)
#define vm_iommu_map(vm) vm_iommu_modify((vm), true)
-#ifdef __FreeBSD__
-int
-vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
-#else
int
vm_unassign_pptdev(struct vm *vm, int pptfd)
-#endif /* __FreeBSD__ */
{
int error;
-#ifdef __FreeBSD__
- error = ppt_unassign_device(vm, bus, slot, func);
-#else
error = ppt_unassign_device(vm, pptfd);
-#endif /* __FreeBSD__ */
if (error)
return (error);
@@ -1089,13 +1074,8 @@ vm_unassign_pptdev(struct vm *vm, int pptfd)
return (0);
}
-#ifdef __FreeBSD__
-int
-vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
-#else
int
vm_assign_pptdev(struct vm *vm, int pptfd)
-#endif /* __FreeBSD__ */
{
int error;
vm_paddr_t maxaddr;
@@ -1111,17 +1091,13 @@ vm_assign_pptdev(struct vm *vm, int pptfd)
vm_iommu_map(vm);
}
-#ifdef __FreeBSD__
- error = ppt_assign_device(vm, bus, slot, func);
-#else
error = ppt_assign_device(vm, pptfd);
-#endif /* __FreeBSD__ */
return (error);
}
void *
vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
- void **cookie)
+ void **cookie)
{
int i, count, pageoff;
struct mem_map *mm;
@@ -1244,8 +1220,7 @@ is_segment_register(int reg)
}
int
-vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
- struct seg_desc *desc)
+vm_get_seg_desc(struct vm *vm, int vcpu, int reg, struct seg_desc *desc)
{
if (vcpu < 0 || vcpu >= vm->maxcpus)
@@ -1258,8 +1233,7 @@ vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
}
int
-vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
- struct seg_desc *desc)
+vm_set_seg_desc(struct vm *vm, int vcpu, int reg, struct seg_desc *desc)
{
if (vcpu < 0 || vcpu >= vm->maxcpus)
return (EINVAL);
@@ -3264,7 +3238,7 @@ vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
if (copyinfo[idx].cookie != NULL)
vm_gpa_release(copyinfo[idx].cookie);
}
- bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
+ bzero(copyinfo, num_copyinfo * sizeof (struct vm_copyinfo));
}
int
@@ -3277,7 +3251,7 @@ vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
void *hva, *cookie;
uint64_t gpa;
- bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);
+ bzero(copyinfo, sizeof (struct vm_copyinfo) * num_copyinfo);
nused = 0;
remaining = len;
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_host.h b/usr/src/uts/i86pc/io/vmm/vmm_host.h
index 1b3e84184a..c5688f108a 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_host.h
+++ b/usr/src/uts/i86pc/io/vmm/vmm_host.h
@@ -78,12 +78,7 @@ const struct xsave_limits *vmm_get_xsave_limits(void);
static __inline uint64_t
vmm_get_host_trbase(void)
{
-
-#ifdef __FreeBSD__
- return ((uint64_t)PCPU_GET(tssp));
-#else
- return ((u_long)CPU->cpu_tss);
-#endif
+ return ((uint64_t)CPU->cpu_tss);
}
static __inline uint64_t
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_instruction_emul.c b/usr/src/uts/i86pc/io/vmm/vmm_instruction_emul.c
index 31f6ea75b5..02b87a79f6 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_instruction_emul.c
+++ b/usr/src/uts/i86pc/io/vmm/vmm_instruction_emul.c
@@ -131,8 +131,8 @@ struct vie {
uint8_t scale;
- uint8_t vex_reg:4, /* vvvv: first source register specifier */
- vex_pp:2, /* pp */
+ uint8_t vex_reg:4, /* vvvv: first source reg specifier */
+ vex_pp:2, /* pp */
_sparebits:2;
uint8_t _sparebytes[2];
@@ -537,23 +537,25 @@ vie_repeat(struct vie *vie)
/*
* Return the status flags that would result from doing (x - y).
*/
+/* BEGIN CSTYLED */
#define GETCC(sz) \
-static u_long \
+static ulong_t \
getcc##sz(uint##sz##_t x, uint##sz##_t y) \
{ \
- u_long rflags; \
+ ulong_t rflags; \
\
__asm __volatile("sub %2,%1; pushfq; popq %0" : \
"=r" (rflags), "+r" (x) : "m" (y)); \
return (rflags); \
} struct __hack
+/* END CSTYLED */
GETCC(8);
GETCC(16);
GETCC(32);
GETCC(64);
-static u_long
+static ulong_t
getcc(int opsize, uint64_t x, uint64_t y)
{
KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
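
GETCC(sz) stamps out getcc8/16/32/64, each of which performs the subtraction on the host and captures the resulting RFLAGS with pushfq/popq so the emulator can derive the guest's status flags. A standalone sketch of the same trick (x86-64 with GCC/Clang inline asm only), written out for 32-bit operands and checking that a borrow sets the carry flag:

#include <stdio.h>
#include <stdint.h>

#define	PSL_C	0x0001UL	/* carry flag in RFLAGS */

/* Same pattern as getcc##sz above, expanded by hand for sz == 32. */
static unsigned long
getcc32_demo(uint32_t x, uint32_t y)
{
	unsigned long rflags;

	__asm __volatile("sub %2,%1; pushfq; popq %0" :
	    "=r" (rflags), "+r" (x) : "m" (y));
	return (rflags);
}

int
main(void)
{
	/* 1 - 2 borrows, so CF must be set in the captured flags. */
	printf("CF after 1 - 2: %lu\n", getcc32_demo(1, 2) & PSL_C);
	return (0);
}
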
@@ -572,23 +574,25 @@ getcc(int opsize, uint64_t x, uint64_t y)
/*
* Macro creation of functions getaddflags{8,16,32,64}
*/
+/* BEGIN CSTYLED */
#define GETADDFLAGS(sz) \
-static u_long \
+static ulong_t \
getaddflags##sz(uint##sz##_t x, uint##sz##_t y) \
{ \
- u_long rflags; \
+ ulong_t rflags; \
\
__asm __volatile("add %2,%1; pushfq; popq %0" : \
"=r" (rflags), "+r" (x) : "m" (y)); \
return (rflags); \
} struct __hack
+/* END CSTYLED */
GETADDFLAGS(8);
GETADDFLAGS(16);
GETADDFLAGS(32);
GETADDFLAGS(64);
-static u_long
+static ulong_t
getaddflags(int opsize, uint64_t x, uint64_t y)
{
KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
@@ -607,23 +611,25 @@ getaddflags(int opsize, uint64_t x, uint64_t y)
/*
* Return the status flags that would result from doing (x & y).
*/
+/* BEGIN CSTYLED */
#define GETANDFLAGS(sz) \
-static u_long \
+static ulong_t \
getandflags##sz(uint##sz##_t x, uint##sz##_t y) \
{ \
- u_long rflags; \
+ ulong_t rflags; \
\
__asm __volatile("and %2,%1; pushfq; popq %0" : \
"=r" (rflags), "+r" (x) : "m" (y)); \
return (rflags); \
} struct __hack
+/* END CSTYLED */
GETANDFLAGS(8);
GETANDFLAGS(16);
GETANDFLAGS(32);
GETANDFLAGS(64);
-static u_long
+static ulong_t
getandflags(int opsize, uint64_t x, uint64_t y)
{
KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
@@ -659,8 +665,10 @@ vie_emulate_mov(struct vie *vie, struct vm *vm, int vcpuid, uint64_t gpa)
*/
size = 1; /* override for byte operation */
error = vie_read_bytereg(vie, vm, vcpuid, &byte);
- if (error == 0)
- error = vie_mmio_write(vie, vm, vcpuid, gpa, byte, size);
+ if (error == 0) {
+ error = vie_mmio_write(vie, vm, vcpuid, gpa, byte,
+ size);
+ }
break;
case 0x89:
/*
@@ -1021,7 +1029,7 @@ vie_emulate_movs(struct vie *vie, struct vm *vm, int vcpuid, uint64_t gpa)
goto done;
error = vm_gla2gpa(vm, vcpuid, paging, dstaddr,
- PROT_WRITE, &dstgpa, &fault);
+ PROT_WRITE, &dstgpa, &fault);
if (error || fault)
goto done;
@@ -1190,16 +1198,16 @@ vie_emulate_and(struct vie *vie, struct vm *vm, int vcpuid, uint64_t gpa)
*/
/* get the first operand */
- error = vie_mmio_read(vie, vm, vcpuid, gpa, &val1, size);
- if (error)
+ error = vie_mmio_read(vie, vm, vcpuid, gpa, &val1, size);
+ if (error)
break;
- /*
+ /*
* perform the operation with the pre-fetched immediate
* operand and write the result
*/
- result = val1 & vie->immediate;
- error = vie_mmio_write(vie, vm, vcpuid, gpa, result, size);
+ result = val1 & vie->immediate;
+ error = vie_mmio_write(vie, vm, vcpuid, gpa, result, size);
break;
default:
break;
@@ -1241,9 +1249,9 @@ vie_emulate_or(struct vie *vie, struct vm *vm, int vcpuid, uint64_t gpa)
* OR reg (ModRM:reg) and mem (ModRM:r/m) and store the
* result in reg.
*
- * 0b/r or r16, r/m16
- * 0b/r or r32, r/m32
- * REX.W + 0b/r or r64, r/m64
+ * 0b/r or r16, r/m16
+ * 0b/r or r32, r/m32
+ * REX.W + 0b/r or r64, r/m64
*/
/* get the first operand */
@@ -1277,16 +1285,16 @@ vie_emulate_or(struct vie *vie, struct vm *vm, int vcpuid, uint64_t gpa)
*/
/* get the first operand */
- error = vie_mmio_read(vie, vm, vcpuid, gpa, &val1, size);
- if (error)
+ error = vie_mmio_read(vie, vm, vcpuid, gpa, &val1, size);
+ if (error)
break;
- /*
+ /*
* perform the operation with the pre-fetched immediate
* operand and write the result
*/
- result = val1 | vie->immediate;
- error = vie_mmio_write(vie, vm, vcpuid, gpa, result, size);
+ result = val1 | vie->immediate;
+ error = vie_mmio_write(vie, vm, vcpuid, gpa, result, size);
break;
default:
break;
@@ -1384,7 +1392,7 @@ vie_emulate_cmp(struct vie *vie, struct vm *vm, int vcpuid, uint64_t gpa)
size = 1;
/* get the first operand */
- error = vie_mmio_read(vie, vm, vcpuid, gpa, &op1, size);
+ error = vie_mmio_read(vie, vm, vcpuid, gpa, &op1, size);
if (error)
return (error);
@@ -1543,9 +1551,9 @@ vie_emulate_add(struct vie *vie, struct vm *vm, int vcpuid, uint64_t gpa)
/*
* ADD r/m to r and store the result in r
*
- * 03/r ADD r16, r/m16
- * 03/r ADD r32, r/m32
- * REX.W + 03/r ADD r64, r/m64
+ * 03/r ADD r16, r/m16
+ * 03/r ADD r32, r/m32
+ * REX.W + 03/r ADD r64, r/m64
*/
/* get the first operand */
@@ -1598,9 +1606,9 @@ vie_emulate_sub(struct vie *vie, struct vm *vm, int vcpuid, uint64_t gpa)
/*
* SUB r/m from r and store the result in r
*
- * 2B/r SUB r16, r/m16
- * 2B/r SUB r32, r/m32
- * REX.W + 2B/r SUB r64, r/m64
+ * 2B/r SUB r16, r/m16
+ * 2B/r SUB r32, r/m32
+ * REX.W + 2B/r SUB r64, r/m64
*/
/* get the first operand */
@@ -2653,9 +2661,6 @@ _vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
int ptpshift = 0, ptpindex = 0;
uint64_t ptpphys;
uint64_t *ptpbase = NULL, pte = 0, pgsize = 0;
-#ifdef __FreeBSD__
- u_int retries;
-#endif
uint32_t *ptpbase32, pte32;
void *cookie;
@@ -2665,16 +2670,9 @@ _vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
writable = prot & VM_PROT_WRITE;
cookie = NULL;
retval = 0;
-#ifdef __FreeBSD__
- retries = 0;
-#endif
restart:
ptpphys = paging->cr3; /* root of the page tables */
ptp_release(&cookie);
-#ifdef __FreeBSD__
- if (retries++ > 0)
- maybe_yield();
-#endif
if (vie_canonical_check(paging->cpu_mode, gla)) {
/*
@@ -2713,8 +2711,8 @@ restart:
(usermode && (pte32 & PG_U) == 0) ||
(writable && (pte32 & PG_RW) == 0)) {
if (!check_only) {
- pfcode = pf_error_code(usermode, prot, 0,
- pte32);
+ pfcode = pf_error_code(usermode, prot,
+ 0, pte32);
vm_inject_pf(vm, vcpuid, pfcode, gla);
}
goto fault;
@@ -2759,7 +2757,7 @@ restart:
/* Zero out the lower 5 bits and the upper 32 bits */
ptpphys &= 0xffffffe0UL;
- ptpbase = ptp_hold(vm, vcpuid, ptpphys, sizeof(*ptpbase) * 4,
+ ptpbase = ptp_hold(vm, vcpuid, ptpphys, sizeof (*ptpbase) * 4,
&cookie);
if (ptpbase == NULL)
goto error;
@@ -2816,8 +2814,8 @@ restart:
if (nlevels > 0 && (pte & PG_PS) != 0) {
if (pgsize > 1 * GB) {
if (!check_only) {
- pfcode = pf_error_code(usermode, prot, 1,
- pte);
+ pfcode = pf_error_code(usermode, prot,
+ 1, pte);
vm_inject_pf(vm, vcpuid, pfcode, gla);
}
goto fault;
@@ -2984,8 +2982,8 @@ decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d)
/*
* ยง 2.3.5, "The VEX Prefix", SDM Vol 2.
	 * § 2.3.5, "The VEX Prefix", SDM Vol 2.
*/
- if ((cpu_mode == CPU_MODE_64BIT || cpu_mode == CPU_MODE_COMPATIBILITY)
- && x == 0xC4) {
+ if ((cpu_mode == CPU_MODE_64BIT ||
+ cpu_mode == CPU_MODE_COMPATIBILITY) && x == 0xC4) {
const struct vie_op *optab;
/* 3-byte VEX prefix. */
@@ -3420,7 +3418,7 @@ vie_verify_gla(struct vie *vie, struct vm *vm, int cpuid, uint64_t gla)
error = vm_get_register(vm, cpuid, vie->base_register, &base);
if (error) {
printf("verify_gla: error %d getting base reg %d\n",
- error, vie->base_register);
+ error, vie->base_register);
return (-1);
}
@@ -3437,7 +3435,7 @@ vie_verify_gla(struct vie *vie, struct vm *vm, int cpuid, uint64_t gla)
error = vm_get_register(vm, cpuid, vie->index_register, &idx);
if (error) {
printf("verify_gla: error %d getting index reg %d\n",
- error, vie->index_register);
+ error, vie->index_register);
return (-1);
}
}
@@ -3470,8 +3468,7 @@ vie_verify_gla(struct vie *vie, struct vm *vm, int cpuid, uint64_t gla)
error = vm_get_seg_desc(vm, cpuid, seg, &desc);
if (error) {
printf("verify_gla: error %d getting segment"
- " descriptor %d", error,
- vie->segment_register);
+ " descriptor %d", error, vie->segment_register);
return (-1);
}
segbase = desc.base;
@@ -3481,10 +3478,10 @@ vie_verify_gla(struct vie *vie, struct vm *vm, int cpuid, uint64_t gla)
gla2 &= size2mask[vie->addrsize];
if (gla != gla2) {
printf("verify_gla mismatch: segbase(0x%0lx)"
- "base(0x%0lx), scale(%d), index(0x%0lx), "
- "disp(0x%0lx), gla(0x%0lx), gla2(0x%0lx)\n",
- segbase, base, vie->scale, idx, vie->displacement,
- gla, gla2);
+ "base(0x%0lx), scale(%d), index(0x%0lx), "
+ "disp(0x%0lx), gla(0x%0lx), gla2(0x%0lx)\n",
+ segbase, base, vie->scale, idx, vie->displacement,
+ gla, gla2);
return (-1);
}
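
The gla2 cross-check above recomputes the standard x86 effective address: segment base plus base register plus scaled index plus displacement, truncated to the instruction's address size. A worked sketch with illustrative values (the variable names echo the printf above; the arithmetic is generic x86, not lifted from the decoder):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t segbase = 0;			/* flat segment */
	uint64_t base = 0x1000;			/* e.g. %rbx */
	uint64_t idx = 0x20;			/* e.g. %rsi */
	int scale = 8;				/* SIB scale: 1, 2, 4 or 8 */
	int64_t disp = -0x10;
	uint64_t mask = UINT64_MAX;		/* 64-bit address size */

	uint64_t gla = (segbase + base + (uint64_t)scale * idx +
	    (uint64_t)disp) & mask;

	/* 0x1000 + 8*0x20 - 0x10 = 0x10f0 */
	printf("gla = 0x%lx\n", (unsigned long)gla);
	return (0);
}
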
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_ioport.h b/usr/src/uts/i86pc/io/vmm/vmm_ioport.h
index 2a448133d9..254ba002f2 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_ioport.h
+++ b/usr/src/uts/i86pc/io/vmm/vmm_ioport.h
@@ -74,7 +74,8 @@ int vm_inout_detach(struct ioport_config *ports, uint16_t port, bool drv_hook,
int vm_inout_access(struct ioport_config *ports, bool in, uint16_t port,
uint8_t bytes, uint32_t *val);
-/* Arbitrary cookie for io port hook:
+/*
+ * Arbitrary cookie for io port hook:
* - top 48 bits: func address + arg
* - lower 16 bits: port
*/
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_ktr.h b/usr/src/uts/i86pc/io/vmm/vmm_ktr.h
index 414d0341cc..2e706ffc57 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_ktr.h
+++ b/usr/src/uts/i86pc/io/vmm/vmm_ktr.h
@@ -39,33 +39,34 @@
#endif
#define VCPU_CTR0(vm, vcpuid, format) \
-CTR2(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid))
+ CTR2(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid))
#define VCPU_CTR1(vm, vcpuid, format, p1) \
-CTR3(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1))
+ CTR3(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1))
#define VCPU_CTR2(vm, vcpuid, format, p1, p2) \
-CTR4(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1), (p2))
+ CTR4(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1), (p2))
#define VCPU_CTR3(vm, vcpuid, format, p1, p2, p3) \
-CTR5(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1), (p2), (p3))
+ CTR5(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), \
+ (p1), (p2), (p3))
#define VCPU_CTR4(vm, vcpuid, format, p1, p2, p3, p4) \
-CTR6(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), \
- (p1), (p2), (p3), (p4))
+ CTR6(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), \
+ (p1), (p2), (p3), (p4))
#define VM_CTR0(vm, format) \
-CTR1(KTR_VMM, "vm %s: " format, vm_name((vm)))
+ CTR1(KTR_VMM, "vm %s: " format, vm_name((vm)))
#define VM_CTR1(vm, format, p1) \
-CTR2(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1))
+ CTR2(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1))
#define VM_CTR2(vm, format, p1, p2) \
-CTR3(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2))
+ CTR3(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2))
#define VM_CTR3(vm, format, p1, p2, p3) \
-CTR4(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2), (p3))
+ CTR4(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2), (p3))
#define VM_CTR4(vm, format, p1, p2, p3, p4) \
-CTR5(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2), (p3), (p4))
+ CTR5(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2), (p3), (p4))
#endif
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_lapic.c b/usr/src/uts/i86pc/io/vmm/vmm_lapic.c
index 3de67f012d..f28a2f1ffd 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_lapic.c
+++ b/usr/src/uts/i86pc/io/vmm/vmm_lapic.c
@@ -151,30 +151,30 @@ lapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg)
}
static bool
-x2apic_msr(u_int msr)
+x2apic_msr(uint_t msr)
{
return (msr >= 0x800 && msr <= 0xBFF);
}
-static u_int
-x2apic_msr_to_regoff(u_int msr)
+static uint_t
+x2apic_msr_to_regoff(uint_t msr)
{
return ((msr - 0x800) << 4);
}
bool
-lapic_msr(u_int msr)
+lapic_msr(uint_t msr)
{
return (x2apic_msr(msr) || msr == MSR_APICBASE);
}
int
-lapic_rdmsr(struct vm *vm, int cpu, u_int msr, uint64_t *rval)
+lapic_rdmsr(struct vm *vm, int cpu, uint_t msr, uint64_t *rval)
{
int error;
- u_int offset;
+ uint_t offset;
struct vlapic *vlapic;
vlapic = vm_lapic(vm, cpu);
@@ -191,10 +191,10 @@ lapic_rdmsr(struct vm *vm, int cpu, u_int msr, uint64_t *rval)
}
int
-lapic_wrmsr(struct vm *vm, int cpu, u_int msr, uint64_t val)
+lapic_wrmsr(struct vm *vm, int cpu, uint_t msr, uint64_t val)
{
int error;
- u_int offset;
+ uint_t offset;
struct vlapic *vlapic;
vlapic = vm_lapic(vm, cpu);
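
The rdmsr/wrmsr paths above funnel x2APIC MSR accesses into the same register handlers as MMIO by converting the MSR number to a register offset: each x2APIC MSR corresponds to a register spaced 0x10 bytes apart in the legacy map, so the offset is (msr - 0x800) << 4. For example, the TPR MSR 0x808 lands at offset 0x80, matching APIC_OFFSET_TPR in vlapic_priv.h above. A small check of that mapping (the _demo suffix marks it as an illustration, not the driver function):

#include <stdio.h>
#include <stdint.h>

#define	MSR_X2APIC_TPR	0x808	/* x2APIC Task Priority Register */
#define	APIC_OFFSET_TPR	0x80	/* same register in the MMIO map */

/* Same conversion as x2apic_msr_to_regoff() above. */
static uint32_t
x2apic_msr_to_regoff_demo(uint32_t msr)
{
	return ((msr - 0x800) << 4);
}

int
main(void)
{
	printf("MSR %#x -> offset %#x (expect %#x)\n", MSR_X2APIC_TPR,
	    x2apic_msr_to_regoff_demo(MSR_X2APIC_TPR), APIC_OFFSET_TPR);
	return (0);
}
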
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_lapic.h b/usr/src/uts/i86pc/io/vmm/vmm_lapic.h
index b0e877048e..037b15a342 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_lapic.h
+++ b/usr/src/uts/i86pc/io/vmm/vmm_lapic.h
@@ -45,9 +45,9 @@
struct vm;
-bool lapic_msr(u_int num);
-int lapic_rdmsr(struct vm *vm, int cpu, u_int msr, uint64_t *rval);
-int lapic_wrmsr(struct vm *vm, int cpu, u_int msr, uint64_t wval);
+bool lapic_msr(uint_t num);
+int lapic_rdmsr(struct vm *vm, int cpu, uint_t msr, uint64_t *rval);
+int lapic_wrmsr(struct vm *vm, int cpu, uint_t msr, uint64_t wval);
int lapic_mmio_read(struct vm *vm, int cpu, uint64_t gpa, uint64_t *rval,
int size);
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_mem.c b/usr/src/uts/i86pc/io/vmm/vmm_mem.c
index cd894dc84d..443a6ffc6f 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_mem.c
+++ b/usr/src/uts/i86pc/io/vmm/vmm_mem.c
@@ -59,7 +59,7 @@ vmm_mem_init(void)
vm_object_t
vmm_mmio_alloc(struct vmspace *vmspace, vm_paddr_t gpa, size_t len,
- vm_paddr_t hpa)
+ vm_paddr_t hpa)
{
int error;
vm_object_t obj;
@@ -83,10 +83,10 @@ vmm_mmio_alloc(struct vmspace *vmspace, vm_paddr_t gpa, size_t len,
VM_OBJECT_WUNLOCK(obj);
if (error != KERN_SUCCESS) {
panic("vmm_mmio_alloc: vm_object_set_memattr error %d",
- error);
+ error);
}
error = vm_map_find(&vmspace->vm_map, obj, 0, &gpa, len, 0,
- VMFS_NO_SPACE, VM_PROT_RW, VM_PROT_RW, 0);
+ VMFS_NO_SPACE, VM_PROT_RW, VM_PROT_RW, 0);
if (error != KERN_SUCCESS) {
vm_object_deallocate(obj);
obj = NULL;
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_mem.h b/usr/src/uts/i86pc/io/vmm/vmm_mem.h
index e6f88fb222..eaa499c51a 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_mem.h
+++ b/usr/src/uts/i86pc/io/vmm/vmm_mem.h
@@ -48,7 +48,7 @@ struct vm_object;
int vmm_mem_init(void);
struct vm_object *vmm_mmio_alloc(struct vmspace *, vm_paddr_t gpa, size_t len,
- vm_paddr_t hpa);
+ vm_paddr_t hpa);
void vmm_mmio_free(struct vmspace *, vm_paddr_t gpa, size_t size);
vm_paddr_t vmm_mem_maxaddr(void);
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_sol_glue.c b/usr/src/uts/i86pc/io/vmm/vmm_sol_glue.c
index 696052d7d6..58fdc500fa 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_sol_glue.c
+++ b/usr/src/uts/i86pc/io/vmm/vmm_sol_glue.c
@@ -80,7 +80,7 @@ sysinit(void)
(*si)->func((*si)->data);
}
-u_char const bin2bcd_data[] = {
+uint8_t const bin2bcd_data[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
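
bin2bcd_data is a straight lookup table: entry n holds n re-encoded as packed BCD (tens digit in the high nibble, ones digit in the low), which is what the RTC emulation needs when the guest has not selected binary mode. A tiny check of the encoding, reproducing only the first three rows shown above:

#include <stdio.h>
#include <stdint.h>

static const uint8_t bin2bcd_data[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
};

int
main(void)
{
	int n = 29;

	/* 29 decimal becomes 0x29: tens in the high nibble, ones low. */
	printf("bcd(%d) = 0x%02x\n", n, bin2bcd_data[n]);
	return (0);
}
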
@@ -411,7 +411,7 @@ vmm_glue_callout_localize(struct callout *c)
}
void
-ipi_cpu(int cpu, u_int ipi)
+ipi_cpu(int cpu, uint_t ipi)
{
/*
* This was previously implemented as an invocation of asynchronous
@@ -422,21 +422,21 @@ ipi_cpu(int cpu, u_int ipi)
poke_cpu(cpu);
}
-u_int cpu_high; /* Highest arg to CPUID */
-u_int cpu_exthigh; /* Highest arg to extended CPUID */
-u_int cpu_id; /* Stepping ID */
+uint_t cpu_high; /* Highest arg to CPUID */
+uint_t cpu_exthigh; /* Highest arg to extended CPUID */
+uint_t cpu_id; /* Stepping ID */
char cpu_vendor[20]; /* CPU Origin code */
static void
vmm_cpuid_init(void)
{
- u_int regs[4];
+ uint_t regs[4];
do_cpuid(0, regs);
cpu_high = regs[0];
- ((u_int *)&cpu_vendor)[0] = regs[1];
- ((u_int *)&cpu_vendor)[1] = regs[3];
- ((u_int *)&cpu_vendor)[2] = regs[2];
+ ((uint_t *)&cpu_vendor)[0] = regs[1];
+ ((uint_t *)&cpu_vendor)[1] = regs[3];
+ ((uint_t *)&cpu_vendor)[2] = regs[2];
cpu_vendor[12] = '\0';
do_cpuid(1, regs);
@@ -565,7 +565,7 @@ vmm_sol_glue_cleanup(void)
#include <sys/clock.h>
-/*--------------------------------------------------------------------*
+/*
* Generic routines to convert between a POSIX date
* (seconds since 1/1/1970) and yr/mo/day/hr/min/sec
* Derived from NetBSD arch/hp300/hp300/clock.c
@@ -625,8 +625,8 @@ clock_ct_to_ts(struct clocktime *ct, struct timespec *ts)
/* Sanity checks. */
if (ct->mon < 1 || ct->mon > 12 || ct->day < 1 ||
ct->day > days_in_month(year, ct->mon) ||
- ct->hour > 23 || ct->min > 59 || ct->sec > 59 ||
- (sizeof(time_t) == 4 && year > 2037)) { /* time_t overflow */
+ ct->hour > 23 || ct->min > 59 || ct->sec > 59 ||
+ (sizeof (time_t) == 4 && year > 2037)) { /* time_t overflow */
#ifdef __FreeBSD__
if (ct_debug)
printf(" = EINVAL\n");
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_stat.c b/usr/src/uts/i86pc/io/vmm/vmm_stat.c
index 42d6f8cfa3..26e3573bc9 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_stat.c
+++ b/usr/src/uts/i86pc/io/vmm/vmm_stat.c
@@ -53,7 +53,7 @@ static struct vmm_stat_type *vsttab[MAX_VMM_STAT_ELEMS];
static MALLOC_DEFINE(M_VMM_STAT, "vmm stat", "vmm stat");
-#define vst_size ((size_t)vst_num_elems * sizeof(uint64_t))
+#define vst_size ((size_t)vst_num_elems * sizeof (uint64_t))
void
vmm_stat_register(void *arg)
@@ -137,7 +137,7 @@ vmm_stat_desc_copy(int index, char *buf, int bufsize)
if (index >= vst->index && index < vst->index + vst->nelems) {
if (vst->nelems > 1) {
snprintf(buf, bufsize, "%s[%d]",
- vst->desc, index - vst->index);
+ vst->desc, index - vst->index);
} else {
strlcpy(buf, vst->desc, bufsize);
}
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_stat.h b/usr/src/uts/i86pc/io/vmm/vmm_stat.h
index a214ba0fe9..68e43c7bfc 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_stat.h
+++ b/usr/src/uts/i86pc/io/vmm/vmm_stat.h
@@ -104,7 +104,7 @@ int vmm_stat_desc_copy(int index, char *buf, int buflen);
static __inline void
vmm_stat_array_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
- int statidx, uint64_t x)
+ int statidx, uint64_t x)
{
#ifdef VMM_KEEP_STATS
uint64_t *stats;
@@ -118,7 +118,7 @@ vmm_stat_array_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
static __inline void
vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
- int statidx, uint64_t val)
+ int statidx, uint64_t val)
{
#ifdef VMM_KEEP_STATS
uint64_t *stats;
diff --git a/usr/src/uts/i86pc/io/vmm/x86.c b/usr/src/uts/i86pc/io/vmm/x86.c
index 248014ae24..7b155af3c3 100644
--- a/usr/src/uts/i86pc/io/vmm/x86.c
+++ b/usr/src/uts/i86pc/io/vmm/x86.c
@@ -74,20 +74,17 @@ static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
static const char bhyve_id[12] = "bhyve bhyve ";
+/* Number of times an unknown cpuid leaf was accessed */
static uint64_t bhyve_xcpuids;
-SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
- "Number of times an unknown cpuid leaf was accessed");
static int cpuid_leaf_b = 1;
-SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
- &cpuid_leaf_b, 0, NULL);
/*
* Round up to the next power of two, if necessary, and then take log2.
* Returns -1 if argument is zero.
*/
static __inline int
-log2(u_int x)
+log2(uint_t x)
{
return (fls(x << (1 - powerof2(x))) - 1);
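
log2() above rounds its argument up to the next power of two before taking the logarithm: powerof2(x) is 1 for powers of two, so the shift by (1 - powerof2(x)) doubles only non-powers-of-two, and fls() then returns the position of the highest set bit. A portable sketch of the same computation, with fls() and powerof2() replaced by plain C helpers so it can be compiled and checked anywhere:

#include <stdio.h>

/* Highest set bit, 1-based; 0 when x == 0 (like fls()). */
static int
fls_demo(unsigned int x)
{
	int bit = 0;

	while (x != 0) {
		bit++;
		x >>= 1;
	}
	return (bit);
}

static int
powerof2_demo(unsigned int x)
{
	return ((x & (x - 1)) == 0);
}

/* Round up to the next power of two, then take log2; -1 for 0. */
static int
log2_roundup(unsigned int x)
{
	return (fls_demo(x << (1 - powerof2_demo(x))) - 1);
}

int
main(void)
{
	printf("log2(1)=%d log2(4)=%d log2(5)=%d log2(8)=%d\n",
	    log2_roundup(1), log2_roundup(4), log2_roundup(5),
	    log2_roundup(8));	/* 0 2 3 3 */
	return (0);
}
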
@@ -173,7 +170,8 @@ x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint64_t *rax, uint64_t *rbx,
if (width < 0x4)
width = 0;
logical_cpus = MIN(0xFF, threads * cores - 1);
- regs[2] = (width << AMDID_COREID_SIZE_SHIFT) | logical_cpus;
+ regs[2] = (width << AMDID_COREID_SIZE_SHIFT) |
+ logical_cpus;
}
break;
@@ -331,7 +329,7 @@ x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint64_t *rax, uint64_t *rbx,
error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
if (error) {
panic("x86_emulate_cpuid: error %d "
- "fetching x2apic state", error);
+ "fetching x2apic state", error);
}
/*
@@ -372,7 +370,7 @@ x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint64_t *rax, uint64_t *rbx,
VM_REG_GUEST_CR4, &cr4);
if (error)
panic("x86_emulate_cpuid: error %d "
- "fetching %%cr4", error);
+ "fetching %%cr4", error);
if (cr4 & CR4_XSAVE)
regs[2] |= CPUID2_OSXSAVE;
}
@@ -383,7 +381,7 @@ x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint64_t *rax, uint64_t *rbx,
*/
regs[2] &= ~CPUID2_MON;
- /*
+ /*
* Hide the performance and debug features.
*/
regs[2] &= ~CPUID2_PDCM;
diff --git a/usr/src/uts/i86pc/io/vmm/x86.h b/usr/src/uts/i86pc/io/vmm/x86.h
index 7c8fccf78f..29d51cfe02 100644
--- a/usr/src/uts/i86pc/io/vmm/x86.h
+++ b/usr/src/uts/i86pc/io/vmm/x86.h
@@ -31,38 +31,38 @@
#ifndef _X86_H_
#define _X86_H_
-#define CPUID_0000_0000 (0x0)
-#define CPUID_0000_0001 (0x1)
-#define CPUID_0000_0002 (0x2)
-#define CPUID_0000_0003 (0x3)
-#define CPUID_0000_0004 (0x4)
-#define CPUID_0000_0006 (0x6)
-#define CPUID_0000_0007 (0x7)
+#define CPUID_0000_0000 (0x0)
+#define CPUID_0000_0001 (0x1)
+#define CPUID_0000_0002 (0x2)
+#define CPUID_0000_0003 (0x3)
+#define CPUID_0000_0004 (0x4)
+#define CPUID_0000_0006 (0x6)
+#define CPUID_0000_0007 (0x7)
#define CPUID_0000_000A (0xA)
#define CPUID_0000_000B (0xB)
#define CPUID_0000_000D (0xD)
#define CPUID_0000_0015 (0x15)
-#define CPUID_8000_0000 (0x80000000)
-#define CPUID_8000_0001 (0x80000001)
-#define CPUID_8000_0002 (0x80000002)
-#define CPUID_8000_0003 (0x80000003)
-#define CPUID_8000_0004 (0x80000004)
-#define CPUID_8000_0006 (0x80000006)
-#define CPUID_8000_0007 (0x80000007)
-#define CPUID_8000_0008 (0x80000008)
-#define CPUID_8000_001D (0x8000001D)
-#define CPUID_8000_001E (0x8000001E)
+#define CPUID_8000_0000 (0x80000000)
+#define CPUID_8000_0001 (0x80000001)
+#define CPUID_8000_0002 (0x80000002)
+#define CPUID_8000_0003 (0x80000003)
+#define CPUID_8000_0004 (0x80000004)
+#define CPUID_8000_0006 (0x80000006)
+#define CPUID_8000_0007 (0x80000007)
+#define CPUID_8000_0008 (0x80000008)
+#define CPUID_8000_001D (0x8000001D)
+#define CPUID_8000_001E (0x8000001E)
/*
* CPUID instruction Fn0000_0001:
*/
-#define CPUID_0000_0001_APICID_MASK (0xff<<24)
-#define CPUID_0000_0001_APICID_SHIFT 24
+#define CPUID_0000_0001_APICID_MASK (0xff<<24)
+#define CPUID_0000_0001_APICID_SHIFT 24
/*
* CPUID instruction Fn0000_0001 ECX
*/
-#define CPUID_0000_0001_FEAT0_VMX (1<<5)
+#define CPUID_0000_0001_FEAT0_VMX (1<<5)
int x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint64_t *rax, uint64_t *rbx,
uint64_t *rcx, uint64_t *rdx);