author		Dan McDonald <danmcd@joyent.com>	2021-05-13 12:35:45 -0400
committer	Dan McDonald <danmcd@joyent.com>	2021-05-13 12:35:49 -0400
commit		d0d5aa48a298fa111ddb8153a4ecdb18f94d6867 (patch)
tree		7f5e1eb0c626fe16baeb0367220fe67151b56d8a
parent		796c0f4ba6e4c2ae9df33a204e48b68d1a486a30 (diff)
parent		3dfe801733ea71102fa4b1f79dddf16ee3ea546f (diff)
download	illumos-joyent-d0d5aa48a298fa111ddb8153a4ecdb18f94d6867.tar.gz
[illumos-gate merge]
commit 3dfe801733ea71102fa4b1f79dddf16ee3ea546f
	13678 Make zpool list -vp print individual vdev sizes parsable.
	8883 zpool list parsable output is inconsistent
commit 860b5cc8ca1cde8ee07c510e06bf21ad9103bb69
	6395 cannot tune segkpsize up
commit f703164ba851040400c59bb71d4667c477b96bdb
	13777 bhyve kernel ifdef cleanup

Conflicts:
	usr/src/uts/i86pc/io/vmm/vmm_sol_dev.c
	usr/src/uts/i86pc/io/vmm/vmm.c
-rw-r--r--	usr/src/cmd/zpool/zpool_main.c	45
-rw-r--r--	usr/src/lib/libzfs/common/libzfs_pool.c	14
-rw-r--r--	usr/src/man/man1m/eeprom.1m	44
-rw-r--r--	usr/src/uts/i86pc/io/vmm/amd/svm.c	273
-rw-r--r--	usr/src/uts/i86pc/io/vmm/amd/svm.h	5
-rw-r--r--	usr/src/uts/i86pc/io/vmm/amd/svm_msr.c	29
-rw-r--r--	usr/src/uts/i86pc/io/vmm/amd/svm_softc.h	13
-rw-r--r--	usr/src/uts/i86pc/io/vmm/intel/ept.c	2
-rw-r--r--	usr/src/uts/i86pc/io/vmm/intel/vmx.c	127
-rw-r--r--	usr/src/uts/i86pc/io/vmm/intel/vmx.h	4
-rw-r--r--	usr/src/uts/i86pc/io/vmm/intel/vmx_msr.c	23
-rw-r--r--	usr/src/uts/i86pc/io/vmm/io/vatpit.c	2
-rw-r--r--	usr/src/uts/i86pc/io/vmm/io/vatpit.h	2
-rw-r--r--	usr/src/uts/i86pc/io/vmm/io/vhpet.c	7
-rw-r--r--	usr/src/uts/i86pc/io/vmm/io/vhpet.h	2
-rw-r--r--	usr/src/uts/i86pc/io/vmm/io/vlapic.c	26
-rw-r--r--	usr/src/uts/i86pc/io/vmm/io/vlapic.h	2
-rw-r--r--	usr/src/uts/i86pc/io/vmm/io/vrtc.c	2
-rw-r--r--	usr/src/uts/i86pc/io/vmm/io/vrtc.h	2
-rw-r--r--	usr/src/uts/i86pc/io/vmm/sys/vmm_kernel.h	13
-rw-r--r--	usr/src/uts/i86pc/io/vmm/vmm.c	190
-rw-r--r--	usr/src/uts/i86pc/io/vmm/vmm_host.c	26
-rw-r--r--	usr/src/uts/i86pc/io/vmm/vmm_sol_dev.c	4
-rw-r--r--	usr/src/uts/i86pc/io/vmm/vmm_util.c	39
-rw-r--r--	usr/src/uts/i86pc/io/vmm/x86.c	4
-rw-r--r--	usr/src/uts/i86pc/os/startup.c	15
-rw-r--r--	usr/src/uts/i86pc/sys/vmm.h	2
-rw-r--r--	usr/src/uts/i86pc/sys/vmm_dev.h	4
28 files changed, 101 insertions, 820 deletions
diff --git a/usr/src/cmd/zpool/zpool_main.c b/usr/src/cmd/zpool/zpool_main.c
index 4a904db641..d2ae332849 100644
--- a/usr/src/cmd/zpool/zpool_main.c
+++ b/usr/src/cmd/zpool/zpool_main.c
@@ -5152,7 +5152,7 @@ print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
static void
print_one_column(zpool_prop_t prop, uint64_t value, boolean_t scripted,
- boolean_t valid)
+ boolean_t valid, enum zfs_nicenum_format format)
{
char propval[64];
boolean_t fixed;
@@ -5164,23 +5164,32 @@ print_one_column(zpool_prop_t prop, uint64_t value, boolean_t scripted,
if (value == 0)
(void) strlcpy(propval, "-", sizeof (propval));
else
- zfs_nicenum(value, propval, sizeof (propval));
+ zfs_nicenum_format(value, propval, sizeof (propval),
+ format);
break;
case ZPOOL_PROP_FRAGMENTATION:
if (value == ZFS_FRAG_INVALID) {
(void) strlcpy(propval, "-", sizeof (propval));
+ } else if (format == ZFS_NICENUM_RAW) {
+ (void) snprintf(propval, sizeof (propval), "%llu",
+ (unsigned long long)value);
} else {
(void) snprintf(propval, sizeof (propval), "%llu%%",
- value);
+ (unsigned long long)value);
}
break;
case ZPOOL_PROP_CAPACITY:
- (void) snprintf(propval, sizeof (propval),
- value < 1000 ? "%1.2f%%" : value < 10000 ?
- "%2.1f%%" : "%3.0f%%", value / 100.0);
+ if (format == ZFS_NICENUM_RAW)
+ (void) snprintf(propval, sizeof (propval), "%llu",
+ (unsigned long long)value / 100);
+
+ else
+ (void) snprintf(propval, sizeof (propval),
+ value < 1000 ? "%1.2f%%" : value < 10000 ?
+ "%2.1f%%" : "%3.0f%%", value / 100.0);
break;
default:
- zfs_nicenum(value, propval, sizeof (propval));
+ zfs_nicenum_format(value, propval, sizeof (propval), format);
}
if (!valid)
@@ -5213,6 +5222,12 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
if (name != NULL) {
boolean_t toplevel = (vs->vs_space != 0);
uint64_t cap;
+ enum zfs_nicenum_format format;
+
+ if (cb->cb_literal)
+ format = ZFS_NICENUM_RAW;
+ else
+ format = ZFS_NICENUM_1024;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return;
@@ -5232,21 +5247,23 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
* to indicate that the value is valid.
*/
print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, scripted,
- toplevel);
+ toplevel, format);
print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, scripted,
- toplevel);
+ toplevel, format);
print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
- scripted, toplevel);
+ scripted, toplevel, format);
print_one_column(ZPOOL_PROP_CHECKPOINT,
- vs->vs_checkpoint_space, scripted, toplevel);
+ vs->vs_checkpoint_space, scripted, toplevel, format);
print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, scripted,
- B_TRUE);
+ B_TRUE, format);
print_one_column(ZPOOL_PROP_FRAGMENTATION,
vs->vs_fragmentation, scripted,
- (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel));
+ (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
+ format);
cap = (vs->vs_space == 0) ? 0 :
(vs->vs_alloc * 10000 / vs->vs_space);
- print_one_column(ZPOOL_PROP_CAPACITY, cap, scripted, toplevel);
+ print_one_column(ZPOOL_PROP_CAPACITY, cap, scripted, toplevel,
+ format);
(void) printf("\n");
}
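
The zpool_main.c change above boils down to choosing a number format per invocation: ZFS_NICENUM_RAW when -p (cb_literal) is set, ZFS_NICENUM_1024 otherwise, with every vdev column routed through that choice. The standalone C sketch below mirrors that selection with a stand-in formatter; the helper names and values are illustrative only, not libzfs code.

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the libzfs format values referenced in the diff above. */
enum nicenum_format { NICENUM_RAW, NICENUM_1024 };

/* Toy formatter: raw prints exact bytes, 1024 prints a rounded suffix form. */
static void
format_value(uint64_t v, enum nicenum_format fmt, char *buf, size_t len)
{
	if (fmt == NICENUM_RAW) {
		(void) snprintf(buf, len, "%llu", (unsigned long long)v);
		return;
	}

	const char *units[] = { "", "K", "M", "G", "T" };
	int i = 0;
	double d = (double)v;

	while (d >= 1024.0 && i < 4) {
		d /= 1024.0;
		i++;
	}
	(void) snprintf(buf, len, "%.2f%s", d, units[i]);
}

int
main(void)
{
	uint64_t vdev_size = 3998639460352ULL;	/* an example byte count */
	char buf[64];
	int literal;

	for (literal = 0; literal <= 1; literal++) {
		/* literal plays the role of cb_literal, i.e. `zpool list -p`. */
		enum nicenum_format fmt = literal ? NICENUM_RAW : NICENUM_1024;

		format_value(vdev_size, fmt, buf, sizeof (buf));
		(void) printf("literal=%d -> %s\n", literal, buf);
	}
	return (0);
}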
diff --git a/usr/src/lib/libzfs/common/libzfs_pool.c b/usr/src/lib/libzfs/common/libzfs_pool.c
index e56628e2be..a93e14d169 100644
--- a/usr/src/lib/libzfs/common/libzfs_pool.c
+++ b/usr/src/lib/libzfs/common/libzfs_pool.c
@@ -342,15 +342,23 @@ zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
case ZPOOL_PROP_FRAGMENTATION:
if (intval == UINT64_MAX) {
(void) strlcpy(buf, "-", len);
+ } else if (literal) {
+ (void) snprintf(buf, len, "%llu",
+ (u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case ZPOOL_PROP_DEDUPRATIO:
- (void) snprintf(buf, len, "%llu.%02llux",
- (u_longlong_t)(intval / 100),
- (u_longlong_t)(intval % 100));
+ if (literal)
+ (void) snprintf(buf, len, "%llu.%02llu",
+ (u_longlong_t)(intval / 100),
+ (u_longlong_t)(intval % 100));
+ else
+ (void) snprintf(buf, len, "%llu.%02llux",
+ (u_longlong_t)(intval / 100),
+ (u_longlong_t)(intval % 100));
break;
case ZPOOL_PROP_HEALTH:
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
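
The libzfs side applies the same rule to derived properties: when literal output is requested, the '%' and 'x' suffixes are dropped so the strings parse as plain numbers. A tiny standalone sketch of the dedupratio case (not the libzfs implementation itself):

#include <stdio.h>
#include <stdint.h>

/* dedupratio is stored scaled by 100, e.g. 123 represents 1.23x. */
static void
format_dedupratio(uint64_t intval, int literal, char *buf, size_t len)
{
	(void) snprintf(buf, len, literal ? "%llu.%02llu" : "%llu.%02llux",
	    (unsigned long long)(intval / 100),
	    (unsigned long long)(intval % 100));
}

int
main(void)
{
	char buf[32];

	format_dedupratio(123, 0, buf, sizeof (buf));
	(void) printf("default: %s\n", buf);	/* 1.23x */
	format_dedupratio(123, 1, buf, sizeof (buf));
	(void) printf("literal: %s\n", buf);	/* 1.23 */
	return (0);
}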
diff --git a/usr/src/man/man1m/eeprom.1m b/usr/src/man/man1m/eeprom.1m
index 13eb4bcbc2..a1723ec9bf 100644
--- a/usr/src/man/man1m/eeprom.1m
+++ b/usr/src/man/man1m/eeprom.1m
@@ -1,5 +1,6 @@
'\" te
.\" Copyright 2019 Peter Tribble.
+.\" Copyright 2020 Carlos Neira <cneirabustos@gmail.com>.
.\" Copyright (c) 2004, Sun Microsystems, Inc. All Rights Reserved
.\" The contents of this file are subject to the terms of the Common Development and Distribution License (the "License"). You may not use this file except in compliance with the License.
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing. See the License for the specific language governing permissions and limitations under the License.
@@ -8,13 +9,11 @@
.SH NAME
eeprom \- EEPROM display and load utility
.SH SYNOPSIS
-.LP
.nf
\fB/usr/sbin/eeprom\fR [\fB-\fR] [\fB-f\fR \fIdevice\fR] [\fIparameter\fR[=\fIvalue\fR]]
.fi
.SH DESCRIPTION
-.LP
\fBeeprom\fR displays or changes the values of parameters in the \fBEEPROM.\fR
It processes parameters in the order given. When processing a \fIparameter\fR
accompanied by a \fIvalue\fR, \fBeeprom\fR makes the indicated alteration to
@@ -35,12 +34,10 @@ incorrect.
\fIplatform-name\fR is the name of the platform implementation and can be found
using the \fB-i\fR option of \fBuname\fR(1).
.SS "SPARC"
-.LP
\fBSPARC\fR based systems implement firmware password protection with
\fBeeprom\fR, using the \fBsecurity-mode\fR, \fBsecurity-password\fR and
\fBsecurity-#badlogins\fR properties.
.SS "x86"
-.LP
\fBEEPROM\fR storage is simulated using a file residing in the
platform-specific boot area. The \fB/boot/solaris/bootenv.rc\fR file simulates
\fBEEPROM\fR storage.
@@ -134,8 +131,25 @@ to \fBdiag-device\fR, until the console drivers are loaded.
After that, only \fBkmdb\fR will continue to use the \fBdiag-device\fR.
.RE
+.ne 2
+.na
+\fB\fIsegkpsize\fR\fR
+.ad
+.sp .6
+.RS 4n
+Specifies the amount, in \fBbytes\fR, of kernel pageable memory available.
+Increasing this value allows either larger stacks for the same number of
+kernel threads or more kernel threads; by default it is \fB0x80000000 (2 GiB)\fR.
+If the specified value exceeds the size of physical memory, it is
+automatically capped to that size.
+
+Valid range: 0xC800000 - 0x200000000 (200 MiB - 8 GiB)
+
+
+.RE
+
+
.SH NVRAM CONFIGURATION PARAMETERS
-.LP
Not all OpenBoot systems support all parameters. Defaults vary depending on the
system and the \fBPROM\fR revision. See the output in the "Default Value"
column of the \fBprintenv\fR command, as entered at the \fBok\fR (OpenBoot)
@@ -1092,7 +1106,6 @@ If \fBtrue\fR, reboot after watchdog reset. Defaults to \fBfalse\fR.
.RE
.SH EXAMPLES
-.LP
\fBExample 1 \fRChanging the Number of Megabytes of RAM.
.sp
.LP
@@ -1231,6 +1244,23 @@ consult the BIOS documentation for that machine. Also, on some x86 machines,
you might use a device other than device \fBa\fR, as shown above. For example,
you could set console to \fBttyb\fR if the second serial port is present.
+\fBExample 5 \fRChanging segkpsize.
+.sp
+.LP
+The following example shows how to change the number of bytes used
+for kernel pageable memory.
+Running the command below sets segkpsize to 3 GiB.
+.sp
+.in +2
+.nf
+# \fBeeprom segkpsize=0xC0000000\fR
+
+.fi
+.in -2
+.sp
+
+
+
.SH FILES
.ne 2
.na
@@ -1262,8 +1292,8 @@ Platform-specific version of \fBeeprom\fR. Use \fBuname\fR \fB-i\fR to obtain
\fIplatform-name\fR.
.RE
+
.SH SEE ALSO
-.LP
\fBpasswd\fR(1), \fBsh\fR(1), \fBsvcs\fR(1), \fBtip\fR(1), \fBuname\fR(1),
\fBboot\fR(1M), \fBkadb\fR(1M), \fBkernel\fR(1M), \fBinit\fR(1M),
\fBsvcadm\fR(1M), \fBattributes\fR(5), \fBsmf\fR(5)
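
For reference, segkpsize is handed to eeprom as a byte count, so choosing a size usually means converting GiB to hex. The small throwaway helper below (illustrative only) reproduces the figures quoted in the man page hunks above: 0x80000000 for the 2 GiB default, 0xC0000000 for the 3 GiB example, 0x200000000 for the 8 GiB maximum.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Candidate segkpsize settings, expressed in GiB. */
	const unsigned int gib[] = { 2, 3, 4, 8 };
	int i;

	for (i = 0; i < 4; i++) {
		uint64_t bytes = (uint64_t)gib[i] << 30;

		(void) printf("%u GiB -> eeprom segkpsize=0x%llX\n",
		    gib[i], (unsigned long long)bytes);
	}
	return (0);
}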
diff --git a/usr/src/uts/i86pc/io/vmm/amd/svm.c b/usr/src/uts/i86pc/io/vmm/amd/svm.c
index 71adb122fc..7071a5ad7f 100644
--- a/usr/src/uts/i86pc/io/vmm/amd/svm.c
+++ b/usr/src/uts/i86pc/io/vmm/amd/svm.c
@@ -52,10 +52,8 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/sysctl.h>
-#ifndef __FreeBSD__
#include <sys/x86_archext.h>
#include <sys/trap.h>
-#endif
#include <vm/vm.h>
#include <vm/pmap.h>
@@ -147,150 +145,6 @@ decode_assist(void)
return ((svm_feature & AMD_CPUID_SVM_DECODE_ASSIST) != 0);
}
-#ifdef __FreeBSD__
-static void
-svm_disable(void *arg __unused)
-{
- uint64_t efer;
-
- efer = rdmsr(MSR_EFER);
- efer &= ~EFER_SVM;
- wrmsr(MSR_EFER, efer);
-}
-
-/*
- * Disable SVM on all CPUs.
- */
-static int
-svm_cleanup(void)
-{
-
- smp_rendezvous(NULL, svm_disable, NULL, NULL);
- return (0);
-}
-
-/*
- * Verify that all the features required by bhyve are available.
- */
-static int
-check_svm_features(void)
-{
- uint_t regs[4];
-
- /* CPUID Fn8000_000A is for SVM */
- do_cpuid(0x8000000A, regs);
- svm_feature &= regs[3];
-
- /*
- * The number of ASIDs can be configured to be less than what is
- * supported by the hardware but not more.
- */
- if (nasid == 0 || nasid > regs[1])
- nasid = regs[1];
- KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %x", nasid));
-
- /* bhyve requires the Nested Paging feature */
- if (!(svm_feature & AMD_CPUID_SVM_NP)) {
- printf("SVM: Nested Paging feature not available.\n");
- return (ENXIO);
- }
-
- /* bhyve requires the NRIP Save feature */
- if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
- printf("SVM: NRIP Save feature not available.\n");
- return (ENXIO);
- }
-
- return (0);
-}
-
-static void
-svm_enable(void *arg __unused)
-{
- uint64_t efer;
-
- efer = rdmsr(MSR_EFER);
- efer |= EFER_SVM;
- wrmsr(MSR_EFER, efer);
-
- wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
-}
-
-/*
- * Return 1 if SVM is enabled on this processor and 0 otherwise.
- */
-static int
-svm_available(void)
-{
- uint64_t msr;
-
-#ifdef __FreeBSD__
- /* Section 15.4 Enabling SVM from APM2. */
- if ((amd_feature2 & AMDID2_SVM) == 0) {
- printf("SVM: not available.\n");
- return (0);
- }
-#else
- if (!is_x86_feature(x86_featureset, X86FSET_SVM)) {
- cmn_err(CE_WARN, "processor does not support SVM operation\n");
- return (0);
- }
-#endif
-
- msr = rdmsr(MSR_VM_CR);
- if ((msr & VM_CR_SVMDIS) != 0) {
-#ifdef __FreeBSD__
- printf("SVM: disabled by BIOS.\n");
-#else
- cmn_err(CE_WARN, "SVM disabled by BIOS.\n");
-#endif
- return (0);
- }
-
- return (1);
-}
-
-static int
-svm_init(int ipinum)
-{
- int error, cpu;
-
- if (!svm_available())
- return (ENXIO);
-
- error = check_svm_features();
- if (error)
- return (error);
-
- vmcb_clean &= VMCB_CACHE_DEFAULT;
-
- for (cpu = 0; cpu < MAXCPU; cpu++) {
- /*
- * Initialize the host ASIDs to their "highest" valid values.
- *
- * The next ASID allocation will rollover both 'gen' and 'num'
- * and start off the sequence at {1,1}.
- */
- asid[cpu].gen = ~0UL;
- asid[cpu].num = nasid - 1;
- }
-
- svm_msr_init();
- svm_npt_init(ipinum);
-
- /* Enable SVM on all CPUs */
- smp_rendezvous(NULL, svm_enable, NULL, NULL);
-
- return (0);
-}
-
-static void
-svm_restore(void)
-{
-
- svm_enable(NULL);
-}
-#else /* __FreeBSD__ */
static int
svm_cleanup(void)
{
@@ -314,7 +168,6 @@ svm_restore(void)
{
/* No-op on illumos */
}
-#endif /* __FreeBSD__ */
/* Pentium compatible MSRs */
#define MSR_PENTIUM_START 0
@@ -1408,11 +1261,7 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
struct svm_regctx *ctx;
uint64_t code, info1, info2, val;
uint32_t eax, ecx, edx;
-#ifdef __FreeBSD__
- int error, errcode_valid, handled, idtvec, reflect;
-#else
int error, errcode_valid = 0, handled, idtvec, reflect;
-#endif
ctx = svm_get_guest_regctx(svm_sc, vcpu);
vmcb = svm_get_vmcb(svm_sc, vcpu);
@@ -1518,11 +1367,7 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
*/
reflect = 0;
VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
-#ifdef __FreeBSD__
- __asm __volatile("int $18");
-#else
vmm_call_trap(T_MCE);
-#endif
break;
case IDT_PF:
error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
@@ -1925,110 +1770,6 @@ svm_inject_recheck(struct svm_softc *sc, int vcpu,
}
-#ifdef __FreeBSD__
-static void
-check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, uint_t thiscpu)
-{
- struct svm_vcpu *vcpustate;
- struct vmcb_ctrl *ctrl;
- long eptgen;
- bool alloc_asid;
-
- KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
- "active on cpu %u", __func__, thiscpu));
-
- vcpustate = svm_get_vcpu(sc, vcpuid);
- ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
-
- /*
- * The TLB entries associated with the vcpu's ASID are not valid
- * if either of the following conditions is true:
- *
- * 1. The vcpu's ASID generation is different than the host cpu's
- * ASID generation. This happens when the vcpu migrates to a new
- * host cpu. It can also happen when the number of vcpus executing
- * on a host cpu is greater than the number of ASIDs available.
- *
- * 2. The pmap generation number is different than the value cached in
- * the 'vcpustate'. This happens when the host invalidates pages
- * belonging to the guest.
- *
- * asidgen eptgen Action
- * mismatch mismatch
- * 0 0 (a)
- * 0 1 (b1) or (b2)
- * 1 0 (c)
- * 1 1 (d)
- *
- * (a) There is no mismatch in eptgen or ASID generation and therefore
- * no further action is needed.
- *
- * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
- * retained and the TLB entries associated with this ASID
- * are flushed by VMRUN.
- *
- * (b2) If the cpu does not support FlushByAsid then a new ASID is
- * allocated.
- *
- * (c) A new ASID is allocated.
- *
- * (d) A new ASID is allocated.
- */
-
- alloc_asid = false;
- eptgen = pmap->pm_eptgen;
- ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;
-
- if (vcpustate->asid.gen != asid[thiscpu].gen) {
- alloc_asid = true; /* (c) and (d) */
- } else if (vcpustate->eptgen != eptgen) {
- if (flush_by_asid())
- ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */
- else
- alloc_asid = true; /* (b2) */
- } else {
- /*
- * This is the common case (a).
- */
- KASSERT(!alloc_asid, ("ASID allocation not necessary"));
- KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
- ("Invalid VMCB tlb_ctrl: %x", ctrl->tlb_ctrl));
- }
-
- if (alloc_asid) {
- if (++asid[thiscpu].num >= nasid) {
- asid[thiscpu].num = 1;
- if (++asid[thiscpu].gen == 0)
- asid[thiscpu].gen = 1;
- /*
- * If this cpu does not support "flush-by-asid"
- * then flush the entire TLB on a generation
- * bump. Subsequent ASID allocation in this
- * generation can be done without a TLB flush.
- */
- if (!flush_by_asid())
- ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
- }
- vcpustate->asid.gen = asid[thiscpu].gen;
- vcpustate->asid.num = asid[thiscpu].num;
-
- ctrl->asid = vcpustate->asid.num;
- svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
- /*
- * If this cpu supports "flush-by-asid" then the TLB
- * was not flushed after the generation bump. The TLB
- * is flushed selectively after every new ASID allocation.
- */
- if (flush_by_asid())
- ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
- }
- vcpustate->eptgen = eptgen;
-
- KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
- KASSERT(ctrl->asid == vcpustate->asid.num,
- ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
-}
-#else /* __FreeBSD__ */
static void
check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, uint_t thiscpu)
{
@@ -2069,7 +1810,6 @@ flush_asid(struct svm_softc *sc, int vcpuid)
* flush will be satisfied by the one which has just now been queued.
*/
}
-#endif /* __FreeBSD__ */
static __inline void
disable_gintr(void)
@@ -2184,11 +1924,7 @@ svm_vmrun(void *arg, int vcpu, uint64_t rip, pmap_t pmap)
/*
* Force new ASID allocation by invalidating the generation.
*/
-#ifdef __FreeBSD__
- vcpustate->asid.gen = 0;
-#else
vcpustate->hma_asid.hsa_gen = 0;
-#endif
/*
* Invalidate the VMCB state cache by marking all fields dirty.
@@ -2213,10 +1949,8 @@ svm_vmrun(void *arg, int vcpu, uint64_t rip, pmap_t pmap)
svm_msr_guest_enter(svm_sc, vcpu);
-#ifndef __FreeBSD__
VERIFY(!vcpustate->loaded && curthread->t_preempt != 0);
vcpustate->loaded = B_TRUE;
-#endif
/* Update Guest RIP */
state->rip = rip;
@@ -2320,10 +2054,8 @@ svm_vmrun(void *arg, int vcpu, uint64_t rip, pmap_t pmap)
svm_msr_guest_exit(svm_sc, vcpu);
-#ifndef __FreeBSD__
VERIFY(vcpustate->loaded && curthread->t_preempt != 0);
vcpustate->loaded = B_FALSE;
-#endif
return (0);
}
@@ -2716,7 +2448,6 @@ svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
free(vlapic, M_SVM_VLAPIC);
}
-#ifndef __FreeBSD__
static void
svm_savectx(void *arg, int vcpu)
{
@@ -2736,7 +2467,6 @@ svm_restorectx(void *arg, int vcpu)
svm_msr_guest_enter(sc, vcpu);
}
}
-#endif /* __FreeBSD__ */
struct vmm_ops vmm_ops_amd = {
.init = svm_init,
@@ -2755,8 +2485,7 @@ struct vmm_ops vmm_ops_amd = {
.vmspace_free = svm_npt_free,
.vlapic_init = svm_vlapic_init,
.vlapic_cleanup = svm_vlapic_cleanup,
-#ifndef __FreeBSD__
+
.vmsavectx = svm_savectx,
.vmrestorectx = svm_restorectx,
-#endif
};
diff --git a/usr/src/uts/i86pc/io/vmm/amd/svm.h b/usr/src/uts/i86pc/io/vmm/amd/svm.h
index 127c04ab6e..91e8419789 100644
--- a/usr/src/uts/i86pc/io/vmm/amd/svm.h
+++ b/usr/src/uts/i86pc/io/vmm/amd/svm.h
@@ -64,12 +64,7 @@ struct svm_regctx {
uint64_t host_debugctl;
};
-#ifdef __FreeBSD__
-struct pcpu;
-void svm_launch(uint64_t pa, struct svm_regctx *gctx, struct pcpu *pcpu);
-#else
struct cpu;
void svm_launch(uint64_t pa, struct svm_regctx *gctx, struct cpu *pcpu);
-#endif
#endif /* _SVM_H_ */
diff --git a/usr/src/uts/i86pc/io/vmm/amd/svm_msr.c b/usr/src/uts/i86pc/io/vmm/amd/svm_msr.c
index f97b61ee4e..ab1accbd7a 100644
--- a/usr/src/uts/i86pc/io/vmm/amd/svm_msr.c
+++ b/usr/src/uts/i86pc/io/vmm/amd/svm_msr.c
@@ -67,24 +67,6 @@ enum {
IDX_MSR_SF_MASK,
HOST_MSR_NUM /* must be the last enumeration */
};
-
-#ifdef __FreeBSD__
-static uint64_t host_msrs[HOST_MSR_NUM];
-
-void
-svm_msr_init(void)
-{
- /*
- * It is safe to cache the values of the following MSRs because they
- * don't change based on curcpu, curproc or curthread.
- */
- host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
- host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
- host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
- host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
-}
-#else
-
CTASSERT(HOST_MSR_NUM == SVM_HOST_MSR_NUM);
void
@@ -95,7 +77,6 @@ svm_msr_init(void)
* values for them serves no purpose.
*/
}
-#endif /* __FreeBSD__ */
void
svm_msr_guest_init(struct svm_softc *sc, int vcpu)
@@ -113,26 +94,22 @@ svm_msr_guest_init(struct svm_softc *sc, int vcpu)
void
svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
{
+ uint64_t *host_msrs = sc->host_msrs[vcpu];
+
/*
* Save host MSRs (if any) and restore guest MSRs (if any).
*/
-#ifndef __FreeBSD__
- uint64_t *host_msrs = sc->host_msrs[vcpu];
-
- /* Save host MSRs */
host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
-#endif /* __FreeBSD__ */
}
void
svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
{
-#ifndef __FreeBSD__
uint64_t *host_msrs = sc->host_msrs[vcpu];
-#endif
+
/*
* Save guest MSRs (if any) and restore host MSRs.
*/
diff --git a/usr/src/uts/i86pc/io/vmm/amd/svm_softc.h b/usr/src/uts/i86pc/io/vmm/amd/svm_softc.h
index 61f8d38898..e3ac603e71 100644
--- a/usr/src/uts/i86pc/io/vmm/amd/svm_softc.h
+++ b/usr/src/uts/i86pc/io/vmm/amd/svm_softc.h
@@ -34,17 +34,10 @@
#define SVM_IO_BITMAP_SIZE (3 * PAGE_SIZE)
#define SVM_MSR_BITMAP_SIZE (2 * PAGE_SIZE)
-#ifdef __FreeBSD__
-struct asid {
- uint64_t gen; /* range is [1, ~0UL] */
- uint32_t num; /* range is [1, nasid - 1] */
-};
-#else
#include <sys/hma.h>
/* This must match HOST_MSR_NUM in svm_msr.c (where it is CTASSERTed) */
#define SVM_HOST_MSR_NUM 4
-#endif /* __FreeBSD__ */
/*
* XXX separate out 'struct vmcb' from 'svm_vcpu' to avoid wasting space
@@ -58,12 +51,8 @@ struct svm_vcpu {
int lastcpu; /* host cpu that the vcpu last ran on */
uint32_t dirty; /* state cache bits that must be cleared */
long eptgen; /* pmap->pm_eptgen when the vcpu last ran */
-#ifdef __FreeBSD__
- struct asid asid;
-#else
hma_svm_asid_t hma_asid;
boolean_t loaded;
-#endif
} __aligned(PAGE_SIZE);
/*
@@ -76,9 +65,7 @@ struct svm_softc {
uint8_t *iopm_bitmap; /* shared by all vcpus */
uint8_t *msr_bitmap; /* shared by all vcpus */
struct vm *vm;
-#ifndef __FreeBSD__
uint64_t host_msrs[VM_MAXCPU][SVM_HOST_MSR_NUM];
-#endif
};
CTASSERT((offsetof(struct svm_softc, nptp) & PAGE_MASK) == 0);
diff --git a/usr/src/uts/i86pc/io/vmm/intel/ept.c b/usr/src/uts/i86pc/io/vmm/intel/ept.c
index 672a452111..35e29bcfcc 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/ept.c
+++ b/usr/src/uts/i86pc/io/vmm/intel/ept.c
@@ -49,9 +49,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
-#ifndef __FreeBSD__
#include <sys/hma.h>
-#endif
#include <vm/vm.h>
#include <vm/pmap.h>
diff --git a/usr/src/uts/i86pc/io/vmm/intel/vmx.c b/usr/src/uts/i86pc/io/vmm/intel/vmx.c
index f6c7e868df..4f4c32ac69 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/vmx.c
+++ b/usr/src/uts/i86pc/io/vmm/intel/vmx.c
@@ -55,13 +55,11 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/sysctl.h>
-#ifndef __FreeBSD__
#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/smt.h>
#include <sys/hma.h>
#include <sys/trap.h>
-#endif
#include <vm/vm.h>
#include <vm/pmap.h>
@@ -104,17 +102,6 @@ __FBSDID("$FreeBSD$");
(PROCBASED_INT_WINDOW_EXITING | \
PROCBASED_NMI_WINDOW_EXITING)
-#ifdef __FreeBSD__
-#define PROCBASED_CTLS_ONE_SETTING \
- (PROCBASED_SECONDARY_CONTROLS | \
- PROCBASED_MWAIT_EXITING | \
- PROCBASED_MONITOR_EXITING | \
- PROCBASED_IO_EXITING | \
- PROCBASED_MSR_BITMAPS | \
- PROCBASED_CTLS_WINDOW_SETTING | \
- PROCBASED_CR8_LOAD_EXITING | \
- PROCBASED_CR8_STORE_EXITING)
-#else
/* We consider TSC offset a necessity for unsynched TSC handling */
#define PROCBASED_CTLS_ONE_SETTING \
(PROCBASED_SECONDARY_CONTROLS | \
@@ -126,7 +113,6 @@ __FBSDID("$FreeBSD$");
PROCBASED_CTLS_WINDOW_SETTING | \
PROCBASED_CR8_LOAD_EXITING | \
PROCBASED_CR8_STORE_EXITING)
-#endif /* __FreeBSD__ */
#define PROCBASED_CTLS_ZERO_SETTING \
(PROCBASED_CR3_LOAD_EXITING | \
@@ -186,12 +172,6 @@ static int no_flush_rsb;
/*
* Optional capabilities
*/
-#ifdef __FreeBSD__
-SYSCTL_DECL(_hw_vmm_vmx);
-static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap,
- CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
- NULL);
-#endif
/* HLT triggers a VM-exit */
static int cap_halt_exit;
@@ -211,9 +191,6 @@ static enum vmx_caps vmx_capabilities;
/* APICv posted interrupt vector */
static int pirvec = -1;
-#ifdef __FreeBSD__
-static struct unrhdr *vpid_unr;
-#endif /* __FreeBSD__ */
static uint_t vpid_alloc_failed;
int guest_l1d_flush;
@@ -395,11 +372,7 @@ vpid_free(int vpid)
*/
if (vpid > VM_MAXCPU)
-#ifdef __FreeBSD__
- free_unr(vpid_unr, vpid);
-#else
hma_vmx_vpid_free((uint16_t)vpid);
-#endif
}
static void
@@ -424,14 +397,11 @@ vpid_alloc(uint16_t *vpid, int num)
* Allocate a unique VPID for each vcpu from the unit number allocator.
*/
for (i = 0; i < num; i++) {
-#ifdef __FreeBSD__
- x = alloc_unr(vpid_unr);
-#else
uint16_t tmp;
tmp = hma_vmx_vpid_alloc();
x = (tmp == 0) ? -1 : tmp;
-#endif
+
if (x == -1)
break;
else
@@ -686,31 +656,8 @@ vmx_init(int ipinum)
static void
vmx_trigger_hostintr(int vector)
{
-#ifdef __FreeBSD__
- uintptr_t func;
- struct gate_descriptor *gd;
-
- gd = &idt[vector];
-
- KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
- "invalid vector %d", vector));
- KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
- vector));
- KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
- "has invalid type %d", vector, gd->gd_type));
- KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
- "has invalid dpl %d", vector, gd->gd_dpl));
- KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
- "for vector %d has invalid selector %d", vector, gd->gd_selector));
- KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
- "IST %d", vector, gd->gd_ist));
-
- func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
- vmx_call_isr(func);
-#else
VERIFY(vector >= 32 && vector <= 255);
vmx_call_isr(vector - 32);
-#endif /* __FreeBSD__ */
}
static void *
@@ -939,13 +886,7 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
static int
vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
{
-#ifdef __FreeBSD__
- int handled, func;
-
- func = vmxctx->guest_rax;
-#else
int handled;
-#endif
handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax,
(uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx,
@@ -1023,11 +964,6 @@ vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
return;
}
-#ifdef __FreeBSD__
- KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
- "critical section", __func__, vcpu));
-#endif
-
/*
* Invalidate all mappings tagged with 'vpid'
*
@@ -2122,10 +2058,6 @@ emulate_rdmsr(struct vmx *vmx, int vcpuid, uint_t num)
return (error);
}
-#ifndef __FreeBSD__
-#define __predict_false(x) (x)
-#endif
-
static int
vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
@@ -2157,13 +2089,9 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
* These VM-exits are uncommon but must be handled specially
* as most VM-exit fields are not populated as usual.
*/
- if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
+ if (reason == EXIT_REASON_MCE_DURING_ENTRY) {
VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
-#ifdef __FreeBSD__
- __asm __volatile("int $18");
-#else
vmm_call_trap(T_MCE);
-#endif
return (1);
}
@@ -2424,11 +2352,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
*/
if (intr_vec == IDT_MC) {
VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
-#ifdef __FreeBSD__
- __asm __volatile("int $18");
-#else
vmm_call_trap(T_MCE);
-#endif
return (1);
}
@@ -2624,9 +2548,7 @@ vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
case VMX_VMRESUME_ERROR:
case VMX_VMLAUNCH_ERROR:
case VMX_INVEPT_ERROR:
-#ifndef __FreeBSD__
case VMX_VMWRITE_ERROR:
-#endif
vmexit->u.vmx.inst_type = rc;
break;
default:
@@ -2660,11 +2582,7 @@ vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
"to NMI has invalid vector: %x", intr_info));
VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
-#ifdef __FreeBSD__
- __asm __volatile("int $2");
-#else
vmm_call_trap(T_NMIFLT);
-#endif
}
}
@@ -2746,10 +2664,6 @@ vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap)
struct vm_exit *vmexit;
struct vlapic *vlapic;
uint32_t exit_reason;
-#ifdef __FreeBSD__
- struct region_descriptor gdtr, idtr;
- uint16_t ldt_sel;
-#endif
bool tpr_shadow_active;
vmx = arg;
@@ -2770,10 +2684,8 @@ vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap)
vmcs_load(vmcs_pa);
-#ifndef __FreeBSD__
VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0);
vmx->vmcs_state[vcpu] = VS_LOADED;
-#endif
/*
* XXX
@@ -2855,7 +2767,6 @@ vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap)
continue;
}
-#ifndef __FreeBSD__
if ((rc = smt_acquire()) != 1) {
enable_intr();
vmexit->rip = rip;
@@ -2895,23 +2806,6 @@ vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap)
handled = UNHANDLED;
break;
}
-#else
- /*
- * VM exits restore the base address but not the
- * limits of GDTR and IDTR. The VMCS only stores the
- * base address, so VM exits set the limits to 0xffff.
- * Save and restore the full GDTR and IDTR to restore
- * the limits.
- *
- * The VMCS does not save the LDTR at all, and VM
- * exits clear LDTR as if a NULL selector were loaded.
- * The userspace hypervisor probably doesn't use a
- * LDT, but save and restore it to be safe.
- */
- sgdt(&gdtr);
- sidt(&idtr);
- ldt_sel = sldt();
-#endif
if (tpr_shadow_active) {
vmx_tpr_shadow_enter(vlapic);
@@ -2924,14 +2818,8 @@ vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap)
vmx_dr_leave_guest(vmxctx);
vcpu_ustate_change(vm, vcpu, VU_EMU_KERN);
-#ifndef __FreeBSD__
vmx->vmcs_state[vcpu] |= VS_LAUNCHED;
smt_release();
-#else
- bare_lgdt(&gdtr);
- lidt(&idtr);
- lldt(ldt_sel);
-#endif
if (tpr_shadow_active) {
vmx_tpr_shadow_exit(vlapic);
@@ -2954,9 +2842,6 @@ vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap)
enable_intr();
vmx_exit_inst_error(vmxctx, rc, vmexit);
}
-#ifdef __FreeBSD__
- launched = 1;
-#endif
DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip,
uint32_t, exit_reason);
rip = vmexit->rip;
@@ -2974,10 +2859,8 @@ vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap)
vmcs_clear(vmcs_pa);
vmx_msr_guest_exit(vmx, vcpu);
-#ifndef __FreeBSD__
VERIFY(vmx->vmcs_state != VS_NONE && curthread->t_preempt != 0);
vmx->vmcs_state[vcpu] = VS_NONE;
-#endif
return (0);
}
@@ -3733,7 +3616,6 @@ vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
free(vlapic, M_VLAPIC);
}
-#ifndef __FreeBSD__
static void
vmx_savectx(void *arg, int vcpu)
{
@@ -3764,7 +3646,6 @@ vmx_restorectx(void *arg, int vcpu)
vmcs_load(vmx->vmcs_pa[vcpu]);
}
}
-#endif /* __FreeBSD__ */
struct vmm_ops vmm_ops_intel = {
.init = vmx_init,
@@ -3784,13 +3665,10 @@ struct vmm_ops vmm_ops_intel = {
.vlapic_init = vmx_vlapic_init,
.vlapic_cleanup = vmx_vlapic_cleanup,
-#ifndef __FreeBSD__
.vmsavectx = vmx_savectx,
.vmrestorectx = vmx_restorectx,
-#endif
};
-#ifndef __FreeBSD__
/* Side-effect free HW validation derived from checks in vmx_init. */
int
vmx_x86_supported(const char **msg)
@@ -3856,4 +3734,3 @@ vmx_x86_supported(const char **msg)
return (0);
}
-#endif
diff --git a/usr/src/uts/i86pc/io/vmm/intel/vmx.h b/usr/src/uts/i86pc/io/vmm/intel/vmx.h
index 6d17bdaadd..f86d812104 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/vmx.h
+++ b/usr/src/uts/i86pc/io/vmm/intel/vmx.h
@@ -127,13 +127,11 @@ enum {
GUEST_MSR_NUM /* must be the last enumeration */
};
-#ifndef __FreeBSD__
typedef enum {
VS_NONE = 0x0,
VS_LAUNCHED = 0x1,
VS_LOADED = 0x2
} vmcs_state_t;
-#endif /* __FreeBSD__ */
/* virtual machine softc */
struct vmx {
@@ -142,12 +140,10 @@ struct vmx {
char msr_bitmap[PAGE_SIZE];
struct pir_desc pir_desc[VM_MAXCPU];
uint64_t guest_msrs[VM_MAXCPU][GUEST_MSR_NUM];
-#ifndef __FreeBSD__
uint64_t host_msrs[VM_MAXCPU][GUEST_MSR_NUM];
uint64_t tsc_offset_active[VM_MAXCPU];
vmcs_state_t vmcs_state[VM_MAXCPU];
uintptr_t vmcs_pa[VM_MAXCPU];
-#endif
struct vmxctx ctx[VM_MAXCPU];
struct vmxcap cap[VM_MAXCPU];
struct vmxstate state[VM_MAXCPU];
diff --git a/usr/src/uts/i86pc/io/vmm/intel/vmx_msr.c b/usr/src/uts/i86pc/io/vmm/intel/vmx_msr.c
index a7d674d005..df044fd09e 100644
--- a/usr/src/uts/i86pc/io/vmm/intel/vmx_msr.c
+++ b/usr/src/uts/i86pc/io/vmm/intel/vmx_msr.c
@@ -175,9 +175,6 @@ msr_bitmap_change_access(char *bitmap, uint_t msr, int access)
static uint64_t misc_enable;
static uint64_t platform_info;
static uint64_t turbo_ratio_limit;
-#ifdef __FreeBSD__
-static uint64_t host_msrs[GUEST_MSR_NUM];
-#endif /* __FreeBSD__ */
static bool
nehalem_cpu(void)
@@ -252,18 +249,6 @@ vmx_msr_init(void)
uint64_t bus_freq, ratio;
int i;
-#ifdef __FreeBSD__
- /* XXXJOY: Do we want to do this caching? */
- /*
- * It is safe to cache the values of the following MSRs because
- * they don't change based on curcpu, curproc or curthread.
- */
- host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
- host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
- host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
- host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
-#endif /* __FreeBSD__ */
-
/*
* Initialize emulated MSRs
*/
@@ -354,8 +339,6 @@ void
vmx_msr_guest_enter(struct vmx *vmx, int vcpuid)
{
uint64_t *guest_msrs = vmx->guest_msrs[vcpuid];
-
-#ifndef __FreeBSD__
uint64_t *host_msrs = vmx->host_msrs[vcpuid];
/* Save host MSRs */
@@ -363,12 +346,8 @@ vmx_msr_guest_enter(struct vmx *vmx, int vcpuid)
host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
-#endif /* __FreeBSD__ */
/* Save host MSRs (in particular, KGSBASE) and restore guest MSRs */
-#ifdef __FreeBSD__
- update_pcb_bases(curpcb);
-#endif
wrmsr(MSR_LSTAR, guest_msrs[IDX_MSR_LSTAR]);
wrmsr(MSR_CSTAR, guest_msrs[IDX_MSR_CSTAR]);
wrmsr(MSR_STAR, guest_msrs[IDX_MSR_STAR]);
@@ -380,9 +359,7 @@ void
vmx_msr_guest_exit(struct vmx *vmx, int vcpuid)
{
uint64_t *guest_msrs = vmx->guest_msrs[vcpuid];
-#ifndef __FreeBSD__
uint64_t *host_msrs = vmx->host_msrs[vcpuid];
-#endif
/* Save guest MSRs */
guest_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
diff --git a/usr/src/uts/i86pc/io/vmm/io/vatpit.c b/usr/src/uts/i86pc/io/vmm/io/vatpit.c
index 024aa076f7..cb815fd9c5 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vatpit.c
+++ b/usr/src/uts/i86pc/io/vmm/io/vatpit.c
@@ -467,7 +467,6 @@ vatpit_cleanup(struct vatpit *vatpit)
free(vatpit, M_VATPIT);
}
-#ifndef __FreeBSD__
void
vatpit_localize_resources(struct vatpit *vatpit)
{
@@ -478,4 +477,3 @@ vatpit_localize_resources(struct vatpit *vatpit)
}
}
}
-#endif /* __FreeBSD */
diff --git a/usr/src/uts/i86pc/io/vmm/io/vatpit.h b/usr/src/uts/i86pc/io/vmm/io/vatpit.h
index 9148326dd3..bee3a88293 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vatpit.h
+++ b/usr/src/uts/i86pc/io/vmm/io/vatpit.h
@@ -44,8 +44,6 @@ int vatpit_handler(void *arg, bool in, uint16_t port, uint8_t bytes,
int vatpit_nmisc_handler(void *arg, bool in, uint16_t port, uint8_t bytes,
uint32_t *eax);
-#ifndef __FreeBSD__
void vatpit_localize_resources(struct vatpit *);
-#endif
#endif /* _VATPIT_H_ */
diff --git a/usr/src/uts/i86pc/io/vmm/io/vhpet.c b/usr/src/uts/i86pc/io/vmm/io/vhpet.c
index edeee6c128..14418ff5fa 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vhpet.c
+++ b/usr/src/uts/i86pc/io/vmm/io/vhpet.c
@@ -160,13 +160,8 @@ vhpet_counter(struct vhpet *vhpet, sbintime_t *nowptr)
if (vhpet_counter_enabled(vhpet)) {
now = sbinuptime();
delta = now - vhpet->countbase_sbt;
-#ifdef __FreeBSD__
- KASSERT(delta >= 0, ("vhpet_counter: uptime went backwards: "
- "%#lx to %#lx", vhpet->countbase_sbt, now));
-#else
KASSERT(delta >= 0, ("vhpet_counter: uptime went backwards: "
"%lx to %lx", vhpet->countbase_sbt, now));
-#endif
val += delta / vhpet->freq_sbt;
if (nowptr != NULL)
*nowptr = now;
@@ -769,7 +764,6 @@ vhpet_getcap(struct vm_hpet_cap *cap)
cap->capabilities = vhpet_capabilities();
return (0);
}
-#ifndef __FreeBSD__
void
vhpet_localize_resources(struct vhpet *vhpet)
{
@@ -777,4 +771,3 @@ vhpet_localize_resources(struct vhpet *vhpet)
vmm_glue_callout_localize(&vhpet->timer[i].callout);
}
}
-#endif /* __FreeBSD */
diff --git a/usr/src/uts/i86pc/io/vmm/io/vhpet.h b/usr/src/uts/i86pc/io/vmm/io/vhpet.h
index e416b315d6..0ea0a6b15a 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vhpet.h
+++ b/usr/src/uts/i86pc/io/vmm/io/vhpet.h
@@ -47,8 +47,6 @@ int vhpet_mmio_read(struct vm *vm, int vcpuid, uint64_t gpa, uint64_t *val,
int size);
int vhpet_getcap(struct vm_hpet_cap *cap);
-#ifndef __FreeBSD__
void vhpet_localize_resources(struct vhpet *vhpet);
-#endif
#endif /* _VHPET_H_ */
diff --git a/usr/src/uts/i86pc/io/vmm/io/vlapic.c b/usr/src/uts/i86pc/io/vmm/io/vlapic.c
index 8c054a52fb..8198ebfce6 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vlapic.c
+++ b/usr/src/uts/i86pc/io/vmm/io/vlapic.c
@@ -144,15 +144,6 @@ vlapic_dfr_write_handler(struct vlapic *vlapic)
lapic->dfr &= APIC_DFR_MODEL_MASK;
lapic->dfr |= APIC_DFR_RESERVED;
-
-#ifdef __FreeBSD__
- if ((lapic->dfr & APIC_DFR_MODEL_MASK) == APIC_DFR_MODEL_FLAT)
- VLAPIC_CTR0(vlapic, "vlapic DFR in Flat Model");
- else if ((lapic->dfr & APIC_DFR_MODEL_MASK) == APIC_DFR_MODEL_CLUSTER)
- VLAPIC_CTR0(vlapic, "vlapic DFR in Cluster Model");
- else
- VLAPIC_CTR1(vlapic, "DFR in Unknown Model %#x", lapic->dfr);
-#endif
}
void
@@ -245,13 +236,8 @@ vlapic_get_ccr(struct vlapic *vlapic)
ccr += bt_rem.frac / vlapic->timer_freq_bt.frac;
}
}
-#ifdef __FreeBSD__
- KASSERT(ccr <= lapic->icr_timer, ("vlapic_get_ccr: invalid ccr %#x, "
- "icr_timer is %#x", ccr, lapic->icr_timer));
-#else
KASSERT(ccr <= lapic->icr_timer, ("vlapic_get_ccr: invalid ccr %x, "
"icr_timer is %x", ccr, lapic->icr_timer));
-#endif
VLAPIC_CTR2(vlapic, "vlapic ccr_timer = %#x, icr_timer = %#x",
ccr, lapic->icr_timer);
VLAPIC_TIMER_UNLOCK(vlapic);
@@ -398,13 +384,8 @@ lvt_off_to_idx(uint32_t offset)
index = -1;
break;
}
-#ifdef __FreeBSD__
- KASSERT(index >= 0 && index <= VLAPIC_MAXLVT_INDEX, ("lvt_off_to_idx: "
- "invalid lvt index %d for offset %#x", index, offset));
-#else
KASSERT(index >= 0 && index <= VLAPIC_MAXLVT_INDEX, ("lvt_off_to_idx: "
"invalid lvt index %d for offset %x", index, offset));
-#endif
return (index);
}
@@ -1329,13 +1310,8 @@ vlapic_write(struct vlapic *vlapic, int mmio_access, uint64_t offset,
uint32_t *regptr;
int retval;
-#ifdef __FreeBSD__
- KASSERT((offset & 0xf) == 0 && offset < PAGE_SIZE,
- ("vlapic_write: invalid offset %#lx", offset));
-#else
KASSERT((offset & 0xf) == 0 && offset < PAGE_SIZE,
("vlapic_write: invalid offset %lx", offset));
-#endif
VLAPIC_CTR2(vlapic, "vlapic write offset %#lx, data %#lx",
offset, data);
@@ -1655,13 +1631,11 @@ vlapic_enabled(struct vlapic *vlapic)
return (false);
}
-#ifndef __FreeBSD__
void
vlapic_localize_resources(struct vlapic *vlapic)
{
vmm_glue_callout_localize(&vlapic->callout);
}
-#endif /* __FreeBSD */
#ifdef __ISRVEC_DEBUG
static void
diff --git a/usr/src/uts/i86pc/io/vmm/io/vlapic.h b/usr/src/uts/i86pc/io/vmm/io/vlapic.h
index 6072b46101..f490eff637 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vlapic.h
+++ b/usr/src/uts/i86pc/io/vmm/io/vlapic.h
@@ -107,8 +107,6 @@ void vlapic_dcr_write_handler(struct vlapic *vlapic);
void vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset);
void vlapic_self_ipi_handler(struct vlapic *vlapic, uint64_t val);
-#ifndef __FreeBSD__
void vlapic_localize_resources(struct vlapic *vlapic);
-#endif
#endif /* _VLAPIC_H_ */
diff --git a/usr/src/uts/i86pc/io/vmm/io/vrtc.c b/usr/src/uts/i86pc/io/vmm/io/vrtc.c
index 4b53883a79..a67e82d156 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vrtc.c
+++ b/usr/src/uts/i86pc/io/vmm/io/vrtc.c
@@ -1005,10 +1005,8 @@ vrtc_cleanup(struct vrtc *vrtc)
free(vrtc, M_VRTC);
}
-#ifndef __FreeBSD__
void
vrtc_localize_resources(struct vrtc *vrtc)
{
vmm_glue_callout_localize(&vrtc->callout);
}
-#endif /* __FreeBSD */
diff --git a/usr/src/uts/i86pc/io/vmm/io/vrtc.h b/usr/src/uts/i86pc/io/vmm/io/vrtc.h
index 3fc4094921..d3140c1308 100644
--- a/usr/src/uts/i86pc/io/vmm/io/vrtc.h
+++ b/usr/src/uts/i86pc/io/vmm/io/vrtc.h
@@ -53,8 +53,6 @@ int vrtc_addr_handler(void *arg, bool in, uint16_t port, uint8_t bytes,
int vrtc_data_handler(void *arg, bool in, uint16_t port, uint8_t bytes,
uint32_t *val);
-#ifndef __FreeBSD__
void vrtc_localize_resources(struct vrtc *);
-#endif
#endif
diff --git a/usr/src/uts/i86pc/io/vmm/sys/vmm_kernel.h b/usr/src/uts/i86pc/io/vmm/sys/vmm_kernel.h
index 842b951f92..2ddcbc3199 100644
--- a/usr/src/uts/i86pc/io/vmm/sys/vmm_kernel.h
+++ b/usr/src/uts/i86pc/io/vmm/sys/vmm_kernel.h
@@ -86,10 +86,8 @@ typedef struct vmspace *(*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
typedef void (*vmi_vmspace_free)(struct vmspace *vmspace);
typedef struct vlapic *(*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void (*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
-#ifndef __FreeBSD__
typedef void (*vmi_savectx)(void *vmi, int vcpu);
typedef void (*vmi_restorectx)(void *vmi, int vcpu);
-#endif
struct vmm_ops {
vmm_init_func_t init; /* module wide initialization */
@@ -110,10 +108,8 @@ struct vmm_ops {
vmi_vlapic_init vlapic_init;
vmi_vlapic_cleanup vlapic_cleanup;
-#ifndef __FreeBSD__
vmi_savectx vmsavectx;
vmi_restorectx vmrestorectx;
-#endif
};
extern struct vmm_ops vmm_ops_intel;
@@ -139,13 +135,8 @@ int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
-#ifdef __FreeBSD__
-int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
-int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);
-#else
int vm_assign_pptdev(struct vm *vm, int pptfd);
int vm_unassign_pptdev(struct vm *vm, int pptfd);
-#endif /* __FreeBSD__ */
/*
* APIs that inspect the guest memory map require only a *single* vcpu to
@@ -384,8 +375,6 @@ enum event_inject_state {
EIS_REQ_EXIT = (1 << 15),
};
-#ifndef __FreeBSD__
-
void vmm_sol_glue_init(void);
void vmm_sol_glue_cleanup(void);
@@ -441,6 +430,4 @@ typedef struct vmm_vcpu_kstats {
int vmm_kstat_update_vcpu(struct kstat *, int);
-#endif /* __FreeBSD */
-
#endif /* _VMM_KERNEL_H_ */
diff --git a/usr/src/uts/i86pc/io/vmm/vmm.c b/usr/src/uts/i86pc/io/vmm/vmm.c
index 856eeef965..88c9f30b6d 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm.c
+++ b/usr/src/uts/i86pc/io/vmm/vmm.c
@@ -69,9 +69,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
-#ifdef __FreeBSD__
-#include <machine/cpu.h>
-#endif
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/md_var.h>
@@ -154,11 +151,7 @@ struct mem_seg {
bool sysmem;
struct vm_object *object;
};
-#ifdef __FreeBSD__
-#define VM_MAX_MEMSEGS 3
-#else
#define VM_MAX_MEMSEGS 4
-#endif
struct mem_map {
vm_paddr_t gpa;
@@ -287,7 +280,6 @@ static void vcpu_notify_event_locked(struct vcpu *vcpu, vcpu_notify_t);
static bool vcpu_sleep_bailout_checks(struct vm *vm, int vcpuid);
static int vcpu_vector_sipi(struct vm *vm, int vcpuid, uint8_t vector);
-#ifndef __FreeBSD__
static void vm_clear_memseg(struct vm *, int);
extern int arc_virt_machine_reserve(size_t);
@@ -304,8 +296,6 @@ typedef struct vm_thread_ctx {
enum vcpu_ustate vtc_ustate;
} vm_thread_ctx_t;
-#endif /* __FreeBSD__ */
-
#ifdef KTR
static const char *
vcpu_state2str(enum vcpu_state state)
@@ -351,16 +341,10 @@ vcpu_init(struct vm *vm, int vcpu_id, bool create)
vcpu = &vm->vcpu[vcpu_id];
if (create) {
-#ifdef __FreeBSD__
- KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
- "initialized", vcpu_id));
-#endif
vcpu_lock_init(vcpu);
vcpu->state = VCPU_IDLE;
vcpu->hostcpu = NOCPU;
-#ifndef __FreeBSD__
vcpu->lastloccpu = NOCPU;
-#endif
vcpu->guestfpu = fpu_save_area_alloc();
vcpu->stats = vmm_stat_alloc();
vcpu->vie_ctx = vie_alloc();
@@ -425,15 +409,8 @@ vmm_init(void)
vmm_host_state_init();
-#ifdef __FreeBSD__
- vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
- &IDTVEC(justreturn));
- if (vmm_ipinum < 0)
- vmm_ipinum = IPI_AST;
-#else
/* We use cpu_poke() for IPIs */
vmm_ipinum = 0;
-#endif
error = vmm_mem_init();
if (error)
@@ -446,10 +423,6 @@ vmm_init(void)
else
return (ENXIO);
-#ifdef __FreeBSD__
- vmm_resume_p = vmm_resume;
-#endif
-
return (VMM_INIT(vmm_ipinum));
}
@@ -638,10 +611,9 @@ vm_cleanup(struct vm *vm, bool destroy)
*/
for (i = 0; i < VM_MAX_MEMMAPS; i++) {
mm = &vm->mem_maps[i];
- if (destroy || !sysmem_mapping(vm, mm))
+ if (destroy || !sysmem_mapping(vm, mm)) {
vm_free_memmap(vm, i);
-#ifndef __FreeBSD__
- else {
+ } else {
/*
* We need to reset the IOMMU flag so this mapping can
* be reused when a VM is rebooted. Since the IOMMU
@@ -650,7 +622,6 @@ vm_cleanup(struct vm *vm, bool destroy)
*/
mm->flags &= ~VM_MEMMAP_F_IOMMU;
}
-#endif
}
if (destroy) {
@@ -660,21 +631,15 @@ vm_cleanup(struct vm *vm, bool destroy)
VMSPACE_FREE(vm->vmspace);
vm->vmspace = NULL;
-#ifndef __FreeBSD__
arc_virt_machine_release(vm->arc_resv);
vm->arc_resv = 0;
-#endif
-
- }
-#ifndef __FreeBSD__
- else {
+ } else {
/*
* Clear the first memory segment (low mem), old memory contents
* could confuse the UEFI firmware.
*/
vm_clear_memseg(vm, 0);
}
-#endif
}
void
@@ -815,7 +780,6 @@ vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
return (0);
}
-#ifndef __FreeBSD__
static void
vm_clear_memseg(struct vm *vm, int ident)
{
@@ -829,7 +793,6 @@ vm_clear_memseg(struct vm *vm, int ident)
if (seg->object != NULL)
vm_object_clear(seg->object);
}
-#endif
void
vm_free_memseg(struct vm *vm, int ident)
@@ -1354,14 +1317,10 @@ save_guest_fpustate(struct vcpu *vcpu)
/* save guest FPU state */
fpu_stop_emulating();
fpusave(vcpu->guestfpu);
-#ifdef __FreeBSD__
- fpu_start_emulating();
-#else
/*
* When the host state has been restored, we should not re-enable
* CR0.TS on illumos for eager FPU.
*/
-#endif
}
static int
@@ -1385,11 +1344,7 @@ vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
vcpu_notify_event_locked(vcpu, VCPU_NOTIFY_EXIT);
VCPU_CTR1(vm, vcpuid, "vcpu state change from %s to "
"idle requested", vcpu_state2str(vcpu->state));
-#ifdef __FreeBSD__
- msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
-#else
cv_wait(&vcpu->state_cv, &vcpu->mtx.m);
-#endif
}
} else {
KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
@@ -1437,11 +1392,7 @@ vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
vcpu->hostcpu = NOCPU;
if (newstate == VCPU_IDLE) {
-#ifdef __FreeBSD__
- wakeup(&vcpu->state);
-#else
cv_broadcast(&vcpu->state_cv);
-#endif
}
return (0);
@@ -1811,58 +1762,17 @@ vm_handle_inst_emul(struct vm *vm, int vcpuid)
static int
vm_handle_suspend(struct vm *vm, int vcpuid)
{
-#ifdef __FreeBSD__
- int error, i;
- struct vcpu *vcpu;
- struct thread *td;
-
- error = 0;
- vcpu = &vm->vcpu[vcpuid];
- td = curthread;
-#else
int i;
struct vcpu *vcpu;
vcpu = &vm->vcpu[vcpuid];
-#endif
CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);
-#ifdef __FreeBSD__
/*
* Wait until all 'active_cpus' have suspended themselves.
- *
- * Since a VM may be suspended at any time including when one or
- * more vcpus are doing a rendezvous we need to call the rendezvous
- * handler while we are waiting to prevent a deadlock.
*/
vcpu_lock(vcpu);
- while (error == 0) {
- if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
- VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
- break;
- }
-
- if (vm->rendezvous_func == NULL) {
- VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
- vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
- msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
- vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
- if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
- vcpu_unlock(vcpu);
- error = thread_check_susp(td, false);
- vcpu_lock(vcpu);
- }
- } else {
- VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
- vcpu_unlock(vcpu);
- error = vm_handle_rendezvous(vm, vcpuid);
- vcpu_lock(vcpu);
- }
- }
- vcpu_unlock(vcpu);
-#else
- vcpu_lock(vcpu);
vcpu_ustate_change(vm, vcpuid, VU_INIT);
while (1) {
int rc;
@@ -1895,8 +1805,6 @@ vm_handle_suspend(struct vm *vm, int vcpuid)
}
vcpu_unlock(vcpu);
-#endif
-
/*
* Wakeup the other sleeping vcpus and return to userspace.
*/
@@ -2120,8 +2028,6 @@ vm_exit_run_state(struct vm *vm, int vcpuid, uint64_t rip)
vmm_stat_incr(vm, vcpuid, VMEXIT_RUN_STATE, 1);
}
-
-#ifndef __FreeBSD__
/*
* Some vmm resources, such as the lapic, may have CPU-specific resources
* allocated to them which would benefit from migration onto the host CPU which
@@ -2236,8 +2142,6 @@ vmm_freectx(void *arg, int isexec)
vmm_savectx(arg);
}
-#endif /* __FreeBSD */
-
static int
vm_entry_actions(struct vm *vm, int vcpuid, const struct vm_entry *entry,
struct vm_exit *vme)
@@ -2324,23 +2228,16 @@ vm_run(struct vm *vm, int vcpuid, const struct vm_entry *entry)
{
int error;
struct vcpu *vcpu;
-#ifdef __FreeBSD__
- struct pcb *pcb;
-#endif
struct vm_exit *vme;
bool intr_disabled;
pmap_t pmap;
-#ifndef __FreeBSD__
vm_thread_ctx_t vtc;
int affinity_type = CPU_CURRENT;
-#endif
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
return (EINVAL);
-
if (!CPU_ISSET(vcpuid, &vm->active_cpus))
return (EINVAL);
-
if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
return (EINVAL);
@@ -2350,14 +2247,11 @@ vm_run(struct vm *vm, int vcpuid, const struct vm_entry *entry)
vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN);
-#ifndef __FreeBSD__
vtc.vtc_vm = vm;
vtc.vtc_vcpuid = vcpuid;
vtc.vtc_status = 0;
-
installctx(curthread, &vtc, vmm_savectx, vmm_restorectx, NULL, NULL,
NULL, vmm_freectx);
-#endif
error = vm_entry_actions(vm, vcpuid, entry, vme);
if (error != 0) {
@@ -2370,7 +2264,6 @@ restart:
goto exit;
}
-#ifndef __FreeBSD__
thread_affinity_set(curthread, affinity_type);
/*
* Resource localization should happen after the CPU affinity for the
@@ -2381,51 +2274,31 @@ restart:
* This must be done prior to disabling kpreempt via critical_enter().
*/
vm_localize_resources(vm, vcpu);
-
affinity_type = CPU_CURRENT;
-#endif
-
critical_enter();
KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
("vm_run: absurd pm_active"));
-#ifdef __FreeBSD__
- pcb = PCPU_GET(curpcb);
- set_pcb_flags(pcb, PCB_FULL_IRET);
-#else
/* Force a trip through update_sregs to reload %fs/%gs and friends */
PCB_SET_UPDATE_SEGS(&ttolwp(curthread)->lwp_pcb);
-#endif
-#ifdef __FreeBSD__
- restore_guest_fpustate(vcpu);
-#else
if ((vtc.vtc_status & VTCS_FPU_RESTORED) == 0) {
restore_guest_fpustate(vcpu);
vtc.vtc_status |= VTCS_FPU_RESTORED;
}
vtc.vtc_status |= VTCS_FPU_CTX_CRITICAL;
-#endif
vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap);
vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
-#ifdef __FreeBSD__
- save_guest_fpustate(vcpu);
-#else
- vtc.vtc_status &= ~VTCS_FPU_CTX_CRITICAL;
-#endif
-
-#ifndef __FreeBSD__
/*
* Once clear of the delicate contexts comprising the VM_RUN handler,
* thread CPU affinity can be loosened while other processing occurs.
*/
+ vtc.vtc_status &= ~VTCS_FPU_CTX_CRITICAL;
thread_affinity_clear(curthread);
-#endif
-
critical_exit();
if (error != 0) {
@@ -2475,12 +2348,9 @@ restart:
case VM_EXITCODE_WRMSR:
error = vm_handle_wrmsr(vm, vcpuid, vme);
break;
-
- case VM_EXITCODE_HT: {
+ case VM_EXITCODE_HT:
affinity_type = CPU_BEST;
break;
- }
-
case VM_EXITCODE_MTRAP:
vm_suspend_cpu(vm, vcpuid);
error = -1;
@@ -2497,10 +2367,8 @@ restart:
}
exit:
-#ifndef __FreeBSD__
removectx(curthread, &vtc, vmm_savectx, vmm_restorectx, NULL, NULL,
NULL, vmm_freectx);
-#endif
VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);
@@ -3202,50 +3070,6 @@ vm_hpet(struct vm *vm)
return (vm->vhpet);
}
-#ifdef __FreeBSD__
-bool
-vmm_is_pptdev(int bus, int slot, int func)
-{
- int b, f, i, n, s;
- char *val, *cp, *cp2;
- bool found;
-
- /*
- * XXX
- * The length of an environment variable is limited to 128 bytes which
- * puts an upper limit on the number of passthru devices that may be
- * specified using a single environment variable.
- *
- * Work around this by scanning multiple environment variable
- * names instead of a single one - yuck!
- */
- const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };
-
- /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
- found = false;
- for (i = 0; names[i] != NULL && !found; i++) {
- cp = val = kern_getenv(names[i]);
- while (cp != NULL && *cp != '\0') {
- if ((cp2 = strchr(cp, ' ')) != NULL)
- *cp2 = '\0';
-
- n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
- if (n == 3 && bus == b && slot == s && func == f) {
- found = true;
- break;
- }
-
- if (cp2 != NULL)
- *cp2++ = ' ';
-
- cp = cp2;
- }
- freeenv(val);
- }
- return (found);
-}
-#endif
-
void *
vm_iommu_domain(struct vm *vm)
{
@@ -3545,11 +3369,7 @@ vcpu_notify_event_locked(struct vcpu *vcpu, vcpu_notify_t ntype)
KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
"with hostcpu %d", vcpu->state, hostcpu));
if (vcpu->state == VCPU_SLEEPING) {
-#ifdef __FreeBSD__
- wakeup_one(vcpu);
-#else
cv_signal(&vcpu->vcpu_cv);
-#endif
}
}
}
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_host.c b/usr/src/uts/i86pc/io/vmm/vmm_host.c
index 9e390c93dd..2c1897b18f 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_host.c
+++ b/usr/src/uts/i86pc/io/vmm/vmm_host.c
@@ -143,60 +143,34 @@ vmm_get_host_xcr0(void)
uint64_t
vmm_get_host_datasel(void)
{
-
-#ifdef __FreeBSD__
- return (GSEL(GDATA_SEL, SEL_KPL));
-#else
return (SEL_GDT(GDT_KDATA, SEL_KPL));
-#endif
-
}
uint64_t
vmm_get_host_codesel(void)
{
-
-#ifdef __FreeBSD__
- return (GSEL(GCODE_SEL, SEL_KPL));
-#else
return (SEL_GDT(GDT_KCODE, SEL_KPL));
-#endif
}
uint64_t
vmm_get_host_tsssel(void)
{
-
-#ifdef __FreeBSD__
- return (GSEL(GPROC0_SEL, SEL_KPL));
-#else
return (SEL_GDT(GDT_KTSS, SEL_KPL));
-#endif
}
uint64_t
vmm_get_host_fsbase(void)
{
-
-#ifdef __FreeBSD__
- return (0);
-#else
return (rdmsr(MSR_FSBASE));
-#endif
}
uint64_t
vmm_get_host_idtrbase(void)
{
-
-#ifdef __FreeBSD__
- return (r_idt.rd_base);
-#else
desctbr_t idtr;
rd_idtr(&idtr);
return (idtr.dtr_base);
-#endif
}
const struct xsave_limits *
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_sol_dev.c b/usr/src/uts/i86pc/io/vmm/vmm_sol_dev.c
index fc0a3339b4..67380cdf72 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_sol_dev.c
+++ b/usr/src/uts/i86pc/io/vmm/vmm_sol_dev.c
@@ -498,9 +498,7 @@ vmmdev_do_ioctl(vmm_softc_t *sc, int cmd, intptr_t arg, int md,
case VM_RTC_SETTIME:
case VM_RTC_GETTIME:
case VM_PPTDEV_DISABLE_MSIX:
-#ifndef __FreeBSD__
case VM_DEVMEM_GETOFFSET:
-#endif
vmm_read_lock(sc);
lock_type = LOCK_READ_HOLD;
break;
@@ -1391,7 +1389,6 @@ vmmdev_do_ioctl(vmm_softc_t *sc, int cmd, intptr_t arg, int md,
break;
}
-#ifndef __FreeBSD__
case VM_DEVMEM_GETOFFSET: {
struct vm_devmem_offset vdo;
list_t *dl = &sc->vmm_devmem_list;
@@ -1427,7 +1424,6 @@ vmmdev_do_ioctl(vmm_softc_t *sc, int cmd, intptr_t arg, int md,
case VM_ARC_RESV:
error = vm_arc_resv(sc->vmm_vm, (uint64_t)arg);
break;
-#endif
default:
error = ENOTTY;
break;
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_util.c b/usr/src/uts/i86pc/io/vmm/vmm_util.c
index d6ed67f4b3..05dfd08aaa 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_util.c
+++ b/usr/src/uts/i86pc/io/vmm/vmm_util.c
@@ -81,42 +81,3 @@ vmm_supports_1G_pages(void)
}
return (false);
}
-
-#ifdef __FreeBSD__
-#include <sys/proc.h>
-#include <machine/frame.h>
-#define DUMP_REG(x) printf(#x "\t\t0x%016lx\n", (long)(tf->tf_ ## x))
-#define DUMP_SEG(x) printf(#x "\t\t0x%04x\n", (unsigned)(tf->tf_ ## x))
-void
-dump_trapframe(struct trapframe *tf)
-{
- DUMP_REG(rdi);
- DUMP_REG(rsi);
- DUMP_REG(rdx);
- DUMP_REG(rcx);
- DUMP_REG(r8);
- DUMP_REG(r9);
- DUMP_REG(rax);
- DUMP_REG(rbx);
- DUMP_REG(rbp);
- DUMP_REG(r10);
- DUMP_REG(r11);
- DUMP_REG(r12);
- DUMP_REG(r13);
- DUMP_REG(r14);
- DUMP_REG(r15);
- DUMP_REG(trapno);
- DUMP_REG(addr);
- DUMP_REG(flags);
- DUMP_REG(err);
- DUMP_REG(rip);
- DUMP_REG(rflags);
- DUMP_REG(rsp);
- DUMP_SEG(cs);
- DUMP_SEG(ss);
- DUMP_SEG(fs);
- DUMP_SEG(gs);
- DUMP_SEG(es);
- DUMP_SEG(ds);
-}
-#endif
diff --git a/usr/src/uts/i86pc/io/vmm/x86.c b/usr/src/uts/i86pc/io/vmm/x86.c
index 1d3863b8a4..623e71efac 100644
--- a/usr/src/uts/i86pc/io/vmm/x86.c
+++ b/usr/src/uts/i86pc/io/vmm/x86.c
@@ -65,10 +65,6 @@ __FBSDID("$FreeBSD$");
#include "x86.h"
SYSCTL_DECL(_hw_vmm);
-#ifdef __FreeBSD__
-static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
- NULL);
-#endif
#define CPUID_VM_HIGH 0x40000000
diff --git a/usr/src/uts/i86pc/os/startup.c b/usr/src/uts/i86pc/os/startup.c
index 3664fd3abf..fe2b929dae 100644
--- a/usr/src/uts/i86pc/os/startup.c
+++ b/usr/src/uts/i86pc/os/startup.c
@@ -26,6 +26,7 @@
* Copyright (c) 2018 Joyent, Inc.
* Copyright (c) 2015 by Delphix. All rights reserved.
* Copyright 2020 Oxide Computer Company
+ * Copyright (c) 2020 Carlos Neira <cneirabustos@gmail.com>
*/
/*
* Copyright (c) 2010, Intel Corporation.
@@ -327,7 +328,7 @@ struct seg *segkpm = &kpmseg; /* 64bit kernel physical mapping segment */
caddr_t segkp_base; /* Base address of segkp */
caddr_t segzio_base; /* Base address of segzio */
-pgcnt_t segkpsize = btop(SEGKPDEFSIZE); /* size of segkp segment in pages */
+pgcnt_t segkpsize; /* size of segkp segment in pages */
caddr_t segkvmm_base;
pgcnt_t segkvmmsize;
pgcnt_t segziosize;
@@ -1895,9 +1896,9 @@ layout_kernel_va(void)
segkp_base = (caddr_t)valloc_base + valloc_sz;
if (!segkp_fromheap) {
size = mmu_ptob(segkpsize);
-
/*
- * determine size of segkp
+ * Determine size of segkp
+ * Users can change segkpsize through eeprom.
*/
if (size < SEGKPMINSIZE || size > SEGKPMAXSIZE) {
size = SEGKPDEFSIZE;
@@ -1906,7 +1907,6 @@ layout_kernel_va(void)
mmu_btop(size));
}
size = MIN(size, MAX(SEGKPMINSIZE, physmem_size));
-
segkpsize = mmu_btop(ROUND_UP_LPAGE(size));
}
PRM_DEBUG(segkp_base);
@@ -3100,6 +3100,13 @@ get_system_configuration(void)
else
segmapfreelists = (int)lvalue;
+ if (BOP_GETPROPLEN(bootops, "segkpsize") > sizeof (prop) ||
+ BOP_GETPROP(bootops, "segkpsize", prop) < 0 ||
+ kobj_getvalue(prop, &lvalue) == -1)
+ segkpsize = mmu_btop(SEGKPDEFSIZE);
+ else
+ segkpsize = mmu_btop((size_t)lvalue);
+
/* physmem used to be here, but moved much earlier to fakebop.c */
}
diff --git a/usr/src/uts/i86pc/sys/vmm.h b/usr/src/uts/i86pc/sys/vmm.h
index 65fdb19349..5b3e7f9b10 100644
--- a/usr/src/uts/i86pc/sys/vmm.h
+++ b/usr/src/uts/i86pc/sys/vmm.h
@@ -231,9 +231,7 @@ enum vm_exitcode {
VM_EXITCODE_DEBUG,
VM_EXITCODE_VMINSN,
VM_EXITCODE_BPT,
-#ifndef __FreeBSD__
VM_EXITCODE_HT,
-#endif
VM_EXITCODE_MAX
};
diff --git a/usr/src/uts/i86pc/sys/vmm_dev.h b/usr/src/uts/i86pc/sys/vmm_dev.h
index fdbdd76a19..a0f4569be2 100644
--- a/usr/src/uts/i86pc/sys/vmm_dev.h
+++ b/usr/src/uts/i86pc/sys/vmm_dev.h
@@ -165,11 +165,7 @@ struct vm_nmi {
int cpuid;
};
-#ifdef __FreeBSD__
-#define MAX_VM_STATS 64
-#else
#define MAX_VM_STATS (64 + VM_MAXCPU)
-#endif
struct vm_stats {
int cpuid; /* in */