author    Robert Mustacchi <rm@joyent.com>  2018-06-14 15:49:02 +0000
committer Robert Mustacchi <rm@joyent.com>  2018-06-19 21:16:46 +0000
commit    7058c3c954af033fd27362cf841933da915322f1 (patch)
tree      abc01993714c65544e2f96ddf3e8091710544250
parent    f367310f219802eaffd224b4cdddf2133302efa6 (diff)
download  illumos-joyent-7058c3c954af033fd27362cf841933da915322f1.tar.gz

OS-7017 rescan cpuid after ucode updates
OS-7018 Need cpuid detection for security features
Reviewed by: John Levon <john.levon@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Approved by: Joshua M. Clulow <jmc@joyent.com>
-rw-r--r--  usr/src/uts/i86pc/os/cpuid.c         248
-rw-r--r--  usr/src/uts/i86pc/os/microcode.c       5
-rw-r--r--  usr/src/uts/i86pc/os/mlsetup.c         1
-rw-r--r--  usr/src/uts/i86pc/os/mp_startup.c     33
-rw-r--r--  usr/src/uts/intel/sys/x86_archext.h   49
5 files changed, 314 insertions(+), 22 deletions(-)
diff --git a/usr/src/uts/i86pc/os/cpuid.c b/usr/src/uts/i86pc/os/cpuid.c
index 289ce29183..bc3d80189b 100644
--- a/usr/src/uts/i86pc/os/cpuid.c
+++ b/usr/src/uts/i86pc/os/cpuid.c
@@ -59,6 +59,7 @@
#include <sys/pci_cfgspace.h>
#include <sys/comm_page.h>
#include <sys/mach_mmu.h>
+#include <sys/ucode.h>
#include <sys/tsc.h>
#ifdef __xpv
@@ -207,6 +208,16 @@ static char *x86_feature_names[NUM_X86_FEATURES] = {
"ospke",
"pcid",
"invpcid",
+ "ibrs",
+ "ibpb",
+ "stibp",
+ "ssbd",
+ "ssbd_virt",
+ "rdcl_no",
+ "ibrs_all",
+ "rsba",
+ "ssb_no",
+ "stibp_all"
};
boolean_t
@@ -974,6 +985,86 @@ cpuid_amd_getids(cpu_t *cpu)
cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg;
}
+static void
+cpuid_scan_security(cpu_t *cpu, uchar_t *featureset)
+{
+ struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
+
+ if (cpi->cpi_vendor == X86_VENDOR_AMD &&
+ cpi->cpi_xmaxeax >= 0x80000008) {
+ if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBPB)
+ add_x86_feature(featureset, X86FSET_IBPB);
+ if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBRS)
+ add_x86_feature(featureset, X86FSET_IBRS);
+ if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_STIBP)
+ add_x86_feature(featureset, X86FSET_STIBP);
+ if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_IBRS_ALL)
+ add_x86_feature(featureset, X86FSET_IBRS_ALL);
+ if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_STIBP_ALL)
+ add_x86_feature(featureset, X86FSET_STIBP_ALL);
+ if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_PREFER_IBRS)
+ add_x86_feature(featureset, X86FSET_RSBA);
+ if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_SSBD)
+ add_x86_feature(featureset, X86FSET_SSBD);
+ if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_VIRT_SSBD)
+ add_x86_feature(featureset, X86FSET_SSBD_VIRT);
+ if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_SSB_NO)
+ add_x86_feature(featureset, X86FSET_SSB_NO);
+ } else if (cpi->cpi_vendor == X86_VENDOR_Intel &&
+ cpi->cpi_maxeax >= 7) {
+ struct cpuid_regs *ecp;
+ ecp = &cpi->cpi_std[7];
+
+ if (ecp->cp_edx & CPUID_INTC_EDX_7_0_SPEC_CTRL) {
+ add_x86_feature(featureset, X86FSET_IBRS);
+ add_x86_feature(featureset, X86FSET_IBPB);
+ }
+
+ if (ecp->cp_edx & CPUID_INTC_EDX_7_0_STIBP) {
+ add_x86_feature(featureset, X86FSET_STIBP);
+ }
+
+ /*
+ * Don't read the arch caps MSR on xpv where we lack the
+ * on_trap().
+ */
+#ifndef __xpv
+ if (ecp->cp_edx & CPUID_INTC_EDX_7_0_ARCH_CAPS) {
+ on_trap_data_t otd;
+
+ /*
+ * Be paranoid and assume we'll get a #GP.
+ */
+ if (!on_trap(&otd, OT_DATA_ACCESS)) {
+ uint64_t reg;
+
+ reg = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
+ if (reg & IA32_ARCH_CAP_RDCL_NO) {
+ add_x86_feature(featureset,
+ X86FSET_RDCL_NO);
+ }
+ if (reg & IA32_ARCH_CAP_IBRS_ALL) {
+ add_x86_feature(featureset,
+ X86FSET_IBRS_ALL);
+ }
+ if (reg & IA32_ARCH_CAP_RSBA) {
+ add_x86_feature(featureset,
+ X86FSET_RSBA);
+ }
+ if (reg & IA32_ARCH_CAP_SSB_NO) {
+ add_x86_feature(featureset,
+ X86FSET_SSB_NO);
+ }
+ }
+ no_trap();
+ }
+#endif /* !__xpv */
+
+ if (ecp->cp_edx & CPUID_INTC_EDX_7_0_SSBD)
+ add_x86_feature(featureset, X86FSET_SSBD);
+ }
+}
+
/*
* Setup XFeature_Enabled_Mask register. Required by xsave feature.
*/
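
An aside on what cpuid_scan_security() reads on the Intel side: the same leaf-7 %edx bits can be probed from user space. A minimal sketch, assuming GCC/Clang's <cpuid.h>; the CPUID_INTC_* values are copied from the x86_archext.h hunk later in this commit, and nothing below is part of the commit itself.

#include <stdio.h>
#include <cpuid.h>

#define	CPUID_INTC_EDX_7_0_SPEC_CTRL	0x04000000	/* IBRS and IBPB */
#define	CPUID_INTC_EDX_7_0_STIBP	0x08000000
#define	CPUID_INTC_EDX_7_0_ARCH_CAPS	0x20000000
#define	CPUID_INTC_EDX_7_0_SSBD		0x80000000

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Sub-leaf 0; __get_cpuid_count() verifies leaf 7 is in range. */
	if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) == 0) {
		(void) printf("leaf 7 not supported\n");
		return (1);
	}
	(void) printf("spec_ctrl=%d stibp=%d arch_caps=%d ssbd=%d\n",
	    (edx & CPUID_INTC_EDX_7_0_SPEC_CTRL) != 0,
	    (edx & CPUID_INTC_EDX_7_0_STIBP) != 0,
	    (edx & CPUID_INTC_EDX_7_0_ARCH_CAPS) != 0,
	    (edx & CPUID_INTC_EDX_7_0_SSBD) != 0);
	return (0);
}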
@@ -1296,6 +1387,7 @@ cpuid_pass1(cpu_t *cpu, uchar_t *featureset)
ecp->cp_eax = 7;
ecp->cp_ecx = 0;
(void) __cpuid_insn(ecp);
+
/*
* If XSAVE has been disabled, just ignore all of the
* extended-save-area dependent flags here.
@@ -1918,6 +2010,11 @@ cpuid_pass1(cpu_t *cpu, uchar_t *featureset)
}
}
+ /*
+ * Check the processor leaves that are used for security features.
+ */
+ cpuid_scan_security(cpu, featureset);
+
pass1_done:
cpi->cpi_pass = 1;
}
@@ -1955,6 +2052,12 @@ cpuid_pass2(cpu_t *cpu)
cp->cp_eax = n;
/*
+ * n == 7 was handled in pass 1
+ */
+ if (n == 7)
+ continue;
+
+ /*
* CPUID function 4 expects %ecx to be initialized
* with an index which indicates which cache to return
* information about. The OS is expected to call function 4
@@ -1968,10 +2071,8 @@ cpuid_pass2(cpu_t *cpu)
*
* Note: we need to explicitly initialize %ecx here, since
* function 4 may have been previously invoked.
- *
- * The same is all true for CPUID function 7.
*/
- if (n == 4 || n == 7)
+ if (n == 4)
cp->cp_ecx = 0;
(void) __cpuid_insn(cp);
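
For the %ecx-initialization point in the comment above: Intel leaf 4 is a sub-leaf-indexed enumeration that terminates when the cache-type field (%eax[4:0]) reads back as zero. A runnable user-space illustration, again assuming <cpuid.h> and not part of the commit:

#include <stdio.h>
#include <cpuid.h>

int
main(void)
{
	unsigned int idx, eax, ebx, ecx, edx;

	for (idx = 0; idx < 64; idx++) {	/* cap as a safety net */
		if (__get_cpuid_count(4, idx, &eax, &ebx, &ecx, &edx) == 0)
			break;
		if ((eax & 0x1f) == 0)		/* type 0: no more caches */
			break;
		(void) printf("cache %u: type %u, level %u\n",
		    idx, eax & 0x1f, (eax >> 5) & 0x7);
	}
	return (0);
}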
@@ -5255,3 +5356,144 @@ cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
}
}
}
+
+void
+cpuid_pass_ucode(cpu_t *cpu, uchar_t *fset)
+{
+ struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
+ struct cpuid_regs cp;
+
+ /*
+ * Reread the CPUID portions that we need for various security
+ * information.
+ */
+ if (cpi->cpi_vendor == X86_VENDOR_Intel) {
+ /*
+ * Check if we now have leaf 7 available to us.
+ */
+ if (cpi->cpi_maxeax < 7) {
+ bzero(&cp, sizeof (cp));
+ cp.cp_eax = 0;
+ cpi->cpi_maxeax = __cpuid_insn(&cp);
+ if (cpi->cpi_maxeax < 7)
+ return;
+ }
+
+ bzero(&cp, sizeof (cp));
+ cp.cp_eax = 7;
+ cp.cp_ecx = 0;
+ (void) __cpuid_insn(&cp);
+ cpi->cpi_std[7] = cp;
+ } else if (cpi->cpi_vendor == X86_VENDOR_AMD) {
+ /* No xcpuid support */
+ if (cpi->cpi_family < 5 ||
+ (cpi->cpi_family == 5 && cpi->cpi_model < 1))
+ return;
+
+ if (cpi->cpi_xmaxeax < 0x80000008) {
+ bzero(&cp, sizeof (cp));
+ cp.cp_eax = 0x80000000;
+ cpi->cpi_xmaxeax = __cpuid_insn(&cp);
+ if (cpi->cpi_xmaxeax < 0x80000008) {
+ return;
+ }
+ }
+
+ bzero(&cp, sizeof (cp));
+ cp.cp_eax = 0x80000008;
+ (void) __cpuid_insn(&cp);
+ platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, &cp);
+ cpi->cpi_extd[8] = cp;
+ } else {
+ /*
+ * Nothing to do here. Return an empty set which has already
+ * been zeroed for us.
+ */
+ return;
+ }
+ cpuid_scan_security(cpu, fset);
+}
+
+static int
+cpuid_post_ucodeadm_xc(xc_arg_t arg0, xc_arg_t arg1, xc_arg_t arg2)
+{
+ uchar_t *fset;
+
+ fset = (uchar_t *)(arg0 + sizeof (x86_featureset) * CPU->cpu_id);
+ cpuid_pass_ucode(CPU, fset);
+
+ return (0);
+}
+
+/*
+ * After a microcode update where the version has changed, we need to rescan
+ * CPUID. To do this we check every CPU to make sure that they have the
+ * same microcode. Then we perform a cross call to all such CPUs. It's the
+ * caller's job to make sure that no one else can end up doing an update while
+ * this is going on.
+ *
+ * We assume that the system is microcode capable if we're called.
+ */
+void
+cpuid_post_ucodeadm(void)
+{
+ uint32_t rev;
+ int i;
+ struct cpu *cpu;
+ cpuset_t cpuset;
+ void *argdata;
+ uchar_t *f0;
+
+ argdata = kmem_zalloc(sizeof (x86_featureset) * NCPU, KM_SLEEP);
+
+ mutex_enter(&cpu_lock);
+ cpu = cpu_get(0);
+ rev = cpu->cpu_m.mcpu_ucode_info->cui_rev;
+ CPUSET_ONLY(cpuset, 0);
+ for (i = 1; i < max_ncpus; i++) {
+ if ((cpu = cpu_get(i)) == NULL)
+ continue;
+
+ if (cpu->cpu_m.mcpu_ucode_info->cui_rev != rev) {
+ panic("post microcode update CPU %d has differing "
+ "microcode revision (%u) from CPU 0 (%u)",
+ i, cpu->cpu_m.mcpu_ucode_info->cui_rev, rev);
+ }
+ CPUSET_ADD(cpuset, i);
+ }
+
+ kpreempt_disable();
+ xc_sync((xc_arg_t)argdata, 0, 0, CPUSET2BV(cpuset),
+ cpuid_post_ucodeadm_xc);
+ kpreempt_enable();
+
+ /*
+ * OK, now look at each CPU and see if their feature sets are equal.
+ */
+ f0 = argdata;
+ for (i = 1; i < max_ncpus; i++) {
+ uchar_t *fset;
+ if (!CPU_IN_SET(cpuset, i))
+ continue;
+
+ fset = (uchar_t *)((uintptr_t)argdata +
+ sizeof (x86_featureset) * i);
+
+ if (!compare_x86_featureset(f0, fset)) {
+ panic("Post microcode update CPU %d has "
+ "differing security feature (%p) set from CPU 0 "
+ "(%p), not appending to feature set", i, fset, f0);
+ }
+ }
+
+ mutex_exit(&cpu_lock);
+
+ for (i = 0; i < NUM_X86_FEATURES; i++) {
+ if (!is_x86_feature(x86_featureset, i) &&
+ is_x86_feature(f0, i)) {
+ cmn_err(CE_CONT, "?post-ucode x86_feature: %s\n",
+ x86_feature_names[i]);
+ add_x86_feature(x86_featureset, i);
+ }
+ }
+ kmem_free(argdata, sizeof (x86_featureset) * NCPU);
+}
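
A note on the buffer handling above: cpuid_post_ucodeadm() hands the cross call one flat allocation, and each CPU's handler indexes its own slot at arg0 + sizeof (x86_featureset) * cpu_id. A user-space sketch of that layout and the CPU-0 comparison, with NCPU and the slot size as stand-in constants and memcmp standing in for compare_x86_featureset():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	NCPU		4	/* stand-in for the kernel's NCPU */
#define	FSET_SIZE	16	/* stand-in for sizeof (x86_featureset) */

int
main(void)
{
	unsigned char *argdata, *f0, *fset;
	int i;

	/* One zeroed slot per CPU, as kmem_zalloc(... * NCPU) provides. */
	argdata = calloc(NCPU, FSET_SIZE);
	if (argdata == NULL)
		return (1);

	/* Each CPU's handler would fill argdata + FSET_SIZE * cpu_id. */

	f0 = argdata;
	for (i = 1; i < NCPU; i++) {
		fset = argdata + (size_t)FSET_SIZE * i;
		if (memcmp(f0, fset, FSET_SIZE) != 0)
			(void) printf("cpu %d differs from cpu 0\n", i);
	}
	free(argdata);
	return (0);
}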
diff --git a/usr/src/uts/i86pc/os/microcode.c b/usr/src/uts/i86pc/os/microcode.c
index afc48953f5..d07a79bf18 100644
--- a/usr/src/uts/i86pc/os/microcode.c
+++ b/usr/src/uts/i86pc/os/microcode.c
@@ -1145,8 +1145,11 @@ ucode_update(uint8_t *ucodep, int size)
mutex_exit(&cpu_lock);
- if (!found)
+ if (!found) {
rc = search_rc;
+ } else if (rc == EM_OK) {
+ cpuid_post_ucodeadm();
+ }
return (rc);
}
diff --git a/usr/src/uts/i86pc/os/mlsetup.c b/usr/src/uts/i86pc/os/mlsetup.c
index 601c6937fd..7c17aab541 100644
--- a/usr/src/uts/i86pc/os/mlsetup.c
+++ b/usr/src/uts/i86pc/os/mlsetup.c
@@ -490,6 +490,7 @@ mlsetup(struct regs *rp)
* Fill out cpu_ucode_info. Update microcode if necessary.
*/
ucode_check(CPU);
+ cpuid_pass_ucode(CPU, x86_featureset);
if (workaround_errata(CPU) != 0)
panic("critical workaround(s) missing for boot cpu");
diff --git a/usr/src/uts/i86pc/os/mp_startup.c b/usr/src/uts/i86pc/os/mp_startup.c
index b1100377eb..d7265f63b3 100644
--- a/usr/src/uts/i86pc/os/mp_startup.c
+++ b/usr/src/uts/i86pc/os/mp_startup.c
@@ -1755,22 +1755,6 @@ mp_startup_common(boolean_t boot)
sti();
/*
- * Do a sanity check to make sure this new CPU is a sane thing
- * to add to the collection of processors running this system.
- *
- * XXX Clearly this needs to get more sophisticated, if x86
- * systems start to get built out of heterogenous CPUs; as is
- * likely to happen once the number of processors in a configuration
- * gets large enough.
- */
- if (compare_x86_featureset(x86_featureset, new_x86_featureset) ==
- B_FALSE) {
- cmn_err(CE_CONT, "cpu%d: featureset\n", cp->cpu_id);
- print_x86_featureset(new_x86_featureset);
- cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id);
- }
-
- /*
* There exists a small subset of systems which expose differing
* MWAIT/MONITOR support between CPUs. If MWAIT support is absent from
* the boot CPU, but is found on a later CPU, the system continues to
@@ -1875,6 +1859,23 @@ mp_startup_common(boolean_t boot)
* Fill out cpu_ucode_info. Update microcode if necessary.
*/
ucode_check(cp);
+ cpuid_pass_ucode(cp, new_x86_featureset);
+
+ /*
+ * Do a sanity check to make sure this new CPU is a sane thing
+ * to add to the collection of processors running this system.
+ *
+ * XXX Clearly this needs to get more sophisticated, if x86
+ * systems start to get built out of heterogenous CPUs; as is
+ * likely to happen once the number of processors in a configuration
+ * gets large enough.
+ */
+ if (compare_x86_featureset(x86_featureset, new_x86_featureset) ==
+ B_FALSE) {
+ cmn_err(CE_CONT, "cpu%d: featureset\n", cp->cpu_id);
+ print_x86_featureset(new_x86_featureset);
+ cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id);
+ }
#ifndef __xpv
{
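
The relocated sanity check depends on featuresets being plain bitmaps, which keeps membership and equality cheap. A sketch of those two operations under that assumption; illumos actually implements them with its BT_* bitmap macros, and the helpers below are hypothetical:

#include <string.h>

#define	NUM_FEATURES	81			/* NUM_X86_FEATURES here */
#define	FSET_BYTES	((NUM_FEATURES + 7) / 8)

typedef unsigned char fset_t[FSET_BYTES];

/* Equivalent in spirit to is_x86_feature(). */
static int
fset_is_set(const fset_t f, unsigned int bit)
{
	return ((f[bit >> 3] >> (bit & 7)) & 1);
}

/* Equivalent in spirit to compare_x86_featureset(). */
static int
fset_equal(const fset_t a, const fset_t b)
{
	return (memcmp(a, b, FSET_BYTES) == 0);
}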
diff --git a/usr/src/uts/intel/sys/x86_archext.h b/usr/src/uts/intel/sys/x86_archext.h
index bbb7d4fa63..07f88edd01 100644
--- a/usr/src/uts/intel/sys/x86_archext.h
+++ b/usr/src/uts/intel/sys/x86_archext.h
@@ -187,7 +187,16 @@ extern "C" {
/*
* AMD uses %ebx for some of their features (extended function 0x80000008).
*/
-#define CPUID_AMD_EBX_ERR_PTR_ZERO 0x00000004 /* AMD: FP Err. Ptr. Zero */
+#define CPUID_AMD_EBX_ERR_PTR_ZERO 0x000000004 /* AMD: FP Err. Ptr. Zero */
+#define CPUID_AMD_EBX_IBPB 0x000001000 /* AMD: IBPB */
+#define CPUID_AMD_EBX_IBRS 0x000004000 /* AMD: IBRS */
+#define CPUID_AMD_EBX_STIBP 0x000008000 /* AMD: STIBP */
+#define CPUID_AMD_EBX_IBRS_ALL 0x000010000 /* AMD: Enhanced IBRS */
+#define CPUID_AMD_EBX_STIBP_ALL 0x000020000 /* AMD: STIBP ALL */
+#define CPUID_AMD_EBX_PREFER_IBRS 0x000040000 /* AMD: Don't retpoline */
+#define CPUID_AMD_EBX_SSBD 0x001000000 /* AMD: SSBD */
+#define CPUID_AMD_EBX_VIRT_SSBD 0x002000000 /* AMD: VIRT SSBD */
+#define CPUID_AMD_EBX_SSB_NO 0x004000000 /* AMD: SSB Fixed */
/*
* Intel now seems to have claimed part of the "extended" function
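
The AMD bits above can be probed the same way from user space; a minimal sketch, again assuming GCC/Clang's <cpuid.h> (the defines are copied from this hunk; the program is not part of the commit):

#include <stdio.h>
#include <cpuid.h>

#define	CPUID_AMD_EBX_IBPB	0x000001000
#define	CPUID_AMD_EBX_IBRS	0x000004000
#define	CPUID_AMD_EBX_SSBD	0x001000000

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() checks the extended maximum leaf for 0x80000008. */
	if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx) == 0) {
		(void) printf("leaf 0x80000008 not supported\n");
		return (1);
	}
	(void) printf("ibpb=%d ibrs=%d ssbd=%d\n",
	    (ebx & CPUID_AMD_EBX_IBPB) != 0,
	    (ebx & CPUID_AMD_EBX_IBRS) != 0,
	    (ebx & CPUID_AMD_EBX_SSBD) != 0);
	return (0);
}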
@@ -243,6 +252,10 @@ extern "C" {
#define CPUID_INTC_EDX_7_0_AVX5124NNIW 0x00000004 /* AVX512 4NNIW */
#define CPUID_INTC_EDX_7_0_AVX5124FMAPS 0x00000008 /* AVX512 4FMAPS */
+#define CPUID_INTC_EDX_7_0_SPEC_CTRL 0x04000000 /* Spec, IBPB, IBRS */
+#define CPUID_INTC_EDX_7_0_STIBP 0x08000000 /* STIBP */
+#define CPUID_INTC_EDX_7_0_ARCH_CAPS 0x20000000 /* IA32_ARCH_CAPS */
+#define CPUID_INTC_EDX_7_0_SSBD 0x80000000 /* SSBD */
#define CPUID_INTC_EDX_7_0_ALL_AVX512 \
(CPUID_INTC_EDX_7_0_AVX5124NNIW | CPUID_INTC_EDX_7_0_AVX5124FMAPS)
@@ -346,6 +359,26 @@ extern "C" {
#define MSR_PPIN_CTL_LOCKED 0x01
#define MSR_PPIN_CTL_ENABLED 0x02
+/*
+ * Intel IA32_ARCH_CAPABILITIES MSR.
+ */
+#define MSR_IA32_ARCH_CAPABILITIES 0x10a
+#define IA32_ARCH_CAP_RDCL_NO 0x0001
+#define IA32_ARCH_CAP_IBRS_ALL 0x0002
+#define IA32_ARCH_CAP_RSBA 0x0004
+#define IA32_ARCH_CAP_SSB_NO 0x0010
+
+/*
+ * Intel Speculation related MSRs
+ */
+#define MSR_IA32_SPEC_CTRL 0x48
+#define IA32_SPEC_CTRL_IBRS 0x01
+#define IA32_SPEC_CTRL_STIBP 0x02
+#define IA32_SPEC_CTRL_SSBD 0x04
+
+#define MSR_IA32_PRED_CMD 0x49
+#define IA32_PRED_CMD_IBPB 0x01
+
#define MCI_CTL_VALUE 0xffffffff
#define MTRR_TYPE_UC 0
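
For context on how these two MSRs are consumed (this commit only adds the constants): IA32_SPEC_CTRL holds sticky enable bits, while IA32_PRED_CMD is a write-only command register. A hedged kernel-side fragment using illumos's wrmsr(); the surrounding function is hypothetical:

static void
example_spec_ctrl(void)
{
	/* Enable IBRS and SSBD on this CPU; the bits stay set until cleared. */
	wrmsr(MSR_IA32_SPEC_CTRL, IA32_SPEC_CTRL_IBRS | IA32_SPEC_CTRL_SSBD);

	/*
	 * Writing IBPB to the command MSR flushes indirect branch
	 * predictor state, e.g. around a context switch.
	 */
	wrmsr(MSR_IA32_PRED_CMD, IA32_PRED_CMD_IBPB);
}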
@@ -448,6 +481,16 @@ extern "C" {
#define X86FSET_OSPKE 68
#define X86FSET_PCID 69
#define X86FSET_INVPCID 70
+#define X86FSET_IBRS 71
+#define X86FSET_IBPB 72
+#define X86FSET_STIBP 73
+#define X86FSET_SSBD 74
+#define X86FSET_SSBD_VIRT 75
+#define X86FSET_RDCL_NO 76
+#define X86FSET_IBRS_ALL 77
+#define X86FSET_RSBA 78
+#define X86FSET_SSB_NO 79
+#define X86FSET_STIBP_ALL 80
/*
* Intel Deep C-State invariant TSC in leaf 0x80000007.
@@ -730,7 +773,7 @@ extern "C" {
#if defined(_KERNEL) || defined(_KMEMUSER)
-#define NUM_X86_FEATURES 71
+#define NUM_X86_FEATURES 81
extern uchar_t x86_featureset[];
extern void free_x86_featureset(void *featureset);
@@ -843,6 +886,8 @@ extern void cpuid_pass3(struct cpu *);
extern void cpuid_pass4(struct cpu *, uint_t *);
extern void cpuid_set_cpu_properties(void *, processorid_t,
struct cpuid_info *);
+extern void cpuid_pass_ucode(struct cpu *, uchar_t *);
+extern void cpuid_post_ucodeadm(void);
extern void cpuid_get_addrsize(struct cpu *, uint_t *, uint_t *);
extern uint_t cpuid_get_dtlb_nent(struct cpu *, size_t);
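
With these declarations in place, later code can gate mitigation choices on the rescanned feature set through the existing is_x86_feature() interface. A minimal sketch; the function below is hypothetical and not part of this commit:

static void
example_choose_ssbd(void)
{
	if (is_x86_feature(x86_featureset, X86FSET_SSB_NO)) {
		/* Hardware reports it is not vulnerable; nothing to do. */
	} else if (is_x86_feature(x86_featureset, X86FSET_SSBD)) {
		/* Native SSBD control is available (IA32_SPEC_CTRL on Intel). */
	} else if (is_x86_feature(x86_featureset, X86FSET_SSBD_VIRT)) {
		/* Only hypervisor-mediated SSBD is available. */
	}
}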