Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/uts/i86pc/cpu/generic_cpu/gcpu_main.c   79
-rw-r--r--  usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c     4
-rw-r--r--  usr/src/uts/i86pc/generic_cpu/Makefile           1
-rw-r--r--  usr/src/uts/i86pc/os/cpuid.c                    64
-rw-r--r--  usr/src/uts/intel/sys/x86_archext.h             20
5 files changed, 120 insertions, 48 deletions
diff --git a/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_main.c b/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_main.c
index 66230b3d7b..a7effbdc46 100644
--- a/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_main.c
+++ b/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_main.c
@@ -64,64 +64,67 @@ static const char *gcpu_id_override[GCPU_MAX_CHIPID] = { NULL };
#endif
#ifndef __xpv
+
/*
- * This should probably be delegated to a CPU specific module. However, as those
- * haven't been developed as actively for recent CPUs, we should revisit this
- * when we do have it and move this out of gcpu.
+ * The purpose of this is to construct a unique identifier for a given processor
+ * that can be used by things like FMA to determine when a FRU has been
+ * replaced. It is supported on Intel Xeon Platforms since Ivy Bridge and AMD
+ * 17h processors since Rome. See cpuid_pass1_ppin() for how we determine if a
+ * CPU is supported.
+ *
+ * The protected processor inventory number (PPIN) can be used to create a
+ * unique identifier when combined with the processor's cpuid signature. We
+ * create a versioned, synthetic ID using the following scheme for the
+ * identifier: iv0-<vendor>-<signature>-<PPIN>. The iv0 is the illumos version
+ * zero of the ID. If we have a new scheme for a new generation of processors,
+ * then that should rev the version field, otherwise for a given processor, this
+ * synthetic ID should not change.
*
- * This method is only supported on Intel Xeon platforms. It relies on a
- * combination of the PPIN and the cpuid signature. Both are required to form
- * the synthetic ID. This ID is preceded with iv0-INTC to represent that this is
- * an Intel synthetic ID. The iv0 is the illumos version zero of the ID for
- * Intel. If we have a new scheme for a new generation of processors, then that
- * should rev the version field, otherwise for a given processor, this synthetic
- * ID should not change. For more information on PPIN and these MSRS, see the
- * relevant processor external design specification.
+ * We use the string "INTC" for Intel and "AMD" for AMD. Neither these nor
+ * the formatting of the values can change without changing the version string.
*/
static char *
-gcpu_init_ident_intc(cmi_hdl_t hdl)
+gcpu_init_ident_ppin(cmi_hdl_t hdl)
{
- uint64_t msr;
+ uint_t ppin_ctl_msr, ppin_msr;
+ uint64_t value;
+ const char *vendor;
/*
* Select the vendor-specific PPIN control and value MSRs; the Intel
* model checks that used to live here are now in cpuid_pass1_ppin().
*/
- switch (cmi_hdl_model(hdl)) {
- case INTC_MODEL_IVYBRIDGE_XEON:
- case INTC_MODEL_HASWELL_XEON:
- case INTC_MODEL_BROADWELL_XEON:
- case INTC_MODEL_BROADWELL_XEON_D:
- case INTC_MODEL_SKYLAKE_XEON:
+ switch (cmi_hdl_vendor(hdl)) {
+ case X86_VENDOR_Intel:
+ ppin_ctl_msr = MSR_PPIN_CTL_INTC;
+ ppin_msr = MSR_PPIN_INTC;
+ vendor = "INTC";
+ break;
+ case X86_VENDOR_AMD:
+ ppin_ctl_msr = MSR_PPIN_CTL_AMD;
+ ppin_msr = MSR_PPIN_AMD;
+ vendor = "AMD";
break;
default:
return (NULL);
}
- if (cmi_hdl_rdmsr(hdl, MSR_PLATFORM_INFO, &msr) != CMI_SUCCESS) {
- return (NULL);
- }
-
- if ((msr & MSR_PLATFORM_INFO_PPIN) == 0) {
+ if (cmi_hdl_rdmsr(hdl, ppin_ctl_msr, &value) != CMI_SUCCESS) {
return (NULL);
}
- if (cmi_hdl_rdmsr(hdl, MSR_PPIN_CTL, &msr) != CMI_SUCCESS) {
- return (NULL);
- }
-
- if ((msr & MSR_PPIN_CTL_ENABLED) == 0) {
- if ((msr & MSR_PPIN_CTL_LOCKED) != 0) {
+ if ((value & MSR_PPIN_CTL_ENABLED) == 0) {
+ if ((value & MSR_PPIN_CTL_LOCKED) != 0) {
return (NULL);
}
- if (cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_ENABLED) !=
+ if (cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_ENABLED) !=
CMI_SUCCESS) {
return (NULL);
}
}
- if (cmi_hdl_rdmsr(hdl, MSR_PPIN, &msr) != CMI_SUCCESS) {
+ if (cmi_hdl_rdmsr(hdl, ppin_msr, &value) != CMI_SUCCESS) {
return (NULL);
}
@@ -130,9 +133,10 @@ gcpu_init_ident_intc(cmi_hdl_t hdl)
* failure of this part, as we will have gotten everything that we need.
* It is possible that it locked open, for example.
*/
- (void) cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_LOCKED);
+ (void) cmi_hdl_wrmsr(hdl, ppin_ctl_msr, MSR_PPIN_CTL_LOCKED);
- return (kmem_asprintf("iv0-INTC-%x-%llx", cmi_hdl_chipsig(hdl), msr));
+ return (kmem_asprintf("iv0-%s-%x-%llx", vendor, cmi_hdl_chipsig(hdl),
+ value));
}
#endif /* __xpv */
@@ -159,11 +163,8 @@ gcpu_init_ident(cmi_hdl_t hdl, struct gcpu_chipshared *sp)
#endif
#ifndef __xpv
- switch (cmi_hdl_vendor(hdl)) {
- case X86_VENDOR_Intel:
- sp->gcpus_ident = gcpu_init_ident_intc(hdl);
- default:
- break;
+ if (is_x86_feature(x86_featureset, X86FSET_PPIN)) {
+ sp->gcpus_ident = gcpu_init_ident_ppin(hdl);
}
#endif /* __xpv */
}
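
The versioned ID described in the block comment above reduces to a single format string. Below is a minimal userland sketch of the assembly, using made-up signature and PPIN values rather than hardware reads; only the "iv0-%s-%x-%llx" format itself is taken from the change.

/*
 * Sketch of the iv0 synthetic ID assembly. The signature and PPIN are
 * hypothetical example values, not MSR reads; the format string matches
 * the kmem_asprintf() call in gcpu_init_ident_ppin().
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	const char *vendor = "INTC";		/* or "AMD" on AMD parts */
	unsigned int sig = 0x50654;		/* hypothetical cpuid signature */
	uint64_t ppin = 0x1122334455667788ULL;	/* hypothetical PPIN value */

	/* Prints: iv0-INTC-50654-1122334455667788 */
	(void) printf("iv0-%s-%x-%llx\n", vendor, sig,
	    (unsigned long long)ppin);
	return (0);
}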
diff --git a/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c b/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c
index 962bf52e89..a7ea684f9c 100644
--- a/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c
+++ b/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c
@@ -1330,7 +1330,7 @@ gcpu_mca_init(cmi_hdl_t hdl)
* Set the threshold to 1 while clearing the EN field, to avoid
* CMCI being triggered before the APIC LVT entry is initialized.
*/
- ctl2 = ctl2 & (~MSR_MC_CTL2_EN) | 1;
+ ctl2 = (ctl2 & (~MSR_MC_CTL2_EN)) | 1;
(void) cmi_hdl_wrmsr(hdl, IA32_MSR_MC_CTL2(i), ctl2);
/*
@@ -1365,7 +1365,7 @@ gcpu_mca_init(cmi_hdl_t hdl)
* AMD docs since K7 say we should process anything we find here.
*/
if (!gcpu_suppress_log_on_init &&
- (vendor == X86_VENDOR_Intel && family >= 0xf ||
+ ((vendor == X86_VENDOR_Intel && family >= 0xf) ||
vendor == X86_VENDOR_AMD))
gcpu_mca_logout(hdl, NULL, -1ULL, NULL, B_FALSE,
GCPU_MPT_WHAT_POKE_ERR);
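
A note on the two parenthesization fixes above: in C, & binds more tightly than | (and && more tightly than ||), so neither change alters behavior; they only make the intended grouping explicit, which is what lets the generic_cpu Makefile below drop -Wno-parentheses. A compilable sketch of the equivalence, assuming CMCI_EN is bit 30 of IA32_MCi_CTL2 as documented in the Intel SDM:

#include <assert.h>
#include <stdint.h>

#define	MSR_MC_CTL2_EN	(1ULL << 30)	/* assumed: IA32_MCi_CTL2 CMCI_EN */

int
main(void)
{
	uint64_t ctl2 = MSR_MC_CTL2_EN | 0x5;	/* arbitrary example value */

	/* & already binds tighter than |, so both spellings agree. */
	assert((ctl2 & ~MSR_MC_CTL2_EN | 1) ==
	    ((ctl2 & (~MSR_MC_CTL2_EN)) | 1));
	return (0);
}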
diff --git a/usr/src/uts/i86pc/generic_cpu/Makefile b/usr/src/uts/i86pc/generic_cpu/Makefile
index 72bbfca250..929fd6e61a 100644
--- a/usr/src/uts/i86pc/generic_cpu/Makefile
+++ b/usr/src/uts/i86pc/generic_cpu/Makefile
@@ -42,7 +42,6 @@ ROOTMODULE = $(ROOT_PSM_CPU_DIR)/$(MODULE)
#
include ../cpu/Makefile.cpu
-CERRWARN += -_gcc=-Wno-parentheses
CERRWARN += $(CNOWARN_UNINIT)
#
diff --git a/usr/src/uts/i86pc/os/cpuid.c b/usr/src/uts/i86pc/os/cpuid.c
index 741f53714f..059930352b 100644
--- a/usr/src/uts/i86pc/os/cpuid.c
+++ b/usr/src/uts/i86pc/os/cpuid.c
@@ -1436,7 +1436,8 @@ static char *x86_feature_names[NUM_X86_FEATURES] = {
"core_thermal",
"pkg_thermal",
"tsx_ctrl",
- "taa_no"
+ "taa_no",
+ "ppin"
};
boolean_t
@@ -3228,6 +3229,60 @@ cpuid_pass1_thermal(cpu_t *cpu, uchar_t *featureset)
}
}
+/*
+ * PPIN is the protected processor inventory number. On AMD this is an actual
+ * feature bit. However, on Intel systems we need to read the platform
+ * information MSR, and only on specific models.
+ */
+#if !defined(__xpv)
+static void
+cpuid_pass1_ppin(cpu_t *cpu, uchar_t *featureset)
+{
+ on_trap_data_t otd;
+ struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
+
+ switch (cpi->cpi_vendor) {
+ case X86_VENDOR_AMD:
+ /*
+ * This leaf will have already been gathered in the topology
+ * functions.
+ */
+ if (cpi->cpi_xmaxeax >= CPUID_LEAF_EXT_8) {
+ if (cpi->cpi_extd[8].cp_ebx & CPUID_AMD_EBX_PPIN) {
+ add_x86_feature(featureset, X86FSET_PPIN);
+ }
+ }
+ break;
+ case X86_VENDOR_Intel:
+ if (cpi->cpi_family != 6)
+ break;
+ switch (cpi->cpi_model) {
+ case INTC_MODEL_IVYBRIDGE_XEON:
+ case INTC_MODEL_HASWELL_XEON:
+ case INTC_MODEL_BROADWELL_XEON:
+ case INTC_MODEL_BROADWELL_XEON_D:
+ case INTC_MODEL_SKYLAKE_XEON:
+ if (!on_trap(&otd, OT_DATA_ACCESS)) {
+ uint64_t value;
+
+ value = rdmsr(MSR_PLATFORM_INFO);
+ if ((value & MSR_PLATFORM_INFO_PPIN) != 0) {
+ add_x86_feature(featureset,
+ X86FSET_PPIN);
+ }
+ }
+ no_trap();
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+#endif /* ! __xpv */
+
void
cpuid_pass1(cpu_t *cpu, uchar_t *featureset)
{
@@ -4065,8 +4120,15 @@ cpuid_pass1(cpu_t *cpu, uchar_t *featureset)
}
}
+ /*
+ * cpuid_pass1_ppin assumes that cpuid_pass1_topology has already been
+ * run and thus gathered some of its dependent leaves.
+ */
cpuid_pass1_topology(cpu, featureset);
cpuid_pass1_thermal(cpu, featureset);
+#if !defined(__xpv)
+ cpuid_pass1_ppin(cpu, featureset);
+#endif
/*
* Synthesize chip "revision" and socket type
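
The AMD branch of cpuid_pass1_ppin() above tests bit 23 of %ebx from extended leaf 0x80000008. The same check can be reproduced in userland; a sketch using the GCC/clang <cpuid.h> helper rather than the kernel's cached cpi_extd[8] copy of the leaf:

#include <cpuid.h>	/* GCC/clang helper, not used by the kernel code */
#include <stdio.h>

#define	CPUID_LEAF_EXT_8	0x80000008u
#define	CPUID_AMD_EBX_PPIN	(1u << 23)	/* same bit as 0x000800000 */

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid(CPUID_LEAF_EXT_8, &eax, &ebx, &ecx, &edx) != 0 &&
	    (ebx & CPUID_AMD_EBX_PPIN) != 0) {
		(void) printf("PPIN advertised\n");
	} else {
		(void) printf("no PPIN\n");
	}
	return (0);
}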
diff --git a/usr/src/uts/intel/sys/x86_archext.h b/usr/src/uts/intel/sys/x86_archext.h
index 1999d6c568..c0357f48a9 100644
--- a/usr/src/uts/intel/sys/x86_archext.h
+++ b/usr/src/uts/intel/sys/x86_archext.h
@@ -209,6 +209,7 @@ extern "C" {
#define CPUID_AMD_EBX_IBRS_ALL 0x000010000 /* AMD: Enhanced IBRS */
#define CPUID_AMD_EBX_STIBP_ALL 0x000020000 /* AMD: STIBP ALL */
#define CPUID_AMD_EBX_PREFER_IBRS 0x000040000 /* AMD: Don't retpoline */
+#define CPUID_AMD_EBX_PPIN 0x000800000 /* AMD: PPIN Support */
#define CPUID_AMD_EBX_SSBD 0x001000000 /* AMD: SSBD */
#define CPUID_AMD_EBX_VIRT_SSBD 0x002000000 /* AMD: VIRT SSBD */
#define CPUID_AMD_EBX_SSB_NO 0x004000000 /* AMD: SSB Fixed */
@@ -443,13 +444,21 @@ extern "C" {
#define MSR_PRP4_LBSTK_TO_15 0x6cf
/*
- * General Xeon based MSRs
+ * PPIN definitions for Intel and AMD. Unfortunately, Intel and AMD use
+ * different MSRs for the value itself and different MSRs to control whether
+ * it is readable.
*/
-#define MSR_PPIN_CTL 0x04e
-#define MSR_PPIN 0x04f
+#define MSR_PPIN_CTL_INTC 0x04e
+#define MSR_PPIN_INTC 0x04f
#define MSR_PLATFORM_INFO 0x0ce
-
#define MSR_PLATFORM_INFO_PPIN (1 << 23)
+
+#define MSR_PPIN_CTL_AMD 0xC00102F0
+#define MSR_PPIN_AMD 0xC00102F1
+
+/*
+ * These values are currently the same between Intel and AMD.
+ */
#define MSR_PPIN_CTL_MASK 0x03
#define MSR_PPIN_CTL_LOCKED 0x01
#define MSR_PPIN_CTL_ENABLED 0x02
@@ -695,6 +704,7 @@ extern "C" {
#define X86FSET_PKG_THERMAL 96
#define X86FSET_TSX_CTRL 97
#define X86FSET_TAA_NO 98
+#define X86FSET_PPIN 99
/*
* Intel Deep C-State invariant TSC in leaf 0x80000007.
@@ -1072,7 +1082,7 @@ extern "C" {
#if defined(_KERNEL) || defined(_KMEMUSER)
-#define NUM_X86_FEATURES 99
+#define NUM_X86_FEATURES 100
extern uchar_t x86_featureset[];
extern void free_x86_featureset(void *featureset);
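
The shared PPIN_CTL bits above encode a small state machine: ENABLED (bit 1) makes the PPIN MSR readable, while LOCKED (bit 0) locks PPIN_CTL against further writes until reset. A sketch of the decision gcpu_init_ident_ppin() derives from them; the helper name and shape are illustrative only, not illumos API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	MSR_PPIN_CTL_LOCKED	0x01	/* bit 0: lock out PPIN_CTL writes */
#define	MSR_PPIN_CTL_ENABLED	0x02	/* bit 1: MSR_PPIN is readable */

/*
 * Mirrors the PPIN_CTL handling in gcpu_init_ident_ppin(): usable as-is,
 * usable after an enable write, or locked off until the next reset.
 */
static bool
ppin_usable(uint64_t ctl, bool *need_enable)
{
	*need_enable = false;
	if ((ctl & MSR_PPIN_CTL_ENABLED) != 0)
		return (true);		/* already readable */
	if ((ctl & MSR_PPIN_CTL_LOCKED) != 0)
		return (false);		/* disabled and locked: give up */
	*need_enable = true;		/* disabled but unlocked: enable it */
	return (true);
}

int
main(void)
{
	bool en, ok;

	ok = ppin_usable(0, &en);
	(void) printf("unlocked+disabled: usable=%d enable-first=%d\n",
	    ok, en);			/* usable=1 enable-first=1 */
	ok = ppin_usable(MSR_PPIN_CTL_LOCKED, &en);
	(void) printf("locked+disabled: usable=%d\n", ok);	/* usable=0 */
	return (0);
}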