author		Kuriakose Kuruvilla <kuriakose.kuruvilla@oracle.com>	2010-07-14 15:47:07 -0700
committer	Kuriakose Kuruvilla <kuriakose.kuruvilla@oracle.com>	2010-07-14 15:47:07 -0700
commit		7417cfdecea1902cef03c0d61a72df97d945925d (patch)
tree		96032c72181128bf76344c8c2bc8b2c8dd7a162a
parent		5cd376e8b7030707d78315f63adb4bb2b4d9963e (diff)
download	illumos-gate-7417cfdecea1902cef03c0d61a72df97d945925d.tar.gz
6812663 Running out of bits in x86_feature
-rw-r--r--  usr/src/common/bignum/i386/bignum_i386_asm.s | 29
-rw-r--r--  usr/src/common/crypto/aes/aes_impl.c | 9
-rw-r--r--  usr/src/common/crypto/modes/gcm.c | 24
-rw-r--r--  usr/src/common/hdcrc/hd_crc.h | 9
-rw-r--r--  usr/src/uts/common/io/cpuid_drv.c | 5
-rw-r--r--  usr/src/uts/common/io/drm/drm_cache.c | 5
-rw-r--r--  usr/src/uts/i86pc/cpu/amd_opteron/ao_main.c | 2
-rw-r--r--  usr/src/uts/i86pc/cpu/authenticamd/authamd_main.c | 2
-rw-r--r--  usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c | 12
-rw-r--r--  usr/src/uts/i86pc/cpu/genuineintel/gintel_main.c | 2
-rw-r--r--  usr/src/uts/i86pc/io/apix/apix.c | 2
-rw-r--r--  usr/src/uts/i86pc/io/immu_regs.c | 2
-rw-r--r--  usr/src/uts/i86pc/io/pcplusmp/apic.c | 2
-rw-r--r--  usr/src/uts/i86pc/ml/cpr_wakecode.s | 13
-rw-r--r--  usr/src/uts/i86pc/ml/locore.s | 9
-rw-r--r--  usr/src/uts/i86pc/ml/mpcore.s | 23
-rw-r--r--  usr/src/uts/i86pc/ml/offsets.in | 1
-rw-r--r--  usr/src/uts/i86pc/os/cpr_impl.c | 9
-rw-r--r--  usr/src/uts/i86pc/os/cpuid.c | 347
-rw-r--r--  usr/src/uts/i86pc/os/cpupm/pwrnow.c | 7
-rw-r--r--  usr/src/uts/i86pc/os/cpupm/speedstep.c | 11
-rw-r--r--  usr/src/uts/i86pc/os/ddi_impl.c | 6
-rw-r--r--  usr/src/uts/i86pc/os/fastboot.c | 13
-rw-r--r--  usr/src/uts/i86pc/os/fpu_subr.c | 16
-rw-r--r--  usr/src/uts/i86pc/os/lgrpplat.c | 5
-rw-r--r--  usr/src/uts/i86pc/os/machdep.c | 4
-rw-r--r--  usr/src/uts/i86pc/os/mlsetup.c | 24
-rw-r--r--  usr/src/uts/i86pc/os/mp_machdep.c | 15
-rw-r--r--  usr/src/uts/i86pc/os/mp_pc.c | 6
-rw-r--r--  usr/src/uts/i86pc/os/mp_startup.c | 68
-rw-r--r--  usr/src/uts/i86pc/os/pci_mech1_amd.c | 2
-rw-r--r--  usr/src/uts/i86pc/os/startup.c | 19
-rw-r--r--  usr/src/uts/i86pc/os/trap.c | 17
-rw-r--r--  usr/src/uts/i86pc/sys/rm_platter.h | 5
-rw-r--r--  usr/src/uts/i86pc/vm/hat_i86.c | 11
-rw-r--r--  usr/src/uts/i86pc/vm/htable.c | 2
-rw-r--r--  usr/src/uts/intel/ia32/ml/i86_subr.s | 7
-rw-r--r--  usr/src/uts/intel/ia32/os/cpc_subr.c | 5
-rw-r--r--  usr/src/uts/intel/ia32/os/desctbls.c | 8
-rw-r--r--  usr/src/uts/intel/ia32/os/sundep.c | 5
-rw-r--r--  usr/src/uts/intel/ia32/sys/traptrace.h | 4
-rw-r--r--  usr/src/uts/intel/pcbe/core_pcbe.c | 2
-rw-r--r--  usr/src/uts/intel/pcbe/p123_pcbe.c | 7
-rw-r--r--  usr/src/uts/intel/pcbe/p4_pcbe.c | 7
-rw-r--r--  usr/src/uts/intel/sys/x86_archext.h | 90
45 files changed, 494 insertions(+), 379 deletions(-)
diff --git a/usr/src/common/bignum/i386/bignum_i386_asm.s b/usr/src/common/bignum/i386/bignum_i386_asm.s
index 2b443c3799..0fb45a7370 100644
--- a/usr/src/common/bignum/i386/bignum_i386_asm.s
+++ b/usr/src/common/bignum/i386/bignum_i386_asm.s
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,12 +19,9 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/asm_linkage.h>
#include <sys/x86_archext.h>
#include <sys/controlregs.h>
@@ -182,21 +178,22 @@ big_mul_add_vec_umul(uint32_t *r, uint32_t *a, int len, uint32_t digit)
/ Note:
/ Using the cpuid instruction directly would work equally
/ well in userland and in the kernel, but we do not use the
-/ cpuid instruction in the kernel, we use the x86_feature
-/ variable, instead. This means we honor any decisions
-/ the kernel startup code may have made in setting this
-/ variable, including disabling SSE2 because of settings
-/ in /etc/system. It might even be a good idea to honor
-/ this kind of setting in userland, as well, but the variable,
-/ x86-feature is not readily available to userland processes.
+/ cpuid instruction in the kernel, we use x86_featureset,
+/ instead. This means we honor any decisions the kernel
+/ startup code may have made in setting this variable,
+/ including disabling SSE2. It might even be a good idea
+/ to honor this kind of setting in userland, as well, but
+/ the variable, x86_featureset is not readily available to
+/ userland processes.
/
/ uint32_t
/ bignum_use_sse2()
ENTRY(bignum_use_sse2)
#if defined(_KERNEL)
- movl x86_feature, %eax
- andl $X86_SSE2, %eax
+ xor %eax, %eax
+ bt $X86FSET_SSE2, x86_featureset
+ adc %eax, %eax
#else /* _KERNEL */
pushl %ebx
movl $1, %eax / Get feature information
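The new kernel path above is branch-free: bt copies bit X86FSET_SSE2 of x86_featureset into the carry flag, and adc %eax, %eax on a zeroed %eax leaves exactly that bit in %eax. A minimal C rendering of the same computation (a sketch; the kernel's real accessor is is_x86_feature(), built on BT_TEST() from <sys/bitmap.h>):

/*
 * What the bt/adc sequence computes: 1 if the SSE2 bit is set
 * in the featureset bitmap, 0 otherwise.
 */
static uint32_t
bignum_use_sse2_equiv(const ulong_t *featureset)
{
	uint_t bit = X86FSET_SSE2;
	uint_t bpw = NBBY * sizeof (ulong_t);	/* bits per ulong_t */

	return ((featureset[bit / bpw] >> (bit % bpw)) & 1);
}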
diff --git a/usr/src/common/crypto/aes/aes_impl.c b/usr/src/common/crypto/aes/aes_impl.c
index f606caa38e..965aa21e0e 100644
--- a/usr/src/common/crypto/aes/aes_impl.c
+++ b/usr/src/common/crypto/aes/aes_impl.c
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#include <sys/types.h>
@@ -37,7 +36,7 @@
#ifdef _KERNEL
#include <sys/cpuvar.h> /* cpu_t, CPU */
-#include <sys/x86_archext.h> /* x86_feature, X86_AES */
+#include <sys/x86_archext.h> /* x86_featureset, X86FSET_AES */
#include <sys/disp.h> /* kpreempt_disable(), kpreempt_enable */
/* Workaround for no XMM kernel thread save/restore */
@@ -1779,7 +1778,7 @@ aes_alloc_keysched(size_t *size, int kmflag)
* Cache the result, as the CPU can't change.
*
* Note: the userland version uses getisax(). The kernel version uses
- * global variable x86_feature.
+ * global variable x86_featureset.
*/
static int
intel_aes_instructions_present(void)
@@ -1788,7 +1787,7 @@ intel_aes_instructions_present(void)
if (cached_result == -1) { /* first time */
#ifdef _KERNEL
- cached_result = (x86_feature & X86_AES) != 0;
+ cached_result = is_x86_feature(x86_featureset, X86FSET_AES);
#else
uint_t ui = 0;
diff --git a/usr/src/common/crypto/modes/gcm.c b/usr/src/common/crypto/modes/gcm.c
index 8f1a07f92b..f75b0b70dd 100644
--- a/usr/src/common/crypto/modes/gcm.c
+++ b/usr/src/common/crypto/modes/gcm.c
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
*/
@@ -43,7 +42,7 @@
#ifdef _KERNEL
#include <sys/cpuvar.h> /* cpu_t, CPU */
-#include <sys/x86_archext.h> /* x86_feature, X86_*, CPUID_* */
+#include <sys/x86_archext.h> /* x86_featureset, X86FSET_*, CPUID_* */
#include <sys/disp.h> /* kpreempt_disable(), kpreempt_enable */
/* Workaround for no XMM kernel thread save/restore */
#define KPREEMPT_DISABLE kpreempt_disable()
@@ -731,7 +730,7 @@ gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
* Cache the result, as the CPU can't change.
*
* Note: the userland version uses getisax(). The kernel version uses
- * global variable x86_feature or the output of cpuid_insn().
+ * is_x86_featureset().
*/
static int
intel_pclmulqdq_instruction_present(void)
@@ -740,21 +739,8 @@ intel_pclmulqdq_instruction_present(void)
if (cached_result == -1) { /* first time */
#ifdef _KERNEL
-#ifdef X86_PCLMULQDQ
- cached_result = (x86_feature & X86_PCLMULQDQ) != 0;
-#else
- if (cpuid_getvendor(CPU) == X86_VENDOR_Intel) {
- struct cpuid_regs cpr;
- cpu_t *cp = CPU;
-
- cpr.cp_eax = 1; /* Function 1: get processor info */
- (void) cpuid_insn(cp, &cpr);
- cached_result = ((cpr.cp_ecx &
- CPUID_INTC_ECX_PCLMULQDQ) != 0);
- } else {
- cached_result = 0;
- }
-#endif /* X86_PCLMULQDQ */
+ cached_result =
+ is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ);
#else
uint_t ui = 0;
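For comparison with the kernel branch above, the userland branch (elided here) detects the instruction through getisax(2) instead; a sketch of that pattern, assuming the AV_386_PCLMULQDQ auxv bit from <sys/auxv_386.h>:

#include <sys/auxv.h>
#include <sys/auxv_386.h>

/* Userland counterpart of the kernel is_x86_feature() test above. */
static int
pclmulqdq_present(void)
{
	uint_t ui = 0;

	(void) getisax(&ui, 1);		/* fetch first AV_386 word */
	return ((ui & AV_386_PCLMULQDQ) != 0);
}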
diff --git a/usr/src/common/hdcrc/hd_crc.h b/usr/src/common/hdcrc/hd_crc.h
index 7e6af954c2..af92b4900f 100644
--- a/usr/src/common/hdcrc/hd_crc.h
+++ b/usr/src/common/hdcrc/hd_crc.h
@@ -25,8 +25,7 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#ifndef _CRC_HD_H
@@ -51,12 +50,12 @@ extern "C" {
#if defined(__i386) || defined(__amd_64) || defined(__x86_64)
#ifdef _KERNEL
-#include <sys/x86_archext.h> /* x86_feature, X86_AES */
+#include <sys/x86_archext.h> /* x86_featureset, X86FSET_SSE4_2 */
#else
#include <sys/auxv.h> /* getisax() */
-#include <sys/auxv_386.h> /* AV_386_AES bit */
+#include <sys/auxv_386.h> /* AV_386_SSE4_2 bit */
#endif
@@ -248,7 +247,7 @@ hd_crc32_avail(uint32_t *crc32_table)
};
#ifdef _KERNEL
- if (!(x86_feature & X86_SSE4_2)) {
+ if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
return (B_FALSE);
} else {
#else
diff --git a/usr/src/uts/common/io/cpuid_drv.c b/usr/src/uts/common/io/cpuid_drv.c
index a00e40d8c5..084c916a68 100644
--- a/usr/src/uts/common/io/cpuid_drv.c
+++ b/usr/src/uts/common/io/cpuid_drv.c
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
*/
@@ -111,7 +110,7 @@ cpuid_read(dev_t dev, uio_t *uio, cred_t *cr)
struct cpuid_regs crs;
int error = 0;
- if ((x86_feature & X86_CPUID) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
return (ENXIO);
if (uio->uio_resid & (sizeof (crs) - 1))
diff --git a/usr/src/uts/common/io/drm/drm_cache.c b/usr/src/uts/common/io/drm/drm_cache.c
index 44a19c0703..fe7eff0436 100644
--- a/usr/src/uts/common/io/drm/drm_cache.c
+++ b/usr/src/uts/common/io/drm/drm_cache.c
@@ -30,8 +30,7 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#include <sys/x86_archext.h>
@@ -58,7 +57,7 @@ void
drm_clflush_pages(caddr_t *pages, unsigned long num_pages)
{
- if (x86_feature & X86_CLFSH) {
+ if (is_x86_feature(x86_featureset, X86FSET_CLFSH)) {
unsigned long i;
for (i = 0; i < num_pages; i++)
diff --git a/usr/src/uts/i86pc/cpu/amd_opteron/ao_main.c b/usr/src/uts/i86pc/cpu/amd_opteron/ao_main.c
index dd6c2dd616..36ea92669d 100644
--- a/usr/src/uts/i86pc/cpu/amd_opteron/ao_main.c
+++ b/usr/src/uts/i86pc/cpu/amd_opteron/ao_main.c
@@ -64,7 +64,7 @@ ao_ms_init(cmi_hdl_t hdl, void **datap)
if (ao_ms_support_disable || cmi_hdl_model(hdl) >= ao_model_limit)
return (ENOTSUP);
- if (!(x86_feature & X86_MCA))
+ if (!is_x86_feature(x86_featureset, X86FSET_MCA))
return (ENOTSUP);
if (cmi_hdl_rdmsr(hdl, IA32_MSR_MCG_CAP, &cap) != CMI_SUCCESS)
diff --git a/usr/src/uts/i86pc/cpu/authenticamd/authamd_main.c b/usr/src/uts/i86pc/cpu/authenticamd/authamd_main.c
index 46723b1437..311eb6d12f 100644
--- a/usr/src/uts/i86pc/cpu/authenticamd/authamd_main.c
+++ b/usr/src/uts/i86pc/cpu/authenticamd/authamd_main.c
@@ -492,7 +492,7 @@ authamd_init(cmi_hdl_t hdl, void **datap)
!authamd_supported(hdl))
return (ENOTSUP);
- if (!(x86_feature & X86_MCA))
+ if (!is_x86_feature(x86_featureset, X86FSET_MCA))
return (ENOTSUP);
if (cmi_hdl_rdmsr(hdl, IA32_MSR_MCG_CAP, &cap) != CMI_SUCCESS)
diff --git a/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c b/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c
index 50ef45bec9..1b9e259bd8 100644
--- a/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c
+++ b/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c
@@ -1067,13 +1067,13 @@ gcpu_mca_init(cmi_hdl_t hdl)
return;
/*
- * CPU startup code only calls cmi_mca_init if x86_feature indicates
- * both MCA and MCE support (i.e., X86_MCA). P5, K6, and earlier
+ * CPU startup code only calls cmi_mca_init if x86_featureset indicates
+ * both MCA and MCE support (i.e., X86FSET_MCA). P5, K6, and earlier
* processors, which have their own more primitive way of doing
* machine checks, will not have cmi_mca_init called since their
* CPUID information will not indicate both MCA and MCE features.
*/
- ASSERT(x86_feature & X86_MCA);
+ ASSERT(is_x86_feature(x86_featureset, X86FSET_MCA));
/*
* Determine whether the IA32_MCG_CTL register is present. If it
@@ -2018,13 +2018,13 @@ gcpu_mca_fini(cmi_hdl_t hdl)
int i;
/*
- * CPU startup code only calls cmi_mca_init if x86_feature indicates
- * both MCA and MCE support (i.e., X86_MCA). P5, K6, and earlier
+ * CPU startup code only calls cmi_mca_init if x86_featureset indicates
+ * both MCA and MCE support (i.e., X86FSET_MCA). P5, K6, and earlier
* processors, which have their own more primitive way of doing
* machine checks, will not have cmi_mca_init called since their
* CPUID information will not indicate both MCA and MCE features.
*/
- if ((x86_feature & X86_MCA) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_MCA))
return;
#ifndef __xpv
/*
diff --git a/usr/src/uts/i86pc/cpu/genuineintel/gintel_main.c b/usr/src/uts/i86pc/cpu/genuineintel/gintel_main.c
index e696725f6a..2eb7faea63 100644
--- a/usr/src/uts/i86pc/cpu/genuineintel/gintel_main.c
+++ b/usr/src/uts/i86pc/cpu/genuineintel/gintel_main.c
@@ -112,7 +112,7 @@ gintel_init(cmi_hdl_t hdl, void **datap)
if (gintel_ms_support_disable)
return (ENOTSUP);
- if (!(x86_feature & X86_MCA))
+ if (!is_x86_feature(x86_featureset, X86FSET_MCA))
return (ENOTSUP);
nb_chipset = (*pci_getl_func)(0, 0, 0, 0x0);
diff --git a/usr/src/uts/i86pc/io/apix/apix.c b/usr/src/uts/i86pc/io/apix/apix.c
index 5431a70328..8c4ccb6a0a 100644
--- a/usr/src/uts/i86pc/io/apix/apix.c
+++ b/usr/src/uts/i86pc/io/apix/apix.c
@@ -475,7 +475,7 @@ apix_init_intr()
if (nlvt >= 5) {
/* Enable performance counter overflow interrupt */
- if ((x86_feature & X86_MSR) != X86_MSR)
+ if (!is_x86_feature(x86_featureset, X86FSET_MSR))
apic_enable_cpcovf_intr = 0;
if (apic_enable_cpcovf_intr) {
if (apic_cpcovf_vect == 0) {
diff --git a/usr/src/uts/i86pc/io/immu_regs.c b/usr/src/uts/i86pc/io/immu_regs.c
index 97d56a3776..d18c6bc12a 100644
--- a/usr/src/uts/i86pc/io/immu_regs.c
+++ b/usr/src/uts/i86pc/io/immu_regs.c
@@ -382,7 +382,7 @@ setup_regs(immu_t *immu)
immu->immu_dvma_coherent = B_TRUE;
} else {
immu->immu_dvma_coherent = B_FALSE;
- if (!(x86_feature & X86_CLFSH)) {
+ if (!is_x86_feature(x86_featureset, X86FSET_CLFSH)) {
ddi_err(DER_WARN, NULL,
"immu unit %s can't be enabled due to "
"missing clflush functionality", immu->immu_name);
diff --git a/usr/src/uts/i86pc/io/pcplusmp/apic.c b/usr/src/uts/i86pc/io/pcplusmp/apic.c
index 22553d39d3..b72bf7bdf1 100644
--- a/usr/src/uts/i86pc/io/pcplusmp/apic.c
+++ b/usr/src/uts/i86pc/io/pcplusmp/apic.c
@@ -371,7 +371,7 @@ apic_init_intr(void)
if (nlvt >= 5) {
/* Enable performance counter overflow interrupt */
- if ((x86_feature & X86_MSR) != X86_MSR)
+ if (!is_x86_feature(x86_featureset, X86FSET_MSR))
apic_enable_cpcovf_intr = 0;
if (apic_enable_cpcovf_intr) {
if (apic_cpcovf_vect == 0) {
diff --git a/usr/src/uts/i86pc/ml/cpr_wakecode.s b/usr/src/uts/i86pc/ml/cpr_wakecode.s
index 917ce412aa..38540f699b 100644
--- a/usr/src/uts/i86pc/ml/cpr_wakecode.s
+++ b/usr/src/uts/i86pc/ml/cpr_wakecode.s
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#include <sys/asm_linkage.h>
@@ -673,9 +672,8 @@ kernel_wc_code:
* Before proceeding, enable usage of the page table NX bit if
* that's how the page tables are set up.
*/
- movl x86_feature, %ecx
- andl $X86_NX, %ecx
- jz 1f
+ bt $X86FSET_NX, x86_featureset
+ jnc 1f
movl $MSR_AMD_EFER, %ecx
rdmsr
orl $AMD_EFER_NXE, %eax
@@ -1092,9 +1090,8 @@ kernel_wc_code:
* Before proceeding, enable usage of the page table NX bit if
* that's how the page tables are set up.
*/
- movl x86_feature, %ecx
- andl $X86_NX, %ecx
- jz 1f
+ bt $X86FSET_NX, x86_featureset
+ jnc 1f
movl $MSR_AMD_EFER, %ecx
rdmsr
orl $AMD_EFER_NXE, %eax
diff --git a/usr/src/uts/i86pc/ml/locore.s b/usr/src/uts/i86pc/ml/locore.s
index db016a55db..8aec1537e5 100644
--- a/usr/src/uts/i86pc/ml/locore.s
+++ b/usr/src/uts/i86pc/ml/locore.s
@@ -20,8 +20,7 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
@@ -208,7 +207,7 @@ _locore_start(struct boot_syscalls *sysp, ulong_t rsi, struct bootops *bop)
/*
* (We just assert this works by virtue of being here)
*/
- orl $X86_CPUID, x86_feature(%rip)
+ bts $X86FSET_CPUID, x86_featureset(%rip)
/*
* mlsetup() gets called with a struct regs as argument, while
@@ -623,7 +622,7 @@ have_cpuid:
/*
* cpuid instruction present
*/
- orl $X86_CPUID, x86_feature
+ bts $X86FSET_CPUID, x86_featureset / Just to set; Ignore the CF
movl $0, %eax
cpuid
@@ -2340,7 +2339,7 @@ cpu_vendor:
.globl CyrixInstead
- .globl x86_feature
+ .globl x86_featureset
.globl x86_type
.globl x86_vendor
#endif
diff --git a/usr/src/uts/i86pc/ml/mpcore.s b/usr/src/uts/i86pc/ml/mpcore.s
index 25a943870e..d3bfa14f86 100644
--- a/usr/src/uts/i86pc/ml/mpcore.s
+++ b/usr/src/uts/i86pc/ml/mpcore.s
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright (c) 2010, Intel Corporation.
@@ -316,9 +315,8 @@ kernel_cs_code:
* Before going any further, enable usage of page table NX bit if
* that's how our page tables are set up.
*/
- movl x86_feature, %ecx
- andl $X86_NX, %ecx
- jz 1f
+ bt $X86FSET_NX, x86_featureset
+ jnc 1f
movl $MSR_AMD_EFER, %ecx
rdmsr
orl $AMD_EFER_NXE, %eax
@@ -570,9 +568,8 @@ kernel_cs_code:
* Before going any further, enable usage of page table NX bit if
* that's how our page tables are set up.
*/
- movl x86_feature, %ecx
- andl $X86_NX, %ecx
- jz 1f
+ bt $X86FSET_NX, x86_featureset
+ jnc 1f
movl $MSR_AMD_EFER, %ecx
rdmsr
orl $AMD_EFER_NXE, %eax
@@ -671,9 +668,8 @@ kernel_cs_code:
* Before going any further, enable usage of page table NX bit if
* that's how our page tables are set up.
*/
- movl x86_feature, %ecx
- andl $X86_NX, %ecx
- jz 1f
+ bt $X86FSET_NX, x86_featureset
+ jnc 1f
movl %cr4, %ecx
andl $CR4_PAE, %ecx
jz 1f
@@ -763,9 +759,8 @@ kernel_cs_code:
* Before going any farther, enable usage of page table NX bit if
* that's how our page tables are set up.
*/
- movl x86_feature, %ecx
- andl $X86_NX, %ecx
- jz 1f
+ bt $X86FSET_NX, x86_featureset
+ jnc 1f
movl %cr4, %ecx
andl $CR4_PAE, %ecx
jz 1f
diff --git a/usr/src/uts/i86pc/ml/offsets.in b/usr/src/uts/i86pc/ml/offsets.in
index ceefce6d3c..4b5d4fc694 100644
--- a/usr/src/uts/i86pc/ml/offsets.in
+++ b/usr/src/uts/i86pc/ml/offsets.in
@@ -293,7 +293,6 @@ rm_platter
rm_pdbr CR3OFF
rm_cpu CPUNOFF
rm_cr4 CR4OFF
- rm_x86feature X86FEATURE
rm_cpu_halt_code CPUHALTCODEOFF
rm_cpu_halted CPUHALTEDOFF
diff --git a/usr/src/uts/i86pc/os/cpr_impl.c b/usr/src/uts/i86pc/os/cpr_impl.c
index 103955a097..8f57ca7366 100644
--- a/usr/src/uts/i86pc/os/cpr_impl.c
+++ b/usr/src/uts/i86pc/os/cpr_impl.c
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -876,8 +875,6 @@ init_real_mode_platter(int cpun, uint32_t offset, uint_t cr4, wc_desctbr_t gdt)
real_mode_platter->rm_gdt_lim = gdt.limit;
#if defined(__amd64)
- real_mode_platter->rm_x86feature = x86_feature;
-
if (getcr3() > 0xffffffffUL)
panic("Cannot initialize CPUs; kernel's 64-bit page tables\n"
"located above 4G in physical memory (@ 0x%llx).",
@@ -943,7 +940,7 @@ i_cpr_start_cpu(void)
* We need to Sync PAT with cpu0's PAT. We have to do
* this with interrupts disabled.
*/
- if (x86_feature & X86_PAT)
+ if (is_x86_feature(x86_featureset, X86FSET_PAT))
pat_sync();
/*
@@ -994,7 +991,7 @@ i_cpr_start_cpu(void)
* cmi already been init'd (during boot), so do not need to do it again
*/
#ifdef PM_REINITMCAONRESUME
- if (x86_feature & X86_MCA)
+ if (is_x86_feature(x86_featureset, X86FSET_MCA))
cmi_mca_init();
#endif
diff --git a/usr/src/uts/i86pc/os/cpuid.c b/usr/src/uts/i86pc/os/cpuid.c
index 4a86da8c3b..7802830e96 100644
--- a/usr/src/uts/i86pc/os/cpuid.c
+++ b/usr/src/uts/i86pc/os/cpuid.c
@@ -67,7 +67,7 @@
*
* Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
* for the boot CPU and does the basic analysis that the early kernel needs.
- * x86_feature is set based on the return value of cpuid_pass1() of the boot
+ * x86_featureset is set based on the return value of cpuid_pass1() of the boot
* CPU.
*
* Pass 1 includes:
@@ -111,7 +111,6 @@
* to the accessor code.
*/
-uint_t x86_feature = 0;
uint_t x86_vendor = X86_VENDOR_IntelClone;
uint_t x86_type = X86_TYPE_OTHER;
uint_t x86_clflush_size = 0;
@@ -119,6 +118,104 @@ uint_t x86_clflush_size = 0;
uint_t pentiumpro_bug4046376;
uint_t pentiumpro_bug4064495;
+#define NUM_X86_FEATURES 33
+void *x86_featureset;
+ulong_t x86_featureset0[BT_SIZEOFMAP(NUM_X86_FEATURES)];
+
+char *x86_feature_names[NUM_X86_FEATURES] = {
+ "lgpg",
+ "tsc",
+ "msr",
+ "mtrr",
+ "pge",
+ "de",
+ "cmov",
+ "mmx",
+ "mca",
+ "pae",
+ "cv8",
+ "pat",
+ "sep",
+ "sse",
+ "sse2",
+ "htt",
+ "asysc",
+ "nx",
+ "sse3",
+ "cx16",
+ "cmp",
+ "tscp",
+ "mwait",
+ "sse4a",
+ "cpuid",
+ "ssse3",
+ "sse4_1",
+ "sse4_2",
+ "1gpg",
+ "clfsh",
+ "64",
+ "aes",
+ "pclmulqdq" };
+
+static void *
+init_x86_featureset(void)
+{
+ return (kmem_zalloc(BT_SIZEOFMAP(NUM_X86_FEATURES), KM_SLEEP));
+}
+
+void
+free_x86_featureset(void *featureset)
+{
+ kmem_free(featureset, BT_SIZEOFMAP(NUM_X86_FEATURES));
+}
+
+boolean_t
+is_x86_feature(void *featureset, uint_t feature)
+{
+ ASSERT(feature < NUM_X86_FEATURES);
+ return (BT_TEST((ulong_t *)featureset, feature));
+}
+
+void
+add_x86_feature(void *featureset, uint_t feature)
+{
+ ASSERT(feature < NUM_X86_FEATURES);
+ BT_SET((ulong_t *)featureset, feature);
+}
+
+void
+remove_x86_feature(void *featureset, uint_t feature)
+{
+ ASSERT(feature < NUM_X86_FEATURES);
+ BT_CLEAR((ulong_t *)featureset, feature);
+}
+
+boolean_t
+compare_x86_featureset(void *setA, void *setB)
+{
+ /*
+ * We assume that the unused bits of the bitmap are always zero.
+ */
+ if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
+ return (B_TRUE);
+ } else {
+ return (B_FALSE);
+ }
+}
+
+void
+print_x86_featureset(void *featureset)
+{
+ uint_t i;
+
+ for (i = 0; i < NUM_X86_FEATURES; i++) {
+ if (is_x86_feature(featureset, i)) {
+ cmn_err(CE_CONT, "?x86_feature: %s\n",
+ x86_feature_names[i]);
+ }
+ }
+}
+
uint_t enable486;
/*
* This is set to platform type Solaris is running on.
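Taken together, the block added above is the entire new feature API: a kmem-allocated bitmap plus accessors that replace the old "x86_feature & X86_FOO" mask tests. A sketch of typical call sites, using only the functions defined in this hunk:

static void
x86_featureset_example(void)
{
	void *fs = init_x86_featureset();	/* kmem_zalloc'd, zeroed */

	add_x86_feature(fs, X86FSET_SSE2);	/* BT_SET on the bitmap */
	if (is_x86_feature(fs, X86FSET_SSE2)) {
		/* feature-dependent setup would go here */
	}
	remove_x86_feature(fs, X86FSET_SSE2);	/* BT_CLEAR */
	free_x86_featureset(fs);		/* kmem_free, same size */
}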
@@ -542,7 +639,7 @@ is_controldom(void)
#endif /* __xpv */
static void
-cpuid_intel_getids(cpu_t *cpu, uint_t feature)
+cpuid_intel_getids(cpu_t *cpu, void *feature)
{
uint_t i;
uint_t chipid_shift = 0;
@@ -555,7 +652,7 @@ cpuid_intel_getids(cpu_t *cpu, uint_t feature)
cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift;
cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1);
- if (feature & X86_CMP) {
+ if (is_x86_feature(feature, X86FSET_CMP)) {
/*
* Multi-core (and possibly multi-threaded)
* processors.
@@ -591,7 +688,7 @@ cpuid_intel_getids(cpu_t *cpu, uint_t feature)
coreid_shift++;
cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift;
cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
- } else if (feature & X86_HTT) {
+ } else if (is_x86_feature(feature, X86FSET_HTT)) {
/*
* Single-core multi-threaded processors.
*/
@@ -718,11 +815,11 @@ cpuid_amd_getids(cpu_t *cpu)
}
}
-uint_t
+void *
cpuid_pass1(cpu_t *cpu)
{
uint32_t mask_ecx, mask_edx;
- uint_t feature = X86_CPUID;
+ void *featureset;
struct cpuid_info *cpi;
struct cpuid_regs *cp;
int xcpuid;
@@ -737,8 +834,16 @@ cpuid_pass1(cpu_t *cpu)
/*
* Space statically allocated for BSP, ensure pointer is set
*/
- if (cpu->cpu_id == 0 && cpu->cpu_m.mcpu_cpi == NULL)
- cpu->cpu_m.mcpu_cpi = &cpuid_info0;
+ if (cpu->cpu_id == 0) {
+ if (cpu->cpu_m.mcpu_cpi == NULL)
+ cpu->cpu_m.mcpu_cpi = &cpuid_info0;
+ featureset = x86_featureset0;
+ } else {
+ featureset = init_x86_featureset();
+ }
+
+ add_x86_feature(featureset, X86FSET_CPUID);
+
cpi = cpu->cpu_m.mcpu_cpi;
ASSERT(cpi != NULL);
cp = &cpi->cpi_std[0];
@@ -1004,58 +1109,82 @@ cpuid_pass1(cpu_t *cpu)
cp->cp_ecx |= cpuid_feature_ecx_include;
cp->cp_ecx &= ~cpuid_feature_ecx_exclude;
- if (cp->cp_edx & CPUID_INTC_EDX_PSE)
- feature |= X86_LARGEPAGE;
- if (cp->cp_edx & CPUID_INTC_EDX_TSC)
- feature |= X86_TSC;
- if (cp->cp_edx & CPUID_INTC_EDX_MSR)
- feature |= X86_MSR;
- if (cp->cp_edx & CPUID_INTC_EDX_MTRR)
- feature |= X86_MTRR;
- if (cp->cp_edx & CPUID_INTC_EDX_PGE)
- feature |= X86_PGE;
- if (cp->cp_edx & CPUID_INTC_EDX_CMOV)
- feature |= X86_CMOV;
- if (cp->cp_edx & CPUID_INTC_EDX_MMX)
- feature |= X86_MMX;
+ if (cp->cp_edx & CPUID_INTC_EDX_PSE) {
+ add_x86_feature(featureset, X86FSET_LARGEPAGE);
+ }
+ if (cp->cp_edx & CPUID_INTC_EDX_TSC) {
+ add_x86_feature(featureset, X86FSET_TSC);
+ }
+ if (cp->cp_edx & CPUID_INTC_EDX_MSR) {
+ add_x86_feature(featureset, X86FSET_MSR);
+ }
+ if (cp->cp_edx & CPUID_INTC_EDX_MTRR) {
+ add_x86_feature(featureset, X86FSET_MTRR);
+ }
+ if (cp->cp_edx & CPUID_INTC_EDX_PGE) {
+ add_x86_feature(featureset, X86FSET_PGE);
+ }
+ if (cp->cp_edx & CPUID_INTC_EDX_CMOV) {
+ add_x86_feature(featureset, X86FSET_CMOV);
+ }
+ if (cp->cp_edx & CPUID_INTC_EDX_MMX) {
+ add_x86_feature(featureset, X86FSET_MMX);
+ }
if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 &&
- (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0)
- feature |= X86_MCA;
- if (cp->cp_edx & CPUID_INTC_EDX_PAE)
- feature |= X86_PAE;
- if (cp->cp_edx & CPUID_INTC_EDX_CX8)
- feature |= X86_CX8;
- if (cp->cp_ecx & CPUID_INTC_ECX_CX16)
- feature |= X86_CX16;
- if (cp->cp_edx & CPUID_INTC_EDX_PAT)
- feature |= X86_PAT;
- if (cp->cp_edx & CPUID_INTC_EDX_SEP)
- feature |= X86_SEP;
+ (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) {
+ add_x86_feature(featureset, X86FSET_MCA);
+ }
+ if (cp->cp_edx & CPUID_INTC_EDX_PAE) {
+ add_x86_feature(featureset, X86FSET_PAE);
+ }
+ if (cp->cp_edx & CPUID_INTC_EDX_CX8) {
+ add_x86_feature(featureset, X86FSET_CX8);
+ }
+ if (cp->cp_ecx & CPUID_INTC_ECX_CX16) {
+ add_x86_feature(featureset, X86FSET_CX16);
+ }
+ if (cp->cp_edx & CPUID_INTC_EDX_PAT) {
+ add_x86_feature(featureset, X86FSET_PAT);
+ }
+ if (cp->cp_edx & CPUID_INTC_EDX_SEP) {
+ add_x86_feature(featureset, X86FSET_SEP);
+ }
if (cp->cp_edx & CPUID_INTC_EDX_FXSR) {
/*
* In our implementation, fxsave/fxrstor
* are prerequisites before we'll even
* try and do SSE things.
*/
- if (cp->cp_edx & CPUID_INTC_EDX_SSE)
- feature |= X86_SSE;
- if (cp->cp_edx & CPUID_INTC_EDX_SSE2)
- feature |= X86_SSE2;
- if (cp->cp_ecx & CPUID_INTC_ECX_SSE3)
- feature |= X86_SSE3;
+ if (cp->cp_edx & CPUID_INTC_EDX_SSE) {
+ add_x86_feature(featureset, X86FSET_SSE);
+ }
+ if (cp->cp_edx & CPUID_INTC_EDX_SSE2) {
+ add_x86_feature(featureset, X86FSET_SSE2);
+ }
+ if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) {
+ add_x86_feature(featureset, X86FSET_SSE3);
+ }
if (cpi->cpi_vendor == X86_VENDOR_Intel) {
- if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3)
- feature |= X86_SSSE3;
- if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1)
- feature |= X86_SSE4_1;
- if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2)
- feature |= X86_SSE4_2;
- if (cp->cp_ecx & CPUID_INTC_ECX_AES)
- feature |= X86_AES;
+ if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) {
+ add_x86_feature(featureset, X86FSET_SSSE3);
+ }
+ if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) {
+ add_x86_feature(featureset, X86FSET_SSE4_1);
+ }
+ if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) {
+ add_x86_feature(featureset, X86FSET_SSE4_2);
+ }
+ if (cp->cp_ecx & CPUID_INTC_ECX_AES) {
+ add_x86_feature(featureset, X86FSET_AES);
+ }
+ if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
+ add_x86_feature(featureset, X86FSET_PCLMULQDQ);
+ }
}
}
- if (cp->cp_edx & CPUID_INTC_EDX_DE)
- feature |= X86_DE;
+ if (cp->cp_edx & CPUID_INTC_EDX_DE) {
+ add_x86_feature(featureset, X86FSET_DE);
+ }
#if !defined(__xpv)
if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
@@ -1065,7 +1194,7 @@ cpuid_pass1(cpu_t *cpu)
*/
if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
cpi->cpi_mwait.support |= MWAIT_SUPPORT;
- feature |= X86_MWAIT;
+ add_x86_feature(featureset, X86FSET_MWAIT);
} else {
extern int idle_cpu_assert_cflush_monitor;
@@ -1086,11 +1215,10 @@ cpuid_pass1(cpu_t *cpu)
* we only capture this for the bootcpu.
*/
if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
- feature |= X86_CLFSH;
+ add_x86_feature(featureset, X86FSET_CLFSH);
x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8);
}
-
- if (feature & X86_PAE)
+ if (is_x86_feature(featureset, X86FSET_PAE))
cpi->cpi_pabits = 36;
/*
@@ -1105,7 +1233,7 @@ cpuid_pass1(cpu_t *cpu)
if (cp->cp_edx & CPUID_INTC_EDX_HTT) {
cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
if (cpi->cpi_ncpu_per_chip > 1)
- feature |= X86_HTT;
+ add_x86_feature(featureset, X86FSET_HTT);
} else {
cpi->cpi_ncpu_per_chip = 1;
}
@@ -1180,27 +1308,31 @@ cpuid_pass1(cpu_t *cpu)
/*
* Compute the additions to the kernel's feature word.
*/
- if (cp->cp_edx & CPUID_AMD_EDX_NX)
- feature |= X86_NX;
+ if (cp->cp_edx & CPUID_AMD_EDX_NX) {
+ add_x86_feature(featureset, X86FSET_NX);
+ }
/*
* Regardless whether or not we boot 64-bit,
* we should have a way to identify whether
* the CPU is capable of running 64-bit.
*/
- if (cp->cp_edx & CPUID_AMD_EDX_LM)
- feature |= X86_64;
+ if (cp->cp_edx & CPUID_AMD_EDX_LM) {
+ add_x86_feature(featureset, X86FSET_64);
+ }
#if defined(__amd64)
/* 1 GB large page - enable only for 64 bit kernel */
- if (cp->cp_edx & CPUID_AMD_EDX_1GPG)
- feature |= X86_1GPG;
+ if (cp->cp_edx & CPUID_AMD_EDX_1GPG) {
+ add_x86_feature(featureset, X86FSET_1GPG);
+ }
#endif
if ((cpi->cpi_vendor == X86_VENDOR_AMD) &&
(cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) &&
- (cp->cp_ecx & CPUID_AMD_ECX_SSE4A))
- feature |= X86_SSE4A;
+ (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) {
+ add_x86_feature(featureset, X86FSET_SSE4A);
+ }
/*
* If both the HTT and CMP_LGCY bits are set,
@@ -1208,10 +1340,10 @@ cpuid_pass1(cpu_t *cpu)
* "AMD CPUID Specification" for more details.
*/
if (cpi->cpi_vendor == X86_VENDOR_AMD &&
- (feature & X86_HTT) &&
+ is_x86_feature(featureset, X86FSET_HTT) &&
(cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) {
- feature &= ~X86_HTT;
- feature |= X86_CMP;
+ remove_x86_feature(featureset, X86FSET_HTT);
+ add_x86_feature(featureset, X86FSET_CMP);
}
#if defined(__amd64)
/*
@@ -1220,19 +1352,22 @@ cpuid_pass1(cpu_t *cpu)
* instead. In the amd64 kernel, things are -way-
* better.
*/
- if (cp->cp_edx & CPUID_AMD_EDX_SYSC)
- feature |= X86_ASYSC;
+ if (cp->cp_edx & CPUID_AMD_EDX_SYSC) {
+ add_x86_feature(featureset, X86FSET_ASYSC);
+ }
/*
* While we're thinking about system calls, note
* that AMD processors don't support sysenter
* in long mode at all, so don't try to program them.
*/
- if (x86_vendor == X86_VENDOR_AMD)
- feature &= ~X86_SEP;
+ if (x86_vendor == X86_VENDOR_AMD) {
+ remove_x86_feature(featureset, X86FSET_SEP);
+ }
#endif
- if (cp->cp_edx & CPUID_AMD_EDX_TSCP)
- feature |= X86_TSCP;
+ if (cp->cp_edx & CPUID_AMD_EDX_TSCP) {
+ add_x86_feature(featureset, X86FSET_TSCP);
+ }
break;
default:
break;
@@ -1327,20 +1462,22 @@ cpuid_pass1(cpu_t *cpu)
/*
* If more than one core, then this processor is CMP.
*/
- if (cpi->cpi_ncore_per_chip > 1)
- feature |= X86_CMP;
+ if (cpi->cpi_ncore_per_chip > 1) {
+ add_x86_feature(featureset, X86FSET_CMP);
+ }
/*
* If the number of cores is the same as the number
* of CPUs, then we cannot have HyperThreading.
*/
- if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip)
- feature &= ~X86_HTT;
+ if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) {
+ remove_x86_feature(featureset, X86FSET_HTT);
+ }
cpi->cpi_apicid = CPI_APIC_ID(cpi);
cpi->cpi_procnodes_per_pkg = 1;
-
- if ((feature & (X86_HTT | X86_CMP)) == 0) {
+ if (is_x86_feature(featureset, X86FSET_HTT) == B_FALSE &&
+ is_x86_feature(featureset, X86FSET_CMP) == B_FALSE) {
/*
* Single-core single-threaded processors.
*/
@@ -1354,7 +1491,7 @@ cpuid_pass1(cpu_t *cpu)
cpi->cpi_procnodeid = cpi->cpi_chipid;
} else if (cpi->cpi_ncpu_per_chip > 1) {
if (cpi->cpi_vendor == X86_VENDOR_Intel)
- cpuid_intel_getids(cpu, feature);
+ cpuid_intel_getids(cpu, featureset);
else if (cpi->cpi_vendor == X86_VENDOR_AMD)
cpuid_amd_getids(cpu);
else {
@@ -1380,7 +1517,7 @@ cpuid_pass1(cpu_t *cpu)
pass1_done:
cpi->cpi_pass = 1;
- return (feature);
+ return (featureset);
}
/*
@@ -1703,7 +1840,7 @@ intel_cpubrand(const struct cpuid_info *cpi)
{
int i;
- if ((x86_feature & X86_CPUID) == 0 ||
+ if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
return ("i486");
@@ -1837,7 +1974,7 @@ intel_cpubrand(const struct cpuid_info *cpi)
static const char *
amd_cpubrand(const struct cpuid_info *cpi)
{
- if ((x86_feature & X86_CPUID) == 0 ||
+ if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
return ("i486 compatible");
@@ -1907,7 +2044,7 @@ amd_cpubrand(const struct cpuid_info *cpi)
static const char *
cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
{
- if ((x86_feature & X86_CPUID) == 0 ||
+ if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
type == X86_TYPE_CYRIX_486)
return ("i486 compatible");
@@ -2224,29 +2361,31 @@ cpuid_pass4(cpu_t *cpu)
/*
* [these require explicit kernel support]
*/
- if ((x86_feature & X86_SEP) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_SEP))
*edx &= ~CPUID_INTC_EDX_SEP;
- if ((x86_feature & X86_SSE) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_SSE))
*edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
- if ((x86_feature & X86_SSE2) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
*edx &= ~CPUID_INTC_EDX_SSE2;
- if ((x86_feature & X86_HTT) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_HTT))
*edx &= ~CPUID_INTC_EDX_HTT;
- if ((x86_feature & X86_SSE3) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_SSE3))
*ecx &= ~CPUID_INTC_ECX_SSE3;
if (cpi->cpi_vendor == X86_VENDOR_Intel) {
- if ((x86_feature & X86_SSSE3) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_SSSE3))
*ecx &= ~CPUID_INTC_ECX_SSSE3;
- if ((x86_feature & X86_SSE4_1) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1))
*ecx &= ~CPUID_INTC_ECX_SSE4_1;
- if ((x86_feature & X86_SSE4_2) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2))
*ecx &= ~CPUID_INTC_ECX_SSE4_2;
- if ((x86_feature & X86_AES) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_AES))
*ecx &= ~CPUID_INTC_ECX_AES;
+ if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ))
+ *ecx &= ~CPUID_INTC_ECX_PCLMULQDQ;
}
/*
@@ -2326,14 +2465,14 @@ cpuid_pass4(cpu_t *cpu)
*/
switch (cpi->cpi_vendor) {
case X86_VENDOR_Intel:
- if ((x86_feature & X86_TSCP) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
*edx &= ~CPUID_AMD_EDX_TSCP;
break;
case X86_VENDOR_AMD:
- if ((x86_feature & X86_TSCP) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
*edx &= ~CPUID_AMD_EDX_TSCP;
- if ((x86_feature & X86_SSE4A) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_SSE4A))
*ecx &= ~CPUID_AMD_ECX_SSE4A;
break;
@@ -2349,7 +2488,7 @@ cpuid_pass4(cpu_t *cpu)
*edx &= ~(CPUID_AMD_EDX_MMXamd |
CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);
- if ((x86_feature & X86_NX) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_NX))
*edx &= ~CPUID_AMD_EDX_NX;
#if !defined(__amd64)
*edx &= ~CPUID_AMD_EDX_LM;
@@ -3340,7 +3479,7 @@ intel_walk_cacheinfo(struct cpuid_info *cpi,
des_b1_ct.ct_code = 0xb1;
des_b1_ct.ct_assoc = 4;
des_b1_ct.ct_line_size = 0;
- if (x86_feature & X86_PAE) {
+ if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
des_b1_ct.ct_size = 8;
des_b1_ct.ct_label = itlb2M_str;
} else {
@@ -3687,7 +3826,7 @@ cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
"clock-frequency", (int)mul);
}
- if ((x86_feature & X86_CPUID) == 0) {
+ if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) {
return;
}
@@ -4083,7 +4222,7 @@ cpuid_deep_cstates_supported(void)
cpi = CPU->cpu_m.mcpu_cpi;
- if (!(x86_feature & X86_CPUID))
+ if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
return (0);
switch (cpi->cpi_vendor) {
@@ -4145,7 +4284,7 @@ cpuid_arat_supported(void)
struct cpuid_regs regs;
ASSERT(cpuid_checkpass(CPU, 1));
- ASSERT(x86_feature & X86_CPUID);
+ ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
cpi = CPU->cpu_m.mcpu_cpi;
@@ -4178,7 +4317,8 @@ cpuid_iepb_supported(struct cpu *cp)
ASSERT(cpuid_checkpass(cp, 1));
- if (!(x86_feature & X86_CPUID) || !(x86_feature & X86_MSR)) {
+ if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
+ !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
return (0);
}
@@ -4205,7 +4345,8 @@ patch_memops(uint_t vendor)
size_t cnt, i;
caddr_t to, from;
- if ((vendor == X86_VENDOR_Intel) && ((x86_feature & X86_SSE4_2) != 0)) {
+ if ((vendor == X86_VENDOR_Intel) &&
+ is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
cnt = &bcopy_patch_end - &bcopy_patch_start;
to = &bcopy_ck_size;
from = &bcopy_patch_start;
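The is_x86_feature()/add_x86_feature() accessors defined earlier in this file all delegate to the generic bitmap macros. For reference, their shape in <sys/bitmap.h> is approximately the following (a sketch of the semantics, not a verbatim copy of the header):

#define	BT_NBIPUL	(sizeof (ulong_t) * NBBY)	/* bits per ulong_t */
#define	BT_BITOUL(nbits)	\
	(((nbits) + BT_NBIPUL - 1) / BT_NBIPUL)		/* ulongs per map */
#define	BT_SIZEOFMAP(nbits)	\
	(BT_BITOUL(nbits) * sizeof (ulong_t))		/* bytes per map */
#define	BT_TEST(map, b)		\
	(((map)[(b) / BT_NBIPUL] & (1UL << ((b) % BT_NBIPUL))) != 0)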
diff --git a/usr/src/uts/i86pc/os/cpupm/pwrnow.c b/usr/src/uts/i86pc/os/cpupm/pwrnow.c
index 8840d8ce34..d403c69e34 100644
--- a/usr/src/uts/i86pc/os/cpupm/pwrnow.c
+++ b/usr/src/uts/i86pc/os/cpupm/pwrnow.c
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#include <sys/x86_archext.h>
@@ -217,8 +216,8 @@ pwrnow_supported()
struct cpuid_regs cpu_regs;
/* Required features */
- if (!(x86_feature & X86_CPUID) ||
- !(x86_feature & X86_MSR)) {
+ if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
+ !is_x86_feature(x86_featureset, X86FSET_MSR)) {
PWRNOW_DEBUG(("No CPUID or MSR support."));
return (B_FALSE);
}
diff --git a/usr/src/uts/i86pc/os/cpupm/speedstep.c b/usr/src/uts/i86pc/os/cpupm/speedstep.c
index 151fdc79ae..2be4529a83 100644
--- a/usr/src/uts/i86pc/os/cpupm/speedstep.c
+++ b/usr/src/uts/i86pc/os/cpupm/speedstep.c
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright (c) 2009, Intel Corporation.
@@ -444,8 +443,8 @@ speedstep_supported(uint_t family, uint_t model)
struct cpuid_regs cpu_regs;
/* Required features */
- if (!(x86_feature & X86_CPUID) ||
- !(x86_feature & X86_MSR)) {
+ if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
+ !is_x86_feature(x86_featureset, X86FSET_MSR)) {
return (B_FALSE);
}
@@ -476,8 +475,8 @@ turbo_supported(void)
struct cpuid_regs cpu_regs;
/* Required features */
- if (!(x86_feature & X86_CPUID) ||
- !(x86_feature & X86_MSR)) {
+ if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
+ !is_x86_feature(x86_featureset, X86FSET_MSR)) {
return (B_FALSE);
}
diff --git a/usr/src/uts/i86pc/os/ddi_impl.c b/usr/src/uts/i86pc/os/ddi_impl.c
index c1a27cc466..a1ae318703 100644
--- a/usr/src/uts/i86pc/os/ddi_impl.c
+++ b/usr/src/uts/i86pc/os/ddi_impl.c
@@ -20,8 +20,7 @@
*/
/*
- * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -1557,7 +1556,8 @@ i_ddi_cacheattr_to_hatacc(uint_t flags, uint_t *hataccp)
* If write-combining is not supported, then it falls back
* to uncacheable.
*/
- if (cache_attr == IOMEM_DATA_UC_WR_COMBINE && !(x86_feature & X86_PAT))
+ if (cache_attr == IOMEM_DATA_UC_WR_COMBINE &&
+ !is_x86_feature(x86_featureset, X86FSET_PAT))
cache_attr = IOMEM_DATA_UNCACHED;
/*
diff --git a/usr/src/uts/i86pc/os/fastboot.c b/usr/src/uts/i86pc/os/fastboot.c
index 4cedb0be28..1520a6653c 100644
--- a/usr/src/uts/i86pc/os/fastboot.c
+++ b/usr/src/uts/i86pc/os/fastboot.c
@@ -20,8 +20,7 @@
*/
/*
- * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -601,7 +600,7 @@ fastboot_build_mbi(char *mdep, fastboot_info_t *nk)
static void
fastboot_init_fields(fastboot_info_t *nk)
{
- if (x86_feature & X86_PAE) {
+ if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
nk->fi_has_pae = 1;
nk->fi_shift_amt = fastboot_shift_amt_pae;
nk->fi_ptes_per_table = 512;
@@ -1155,9 +1154,11 @@ load_kernel_retry:
goto err_out;
}
- if ((x86_feature & X86_64) == 0 ||
- (x86_feature & X86_PAE) == 0) {
- cmn_err(CE_NOTE, "!Fastboot: Cannot "
+ if (!is_x86_feature(x86_featureset,
+ X86FSET_64) ||
+ !is_x86_feature(x86_featureset,
+ X86FSET_PAE)) {
+ cmn_err(CE_NOTE, "Fastboot: Cannot "
"reboot to %s: "
"not a 64-bit capable system",
kern_bootfile);
diff --git a/usr/src/uts/i86pc/os/fpu_subr.c b/usr/src/uts/i86pc/os/fpu_subr.c
index 11f226a1eb..7bb68f7168 100644
--- a/usr/src/uts/i86pc/os/fpu_subr.c
+++ b/usr/src/uts/i86pc/os/fpu_subr.c
@@ -20,12 +20,9 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Floating point configuration.
*/
@@ -142,7 +139,8 @@ fpu_probe(void)
*
* (Perhaps we should complain more about this case!)
*/
- if ((x86_feature & X86_SSE|X86_SSE2) == (X86_SSE|X86_SSE2)) {
+ if (is_x86_feature(x86_featureset, X86FSET_SSE) &&
+ is_x86_feature(x86_featureset, X86FSET_SSE2)) {
fp_kind = __FP_SSE;
ENABLE_SSE();
}
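Beyond the mechanical conversion, this hunk quietly fixes an operator-precedence bug: in C, & binds tighter than |, so the old condition parsed as shown below.

/* Old test, with C's actual precedence made explicit: */
if (((x86_feature & X86_SSE) | X86_SSE2) == (X86_SSE | X86_SSE2))
/*
 * X86_SSE2 is OR'ed in unconditionally, so the comparison held
 * whenever the SSE bit was set, even on a CPU without SSE2. The
 * replacement tests the two is_x86_feature() bits independently.
 */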
@@ -151,15 +149,15 @@ fpu_probe(void)
* SSE and SSE2 are both optional, and we patch kernel
* code to exploit it when present.
*/
- if (x86_feature & X86_SSE) {
+ if (is_x86_feature(x86_featureset, X86FSET_SSE)) {
fp_kind = __FP_SSE;
fpsave_ctxt = fpxsave_ctxt;
patch_sse();
- if (x86_feature & X86_SSE2)
+ if (is_x86_feature(x86_featureset, X86FSET_SSE2))
patch_sse2();
ENABLE_SSE();
} else {
- x86_feature &= ~X86_SSE2;
+ remove_x86_feature(x86_featureset, X86FSET_SSE2);
/*
* (Just in case the BIOS decided we wanted SSE
* enabled when we didn't. See 4965674.)
@@ -167,7 +165,7 @@ fpu_probe(void)
DISABLE_SSE();
}
#endif
- if (x86_feature & X86_SSE2) {
+ if (is_x86_feature(x86_featureset, X86FSET_SSE2)) {
use_sse_pagecopy = use_sse_pagezero = use_sse_copy = 1;
}
diff --git a/usr/src/uts/i86pc/os/lgrpplat.c b/usr/src/uts/i86pc/os/lgrpplat.c
index 02596478c0..ac647bea16 100644
--- a/usr/src/uts/i86pc/os/lgrpplat.c
+++ b/usr/src/uts/i86pc/os/lgrpplat.c
@@ -20,8 +20,7 @@
*/
/*
- * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright (c) 2010, Intel Corporation.
@@ -177,7 +176,7 @@
#include <sys/thread.h>
#include <sys/types.h>
#include <sys/var.h>
-#include <sys/x86_archext.h> /* for x86_feature and X86_AMD */
+#include <sys/x86_archext.h>
#include <vm/hat_i86.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
diff --git a/usr/src/uts/i86pc/os/machdep.c b/usr/src/uts/i86pc/os/machdep.c
index d56ad1fe1b..acd8a5140f 100644
--- a/usr/src/uts/i86pc/os/machdep.c
+++ b/usr/src/uts/i86pc/os/machdep.c
@@ -1161,7 +1161,7 @@ get_cpu_mstate(cpu_t *cpu, hrtime_t *times)
int
checked_rdmsr(uint_t msr, uint64_t *value)
{
- if ((x86_feature & X86_MSR) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_MSR))
return (ENOTSUP);
*value = rdmsr(msr);
return (0);
@@ -1174,7 +1174,7 @@ checked_rdmsr(uint_t msr, uint64_t *value)
int
checked_wrmsr(uint_t msr, uint64_t value)
{
- if ((x86_feature & X86_MSR) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_MSR))
return (ENOTSUP);
wrmsr(msr, value);
return (0);
diff --git a/usr/src/uts/i86pc/os/mlsetup.c b/usr/src/uts/i86pc/os/mlsetup.c
index bf57dd3c50..8086b773fd 100644
--- a/usr/src/uts/i86pc/os/mlsetup.c
+++ b/usr/src/uts/i86pc/os/mlsetup.c
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright (c) 2010, Intel Corporation.
@@ -180,13 +179,13 @@ mlsetup(struct regs *rp)
* was done in locore before mlsetup was called. Do the next
* pass in C code.
*
- * The x86_feature bits are set here on the basis of the capabilities
+ * The x86_featureset is initialized here based on the capabilities
* of the boot CPU. Note that if we choose to support CPUs that have
* different feature sets (at which point we would almost certainly
* want to set the feature bits to correspond to the feature
* minimum) this value may be altered.
*/
- x86_feature = cpuid_pass1(cpu[0]);
+ x86_featureset = cpuid_pass1(cpu[0]);
#if !defined(__xpv)
@@ -212,13 +211,16 @@ mlsetup(struct regs *rp)
* The Xen hypervisor does not correctly report whether rdtscp is
* supported or not, so we must assume that it is not.
*/
- if (get_hwenv() != HW_XEN_HVM && (x86_feature & X86_TSCP))
+ if (get_hwenv() != HW_XEN_HVM &&
+ is_x86_feature(x86_featureset, X86FSET_TSCP))
patch_tsc_read(X86_HAVE_TSCP);
else if (cpuid_getvendor(CPU) == X86_VENDOR_AMD &&
- cpuid_getfamily(CPU) <= 0xf && (x86_feature & X86_SSE2) != 0)
+ cpuid_getfamily(CPU) <= 0xf &&
+ is_x86_feature(x86_featureset, X86FSET_SSE2))
patch_tsc_read(X86_TSC_MFENCE);
else if (cpuid_getvendor(CPU) == X86_VENDOR_Intel &&
- cpuid_getfamily(CPU) <= 6 && (x86_feature & X86_SSE2) != 0)
+ cpuid_getfamily(CPU) <= 6 &&
+ is_x86_feature(x86_featureset, X86FSET_SSE2))
patch_tsc_read(X86_TSC_LFENCE);
#endif /* !__xpv */
@@ -229,7 +231,7 @@ mlsetup(struct regs *rp)
* or at least they do not implement it correctly. Patch them to
* return 0.
*/
- if ((x86_feature & X86_TSC) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_TSC))
patch_tsc_read(X86_NO_TSC);
#endif /* __i386 && !__xpv */
@@ -246,13 +248,13 @@ mlsetup(struct regs *rp)
* (the cpuid) for the rdtscp instruction on appropriately
* capable hardware.
*/
- if (x86_feature & X86_TSC)
+ if (is_x86_feature(x86_featureset, X86FSET_TSC))
setcr4(getcr4() & ~CR4_TSD);
- if (x86_feature & X86_TSCP)
+ if (is_x86_feature(x86_featureset, X86FSET_TSCP))
(void) wrmsr(MSR_AMD_TSCAUX, 0);
- if (x86_feature & X86_DE)
+ if (is_x86_feature(x86_featureset, X86FSET_DE))
setcr4(getcr4() | CR4_DE);
#endif /* __xpv */
diff --git a/usr/src/uts/i86pc/os/mp_machdep.c b/usr/src/uts/i86pc/os/mp_machdep.c
index dfa49538c9..d7aab080a6 100644
--- a/usr/src/uts/i86pc/os/mp_machdep.c
+++ b/usr/src/uts/i86pc/os/mp_machdep.c
@@ -237,7 +237,7 @@ pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
{
switch (hw) {
case PGHW_IPIPE:
- if (x86_feature & (X86_HTT)) {
+ if (is_x86_feature(x86_featureset, X86FSET_HTT)) {
/*
* Hyper-threading is SMT
*/
@@ -251,7 +251,8 @@ pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
else
return (0);
case PGHW_CHIP:
- if (x86_feature & (X86_CMP|X86_HTT))
+ if (is_x86_feature(x86_featureset, X86FSET_CMP) ||
+ is_x86_feature(x86_featureset, X86FSET_HTT))
return (1);
else
return (0);
@@ -1017,7 +1018,8 @@ mach_init()
idle_cpu = cpu_idle_adaptive;
CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
#ifndef __xpv
- if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait) {
+ if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
+ idle_cpu_prefer_mwait) {
CPU->cpu_m.mcpu_mwait = cpuid_mwait_alloc(CPU);
/*
* Protect ourself from insane mwait size.
@@ -1130,7 +1132,8 @@ mach_smpinit(void)
if (idle_cpu_use_hlt) {
disp_enq_thread = cpu_wakeup;
#ifndef __xpv
- if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait)
+ if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
+ idle_cpu_prefer_mwait)
disp_enq_thread = cpu_wakeup_mwait;
non_deep_idle_disp_enq_thread = disp_enq_thread;
#endif
@@ -1239,7 +1242,7 @@ mach_getcpufreq(void)
uint32_t pit_counter;
uint64_t processor_clks;
- if (x86_feature & X86_TSC) {
+ if (is_x86_feature(x86_featureset, X86FSET_TSC)) {
/*
* We have a TSC. freq_tsc() knows how to measure the number
* of clock cycles sampled against the PIT.
@@ -1411,7 +1414,7 @@ mach_clkinit(int preferred_mode, int *set_mode)
cpu_freq = machhztomhz(cpu_freq_hz);
- if (!(x86_feature & X86_TSC) || (cpu_freq == 0))
+ if (!is_x86_feature(x86_featureset, X86FSET_TSC) || (cpu_freq == 0))
tsc_gethrtime_enable = 0;
#ifndef __xpv
diff --git a/usr/src/uts/i86pc/os/mp_pc.c b/usr/src/uts/i86pc/os/mp_pc.c
index bc21812187..0429b463f6 100644
--- a/usr/src/uts/i86pc/os/mp_pc.c
+++ b/usr/src/uts/i86pc/os/mp_pc.c
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright (c) 2010, Intel Corporation.
@@ -313,7 +312,6 @@ mach_cpucontext_xalloc(struct cpu *cp, int optype)
*/
rm->rm_pdbr = MAKECR3(kas.a_hat->hat_htable->ht_pfn);
rm->rm_cpu = cp->cpu_id;
- rm->rm_x86feature = x86_feature;
/*
* For hot-adding CPU at runtime, Machine Check and Performance Counter
@@ -624,7 +622,7 @@ out_enable_cmi:
if ((hdl = cmi_init(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp))) != NULL) {
- if (x86_feature & X86_MCA)
+ if (is_x86_feature(x86_featureset, X86FSET_MCA))
cmi_mca_init(hdl);
cp->cpu_m.mcpu_cmi_hdl = hdl;
}
diff --git a/usr/src/uts/i86pc/os/mp_startup.c b/usr/src/uts/i86pc/os/mp_startup.c
index 146f879a66..f52c320c4b 100644
--- a/usr/src/uts/i86pc/os/mp_startup.c
+++ b/usr/src/uts/i86pc/os/mp_startup.c
@@ -20,8 +20,7 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright (c) 2010, Intel Corporation.
@@ -165,7 +164,8 @@ init_cpu_syscall(struct cpu *cp)
kpreempt_disable();
#if defined(__amd64)
- if ((x86_feature & (X86_MSR | X86_ASYSC)) == (X86_MSR | X86_ASYSC)) {
+ if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
+ is_x86_feature(x86_featureset, X86FSET_ASYSC)) {
#if !defined(__lint)
/*
@@ -205,7 +205,8 @@ init_cpu_syscall(struct cpu *cp)
* On 64-bit kernels on Nocona machines, the 32-bit syscall
* variant isn't available to 32-bit applications, but sysenter is.
*/
- if ((x86_feature & (X86_MSR | X86_SEP)) == (X86_MSR | X86_SEP)) {
+ if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
+ is_x86_feature(x86_featureset, X86FSET_SEP)) {
#if !defined(__lint)
/*
@@ -415,7 +416,8 @@ mp_cpu_configure_common(int cpun, boolean_t boot)
*/
cpuid_alloc_space(cp);
#if !defined(__xpv)
- if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait) {
+ if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
+ idle_cpu_prefer_mwait) {
cp->cpu_m.mcpu_mwait = cpuid_mwait_alloc(cp);
cp->cpu_m.mcpu_idle_cpu = cpu_idle_mwait;
} else
@@ -1142,7 +1144,7 @@ workaround_errata(struct cpu *cpu)
if (opteron_workaround_6323525) {
opteron_workaround_6323525++;
#if defined(__xpv)
- } else if (x86_feature & X86_SSE2) {
+ } else if (is_x86_feature(x86_featureset, X86FSET_SSE2)) {
if (DOMAIN_IS_INITDOMAIN(xen_info)) {
/*
* XXPV Use dom0_msr here when extended
@@ -1160,7 +1162,8 @@ workaround_errata(struct cpu *cpu)
opteron_workaround_6323525++;
}
#else /* __xpv */
- } else if ((x86_feature & X86_SSE2) && ((opteron_get_nnodes() *
+ } else if (is_x86_feature(x86_featureset, X86FSET_SSE2) &&
+ ((opteron_get_nnodes() *
cpuid_get_ncpu_per_chip(cpu)) > 1)) {
if ((xrdmsr(MSR_AMD_BU_CFG) & (UINT64_C(1) << 33)) == 0)
opteron_workaround_6323525++;
@@ -1602,8 +1605,7 @@ static void
mp_startup_common(boolean_t boot)
{
cpu_t *cp = CPU;
- uint_t new_x86_feature;
- const char *fmt = "?cpu%d: %b\n";
+ void *new_x86_featureset;
extern void cpu_event_init_cpu(cpu_t *);
/*
@@ -1629,13 +1631,13 @@ mp_startup_common(boolean_t boot)
*/
(void) (*ap_mlsetup)();
- new_x86_feature = cpuid_pass1(cp);
+ new_x86_featureset = cpuid_pass1(cp);
#ifndef __xpv
/*
* Program this cpu's PAT
*/
- if (x86_feature & X86_PAT)
+ if (is_x86_feature(x86_featureset, X86FSET_PAT))
pat_sync();
#endif
@@ -1643,7 +1645,7 @@ mp_startup_common(boolean_t boot)
* Set up TSC_AUX to contain the cpuid for this processor
* for the rdtscp instruction.
*/
- if (x86_feature & X86_TSCP)
+ if (is_x86_feature(x86_featureset, X86FSET_TSCP))
(void) wrmsr(MSR_AMD_TSCAUX, cp->cpu_id);
/*
@@ -1671,9 +1673,10 @@ mp_startup_common(boolean_t boot)
* likely to happen once the number of processors in a configuration
* gets large enough.
*/
- if ((x86_feature & new_x86_feature) != x86_feature) {
- cmn_err(CE_CONT, fmt, cp->cpu_id, new_x86_feature,
- FMT_X86_FEATURE);
+ if (compare_x86_featureset(x86_featureset, new_x86_featureset) ==
+ B_FALSE) {
+ cmn_err(CE_CONT, "cpu%d: featureset\n", cp->cpu_id);
+ print_x86_featureset(new_x86_featureset);
cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id);
}
@@ -1681,9 +1684,12 @@ mp_startup_common(boolean_t boot)
* We do not support cpus with mixed monitor/mwait support if the
* boot cpu supports monitor/mwait.
*/
- if ((x86_feature & ~new_x86_feature) & X86_MWAIT)
+ if (is_x86_feature(x86_featureset, X86FSET_MWAIT) !=
+ is_x86_feature(new_x86_featureset, X86FSET_MWAIT))
panic("unsupported mixed cpu monitor/mwait support detected");
+ free_x86_featureset(new_x86_featureset);
+
/*
* We could be more sophisticated here, and just mark the CPU
* as "faulted" but at this point we'll opt for the easier
@@ -1775,7 +1781,7 @@ mp_startup_common(boolean_t boot)
if ((hdl = cmi_init(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU),
cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) != NULL) {
- if (x86_feature & X86_MCA)
+ if (is_x86_feature(x86_featureset, X86FSET_MCA))
cmi_mca_init(hdl);
cp->cpu_m.mcpu_cmi_hdl = hdl;
}
@@ -1934,19 +1940,21 @@ mp_cpu_faulted_exit(struct cpu *cp)
* The following two routines are used as context operators on threads belonging
* to processes with a private LDT (see sysi86). Due to the rarity of such
* processes, these routines are currently written for best code readability and
- * organization rather than speed. We could avoid checking x86_feature at every
- * context switch by installing different context ops, depending on the
- * x86_feature flags, at LDT creation time -- one for each combination of fast
- * syscall feature flags.
+ * organization rather than speed. We could avoid checking x86_featureset at
+ * every context switch by installing different context ops, depending on
+ * x86_featureset, at LDT creation time -- one for each combination of fast
+ * syscall features.
*/
/*ARGSUSED*/
void
cpu_fast_syscall_disable(void *arg)
{
- if ((x86_feature & (X86_MSR | X86_SEP)) == (X86_MSR | X86_SEP))
+ if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
+ is_x86_feature(x86_featureset, X86FSET_SEP))
cpu_sep_disable();
- if ((x86_feature & (X86_MSR | X86_ASYSC)) == (X86_MSR | X86_ASYSC))
+ if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
+ is_x86_feature(x86_featureset, X86FSET_ASYSC))
cpu_asysc_disable();
}
@@ -1954,16 +1962,18 @@ cpu_fast_syscall_disable(void *arg)
void
cpu_fast_syscall_enable(void *arg)
{
- if ((x86_feature & (X86_MSR | X86_SEP)) == (X86_MSR | X86_SEP))
+ if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
+ is_x86_feature(x86_featureset, X86FSET_SEP))
cpu_sep_enable();
- if ((x86_feature & (X86_MSR | X86_ASYSC)) == (X86_MSR | X86_ASYSC))
+ if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
+ is_x86_feature(x86_featureset, X86FSET_ASYSC))
cpu_asysc_enable();
}
static void
cpu_sep_enable(void)
{
- ASSERT(x86_feature & X86_SEP);
+ ASSERT(is_x86_feature(x86_featureset, X86FSET_SEP));
ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
wrmsr(MSR_INTC_SEP_CS, (uint64_t)(uintptr_t)KCS_SEL);
@@ -1972,7 +1982,7 @@ cpu_sep_enable(void)
static void
cpu_sep_disable(void)
{
- ASSERT(x86_feature & X86_SEP);
+ ASSERT(is_x86_feature(x86_featureset, X86FSET_SEP));
ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
/*
@@ -1985,7 +1995,7 @@ cpu_sep_disable(void)
static void
cpu_asysc_enable(void)
{
- ASSERT(x86_feature & X86_ASYSC);
+ ASSERT(is_x86_feature(x86_featureset, X86FSET_ASYSC));
ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) |
@@ -1995,7 +2005,7 @@ cpu_asysc_enable(void)
static void
cpu_asysc_disable(void)
{
- ASSERT(x86_feature & X86_ASYSC);
+ ASSERT(is_x86_feature(x86_featureset, X86FSET_ASYSC));
ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
/*
diff --git a/usr/src/uts/i86pc/os/pci_mech1_amd.c b/usr/src/uts/i86pc/os/pci_mech1_amd.c
index d45408731b..3b6eb918fe 100644
--- a/usr/src/uts/i86pc/os/pci_mech1_amd.c
+++ b/usr/src/uts/i86pc/os/pci_mech1_amd.c
@@ -42,7 +42,7 @@ pci_check_amd_ioecs(void)
struct cpuid_regs cp;
int family;
- if ((x86_feature & X86_CPUID) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
return (B_FALSE);
/*
diff --git a/usr/src/uts/i86pc/os/startup.c b/usr/src/uts/i86pc/os/startup.c
index f69b37a9f2..e5afdcc014 100644
--- a/usr/src/uts/i86pc/os/startup.c
+++ b/usr/src/uts/i86pc/os/startup.c
@@ -1320,7 +1320,6 @@ static void
startup_kmem(void)
{
extern void page_set_colorequiv_arr(void);
- const char *fmt = "?features: %b\n";
PRM_POINT("startup_kmem() starting...");
@@ -1429,7 +1428,7 @@ startup_kmem(void)
/*
* print this out early so that we know what's going on
*/
- cmn_err(CE_CONT, fmt, x86_feature, FMT_X86_FEATURE);
+ print_x86_featureset(x86_featureset);
/*
* Initialize bp_mapin().
@@ -1651,7 +1650,7 @@ startup_modules(void)
if ((hdl = cmi_init(CMI_HDL_SOLARIS_xVM_MCA,
xen_physcpu_chipid(cpi), xen_physcpu_coreid(cpi),
xen_physcpu_strandid(cpi))) != NULL &&
- (x86_feature & X86_MCA))
+ is_x86_feature(x86_featureset, X86FSET_MCA))
cmi_mca_init(hdl);
}
}
@@ -1663,7 +1662,7 @@ startup_modules(void)
if ((get_hwenv() != HW_XEN_HVM) &&
(hdl = cmi_init(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU),
cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) != NULL &&
- (x86_feature & X86_MCA)) {
+ is_x86_feature(x86_featureset, X86FSET_MCA)) {
cmi_mca_init(hdl);
CPU->cpu_m.mcpu_cmi_hdl = hdl;
}
@@ -2670,7 +2669,7 @@ pat_sync(void)
{
ulong_t cr0, cr0_orig, cr4;
- if (!(x86_feature & X86_PAT))
+ if (!is_x86_feature(x86_featureset, X86FSET_PAT))
return;
cr0_orig = cr0 = getcr0();
cr4 = getcr4();
@@ -2993,12 +2992,13 @@ setx86isalist(void)
case X86_VENDOR_Intel:
case X86_VENDOR_AMD:
case X86_VENDOR_TM:
- if (x86_feature & X86_CMOV) {
+ if (is_x86_feature(x86_featureset, X86FSET_CMOV)) {
/*
* Pentium Pro or later
*/
(void) strcat(tp, "pentium_pro");
- (void) strcat(tp, x86_feature & X86_MMX ?
+ (void) strcat(tp,
+ is_x86_feature(x86_featureset, X86FSET_MMX) ?
"+mmx pentium_pro " : " ");
}
/*FALLTHROUGH*/
@@ -3007,9 +3007,10 @@ setx86isalist(void)
* The Cyrix 6x86 does not have any Pentium features
* accessible while not at privilege level 0.
*/
- if (x86_feature & X86_CPUID) {
+ if (is_x86_feature(x86_featureset, X86FSET_CPUID)) {
(void) strcat(tp, "pentium");
- (void) strcat(tp, x86_feature & X86_MMX ?
+ (void) strcat(tp,
+ is_x86_feature(x86_featureset, X86FSET_MMX) ?
"+mmx pentium " : " ");
}
break;
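
startup_kmem() above drops the kernel's "%b" format string (FMT_X86_FEATURE, removed from x86_archext.h at the end of this patch) in favour of print_x86_featureset(), since the feature set no longer fits in the single word that "%b" decodes. A hedged sketch of what such a printer could look like; the name table is truncated to the first five features from the retired format string, and the function name is invented for illustration:

#include <stdio.h>

/* First five names from the retired FMT_X86_FEATURE string, low bit first. */
static const char *fnames[] = { "lgpg", "tsc", "msr", "mtrr", "pge" };
#define	NFNAMES	(sizeof (fnames) / sizeof (fnames[0]))

static void
print_featureset_sketch(const unsigned long *bm)
{
	unsigned int nbits = 8 * sizeof (*bm);
	unsigned int f;

	(void) printf("?features:");
	for (f = 0; f < NFNAMES; f++) {
		if ((bm[f / nbits] >> (f % nbits)) & 1)
			(void) printf(" %s", fnames[f]);
	}
	(void) printf("\n");
}

int
main(void)
{
	unsigned long bm[1] = { 0x1bUL };	/* lgpg, tsc, mtrr, pge */

	print_featureset_sketch(bm);	/* "?features: lgpg tsc mtrr pge" */
	return (0);
}
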
diff --git a/usr/src/uts/i86pc/os/trap.c b/usr/src/uts/i86pc/os/trap.c
index af35ddf1b8..71995cc992 100644
--- a/usr/src/uts/i86pc/os/trap.c
+++ b/usr/src/uts/i86pc/os/trap.c
@@ -256,17 +256,17 @@ instr_is_other_syscall(caddr_t pc, int which)
{
uchar_t instr[FAST_SCALL_SIZE];
- ASSERT(which == X86_SEP || which == X86_ASYSC || which == 0xCD);
+ ASSERT(which == X86FSET_SEP || which == X86FSET_ASYSC || which == 0xCD);
if (copyin_nowatch(pc, (caddr_t)instr, FAST_SCALL_SIZE) != 0)
return (0);
switch (which) {
- case X86_SEP:
+ case X86FSET_SEP:
if (instr[0] == 0x0F && instr[1] == 0x34)
return (1);
break;
- case X86_ASYSC:
+ case X86FSET_ASYSC:
if (instr[0] == 0x0F && instr[1] == 0x05)
return (1);
break;
@@ -283,9 +283,9 @@ static const char *
syscall_insn_string(int syscall_insn)
{
switch (syscall_insn) {
- case X86_SEP:
+ case X86FSET_SEP:
return ("sysenter");
- case X86_ASYSC:
+ case X86FSET_ASYSC:
return ("syscall");
case 0xCD:
return ("int");
@@ -916,7 +916,7 @@ trap(struct regs *rp, caddr_t addr, processorid_t cpuid)
* be to emulate that particular instruction.
*/
if (p->p_ldt != NULL &&
- ldt_rewrite_syscall(rp, p, X86_ASYSC))
+ ldt_rewrite_syscall(rp, p, X86FSET_ASYSC))
goto out;
#ifdef __amd64
@@ -1018,7 +1018,8 @@ trap(struct regs *rp, caddr_t addr, processorid_t cpuid)
case T_SIMDFPE + USER: /* SSE and SSE2 exceptions */
if (tudebug && tudebugsse)
showregs(type, rp, addr);
- if ((x86_feature & (X86_SSE|X86_SSE2)) == 0) {
+ if (!is_x86_feature(x86_featureset, X86FSET_SSE) &&
+ !is_x86_feature(x86_featureset, X86FSET_SSE2)) {
/*
* There are rumours that some user instructions
* on older CPUs can cause this trap to occur; in
@@ -1268,7 +1269,7 @@ trap(struct regs *rp, caddr_t addr, processorid_t cpuid)
* this will be to emulate that particular instruction.
*/
if (p->p_ldt != NULL &&
- ldt_rewrite_syscall(rp, p, X86_SEP))
+ ldt_rewrite_syscall(rp, p, X86FSET_SEP))
goto out;
/*FALLTHROUGH*/
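
trap.c overloads `which` to carry either a fast-syscall feature identifier or the literal INT opcode 0xCD. The overload survives the renumbering because the new indices, X86FSET_SEP (12) and X86FSET_ASYSC (16), still cannot collide with 0xCD (205). A self-contained sketch of the instruction matching, with the byte patterns taken from the hunks above; the helper name and the bare constants are for illustration only:

static int
matches_syscall_insn(const unsigned char *instr, int which)
{
	switch (which) {
	case 12:	/* X86FSET_SEP: sysenter encodes as 0x0F 0x34 */
		return (instr[0] == 0x0F && instr[1] == 0x34);
	case 16:	/* X86FSET_ASYSC: syscall encodes as 0x0F 0x05 */
		return (instr[0] == 0x0F && instr[1] == 0x05);
	case 0xCD:	/* int $imm8 encodes as 0xCD nn */
		return (instr[0] == 0xCD);
	default:
		return (0);
	}
}

int
main(void)
{
	const unsigned char sysenter[] = { 0x0F, 0x34 };

	return (!matches_syscall_insn(sysenter, 12));	/* exits 0 */
}
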
diff --git a/usr/src/uts/i86pc/sys/rm_platter.h b/usr/src/uts/i86pc/sys/rm_platter.h
index 48e141126b..9ca3a4908d 100644
--- a/usr/src/uts/i86pc/sys/rm_platter.h
+++ b/usr/src/uts/i86pc/sys/rm_platter.h
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright (c) 2010, Intel Corporation.
@@ -70,7 +69,7 @@ typedef struct rm_platter {
gate_desc_t *rm_idt_base;
uint_t rm_pdbr; /* cr3 value */
uint_t rm_cpu; /* easy way to know which CPU we are */
- uint_t rm_x86feature; /* X86 supported features */
+ uint_t rm_filler3;
uint_t rm_cr4; /* cr4 value on cpu0 */
#if defined(__amd64)
/*
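
The rm_platter structure is consumed by real-mode startup assembly at fixed offsets, so the dead rm_x86feature field is replaced by the same-sized rm_filler3 rather than deleted, keeping every later member where the assembly expects it. An illustrative stand-alone check of that layout property; the abridged struct and the C11 static_assert are anachronistic sketch material, not the kernel header:

#include <assert.h>
#include <stddef.h>

typedef unsigned int uint_t;

struct platter_old {		/* abridged to the four members shown above */
	uint_t rm_pdbr;
	uint_t rm_cpu;
	uint_t rm_x86feature;	/* the retired member */
	uint_t rm_cr4;
};

struct platter_new {
	uint_t rm_pdbr;
	uint_t rm_cpu;
	uint_t rm_filler3;	/* same-sized hole keeps the layout stable */
	uint_t rm_cr4;
};

/* Members after the filler keep the offsets the assembly was built with. */
static_assert(offsetof(struct platter_new, rm_cr4) ==
    offsetof(struct platter_old, rm_cr4), "rm_platter layout changed");

int
main(void)
{
	return (0);
}
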
diff --git a/usr/src/uts/i86pc/vm/hat_i86.c b/usr/src/uts/i86pc/vm/hat_i86.c
index 092cd1659b..bc8e3e197f 100644
--- a/usr/src/uts/i86pc/vm/hat_i86.c
+++ b/usr/src/uts/i86pc/vm/hat_i86.c
@@ -491,7 +491,7 @@ set_max_page_level()
if (!kbm_largepage_support) {
lvl = 0;
} else {
- if (x86_feature & X86_1GPG) {
+ if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
lvl = 2;
if (chk_optimal_1gtlb &&
cpuid_opteron_erratum(CPU, 6671130)) {
@@ -528,7 +528,8 @@ mmu_init(void)
* If CPU enabled the page table global bit, use it for the kernel
* This is bit 7 in CR4 (PGE - Page Global Enable).
*/
- if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
+ if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
+ (getcr4() & CR4_PGE) != 0)
mmu.pt_global = PT_GLOBAL;
/*
@@ -576,10 +577,10 @@ mmu_init(void)
mmu.pte_size_shift = 2;
}
- if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
+ if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
panic("Processor does not support PAE");
- if ((x86_feature & X86_CX8) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_CX8))
panic("Processor does not support cmpxchg8b instruction");
#if defined(__amd64)
@@ -1095,7 +1096,7 @@ hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
/* nothing to set */;
} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
PTE_SET(pte, PT_NOCACHE);
- if (x86_feature & X86_PAT)
+ if (is_x86_feature(x86_featureset, X86FSET_PAT))
PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
else
PTE_SET(pte, PT_WRITETHRU);
diff --git a/usr/src/uts/i86pc/vm/htable.c b/usr/src/uts/i86pc/vm/htable.c
index 3bc7eb254d..bcb2b117a3 100644
--- a/usr/src/uts/i86pc/vm/htable.c
+++ b/usr/src/uts/i86pc/vm/htable.c
@@ -2416,7 +2416,7 @@ x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
size = count << mmu.pte_size_shift;
ASSERT(size > BLOCKZEROALIGN);
#ifdef __i386
- if ((x86_feature & X86_SSE2) == 0)
+ if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
bzero(dst_va, size);
else
#endif
diff --git a/usr/src/uts/intel/ia32/ml/i86_subr.s b/usr/src/uts/intel/ia32/ml/i86_subr.s
index b16c2ce457..17356cc573 100644
--- a/usr/src/uts/intel/ia32/ml/i86_subr.s
+++ b/usr/src/uts/intel/ia32/ml/i86_subr.s
@@ -20,8 +20,7 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -3086,8 +3085,8 @@ getcregs(struct cregs *crp)
movl %eax, CREG_CR2(%edx) /* cr2 */
movl %cr3, %eax
movl %eax, CREG_CR3(%edx) /* cr3 */
- testl $X86_LARGEPAGE, x86_feature
- jz .nocr4
+ bt $X86FSET_LARGEPAGE, x86_featureset
+ jnc .nocr4
movl %cr4, %eax
movl %eax, CREG_CR4(%edx) /* cr4 */
jmp .skip
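
In assembly the conversion changes the instruction choice: `testl $X86_LARGEPAGE, x86_feature` tested a mask against one 32-bit word, while `bt` takes a bit number, copies the selected bit of x86_featureset into the carry flag, and `jnc` branches when it is clear. A small C rendering of the same numbered-bit test over an array of bitmap words; the word size is an assumption of the sketch:

#include <limits.h>

#define	BM_BITS	(sizeof (unsigned long) * CHAR_BIT)

static int
bitmap_test(const unsigned long *bm, unsigned int bit)
{
	/*
	 * Mirrors "bt bit, mem; jnc label": select the word holding the
	 * bit, shift it down, and the result stands in for the carry flag.
	 */
	return ((bm[bit / BM_BITS] >> (bit % BM_BITS)) & 1) != 0;
}

int
main(void)
{
	unsigned long featureset[1] = { 1UL << 0 };	/* X86FSET_LARGEPAGE */

	return (!bitmap_test(featureset, 0));		/* exits 0 */
}
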
diff --git a/usr/src/uts/intel/ia32/os/cpc_subr.c b/usr/src/uts/intel/ia32/os/cpc_subr.c
index df7d5cda88..b15c57a8e5 100644
--- a/usr/src/uts/intel/ia32/os/cpc_subr.c
+++ b/usr/src/uts/intel/ia32/os/cpc_subr.c
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -118,7 +117,7 @@ kcpc_hw_init(cpu_t *cp)
struct cpuid_regs cpuid;
strands_perfmon_shared = 0;
- if (x86_feature & X86_HTT) {
+ if (is_x86_feature(x86_featureset, X86FSET_HTT)) {
if (cpuid_getvendor(cpu[0]) == X86_VENDOR_Intel) {
/*
* Intel processors that support Architectural
diff --git a/usr/src/uts/intel/ia32/os/desctbls.c b/usr/src/uts/intel/ia32/os/desctbls.c
index 146a89e842..a21e4a0386 100644
--- a/usr/src/uts/intel/ia32/os/desctbls.c
+++ b/usr/src/uts/intel/ia32/os/desctbls.c
@@ -1332,7 +1332,7 @@ brand_interpositioning_enable(void)
#else
- if (x86_feature & X86_ASYSC) {
+ if (is_x86_feature(x86_featureset, X86FSET_ASYSC)) {
wrmsr(MSR_AMD_LSTAR, (uintptr_t)brand_sys_syscall);
wrmsr(MSR_AMD_CSTAR, (uintptr_t)brand_sys_syscall32);
}
@@ -1340,7 +1340,7 @@ brand_interpositioning_enable(void)
#endif
#endif /* __amd64 */
- if (x86_feature & X86_SEP)
+ if (is_x86_feature(x86_featureset, X86FSET_SEP))
wrmsr(MSR_INTC_SEP_EIP, (uintptr_t)brand_sys_sysenter);
}
@@ -1376,7 +1376,7 @@ brand_interpositioning_disable(void)
#else
- if (x86_feature & X86_ASYSC) {
+ if (is_x86_feature(x86_featureset, X86FSET_ASYSC)) {
wrmsr(MSR_AMD_LSTAR, (uintptr_t)sys_syscall);
wrmsr(MSR_AMD_CSTAR, (uintptr_t)sys_syscall32);
}
@@ -1384,6 +1384,6 @@ brand_interpositioning_disable(void)
#endif
#endif /* __amd64 */
- if (x86_feature & X86_SEP)
+ if (is_x86_feature(x86_featureset, X86FSET_SEP))
wrmsr(MSR_INTC_SEP_EIP, (uintptr_t)sys_sysenter);
}
diff --git a/usr/src/uts/intel/ia32/os/sundep.c b/usr/src/uts/intel/ia32/os/sundep.c
index 13f04658aa..a8ce06fa4c 100644
--- a/usr/src/uts/intel/ia32/os/sundep.c
+++ b/usr/src/uts/intel/ia32/os/sundep.c
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
@@ -858,7 +857,7 @@ lwp_installctx(klwp_t *lwp)
* sep_save zeros the sysenter stack pointer msr; sep_restore sets
* it to the lwp's kernel stack pointer (kstktop).
*/
- if (x86_feature & X86_SEP) {
+ if (is_x86_feature(x86_featureset, X86FSET_SEP)) {
#if defined(__amd64)
caddr_t kstktop = (caddr_t)lwp->lwp_regs;
#elif defined(__i386)
diff --git a/usr/src/uts/intel/ia32/sys/traptrace.h b/usr/src/uts/intel/ia32/sys/traptrace.h
index 8edce6ff38..038f01715c 100644
--- a/usr/src/uts/intel/ia32/sys/traptrace.h
+++ b/usr/src/uts/intel/ia32/sys/traptrace.h
@@ -245,8 +245,8 @@ extern trap_trace_rec_t trap_trace_postmort; /* Entry used after death */
#define TRACE_STAMP(reg) \
xorl %eax, %eax; \
xorl %edx, %edx; \
- testl $X86_TSC, x86_feature; \
- jz 9f; \
+ btl $X86FSET_TSC, x86_featureset; \
+ jnc 9f; \
rdtsc; \
9: movl %eax, TTR_STAMP(reg); \
movl %edx, TTR_STAMP+4(reg)
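
TRACE_STAMP clears %eax:%edx and executes rdtsc only when the TSC feature bit is set, so a pre-TSC CPU records a zero timestamp instead of taking an undefined-opcode fault. The same logic in C, as a sketch: __rdtsc() is the GCC/Clang intrinsic, and trace_stamp() is an invented name standing in for the macro:

#include <stdint.h>
#include <x86intrin.h>

#define	X86FSET_TSC	1	/* bit 1 of the bitmap's first word */

static uint64_t
trace_stamp(unsigned long featureset0)
{
	if (((featureset0 >> X86FSET_TSC) & 1) == 0)
		return (0);	/* pre-TSC CPU: zero stamp, no #UD fault */
	return (__rdtsc());
}

int
main(void)
{
	return (trace_stamp(1UL << X86FSET_TSC) == 0);	/* exits 0 on x86 */
}
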
diff --git a/usr/src/uts/intel/pcbe/core_pcbe.c b/usr/src/uts/intel/pcbe/core_pcbe.c
index 02455478b9..ea0164ef3d 100644
--- a/usr/src/uts/intel/pcbe/core_pcbe.c
+++ b/usr/src/uts/intel/pcbe/core_pcbe.c
@@ -1193,7 +1193,7 @@ core_pcbe_init(void)
return (-1);
/* Set HTT-specific names of architectural & FFC events */
- if (x86_feature & X86_HTT) {
+ if (is_x86_feature(x86_featureset, X86FSET_HTT)) {
ffc_names = ffc_names_htt;
arch_events_table = arch_events_table_htt;
known_arch_events =
diff --git a/usr/src/uts/intel/pcbe/p123_pcbe.c b/usr/src/uts/intel/pcbe/p123_pcbe.c
index e9f671af58..0ccaab6ba0 100644
--- a/usr/src/uts/intel/pcbe/p123_pcbe.c
+++ b/usr/src/uts/intel/pcbe/p123_pcbe.c
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -536,7 +535,7 @@ ptm_pcbe_init(void)
int i;
size_t size;
- if (x86_feature & X86_MMX)
+ if (is_x86_feature(x86_featureset, X86FSET_MMX))
ptm_rdpmc_avail = 1;
/*
@@ -567,7 +566,7 @@ ptm_pcbe_init(void)
ptm_ver = PTM_VER_P6;
ptm_cpuref = P6_CPUREF;
ptm_pcbe_ops.pcbe_caps = CPC_CAP_OVERFLOW_INTERRUPT;
- if (x86_feature & X86_MMX)
+ if (is_x86_feature(x86_featureset, X86FSET_MMX))
ptm_impl_name = "Pentium Pro with MMX, Pentium II";
else
ptm_impl_name = "Pentium Pro, Pentium II";
diff --git a/usr/src/uts/intel/pcbe/p4_pcbe.c b/usr/src/uts/intel/pcbe/p4_pcbe.c
index 8c05c599a3..95f075921f 100644
--- a/usr/src/uts/intel/pcbe/p4_pcbe.c
+++ b/usr/src/uts/intel/pcbe/p4_pcbe.c
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -500,13 +499,13 @@ p4_pcbe_init(void)
p4_eventlist[i][size - 1] = '\0';
}
- if (x86_feature & X86_MMX)
+ if (is_x86_feature(x86_featureset, X86FSET_MMX))
p4_rdpmc_avail = 1;
/*
* The X86_HTT flag may disappear soon, so we'll isolate the impact of
* its demise to the following if().
*/
- if (x86_feature & X86_HTT)
+ if (is_x86_feature(x86_featureset, X86FSET_HTT))
p4_htt = 1;
return (0);
diff --git a/usr/src/uts/intel/sys/x86_archext.h b/usr/src/uts/intel/sys/x86_archext.h
index 810c256677..f05460dabf 100644
--- a/usr/src/uts/intel/sys/x86_archext.h
+++ b/usr/src/uts/intel/sys/x86_archext.h
@@ -19,8 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright (c) 2009, Intel Corporation.
@@ -324,45 +323,39 @@ extern "C" {
((uint64_t)MTRR_TYPE_WC << 48) | \
((uint64_t)MTRR_TYPE_UC << 56))
-#define X86_LARGEPAGE 0x00000001
-#define X86_TSC 0x00000002
-#define X86_MSR 0x00000004
-#define X86_MTRR 0x00000008
-#define X86_PGE 0x00000010
-#define X86_DE 0x00000020
-#define X86_CMOV 0x00000040
-#define X86_MMX 0x00000080
-#define X86_MCA 0x00000100
-#define X86_PAE 0x00000200
-#define X86_CX8 0x00000400
-#define X86_PAT 0x00000800
-#define X86_SEP 0x00001000
-#define X86_SSE 0x00002000
-#define X86_SSE2 0x00004000
-#define X86_HTT 0x00008000
-#define X86_ASYSC 0x00010000
-#define X86_NX 0x00020000
-#define X86_SSE3 0x00040000
-#define X86_CX16 0x00080000
-#define X86_CMP 0x00100000
-#define X86_TSCP 0x00200000
-#define X86_MWAIT 0x00400000
-#define X86_SSE4A 0x00800000
-#define X86_CPUID 0x01000000
-#define X86_SSSE3 0x02000000
-#define X86_SSE4_1 0x04000000
-#define X86_SSE4_2 0x08000000
-#define X86_1GPG 0x10000000
-#define X86_CLFSH 0x20000000
-#define X86_64 0x40000000
-#define X86_AES 0x80000000
-
-#define FMT_X86_FEATURE \
- "\20" \
- "\40aes\34sse4_2\33sse4_1\32ssse3\31cpuid" \
- "\30sse4a\27mwait\26tscp\25cmp\24cx16\23sse3\22nx\21asysc"\
- "\20htt\17sse2\16sse\15sep\14pat\13cx8\12pae\11mca" \
- "\10mmx\7cmov\6de\5pge\4mtrr\3msr\2tsc\1lgpg"
+#define X86FSET_LARGEPAGE 0
+#define X86FSET_TSC 1
+#define X86FSET_MSR 2
+#define X86FSET_MTRR 3
+#define X86FSET_PGE 4
+#define X86FSET_DE 5
+#define X86FSET_CMOV 6
+#define X86FSET_MMX 7
+#define X86FSET_MCA 8
+#define X86FSET_PAE 9
+#define X86FSET_CX8 10
+#define X86FSET_PAT 11
+#define X86FSET_SEP 12
+#define X86FSET_SSE 13
+#define X86FSET_SSE2 14
+#define X86FSET_HTT 15
+#define X86FSET_ASYSC 16
+#define X86FSET_NX 17
+#define X86FSET_SSE3 18
+#define X86FSET_CX16 19
+#define X86FSET_CMP 20
+#define X86FSET_TSCP 21
+#define X86FSET_MWAIT 22
+#define X86FSET_SSE4A 23
+#define X86FSET_CPUID 24
+#define X86FSET_SSSE3 25
+#define X86FSET_SSE4_1 26
+#define X86FSET_SSE4_2 27
+#define X86FSET_1GPG 28
+#define X86FSET_CLFSH 29
+#define X86FSET_64 30
+#define X86FSET_AES 31
+#define X86FSET_PCLMULQDQ 32
/*
* flags to patch tsc_read routine.
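
This is the heart of the change named in the synopsis: the old scheme encoded each feature as a mask bit in a single uint_t, and with X86_AES taking bit 31 the word was full. The new X86FSET_* names are bit positions into an opaque bitmap, so X86FSET_PCLMULQDQ can take index 32 and the namespace can keep growing; the FMT_X86_FEATURE "%b" string disappears along with the single word it decoded. The bitmap operations themselves live in cpuid.c, which is not part of this excerpt, so the following is only a minimal sketch of what they could look like; the struct, the helper names, and the feature count are all assumptions:

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define	NUM_X86_FEATURES	33	/* indices 0..32 above; assumed */
#define	BM_BITS			(sizeof (unsigned long) * CHAR_BIT)
#define	BM_WORDS		((NUM_X86_FEATURES + BM_BITS - 1) / BM_BITS)

typedef struct {
	unsigned long bm[BM_WORDS];
} featureset_t;

static int
is_feature(const featureset_t *s, unsigned int f)
{
	return ((s->bm[f / BM_BITS] >> (f % BM_BITS)) & 1) != 0;
}

static void
add_feature(featureset_t *s, unsigned int f)
{
	s->bm[f / BM_BITS] |= 1UL << (f % BM_BITS);
}

static void
remove_feature(featureset_t *s, unsigned int f)
{
	s->bm[f / BM_BITS] &= ~(1UL << (f % BM_BITS));
}

static int
compare_features(const featureset_t *a, const featureset_t *b)
{
	return (memcmp(a->bm, b->bm, sizeof (a->bm)) == 0);
}

int
main(void)
{
	featureset_t a = { { 0 } }, b = { { 0 } };

	add_feature(&a, 32);	/* X86FSET_PCLMULQDQ: past a 32-bit word */
	add_feature(&b, 32);
	(void) printf("%d %d\n", is_feature(&a, 32), compare_features(&a, &b));
	remove_feature(&a, 32);
	return (0);
}
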
@@ -389,7 +382,7 @@ extern "C" {
/*
* x86_type is a legacy concept; this is supplanted
- * for most purposes by x86_feature; modern CPUs
+ * for most purposes by x86_featureset; modern CPUs
* should be X86_TYPE_OTHER
*/
#define X86_TYPE_OTHER 0
@@ -567,7 +560,16 @@ extern "C" {
#if defined(_KERNEL) || defined(_KMEMUSER)
-extern uint_t x86_feature;
+extern void *x86_featureset;
+
+extern void free_x86_featureset(void *featureset);
+extern boolean_t is_x86_feature(void *featureset, uint_t feature);
+extern void add_x86_feature(void *featureset, uint_t feature);
+extern void remove_x86_feature(void *featureset, uint_t feature);
+extern boolean_t compare_x86_featureset(void *setA, void *setB);
+extern void print_x86_featureset(void *featureset);
+
+
extern uint_t x86_type;
extern uint_t x86_vendor;
extern uint_t x86_clflush_size;
@@ -653,7 +655,7 @@ struct cpuid_info;
extern void setx86isalist(void);
extern void cpuid_alloc_space(struct cpu *);
extern void cpuid_free_space(struct cpu *);
-extern uint_t cpuid_pass1(struct cpu *);
+extern void *cpuid_pass1(struct cpu *);
extern void cpuid_pass2(struct cpu *);
extern void cpuid_pass3(struct cpu *);
extern uint_t cpuid_pass4(struct cpu *);
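
cpuid_pass1() now hands back the featureset it discovered instead of a uint_t of flags, which is what lets an AP's features be compared against the boot CPU's set and the copy released with free_x86_featureset() afterwards, as in the mp_startup.c hunks earlier in this patch. A hedged, compile-only sketch of that flow; warn_stub() and the exact control flow are illustrative:

struct cpu;			/* opaque here, as in the header */
typedef int boolean_t;

extern void *x86_featureset;
extern void *cpuid_pass1(struct cpu *);
extern boolean_t compare_x86_featureset(void *, void *);
extern void free_x86_featureset(void *);
extern void warn_stub(const char *);	/* stand-in for cmn_err() */

void
mp_startup_feature_check_sketch(struct cpu *cp)
{
	void *new_set = cpuid_pass1(cp);	/* pass 1 now returns the set */

	if (!compare_x86_featureset(x86_featureset, new_set))
		warn_stub("cpu features differ from boot cpu");
	free_x86_featureset(new_set);		/* as in mp_startup.c above */
}
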