author     Mark Haywood <Mark.Haywood@Sun.COM>    2008-08-13 12:28:54 -0400
committer  Mark Haywood <Mark.Haywood@Sun.COM>    2008-08-13 12:28:54 -0400
commit     7f606acec863be28b51fb0f694ca86b41ca76e6d (patch)
tree       533d1e3ec55bea148c9f85328f932d93685b87d4
parent     d3a1459128b677cee1a84512ca49eef4bffd392d (diff)
download   illumos-joyent-7f606acec863be28b51fb0f694ca86b41ca76e6d.tar.gz
6715149 T-State support for Intel-based processors
--HG--
rename : usr/src/uts/i86pc/io/cpu_acpi.c => usr/src/uts/i86pc/io/cpudrv/cpu_acpi.c
rename : usr/src/uts/i86pc/io/cpudrv_plat.c => usr/src/uts/i86pc/io/cpudrv/cpudrv_mach.c
rename : usr/src/uts/i86pc/io/pwrnow.c => usr/src/uts/i86pc/io/cpudrv/pwrnow.c
rename : usr/src/uts/i86pc/io/speedstep.c => usr/src/uts/i86pc/io/cpudrv/speedstep.c
rename : usr/src/uts/i86pc/sys/cpudrv_plat.h => usr/src/uts/i86pc/sys/cpudrv_mach.h
rename : usr/src/uts/sun4u/io/cpudrv_plat.c => usr/src/uts/sun4u/io/cpudrv_mach.c
rename : usr/src/uts/sun4u/sys/cpudrv_plat.h => usr/src/uts/sun4u/sys/cpudrv_mach.h
-rw-r--r--  usr/src/uts/common/io/cpudrv.c | 210
-rw-r--r--  usr/src/uts/common/sys/cpudrv.h | 17
-rw-r--r--  usr/src/uts/i86pc/Makefile.files | 13
-rw-r--r--  usr/src/uts/i86pc/Makefile.rules | 9
-rw-r--r--  usr/src/uts/i86pc/io/cpu_acpi.c | 519
-rw-r--r--  usr/src/uts/i86pc/io/cpudrv/cpu_acpi.c | 791
-rw-r--r--  usr/src/uts/i86pc/io/cpudrv/cpudrv_amd.c | 51
-rw-r--r--  usr/src/uts/i86pc/io/cpudrv/cpudrv_intel.c | 93
-rw-r--r--  usr/src/uts/i86pc/io/cpudrv/cpudrv_mach.c | 506
-rw-r--r--  usr/src/uts/i86pc/io/cpudrv/cpudrv_throttle.c | 342
-rw-r--r--  usr/src/uts/i86pc/io/cpudrv/pwrnow.c (renamed from usr/src/uts/i86pc/io/pwrnow.c) | 170
-rw-r--r--  usr/src/uts/i86pc/io/cpudrv/speedstep.c (renamed from usr/src/uts/i86pc/io/speedstep.c) | 194
-rw-r--r--  usr/src/uts/i86pc/io/cpudrv_plat.c | 312
-rw-r--r--  usr/src/uts/i86pc/os/cpupm.c | 37
-rw-r--r--  usr/src/uts/i86pc/sys/cpu_acpi.h | 137
-rw-r--r--  usr/src/uts/i86pc/sys/cpudrv_mach.h | 211
-rw-r--r--  usr/src/uts/i86pc/sys/cpudrv_plat.h | 134
-rw-r--r--  usr/src/uts/i86pc/sys/cpudrv_throttle.h | 41
-rw-r--r--  usr/src/uts/i86pc/sys/cpupm.h | 16
-rw-r--r--  usr/src/uts/i86pc/sys/pwrnow.h | 12
-rw-r--r--  usr/src/uts/i86pc/sys/speedstep.h | 12
-rw-r--r--  usr/src/uts/sun4u/Makefile.files | 4
-rw-r--r--  usr/src/uts/sun4u/io/cpudrv_mach.c (renamed from usr/src/uts/sun4u/io/cpudrv_plat.c) | 37
-rw-r--r--  usr/src/uts/sun4u/sys/cpudrv_mach.h (renamed from usr/src/uts/sun4u/sys/cpudrv_plat.h) | 38
24 files changed, 2539 insertions, 1367 deletions
diff --git a/usr/src/uts/common/io/cpudrv.c b/usr/src/uts/common/io/cpudrv.c
index 3edf61efd8..5a53f6c64e 100644
--- a/usr/src/uts/common/io/cpudrv.c
+++ b/usr/src/uts/common/io/cpudrv.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* CPU Device driver. The driver is not DDI-compliant.
*
@@ -48,8 +46,7 @@
#include <sys/machsystm.h>
#include <sys/x_call.h>
-#include <sys/cpudrv.h>
-#include <sys/cpudrv_plat.h>
+#include <sys/cpudrv_mach.h>
#include <sys/msacct.h>
/*
@@ -99,7 +96,7 @@ struct dev_ops cpudrv_ops = {
static struct modldrv modldrv = {
&mod_driverops, /* modops */
- "CPU Driver %I%", /* linkinfo */
+ "CPU Driver", /* linkinfo */
&cpudrv_ops, /* dev_ops */
};
@@ -112,7 +109,7 @@ static struct modlinkage modlinkage = {
/*
* Function prototypes
*/
-static int cpudrv_pm_init(cpudrv_devstate_t *cpudsp);
+static int cpudrv_pm_init_power(cpudrv_devstate_t *cpudsp);
static void cpudrv_pm_free(cpudrv_devstate_t *cpudsp);
static int cpudrv_pm_comp_create(cpudrv_devstate_t *cpudsp);
static void cpudrv_pm_monitor_disp(void *arg);
@@ -157,11 +154,14 @@ int cpudrv_direct_pm = 0;
* for current speed.
*/
#define CPUDRV_PM_MONITOR_INIT(cpudsp) { \
- ASSERT(mutex_owned(&(cpudsp)->lock)); \
- (cpudsp)->cpudrv_pm.timeout_id = timeout(cpudrv_pm_monitor_disp, \
- (cpudsp), (((cpudsp)->cpudrv_pm.cur_spd == NULL) ? \
- CPUDRV_PM_QUANT_CNT_OTHR : \
- (cpudsp)->cpudrv_pm.cur_spd->quant_cnt)); \
+ if (CPUDRV_PM_POWER_ENABLED(cpudsp)) { \
+ ASSERT(mutex_owned(&(cpudsp)->lock)); \
+ (cpudsp)->cpudrv_pm.timeout_id = \
+ timeout(cpudrv_pm_monitor_disp, \
+ (cpudsp), (((cpudsp)->cpudrv_pm.cur_spd == NULL) ? \
+ CPUDRV_PM_QUANT_CNT_OTHR : \
+ (cpudsp)->cpudrv_pm.cur_spd->quant_cnt)); \
+ } \
}
/*
@@ -170,16 +170,17 @@ int cpudrv_direct_pm = 0;
#define CPUDRV_PM_MONITOR_FINI(cpudsp) { \
timeout_id_t tmp_tid; \
ASSERT(mutex_owned(&(cpudsp)->lock)); \
- ASSERT((cpudsp)->cpudrv_pm.timeout_id); \
tmp_tid = (cpudsp)->cpudrv_pm.timeout_id; \
(cpudsp)->cpudrv_pm.timeout_id = 0; \
mutex_exit(&(cpudsp)->lock); \
- (void) untimeout(tmp_tid); \
- mutex_enter(&(cpudsp)->cpudrv_pm.timeout_lock); \
- while ((cpudsp)->cpudrv_pm.timeout_count != 0) \
- cv_wait(&(cpudsp)->cpudrv_pm.timeout_cv, \
- &(cpudsp)->cpudrv_pm.timeout_lock); \
- mutex_exit(&(cpudsp)->cpudrv_pm.timeout_lock); \
+ if (tmp_tid != 0) { \
+ (void) untimeout(tmp_tid); \
+ mutex_enter(&(cpudsp)->cpudrv_pm.timeout_lock); \
+ while ((cpudsp)->cpudrv_pm.timeout_count != 0) \
+ cv_wait(&(cpudsp)->cpudrv_pm.timeout_cv, \
+ &(cpudsp)->cpudrv_pm.timeout_lock); \
+ mutex_exit(&(cpudsp)->cpudrv_pm.timeout_lock); \
+ } \
mutex_enter(&(cpudsp)->lock); \
}
@@ -240,6 +241,8 @@ cpudrv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
case DDI_ATTACH:
DPRINTF(D_ATTACH, ("cpudrv_attach: instance %d: "
"DDI_ATTACH called\n", instance));
+ if (CPUDRV_PM_DISABLED())
+ return (DDI_FAILURE);
if (ddi_soft_state_zalloc(cpudrv_state, instance) !=
DDI_SUCCESS) {
cmn_err(CE_WARN, "cpudrv_attach: instance %d: "
@@ -267,63 +270,78 @@ cpudrv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
CPUDRV_PM_DISABLE();
return (DDI_FAILURE);
}
- if (cpudrv_pm_init(cpudsp) != DDI_SUCCESS) {
- ddi_soft_state_free(cpudrv_state, instance);
- CPUDRV_PM_DISABLE();
- return (DDI_FAILURE);
- }
- if (cpudrv_pm_comp_create(cpudsp) != DDI_SUCCESS) {
+ if (!cpudrv_mach_pm_init(cpudsp)) {
ddi_soft_state_free(cpudrv_state, instance);
CPUDRV_PM_DISABLE();
- cpudrv_pm_free(cpudsp);
- return (DDI_FAILURE);
- }
- if (ddi_prop_update_string(DDI_DEV_T_NONE,
- dip, "pm-class", "CPU") != DDI_PROP_SUCCESS) {
- ddi_soft_state_free(cpudrv_state, instance);
- CPUDRV_PM_DISABLE();
- cpudrv_pm_free(cpudsp);
return (DDI_FAILURE);
}
+ mutex_init(&cpudsp->lock, NULL, MUTEX_DRIVER, NULL);
+ if (CPUDRV_PM_POWER_ENABLED(cpudsp)) {
+ if (cpudrv_pm_init_power(cpudsp) != DDI_SUCCESS) {
+ CPUDRV_PM_DISABLE();
+ cpudrv_pm_free(cpudsp);
+ ddi_soft_state_free(cpudrv_state, instance);
+ return (DDI_FAILURE);
+ }
+ if (cpudrv_pm_comp_create(cpudsp) != DDI_SUCCESS) {
+ CPUDRV_PM_DISABLE();
+ cpudrv_pm_free(cpudsp);
+ ddi_soft_state_free(cpudrv_state, instance);
+ return (DDI_FAILURE);
+ }
+ if (ddi_prop_update_string(DDI_DEV_T_NONE,
+ dip, "pm-class", "CPU") != DDI_PROP_SUCCESS) {
+ CPUDRV_PM_DISABLE();
+ cpudrv_pm_free(cpudsp);
+ ddi_soft_state_free(cpudrv_state, instance);
+ return (DDI_FAILURE);
+ }
- /*
- * Taskq is used to dispatch routine to monitor CPU activities.
- */
- cpudsp->cpudrv_pm.tq = taskq_create_instance(
- "cpudrv_pm_monitor",
- ddi_get_instance(dip), CPUDRV_PM_TASKQ_THREADS,
- (maxclsyspri - 1), CPUDRV_PM_TASKQ_MIN,
- CPUDRV_PM_TASKQ_MAX, TASKQ_PREPOPULATE|TASKQ_CPR_SAFE);
+ /*
+ * Taskq is used to dispatch routine to monitor CPU
+ * activities.
+ */
+ cpudsp->cpudrv_pm.tq = taskq_create_instance(
+ "cpudrv_pm_monitor",
+ ddi_get_instance(dip), CPUDRV_PM_TASKQ_THREADS,
+ (maxclsyspri - 1), CPUDRV_PM_TASKQ_MIN,
+ CPUDRV_PM_TASKQ_MAX,
+ TASKQ_PREPOPULATE|TASKQ_CPR_SAFE);
+
+ mutex_init(&cpudsp->cpudrv_pm.timeout_lock, NULL,
+ MUTEX_DRIVER, NULL);
+ cv_init(&cpudsp->cpudrv_pm.timeout_cv, NULL,
+ CV_DEFAULT, NULL);
- mutex_init(&cpudsp->lock, NULL, MUTEX_DRIVER, NULL);
- mutex_init(&cpudsp->cpudrv_pm.timeout_lock, NULL, MUTEX_DRIVER,
- NULL);
- cv_init(&cpudsp->cpudrv_pm.timeout_cv, NULL, CV_DEFAULT, NULL);
+ /*
+ * Driver needs to assume that CPU is running at
+ * unknown speed at DDI_ATTACH and switch it to the
+ * needed speed. We assume that initial needed speed
+ * is full speed for us.
+ */
+ /*
+ * We need to take the lock because cpudrv_pm_monitor()
+ * will start running in parallel with attach().
+ */
+ mutex_enter(&cpudsp->lock);
+ cpudsp->cpudrv_pm.cur_spd = NULL;
+ cpudsp->cpudrv_pm.targ_spd =
+ cpudsp->cpudrv_pm.head_spd;
+ cpudsp->cpudrv_pm.pm_started = B_FALSE;
+ /*
+ * We don't call pm_raise_power() directly from attach
+ * because driver attach for a slave CPU node can
+ * happen before the CPU is even initialized. We just
+ * start the monitoring system which understands
+ * unknown speed and moves CPU to targ_spd when it
+ * has been initialized.
+ */
+ CPUDRV_PM_MONITOR_INIT(cpudsp);
+ mutex_exit(&cpudsp->lock);
- /*
- * Driver needs to assume that CPU is running at unknown speed
- * at DDI_ATTACH and switch it to the needed speed. We assume
- * that initial needed speed is full speed for us.
- */
- /*
- * We need to take the lock because cpudrv_pm_monitor()
- * will start running in parallel with attach().
- */
- mutex_enter(&cpudsp->lock);
- cpudsp->cpudrv_pm.cur_spd = NULL;
- cpudsp->cpudrv_pm.targ_spd = cpudsp->cpudrv_pm.head_spd;
- cpudsp->cpudrv_pm.pm_started = B_FALSE;
- /*
- * We don't call pm_raise_power() directly from attach because
- * driver attach for a slave CPU node can happen before the
- * CPU is even initialized. We just start the monitoring
- * system which understands unknown speed and moves CPU
- * to targ_spd when it have been initialized.
- */
- CPUDRV_PM_MONITOR_INIT(cpudsp);
- mutex_exit(&cpudsp->lock);
+ }
- CPUDRV_PM_INSTALL_TOPSPEED_CHANGE_HANDLER(cpudsp, dip);
+ CPUDRV_PM_INSTALL_MAX_CHANGE_HANDLER(cpudsp, dip);
ddi_report_dev(dip);
return (DDI_SUCCESS);
@@ -331,12 +349,16 @@ cpudrv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
case DDI_RESUME:
DPRINTF(D_ATTACH, ("cpudrv_attach: instance %d: "
"DDI_RESUME called\n", instance));
- if ((cpudsp = ddi_get_soft_state(cpudrv_state, instance)) ==
- NULL) {
- cmn_err(CE_WARN, "cpudrv_attach: instance %d: "
- "can't get state", instance);
- return (DDI_FAILURE);
- }
+
+ cpudsp = ddi_get_soft_state(cpudrv_state, instance);
+ ASSERT(cpudsp != NULL);
+
+ /*
+ * Nothing to do for resume, if not doing active PM.
+ */
+ if (!CPUDRV_PM_POWER_ENABLED(cpudsp))
+ return (DDI_SUCCESS);
+
mutex_enter(&cpudsp->lock);
/*
* Driver needs to assume that CPU is running at unknown speed
@@ -382,12 +404,16 @@ cpudrv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
case DDI_SUSPEND:
DPRINTF(D_DETACH, ("cpudrv_detach: instance %d: "
"DDI_SUSPEND called\n", instance));
- if ((cpudsp = ddi_get_soft_state(cpudrv_state, instance)) ==
- NULL) {
- cmn_err(CE_WARN, "cpudrv_detach: instance %d: "
- "can't get state", instance);
- return (DDI_FAILURE);
- }
+
+ cpudsp = ddi_get_soft_state(cpudrv_state, instance);
+ ASSERT(cpudsp != NULL);
+
+ /*
+ * Nothing to do for suspend, if not doing active PM.
+ */
+ if (!CPUDRV_PM_POWER_ENABLED(cpudsp))
+ return (DDI_SUCCESS);
+
/*
* During a checkpoint-resume sequence, framework will
* stop interrupts to quiesce kernel activity. This will
@@ -476,10 +502,10 @@ cpudrv_power(dev_info_t *dip, int comp, int level)
* In normal operation, we fail if we are busy and request is
* to lower the power level. We let this go through if the driver
* is in special direct pm mode. On x86, we also let this through
- * if the change is due to a request to throttle the max speed.
+ * if the change is due to a request to govern the max speed.
*/
if (!cpudrv_direct_pm && (cpupm->pm_busycnt >= 1) &&
- !cpudrv_pm_is_throttle_thread(cpupm)) {
+ !cpudrv_pm_is_governor_thread(cpupm)) {
if ((cpupm->cur_spd != NULL) &&
(level < cpupm->cur_spd->pm_level)) {
mutex_exit(&cpudsp->lock);
@@ -492,7 +518,7 @@ cpudrv_power(dev_info_t *dip, int comp, int level)
break;
}
if (!new_spd) {
- CPUDRV_PM_RESET_THROTTLE_THREAD(cpupm);
+ CPUDRV_PM_RESET_GOVERNOR_THREAD(cpupm);
mutex_exit(&cpudsp->lock);
cmn_err(CE_WARN, "cpudrv_power: instance %d: "
"can't locate new CPU speed", instance);
@@ -513,12 +539,12 @@ cpudrv_power(dev_info_t *dip, int comp, int level)
if (!is_ready) {
DPRINTF(D_POWER, ("cpudrv_power: instance %d: "
"CPU not ready for x-calls\n", instance));
- } else if (!(is_ready = cpudrv_pm_all_instances_ready())) {
+ } else if (!(is_ready = cpudrv_pm_power_ready())) {
DPRINTF(D_POWER, ("cpudrv_power: instance %d: "
- "waiting for all CPUs to be ready\n", instance));
+ "waiting for all CPUs to be power manageable\n", instance));
}
if (!is_ready) {
- CPUDRV_PM_RESET_THROTTLE_THREAD(cpupm);
+ CPUDRV_PM_RESET_GOVERNOR_THREAD(cpupm);
mutex_exit(&cpudsp->lock);
return (DDI_FAILURE);
}
@@ -560,7 +586,7 @@ cpudrv_power(dev_info_t *dip, int comp, int level)
cpupm->lastquan_mstate[CMS_USER] = 0;
cpupm->lastquan_lbolt = 0;
cpupm->cur_spd = new_spd;
- CPUDRV_PM_RESET_THROTTLE_THREAD(cpupm);
+ CPUDRV_PM_RESET_GOVERNOR_THREAD(cpupm);
mutex_exit(&cpudsp->lock);
return (DDI_SUCCESS);
@@ -607,7 +633,7 @@ set_supp_freqs(cpu_t *cp, cpudrv_pm_t *cpupm)
* Initialize power management data.
*/
static int
-cpudrv_pm_init(cpudrv_devstate_t *cpudsp)
+cpudrv_pm_init_power(cpudrv_devstate_t *cpudsp)
{
cpudrv_pm_t *cpupm = &(cpudsp->cpudrv_pm);
cpudrv_pm_spd_t *cur_spd;
@@ -618,14 +644,10 @@ cpudrv_pm_init(cpudrv_devstate_t *cpudsp)
int user_cnt_percent;
int i;
- if (!cpudrv_pm_init_module(cpudsp))
- return (DDI_FAILURE);
-
CPUDRV_PM_GET_SPEEDS(cpudsp, speeds, nspeeds);
if (nspeeds < 2) {
/* Need at least two speeds to power manage */
CPUDRV_PM_FREE_SPEEDS(speeds, nspeeds);
- cpudrv_pm_free_module(cpudsp);
return (DDI_FAILURE);
}
cpupm->num_spd = nspeeds;
@@ -750,7 +772,7 @@ cpudrv_pm_free(cpudrv_devstate_t *cpudsp)
cur_spd = next_spd;
}
bzero(cpupm, sizeof (cpudrv_pm_t));
- cpudrv_pm_free_module(cpudsp);
+ cpudrv_mach_pm_free(cpudsp);
}
/*
@@ -961,9 +983,9 @@ cpudrv_pm_monitor(void *arg)
if (!is_ready) {
DPRINTF(D_PM_MONITOR, ("cpudrv_pm_monitor: instance %d: "
"CPU not ready for x-calls\n", ddi_get_instance(dip)));
- } else if (!(is_ready = cpudrv_pm_all_instances_ready())) {
+ } else if (!(is_ready = cpudrv_pm_power_ready())) {
DPRINTF(D_PM_MONITOR, ("cpudrv_pm_monitor: instance %d: "
- "waiting for all CPUs to be ready\n",
+ "waiting for all CPUs to be power manageable\n",
ddi_get_instance(dip)));
}
if (!is_ready) {
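
A note on the macro rework above: CPUDRV_PM_MONITOR_FINI no longer asserts that a
timeout is pending, because with the new CPUDRV_PM_POWER_ENABLED() gate a monitor
timeout may never have been armed for an instance. A function-style rendering of the
cancel-then-drain logic, using the driver's own names but written here purely as an
illustration (it is not part of the patch):

/*
 * Illustrative sketch only -- not part of the patch.  Mirrors the
 * reworked CPUDRV_PM_MONITOR_FINI: cancel the monitor timeout if one
 * was armed, then wait for any dispatched monitor work to drain.
 */
static void
cpudrv_pm_monitor_fini_sketch(cpudrv_devstate_t *cpudsp)
{
        timeout_id_t tid;

        ASSERT(mutex_owned(&cpudsp->lock));
        tid = cpudsp->cpudrv_pm.timeout_id;
        cpudsp->cpudrv_pm.timeout_id = 0;
        mutex_exit(&cpudsp->lock);
        if (tid != 0) {
                (void) untimeout(tid);
                mutex_enter(&cpudsp->cpudrv_pm.timeout_lock);
                while (cpudsp->cpudrv_pm.timeout_count != 0)
                        cv_wait(&cpudsp->cpudrv_pm.timeout_cv,
                            &cpudsp->cpudrv_pm.timeout_lock);
                mutex_exit(&cpudsp->cpudrv_pm.timeout_lock);
        }
        mutex_enter(&cpudsp->lock);
}
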
diff --git a/usr/src/uts/common/sys/cpudrv.h b/usr/src/uts/common/sys/cpudrv.h
index decdc77636..8ca2eab769 100644
--- a/usr/src/uts/common/sys/cpudrv.h
+++ b/usr/src/uts/common/sys/cpudrv.h
@@ -26,8 +26,6 @@
#ifndef _SYS_CPUDRV_H
#define _SYS_CPUDRV_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/promif.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>
@@ -88,7 +86,7 @@ typedef struct cpudrv_pm {
kmutex_t timeout_lock; /* protect timeout_count */
kcondvar_t timeout_cv; /* wait on timeout_count change */
#if defined(__x86)
- kthread_t *pm_throttle_thread; /* throttling thread */
+ kthread_t *pm_governor_thread; /* governor thread */
#endif
boolean_t pm_started; /* PM really started */
} cpudrv_pm_t;
@@ -165,10 +163,7 @@ typedef struct cpudrv_devstate {
processorid_t cpu_id; /* CPU number for this node */
cpudrv_pm_t cpudrv_pm; /* power management data */
kmutex_t lock; /* protects state struct */
-#if defined(__x86)
- void *acpi_handle; /* ACPI cache */
- void *module_state; /* CPU module state */
-#endif
+ void *mach_state; /* machine specific state */
} cpudrv_devstate_t;
extern void *cpudrv_state;
@@ -199,10 +194,10 @@ extern uint_t cpudrv_debug;
extern int cpudrv_pm_change_speed(cpudrv_devstate_t *, cpudrv_pm_spd_t *);
extern boolean_t cpudrv_pm_get_cpu_id(dev_info_t *, processorid_t *);
-extern boolean_t cpudrv_pm_all_instances_ready(void);
-extern boolean_t cpudrv_pm_is_throttle_thread(cpudrv_pm_t *);
-extern boolean_t cpudrv_pm_init_module(cpudrv_devstate_t *);
-extern void cpudrv_pm_free_module(cpudrv_devstate_t *);
+extern boolean_t cpudrv_pm_power_ready(void);
+extern boolean_t cpudrv_pm_is_governor_thread(cpudrv_pm_t *);
+extern boolean_t cpudrv_mach_pm_init(cpudrv_devstate_t *);
+extern void cpudrv_mach_pm_free(cpudrv_devstate_t *);
#endif /* _KERNEL */
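
The header change above collapses the per-architecture acpi_handle/module_state fields
into a single mach_state pointer and renames the platform hooks to cpudrv_mach_pm_init()
and cpudrv_mach_pm_free(). As a hypothetical illustration of the contract (not taken from
the i86pc or sun4u implementations in this patch), a platform with no private state could
satisfy it like this:

/*
 * Hypothetical, minimal implementation of the machine-dependent hooks
 * declared above; the real i86pc and sun4u versions do much more.
 */
boolean_t
cpudrv_mach_pm_init(cpudrv_devstate_t *cpudsp)
{
        cpudsp->mach_state = NULL;      /* no per-CPU platform state */
        return (B_TRUE);                /* initialization succeeded */
}

void
cpudrv_mach_pm_free(cpudrv_devstate_t *cpudsp)
{
        cpudsp->mach_state = NULL;
}
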
diff --git a/usr/src/uts/i86pc/Makefile.files b/usr/src/uts/i86pc/Makefile.files
index 6c73dd88ba..a306ecbcdd 100644
--- a/usr/src/uts/i86pc/Makefile.files
+++ b/usr/src/uts/i86pc/Makefile.files
@@ -23,8 +23,6 @@
# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
-# ident "%Z%%M% %I% %E% SMI"
-#
# This Makefile defines file modules in the directory uts/i86pc
# and its children. These are the source files which are i86pc
# "implementation architecture" dependent.
@@ -178,7 +176,16 @@ MCAMD_OBJS += \
mcamd_subr.o \
mcamd_pcicfg.o
-CPUDRV_OBJS += cpudrv.o cpudrv_plat.o cpu_acpi.o speedstep.o pwrnow.o
+CPUDRV_OBJS += \
+ cpudrv.o \
+ cpudrv_amd.o \
+ cpudrv_intel.o \
+ cpudrv_mach.o \
+ cpudrv_throttle.o \
+ cpu_acpi.o \
+ speedstep.o \
+ pwrnow.o
+
PPM_OBJS += ppm_subr.o ppm.o ppm_plat.o
ACPIPPM_OBJS += acpippm.o acpisleep.o
diff --git a/usr/src/uts/i86pc/Makefile.rules b/usr/src/uts/i86pc/Makefile.rules
index fb8311a748..c70d7a09ed 100644
--- a/usr/src/uts/i86pc/Makefile.rules
+++ b/usr/src/uts/i86pc/Makefile.rules
@@ -23,8 +23,6 @@
# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
-# ident "%Z%%M% %I% %E% SMI"
-#
# This Makefile defines the build rules for the directory uts/i86pc
# and its children. These are the source files which are i86pc
@@ -73,6 +71,10 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/i86pc/io/acpi_drv/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
+$(OBJS_DIR)/%.o: $(UTSBASE)/i86pc/io/cpudrv/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
$(OBJS_DIR)/%.o: $(UTSBASE)/i86pc/io/ioat/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
@@ -264,6 +266,9 @@ $(LINTS_DIR)/%.ln: $(UTSBASE)/i86pc/io/%.c
$(LINTS_DIR)/%.ln: $(UTSBASE)/i86pc/io/acpi_drv/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
+$(LINTS_DIR)/%.ln: $(UTSBASE)/i86pc/io/cpudrv/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
$(LINTS_DIR)/%.ln: $(UTSBASE)/i86pc/io/ioat/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
diff --git a/usr/src/uts/i86pc/io/cpu_acpi.c b/usr/src/uts/i86pc/io/cpu_acpi.c
deleted file mode 100644
index 11fe37561f..0000000000
--- a/usr/src/uts/i86pc/io/cpu_acpi.c
+++ /dev/null
@@ -1,519 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-#include <sys/cpu_acpi.h>
-
-#define CPU_ACPI_PSTATES_SIZE(cnt) (cnt * sizeof (cpu_acpi_pstate_t))
-#define CPU_ACPI_PSS_SIZE (sizeof (cpu_acpi_pstate_t) / sizeof (uint32_t))
-
-/*
- * Map the dip to an ACPI handle for the device.
- */
-cpu_acpi_handle_t
-cpu_acpi_init(dev_info_t *dip)
-{
- cpu_acpi_handle_t handle;
-
- handle = kmem_zalloc(sizeof (cpu_acpi_state_t), KM_SLEEP);
-
- if (ACPI_FAILURE(acpica_get_handle(dip, &handle->cs_handle))) {
- kmem_free(handle, sizeof (cpu_acpi_state_t));
- return (NULL);
- }
- handle->cs_dip = dip;
- return (handle);
-}
-
-/*
- * Free any resources.
- */
-void
-cpu_acpi_fini(cpu_acpi_handle_t handle)
-{
- if (handle->cs_pstates != NULL) {
- if (CPU_ACPI_PSTATES(handle) != NULL)
- kmem_free(CPU_ACPI_PSTATES(handle),
- CPU_ACPI_PSTATES_SIZE(
- CPU_ACPI_PSTATES_COUNT(handle)));
- kmem_free(handle->cs_pstates, sizeof (cpu_acpi_pstates_t));
- }
- kmem_free(handle, sizeof (cpu_acpi_state_t));
-}
-
-/*
- * Cache the ACPI _PCT data. The _PCT data defines the interface to use
- * when making power level transitions (i.e., system IO ports, fixed
- * hardware port, etc).
- */
-int
-cpu_acpi_cache_pct(cpu_acpi_handle_t handle)
-{
- ACPI_BUFFER abuf;
- ACPI_OBJECT *obj;
- AML_RESOURCE_GENERIC_REGISTER *greg;
- cpu_acpi_pct_t *pct;
- int ret = -1;
- int i;
-
- /*
- * Fetch the _PCT (if present) for the CPU node. Since the PCT is
- * optional, non-existence is not a failure (we just consider
- * it a fixed hardware case).
- */
- CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PCT_CACHED);
- abuf.Length = ACPI_ALLOCATE_BUFFER;
- abuf.Pointer = NULL;
- if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle, "_PCT",
- NULL, &abuf, ACPI_TYPE_PACKAGE))) {
- CPU_ACPI_PCT(handle)[0].pc_addrspace_id =
- ACPI_ADR_SPACE_FIXED_HARDWARE;
- CPU_ACPI_PCT(handle)[1].pc_addrspace_id =
- ACPI_ADR_SPACE_FIXED_HARDWARE;
- return (1);
- }
-
- obj = abuf.Pointer;
- if (obj->Package.Count != 2) {
- cmn_err(CE_NOTE, "!cpu_acpi: _PCT package bad count %d.",
- obj->Package.Count);
- goto out;
- }
-
- /*
- * Does the package look coherent?
- */
- for (i = 0; i < obj->Package.Count; i++) {
- if (obj->Package.Elements[i].Type != ACPI_TYPE_BUFFER) {
- cmn_err(CE_NOTE, "!cpu_acpi: "
- "Unexpected data in _PCT package.");
- goto out;
- }
-
- greg = (AML_RESOURCE_GENERIC_REGISTER *)
- obj->Package.Elements[i].Buffer.Pointer;
- if (greg->DescriptorType !=
- ACPI_RESOURCE_NAME_GENERIC_REGISTER) {
- cmn_err(CE_NOTE, "!cpu_acpi: "
- "_PCT package has format error.");
- goto out;
- }
- if (greg->ResourceLength !=
- ACPI_AML_SIZE_LARGE(AML_RESOURCE_GENERIC_REGISTER)) {
- cmn_err(CE_NOTE, "!cpu_acpi: "
- "_PCT package not right size.");
- goto out;
- }
- if (greg->AddressSpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE &&
- greg->AddressSpaceId != ACPI_ADR_SPACE_SYSTEM_IO) {
- cmn_err(CE_NOTE, "!cpu_apci: _PCT contains unsupported "
- "address space type %x", greg->AddressSpaceId);
- goto out;
- }
- }
-
- /*
- * Looks good!
- */
- for (i = 0; i < obj->Package.Count; i++) {
- greg = (AML_RESOURCE_GENERIC_REGISTER *)
- obj->Package.Elements[i].Buffer.Pointer;
- pct = &CPU_ACPI_PCT(handle)[i];
- pct->pc_addrspace_id = greg->AddressSpaceId;
- pct->pc_width = greg->BitWidth;
- pct->pc_offset = greg->BitOffset;
- pct->pc_asize = greg->AccessSize;
- pct->pc_address = greg->Address;
- }
- CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PCT_CACHED);
- ret = 0;
-out:
- AcpiOsFree(abuf.Pointer);
- return (ret);
-}
-
-/*
- * Cache the ACPI _PSD data. The _PSD data defines CPU dependencies
- * (think CPU domains).
- */
-int
-cpu_acpi_cache_psd(cpu_acpi_handle_t handle)
-{
- ACPI_BUFFER abuf;
- ACPI_OBJECT *pkg, *elements;
- cpu_acpi_psd_t *psd;
- int ret = -1;
-
- /*
- * Fetch the _PSD (if present) for the CPU node. Since the PSD is
- * optional, non-existence is not a failure (it's up to the caller
- * to determine how to handle non-existence).
- */
- CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSD_CACHED);
- abuf.Length = ACPI_ALLOCATE_BUFFER;
- abuf.Pointer = NULL;
- if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle, "_PSD",
- NULL, &abuf, ACPI_TYPE_PACKAGE))) {
- return (1);
- }
-
- pkg = abuf.Pointer;
- if (pkg->Package.Count != 1) {
- cmn_err(CE_NOTE, "!cpu_acpi: _PSD unsupported package "
- "count %d.", pkg->Package.Count);
- goto out;
- }
-
- if (pkg->Package.Elements[0].Type != ACPI_TYPE_PACKAGE ||
- pkg->Package.Elements[0].Package.Count != 5) {
- cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in _PSD package.");
- goto out;
- }
- elements = pkg->Package.Elements[0].Package.Elements;
- if (elements[0].Integer.Value != 5 || elements[1].Integer.Value != 0) {
- cmn_err(CE_NOTE, "!cpu_acpi: Unexpected _PSD revision.");
- goto out;
- }
- psd = &CPU_ACPI_PSD(handle);
-
- psd->pd_entries = elements[0].Integer.Value;
- psd->pd_revision = elements[1].Integer.Value;
- psd->pd_domain = elements[2].Integer.Value;
- psd->pd_type = elements[3].Integer.Value;
- psd->pd_num = elements[4].Integer.Value;
- CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSD_CACHED);
- ret = 0;
-out:
- AcpiOsFree(abuf.Pointer);
- return (ret);
-}
-
-/*
- * Cache the _PSS data. The _PSS data defines the different power levels
- * supported by the CPU and the attributes associated with each power level
- * (i.e., frequency, voltage, etc.). The power levels are number from
- * highest to lowest. That is, the highest power level is _PSS entry 0
- * and the lowest power level is the last _PSS entry.
- */
-int
-cpu_acpi_cache_pstates(cpu_acpi_handle_t handle)
-{
- ACPI_BUFFER abuf;
- ACPI_OBJECT *obj, *q, *l;
- cpu_acpi_pstate_t *pstate;
- boolean_t eot = B_FALSE;
- int ret = -1;
- int cnt;
- int i, j;
-
- /*
- * Fetch the _PSS (if present) for the CPU node. If there isn't
- * one, then CPU power management will not be possible.
- */
- CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSS_CACHED);
- abuf.Length = ACPI_ALLOCATE_BUFFER;
- abuf.Pointer = NULL;
- if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle, "_PSS",
- NULL, &abuf, ACPI_TYPE_PACKAGE))) {
- cmn_err(CE_NOTE, "!cpu_acpi: _PSS package not found.");
- return (1);
- }
- obj = abuf.Pointer;
- if (obj->Package.Count < 2) {
- cmn_err(CE_NOTE, "!cpu_acpi: _PSS package bad count %d.",
- obj->Package.Count);
- goto out;
- }
-
- /*
- * Does the package look coherent?
- */
- cnt = 0;
- for (i = 0, l = NULL; i < obj->Package.Count; i++, l = q) {
- if (obj->Package.Elements[i].Type != ACPI_TYPE_PACKAGE ||
- obj->Package.Elements[i].Package.Count !=
- CPU_ACPI_PSS_SIZE) {
- cmn_err(CE_NOTE, "!cpu_acpi: "
- "Unexpected data in _PSS package.");
- goto out;
- }
-
- q = obj->Package.Elements[i].Package.Elements;
- for (j = 0; j < CPU_ACPI_PSS_SIZE; j++) {
- if (q[j].Type != ACPI_TYPE_INTEGER) {
- cmn_err(CE_NOTE, "!cpu_acpi: "
- "_PSS element invalid (type)");
- goto out;
- }
- }
-
- /*
- * Ignore duplicate entries.
- */
- if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
- continue;
-
- /*
- * Some _PSS tables are larger than required
- * and unused elements are filled with patterns
- * of 0xff. Simply check here for frequency = 0xffff
- * and stop counting if found.
- */
- if (q[0].Integer.Value == 0xffff) {
- eot = B_TRUE;
- continue;
- }
-
- /*
- * We should never find a valid entry after we've hit
- * an end-of-table entry.
- */
- if (eot) {
- cmn_err(CE_NOTE, "!cpu_acpi: "
- "Unexpected data in _PSS package after eot.");
- goto out;
- }
-
- /*
- * pstates must be defined in order from highest to lowest.
- */
- if (l != NULL && l[0].Integer.Value < q[0].Integer.Value) {
- cmn_err(CE_NOTE, "!cpu_acpi: "
- "_PSS package pstate definitions out of order.");
- goto out;
- }
-
- /*
- * This entry passes.
- */
- cnt++;
- }
- if (cnt == 0)
- goto out;
-
- /*
- * Yes, fill in pstate structure.
- */
- handle->cs_pstates = kmem_zalloc(sizeof (cpu_acpi_pstates_t), KM_SLEEP);
- CPU_ACPI_PSTATES_COUNT(handle) = cnt;
- CPU_ACPI_PSTATES(handle) = kmem_zalloc(CPU_ACPI_PSTATES_SIZE(cnt),
- KM_SLEEP);
- pstate = CPU_ACPI_PSTATES(handle);
- for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
- uint32_t *up;
-
- q = obj->Package.Elements[i].Package.Elements;
-
- /*
- * Skip duplicate entries.
- */
- if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
- continue;
-
- up = (uint32_t *)pstate;
- for (j = 0; j < CPU_ACPI_PSS_SIZE; j++)
- up[j] = q[j].Integer.Value;
- pstate++;
- cnt--;
- }
- CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSS_CACHED);
- ret = 0;
-out:
- AcpiOsFree(abuf.Pointer);
- return (ret);
-}
-
-/*
- * Cache the _PPC data. The _PPC simply contains an integer value which
- * represents the highest power level that a CPU should transition to.
- * That is, it's an index into the array of _PSS entries and will be
- * greater than or equal to zero.
- */
-void
-cpu_acpi_cache_ppc(cpu_acpi_handle_t handle)
-{
- ACPI_BUFFER abuf;
- ACPI_OBJECT *obj;
-
- /*
- * Fetch the _PPC (if present) for the CPU node. Since the PPC is
- * optional (I think), non-existence is not a failure.
- */
- CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PPC_CACHED);
- abuf.Length = ACPI_ALLOCATE_BUFFER;
- abuf.Pointer = NULL;
- if (ACPI_FAILURE(AcpiEvaluateObject(handle->cs_handle, "_PPC",
- NULL, &abuf))) {
- CPU_ACPI_PPC(handle) = 0;
- return;
- }
-
- obj = (ACPI_OBJECT *)abuf.Pointer;
- CPU_ACPI_PPC(handle) = obj->Integer.Value;
- CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PPC_CACHED);
- AcpiOsFree(abuf.Pointer);
-}
-
-/*
- * Cache the _PCT, _PSS, _PSD and _PPC data.
- */
-int
-cpu_acpi_cache_data(cpu_acpi_handle_t handle)
-{
- if (cpu_acpi_cache_pct(handle) < 0) {
- cmn_err(CE_WARN, "!cpu_acpi: error parsing _PCT for "
- "CPU instance %d", ddi_get_instance(handle->cs_dip));
- return (-1);
- }
-
- if (cpu_acpi_cache_pstates(handle) != 0) {
- cmn_err(CE_WARN, "!cpu_acpi: error parsing _PSS for "
- "CPU instance %d", ddi_get_instance(handle->cs_dip));
- return (-1);
- }
-
- if (cpu_acpi_cache_psd(handle) < 0) {
- cmn_err(CE_WARN, "!cpu_acpi: error parsing _PSD for "
- "CPU instance %d", ddi_get_instance(handle->cs_dip));
- return (-1);
- }
-
- cpu_acpi_cache_ppc(handle);
-
- return (0);
-}
-
-/*
- * Register a handler for _PPC change notifications. The _PPC
- * change notification is the means by which _P
- */
-void
-cpu_acpi_install_ppc_handler(cpu_acpi_handle_t handle,
- ACPI_NOTIFY_HANDLER handler, dev_info_t *dip)
-{
- char path[MAXNAMELEN];
- if (ACPI_FAILURE(AcpiInstallNotifyHandler(handle->cs_handle,
- ACPI_DEVICE_NOTIFY, handler, dip)))
- cmn_err(CE_NOTE, "!cpu_acpi: Unable to register _PPC "
- "notify handler for %s", ddi_pathname(dip, path));
-}
-
-/*
- * Write _PDC.
- */
-int
-cpu_acpi_write_pdc(cpu_acpi_handle_t handle, uint32_t revision, uint32_t count,
- uint32_t *capabilities)
-{
- ACPI_OBJECT obj;
- ACPI_OBJECT_LIST list = { 1, &obj};
- uint32_t *buffer;
- uint32_t *bufptr;
- uint32_t bufsize;
- int i;
-
- bufsize = (count + 2) * sizeof (uint32_t);
- buffer = kmem_zalloc(bufsize, KM_SLEEP);
- buffer[0] = revision;
- buffer[1] = count;
- bufptr = &buffer[2];
- for (i = 0; i < count; i++)
- *bufptr++ = *capabilities++;
-
- obj.Type = ACPI_TYPE_BUFFER;
- obj.Buffer.Length = bufsize;
- obj.Buffer.Pointer = (void *)buffer;
-
- /*
- * _PDC is optional, so don't log failure.
- */
- if (ACPI_FAILURE(AcpiEvaluateObject(handle->cs_handle, "_PDC",
- &list, NULL))) {
- kmem_free(buffer, bufsize);
- return (-1);
- }
-
- kmem_free(buffer, bufsize);
- return (0);
-}
-
-/*
- * Write to system IO port.
- */
-int
-cpu_acpi_write_port(ACPI_IO_ADDRESS address, uint32_t value, uint32_t width)
-{
- if (ACPI_FAILURE(AcpiOsWritePort(address, value, width))) {
- cmn_err(CE_NOTE, "cpu_acpi: error writing system IO port "
- "%lx.", (long)address);
- return (-1);
- }
- return (0);
-}
-
-/*
- * Read from a system IO port.
- */
-int
-cpu_acpi_read_port(ACPI_IO_ADDRESS address, uint32_t *value, uint32_t width)
-{
- if (ACPI_FAILURE(AcpiOsReadPort(address, value, width))) {
- cmn_err(CE_NOTE, "cpu_acpi: error reading system IO port "
- "%lx.", (long)address);
- return (-1);
- }
- return (0);
-}
-
-/*
- * Return supported frequencies.
- */
-uint_t
-cpu_acpi_get_speeds(cpu_acpi_handle_t handle, int **speeds)
-{
- cpu_acpi_pstate_t *pstate;
- int *hspeeds;
- uint_t nspeeds;
- int i;
-
- nspeeds = CPU_ACPI_PSTATES_COUNT(handle);
- hspeeds = kmem_zalloc(nspeeds * sizeof (int), KM_SLEEP);
- for (i = 0; i < nspeeds; i++) {
- pstate = CPU_ACPI_PSTATE(handle, i);
- hspeeds[i] = CPU_ACPI_FREQ(pstate);
- }
- *speeds = hspeeds;
- return (nspeeds);
-}
-
-/*
- * Free resources allocated by cpu_acpi_get_speeds().
- */
-void
-cpu_acpi_free_speeds(int *speeds, uint_t nspeeds)
-{
- kmem_free(speeds, nspeeds * sizeof (int));
-}
diff --git a/usr/src/uts/i86pc/io/cpudrv/cpu_acpi.c b/usr/src/uts/i86pc/io/cpudrv/cpu_acpi.c
new file mode 100644
index 0000000000..e19363e12a
--- /dev/null
+++ b/usr/src/uts/i86pc/io/cpudrv/cpu_acpi.c
@@ -0,0 +1,791 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/cpu_acpi.h>
+
+/*
+ * List of the processor ACPI object types that are being used.
+ */
+typedef enum cpu_acpi_obj {
+ PDC_OBJ = 0,
+ PCT_OBJ,
+ PSS_OBJ,
+ PSD_OBJ,
+ PPC_OBJ,
+ PTC_OBJ,
+ TSS_OBJ,
+ TSD_OBJ,
+ TPC_OBJ
+} cpu_acpi_obj_t;
+
+/*
+ * Container to store object name.
+ * Other attributes can be added in the future as necessary.
+ */
+typedef struct cpu_acpi_obj_attr {
+ char *name;
+} cpu_acpi_obj_attr_t;
+
+/*
+ * List of object attributes.
+ * NOTE: Please keep the ordering of the list the same as cpu_acpi_obj_t.
+ */
+static cpu_acpi_obj_attr_t cpu_acpi_obj_attrs[] = {
+ {"_PDC"},
+ {"_PCT"},
+ {"_PSS"},
+ {"_PSD"},
+ {"_PPC"},
+ {"_PTC"},
+ {"_TSS"},
+ {"_TSD"},
+ {"_TPC"}
+};
+
+/*
+ * Cache the ACPI CPU control data objects.
+ */
+static int
+cpu_acpi_cache_ctrl_regs(cpu_acpi_handle_t handle, cpu_acpi_obj_t objtype,
+ cpu_acpi_ctrl_regs_t *regs)
+{
+ ACPI_BUFFER abuf;
+ ACPI_OBJECT *obj;
+ AML_RESOURCE_GENERIC_REGISTER *greg;
+ int ret = -1;
+ int i;
+
+ /*
+ * Fetch the control registers (if present) for the CPU node.
+ * Since they are optional, non-existence is not a failure
+ * (we just consider it a fixed hardware case).
+ */
+ abuf.Length = ACPI_ALLOCATE_BUFFER;
+ abuf.Pointer = NULL;
+ if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle,
+ cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
+ ACPI_TYPE_PACKAGE))) {
+ regs[0].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
+ regs[1].cr_addrspace_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
+ return (1);
+ }
+
+ obj = abuf.Pointer;
+ if (obj->Package.Count != 2) {
+ cmn_err(CE_NOTE, "!cpu_acpi: %s package bad count %d.",
+ cpu_acpi_obj_attrs[objtype].name, obj->Package.Count);
+ goto out;
+ }
+
+ /*
+ * Does the package look coherent?
+ */
+ for (i = 0; i < obj->Package.Count; i++) {
+ if (obj->Package.Elements[i].Type != ACPI_TYPE_BUFFER) {
+ cmn_err(CE_NOTE, "!cpu_acpi: "
+ "Unexpected data in %s package.",
+ cpu_acpi_obj_attrs[objtype].name);
+ goto out;
+ }
+
+ greg = (AML_RESOURCE_GENERIC_REGISTER *)
+ obj->Package.Elements[i].Buffer.Pointer;
+ if (greg->DescriptorType !=
+ ACPI_RESOURCE_NAME_GENERIC_REGISTER) {
+ cmn_err(CE_NOTE, "!cpu_acpi: "
+ "%s package has format error.",
+ cpu_acpi_obj_attrs[objtype].name);
+ goto out;
+ }
+ if (greg->ResourceLength !=
+ ACPI_AML_SIZE_LARGE(AML_RESOURCE_GENERIC_REGISTER)) {
+ cmn_err(CE_NOTE, "!cpu_acpi: "
+ "%s package not right size.",
+ cpu_acpi_obj_attrs[objtype].name);
+ goto out;
+ }
+ if (greg->AddressSpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE &&
+ greg->AddressSpaceId != ACPI_ADR_SPACE_SYSTEM_IO) {
+ cmn_err(CE_NOTE, "!cpu_apci: %s contains unsupported "
+ "address space type %x",
+ cpu_acpi_obj_attrs[objtype].name,
+ greg->AddressSpaceId);
+ goto out;
+ }
+ }
+
+ /*
+ * Looks good!
+ */
+ for (i = 0; i < obj->Package.Count; i++) {
+ greg = (AML_RESOURCE_GENERIC_REGISTER *)
+ obj->Package.Elements[i].Buffer.Pointer;
+ regs[i].cr_addrspace_id = greg->AddressSpaceId;
+ regs[i].cr_width = greg->BitWidth;
+ regs[i].cr_offset = greg->BitOffset;
+ regs[i].cr_asize = greg->AccessSize;
+ regs[i].cr_address = greg->Address;
+ }
+ ret = 0;
+out:
+ AcpiOsFree(abuf.Pointer);
+ return (ret);
+}
+
+/*
+ * Cache the ACPI _PCT data. The _PCT data defines the interface to use
+ * when making power level transitions (i.e., system IO ports, fixed
+ * hardware port, etc).
+ */
+static int
+cpu_acpi_cache_pct(cpu_acpi_handle_t handle)
+{
+ cpu_acpi_pct_t *pct;
+ int ret;
+
+ CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PCT_CACHED);
+ pct = &CPU_ACPI_PCT(handle)[0];
+ if ((ret = cpu_acpi_cache_ctrl_regs(handle, PCT_OBJ, pct)) == 0)
+ CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PCT_CACHED);
+ return (ret);
+}
+
+/*
+ * Cache the ACPI _PTC data. The _PTC data defines the interface to use
+ * when making T-state transitions (i.e., system IO ports, fixed
+ * hardware port, etc).
+ */
+static int
+cpu_acpi_cache_ptc(cpu_acpi_handle_t handle)
+{
+ cpu_acpi_ptc_t *ptc;
+ int ret;
+
+ CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PTC_CACHED);
+ ptc = &CPU_ACPI_PTC(handle)[0];
+ if ((ret = cpu_acpi_cache_ctrl_regs(handle, PTC_OBJ, ptc)) == 0)
+ CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PTC_CACHED);
+ return (ret);
+}
+
+/*
+ * Cache the ACPI CPU state dependency data objects.
+ */
+static int
+cpu_acpi_cache_state_dependencies(cpu_acpi_handle_t handle,
+ cpu_acpi_obj_t objtype, cpu_acpi_state_dependency_t *sd)
+{
+ ACPI_BUFFER abuf;
+ ACPI_OBJECT *pkg, *elements;
+ int ret = -1;
+
+ /*
+ * Fetch the dependencies (if present) for the CPU node.
+ * Since they are optional, non-existence is not a failure
+ * (it's up to the caller to determine how to handle non-existence).
+ */
+ abuf.Length = ACPI_ALLOCATE_BUFFER;
+ abuf.Pointer = NULL;
+ if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle,
+ cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
+ ACPI_TYPE_PACKAGE))) {
+ return (1);
+ }
+
+ pkg = abuf.Pointer;
+ if (pkg->Package.Count != 1) {
+ cmn_err(CE_NOTE, "!cpu_acpi: %s unsupported package "
+ "count %d.", cpu_acpi_obj_attrs[objtype].name,
+ pkg->Package.Count);
+ goto out;
+ }
+
+ if (pkg->Package.Elements[0].Type != ACPI_TYPE_PACKAGE ||
+ pkg->Package.Elements[0].Package.Count != 5) {
+ cmn_err(CE_NOTE, "!cpu_acpi: Unexpected data in %s package.",
+ cpu_acpi_obj_attrs[objtype].name);
+ goto out;
+ }
+ elements = pkg->Package.Elements[0].Package.Elements;
+ if (elements[0].Integer.Value != 5 || elements[1].Integer.Value != 0) {
+ cmn_err(CE_NOTE, "!cpu_acpi: Unexpected %s revision.",
+ cpu_acpi_obj_attrs[objtype].name);
+ goto out;
+ }
+
+ sd->sd_entries = elements[0].Integer.Value;
+ sd->sd_revision = elements[1].Integer.Value;
+ sd->sd_domain = elements[2].Integer.Value;
+ sd->sd_type = elements[3].Integer.Value;
+ sd->sd_num = elements[4].Integer.Value;
+
+ ret = 0;
+out:
+ AcpiOsFree(abuf.Pointer);
+ return (ret);
+}
+
+/*
+ * Cache the ACPI _PSD data. The _PSD data defines P-state CPU dependencies
+ * (think CPU domains).
+ */
+static int
+cpu_acpi_cache_psd(cpu_acpi_handle_t handle)
+{
+ cpu_acpi_psd_t *psd;
+ int ret;
+
+ CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSD_CACHED);
+ psd = &CPU_ACPI_PSD(handle);
+ ret = cpu_acpi_cache_state_dependencies(handle, PSD_OBJ, psd);
+ if (ret == 0)
+ CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSD_CACHED);
+ return (ret);
+
+}
+
+/*
+ * Cache the ACPI _TSD data. The _TSD data defines T-state CPU dependencies
+ * (think CPU domains).
+ */
+static int
+cpu_acpi_cache_tsd(cpu_acpi_handle_t handle)
+{
+ cpu_acpi_tsd_t *tsd;
+ int ret;
+
+ CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSD_CACHED);
+ tsd = &CPU_ACPI_TSD(handle);
+ ret = cpu_acpi_cache_state_dependencies(handle, TSD_OBJ, tsd);
+ if (ret == 0)
+ CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSD_CACHED);
+ return (ret);
+
+}
+
+static void
+cpu_acpi_cache_pstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
+{
+ cpu_acpi_pstate_t *pstate;
+ ACPI_OBJECT *q, *l;
+ int i, j;
+
+ CPU_ACPI_PSTATES_COUNT(handle) = cnt;
+ CPU_ACPI_PSTATES(handle) = kmem_zalloc(CPU_ACPI_PSTATES_SIZE(cnt),
+ KM_SLEEP);
+ pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
+ for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
+ uint32_t *up;
+
+ q = obj->Package.Elements[i].Package.Elements;
+
+ /*
+ * Skip duplicate entries.
+ */
+ if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
+ continue;
+
+ up = (uint32_t *)pstate;
+ for (j = 0; j < CPU_ACPI_PSS_CNT; j++)
+ up[j] = q[j].Integer.Value;
+ pstate++;
+ cnt--;
+ }
+}
+
+static void
+cpu_acpi_cache_tstate(cpu_acpi_handle_t handle, ACPI_OBJECT *obj, int cnt)
+{
+ cpu_acpi_tstate_t *tstate;
+ ACPI_OBJECT *q, *l;
+ int i, j;
+
+ CPU_ACPI_TSTATES_COUNT(handle) = cnt;
+ CPU_ACPI_TSTATES(handle) = kmem_zalloc(CPU_ACPI_TSTATES_SIZE(cnt),
+ KM_SLEEP);
+ tstate = (cpu_acpi_tstate_t *)CPU_ACPI_TSTATES(handle);
+ for (i = 0, l = NULL; i < obj->Package.Count && cnt > 0; i++, l = q) {
+ uint32_t *up;
+
+ q = obj->Package.Elements[i].Package.Elements;
+
+ /*
+ * Skip duplicate entries.
+ */
+ if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
+ continue;
+
+ up = (uint32_t *)tstate;
+ for (j = 0; j < CPU_ACPI_TSS_CNT; j++)
+ up[j] = q[j].Integer.Value;
+ tstate++;
+ cnt--;
+ }
+}
+
+/*
+ * Cache the _PSS or _TSS data.
+ */
+static int
+cpu_acpi_cache_supported_states(cpu_acpi_handle_t handle,
+ cpu_acpi_obj_t objtype, int fcnt)
+{
+ ACPI_BUFFER abuf;
+ ACPI_OBJECT *obj, *q, *l;
+ boolean_t eot = B_FALSE;
+ int ret = -1;
+ int cnt;
+ int i, j;
+
+ /*
+ * Fetch the data (if present) for the CPU node.
+ */
+ abuf.Length = ACPI_ALLOCATE_BUFFER;
+ abuf.Pointer = NULL;
+ if (ACPI_FAILURE(AcpiEvaluateObjectTyped(handle->cs_handle,
+ cpu_acpi_obj_attrs[objtype].name, NULL, &abuf,
+ ACPI_TYPE_PACKAGE))) {
+ cmn_err(CE_NOTE, "!cpu_acpi: %s package not found.",
+ cpu_acpi_obj_attrs[objtype].name);
+ return (1);
+ }
+ obj = abuf.Pointer;
+ if (obj->Package.Count < 2) {
+ cmn_err(CE_NOTE, "!cpu_acpi: %s package bad count %d.",
+ cpu_acpi_obj_attrs[objtype].name, obj->Package.Count);
+ goto out;
+ }
+
+ /*
+ * Does the package look coherent?
+ */
+ cnt = 0;
+ for (i = 0, l = NULL; i < obj->Package.Count; i++, l = q) {
+ if (obj->Package.Elements[i].Type != ACPI_TYPE_PACKAGE ||
+ obj->Package.Elements[i].Package.Count != fcnt) {
+ cmn_err(CE_NOTE, "!cpu_acpi: "
+ "Unexpected data in %s package.",
+ cpu_acpi_obj_attrs[objtype].name);
+ goto out;
+ }
+
+ q = obj->Package.Elements[i].Package.Elements;
+ for (j = 0; j < fcnt; j++) {
+ if (q[j].Type != ACPI_TYPE_INTEGER) {
+ cmn_err(CE_NOTE, "!cpu_acpi: "
+ "%s element invalid (type)",
+ cpu_acpi_obj_attrs[objtype].name);
+ goto out;
+ }
+ }
+
+ /*
+ * Ignore duplicate entries.
+ */
+ if (l != NULL && l[0].Integer.Value == q[0].Integer.Value)
+ continue;
+
+ /*
+ * Some supported state tables are larger than required
+ * and unused elements are filled with patterns
+ * of 0xff. Simply check here for frequency = 0xffff
+ * and stop counting if found.
+ */
+ if (q[0].Integer.Value == 0xffff) {
+ eot = B_TRUE;
+ continue;
+ }
+
+ /*
+ * We should never find a valid entry after we've hit
+ * an end-of-table entry.
+ */
+ if (eot) {
+ cmn_err(CE_NOTE, "!cpu_acpi: "
+ "Unexpected data in %s package after eot.",
+ cpu_acpi_obj_attrs[objtype].name);
+ goto out;
+ }
+
+ /*
+ * states must be defined in order from highest to lowest.
+ */
+ if (l != NULL && l[0].Integer.Value < q[0].Integer.Value) {
+ cmn_err(CE_NOTE, "!cpu_acpi: "
+ "%s package state definitions out of order.",
+ cpu_acpi_obj_attrs[objtype].name);
+ goto out;
+ }
+
+ /*
+ * This entry passes.
+ */
+ cnt++;
+ }
+ if (cnt == 0)
+ goto out;
+
+ /*
+ * Yes, fill in the structure.
+ */
+ ASSERT(objtype == PSS_OBJ || objtype == TSS_OBJ);
+ (objtype == PSS_OBJ) ? cpu_acpi_cache_pstate(handle, obj, cnt) :
+ cpu_acpi_cache_tstate(handle, obj, cnt);
+
+ ret = 0;
+out:
+ AcpiOsFree(abuf.Pointer);
+ return (ret);
+}
+
+/*
+ * Cache the _PSS data. The _PSS data defines the different power levels
+ * supported by the CPU and the attributes associated with each power level
+ * (i.e., frequency, voltage, etc.). The power levels are numbered from
+ * highest to lowest. That is, the highest power level is _PSS entry 0
+ * and the lowest power level is the last _PSS entry.
+ */
+static int
+cpu_acpi_cache_pstates(cpu_acpi_handle_t handle)
+{
+ int ret;
+
+ CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PSS_CACHED);
+ ret = cpu_acpi_cache_supported_states(handle, PSS_OBJ,
+ CPU_ACPI_PSS_CNT);
+ if (ret == 0)
+ CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PSS_CACHED);
+ return (ret);
+}
+
+/*
+ * Cache the _TSS data. The _TSS data defines the different freq throttle
+ * levels supported by the CPU and the attributes associated with each
+ * throttle level (i.e., frequency throttle percentage, voltage, etc.).
+ * The throttle levels are numbered from highest to lowest.
+ */
+static int
+cpu_acpi_cache_tstates(cpu_acpi_handle_t handle)
+{
+ int ret;
+
+ CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TSS_CACHED);
+ ret = cpu_acpi_cache_supported_states(handle, TSS_OBJ,
+ CPU_ACPI_TSS_CNT);
+ if (ret == 0)
+ CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TSS_CACHED);
+ return (ret);
+}
+
+/*
+ * Cache the ACPI CPU present capabilities data objects.
+ */
+static int
+cpu_acpi_cache_present_capabilities(cpu_acpi_handle_t handle,
+ cpu_acpi_obj_t objtype, cpu_acpi_present_capabilities_t *pc)
+
+{
+ ACPI_BUFFER abuf;
+ ACPI_OBJECT *obj;
+
+ /*
+ * Fetch the present capabilities object (if present) for the CPU node.
+ * Since they are optional, non-existence is not a failure.
+ */
+ abuf.Length = ACPI_ALLOCATE_BUFFER;
+ abuf.Pointer = NULL;
+ if (ACPI_FAILURE(AcpiEvaluateObject(handle->cs_handle,
+ cpu_acpi_obj_attrs[objtype].name, NULL, &abuf))) {
+ *pc = 0;
+ return (1);
+ }
+
+ obj = (ACPI_OBJECT *)abuf.Pointer;
+ *pc = obj->Integer.Value;
+ AcpiOsFree(abuf.Pointer);
+ return (0);
+}
+
+/*
+ * Cache the _PPC data. The _PPC simply contains an integer value which
+ * represents the highest power level that a CPU should transition to.
+ * That is, it's an index into the array of _PSS entries and will be
+ * greater than or equal to zero.
+ */
+void
+cpu_acpi_cache_ppc(cpu_acpi_handle_t handle)
+{
+ cpu_acpi_ppc_t *ppc;
+ int ret;
+
+ CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_PPC_CACHED);
+ ppc = &CPU_ACPI_PPC(handle);
+ ret = cpu_acpi_cache_present_capabilities(handle, PPC_OBJ, ppc);
+ if (ret == 0)
+ CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_PPC_CACHED);
+}
+
+/*
+ * Cache the _TPC data. The _TPC simply contains an integer value which
+ * represents the throttle level that a CPU should transition to.
+ * That is, it's an index into the array of _TSS entries and will be
+ * greater than or equal to zero.
+ */
+void
+cpu_acpi_cache_tpc(cpu_acpi_handle_t handle)
+{
+ cpu_acpi_tpc_t *tpc;
+ int ret;
+
+ CPU_ACPI_OBJ_IS_NOT_CACHED(handle, CPU_ACPI_TPC_CACHED);
+ tpc = &CPU_ACPI_TPC(handle);
+ ret = cpu_acpi_cache_present_capabilities(handle, TPC_OBJ, tpc);
+ if (ret == 0)
+ CPU_ACPI_OBJ_IS_CACHED(handle, CPU_ACPI_TPC_CACHED);
+}
+
+/*
+ * Cache the _PCT, _PSS, _PSD and _PPC data.
+ */
+int
+cpu_acpi_cache_pstate_data(cpu_acpi_handle_t handle)
+{
+ if (cpu_acpi_cache_pct(handle) < 0) {
+ cmn_err(CE_WARN, "!cpu_acpi: error parsing _PCT for "
+ "CPU instance %d", ddi_get_instance(handle->cs_dip));
+ return (-1);
+ }
+
+ if (cpu_acpi_cache_pstates(handle) != 0) {
+ cmn_err(CE_WARN, "!cpu_acpi: error parsing _PSS for "
+ "CPU instance %d", ddi_get_instance(handle->cs_dip));
+ return (-1);
+ }
+
+ if (cpu_acpi_cache_psd(handle) < 0) {
+ cmn_err(CE_WARN, "!cpu_acpi: error parsing _PSD for "
+ "CPU instance %d", ddi_get_instance(handle->cs_dip));
+ return (-1);
+ }
+
+ cpu_acpi_cache_ppc(handle);
+
+ return (0);
+}
+
+void
+cpu_acpi_free_pstate_data(cpu_acpi_handle_t handle)
+{
+ if (handle != NULL) {
+ if (CPU_ACPI_PSTATES(handle)) {
+ kmem_free(CPU_ACPI_PSTATES(handle),
+ CPU_ACPI_PSTATES_SIZE(
+ CPU_ACPI_PSTATES_COUNT(handle)));
+ CPU_ACPI_PSTATES(handle) = NULL;
+ }
+ }
+}
+
+/*
+ * Cache the _PTC, _TSS, _TSD and _TPC data.
+ */
+int
+cpu_acpi_cache_tstate_data(cpu_acpi_handle_t handle)
+{
+ if (cpu_acpi_cache_ptc(handle) < 0) {
+ cmn_err(CE_WARN, "!cpu_acpi: error parsing _PTC for "
+ "CPU instance %d", ddi_get_instance(handle->cs_dip));
+ return (-1);
+ }
+
+ if (cpu_acpi_cache_tstates(handle) != 0) {
+ cmn_err(CE_WARN, "!cpu_acpi: error parsing _TSS for "
+ "CPU instance %d", ddi_get_instance(handle->cs_dip));
+ return (-1);
+ }
+
+ if (cpu_acpi_cache_tsd(handle) < 0) {
+ cmn_err(CE_WARN, "!cpu_acpi: error parsing _TSD for "
+ "CPU instance %d", ddi_get_instance(handle->cs_dip));
+ return (-1);
+ }
+
+ cpu_acpi_cache_tpc(handle);
+
+ return (0);
+}
+
+void
+cpu_acpi_free_tstate_data(cpu_acpi_handle_t handle)
+{
+ if (handle != NULL) {
+ if (CPU_ACPI_TSTATES(handle)) {
+ kmem_free(CPU_ACPI_TSTATES(handle),
+ CPU_ACPI_TSTATES_SIZE(
+ CPU_ACPI_TSTATES_COUNT(handle)));
+ CPU_ACPI_TSTATES(handle) = NULL;
+ }
+ }
+}
+
+/*
+ * Register a handler for processor change notifications.
+ */
+void
+cpu_acpi_install_notify_handler(cpu_acpi_handle_t handle,
+ ACPI_NOTIFY_HANDLER handler, dev_info_t *dip)
+{
+ char path[MAXNAMELEN];
+ if (ACPI_FAILURE(AcpiInstallNotifyHandler(handle->cs_handle,
+ ACPI_DEVICE_NOTIFY, handler, dip)))
+ cmn_err(CE_NOTE, "!cpu_acpi: Unable to register "
+ "notify handler for %s", ddi_pathname(dip, path));
+}
+
+/*
+ * Write _PDC.
+ */
+int
+cpu_acpi_write_pdc(cpu_acpi_handle_t handle, uint32_t revision, uint32_t count,
+ uint32_t *capabilities)
+{
+ ACPI_OBJECT obj;
+ ACPI_OBJECT_LIST list = { 1, &obj};
+ uint32_t *buffer;
+ uint32_t *bufptr;
+ uint32_t bufsize;
+ int i;
+
+ bufsize = (count + 2) * sizeof (uint32_t);
+ buffer = kmem_zalloc(bufsize, KM_SLEEP);
+ buffer[0] = revision;
+ buffer[1] = count;
+ bufptr = &buffer[2];
+ for (i = 0; i < count; i++)
+ *bufptr++ = *capabilities++;
+
+ obj.Type = ACPI_TYPE_BUFFER;
+ obj.Buffer.Length = bufsize;
+ obj.Buffer.Pointer = (void *)buffer;
+
+ /*
+ * _PDC is optional, so don't log failure.
+ */
+ if (ACPI_FAILURE(AcpiEvaluateObject(handle->cs_handle, "_PDC",
+ &list, NULL))) {
+ kmem_free(buffer, bufsize);
+ return (-1);
+ }
+
+ kmem_free(buffer, bufsize);
+ return (0);
+}
+
+/*
+ * Write to system IO port.
+ */
+int
+cpu_acpi_write_port(ACPI_IO_ADDRESS address, uint32_t value, uint32_t width)
+{
+ if (ACPI_FAILURE(AcpiOsWritePort(address, value, width))) {
+ cmn_err(CE_NOTE, "cpu_acpi: error writing system IO port "
+ "%lx.", (long)address);
+ return (-1);
+ }
+ return (0);
+}
+
+/*
+ * Read from a system IO port.
+ */
+int
+cpu_acpi_read_port(ACPI_IO_ADDRESS address, uint32_t *value, uint32_t width)
+{
+ if (ACPI_FAILURE(AcpiOsReadPort(address, value, width))) {
+ cmn_err(CE_NOTE, "cpu_acpi: error reading system IO port "
+ "%lx.", (long)address);
+ return (-1);
+ }
+ return (0);
+}
+
+/*
+ * Return supported frequencies.
+ */
+uint_t
+cpu_acpi_get_speeds(cpu_acpi_handle_t handle, int **speeds)
+{
+ cpu_acpi_pstate_t *pstate;
+ int *hspeeds;
+ uint_t nspeeds;
+ int i;
+
+ nspeeds = CPU_ACPI_PSTATES_COUNT(handle);
+ pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
+ hspeeds = kmem_zalloc(nspeeds * sizeof (int), KM_SLEEP);
+ for (i = 0; i < nspeeds; i++) {
+ hspeeds[i] = CPU_ACPI_FREQ(pstate);
+ pstate++;
+ }
+ *speeds = hspeeds;
+ return (nspeeds);
+}
+
+/*
+ * Free resources allocated by cpu_acpi_get_speeds().
+ */
+void
+cpu_acpi_free_speeds(int *speeds, uint_t nspeeds)
+{
+ kmem_free(speeds, nspeeds * sizeof (int));
+}
+
+/*
+ * Map the dip to an ACPI handle for the device.
+ */
+cpu_acpi_handle_t
+cpu_acpi_init(dev_info_t *dip)
+{
+ cpu_acpi_handle_t handle;
+
+ handle = kmem_zalloc(sizeof (cpu_acpi_state_t), KM_SLEEP);
+
+ if (ACPI_FAILURE(acpica_get_handle(dip, &handle->cs_handle))) {
+ kmem_free(handle, sizeof (cpu_acpi_state_t));
+ return (NULL);
+ }
+ handle->cs_dip = dip;
+ return (handle);
+}
+
+/*
+ * Free any resources.
+ */
+void
+cpu_acpi_fini(cpu_acpi_handle_t handle)
+{
+ if (handle)
+ kmem_free(handle, sizeof (cpu_acpi_state_t));
+}
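
The rewritten cpu_acpi.c above pairs the P-state objects (_PCT/_PSS/_PSD/_PPC) with the
new T-state objects (_PTC/_TSS/_TSD/_TPC) behind common caching helpers. A rough usage
sketch of the exported entry points follows; the function name cache_cpu_acpi_data and
its error handling are illustrative, and the real attach-time caller is cpudrv_mach.c,
which appears later in this patch:

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpu_acpi.h>

/*
 * Illustrative caller of the cpu_acpi interfaces above; simplified from
 * what cpudrv_mach.c does at attach time.
 */
static int
cache_cpu_acpi_data(dev_info_t *dip)
{
        cpu_acpi_handle_t handle;

        if ((handle = cpu_acpi_init(dip)) == NULL)
                return (DDI_FAILURE);

        /* P-states: cache _PCT, _PSS, _PSD and _PPC */
        if (cpu_acpi_cache_pstate_data(handle) != 0) {
                cpu_acpi_fini(handle);
                return (DDI_FAILURE);
        }

        /* T-states: cache _PTC, _TSS, _TSD and _TPC */
        if (cpu_acpi_cache_tstate_data(handle) != 0) {
                cpu_acpi_free_pstate_data(handle);
                cpu_acpi_fini(handle);
                return (DDI_FAILURE);
        }

        /* ... consumers read CPU_ACPI_PSTATES()/CPU_ACPI_TSTATES() ... */

        cpu_acpi_free_tstate_data(handle);
        cpu_acpi_free_pstate_data(handle);
        cpu_acpi_fini(handle);
        return (DDI_SUCCESS);
}
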
diff --git a/usr/src/uts/i86pc/io/cpudrv/cpudrv_amd.c b/usr/src/uts/i86pc/io/cpudrv/cpudrv_amd.c
new file mode 100644
index 0000000000..21dd88980c
--- /dev/null
+++ b/usr/src/uts/i86pc/io/cpudrv/cpudrv_amd.c
@@ -0,0 +1,51 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * AMD specific CPU power management support.
+ */
+
+#include <sys/x86_archext.h>
+#include <sys/cpudrv_mach.h>
+#include <sys/cpu_acpi.h>
+#include <sys/pwrnow.h>
+
+boolean_t
+cpudrv_amd_init(cpudrv_devstate_t *cpudsp)
+{
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+
+ /* AMD? */
+ if (x86_vendor != X86_VENDOR_AMD)
+ return (B_FALSE);
+
+ /*
+ * If we support PowerNow! on this processor, then set the
+ * correct pstate_ops for the processor.
+ */
+ mach_state->cpupm_pstate_ops = pwrnow_supported() ? &pwrnow_ops : NULL;
+
+ return (B_TRUE);
+}
diff --git a/usr/src/uts/i86pc/io/cpudrv/cpudrv_intel.c b/usr/src/uts/i86pc/io/cpudrv/cpudrv_intel.c
new file mode 100644
index 0000000000..0d2bc20c8e
--- /dev/null
+++ b/usr/src/uts/i86pc/io/cpudrv/cpudrv_intel.c
@@ -0,0 +1,93 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Intel specific CPU power management support.
+ */
+
+#include <sys/x86_archext.h>
+#include <sys/cpudrv_mach.h>
+#include <sys/cpu_acpi.h>
+#include <sys/speedstep.h>
+#include <sys/cpudrv_throttle.h>
+
+/*
+ * The Intel Processor Driver Capabilities (_PDC).
+ * See Intel Processor Vendor-Specific ACPI Interface Specification
+ * for details.
+ */
+#define CPUDRV_INTEL_PDC_REVISION 0x1
+#define CPUDRV_INTEL_PDC_PS_MSR 0x0001
+#define CPUDRV_INTEL_PDC_C1_HALT 0x0002
+#define CPUDRV_INTEL_PDC_TS_MSR 0x0004
+#define CPUDRV_INTEL_PDC_MP 0x0008
+#define CPUDRV_INTEL_PDC_PSD 0x0020
+#define CPUDRV_INTEL_PDC_TSD 0x0080
+
+static uint32_t cpudrv_intel_pdccap = 0;
+
+boolean_t
+cpudrv_intel_init(cpudrv_devstate_t *cpudsp)
+{
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ uint_t family;
+ uint_t model;
+
+ if (x86_vendor != X86_VENDOR_Intel)
+ return (B_FALSE);
+
+ family = cpuid_getfamily(CPU);
+ model = cpuid_getmodel(CPU);
+
+ /*
+ * If we support SpeedStep on this processor, then set the
+ * correct pstate_ops for the processor and enable appropriate
+ * _PDC bits.
+ */
+ if (speedstep_supported(family, model)) {
+ mach_state->cpupm_pstate_ops = &speedstep_ops;
+ cpudrv_intel_pdccap = CPUDRV_INTEL_PDC_PS_MSR |
+ CPUDRV_INTEL_PDC_MP | CPUDRV_INTEL_PDC_PSD;
+ } else {
+ mach_state->cpupm_pstate_ops = NULL;
+ }
+
+ /*
+ * Set the correct tstate_ops for the processor and
+ * enable appropriate _PDC bits.
+ */
+ mach_state->cpupm_tstate_ops = &cpudrv_throttle_ops;
+ cpudrv_intel_pdccap |= CPUDRV_INTEL_PDC_TS_MSR |
+ CPUDRV_INTEL_PDC_TSD;
+
+ /*
+ * _PDC support is optional and the driver should
+ * function even if the _PDC write fails.
+ */
+ (void) cpu_acpi_write_pdc(mach_state->acpi_handle,
+ CPUDRV_INTEL_PDC_REVISION, 1, &cpudrv_intel_pdccap);
+
+ return (B_TRUE);
+}
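
To make the _PDC write above concrete, here is the capability word worked out for the
SpeedStep-capable case, using only the CPUDRV_INTEL_PDC_* values defined in this file
(the buffer layout comes from cpu_acpi_write_pdc() earlier in the patch):

/*
 * Worked example (SpeedStep supported):
 *
 *   P-state bits:  PS_MSR (0x0001) | MP (0x0008) | PSD (0x0020) = 0x0029
 *   T-state bits:  TS_MSR (0x0004) | TSD (0x0080)               = 0x0084
 *   cpudrv_intel_pdccap = 0x0029 | 0x0084                       = 0x00ad
 *
 * cpu_acpi_write_pdc(handle, 0x1, 1, &cpudrv_intel_pdccap) then evaluates
 * _PDC with the buffer { revision = 0x1, count = 1, 0x00ad }.
 */
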
diff --git a/usr/src/uts/i86pc/io/cpudrv/cpudrv_mach.c b/usr/src/uts/i86pc/io/cpudrv/cpudrv_mach.c
new file mode 100644
index 0000000000..4380f3cd10
--- /dev/null
+++ b/usr/src/uts/i86pc/io/cpudrv/cpudrv_mach.c
@@ -0,0 +1,506 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * CPU power management driver support for i86pc.
+ */
+
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/cpupm.h>
+#include <sys/cpudrv_mach.h>
+#include <sys/machsystm.h>
+
+/*
+ * Constants used by the Processor Device Notification handler
+ * that identify what kind of change has occurred. We currently
+ * handle PPC_CHANGE_NOTIFICATION and TPC_CHANGE_NOTIFICATION;
+ * CST_CHANGE_NOTIFICATION is ignored.
+ */
+#define PPC_CHANGE_NOTIFICATION 0x80
+#define CST_CHANGE_NOTIFICATION 0x81
+#define TPC_CHANGE_NOTIFICATION 0x82
+
+/*
+ * Note that our driver numbers the power levels from lowest to
+ * highest starting at 1 (i.e., the lowest power level is 1 and
+ * the highest power level is cpupm->num_spd). The x86 modules get
+ * their power levels from ACPI which numbers power levels from
+ * highest to lowest starting at 0 (i.e., the lowest power level
+ * is (cpupm->num_spd - 1) and the highest power level is 0). So to
+ * map one of our driver power levels to one understood by ACPI we
+ * simply subtract our driver power level from cpupm->num_spd. Likewise,
+ * to map an ACPI power level to the proper driver power level, we
+ * subtract the ACPI power level from cpupm->num_spd.
+ */
+#define PM_2_PLAT_LEVEL(cpupm, pm_level) (cpupm->num_spd - pm_level)
+#define PLAT_2_PM_LEVEL(cpupm, plat_level) (cpupm->num_spd - plat_level)
+
+extern boolean_t cpudrv_intel_init(cpudrv_devstate_t *);
+extern boolean_t cpudrv_amd_init(cpudrv_devstate_t *);
+
+typedef struct cpudrv_mach_vendor {
+ boolean_t (*cpuv_init)(cpudrv_devstate_t *);
+} cpudrv_mach_vendor_t;
+
+/*
+ * Table of supported vendors.
+ */
+static cpudrv_mach_vendor_t cpudrv_vendors[] = {
+ cpudrv_intel_init,
+ cpudrv_amd_init,
+ NULL
+};
+
+uint_t
+cpudrv_pm_get_speeds(cpudrv_devstate_t *cpudsp, int **speeds)
+{
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ return (cpu_acpi_get_speeds(mach_state->acpi_handle, speeds));
+}
+
+void
+cpudrv_pm_free_speeds(int *speeds, uint_t nspeeds)
+{
+ cpu_acpi_free_speeds(speeds, nspeeds);
+}
+
+/*
+ * Change CPU speed using interface provided by module.
+ */
+int
+cpudrv_pm_change_speed(cpudrv_devstate_t *cpudsp, cpudrv_pm_spd_t *new_spd)
+{
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ cpudrv_pm_t *cpupm;
+ uint32_t plat_level;
+ int ret;
+
+ if (!(mach_state->caps & CPUDRV_P_STATES))
+ return (DDI_FAILURE);
+ ASSERT(mach_state->cpupm_pstate_ops != NULL);
+ cpupm = &(cpudsp->cpudrv_pm);
+ plat_level = PM_2_PLAT_LEVEL(cpupm, new_spd->pm_level);
+ ret = mach_state->cpupm_pstate_ops->cpups_power(cpudsp, plat_level);
+ if (ret != 0)
+ return (DDI_FAILURE);
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Determine the cpu_id for the CPU device.
+ */
+boolean_t
+cpudrv_pm_get_cpu_id(dev_info_t *dip, processorid_t *cpu_id)
+{
+ return ((*cpu_id = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "reg", -1)) != -1);
+
+}
+
+/*
+ * P-states are enabled and all CPU instances initialized successfully.
+ */
+boolean_t
+cpudrv_pm_power_ready(void)
+{
+ return (cpupm_is_enabled(CPUPM_P_STATES) && cpupm_is_ready());
+}
+
+/*
+ * T-states are enabled and all CPU instances initialized successfully.
+ */
+boolean_t
+cpudrv_pm_throttle_ready(void)
+{
+ return (cpupm_is_enabled(CPUPM_T_STATES) && cpupm_is_ready());
+}
+
+/*
+ * Is the current thread the thread that is handling the
+ * PPC change notification?
+ */
+boolean_t
+cpudrv_pm_is_governor_thread(cpudrv_pm_t *cpupm)
+{
+ return (curthread == cpupm->pm_governor_thread);
+}
+
+/*
+ * Initialize the machine.
+ * See if a module exists for managing power for this CPU.
+ */
+boolean_t
+cpudrv_mach_pm_init(cpudrv_devstate_t *cpudsp)
+{
+ cpudrv_mach_vendor_t *vendors;
+ cpudrv_mach_state_t *mach_state;
+ int ret;
+
+ mach_state = cpudsp->mach_state =
+ kmem_zalloc(sizeof (cpudrv_mach_state_t), KM_SLEEP);
+ mach_state->caps = CPUDRV_NO_STATES;
+
+ mach_state->acpi_handle = cpu_acpi_init(cpudsp->dip);
+ if (mach_state->acpi_handle == NULL) {
+ cpudrv_mach_pm_free(cpudsp);
+ cmn_err(CE_WARN, "!cpudrv_mach_pm_init: instance %d: "
+ "unable to get ACPI handle",
+ ddi_get_instance(cpudsp->dip));
+ cmn_err(CE_NOTE, "!CPU power management will not function.");
+ return (B_FALSE);
+ }
+
+ /*
+ * Loop through the CPU management module table and see if
+ * any of the modules implement CPU power management
+ * for this CPU.
+ */
+ for (vendors = cpudrv_vendors; vendors->cpuv_init != NULL; vendors++) {
+ if (vendors->cpuv_init(cpudsp))
+ break;
+ }
+
+ /*
+ * Nope, we can't power manage this CPU.
+ */
+ if (vendors->cpuv_init == NULL) {
+ cpudrv_mach_pm_free(cpudsp);
+ return (B_FALSE);
+ }
+
+ /*
+ * If P-state support exists for this system, then initialize it.
+ */
+ if (mach_state->cpupm_pstate_ops != NULL) {
+ ret = mach_state->cpupm_pstate_ops->cpups_init(cpudsp);
+ if (ret != 0) {
+ cmn_err(CE_WARN, "!cpudrv_mach_pm_init: instance %d:"
+ " unable to initialize P-state support",
+ ddi_get_instance(cpudsp->dip));
+ mach_state->cpupm_pstate_ops = NULL;
+ cpupm_disable(CPUPM_P_STATES);
+ } else {
+ mach_state->caps |= CPUDRV_P_STATES;
+ }
+ }
+
+ if (mach_state->cpupm_tstate_ops != NULL) {
+ ret = mach_state->cpupm_tstate_ops->cputs_init(cpudsp);
+ if (ret != 0) {
+ cmn_err(CE_WARN, "!cpudrv_mach_pm_init: instance %d:"
+ " unable to initialize T-state support",
+ ddi_get_instance(cpudsp->dip));
+ mach_state->cpupm_tstate_ops = NULL;
+ cpupm_disable(CPUPM_T_STATES);
+ } else {
+ mach_state->caps |= CPUDRV_T_STATES;
+ }
+ }
+
+ if (mach_state->caps == CPUDRV_NO_STATES) {
+ cpudrv_mach_pm_free(cpudsp);
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Free any resources allocated by cpudrv_mach_pm_init().
+ */
+void
+cpudrv_mach_pm_free(cpudrv_devstate_t *cpudsp)
+{
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+
+ if (mach_state == NULL)
+ return;
+ if (mach_state->cpupm_pstate_ops != NULL) {
+ mach_state->cpupm_pstate_ops->cpups_fini(cpudsp);
+ mach_state->cpupm_pstate_ops = NULL;
+ }
+
+ if (mach_state->cpupm_tstate_ops != NULL) {
+ mach_state->cpupm_tstate_ops->cputs_fini(cpudsp);
+ mach_state->cpupm_tstate_ops = NULL;
+ }
+
+ if (mach_state->acpi_handle != NULL) {
+ cpu_acpi_fini(mach_state->acpi_handle);
+ mach_state->acpi_handle = NULL;
+ }
+
+ kmem_free(mach_state, sizeof (cpudrv_mach_state_t));
+ cpudsp->mach_state = NULL;
+}
+
+/*
+ * This routine changes the top speed to which the CPUs can transition by:
+ *
+ * - Resetting the up_spd for all speeds lower than the new top speed
+ * to point to the new top speed.
+ * - Updating the framework with a new "normal" (maximum power) for this
+ * device.
+ */
+void
+cpudrv_pm_set_topspeed(void *ctx, int plat_level)
+{
+ cpudrv_devstate_t *cpudsp;
+ cpudrv_pm_t *cpupm;
+ cpudrv_pm_spd_t *spd;
+ cpudrv_pm_spd_t *top_spd;
+ dev_info_t *dip;
+ int pm_level;
+ int instance;
+ int i;
+
+ dip = ctx;
+ instance = ddi_get_instance(dip);
+ cpudsp = ddi_get_soft_state(cpudrv_state, instance);
+ ASSERT(cpudsp != NULL);
+
+ mutex_enter(&cpudsp->lock);
+ cpupm = &(cpudsp->cpudrv_pm);
+ pm_level = PLAT_2_PM_LEVEL(cpupm, plat_level);
+ for (i = 0, spd = cpupm->head_spd; spd; i++, spd = spd->down_spd) {
+ /*
+ * Don't mess with speeds that are higher than the new
+ * top speed. They should be out of range anyway.
+ */
+ if (spd->pm_level > pm_level)
+ continue;
+ /*
+ * This is the new top speed.
+ */
+ if (spd->pm_level == pm_level)
+ top_spd = spd;
+
+ spd->up_spd = top_spd;
+ }
+ cpupm->targ_spd = top_spd;
+
+ cpupm->pm_governor_thread = curthread;
+
+ mutex_exit(&cpudsp->lock);
+
+ (void) pm_update_maxpower(dip, 0, top_spd->pm_level);
+}
+
+/*
+ * This routine reads the ACPI _PPC object. It's accessed as a callback
+ * by the ppm driver whenever a _PPC change notification is received.
+ */
+int
+cpudrv_pm_get_topspeed(void *ctx)
+{
+ cpudrv_mach_state_t *mach_state;
+ cpu_acpi_handle_t handle;
+ cpudrv_devstate_t *cpudsp;
+ dev_info_t *dip;
+ int instance;
+ int plat_level;
+
+ dip = ctx;
+ instance = ddi_get_instance(dip);
+ cpudsp = ddi_get_soft_state(cpudrv_state, instance);
+ ASSERT(cpudsp != NULL);
+ mach_state = cpudsp->mach_state;
+ handle = mach_state->acpi_handle;
+
+ cpu_acpi_cache_ppc(handle);
+ plat_level = CPU_ACPI_PPC(handle);
+ return (plat_level);
+}
+
+/*
+ * This routine reads the ACPI _TPC object. It's accessed as a callback
+ * by the cpu driver whenever a _TPC change notification is received.
+ */
+int
+cpudrv_pm_get_topthrottle(cpudrv_devstate_t *cpudsp)
+{
+ cpudrv_mach_state_t *mach_state;
+ cpu_acpi_handle_t handle;
+ int throtl_level;
+
+ mach_state = cpudsp->mach_state;
+ handle = mach_state->acpi_handle;
+
+ cpu_acpi_cache_tpc(handle);
+ throtl_level = CPU_ACPI_TPC(handle);
+ return (throtl_level);
+}
+
+/*
+ * Throttle a single CPU instance when a _TPC notification arrives.
+ */
+void
+cpudrv_pm_throttle_instance(cpudrv_devstate_t *cpudsp)
+{
+ cpudrv_mach_state_t *mach_state;
+ uint32_t new_level;
+ int ret;
+
+ ASSERT(cpudsp != NULL);
+ mach_state = cpudsp->mach_state;
+ if (!(mach_state->caps & CPUDRV_T_STATES))
+ return;
+ ASSERT(mach_state->cpupm_tstate_ops != NULL);
+
+ /*
+ * Get the new T-state throttle limit (_TPC)
+ */
+ new_level = cpudrv_pm_get_topthrottle(cpudsp);
+
+ /*
+ * Change the cpu throttling to the new level
+ */
+ ret = mach_state->cpupm_tstate_ops->cputs_throttle(cpudsp, new_level);
+ if (ret != 0) {
+ cmn_err(CE_WARN, "Cannot change the cpu throttling to the new"
+ " level: %d, Instance: %d", new_level, cpudsp->cpu_id);
+ }
+}
+
+/*
+ * Handle domain-wide CPU throttling when a _TPC notification arrives.
+ */
+void
+cpudrv_pm_manage_throttling(void *ctx)
+{
+ cpudrv_devstate_t *cpudsp;
+ cpudrv_mach_state_t *mach_state;
+ cpudrv_tstate_domain_t *domain;
+ cpudrv_tstate_domain_node_t *domain_node;
+ int instance;
+ boolean_t is_ready;
+
+ instance = ddi_get_instance((dev_info_t *)ctx);
+ cpudsp = ddi_get_soft_state(cpudrv_state, instance);
+ ASSERT(cpudsp != NULL);
+
+ /*
+ * We currently refuse to power manage if the CPU is not ready to
+ * take cross calls (cross calls fail silently if CPU is not ready
+ * for it).
+ *
+ * Additionally, for x86 platforms we cannot power manage
+ * any one instance, until all instances have been initialized.
+ * That's because we don't know what the CPU domains look like
+ * until all instances have been initialized.
+ */
+ is_ready = CPUDRV_PM_XCALL_IS_READY(cpudsp->cpu_id);
+ if (!is_ready) {
+ DPRINTF(D_POWER, ("cpudrv_pm_manage_throttling: instance %d: "
+ "CPU not ready for x-calls\n", instance));
+ } else if (!(is_ready = cpudrv_pm_throttle_ready())) {
+ DPRINTF(D_POWER, ("cpudrv_pm_manage_throttling: instance %d: "
+ "waiting for all CPUs to be ready\n", instance));
+ }
+ if (!is_ready) {
+ return;
+ }
+
+ mach_state = cpudsp->mach_state;
+ domain_node = mach_state->tstate_domain_node;
+ domain = domain_node->tdn_domain;
+
+ switch (domain->td_type) {
+ case CPU_ACPI_SW_ANY:
+ /*
+ * Just throttle the current instance and all other instances
+ * under the same domain will get throttled to the same level
+ */
+ cpudrv_pm_throttle_instance(cpudsp);
+ break;
+ case CPU_ACPI_HW_ALL:
+ case CPU_ACPI_SW_ALL:
+ /*
+ * Along with the current instance, throttle all the CPU's that
+ * belong to the same domain
+ */
+ mutex_enter(&domain->td_lock);
+ for (domain_node = domain->td_node; domain_node != NULL;
+ domain_node = domain_node->tdn_next)
+ cpudrv_pm_throttle_instance(domain_node->tdn_cpudsp);
+ mutex_exit(&domain->td_lock);
+ break;
+
+ default:
+ cmn_err(CE_WARN, "Not a valid coordination type (%x) to"
+ " throttle cpu", domain->td_domain);
+ break;
+ }
+}
+
+/*
+ * This notification handler is called whenever the ACPI _PPC
+ * object changes. The _PPC is a sort of governor on power levels.
+ * It sets an upper threshold on which, _PSS defined, power levels
+ * are usable. The _PPC value is dynamic and may change as properties
+ * (i.e., thermal or AC source) of the system change.
+ */
+/* ARGSUSED */
+static void
+cpudrv_pm_notify_handler(ACPI_HANDLE obj, UINT32 val, void *ctx)
+{
+ /*
+ * We handle _PPC and _TPC change notifications.
+ */
+ if (val == PPC_CHANGE_NOTIFICATION) {
+ cpudrv_pm_redefine_topspeed(ctx);
+ } else if (val == TPC_CHANGE_NOTIFICATION) {
+ cpudrv_pm_manage_throttling(ctx);
+ }
+}
+
+void
+cpudrv_pm_install_notify_handler(cpudrv_devstate_t *cpudsp, dev_info_t *dip)
+{
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ cpu_acpi_install_notify_handler(mach_state->acpi_handle,
+ cpudrv_pm_notify_handler, dip);
+}
+
+void
+cpudrv_pm_redefine_topspeed(void *ctx)
+{
+ /*
+ * This should never happen, unless ppm does not get loaded.
+ */
+ if (cpupm_redefine_topspeed == NULL) {
+ cmn_err(CE_WARN, "cpudrv_pm_redefine_topspeed: "
+ "cpupm_redefine_topspeed has not been initialized - "
+ "ignoring notification");
+ return;
+ }
+
+ /*
+ * ppm callback needs to handle redefinition for all CPUs in
+ * the domain.
+ */
+ (*cpupm_redefine_topspeed)(ctx);
+}
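
The power-level numbering comment near the top of cpudrv_mach.c above (driver levels count up from 1, ACPI levels count down from 0) is easy to sanity-check in isolation. Here is a minimal stand-alone sketch of the PM_2_PLAT_LEVEL/PLAT_2_PM_LEVEL arithmetic; the five-speed CPU is made up for illustration, and num_spd is passed explicitly instead of living in a cpudrv_pm_t.

#include <stdio.h>

/* Mirrors the macros in cpudrv_mach.c, with num_spd as a plain parameter. */
#define	PM_2_PLAT_LEVEL(num_spd, pm_level)	((num_spd) - (pm_level))
#define	PLAT_2_PM_LEVEL(num_spd, plat_level)	((num_spd) - (plat_level))

int
main(void)
{
	int num_spd = 5;	/* hypothetical number of supported speeds */
	int pm_level;

	/* Driver levels run 1 (slowest) .. num_spd (fastest). */
	for (pm_level = 1; pm_level <= num_spd; pm_level++) {
		int plat = PM_2_PLAT_LEVEL(num_spd, pm_level);

		(void) printf("driver %d -> ACPI %d -> driver %d\n",
		    pm_level, plat, PLAT_2_PM_LEVEL(num_spd, plat));
	}
	return (0);
}
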
diff --git a/usr/src/uts/i86pc/io/cpudrv/cpudrv_throttle.c b/usr/src/uts/i86pc/io/cpudrv/cpudrv_throttle.c
new file mode 100644
index 0000000000..d15c05e8d1
--- /dev/null
+++ b/usr/src/uts/i86pc/io/cpudrv/cpudrv_throttle.c
@@ -0,0 +1,342 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/x86_archext.h>
+#include <sys/machsystm.h>
+#include <sys/x_call.h>
+#include <sys/cpu_acpi.h>
+#include <sys/cpudrv_throttle.h>
+#include <sys/dtrace.h>
+#include <sys/sdt.h>
+
+static int cpudrv_throttle_init(cpudrv_devstate_t *);
+static void cpudrv_throttle_fini(cpudrv_devstate_t *);
+static int cpudrv_throttle(cpudrv_devstate_t *, uint32_t);
+
+cpudrv_tstate_ops_t cpudrv_throttle_ops = {
+ "Generic ACPI T-state Support",
+ cpudrv_throttle_init,
+ cpudrv_throttle_fini,
+ cpudrv_throttle
+};
+
+/*
+ * Error returns
+ */
+#define THROTTLE_RET_SUCCESS 0x00
+#define THROTTLE_RET_INCOMPLETE_DATA 0x01
+#define THROTTLE_RET_UNSUP_STATE 0x02
+#define THROTTLE_RET_TRANS_INCOMPLETE 0x03
+
+#define THROTTLE_LATENCY_WAIT 1
+
+/*
+ * MSR register for clock modulation
+ */
+#define IA32_CLOCK_MODULATION_MSR 0x19A
+
+/*
+ * Debugging support
+ */
+#ifdef DEBUG
+volatile int cpudrv_throttle_debug = 0;
+#define CTDEBUG(arglist) if (cpudrv_throttle_debug) printf arglist;
+#else
+#define CTDEBUG(arglist)
+#endif
+
+cpudrv_tstate_domain_t *cpudrv_tstate_domains = NULL;
+
+/*
+ * Allocate a T-state domain node, creating the domain if needed.
+ */
+static void
+cpudrv_alloc_tstate_domain(cpudrv_devstate_t *cpudsp)
+{
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ cpu_acpi_handle_t handle = mach_state->acpi_handle;
+ cpudrv_tstate_domain_t *dptr;
+ cpudrv_tstate_domain_node_t *nptr;
+ uint32_t domain;
+ uint32_t type;
+ cpu_t *cp;
+
+ if (CPU_ACPI_IS_OBJ_CACHED(handle, CPU_ACPI_TSD_CACHED)) {
+ domain = CPU_ACPI_TSD(handle).sd_domain;
+ type = CPU_ACPI_TSD(handle).sd_type;
+ } else {
+ mutex_enter(&cpu_lock);
+ cp = cpu[CPU->cpu_id];
+ domain = cpuid_get_chipid(cp);
+ mutex_exit(&cpu_lock);
+ type = CPU_ACPI_SW_ALL;
+ }
+
+ for (dptr = cpudrv_tstate_domains; dptr != NULL;
+ dptr = dptr->td_next) {
+ if (dptr->td_domain == domain)
+ break;
+ }
+
+ /* new domain is created and linked at the head */
+ if (dptr == NULL) {
+ dptr = kmem_zalloc(sizeof (cpudrv_tstate_domain_t), KM_SLEEP);
+ dptr->td_domain = domain;
+ dptr->td_type = type;
+ dptr->td_next = cpudrv_tstate_domains;
+ mutex_init(&dptr->td_lock, NULL, MUTEX_DRIVER, NULL);
+ cpudrv_tstate_domains = dptr;
+ }
+
+ /* new domain node is created and linked at the head of the domain */
+ nptr = kmem_zalloc(sizeof (cpudrv_tstate_domain_node_t), KM_SLEEP);
+ nptr->tdn_cpudsp = cpudsp;
+ nptr->tdn_domain = dptr;
+ nptr->tdn_next = dptr->td_node;
+ dptr->td_node = nptr;
+ mach_state->tstate_domain_node = nptr;
+}
+
+static void
+cpudrv_free_tstate_domains()
+{
+ cpudrv_tstate_domain_t *this_domain, *next_domain;
+ cpudrv_tstate_domain_node_t *this_node, *next_node;
+
+ this_domain = cpudrv_tstate_domains;
+ while (this_domain != NULL) {
+ next_domain = this_domain->td_next;
+
+ /* discard CPU node chain */
+ this_node = this_domain->td_node;
+ while (this_node != NULL) {
+ next_node = this_node->tdn_next;
+ kmem_free((void *)this_node,
+ sizeof (cpudrv_tstate_domain_node_t));
+ this_node = next_node;
+ }
+ mutex_destroy(&this_domain->td_lock);
+ kmem_free((void *)this_domain,
+ sizeof (cpudrv_tstate_domain_t));
+ this_domain = next_domain;
+ }
+ cpudrv_tstate_domains = NULL;
+}
+
+/*
+ * Write the _PTC ctrl register. How it is written depends upon the _PTC
+ * ACPI object value.
+ */
+static int
+write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
+{
+ cpu_acpi_ptc_t *ptc_ctrl;
+ uint64_t reg;
+ int ret = 0;
+
+ ptc_ctrl = CPU_ACPI_PTC_CTRL(handle);
+
+ switch (ptc_ctrl->cr_addrspace_id) {
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ /*
+ * Read current thermal state because reserved bits must be
+ * preserved, compose new value, and write it. The writable
+ * bits are 4:1 (1 to 4).
+ * Bits 3:1 => On-Demand Clock Modulation Duty Cycle
+ * Bit 4 => On-Demand Clock Modulation Enable
+ * Left shift ctrl by 1 to align with bits 1-4 of the MSR
+ */
+ reg = rdmsr(IA32_CLOCK_MODULATION_MSR);
+ reg &= ~((uint64_t)0x1E);
+ reg |= ctrl;
+ wrmsr(IA32_CLOCK_MODULATION_MSR, reg);
+ break;
+
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ ret = cpu_acpi_write_port(ptc_ctrl->cr_address, ctrl,
+ ptc_ctrl->cr_width);
+ break;
+
+ default:
+ DTRACE_PROBE1(throttle_ctrl_unsupported_type, uint8_t,
+ ptc_ctrl->cr_addrspace_id);
+
+ ret = -1;
+ }
+
+ DTRACE_PROBE1(throttle_ctrl_write, uint32_t, ctrl);
+ DTRACE_PROBE1(throttle_ctrl_write_err, int, ret);
+
+ return (ret);
+}
+
+static int
+read_status(cpu_acpi_handle_t handle, uint32_t *stat)
+{
+ cpu_acpi_ptc_t *ptc_stat;
+ uint64_t reg;
+ int ret = 0;
+
+ ptc_stat = CPU_ACPI_PTC_STATUS(handle);
+
+ switch (ptc_stat->cr_addrspace_id) {
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ reg = rdmsr(IA32_CLOCK_MODULATION_MSR);
+ *stat = reg & 0x1E;
+ ret = 0;
+ break;
+
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ ret = cpu_acpi_read_port(ptc_stat->cr_address, stat,
+ ptc_stat->cr_width);
+ break;
+
+ default:
+ DTRACE_PROBE1(throttle_status_unsupported_type, uint8_t,
+ ptc_stat->cr_addrspace_id);
+
+ return (-1);
+ }
+
+ DTRACE_PROBE1(throttle_status_read, uint32_t, *stat);
+ DTRACE_PROBE1(throttle_status_read_err, int, ret);
+
+ return (ret);
+}
+
+/*
+ * Transition the current processor to the requested throttling state.
+ */
+static void
+cpudrv_tstate_transition(int *ret, cpudrv_devstate_t *cpudsp,
+ uint32_t req_state)
+{
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ cpu_acpi_handle_t handle = mach_state->acpi_handle;
+ cpu_acpi_tstate_t *req_tstate;
+ uint32_t ctrl;
+ uint32_t stat;
+ int i;
+
+ req_tstate = (cpu_acpi_tstate_t *)CPU_ACPI_TSTATES(handle);
+ req_tstate += req_state;
+ DTRACE_PROBE1(throttle_transition, uint32_t,
+ CPU_ACPI_FREQPER(req_tstate));
+
+ /*
+ * Initiate the processor t-state change.
+ */
+ ctrl = CPU_ACPI_TSTATE_CTRL(req_tstate);
+ if (write_ctrl(handle, ctrl) != 0) {
+ *ret = THROTTLE_RET_UNSUP_STATE;
+ return;
+ }
+
+ /*
+ * If status is zero, then transition is synchronous and
+ * no status value comparison is required.
+ */
+ if (CPU_ACPI_TSTATE_STAT(req_tstate) == 0) {
+ *ret = THROTTLE_RET_SUCCESS;
+ return;
+ }
+
+ /* Wait until switch is complete, but bound the loop just in case. */
+ for (i = CPU_ACPI_TSTATE_TRANSLAT(req_tstate) * 2; i >= 0;
+ i -= THROTTLE_LATENCY_WAIT) {
+ if (read_status(handle, &stat) == 0 &&
+ CPU_ACPI_TSTATE_STAT(req_tstate) == stat)
+ break;
+ drv_usecwait(THROTTLE_LATENCY_WAIT);
+ }
+
+ if (CPU_ACPI_TSTATE_STAT(req_tstate) != stat) {
+ DTRACE_PROBE(throttle_transition_incomplete);
+ *ret = THROTTLE_RET_TRANS_INCOMPLETE;
+ } else {
+ *ret = THROTTLE_RET_SUCCESS;
+ }
+}
+
+static int
+cpudrv_throttle(cpudrv_devstate_t *cpudsp, uint32_t throtl_lvl)
+{
+ cpuset_t cpus;
+ int ret;
+
+ CPUSET_ONLY(cpus, cpudsp->cpu_id);
+
+ kpreempt_disable();
+ xc_call((xc_arg_t)&ret, (xc_arg_t)cpudsp, (xc_arg_t)throtl_lvl,
+ X_CALL_HIPRI, cpus, (xc_func_t)cpudrv_tstate_transition);
+ kpreempt_enable();
+
+ return (ret);
+}
+
+static int
+cpudrv_throttle_init(cpudrv_devstate_t *cpudsp)
+{
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ cpu_acpi_handle_t handle = mach_state->acpi_handle;
+ cpu_acpi_ptc_t *ptc_stat;
+
+ if (cpu_acpi_cache_tstate_data(handle) != 0) {
+ CTDEBUG(("Failed to cache T-state ACPI data\n"));
+ cpudrv_throttle_fini(cpudsp);
+ return (THROTTLE_RET_INCOMPLETE_DATA);
+ }
+
+ /*
+ * Check the address space used for transitions
+ */
+ ptc_stat = CPU_ACPI_PTC_STATUS(handle);
+ switch (ptc_stat->cr_addrspace_id) {
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ CTDEBUG(("T-State transitions will use fixed hardware\n"));
+ break;
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ CTDEBUG(("T-State transitions will use System IO\n"));
+ break;
+ default:
+ cmn_err(CE_WARN, "!_PTC conifgured for unsupported "
+ "address space type = %d.", ptc_stat->cr_addrspace_id);
+ return (THROTTLE_RET_INCOMPLETE_DATA);
+ }
+
+ cpudrv_alloc_tstate_domain(cpudsp);
+
+ return (THROTTLE_RET_SUCCESS);
+}
+
+static void
+cpudrv_throttle_fini(cpudrv_devstate_t *cpudsp)
+{
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ cpu_acpi_handle_t handle = mach_state->acpi_handle;
+
+ cpudrv_free_tstate_domains();
+ cpu_acpi_free_tstate_data(handle);
+}
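
The fixed-hardware paths in write_ctrl() and read_status() above only ever touch bits 1-4 of the clock-modulation MSR (mask 0x1E) and preserve everything else. The stand-alone sketch below replays that read-modify-write on an ordinary variable: no MSR is accessed, the sample register and control values are made up, and, as in the driver, the control value is assumed to already be encoded in bits 1-4.

#include <stdio.h>
#include <stdint.h>

#define	CLOCK_MOD_WRITABLE	0x1EULL	/* bits 1-4, as in cpudrv_throttle.c */

/* Mirrors the ACPI_ADR_SPACE_FIXED_HARDWARE branch of write_ctrl(). */
static uint64_t
apply_ctrl(uint64_t reg, uint64_t ctrl)
{
	reg &= ~CLOCK_MOD_WRITABLE;	/* clear duty cycle and enable bits */
	reg |= ctrl;			/* install the new _TSS control value */
	return (reg);
}

int
main(void)
{
	uint64_t reg = 0xF00ULL;	/* made-up "current register" value */
	uint64_t ctrl = 0x18ULL;	/* made-up control value (bits 3-4) */
	uint64_t after = apply_ctrl(reg, ctrl);

	/* read_status() reports only the writable bits back. */
	(void) printf("before 0x%llx, after 0x%llx, status 0x%llx\n",
	    (unsigned long long)reg, (unsigned long long)after,
	    (unsigned long long)(after & CLOCK_MOD_WRITABLE));
	return (0);
}
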
diff --git a/usr/src/uts/i86pc/io/pwrnow.c b/usr/src/uts/i86pc/io/cpudrv/pwrnow.c
index 9ef54d1e1c..b17007aa57 100644
--- a/usr/src/uts/i86pc/io/pwrnow.c
+++ b/usr/src/uts/i86pc/io/cpudrv/pwrnow.c
@@ -23,19 +23,32 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
+#include <sys/cpudrv_mach.h>
#include <sys/pwrnow.h>
#include <sys/cpu_acpi.h>
#include <sys/cpupm.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>
+static int pwrnow_init(cpudrv_devstate_t *);
+static void pwrnow_fini(cpudrv_devstate_t *);
+static int pwrnow_power(cpudrv_devstate_t *, uint32_t);
+
+/*
+ * Interfaces for modules implementing AMD's PowerNow!.
+ */
+cpudrv_pstate_ops_t pwrnow_ops = {
+ "PowerNow! Technology",
+ pwrnow_init,
+ pwrnow_fini,
+ pwrnow_power
+};
+
/*
* Error returns
*/
@@ -65,10 +78,6 @@ volatile int pwrnow_debug = 0;
#define PWRNOW_DEBUG(arglist)
#endif
-typedef struct pwrnow_state {
- uint32_t pn_state;
-} pwrnow_state_t;
-
/*
* Read the status register.
*/
@@ -81,7 +90,7 @@ read_status(cpu_acpi_handle_t handle, uint32_t *stat)
pct_stat = CPU_ACPI_PCT_STATUS(handle);
- switch (pct_stat->pc_addrspace_id) {
+ switch (pct_stat->cr_addrspace_id) {
case ACPI_ADR_SPACE_FIXED_HARDWARE:
reg = rdmsr(PWRNOW_PERF_STATUS_MSR);
*stat = reg & 0xFFFFFFFF;
@@ -90,7 +99,7 @@ read_status(cpu_acpi_handle_t handle, uint32_t *stat)
default:
DTRACE_PROBE1(pwrnow_status_unsupported_type, uint8_t,
- pct_stat->pc_addrspace_id);
+ pct_stat->cr_addrspace_id);
return (-1);
}
@@ -112,7 +121,7 @@ write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
pct_ctrl = CPU_ACPI_PCT_CTRL(handle);
- switch (pct_ctrl->pc_addrspace_id) {
+ switch (pct_ctrl->cr_addrspace_id) {
case ACPI_ADR_SPACE_FIXED_HARDWARE:
reg = ctrl;
wrmsr(PWRNOW_PERF_CTL_MSR, reg);
@@ -121,7 +130,7 @@ write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
default:
DTRACE_PROBE1(pwrnow_ctrl_unsupported_type, uint8_t,
- pct_ctrl->pc_addrspace_id);
+ pct_ctrl->cr_addrspace_id);
return (-1);
}
@@ -134,55 +143,54 @@ write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
/*
* Transition the current processor to the requested state.
*/
-void
+static void
pwrnow_pstate_transition(int *ret, cpudrv_devstate_t *cpudsp,
uint32_t req_state)
{
- pwrnow_state_t *pwrnow_state = cpudsp->module_state;
- cpu_acpi_handle_t handle = cpudsp->acpi_handle;
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ cpu_acpi_handle_t handle = mach_state->acpi_handle;
cpu_acpi_pstate_t *req_pstate;
uint32_t ctrl;
uint32_t stat;
- cpu_t *cp;
int i;
- req_pstate = CPU_ACPI_PSTATE(handle, req_state);
+ req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
+ req_pstate += req_state;
DTRACE_PROBE1(pwrnow_transition_freq, uint32_t,
CPU_ACPI_FREQ(req_pstate));
/*
* Initiate the processor p-state change.
*/
- ctrl = CPU_ACPI_CTRL(req_pstate);
+ ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
if (write_ctrl(handle, ctrl) != 0) {
*ret = PWRNOW_RET_UNSUP_STATE;
return;
}
/* Wait until switch is complete, but bound the loop just in case. */
- for (i = CPU_ACPI_TRANSLAT(req_pstate) * 2; i >= 0;
+ for (i = CPU_ACPI_PSTATE_TRANSLAT(req_pstate) * 2; i >= 0;
i -= PWRNOW_LATENCY_WAIT) {
if (read_status(handle, &stat) == 0 &&
- CPU_ACPI_STAT(req_pstate) == stat)
+ CPU_ACPI_PSTATE_STAT(req_pstate) == stat)
break;
drv_usecwait(PWRNOW_LATENCY_WAIT);
}
- if (CPU_ACPI_STAT(req_pstate) != stat) {
+ if (CPU_ACPI_PSTATE_STAT(req_pstate) != stat) {
DTRACE_PROBE(pwrnow_transition_incomplete);
*ret = PWRNOW_RET_TRANS_INCOMPLETE;
return;
}
- pwrnow_state->pn_state = req_state;
- cp = cpu[CPU->cpu_id];
- cp->cpu_curr_clock = ((uint64_t)
+ mach_state->pstate = req_state;
+ CPU->cpu_curr_clock = ((uint64_t)
CPU_ACPI_FREQ(req_pstate) * 1000000);
*ret = PWRNOW_RET_SUCCESS;
}
-int
+static int
pwrnow_power(cpudrv_devstate_t *cpudsp, uint32_t req_state)
{
cpuset_t cpus;
@@ -202,96 +210,48 @@ pwrnow_power(cpudrv_devstate_t *cpudsp, uint32_t req_state)
* Validate that this processor supports PowerNow! and if so,
* get the P-state data from ACPI and cache it.
*/
-int
+static int
pwrnow_init(cpudrv_devstate_t *cpudsp)
{
- pwrnow_state_t *pwrnow_state;
- cpu_acpi_handle_t handle;
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ cpu_acpi_handle_t handle = mach_state->acpi_handle;
cpu_acpi_pct_t *pct_stat;
- struct cpuid_regs cpu_regs;
cpu_t *cp;
int domain;
PWRNOW_DEBUG(("pwrnow_init: instance %d\n",
ddi_get_instance(cpudsp->dip)));
- /* AMD w/ CPUID support and rdmsr/wrmsr? */
- if (x86_vendor != X86_VENDOR_AMD ||
- !(x86_feature & X86_CPUID) ||
- !(x86_feature & X86_MSR)) {
- PWRNOW_DEBUG(("Either not AMD or feature not supported.\n"));
- return (PWRNOW_RET_NO_PM);
- }
-
/*
- * Get the Advanced Power Management Information.
+ * Cache the P-state specific ACPI data.
*/
- cpu_regs.cp_eax = 0x80000007;
- (void) __cpuid_insn(&cpu_regs);
-
- /*
- * We currently only support CPU power management of
- * processors that are P-state TSC invariant.
- */
- if (!(cpu_regs.cp_edx & AMD_CPUID_TSC_CONSTANT)) {
- PWRNOW_DEBUG(("No support for CPUs that are not P-state "
- "TSC invariant.\n"));
- return (PWRNOW_RET_NO_PM);
- }
-
- /*
- * We only support the "Fire and Forget" style of PowerNow! (i.e.,
- * single MSR write to change speed).
- */
- if (!(cpu_regs.cp_edx & AMD_CPUID_PSTATE_HARDWARE)) {
- PWRNOW_DEBUG(("Hardware P-State control is not supported.\n"));
- return (PWRNOW_RET_NO_PM);
- }
-
- /*
- * PowerNow! requires ACPI support. Get a handle
- * to the correct processor object for this dip.
- */
- handle = cpudsp->acpi_handle = cpu_acpi_init(cpudsp->dip);
- if (handle == NULL) {
- cmn_err(CE_WARN, "!pwrnow_init: instance %d: "
- "unable to get ACPI handle",
- ddi_get_instance(cpudsp->dip));
- cmn_err(CE_NOTE, "!CPU power management will not function.");
- return (PWRNOW_RET_NO_PM);
- }
-
- if (cpu_acpi_cache_data(handle) != 0) {
+ if (cpu_acpi_cache_pstate_data(handle) != 0) {
PWRNOW_DEBUG(("Failed to cache ACPI data\n"));
- cpu_acpi_fini(handle);
+ pwrnow_fini(cpudsp);
return (PWRNOW_RET_NO_PM);
}
pct_stat = CPU_ACPI_PCT_STATUS(handle);
- switch (pct_stat->pc_addrspace_id) {
+ switch (pct_stat->cr_addrspace_id) {
case ACPI_ADR_SPACE_FIXED_HARDWARE:
PWRNOW_DEBUG(("Transitions will use fixed hardware\n"));
break;
default:
cmn_err(CE_WARN, "!_PCT configured for unsupported "
- "addrspace = %d.", pct_stat->pc_addrspace_id);
+ "addrspace = %d.", pct_stat->cr_addrspace_id);
cmn_err(CE_NOTE, "!CPU power management will not function.");
- cpu_acpi_fini(handle);
+ pwrnow_fini(cpudsp);
return (PWRNOW_RET_NO_PM);
}
if (CPU_ACPI_IS_OBJ_CACHED(handle, CPU_ACPI_PSD_CACHED))
- domain = CPU_ACPI_PSD(handle).pd_domain;
+ domain = CPU_ACPI_PSD(handle).sd_domain;
else {
cp = cpu[CPU->cpu_id];
domain = cpuid_get_chipid(cp);
}
cpupm_add_cpu2dependency(cpudsp->dip, domain);
- pwrnow_state = kmem_zalloc(sizeof (pwrnow_state_t), KM_SLEEP);
- pwrnow_state->pn_state = NULL;
- cpudsp->module_state = pwrnow_state;
-
PWRNOW_DEBUG(("Instance %d succeeded.\n",
ddi_get_instance(cpudsp->dip)));
return (PWRNOW_RET_SUCCESS);
@@ -300,9 +260,51 @@ pwrnow_init(cpudrv_devstate_t *cpudsp)
/*
* Free resources allocated by pwrnow_init().
*/
-void
+static void
pwrnow_fini(cpudrv_devstate_t *cpudsp)
{
- cpu_acpi_fini(cpudsp->acpi_handle);
- kmem_free(cpudsp->module_state, sizeof (pwrnow_state_t));
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ cpu_acpi_handle_t handle = mach_state->acpi_handle;
+
+ cpupm_free_cpu_dependencies();
+ cpu_acpi_free_pstate_data(handle);
+}
+
+boolean_t
+pwrnow_supported()
+{
+ struct cpuid_regs cpu_regs;
+
+ /* Required features */
+ if (!(x86_feature & X86_CPUID) ||
+ !(x86_feature & X86_MSR)) {
+ PWRNOW_DEBUG(("No CPUID or MSR support."));
+ return (B_FALSE);
+ }
+
+ /*
+ * Get the Advanced Power Management Information.
+ */
+ cpu_regs.cp_eax = 0x80000007;
+ (void) __cpuid_insn(&cpu_regs);
+
+ /*
+ * We currently only support CPU power management of
+ * processors that are P-state TSC invariant
+ */
+ if (!(cpu_regs.cp_edx & AMD_CPUID_TSC_CONSTANT)) {
+ PWRNOW_DEBUG(("No support for CPUs that are not P-state "
+ "TSC invariant.\n"));
+ return (B_FALSE);
+ }
+
+ /*
+ * We only support the "Fire and Forget" style of PowerNow! (i.e.,
+ * single MSR write to change speed).
+ */
+ if (!(cpu_regs.cp_edx & AMD_CPUID_PSTATE_HARDWARE)) {
+ PWRNOW_DEBUG(("Hardware P-State control is not supported.\n"));
+ return (B_FALSE);
+ }
+ return (B_TRUE);
}
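
For reference, the feature test that pwrnow_supported() performs can be reproduced from user space with the compiler's CPUID intrinsic. This is only a sketch: it assumes GCC or Clang on x86, and the TSC-invariant (EDX bit 8) and hardware-P-state (EDX bit 7) positions in leaf 0x80000007 are the commonly documented values, not constants taken from this patch (AMD_CPUID_TSC_CONSTANT and AMD_CPUID_PSTATE_HARDWARE are defined in the tree's x86 headers rather than here).

#include <stdio.h>
#include <cpuid.h>	/* GCC/Clang __get_cpuid() */

/* Assumed bit positions in CPUID 0x80000007 EDX. */
#define	TSC_INVARIANT	(1u << 8)
#define	HW_PSTATE	(1u << 7)

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
		(void) printf("CPUID leaf 0x80000007 not available\n");
		return (1);
	}
	(void) printf("TSC invariant: %s, hardware P-states: %s\n",
	    (edx & TSC_INVARIANT) ? "yes" : "no",
	    (edx & HW_PSTATE) ? "yes" : "no");
	return (0);
}
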
diff --git a/usr/src/uts/i86pc/io/speedstep.c b/usr/src/uts/i86pc/io/cpudrv/speedstep.c
index feaedec36c..7d9724c69e 100644
--- a/usr/src/uts/i86pc/io/speedstep.c
+++ b/usr/src/uts/i86pc/io/cpudrv/speedstep.c
@@ -23,19 +23,32 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
+#include <sys/cpudrv_mach.h>
#include <sys/speedstep.h>
#include <sys/cpu_acpi.h>
#include <sys/cpupm.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>
+static int speedstep_init(cpudrv_devstate_t *);
+static void speedstep_fini(cpudrv_devstate_t *);
+static int speedstep_power(cpudrv_devstate_t *, uint32_t);
+
+/*
+ * Interfaces for modules implementing Intel's Enhanced SpeedStep.
+ */
+cpudrv_pstate_ops_t speedstep_ops = {
+ "Enhanced SpeedStep Technology",
+ speedstep_init,
+ speedstep_fini,
+ speedstep_power
+};
+
/*
* Error returns
*/
@@ -81,10 +94,6 @@ volatile int ess_debug = 0;
#define ESSDEBUG(arglist)
#endif
-typedef struct speedstep_state {
- uint32_t ss_state;
-} speedstep_state_t;
-
/*
* Note that SpeedStep support requires the following _PDC bits be
* enabled so that ACPI returns the proper objects. The requirement
@@ -111,21 +120,21 @@ read_status(cpu_acpi_handle_t handle, uint32_t *stat)
pct_stat = CPU_ACPI_PCT_STATUS(handle);
- switch (pct_stat->pc_addrspace_id) {
+ switch (pct_stat->cr_addrspace_id) {
case ACPI_ADR_SPACE_FIXED_HARDWARE:
reg = rdmsr(IA32_PERF_STAT_MSR);
- *stat = reg & 0xFFFF;
+ *stat = reg & 0x1E;
ret = 0;
break;
case ACPI_ADR_SPACE_SYSTEM_IO:
- ret = cpu_acpi_read_port(pct_stat->pc_address, stat,
- pct_stat->pc_width);
+ ret = cpu_acpi_read_port(pct_stat->cr_address, stat,
+ pct_stat->cr_width);
break;
default:
DTRACE_PROBE1(ess_status_unsupported_type, uint8_t,
- pct_stat->pc_addrspace_id);
+ pct_stat->cr_addrspace_id);
return (-1);
}
@@ -148,7 +157,7 @@ write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
pct_ctrl = CPU_ACPI_PCT_CTRL(handle);
- switch (pct_ctrl->pc_addrspace_id) {
+ switch (pct_ctrl->cr_addrspace_id) {
case ACPI_ADR_SPACE_FIXED_HARDWARE:
/*
* Read current power state because reserved bits must be
@@ -162,13 +171,13 @@ write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
break;
case ACPI_ADR_SPACE_SYSTEM_IO:
- ret = cpu_acpi_write_port(pct_ctrl->pc_address, ctrl,
- pct_ctrl->pc_width);
+ ret = cpu_acpi_write_port(pct_ctrl->cr_address, ctrl,
+ pct_ctrl->cr_width);
break;
default:
DTRACE_PROBE1(ess_ctrl_unsupported_type, uint8_t,
- pct_ctrl->pc_addrspace_id);
+ pct_ctrl->cr_addrspace_id);
return (-1);
}
@@ -185,29 +194,31 @@ void
speedstep_pstate_transition(int *ret, cpudrv_devstate_t *cpudsp,
uint32_t req_state)
{
- speedstep_state_t *speedstep_state = cpudsp->module_state;
- cpu_acpi_handle_t handle = cpudsp->acpi_handle;
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ cpu_acpi_handle_t handle = mach_state->acpi_handle;
cpu_acpi_pstate_t *req_pstate;
uint32_t ctrl;
uint32_t stat;
int i;
- req_pstate = CPU_ACPI_PSTATE(handle, req_state);
+ req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
+ req_pstate += req_state;
DTRACE_PROBE1(ess_transition, uint32_t, CPU_ACPI_FREQ(req_pstate));
/*
* Initiate the processor p-state change.
*/
- ctrl = CPU_ACPI_CTRL(req_pstate);
+ ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
if (write_ctrl(handle, ctrl) != 0) {
*ret = ESS_RET_UNSUP_STATE;
return;
}
/* Wait until switch is complete, but bound the loop just in case. */
- for (i = 0; i < ESS_MAX_LATENCY_MICROSECS; i += ESS_LATENCY_WAIT) {
+ for (i = CPU_ACPI_PSTATE_TRANSLAT(req_pstate) * 2; i >= 0;
+ i -= ESS_LATENCY_WAIT) {
if (read_status(handle, &stat) == 0 &&
- CPU_ACPI_STAT(req_pstate) == stat)
+ CPU_ACPI_PSTATE_STAT(req_pstate) == stat)
break;
drv_usecwait(ESS_LATENCY_WAIT);
}
@@ -215,13 +226,13 @@ speedstep_pstate_transition(int *ret, cpudrv_devstate_t *cpudsp,
DTRACE_PROBE(ess_transition_incomplete);
}
- speedstep_state->ss_state = req_state;
+ mach_state->pstate = req_state;
CPU->cpu_curr_clock =
(((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000));
*ret = ESS_RET_SUCCESS;
}
-int
+static int
speedstep_power(cpudrv_devstate_t *cpudsp, uint32_t req_state)
{
cpuset_t cpus;
@@ -241,89 +252,29 @@ speedstep_power(cpudrv_devstate_t *cpudsp, uint32_t req_state)
* Validate that this processor supports Speedstep and if so,
* get the P-state data from ACPI and cache it.
*/
-int
+static int
speedstep_init(cpudrv_devstate_t *cpudsp)
{
- speedstep_state_t *speedstep_state;
- cpu_acpi_handle_t handle;
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ cpu_acpi_handle_t handle = mach_state->acpi_handle;
cpu_acpi_pct_t *pct_stat;
- uint64_t reg;
- uint_t family;
- uint_t model;
- struct cpuid_regs cpu_regs;
cpu_t *cp;
int dependency;
ESSDEBUG(("speedstep_init: instance %d\n",
ddi_get_instance(cpudsp->dip)));
- /* Intel w/ CPUID support and rdmsr/wrmsr? */
- if (x86_vendor != X86_VENDOR_Intel ||
- !(x86_feature & X86_CPUID) ||
- !(x86_feature & X86_MSR)) {
- ESSDEBUG(("Either not Intel or feature not supported.\n"));
- return (ESS_RET_NO_PM);
- }
-
- /*
- * Enhanced Speedstep supported?
- */
- cpu_regs.cp_eax = 0x1;
- (void) __cpuid_insn(&cpu_regs);
- if (!(cpu_regs.cp_ecx & CPUID_INTC_ECX_EST)) {
- ESSDEBUG(("Enhanced Speedstep not supported.\n"));
- return (ESS_RET_NO_PM);
- }
-
- family = cpuid_getfamily(CPU);
- model = cpuid_getmodel(CPU);
- if (!((family == 0xf && model >= 0x3) ||
- (family == 0x6 && model >= 0xe))) {
- ESSDEBUG(("Variant TSC not supported.\n"));
- return (ESS_RET_NO_PM);
- }
-
- /*
- * If Enhanced Speedstep has not been enabled on the system,
- * then we probably should not override the BIOS setting.
- */
- reg = rdmsr(IA32_MISC_ENABLE_MSR);
- if (! (reg & IA32_MISC_ENABLE_EST)) {
- cmn_err(CE_NOTE, "!Enhanced Intel SpeedStep not enabled.");
- cmn_err(CE_NOTE, "!CPU power management will not function.");
- return (ESS_RET_NO_PM);
- }
-
/*
- * Enhanced Speedstep requires ACPI support. Get a handle
- * to the correct processor object for this dip.
+ * Cache the P-state specific ACPI data.
*/
- handle = cpudsp->acpi_handle = cpu_acpi_init(cpudsp->dip);
- if (handle == NULL) {
- cmn_err(CE_WARN, "!speedstep_init: instance %d: "
- "unable to get ACPI handle",
- ddi_get_instance(cpudsp->dip));
-
- cmn_err(CE_NOTE, "!CPU power management will not function.");
- return (ESS_RET_NO_PM);
- }
-
- /*
- * _PDC support is optional and the driver should
- * function even if the _PDC write fails.
- */
- if (cpu_acpi_write_pdc(handle, ESS_PDC_REVISION, 1,
- &ess_pdccap) != 0)
- ESSDEBUG(("Failed to write PDC\n"));
-
- if (cpu_acpi_cache_data(handle) != 0) {
+ if (cpu_acpi_cache_pstate_data(handle) != 0) {
ESSDEBUG(("Failed to cache ACPI data\n"));
- cpu_acpi_fini(handle);
+ speedstep_fini(cpudsp);
return (ESS_RET_NO_PM);
}
pct_stat = CPU_ACPI_PCT_STATUS(handle);
- switch (pct_stat->pc_addrspace_id) {
+ switch (pct_stat->cr_addrspace_id) {
case ACPI_ADR_SPACE_FIXED_HARDWARE:
ESSDEBUG(("Transitions will use fixed hardware\n"));
break;
@@ -332,14 +283,14 @@ speedstep_init(cpudrv_devstate_t *cpudsp)
break;
default:
cmn_err(CE_WARN, "!_PCT conifgured for unsupported "
- "addrspace = %d.", pct_stat->pc_addrspace_id);
+ "addrspace = %d.", pct_stat->cr_addrspace_id);
cmn_err(CE_NOTE, "!CPU power management will not function.");
- cpu_acpi_fini(handle);
+ speedstep_fini(cpudsp);
return (ESS_RET_NO_PM);
}
if (CPU_ACPI_IS_OBJ_CACHED(handle, CPU_ACPI_PSD_CACHED))
- dependency = CPU_ACPI_PSD(handle).pd_domain;
+ dependency = CPU_ACPI_PSD(handle).sd_domain;
else {
mutex_enter(&cpu_lock);
cp = cpu[CPU->cpu_id];
@@ -348,10 +299,6 @@ speedstep_init(cpudrv_devstate_t *cpudsp)
}
cpupm_add_cpu2dependency(cpudsp->dip, dependency);
- speedstep_state = kmem_zalloc(sizeof (speedstep_state_t), KM_SLEEP);
- speedstep_state->ss_state = NULL;
- cpudsp->module_state = speedstep_state;
-
ESSDEBUG(("Instance %d succeeded.\n", ddi_get_instance(cpudsp->dip)));
return (ESS_RET_SUCCESS);
}
@@ -359,9 +306,56 @@ speedstep_init(cpudrv_devstate_t *cpudsp)
/*
* Free resources allocated by speedstep_init().
*/
-void
+static void
speedstep_fini(cpudrv_devstate_t *cpudsp)
{
- cpu_acpi_fini(cpudsp->acpi_handle);
- kmem_free(cpudsp->module_state, sizeof (speedstep_state_t));
+ cpudrv_mach_state_t *mach_state = cpudsp->mach_state;
+ cpu_acpi_handle_t handle = mach_state->acpi_handle;
+
+ cpupm_free_cpu_dependencies();
+ cpu_acpi_free_pstate_data(handle);
+}
+
+boolean_t
+speedstep_supported(uint_t family, uint_t model)
+{
+ struct cpuid_regs cpu_regs;
+ uint64_t reg;
+
+ /* Required features */
+ if (!(x86_feature & X86_CPUID) ||
+ !(x86_feature & X86_MSR)) {
+ return (B_FALSE);
+ }
+
+ /*
+ * We only support family/model combinations which
+ * are P-state TSC invariant.
+ */
+ if (!((family == 0xf && model >= 0x3) ||
+ (family == 0x6 && model >= 0xe))) {
+ return (B_FALSE);
+ }
+
+ /*
+ * Enhanced SpeedStep supported?
+ */
+ cpu_regs.cp_eax = 0x1;
+ (void) __cpuid_insn(&cpu_regs);
+ if (!(cpu_regs.cp_ecx & CPUID_INTC_ECX_EST)) {
+ return (B_FALSE);
+ }
+
+ /*
+ * If Enhanced SpeedStep has not been enabled on the system,
+ * then we probably should not override the BIOS setting.
+ */
+ reg = rdmsr(IA32_MISC_ENABLE_MSR);
+ if (! (reg & IA32_MISC_ENABLE_EST)) {
+ cmn_err(CE_NOTE, "!Enhanced Intel SpeedStep not enabled.");
+ cmn_err(CE_NOTE, "!CPU power management will not function.");
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
}
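
speedstep_supported() above keys off three things: the family/model gate for P-state TSC invariance, the EST feature flag in CPUID leaf 1 ECX, and the EST enable bit in IA32_MISC_ENABLE. The MSR half needs kernel (or raw MSR device) access, but the CPUID half can be sketched from user space. The EST bit position (ECX bit 7) is the commonly documented value and is assumed here, since CPUID_INTC_ECX_EST is defined in the tree's x86 headers rather than in this patch; GCC or Clang on x86 is also assumed.

#include <stdio.h>
#include <cpuid.h>

#define	EST_BIT	(1u << 7)	/* assumed position of CPUID.1:ECX.EST */

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return (1);
	(void) printf("Enhanced SpeedStep advertised: %s\n",
	    (ecx & EST_BIT) ? "yes" : "no");
	return (0);
}
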
diff --git a/usr/src/uts/i86pc/io/cpudrv_plat.c b/usr/src/uts/i86pc/io/cpudrv_plat.c
deleted file mode 100644
index a9eced0141..0000000000
--- a/usr/src/uts/i86pc/io/cpudrv_plat.c
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-/*
- * CPU power management driver platform support.
- */
-
-#include <sys/ddi.h>
-#include <sys/sunddi.h>
-#include <sys/cpupm.h>
-#include <sys/cpudrv_plat.h>
-#include <sys/cpudrv.h>
-#include <sys/speedstep.h>
-#include <sys/pwrnow.h>
-#include <sys/machsystm.h>
-
-/*
- * Different processor families have their own technologies for supporting
- * CPU power management (i.e., Intel has Enhanced SpeedStep for some of it's
- * processors and AMD has PowerNow! for some of it's processors). We support
- * these different technologies via modules that export the interfaces
- * described below.
- *
- * If a module implements the technology that should be used to manage
- * the current CPU device, then the cpum_init() module should return
- * succesfully (i.e., return code of 0) and perform any initialization
- * such that future power transistions can be performed by calling
- * the cpum_power() interface(). And the cpum_fini() interface can be
- * used to free any resources allocated by cpum_init().
- */
-struct cpudrv_module_ops {
- char *cm_label;
- int (*cpum_init)(cpudrv_devstate_t *);
- void (*cpum_fini)(cpudrv_devstate_t *);
- int (*cpum_power)(cpudrv_devstate_t *, uint32_t);
-};
-
-/*
- * Interfaces for modules implementing Intel's Enhanced SpeedStep.
- */
-static struct cpudrv_module_ops speedstep_ops = {
- "Enhanced SpeedStep Technology",
- speedstep_init,
- speedstep_fini,
- speedstep_power,
-};
-
-/*
- * Interfaces for modules implementing AMD's PowerNow!.
- */
-static struct cpudrv_module_ops pwrnow_ops = {
- "PowerNow! Technology",
- pwrnow_init,
- pwrnow_fini,
- pwrnow_power
-};
-
-/*
- * Table of supported modules.
- */
-static struct cpudrv_module_ops *cpudrv_module_ops_table[] = {
- &speedstep_ops,
- &pwrnow_ops,
- NULL
-};
-static struct cpudrv_module_ops **cpumops;
-
-/*
- * Constants used by the Processor Device Notification handler
- * that identify what kind of change has occurred. We currently
- * only handle PPC_CHANGE_NOTIFICATION. The other two are
- * ignored.
- */
-#define PPC_CHANGE_NOTIFICATION 0x80
-#define CST_CHANGE_NOTIFICATION 0x81
-#define TPC_CHANGE_NOTIFICATION 0x82
-
-/*
- * Note that our driver numbers the power levels from lowest to
- * highest starting at 1 (i.e., the lowest power level is 1 and
- * the highest power level is cpupm->num_spd). The x86 modules get
- * their power levels from ACPI which numbers power levels from
- * highest to lowest starting at 0 (i.e., the lowest power level
- * is (cpupm->num_spd - 1) and the highest power level is 0). So to
- * map one of our driver power levels to one understood by ACPI we
- * simply subtract our driver power level from cpupm->num_spd. Likewise,
- * to map an ACPI power level to the proper driver power level, we
- * subtract the ACPI power level from cpupm->num_spd.
- */
-#define PM_2_PLAT_LEVEL(cpupm, pm_level) (cpupm->num_spd - pm_level)
-#define PLAT_2_PM_LEVEL(cpupm, plat_level) (cpupm->num_spd - plat_level)
-
-/*
- * Change CPU speed using interface provided by module.
- */
-int
-cpudrv_pm_change_speed(cpudrv_devstate_t *cpudsp, cpudrv_pm_spd_t *new_spd)
-{
- cpudrv_pm_t *cpupm;
- uint32_t plat_level;
- int ret;
-
- cpupm = &(cpudsp->cpudrv_pm);
- plat_level = PM_2_PLAT_LEVEL(cpupm, new_spd->pm_level);
- ret = (*cpumops)->cpum_power(cpudsp, plat_level);
- if (ret != 0)
- return (DDI_FAILURE);
- return (DDI_SUCCESS);
-}
-
-/*
- * Determine the cpu_id for the CPU device.
- */
-boolean_t
-cpudrv_pm_get_cpu_id(dev_info_t *dip, processorid_t *cpu_id)
-{
- return ((*cpu_id = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, "reg", -1)) != -1);
-
-}
-
-/*
- * All CPU instances have been initialized successfully.
- */
-boolean_t
-cpudrv_pm_all_instances_ready(void)
-{
- return (cpupm_is_ready());
-}
-
-/*
- * Is the current thread the thread that is handling the
- * PPC change notification?
- */
-boolean_t
-cpudrv_pm_is_throttle_thread(cpudrv_pm_t *cpupm)
-{
- return (curthread == cpupm->pm_throttle_thread);
-}
-
-/*
- * See if a module exists for managing power for this CPU.
- */
-boolean_t
-cpudrv_pm_init_module(cpudrv_devstate_t *cpudsp)
-{
- /*
- * Loop through the CPU management module table and see if
- * any of the modules implement CPU power management
- * for this CPU.
- */
- for (cpumops = cpudrv_module_ops_table; *cpumops != NULL; cpumops++) {
- if ((*cpumops)->cpum_init(cpudsp) == 0)
- break;
- }
-
- /*
- * Nope, we can't power manage this CPU.
- */
- if (*cpumops == NULL) {
- return (B_FALSE);
- }
-
- return (B_TRUE);
-}
-
-/*
- * Free any resources associated with the power management module.
- */
-void
-cpudrv_pm_free_module(cpudrv_devstate_t *cpudsp)
-{
- (*cpumops)->cpum_fini(cpudsp);
-}
-
-/*
- * This routine changes the top speed to which the CPUs can transition by:
- *
- * - Resetting the up_spd for all speeds lower than the new top speed
- * to point to the new top speed.
- * - Updating the framework with a new "normal" (maximum power) for this
- * device.
- */
-void
-cpudrv_pm_set_topspeed(void *ctx, int plat_level)
-{
- cpudrv_devstate_t *cpudsp;
- cpudrv_pm_t *cpupm;
- cpudrv_pm_spd_t *spd;
- cpudrv_pm_spd_t *top_spd;
- dev_info_t *dip;
- int pm_level;
- int instance;
- int i;
-
- dip = ctx;
- instance = ddi_get_instance(dip);
- cpudsp = ddi_get_soft_state(cpudrv_state, instance);
- ASSERT(cpudsp != NULL);
-
- mutex_enter(&cpudsp->lock);
- cpupm = &(cpudsp->cpudrv_pm);
- pm_level = PLAT_2_PM_LEVEL(cpupm, plat_level);
- for (i = 0, spd = cpupm->head_spd; spd; i++, spd = spd->down_spd) {
- /*
- * Don't mess with speeds that are higher than the new
- * top speed. They should be out of range anyway.
- */
- if (spd->pm_level > pm_level)
- continue;
- /*
- * This is the new top speed.
- */
- if (spd->pm_level == pm_level)
- top_spd = spd;
-
- spd->up_spd = top_spd;
- }
- cpupm->targ_spd = top_spd;
-
- cpupm->pm_throttle_thread = curthread;
-
- mutex_exit(&cpudsp->lock);
-
- (void) pm_update_maxpower(dip, 0, top_spd->pm_level);
-}
-
-/*
- * This routine reads the ACPI _PPC object. It's accessed as a callback
- * by the ppm driver whenever a _PPC change notification is received.
- */
-int
-cpudrv_pm_get_topspeed(void *ctx)
-{
- cpu_acpi_handle_t handle;
- cpudrv_devstate_t *cpudsp;
- dev_info_t *dip;
- int instance;
- int plat_level;
-
- dip = ctx;
- instance = ddi_get_instance(dip);
- cpudsp = ddi_get_soft_state(cpudrv_state, instance);
- ASSERT(cpudsp != NULL);
- handle = cpudsp->acpi_handle;
-
- cpu_acpi_cache_ppc(handle);
- plat_level = CPU_ACPI_PPC(handle);
- return (plat_level);
-}
-
-/*
- * This notification handler is called whenever the ACPI _PPC
- * object changes. The _PPC is a sort of governor on power levels.
- * It sets an upper threshold on which, _PSS defined, power levels
- * are usuable. The _PPC value is dynamic and may change as properties
- * (i.e., thermal or AC source) of the system change.
- */
-/* ARGSUSED */
-void
-cpudrv_pm_ppc_notify_handler(ACPI_HANDLE obj, UINT32 val, void *ctx)
-{
- /*
- * We only handle _PPC change notifications.
- */
- if (val == PPC_CHANGE_NOTIFICATION)
- cpudrv_pm_redefine_topspeed(ctx);
-}
-
-void
-cpudrv_pm_redefine_topspeed(void *ctx)
-{
- /*
- * This should never happen, unless ppm does not get loaded.
- */
- if (cpupm_redefine_topspeed == NULL) {
- cmn_err(CE_WARN, "cpudrv_pm_redefine_topspeed: "
- "cpupm_redefine_topspeed has not been initialized - "
- "ignoring notification");
- return;
- }
-
- /*
- * ppm callback needs to handle redefinition for all CPUs in
- * the domain.
- */
- (*cpupm_redefine_topspeed)(ctx);
-}
diff --git a/usr/src/uts/i86pc/os/cpupm.c b/usr/src/uts/i86pc/os/cpupm.c
index 9a5112d9d7..6aad26948a 100644
--- a/usr/src/uts/i86pc/os/cpupm.c
+++ b/usr/src/uts/i86pc/os/cpupm.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/cpupm.h>
/*
@@ -82,7 +80,7 @@ static boolean_t cpupm_dependencies_valid = B_TRUE;
/*
* If any CPU fails to attach, then cpupm is disabled for all CPUs.
*/
-static boolean_t cpupm_enabled = B_TRUE;
+static uint32_t cpupm_enabled = CPUPM_P_STATES | CPUPM_T_STATES;
/*
* Until all CPUs have succesfully attached, we do not allow
@@ -197,7 +195,7 @@ boolean_t
cpupm_is_ready()
{
#ifndef __xpv
- if (!cpupm_enabled)
+ if (cpupm_enabled == CPUPM_NO_STATES)
return (B_FALSE);
return (cpupm_ready);
#else
@@ -205,16 +203,22 @@ cpupm_is_ready()
#endif
}
+boolean_t
+cpupm_is_enabled(uint32_t state)
+{
+ return ((cpupm_enabled & state) == state);
+}
+
/*
- * By default, cpupm is enabled. But if there are any errors attaching
- * any of the CPU devices, then it is disabled.
+ * By default, all states are enabled. But if there are any errors attaching
+ * any of the CPU devices, then they are disabled.
*/
void
-cpupm_enable(boolean_t enable)
+cpupm_disable(uint32_t state)
{
- if (!enable)
+ cpupm_enabled &= ~state;
+ if (state & CPUPM_P_STATES)
cpupm_free_cpu_dependencies();
- cpupm_enabled = enable;
}
/*
@@ -234,17 +238,10 @@ cpupm_post_startup()
(*cpupm_rebuild_cpu_domains)();
/*
- * If CPU power management was disabled, then there
- * is nothing to do.
+ * Only initialize the topspeed if P-states are enabled.
*/
- if (!cpupm_enabled)
- return;
-
- cpupm_ready = B_TRUE;
-
- if (cpupm_init_topspeed != NULL)
+ if (cpupm_enabled & CPUPM_P_STATES && cpupm_init_topspeed != NULL)
(*cpupm_init_topspeed)();
-#else
- cpupm_ready = B_TRUE;
#endif
+ cpupm_ready = B_TRUE;
}
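
The cpupm.c change above turns the single cpupm_enabled boolean into a bitmask so P-states and T-states can be disabled independently when one of them fails to initialize. The following stand-alone sketch shows that bitmask logic in isolation; the CPUPM_* values are illustrative stand-ins, since the real definitions live in cpupm.h rather than in this hunk.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the CPUPM_* flags defined in cpupm.h. */
#define	CPUPM_NO_STATES	0x00
#define	CPUPM_P_STATES	0x01
#define	CPUPM_T_STATES	0x02

static uint32_t cpupm_enabled = CPUPM_P_STATES | CPUPM_T_STATES;

static int
cpupm_is_enabled(uint32_t state)
{
	return ((cpupm_enabled & state) == state);
}

static void
cpupm_disable(uint32_t state)
{
	cpupm_enabled &= ~state;
}

int
main(void)
{
	cpupm_disable(CPUPM_P_STATES);	/* e.g. a P-state init failure */
	(void) printf("P-states %d, T-states %d, any %d\n",
	    cpupm_is_enabled(CPUPM_P_STATES),
	    cpupm_is_enabled(CPUPM_T_STATES),
	    cpupm_enabled != CPUPM_NO_STATES);
	return (0);
}
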
diff --git a/usr/src/uts/i86pc/sys/cpu_acpi.h b/usr/src/uts/i86pc/sys/cpu_acpi.h
index da9e23d9b9..8a5d264028 100644
--- a/usr/src/uts/i86pc/sys/cpu_acpi.h
+++ b/usr/src/uts/i86pc/sys/cpu_acpi.h
@@ -19,15 +19,13 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _CPU_ACPI_H
#define _CPU_ACPI_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/acpi/acpi.h>
#include <sys/acpi/acresrc.h>
#include <sys/acpi/acglobal.h>
@@ -37,56 +35,96 @@
extern "C" {
#endif
+/*
+ * P-state related macros
+ */
#define CPU_ACPI_PPC(sp) sp->cs_ppc
#define CPU_ACPI_PSD(sp) sp->cs_psd
#define CPU_ACPI_PCT(sp) sp->cs_pct
#define CPU_ACPI_PCT_CTRL(sp) &sp->cs_pct[0]
#define CPU_ACPI_PCT_STATUS(sp) &sp->cs_pct[1]
-#define CPU_ACPI_PSTATES(sp) sp->cs_pstates->pss_pstates
-#define CPU_ACPI_PSTATES_COUNT(sp) sp->cs_pstates->pss_count
+#define CPU_ACPI_PSTATES(sp) sp->cs_pstates.ss_states
+#define CPU_ACPI_PSTATES_COUNT(sp) sp->cs_pstates.ss_count
-#define CPU_ACPI_PSTATE(sp, i) &sp->cs_pstates->pss_pstates[i]
#define CPU_ACPI_FREQ(pstate) pstate->ps_freq
-#define CPU_ACPI_TRANSLAT(pstate) pstate->ps_translat
-#define CPU_ACPI_CTRL(pstate) pstate->ps_ctrl
-#define CPU_ACPI_STAT(pstate) pstate->ps_state
+#define CPU_ACPI_PSTATE_TRANSLAT(pstate) pstate->ps_translat
+#define CPU_ACPI_PSTATE_CTRL(pstate) pstate->ps_ctrl
+#define CPU_ACPI_PSTATE_STAT(pstate) pstate->ps_state
+
+/*
+ * T-state related macros
+ */
+#define CPU_ACPI_TPC(sp) sp->cs_tpc
+#define CPU_ACPI_TSD(sp) sp->cs_tsd
+#define CPU_ACPI_PTC(sp) sp->cs_ptc
+#define CPU_ACPI_PTC_CTRL(sp) &sp->cs_ptc[0]
+#define CPU_ACPI_PTC_STATUS(sp) &sp->cs_ptc[1]
+#define CPU_ACPI_TSTATES(sp) sp->cs_tstates.ss_states
+#define CPU_ACPI_TSTATES_COUNT(sp) sp->cs_tstates.ss_count
+
+#define CPU_ACPI_FREQPER(tstate) tstate->ts_freqper
+#define CPU_ACPI_TSTATE_TRANSLAT(tstate) tstate->ts_translat
+#define CPU_ACPI_TSTATE_CTRL(tstate) tstate->ts_ctrl
+#define CPU_ACPI_TSTATE_STAT(tstate) tstate->ts_state
#define CPU_ACPI_NONE_CACHED 0x00
#define CPU_ACPI_PCT_CACHED 0x01
#define CPU_ACPI_PSS_CACHED 0x02
#define CPU_ACPI_PSD_CACHED 0x04
#define CPU_ACPI_PPC_CACHED 0x08
+#define CPU_ACPI_PTC_CACHED 0x10
+#define CPU_ACPI_TSS_CACHED 0x20
+#define CPU_ACPI_TSD_CACHED 0x40
+#define CPU_ACPI_TPC_CACHED 0x80
#define CPU_ACPI_IS_OBJ_CACHED(sp, obj) (sp->cpu_acpi_cached & obj)
#define CPU_ACPI_OBJ_IS_CACHED(sp, obj) (sp->cpu_acpi_cached |= obj)
#define CPU_ACPI_OBJ_IS_NOT_CACHED(sp, obj) (sp->cpu_acpi_cached &= ~obj)
+#define CPU_ACPI_PSTATES_SIZE(cnt) (cnt * sizeof (cpu_acpi_pstate_t))
+#define CPU_ACPI_PSS_CNT (sizeof (cpu_acpi_pstate_t) / sizeof (uint32_t))
+#define CPU_ACPI_TSTATES_SIZE(cnt) (cnt * sizeof (cpu_acpi_tstate_t))
+#define CPU_ACPI_TSS_CNT (sizeof (cpu_acpi_tstate_t) / sizeof (uint32_t))
+
/*
- * Container for _PSD information
+ * CPU Domain Coordination Types
*/
-typedef struct cpu_acpi_psd
+#define CPU_ACPI_SW_ALL 0xfc
+#define CPU_ACPI_SW_ANY 0xfd
+#define CPU_ACPI_HW_ALL 0xfe
+
+/*
+ * Container for ACPI processor state dependency information
+ */
+typedef struct cpu_acpi_state_dependency
{
- uint8_t pd_entries;
- uint8_t pd_revision;
- uint32_t pd_domain;
- uint32_t pd_type;
- uint32_t pd_num;
-} cpu_acpi_psd_t;
+ uint8_t sd_entries;
+ uint8_t sd_revision;
+ uint32_t sd_domain;
+ uint32_t sd_type;
+ uint32_t sd_num;
+} cpu_acpi_state_dependency_t;
+
+typedef cpu_acpi_state_dependency_t cpu_acpi_psd_t;
+typedef cpu_acpi_state_dependency_t cpu_acpi_tsd_t;
/*
- * Container for _PCT information
+ * Container for ACPI processor control register information
*/
-typedef struct cpu_acpi_pct
+typedef struct cpu_acpi_ctrl_regs
{
- uint8_t pc_addrspace_id;
- uint8_t pc_width;
- uint8_t pc_offset;
- uint8_t pc_asize;
- ACPI_IO_ADDRESS pc_address;
-} cpu_acpi_pct_t;
+ uint8_t cr_addrspace_id;
+ uint8_t cr_width;
+ uint8_t cr_offset;
+ uint8_t cr_asize;
+ ACPI_IO_ADDRESS cr_address;
+} cpu_acpi_ctrl_regs_t;
+
+typedef cpu_acpi_ctrl_regs_t cpu_acpi_pct_t;
+typedef cpu_acpi_ctrl_regs_t cpu_acpi_ptc_t;
/*
- * Containers for _PSS information
+ * Container for ACPI _PSS information
*/
typedef struct cpu_acpi_pstate
{
@@ -98,12 +136,30 @@ typedef struct cpu_acpi_pstate
uint32_t ps_state;
} cpu_acpi_pstate_t;
-typedef struct cpu_acpi_pstates {
- cpu_acpi_pstate_t *pss_pstates;
- uint32_t pss_count;
-} cpu_acpi_pstates_t;
+/*
+ * Container for _TSS information
+ */
+typedef struct cpu_acpi_tstate
+{
+ uint32_t ts_freqper;
+ uint32_t ts_disp;
+ uint32_t ts_translat;
+ uint32_t ts_ctrl;
+ uint32_t ts_state;
+
+} cpu_acpi_tstate_t;
+
+typedef struct cpu_acpi_supported_states {
+ void *ss_states;
+ uint32_t ss_count;
+} cpu_acpi_supported_states_t;
+typedef cpu_acpi_supported_states_t cpu_acpi_pstates_t;
+typedef cpu_acpi_supported_states_t cpu_acpi_tstates_t;
+
+typedef int cpu_acpi_present_capabilities_t;
typedef int cpu_acpi_ppc_t;
+typedef int cpu_acpi_tpc_t;
/*
* Container for cached ACPI data.
@@ -112,22 +168,25 @@ typedef struct cpu_acpi_state {
ACPI_HANDLE cs_handle;
dev_info_t *cs_dip;
uint_t cpu_acpi_cached;
- cpu_acpi_pstates_t *cs_pstates;
+ cpu_acpi_pstates_t cs_pstates;
cpu_acpi_pct_t cs_pct[2];
cpu_acpi_psd_t cs_psd;
cpu_acpi_ppc_t cs_ppc;
+ cpu_acpi_tstates_t cs_tstates;
+ cpu_acpi_ptc_t cs_ptc[2];
+ cpu_acpi_tsd_t cs_tsd;
+ cpu_acpi_tpc_t cs_tpc;
} cpu_acpi_state_t;
typedef cpu_acpi_state_t *cpu_acpi_handle_t;
-extern cpu_acpi_handle_t cpu_acpi_init(dev_info_t *);
-extern void cpu_acpi_fini(cpu_acpi_handle_t);
-extern int cpu_acpi_cache_pstates(cpu_acpi_handle_t);
-extern int cpu_acpi_cache_pct(cpu_acpi_handle_t);
-extern int cpu_acpi_cache_psd(cpu_acpi_handle_t);
extern void cpu_acpi_cache_ppc(cpu_acpi_handle_t);
-extern int cpu_acpi_cache_data(cpu_acpi_handle_t);
-extern void cpu_acpi_install_ppc_handler(cpu_acpi_handle_t,
+extern void cpu_acpi_cache_tpc(cpu_acpi_handle_t);
+extern int cpu_acpi_cache_pstate_data(cpu_acpi_handle_t);
+extern void cpu_acpi_free_pstate_data(cpu_acpi_handle_t);
+extern int cpu_acpi_cache_tstate_data(cpu_acpi_handle_t);
+extern void cpu_acpi_free_tstate_data(cpu_acpi_handle_t);
+extern void cpu_acpi_install_notify_handler(cpu_acpi_handle_t,
ACPI_NOTIFY_HANDLER, dev_info_t *);
extern int cpu_acpi_write_pdc(cpu_acpi_handle_t, uint32_t, uint32_t,
uint32_t *);
@@ -135,6 +194,8 @@ extern int cpu_acpi_write_port(ACPI_IO_ADDRESS, uint32_t, uint32_t);
extern int cpu_acpi_read_port(ACPI_IO_ADDRESS, uint32_t *, uint32_t);
extern uint_t cpu_acpi_get_speeds(cpu_acpi_handle_t, int **);
extern void cpu_acpi_free_speeds(int *, uint_t);
+extern cpu_acpi_handle_t cpu_acpi_init(dev_info_t *);
+extern void cpu_acpi_fini(cpu_acpi_handle_t);
#ifdef __cplusplus
}
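
Editorial note, for illustration only (not part of this changeset): the new _TSS accessor
macros above are meant to be consumed roughly as in the sketch below. The helper name is
hypothetical, and it assumes cpu_acpi_cache_tstate_data() has already populated the cache.

#include <sys/cpu_acpi.h>
#include <sys/cmn_err.h>

/*
 * Hypothetical helper: walk the cached _TSS entries and print each
 * T-state's frequency percentage and control value. Assumes the
 * caller has already cached the data via cpu_acpi_cache_tstate_data().
 */
static void
example_dump_tstates(cpu_acpi_handle_t handle)
{
	cpu_acpi_tstate_t *tstate;
	uint32_t i;

	if (!CPU_ACPI_IS_OBJ_CACHED(handle, CPU_ACPI_TSS_CACHED))
		return;

	tstate = (cpu_acpi_tstate_t *)CPU_ACPI_TSTATES(handle);
	for (i = 0; i < CPU_ACPI_TSTATES_COUNT(handle); i++, tstate++) {
		cmn_err(CE_CONT, "T%u: %u%% of max frequency, ctrl 0x%x\n",
		    i, CPU_ACPI_FREQPER(tstate),
		    CPU_ACPI_TSTATE_CTRL(tstate));
	}
}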
diff --git a/usr/src/uts/i86pc/sys/cpudrv_mach.h b/usr/src/uts/i86pc/sys/cpudrv_mach.h
new file mode 100644
index 0000000000..2657ec3be0
--- /dev/null
+++ b/usr/src/uts/i86pc/sys/cpudrv_mach.h
@@ -0,0 +1,211 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_CPUDRV_MACH_H
+#define _SYS_CPUDRV_MACH_H
+
+#include <sys/cpuvar.h>
+#include <sys/cpupm.h>
+#include <sys/cpu_acpi.h>
+#include <sys/cpudrv.h>
+#include <sys/ksynch.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * We currently refuse to power manage if the CPU is not ready to
+ * take cross calls (cross calls fail silently if CPU is not ready
+ * for it).
+ */
+extern cpuset_t cpu_ready_set;
+#define CPUDRV_PM_XCALL_IS_READY(cpuid) CPU_IN_SET(cpu_ready_set, (cpuid))
+
+/*
+ * An error attaching any of the devices results in disabling
+ * CPU power management.
+ */
+#define CPUDRV_PM_DISABLE() cpupm_disable(CPUPM_ALL_STATES)
+
+/*
+ * If no power management states are enabled, then CPU power
+ * management is disabled.
+ */
+#define CPUDRV_PM_DISABLED() \
+ (!cpupm_is_enabled(CPUPM_P_STATES) && !cpupm_is_enabled(CPUPM_T_STATES))
+
+/*
+ * Is P-state management enabled?
+ */
+#define CPUDRV_PM_POWER_ENABLED(cpudsp) \
+ (((cpudrv_mach_state_t *)cpudsp->mach_state)->caps & CPUDRV_P_STATES)
+
+/*
+ * We're about to exit the _PPC/_TPC governor thread, so reset the tag.
+ */
+#define CPUDRV_PM_RESET_GOVERNOR_THREAD(cpupm) { \
+ if (curthread == cpupm->pm_governor_thread) \
+ cpupm->pm_governor_thread = NULL; \
+}
+
+/*
+ * Install a _PPC/_TPC change notification handler.
+ */
+#define CPUDRV_PM_INSTALL_MAX_CHANGE_HANDLER(cpudsp, dip) \
+ cpudrv_pm_install_notify_handler(cpudsp, dip);
+
+/*
+ * Redefine the topspeed.
+ */
+#define CPUDRV_PM_REDEFINE_TOPSPEED(dip) cpudrv_pm_redefine_topspeed(dip)
+
+/*
+ * Set callbacks so that PPM can callback into CPUDRV
+ */
+#define CPUDRV_PM_SET_PPM_CALLBACKS() { \
+ cpupm_get_topspeed = cpudrv_pm_get_topspeed; \
+ cpupm_set_topspeed = cpudrv_pm_set_topspeed; \
+}
+
+/*
+ * ACPI provides the supported speeds.
+ */
+#define CPUDRV_PM_GET_SPEEDS(cpudsp, speeds, nspeeds) \
+ nspeeds = cpudrv_pm_get_speeds(cpudsp, &speeds);
+#define CPUDRV_PM_FREE_SPEEDS(speeds, nspeeds) \
+ cpudrv_pm_free_speeds(speeds, nspeeds);
+
+/*
+ * Convert speed to Hz.
+ */
+#define CPUDRV_PM_SPEED_HZ(unused, mhz) ((uint64_t)mhz * 1000000)
+
+/*
+ * Compute the idle cnt percentage for a given speed.
+ */
+#define CPUDRV_PM_IDLE_CNT_PERCENT(hwm, speeds, i) \
+ (100 - (((100 - hwm) * speeds[0]) / speeds[i]))
+
+/*
+ * Compute the user cnt percentage for a given speed.
+ */
+#define CPUDRV_PM_USER_CNT_PERCENT(hwm, speeds, i) \
+ ((hwm * speeds[i]) / speeds[i - 1]);
+
+/*
+ * pm-components property definitions for this machine type.
+ *
+ * Fully constructed pm-components property should be an array of
+ * strings that look something like:
+ *
+ * pmc[0] = "NAME=CPU Speed"
+ * pmc[1] = "1=2800MHz"
+ * pmc[2] = "2=3200MHz"
+ *
+ * The amount of memory needed for each string is:
+ * digits for power level + '=' + digits for freq + 'MHz' + '\0'
+ */
+#define CPUDRV_PM_COMP_SIZE() \
+ (CPUDRV_PM_COMP_MAX_DIG + 1 + CPUDRV_PM_COMP_MAX_DIG + 3 + 1);
+#define CPUDRV_PM_COMP_SPEED(cpupm, cur_spd) cur_spd->speed;
+#define CPUDRV_PM_COMP_SPRINT(pmc, cpupm, cur_spd, comp_spd) \
+ (void) sprintf(pmc, "%d=%dMHz", cur_spd->pm_level, comp_spd);
+
+/*
+ * T-State domain list
+ */
+typedef struct cpudrv_tstate_domain_node {
+ struct cpudrv_tstate_domain_node *tdn_next;
+ struct cpudrv_tstate_domain *tdn_domain;
+ cpudrv_devstate_t *tdn_cpudsp;
+} cpudrv_tstate_domain_node_t;
+
+typedef struct cpudrv_tstate_domain {
+ struct cpudrv_tstate_domain *td_next;
+ cpudrv_tstate_domain_node_t *td_node;
+ uint32_t td_domain;
+ uint32_t td_type;
+ kmutex_t td_lock;
+} cpudrv_tstate_domain_t;
+
+extern cpudrv_tstate_domain_t *cpudrv_tstate_domains;
+
+/*
+ * Different processor families have their own technologies for supporting
+ * CPU power management (e.g., Intel has Enhanced SpeedStep for some of its
+ * processors and AMD has PowerNow! for some of its processors). We support
+ * these different technologies via modules that export the interfaces
+ * described below.
+ *
+ * If a module implements the technology that should be used to manage
+ * the current CPU device, then its cpups_init() routine should return
+ * successfully (i.e., a return code of 0) and perform any initialization
+ * needed so that future power transitions can be performed by calling
+ * the cpups_power() interface. The cpups_fini() interface can then be
+ * used to free any resources allocated by cpups_init().
+ */
+typedef struct cpudrv_pstate_ops {
+ char *cpups_label;
+ int (*cpups_init)(cpudrv_devstate_t *);
+ void (*cpups_fini)(cpudrv_devstate_t *);
+ int (*cpups_power)(cpudrv_devstate_t *, uint32_t);
+} cpudrv_pstate_ops_t;
+
+/*
+ * T-state support.
+ */
+typedef struct cpudrv_tstate_ops {
+ char *cputs_label;
+ int (*cputs_init)(cpudrv_devstate_t *);
+ void (*cputs_fini)(cpudrv_devstate_t *);
+ int (*cputs_throttle)(cpudrv_devstate_t *, uint32_t);
+} cpudrv_tstate_ops_t;
+
+typedef struct cpudrv_mach_state {
+ void *acpi_handle;
+ cpudrv_pstate_ops_t *cpupm_pstate_ops;
+ cpudrv_tstate_ops_t *cpupm_tstate_ops;
+ cpudrv_tstate_domain_node_t *tstate_domain_node;
+ uint32_t pstate;
+ uint32_t tstate;
+ uint32_t caps;
+} cpudrv_mach_state_t;
+
+#define CPUDRV_NO_STATES 0x00
+#define CPUDRV_P_STATES 0x01
+#define CPUDRV_T_STATES 0x02
+
+extern uint_t cpudrv_pm_get_speeds(cpudrv_devstate_t *, int **);
+extern void cpudrv_pm_free_speeds(int *, uint_t);
+extern void cpudrv_pm_set_topspeed(void *, int);
+extern int cpudrv_pm_get_topspeed(void *);
+extern void cpudrv_pm_redefine_topspeed(void *);
+extern void cpudrv_pm_install_notify_handler(cpudrv_devstate_t *, dev_info_t *);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_CPUDRV_MACH_H */
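
Editorial note, for illustration only: the T-state domain structures above are intended to
group CPUs that share a _TSD coordination domain. The sketch below shows one plausible way a
CPU could be linked into cpudrv_tstate_domains; the function, allocation strategy, and the
assumption that a caller-held lock protects the global list are all illustrative, not the
committed cpudrv_throttle.c logic.

#include <sys/kmem.h>
#include <sys/cpudrv_mach.h>

/*
 * Illustrative sketch: link a CPU device into the T-state domain that
 * matches its _TSD domain id, creating the domain if it does not yet
 * exist. A lock protecting the cpudrv_tstate_domains list is assumed
 * to be held by the caller.
 */
static void
example_domain_insert(cpudrv_devstate_t *cpudsp, uint32_t domain,
    uint32_t type)
{
	cpudrv_tstate_domain_t *dptr;
	cpudrv_tstate_domain_node_t *nptr;

	for (dptr = cpudrv_tstate_domains; dptr != NULL;
	    dptr = dptr->td_next) {
		if (dptr->td_domain == domain)
			break;
	}
	if (dptr == NULL) {
		dptr = kmem_zalloc(sizeof (*dptr), KM_SLEEP);
		dptr->td_domain = domain;
		dptr->td_type = type;
		mutex_init(&dptr->td_lock, NULL, MUTEX_DRIVER, NULL);
		dptr->td_next = cpudrv_tstate_domains;
		cpudrv_tstate_domains = dptr;
	}

	nptr = kmem_zalloc(sizeof (*nptr), KM_SLEEP);
	nptr->tdn_cpudsp = cpudsp;
	nptr->tdn_domain = dptr;

	mutex_enter(&dptr->td_lock);
	nptr->tdn_next = dptr->td_node;
	dptr->td_node = nptr;
	mutex_exit(&dptr->td_lock);
}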
diff --git a/usr/src/uts/i86pc/sys/cpudrv_plat.h b/usr/src/uts/i86pc/sys/cpudrv_plat.h
deleted file mode 100644
index 73d85ff814..0000000000
--- a/usr/src/uts/i86pc/sys/cpudrv_plat.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _SYS_CPUDRV_PLAT_H
-#define _SYS_CPUDRV_PLAT_H
-
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-#include <sys/cpuvar.h>
-#include <sys/cpupm.h>
-#include <sys/cpu_acpi.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * We currently refuse to power manage if the CPU in not ready to
- * take cross calls (cross calls fail silently if CPU is not ready
- * for it).
- */
-extern cpuset_t cpu_ready_set;
-#define CPUDRV_PM_XCALL_IS_READY(cpuid) CPU_IN_SET(cpu_ready_set, (cpuid))
-
-/*
- * An error attaching any of the devices results in disabling
- * CPU power management.
- */
-#define CPUDRV_PM_DISABLE() cpupm_enable(B_FALSE)
-
-/*
- * We're about to exit the _PPC thread so reset tag.
- */
-#define CPUDRV_PM_RESET_THROTTLE_THREAD(cpupm) { \
- if (curthread == cpupm->pm_throttle_thread) \
- cpupm->pm_throttle_thread = NULL; \
-}
-
-/*
- * Install a _PPC change notification handler.
- */
-#define CPUDRV_PM_INSTALL_TOPSPEED_CHANGE_HANDLER(cpudsp, dip) \
- cpu_acpi_install_ppc_handler(cpudsp->acpi_handle, \
- cpudrv_pm_ppc_notify_handler, dip);
-
-/*
- * Redefine the topspeed.
- */
-#define CPUDRV_PM_REDEFINE_TOPSPEED(dip) cpudrv_pm_redefine_topspeed(dip)
-
-/*
- * Set callbacks so that PPM can callback into CPUDRV
- */
-#define CPUDRV_PM_SET_PPM_CALLBACKS() { \
- cpupm_get_topspeed = cpudrv_pm_get_topspeed; \
- cpupm_set_topspeed = cpudrv_pm_set_topspeed; \
-}
-
-/*
- * ACPI provides the supported speeds.
- */
-#define CPUDRV_PM_GET_SPEEDS(cpudsp, speeds, nspeeds) \
- nspeeds = cpu_acpi_get_speeds(cpudsp->acpi_handle, &speeds);
-#define CPUDRV_PM_FREE_SPEEDS(speeds, nspeeds) \
- cpu_acpi_free_speeds(speeds, nspeeds);
-
-/*
- * Convert speed to Hz.
- */
-#define CPUDRV_PM_SPEED_HZ(unused, mhz) ((uint64_t)mhz * 1000000)
-
-/*
- * Compute the idle cnt percentage for a given speed.
- */
-#define CPUDRV_PM_IDLE_CNT_PERCENT(hwm, speeds, i) \
- (100 - (((100 - hwm) * speeds[0]) / speeds[i]))
-
-/*
- * Compute the user cnt percentage for a given speed.
- */
-#define CPUDRV_PM_USER_CNT_PERCENT(hwm, speeds, i) \
- ((hwm * speeds[i]) / speeds[i - 1]);
-
-/*
- * pm-components property defintions for this platform.
- *
- * Fully constructed pm-components property should be an array of
- * strings that look something like:
- *
- * pmc[0] = "NAME=CPU Speed"
- * pmc[1] = "1=2800MHz"
- * pmc[2] = "2=3200MHz"
- *
- * The amount of memory needed for each string is:
- * digits for power level + '=' + digits for freq + 'MHz' + '\0'
- */
-#define CPUDRV_PM_COMP_SIZE() \
- (CPUDRV_PM_COMP_MAX_DIG + 1 + CPUDRV_PM_COMP_MAX_DIG + 3 + 1);
-#define CPUDRV_PM_COMP_SPEED(cpupm, cur_spd) cur_spd->speed;
-#define CPUDRV_PM_COMP_SPRINT(pmc, cpupm, cur_spd, comp_spd) \
- (void) sprintf(pmc, "%d=%dMHz", cur_spd->pm_level, comp_spd);
-
-extern void cpudrv_pm_set_topspeed(void *, int);
-extern int cpudrv_pm_get_topspeed(void *);
-extern void cpudrv_pm_redefine_topspeed(void *);
-extern void cpudrv_pm_ppc_notify_handler(ACPI_HANDLE, UINT32, void *);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SYS_CPUDRV_PLAT_H */
diff --git a/usr/src/uts/i86pc/sys/cpudrv_throttle.h b/usr/src/uts/i86pc/sys/cpudrv_throttle.h
new file mode 100644
index 0000000000..ae4d352c14
--- /dev/null
+++ b/usr/src/uts/i86pc/sys/cpudrv_throttle.h
@@ -0,0 +1,41 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _CPUDRV_THROTTLE_H
+#define _CPUDRV_THROTTLE_H
+
+#include <sys/cpudrv_mach.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+cpudrv_tstate_ops_t cpudrv_throttle_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CPUDRV_THROTTLE_H */
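
Editorial note, for illustration only: cpudrv_throttle_ops is an instance of the
cpudrv_tstate_ops_t interface defined in cpudrv_mach.h. The sketch below shows the shape such
a module takes; the function names, bodies, and label are placeholders, not the contents of
cpudrv_throttle.c.

#include <sys/cpudrv_throttle.h>

/*ARGSUSED*/
static int
example_throttle_init(cpudrv_devstate_t *cpudsp)
{
	/* cache _PTC/_TSS/_TSD data and join a T-state domain here */
	return (0);
}

/*ARGSUSED*/
static void
example_throttle_fini(cpudrv_devstate_t *cpudsp)
{
	/* release cached ACPI data and domain membership here */
}

/*ARGSUSED*/
static int
example_throttle(cpudrv_devstate_t *cpudsp, uint32_t throtl_lvl)
{
	/* write the selected T-state's ts_ctrl value via the _PTC register */
	return (0);
}

cpudrv_tstate_ops_t example_throttle_ops = {
	"Example Throttling",
	example_throttle_init,
	example_throttle_fini,
	example_throttle
};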
diff --git a/usr/src/uts/i86pc/sys/cpupm.h b/usr/src/uts/i86pc/sys/cpupm.h
index eb32f3e494..2510a0fb60 100644
--- a/usr/src/uts/i86pc/sys/cpupm.h
+++ b/usr/src/uts/i86pc/sys/cpupm.h
@@ -19,15 +19,13 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _CPUPM_H
#define _CPUPM_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -51,6 +49,15 @@ typedef struct cpupm_cpu_dependency {
} cpupm_cpu_dependency_t;
/*
+ * If any states are added, then make sure to add them to
+ * CPUPM_ALL_STATES.
+ */
+#define CPUPM_NO_STATES 0x00
+#define CPUPM_P_STATES 0x01
+#define CPUPM_T_STATES 0x02
+#define CPUPM_ALL_STATES (CPUPM_P_STATES | CPUPM_T_STATES)
+
+/*
* Callbacks used for CPU power management.
*/
extern void (*cpupm_rebuild_cpu_domains)(void);
@@ -71,7 +78,8 @@ extern void cpupm_free_cpu_dependencies();
*
*/
extern boolean_t cpupm_is_ready();
-extern void cpupm_enable(boolean_t);
+extern boolean_t cpupm_is_enabled(uint32_t);
+extern void cpupm_disable(uint32_t);
extern void cpupm_post_startup();
#ifdef __cplusplus
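
Editorial note, for illustration only: the new state mask replaces the old boolean
cpupm_enable(B_FALSE) call, so a caller can disable just the class of states that failed and
later test what is left. The helper below is hypothetical and only demonstrates the intended
bitmask usage.

#include <sys/cpupm.h>

/*
 * Hypothetical usage: P-state setup failed, so disable only P-states
 * and report whether any power management states remain enabled.
 */
static boolean_t
example_pm_still_usable(void)
{
	cpupm_disable(CPUPM_P_STATES);

	return (cpupm_is_enabled(CPUPM_P_STATES) ||
	    cpupm_is_enabled(CPUPM_T_STATES));
}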
diff --git a/usr/src/uts/i86pc/sys/pwrnow.h b/usr/src/uts/i86pc/sys/pwrnow.h
index 1ea3ccf66d..1e3cc24e3f 100644
--- a/usr/src/uts/i86pc/sys/pwrnow.h
+++ b/usr/src/uts/i86pc/sys/pwrnow.h
@@ -19,24 +19,22 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _PWRNOW_H
#define _PWRNOW_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-#include <sys/cpudrv.h>
+#include <sys/cpudrv_mach.h>
#ifdef __cplusplus
extern "C" {
#endif
-extern int pwrnow_init(cpudrv_devstate_t *);
-extern void pwrnow_fini(cpudrv_devstate_t *);
-extern int pwrnow_power(cpudrv_devstate_t *, uint32_t);
+boolean_t pwrnow_supported();
+
+cpudrv_pstate_ops_t pwrnow_ops;
#ifdef __cplusplus
}
diff --git a/usr/src/uts/i86pc/sys/speedstep.h b/usr/src/uts/i86pc/sys/speedstep.h
index 32f593f679..f9debb2758 100644
--- a/usr/src/uts/i86pc/sys/speedstep.h
+++ b/usr/src/uts/i86pc/sys/speedstep.h
@@ -19,24 +19,22 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SPEEDSTEP_H
#define _SPEEDSTEP_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-#include <sys/cpudrv.h>
+#include <sys/cpudrv_mach.h>
#ifdef __cplusplus
extern "C" {
#endif
-extern int speedstep_init(cpudrv_devstate_t *);
-extern void speedstep_fini(cpudrv_devstate_t *);
-extern int speedstep_power(cpudrv_devstate_t *, uint32_t);
+boolean_t speedstep_supported(uint_t, uint_t);
+
+cpudrv_pstate_ops_t speedstep_ops;
#ifdef __cplusplus
}
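
Editorial note, for illustration only: with SpeedStep and PowerNow! now exported as ops tables
plus *_supported() probes, P-state module selection can be expressed roughly as below. The
vendor/family arguments to speedstep_supported() are assumptions based on its prototype; the
real selection logic lives in cpudrv_intel.c and cpudrv_amd.c.

#include <sys/speedstep.h>
#include <sys/pwrnow.h>

/*
 * Hypothetical selector: return the P-state ops table for the current
 * CPU, or NULL if neither technology is supported.
 */
static cpudrv_pstate_ops_t *
example_select_pstate_ops(uint_t vendor, uint_t family)
{
	if (speedstep_supported(vendor, family))
		return (&speedstep_ops);
	if (pwrnow_supported())
		return (&pwrnow_ops);
	return (NULL);
}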
diff --git a/usr/src/uts/sun4u/Makefile.files b/usr/src/uts/sun4u/Makefile.files
index 14f6737fd2..1d7d978a4f 100644
--- a/usr/src/uts/sun4u/Makefile.files
+++ b/usr/src/uts/sun4u/Makefile.files
@@ -23,8 +23,6 @@
# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
-# ident "%Z%%M% %I% %E% SMI"
-#
# This Makefile defines all file modules for the directory uts/sun4u
# and it's children. These are the source files which are sun4u
# "implementation architecture" dependent.
@@ -97,7 +95,7 @@ PSYCHO_PCI_OBJS += $(PCI_COMMON_OBJS) pcipsy.o
SCHIZO_PCI_OBJS += $(PCI_COMMON_OBJS) pcisch_asm.o pcisch.o pcix.o
SIMBA_PCI_OBJS += simba.o
DB21554_OBJS += db21554.o
-US_OBJS += cpudrv.o cpudrv_plat.o
+US_OBJS += cpudrv.o cpudrv_mach.o
POWER_OBJS += power.o
EPIC_OBJS += epic.o
GRBEEP_OBJS += grbeep.o
diff --git a/usr/src/uts/sun4u/io/cpudrv_plat.c b/usr/src/uts/sun4u/io/cpudrv_mach.c
index 724cc48479..a9ca3debb4 100644
--- a/usr/src/uts/sun4u/io/cpudrv_plat.c
+++ b/usr/src/uts/sun4u/io/cpudrv_mach.c
@@ -19,21 +19,20 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
- * CPU power management driver platform support.
+ * CPU power management driver support for sun4u.
*/
#include <sys/ddi.h>
#include <sys/sunddi.h>
-#include <sys/cpudrv_plat.h>
-#include <sys/cpudrv.h>
+#include <sys/cpudrv_mach.h>
#include <sys/machsystm.h>
+boolean_t cpudrv_enabled = B_TRUE;
+
/*
* Change CPU speed.
*/
@@ -55,39 +54,49 @@ cpudrv_pm_get_cpu_id(dev_info_t *dip, processorid_t *cpu_id)
}
/*
- * A noop for this platform.
+ * A noop for this machine type.
*/
boolean_t
-cpudrv_pm_all_instances_ready(void)
+cpudrv_pm_power_ready(void)
{
return (B_TRUE);
}
/*
- * A noop for this platform.
+ * A noop for this machine type.
*/
/* ARGSUSED */
boolean_t
-cpudrv_pm_is_throttle_thread(cpudrv_pm_t *cpupm)
+cpudrv_pm_is_governor_thread(cpudrv_pm_t *cpupm)
{
return (B_FALSE);
}
/*
- * A noop for this platform.
+ * A noop for this machine type.
*/
/*ARGSUSED*/
boolean_t
-cpudrv_pm_init_module(cpudrv_devstate_t *cpudsp)
+cpudrv_mach_pm_init(cpudrv_devstate_t *cpudsp)
{
return (B_TRUE);
}
/*
- * A noop for this platform.
+ * A noop for this machine type.
*/
/*ARGSUSED*/
void
-cpudrv_pm_free_module(cpudrv_devstate_t *cpudsp)
+cpudrv_mach_pm_free(cpudrv_devstate_t *cpudsp)
{
}
+
+/*
+ * On SPARC, all instances support power management unless attach fails.
+ * In the case of attach failure, cpudrv_enabled will be false.
+ */
+boolean_t
+cpudrv_pm_enabled()
+{
+ return (B_TRUE);
+}
diff --git a/usr/src/uts/sun4u/sys/cpudrv_plat.h b/usr/src/uts/sun4u/sys/cpudrv_mach.h
index e2f3946145..cc1536579d 100644
--- a/usr/src/uts/sun4u/sys/cpudrv_plat.h
+++ b/usr/src/uts/sun4u/sys/cpudrv_mach.h
@@ -19,16 +19,15 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#ifndef _SYS_CPUDRV_PLAT_H
-#define _SYS_CPUDRV_PLAT_H
-
-#pragma ident "%Z%%M% %I% %E% SMI"
+#ifndef _SYS_CPUDRV_MACH_H
+#define _SYS_CPUDRV_MACH_H
#include <sys/cpu_module.h>
+#include <sys/cpudrv.h>
#ifdef __cplusplus
extern "C" {
@@ -42,27 +41,34 @@ extern "C" {
#define CPUDRV_PM_XCALL_IS_READY(cpuid) (CPU_XCALL_READY(cpuid))
/*
- * CPU power management is always enabled for this platform.
+ * If a failure occurs during attach(), then CPU power management
+ * is disabled.
*/
-#define CPUDRV_PM_DISABLE()
+extern boolean_t cpudrv_enabled;
+
+#define CPUDRV_PM_DISABLE() (cpudrv_enabled = B_FALSE)
+
+#define CPUDRV_PM_DISABLED() (!cpudrv_enabled)
+
+#define CPUDRV_PM_POWER_ENABLED(cpudsp) cpudrv_pm_enabled()
/*
- * Currently, there is no throttling on this platform.
+ * Currently, there is no governor on sun4u.
*/
-#define CPUDRV_PM_RESET_THROTTLE_THREAD(cpupm)
+#define CPUDRV_PM_RESET_GOVERNOR_THREAD(cpupm)
/*
- * Currently, there is no need for a throttling handler on this platform.
+ * Currently, there is no need for a handler on sun4u.
*/
-#define CPUDRV_PM_INSTALL_TOPSPEED_CHANGE_HANDLER(cpudsp, dip)
+#define CPUDRV_PM_INSTALL_MAX_CHANGE_HANDLER(cpudsp, dip)
/*
- * There is no notion of changing topspeed on this platform.
+ * There is no notion of changing topspeed on sun4u.
*/
#define CPUDRV_PM_REDEFINE_TOPSPEED(dip)
/*
- * There are no PPM callbacks for this platform.
+ * There are no PPM callbacks for sun4u.
*/
#define CPUDRV_PM_SET_PPM_CALLBACKS()
@@ -103,7 +109,7 @@ extern "C" {
((hwm * speeds[i - 1]) / speeds[i])
/*
+ * pm-components property definitions for sun4u.
+ * pm-components property defintions for sun4u.
*
* Fully constructed pm-components property should be an array of
* strings that look something like:
@@ -132,8 +138,10 @@ extern "C" {
comp_spd, CPUDRV_PM_COMP_OTHER); \
}
+extern boolean_t cpudrv_pm_enabled(void);
+
#ifdef __cplusplus
}
#endif
-#endif /* _SYS_CPUDRV_PLAT_H */
+#endif /* _SYS_CPUDRV_MACH_H */
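
Editorial note, for illustration only: the machine-neutral driver can now guard itself with
the same macros on both architectures, with CPUDRV_PM_DISABLED() resolving to the
cpupm_is_enabled() checks on i86pc and to the cpudrv_enabled flag on sun4u. The fragment below
is a hypothetical guard, not the cpudrv.c attach code.

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpudrv_mach.h>

/*
 * Hypothetical attach-time guard: bail out early if no power
 * management states are enabled on this machine type.
 */
static int
example_attach_guard(void)
{
	if (CPUDRV_PM_DISABLED())
		return (DDI_FAILURE);	/* nothing to manage */

	return (DDI_SUCCESS);
}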