summaryrefslogtreecommitdiff
path: root/usr/src/uts/common
diff options
context:
space:
mode:
Diffstat (limited to 'usr/src/uts/common')
-rw-r--r--usr/src/uts/common/Makefile.files1
-rw-r--r--usr/src/uts/common/cpr/cpr_driver.c131
-rw-r--r--usr/src/uts/common/cpr/cpr_dump.c51
-rw-r--r--usr/src/uts/common/cpr/cpr_main.c692
-rw-r--r--usr/src/uts/common/cpr/cpr_misc.c32
-rw-r--r--usr/src/uts/common/cpr/cpr_mod.c191
-rw-r--r--usr/src/uts/common/cpr/cpr_stat.c25
-rw-r--r--usr/src/uts/common/cpr/cpr_uthread.c36
-rw-r--r--usr/src/uts/common/io/asy.c871
-rw-r--r--usr/src/uts/common/io/audio/sada/drv/audio810/audio810.c258
-rw-r--r--usr/src/uts/common/io/fdc.c111
-rw-r--r--usr/src/uts/common/io/i8042.c72
-rw-r--r--usr/src/uts/common/io/kb8042/kb8042.c90
-rw-r--r--usr/src/uts/common/io/kb8042/kb8042.h12
-rw-r--r--usr/src/uts/common/io/pci-ide/pci-ide.c74
-rw-r--r--usr/src/uts/common/io/pm.c359
-rw-r--r--usr/src/uts/common/io/ppm/ppm.c175
-rw-r--r--usr/src/uts/common/io/ppm/ppm_subr.c74
-rw-r--r--usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c59
-rwxr-xr-xusr/src/uts/common/io/srn.c563
-rwxr-xr-xusr/src/uts/common/io/srn.conf27
-rw-r--r--usr/src/uts/common/io/usb/hcd/ehci/ehci.c7
-rw-r--r--usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c3
-rw-r--r--usr/src/uts/common/io/usb/hcd/openhci/ohci.c4
-rw-r--r--usr/src/uts/common/os/callb.c11
-rw-r--r--usr/src/uts/common/os/cpu.c21
-rw-r--r--usr/src/uts/common/os/sunpci.c283
-rw-r--r--usr/src/uts/common/os/sunpm.c193
-rw-r--r--usr/src/uts/common/sys/Makefile1
-rw-r--r--usr/src/uts/common/sys/asy.h18
-rw-r--r--usr/src/uts/common/sys/audio/impl/audio810_impl.h14
-rw-r--r--usr/src/uts/common/sys/cpr.h49
-rw-r--r--usr/src/uts/common/sys/cpuvar.h1
-rw-r--r--usr/src/uts/common/sys/dktp/cmdk.h24
-rw-r--r--usr/src/uts/common/sys/dktp/dadk.h6
-rw-r--r--usr/src/uts/common/sys/epm.h118
-rw-r--r--usr/src/uts/common/sys/pm.h38
-rw-r--r--usr/src/uts/common/sys/ppmvar.h13
-rw-r--r--usr/src/uts/common/sys/rtc.h12
-rw-r--r--usr/src/uts/common/sys/srn.h79
-rw-r--r--usr/src/uts/common/sys/sunddi.h6
-rw-r--r--usr/src/uts/common/sys/uadmin.h17
-rw-r--r--usr/src/uts/common/syscall/uadmin.c12
43 files changed, 4068 insertions, 766 deletions
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index 1575dd888f..21071a1742 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -616,6 +616,7 @@ OPTIONS_OBJS += options.o
WINLOCK_OBJS += winlockio.o
PM_OBJS += pm.o
+SRN_OBJS += srn.o
PSEUDO_OBJS += pseudonex.o
diff --git a/usr/src/uts/common/cpr/cpr_driver.c b/usr/src/uts/common/cpr/cpr_driver.c
index a23a9cbf7c..442473c7ca 100644
--- a/usr/src/uts/common/cpr/cpr_driver.c
+++ b/usr/src/uts/common/cpr/cpr_driver.c
@@ -45,6 +45,18 @@ extern int devi_attach(dev_info_t *, int);
static char *devi_string(dev_info_t *, char *);
static int cpr_is_real_device(dev_info_t *);
+/*
+ * Xen uses this code to suspend _all_ drivers quickly and easily.
+ * Suspend and Resume uses it for the same reason, but also has
+ * to contend with some platform specific code that Xen does not.
+ * It is also used as a test entry point for developers/testers to
+ * execute code without going through a complete suspend. So additions
+ * that have platform implications shall need #if[n]def's.
+ */
+#ifndef __xpv
+extern void i_cpr_save_configuration(dev_info_t *);
+extern void i_cpr_restore_configuration(dev_info_t *);
+#endif
/*
* Traverse the dev info tree:
@@ -70,22 +82,52 @@ cpr_suspend_devices(dev_info_t *dip)
devi_string(dip, buf));
ASSERT((DEVI(dip)->devi_cpr_flags & DCF_CPR_SUSPENDED) == 0);
- if (!i_ddi_devi_attached(dip))
+#ifndef __xpv
+ i_cpr_save_configuration(dip);
+#endif
+
+
+ if (!i_ddi_devi_attached(dip)) {
error = DDI_FAILURE;
- else
- error = devi_detach(dip, DDI_SUSPEND);
+ } else {
+#ifndef __xpv
+ if (cpr_test_point != DEVICE_SUSPEND_TO_RAM ||
+ (cpr_test_point == DEVICE_SUSPEND_TO_RAM &&
+ cpr_device == ddi_driver_major(dip))) {
+#endif
+ error = devi_detach(dip, DDI_SUSPEND);
+#ifndef __xpv
+ } else {
+ error = DDI_SUCCESS;
+ }
+#endif
+ }
- if (error == DDI_SUCCESS)
+ if (error == DDI_SUCCESS) {
DEVI(dip)->devi_cpr_flags |= DCF_CPR_SUSPENDED;
+ }
+
else {
CPR_DEBUG(CPR_DEBUG2,
"WARNING: Unable to suspend device %s\n",
devi_string(dip, buf));
cpr_err(CE_WARN, "Unable to suspend device %s.",
- devi_string(dip, buf));
+ devi_string(dip, buf));
cpr_err(CE_WARN, "Device is busy or does not "
- "support suspend/resume.");
- return (ENXIO);
+ "support suspend/resume.");
+#ifndef __xpv
+ /*
+ * the device has failed to suspend; however,
+ * if cpr_test_point == FORCE_SUSPEND_TO_RAM
+ * after putting out the warning message above,
+ * we carry on as if suspending the device had
+ * been successful
+ */
+ if (cpr_test_point == FORCE_SUSPEND_TO_RAM)
+ DEVI(dip)->devi_cpr_flags |= DCF_CPR_SUSPENDED;
+ else
+#endif
+ return (ENXIO);
}
}
return (0);
@@ -124,13 +166,27 @@ cpr_resume_devices(dev_info_t *start, int resume_failed)
DEVI(dip)->devi_cpr_flags &= ~DCF_CPR_SUSPENDED;
/*
+ * Always attempt to restore device configuration before
+ * attempting resume
+ */
+#ifndef __xpv
+ i_cpr_restore_configuration(dip);
+#endif
+
+ /*
* There may be background attaches happening on devices
* that were not originally suspended by cpr, so resume
* only devices that were suspended by cpr. Also, stop
* resuming after the first resume failure, but traverse
- * the entire tree to clear the suspend flag.
+ * the entire tree to clear the suspend flag unless the
+ * FORCE_SUSPEND_TO_RAM test point is set.
*/
+#ifndef __xpv
+ if (did_suspend && (!error ||
+ cpr_test_point == FORCE_SUSPEND_TO_RAM)) {
+#else
if (did_suspend && !error) {
+#endif
CPR_DEBUG(CPR_DEBUG2, "Resuming device %s\n",
devi_string(dip, buf));
/*
@@ -146,17 +202,28 @@ cpr_resume_devices(dev_info_t *start, int resume_failed)
cpr_err(CE_WARN, "Skipping %s, device "
"not ready for resume",
devi_string(dip, buf));
- } else if (devi_attach(dip, DDI_RESUME) !=
- DDI_SUCCESS) {
- CPR_DEBUG(CPR_DEBUG2,
- "WARNING: Unable to resume device %s\n",
- devi_string(dip, buf));
- cpr_err(CE_WARN, "Unable to resume device %s",
- devi_string(dip, buf));
- error = ENXIO;
+#ifndef __xpv
+ } else if (cpr_test_point != DEVICE_SUSPEND_TO_RAM ||
+ (cpr_test_point == DEVICE_SUSPEND_TO_RAM &&
+ cpr_device == ddi_driver_major(dip))) {
+#else
+ } else {
+#endif
+ if (devi_attach(dip, DDI_RESUME) !=
+ DDI_SUCCESS) {
+ error = ENXIO;
+ }
}
}
+ if (error == ENXIO) {
+ CPR_DEBUG(CPR_DEBUG2,
+ "WARNING: Unable to resume device %s\n",
+ devi_string(dip, buf));
+ cpr_err(CE_WARN, "Unable to resume device %s",
+ devi_string(dip, buf));
+ }
+
error = cpr_resume_devices(ddi_get_child(dip), error);
last = dip;
}
@@ -176,10 +243,8 @@ devi_string(dev_info_t *devi, char *buf)
name = ddi_node_name(devi);
address = ddi_get_name_addr(devi);
- size = (name == NULL) ?
- strlen("<null name>") : strlen(name);
- size += (address == NULL) ?
- strlen("<null>") : strlen(address);
+ size = (name == NULL) ? strlen("<null name>") : strlen(name);
+ size += (address == NULL) ? strlen("<null>") : strlen(address);
/*
* Make sure that we don't over-run the buffer.
@@ -237,29 +302,3 @@ cpr_is_real_device(dev_info_t *dip)
return (1);
}
}
-
-/*
- * Power down the system.
- */
-void
-cpr_power_down(void)
-{
-#if defined(__sparc)
- /*
- * XXX This platform firmware implementation dependency
- * doesn't belong in common code!
- */
- int is_defined = 0;
- char *wordexists = "p\" power-off\" find nip swap l! ";
- char *req = "power-off";
-
- /*
- * is_defined has value -1 when defined
- */
- prom_interpret(wordexists, (uintptr_t)&is_defined, 0, 0, 0, 0);
- if (is_defined) {
- CPR_DEBUG(CPR_DEBUG1, "\ncpr: %s...\n", req);
- prom_interpret(req, 0, 0, 0, 0, 0);
- }
-#endif
-}
diff --git a/usr/src/uts/common/cpr/cpr_dump.c b/usr/src/uts/common/cpr/cpr_dump.c
index 99f5cea43d..28fee49bf9 100644
--- a/usr/src/uts/common/cpr/cpr_dump.c
+++ b/usr/src/uts/common/cpr/cpr_dump.c
@@ -54,17 +54,20 @@
#include <sys/ddi.h>
#include <sys/panic.h>
#include <sys/thread.h>
+#include <sys/note.h>
/* Local defines and variables */
#define BTOb(bytes) ((bytes) << 3) /* Bytes to bits, log2(NBBY) */
#define bTOB(bits) ((bits) >> 3) /* bits to Bytes, log2(NBBY) */
+#if defined(__sparc)
static uint_t cpr_pages_tobe_dumped;
static uint_t cpr_regular_pgs_dumped;
-
static int cpr_dump_regular_pages(vnode_t *);
static int cpr_count_upages(int, bitfunc_t);
static int cpr_compress_and_write(vnode_t *, uint_t, pfn_t, pgcnt_t);
+#endif
+
int cpr_flush_write(vnode_t *);
int cpr_contig_pages(vnode_t *, int);
@@ -75,6 +78,8 @@ extern size_t cpr_get_devsize(dev_t);
extern int i_cpr_dump_setup(vnode_t *);
extern int i_cpr_blockzero(char *, char **, int *, vnode_t *);
extern int cpr_test_mode;
+int cpr_setbit(pfn_t, int);
+int cpr_clrbit(pfn_t, int);
ctrm_t cpr_term;
@@ -87,13 +92,16 @@ int cpr_nbitmaps;
char *cpr_pagedata; /* page buffer for compression / tmp copy */
size_t cpr_pagedata_size; /* page buffer size in bytes */
+#if defined(__sparc)
static char *cpr_wptr; /* keep track of where to write to next */
static int cpr_file_bn; /* cpr state-file block offset */
static int cpr_disk_writes_ok;
static size_t cpr_dev_space = 0;
+#endif
char cpr_pagecopy[CPR_MAXCONTIG * MMU_PAGESIZE];
+#if defined(__sparc)
/*
* On some platforms bcopy may modify the thread structure
* during bcopy (eg, to prevent cpu migration). If the
@@ -194,6 +202,7 @@ cpr_write_header(vnode_t *vp)
struct cpr_dump_desc cdump;
pgcnt_t bitmap_pages;
pgcnt_t kpages, vpages, upages;
+ pgcnt_t cpr_count_kpages(int mapflag, bitfunc_t bitfunc);
cdump.cdd_magic = (uint_t)CPR_DUMP_MAGIC;
cdump.cdd_version = CPR_VERSION;
@@ -237,19 +246,20 @@ cpr_write_header(vnode_t *vp)
* Roundup will be done in the file allocation code.
*/
STAT->cs_nocomp_statefsz = sizeof (cdd_t) + sizeof (cmd_t) +
- (sizeof (cbd_t) * cdump.cdd_bitmaprec) +
- (sizeof (cpd_t) * cdump.cdd_dumppgsize) +
- mmu_ptob(cdump.cdd_dumppgsize + bitmap_pages);
+ (sizeof (cbd_t) * cdump.cdd_bitmaprec) +
+ (sizeof (cpd_t) * cdump.cdd_dumppgsize) +
+ mmu_ptob(cdump.cdd_dumppgsize + bitmap_pages);
/*
* If the estimated statefile is not big enough,
* go retry now to save un-necessary operations.
*/
if (!(CPR->c_flags & C_COMPRESSING) &&
- (STAT->cs_nocomp_statefsz > STAT->cs_est_statefsz)) {
+ (STAT->cs_nocomp_statefsz > STAT->cs_est_statefsz)) {
if (cpr_debug & (CPR_DEBUG1 | CPR_DEBUG7))
- prom_printf("cpr_write_header: STAT->cs_nocomp_statefsz > "
- "STAT->cs_est_statefsz\n");
+ prom_printf("cpr_write_header: "
+ "STAT->cs_nocomp_statefsz > "
+ "STAT->cs_est_statefsz\n");
return (ENOSPC);
}
@@ -272,10 +282,10 @@ cpr_write_terminator(vnode_t *vp)
/* count the last one (flush) */
cpr_term.real_statef_size = STAT->cs_real_statefsz +
- btod(cpr_wptr - cpr_buf) * DEV_BSIZE;
+ btod(cpr_wptr - cpr_buf) * DEV_BSIZE;
CPR_DEBUG(CPR_DEBUG9, "cpr_dump: Real Statefile Size: %ld\n",
- STAT->cs_real_statefsz);
+ STAT->cs_real_statefsz);
cpr_tod_get(&cpr_term.tm_shutdown);
@@ -382,6 +392,7 @@ cpr_write_statefile(vnode_t *vp)
return (error);
}
+#endif
/*
@@ -393,9 +404,13 @@ cpr_write_statefile(vnode_t *vp)
* - writes the remaining user pages
* - writes the kernel pages
*/
+#if defined(__x86)
+ _NOTE(ARGSUSED(0))
+#endif
int
cpr_dump(vnode_t *vp)
{
+#if defined(__sparc)
int error;
if (cpr_buf == NULL) {
@@ -484,11 +499,13 @@ cpr_dump(vnode_t *vp)
if (error = i_cpr_blockzero(cpr_buf, &cpr_wptr, &cpr_file_bn, vp))
return (error);
+#endif
return (0);
}
+#if defined(__sparc)
/*
* cpr_xwalk() is called many 100x with a range within kvseg or kvseg_reloc;
* a page-count from each range is accumulated at arg->pages.
@@ -633,7 +650,8 @@ cpr_sparse_seg_check(struct seg *seg)
for (; ste->st_seg; ste++) {
tseg = (ste->st_addrtype == KSEG_PTR_ADDR) ?
- *ste->st_seg : (struct seg *)ste->st_seg;
+ *ste->st_seg : (struct seg *)ste->st_seg;
+
if (seg == tseg)
return (ste);
}
@@ -690,7 +708,8 @@ cpr_count_kpages(int mapflag, bitfunc_t bitfunc)
CPR_DEBUG(CPR_DEBUG9, "cpr_count_kpages: kas_cnt=%ld\n", kas_cnt);
CPR_DEBUG(CPR_DEBUG7, "\ncpr_count_kpages: %ld pages, 0x%lx bytes\n",
- kas_cnt, mmu_ptob(kas_cnt));
+ kas_cnt, mmu_ptob(kas_cnt));
+
return (kas_cnt);
}
@@ -796,7 +815,7 @@ cpr_count_upages(int mapflag, bitfunc_t bitfunc)
extern struct vnode prom_ppages;
if (pp->p_vnode == NULL || PP_ISKAS(pp) ||
pp->p_vnode == &prom_ppages ||
- PP_ISFREE(pp) && PP_ISAGED(pp))
+ PP_ISFREE(pp) && PP_ISAGED(pp))
#else
if (pp->p_vnode == NULL || PP_ISKAS(pp) ||
PP_ISFREE(pp) && PP_ISAGED(pp))
@@ -813,9 +832,10 @@ cpr_count_upages(int mapflag, bitfunc_t bitfunc)
STAT->cs_upage2statef = dcnt;
CPR_DEBUG(CPR_DEBUG9, "cpr_count_upages: dirty=%ld total=%ld\n",
- dcnt, tcnt);
+ dcnt, tcnt);
CPR_DEBUG(CPR_DEBUG7, "cpr_count_upages: %ld pages, 0x%lx bytes\n",
- dcnt, mmu_ptob(dcnt));
+ dcnt, mmu_ptob(dcnt));
+
return (dcnt);
}
@@ -907,7 +927,7 @@ cpr_compress_and_write(vnode_t *vp, uint_t va, pfn_t pfn, pgcnt_t npg)
i_cpr_mapin(CPR->c_mapping_area, npg, pfn);
CPR_DEBUG(CPR_DEBUG3, "mapped-in %ld pages, vaddr 0x%p, pfn 0x%lx\n",
- npg, CPR->c_mapping_area, pfn);
+ npg, CPR->c_mapping_area, pfn);
/*
* Fill cpr page descriptor.
@@ -1181,3 +1201,4 @@ cpr_dump_regular_pages(vnode_t *vp)
CPR_DEBUG(CPR_DEBUG7, "cpr_dump_regular_pages() done.\n");
return (error);
}
+#endif
diff --git a/usr/src/uts/common/cpr/cpr_main.c b/usr/src/uts/common/cpr/cpr_main.c
index 6669469681..65e911cb11 100644
--- a/usr/src/uts/common/cpr/cpr_main.c
+++ b/usr/src/uts/common/cpr/cpr_main.c
@@ -25,7 +25,6 @@
#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* This module contains the guts of checkpoint-resume mechanism.
* All code in this module is platform independent.
@@ -51,6 +50,10 @@
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/promif.h>
+#include <sys/srn.h>
+#include <sys/cpr_impl.h>
+
+#define PPM(dip) ((dev_info_t *)DEVI(dip)->devi_pm_ppm)
extern struct cpr_terminator cpr_term;
@@ -63,18 +66,47 @@ extern void cpr_set_bitmap_size(void);
extern void cpr_stat_init();
extern void cpr_statef_close(void);
extern void flush_windows(void);
+extern void (*srn_signal)(int, int);
+extern void init_cpu_syscall(struct cpu *);
+extern void i_cpr_pre_resume_cpus();
+extern void i_cpr_post_resume_cpus();
extern int pm_powering_down;
-
-static int cpr_suspend(void);
-static int cpr_resume(void);
-static void cpr_suspend_init(void);
+extern kmutex_t srn_clone_lock;
+extern int srn_inuse;
+
+static int cpr_suspend(int);
+static int cpr_resume(int);
+static void cpr_suspend_init(int);
+#if defined(__x86)
+static int cpr_suspend_cpus(void);
+static void cpr_resume_cpus(void);
+#endif
+static int cpr_all_online(void);
+static void cpr_restore_offline(void);
cpr_time_t wholecycle_tv;
int cpr_suspend_succeeded;
pfn_t curthreadpfn;
int curthreadremapped;
+extern cpuset_t cpu_ready_set;
+extern void *(*cpu_pause_func)(void *);
+
+extern processorid_t i_cpr_bootcpuid(void);
+extern cpu_t *i_cpr_bootcpu(void);
+extern void tsc_adjust_delta(hrtime_t tdelta);
+extern void tsc_resume(void);
+extern int tsc_resume_in_cyclic;
+
+/*
+ * Set this variable to 1, to have device drivers resume in a
+ * uniprocessor environment. This is to allow drivers that assume
+ * that they resume on a UP machine to continue to work. Should be
+ * deprecated once the broken drivers are fixed
+ */
+int cpr_resume_uniproc = 0;
+
/*
* save or restore abort_enable; this prevents a drop
* to kadb or prom during cpr_resume_devices() when
@@ -101,23 +133,73 @@ cpr_sae(int stash)
* returned back to here and it then calls the resume routine.
*/
int
-cpr_main(void)
+cpr_main(int sleeptype)
{
- label_t saveq = ttolwp(curthread)->lwp_qsav;
- int rc;
+ int rc, rc2;
+ label_t saveq;
+ klwp_t *tlwp = ttolwp(curthread);
- if (rc = cpr_default_setup(1))
- return (rc);
+ if (sleeptype == CPR_TODISK) {
+ if ((rc = cpr_default_setup(1)) != 0)
+ return (rc);
+ ASSERT(tlwp);
+ saveq = tlwp->lwp_qsav;
+ }
+
+ if (sleeptype == CPR_TORAM) {
+ rc = cpr_suspend(sleeptype);
+ PMD(PMD_SX, ("cpr_suspend rets %x\n", rc))
+ if (rc == 0) {
+ int i_cpr_power_down(int sleeptype);
+
+ /*
+ * From this point on, we should be at a high
+ * spl, interrupts disabled, and all but one
+ * cpu's paused (effectively UP/single threaded).
+ * So this is where we want to put ASSERTS()
+ * to let us know otherwise.
+ */
+ ASSERT(cpus_paused());
+ /*
+ * Now do the work of actually putting this
+ * machine to sleep!
+ */
+ rc = i_cpr_power_down(sleeptype);
+ if (rc == 0) {
+ PMD(PMD_SX, ("back from succssful suspend\n"))
+ }
+ /*
+ * We do care about the return value from cpr_resume
+ * at this point, as it will tell us if one of the
+ * resume functions failed (cpr_resume_devices())
+ * However, for this to return and _not_ panic, means
+ * that we must be in one of the test functions. So
+ * check for that and return an appropriate message.
+ */
+ rc2 = cpr_resume(sleeptype);
+ if (rc2 != 0) {
+ ASSERT(cpr_test_point > 0);
+ cmn_err(CE_NOTE,
+ "cpr_resume returned non-zero: %d\n", rc2);
+ PMD(PMD_SX, ("cpr_resume rets %x\n", rc2))
+ }
+ ASSERT(!cpus_paused());
+ } else {
+ PMD(PMD_SX, ("failed suspend, resuming\n"))
+ rc = cpr_resume(sleeptype);
+ }
+ return (rc);
+ }
/*
- * Remember where we are for resume
+ * Remember where we are for resume after reboot
*/
- if (!setjmp(&ttolwp(curthread)->lwp_qsav)) {
+ if (!setjmp(&tlwp->lwp_qsav)) {
/*
* try to checkpoint the system, if failed return back
* to userland, otherwise power off.
*/
- rc = cpr_suspend();
+ rc = cpr_suspend(sleeptype);
if (rc || cpr_reusable_mode) {
/*
* We don't really want to go down, or
@@ -125,22 +207,28 @@ cpr_main(void)
* to put the system back to an operable state then
* return back to userland.
*/
- (void) cpr_resume();
+ PMD(PMD_SX, ("failed suspend, resuming\n"))
+ (void) cpr_resume(sleeptype);
+ PMD(PMD_SX, ("back from failed suspend resume\n"))
}
} else {
/*
* This is the resumed side of longjmp, restore the previous
* longjmp pointer if there is one so this will be transparent
* to the world.
+ * This path is only for CPR_TODISK, where we reboot
*/
- ttolwp(curthread)->lwp_qsav = saveq;
+ ASSERT(sleeptype == CPR_TODISK);
+ tlwp->lwp_qsav = saveq;
CPR->c_flags &= ~C_SUSPENDING;
CPR->c_flags |= C_RESUMING;
/*
* resume the system back to the original state
*/
- rc = cpr_resume();
+ rc = cpr_resume(sleeptype);
+ PMD(PMD_SX, ("back from successful suspend; resume rets %x\n",
+ rc))
}
(void) cpr_default_setup(0);
@@ -149,6 +237,8 @@ cpr_main(void)
}
+#if defined(__sparc)
+
/*
* check/disable or re-enable UFS logging
*/
@@ -180,8 +270,7 @@ cpr_log_status(int enable, int *svstat, vnode_t *vp)
*svstat = status;
if (cpr_debug & CPR_DEBUG5) {
mntpt = vfs_getmntpoint(vp->v_vfsp);
- CPR_DEBUG(CPR_DEBUG5,
- "%s: \"%s\", logging status = %d\n",
+ errp("%s: \"%s\", logging status = %d\n",
str, refstr_value(mntpt), status);
refstr_rele(mntpt);
};
@@ -207,11 +296,10 @@ cpr_log_status(int enable, int *svstat, vnode_t *vp)
} else {
if (cpr_debug & CPR_DEBUG5) {
mntpt = vfs_getmntpoint(vp->v_vfsp);
- CPR_DEBUG(CPR_DEBUG5,
- "%s: \"%s\", logging is now %sd\n",
+ errp("%s: \"%s\", logging is now %sd\n",
str, refstr_value(mntpt), able);
refstr_rele(mntpt);
- }
+ };
}
}
@@ -223,7 +311,6 @@ cpr_log_status(int enable, int *svstat, vnode_t *vp)
*svstat = -1;
}
-
/*
* enable/disable UFS logging on filesystems containing cpr_default_path
* and cpr statefile. since the statefile can be on any fs, that fs
@@ -234,6 +321,7 @@ cpr_log_status(int enable, int *svstat, vnode_t *vp)
* file outside of rootfs would cause errors during cprboot, plus cpr and
* fsck problems with the new fs if logging were enabled.
*/
+
static int
cpr_ufs_logging(int enable)
{
@@ -274,6 +362,7 @@ cpr_ufs_logging(int enable)
return (0);
}
+#endif
/*
@@ -288,6 +377,54 @@ cpr_lock_mgr(void (*service)(void))
(*service)();
}
+int
+cpr_suspend_cpus(void)
+{
+ cpu_t *bootcpu;
+ int ret = 0;
+ extern void *i_cpr_save_context(void *arg);
+
+ mutex_enter(&cpu_lock);
+
+ /*
+ * if bootcpu is offline bring it back online
+ */
+ bootcpu = i_cpr_bootcpu();
+
+ /*
+ * the machine could not have booted without a bootcpu
+ */
+ ASSERT(bootcpu != NULL);
+
+ /*
+ * bring all the offline cpus online
+ */
+ if ((ret = cpr_all_online())) {
+ mutex_exit(&cpu_lock);
+ return (ret);
+ }
+
+ /*
+ * Set the affinity to be the boot processor
+ * This is cleared in either cpr_resume_cpus() or cpr_unpause_cpus()
+ */
+ affinity_set(i_cpr_bootcpuid());
+
+ ASSERT(CPU->cpu_id == 0);
+
+ PMD(PMD_SX, ("curthread running on bootcpu\n"))
+
+ /*
+ * pause all other running CPUs and save the CPU state at the same time
+ */
+ cpu_pause_func = i_cpr_save_context;
+ pause_cpus(NULL);
+
+ mutex_exit(&cpu_lock);
+
+ return (0);
+}
+
/*
* Take the system down to a checkpointable state and write
* the state file, the following are sequentially executed:
@@ -301,41 +438,69 @@ cpr_lock_mgr(void (*service)(void))
* - suspend all devices
* - block intrpts
* - dump system state and memory to state file
+ * - SPARC code will not be called with CPR_TORAM, caller filters
*/
static int
-cpr_suspend(void)
+cpr_suspend(int sleeptype)
{
- int sf_realloc, rc, skt_rc, nverr;
+#if defined(__sparc)
+ int sf_realloc, nverr;
+#endif
+ int rc = 0;
+ int skt_rc = 0;
+ PMD(PMD_SX, ("cpr_suspend %x\n", sleeptype))
cpr_set_substate(C_ST_SUSPEND_BEGIN);
- cpr_suspend_init();
+ cpr_suspend_init(sleeptype);
cpr_save_time();
cpr_tod_get(&wholecycle_tv);
CPR_STAT_EVENT_START("Suspend Total");
+ i_cpr_alloc_cpus();
+
+#if defined(__sparc)
+ ASSERT(sleeptype == CPR_TODISK);
if (!cpr_reusable_mode) {
/*
- * We need to validate default file before fs functionality
- * is disabled.
+ * We need to validate default file before fs
+ * functionality is disabled.
*/
if (rc = cpr_validate_definfo(0))
return (rc);
}
-
i_cpr_save_machdep_info();
+#endif
+ PMD(PMD_SX, ("cpr_suspend: stop scans\n"))
/* Stop PM scans ASAP */
(void) callb_execute_class(CB_CL_CPR_PM, CB_CODE_CPR_CHKPT);
pm_dispatch_to_dep_thread(PM_DEP_WK_CPR_SUSPEND,
NULL, NULL, PM_DEP_WAIT, NULL, 0);
+#if defined(__sparc)
+ ASSERT(sleeptype == CPR_TODISK);
cpr_set_substate(C_ST_MP_OFFLINE);
if (rc = cpr_mp_offline())
return (rc);
+#endif
+ /*
+ * Ask Xorg to suspend the frame buffer, and wait for it to happen
+ */
+ mutex_enter(&srn_clone_lock);
+ if (srn_signal) {
+ PMD(PMD_SX, ("cpr_suspend: (*srn_signal)(..., "
+ "SRN_SUSPEND_REQ)\n"))
+ srn_inuse = 1; /* because *(srn_signal) cv_waits */
+ (*srn_signal)(SRN_TYPE_APM, SRN_SUSPEND_REQ);
+ srn_inuse = 0;
+ } else {
+ PMD(PMD_SX, ("cpr_suspend: srn_signal NULL\n"))
+ }
+ mutex_exit(&srn_clone_lock);
/*
* Ask the user threads to stop by themselves, but
@@ -346,11 +511,13 @@ cpr_suspend(void)
CPR_DEBUG(CPR_DEBUG1, "\nstopping user threads...");
CPR_STAT_EVENT_START(" stop users");
cpr_set_substate(C_ST_STOP_USER_THREADS);
+ PMD(PMD_SX, ("cpr_suspend: stop user threads\n"))
if (rc = cpr_stop_user_threads())
return (rc);
CPR_STAT_EVENT_END(" stop users");
CPR_DEBUG(CPR_DEBUG1, "done\n");
+ PMD(PMD_SX, ("cpr_suspend: save direct levels\n"))
pm_save_direct_levels();
/*
@@ -360,10 +527,14 @@ cpr_suspend(void)
*/
(void) callb_execute_class(CB_CL_CPR_PROMPRINTF, CB_CODE_CPR_CHKPT);
+ PMD(PMD_SX, ("cpr_suspend: send notice\n"))
+#ifndef DEBUG
cpr_send_notice();
if (cpr_debug)
prom_printf("\n");
+#endif
+ PMD(PMD_SX, ("cpr_suspend: POST USER callback\n"))
(void) callb_execute_class(CB_CL_CPR_POST_USER, CB_CODE_CPR_CHKPT);
/*
@@ -373,9 +544,12 @@ cpr_suspend(void)
* a kernel thread.
*/
cpr_set_substate(C_ST_PM_REATTACH_NOINVOL);
+ PMD(PMD_SX, ("cpr_suspend: reattach noinvol\n"))
if (!pm_reattach_noinvol())
return (ENXIO);
+#if defined(__sparc)
+ ASSERT(sleeptype == CPR_TODISK);
/*
* if ufs logging is enabled, we need to disable before
* stopping kernel threads so that ufs delete and roll
@@ -398,8 +572,8 @@ cpr_suspend(void)
alloc_statefile:
/*
- * If our last state was C_ST_DUMP_NOSPC, we're trying to realloc
- * the statefile, otherwise this is the first attempt.
+ * If our last state was C_ST_DUMP_NOSPC, we're trying to
+ * realloc the statefile, otherwise this is the first attempt.
*/
sf_realloc = (CPR->c_substate == C_ST_DUMP_NOSPC) ? 1 : 0;
@@ -407,7 +581,7 @@ alloc_statefile:
cpr_set_substate(C_ST_STATEF_ALLOC);
if (rc = cpr_alloc_statefile(sf_realloc)) {
if (sf_realloc)
- prom_printf("realloc failed\n");
+ errp("realloc failed\n");
return (rc);
}
CPR_STAT_EVENT_END(" alloc statefile");
@@ -415,9 +589,10 @@ alloc_statefile:
/*
* Sync the filesystem to preserve its integrity.
*
- * This sync is also used to flush out all B_DELWRI buffers (fs cache)
- * which are mapped and neither dirty nor referenced before
- * cpr_invalidate_pages destroys them. fsflush does similar thing.
+ * This sync is also used to flush out all B_DELWRI buffers
+ * (fs cache) which are mapped and neither dirty nor referenced
+ * before cpr_invalidate_pages destroys them.
+ * fsflush does similar thing.
*/
sync();
@@ -425,16 +600,18 @@ alloc_statefile:
* destroy all clean file mapped kernel pages
*/
CPR_STAT_EVENT_START(" clean pages");
- CPR_DEBUG(CPR_DEBUG1, "cleaning up mapped pages...");
+ CPR_DEBUG(CPR_DEBUG1, ("cleaning up mapped pages..."));
(void) callb_execute_class(CB_CL_CPR_VM, CB_CODE_CPR_CHKPT);
- CPR_DEBUG(CPR_DEBUG1, "done\n");
+ CPR_DEBUG(CPR_DEBUG1, ("done\n"));
CPR_STAT_EVENT_END(" clean pages");
+#endif
/*
* Hooks needed by lock manager prior to suspending.
* Refer to code for more comments.
*/
+ PMD(PMD_SX, ("cpr_suspend: lock mgr\n"))
cpr_lock_mgr(lm_cprsuspend);
/*
@@ -444,6 +621,7 @@ alloc_statefile:
CPR_DEBUG(CPR_DEBUG1, "suspending drivers...");
cpr_set_substate(C_ST_SUSPEND_DEVICES);
pm_powering_down = 1;
+ PMD(PMD_SX, ("cpr_suspend: suspending devices\n"))
rc = cpr_suspend_devices(ddi_root_node());
pm_powering_down = 0;
if (rc)
@@ -455,36 +633,61 @@ alloc_statefile:
* Stop all daemon activities
*/
cpr_set_substate(C_ST_STOP_KERNEL_THREADS);
+ PMD(PMD_SX, ("cpr_suspend: stopping kernel threads\n"))
if (skt_rc = cpr_stop_kernel_threads())
return (skt_rc);
+ PMD(PMD_SX, ("cpr_suspend: POST KERNEL callback\n"))
(void) callb_execute_class(CB_CL_CPR_POST_KERNEL, CB_CODE_CPR_CHKPT);
+ PMD(PMD_SX, ("cpr_suspend: reattach noinvol fini\n"))
pm_reattach_noinvol_fini();
cpr_sae(1);
+ PMD(PMD_SX, ("cpr_suspend: CPR CALLOUT callback\n"))
(void) callb_execute_class(CB_CL_CPR_CALLOUT, CB_CODE_CPR_CHKPT);
- /*
- * It's safer to do tod_get before we disable all intr.
- */
- CPR_STAT_EVENT_START(" write statefile");
+ if (sleeptype == CPR_TODISK) {
+ /*
+ * It's safer to do tod_get before we disable all intr.
+ */
+ CPR_STAT_EVENT_START(" write statefile");
+ }
/*
* it's time to ignore the outside world, stop the real time
* clock and disable any further intrpt activity.
*/
+ PMD(PMD_SX, ("cpr_suspend: handle xc\n"))
i_cpr_handle_xc(1); /* turn it on to disable xc assertion */
mutex_enter(&cpu_lock);
+ PMD(PMD_SX, ("cpr_suspend: cyclic suspend\n"))
cyclic_suspend();
mutex_exit(&cpu_lock);
- mon_clock_stop();
- mon_clock_unshare();
- mon_clock_start();
+ /*
+ * Due to the different methods of resuming the system between
+ * CPR_TODISK (boot cprboot on SPARC, which reloads kernel image)
+ * and CPR_TORAM (restart via reset into existing kernel image)
+ * cpus are not suspended and restored in the SPARC case, since it
+ * is necessary to restart the cpus and pause them before restoring
+ * the OBP image
+ */
+
+#if defined(__x86)
+
+ /* pause aux cpus */
+ PMD(PMD_SX, ("pause aux cpus\n"))
+
+ cpr_set_substate(C_ST_MP_PAUSED);
+ if ((rc = cpr_suspend_cpus()) != 0)
+ return (rc);
+#endif
+
+ PMD(PMD_SX, ("cpr_suspend: stop intr\n"))
i_cpr_stop_intr();
CPR_DEBUG(CPR_DEBUG1, "interrupt is stopped\n");
@@ -494,16 +697,28 @@ alloc_statefile:
* it must be up now.
*/
ASSERT(pm_cfb_is_up());
+ PMD(PMD_SX, ("cpr_suspend: prom suspend prepost\n"))
prom_suspend_prepost();
+#if defined(__sparc)
/*
* getting ready to write ourself out, flush the register
* windows to make sure that our stack is good when we
* come back on the resume side.
*/
flush_windows();
+#endif
/*
+ * For S3, we're done
+ */
+ if (sleeptype == CPR_TORAM) {
+ PMD(PMD_SX, ("cpr_suspend rets %x\n", rc))
+ cpr_set_substate(C_ST_NODUMP);
+ return (rc);
+ }
+#if defined(__sparc)
+ /*
* FATAL: NO MORE MEMORY ALLOCATION ALLOWED AFTER THIS POINT!!!
*
* The system is quiesced at this point, we are ready to either dump
@@ -535,7 +750,7 @@ alloc_statefile:
if (rc == ENOSPC) {
cpr_set_substate(C_ST_DUMP_NOSPC);
- (void) cpr_resume();
+ (void) cpr_resume(sleeptype);
goto alloc_statefile;
} else if (rc == 0) {
if (cpr_reusable_mode) {
@@ -544,9 +759,97 @@ alloc_statefile:
} else
rc = cpr_set_properties(1);
}
+#endif
+ PMD(PMD_SX, ("cpr_suspend: return %d\n", rc))
return (rc);
}
+void
+cpr_resume_cpus(void)
+{
+ /*
+ * this is a cut down version of start_other_cpus()
+ * just do the initialization to wake the other cpus
+ */
+
+#if defined(__x86)
+ /*
+ * Initialize our syscall handlers
+ */
+ init_cpu_syscall(CPU);
+
+#endif
+
+ i_cpr_pre_resume_cpus();
+
+ /*
+ * Restart the paused cpus
+ */
+ mutex_enter(&cpu_lock);
+ start_cpus();
+ mutex_exit(&cpu_lock);
+
+ /*
+ * clear the affinity set in cpr_suspend_cpus()
+ */
+ affinity_clear();
+
+ i_cpr_post_resume_cpus();
+
+ mutex_enter(&cpu_lock);
+ /*
+ * Restore this cpu to use the regular cpu_pause(), so that
+ * online and offline will work correctly
+ */
+ cpu_pause_func = NULL;
+
+ /*
+ * offline all the cpus that were brought online during suspend
+ */
+ cpr_restore_offline();
+
+ /*
+ * clear the affinity set in cpr_suspend_cpus()
+ */
+ affinity_clear();
+
+ mutex_exit(&cpu_lock);
+}
+
+void
+cpr_unpause_cpus(void)
+{
+ /*
+ * Now restore the system back to what it was before we suspended
+ */
+
+ PMD(PMD_SX, ("cpr_unpause_cpus: restoring system\n"))
+
+ mutex_enter(&cpu_lock);
+
+ /*
+ * Restore this cpu to use the regular cpu_pause(), so that
+ * online and offline will work correctly
+ */
+ cpu_pause_func = NULL;
+
+ /*
+ * Restart the paused cpus
+ */
+ start_cpus();
+
+ /*
+ * offline all the cpus that were brought online during suspend
+ */
+ cpr_restore_offline();
+
+ /*
+ * clear the affinity set in cpr_suspend_cpus()
+ */
+ affinity_clear();
+
+ mutex_exit(&cpu_lock);
+}
/*
* Bring the system back up from a checkpoint, at this point
@@ -559,7 +862,7 @@ alloc_statefile:
* - put all threads back on run queue
*/
static int
-cpr_resume(void)
+cpr_resume(int sleeptype)
{
cpr_time_t pwron_tv, *ctp;
char *str;
@@ -570,6 +873,7 @@ cpr_resume(void)
* that was suspended to a different level.
*/
CPR_DEBUG(CPR_DEBUG1, "\nEntering cpr_resume...\n");
+ PMD(PMD_SX, ("cpr_resume %x\n", sleeptype))
/*
* Note:
@@ -584,12 +888,14 @@ cpr_resume(void)
* and the one that caused the failure, if necessary."
*/
switch (CPR->c_substate) {
+#if defined(__sparc)
case C_ST_DUMP:
/*
* This is most likely a full-fledged cpr_resume after
* a complete and successful cpr suspend. Just roll back
* everything.
*/
+ ASSERT(sleeptype == CPR_TODISK);
break;
case C_ST_REUSABLE:
@@ -605,46 +911,60 @@ cpr_resume(void)
* is possible that a need for roll back of a state
* change arises between these exit points.
*/
+ ASSERT(sleeptype == CPR_TODISK);
goto rb_dump;
+#endif
+
+ case C_ST_NODUMP:
+ PMD(PMD_SX, ("cpr_resume: NODUMP\n"))
+ goto rb_nodump;
case C_ST_STOP_KERNEL_THREADS:
+ PMD(PMD_SX, ("cpr_resume: STOP_KERNEL_THREADS\n"))
goto rb_stop_kernel_threads;
case C_ST_SUSPEND_DEVICES:
+ PMD(PMD_SX, ("cpr_resume: SUSPEND_DEVICES\n"))
goto rb_suspend_devices;
+#if defined(__sparc)
case C_ST_STATEF_ALLOC:
+ ASSERT(sleeptype == CPR_TODISK);
goto rb_statef_alloc;
case C_ST_DISABLE_UFS_LOGGING:
+ ASSERT(sleeptype == CPR_TODISK);
goto rb_disable_ufs_logging;
+#endif
case C_ST_PM_REATTACH_NOINVOL:
+ PMD(PMD_SX, ("cpr_resume: REATTACH_NOINVOL\n"))
goto rb_pm_reattach_noinvol;
case C_ST_STOP_USER_THREADS:
+ PMD(PMD_SX, ("cpr_resume: STOP_USER_THREADS\n"))
goto rb_stop_user_threads;
+#if defined(__sparc)
case C_ST_MP_OFFLINE:
+ PMD(PMD_SX, ("cpr_resume: MP_OFFLINE\n"))
goto rb_mp_offline;
+#endif
+
+#if defined(__x86)
+ case C_ST_MP_PAUSED:
+ PMD(PMD_SX, ("cpr_resume: MP_PAUSED\n"))
+ goto rb_mp_paused;
+#endif
+
default:
+ PMD(PMD_SX, ("cpr_resume: others\n"))
goto rb_others;
}
rb_all:
/*
- * setup debugger trapping.
- */
- if (cpr_suspend_succeeded)
- i_cpr_set_tbr();
-
- /*
- * tell prom to monitor keys before the kernel comes alive
- */
- mon_clock_start();
-
- /*
* perform platform-dependent initialization
*/
if (cpr_suspend_succeeded)
@@ -659,33 +979,65 @@ rb_dump:
*
* DO NOT ADD ANY INITIALIZATION STEP BEFORE THIS POINT!!
*/
+rb_nodump:
+ /*
+ * If we did suspend to RAM, we didn't generate a dump
+ */
+ PMD(PMD_SX, ("cpr_resume: CPR DMA callback\n"))
(void) callb_execute_class(CB_CL_CPR_DMA, CB_CODE_CPR_RESUME);
- if (cpr_suspend_succeeded)
+ if (cpr_suspend_succeeded) {
+ PMD(PMD_SX, ("cpr_resume: CPR RPC callback\n"))
(void) callb_execute_class(CB_CL_CPR_RPC, CB_CODE_CPR_RESUME);
+ }
prom_resume_prepost();
+#if !defined(__sparc)
+ /*
+ * Need to sync the software clock with the hardware clock.
+ * On Sparc, this occurs in the sparc-specific cbe. However
+ * on x86 this needs to be handled _before_ we bring other cpu's
+ * back online. So we call a resume function in timestamp.c
+ */
+ if (tsc_resume_in_cyclic == 0)
+ tsc_resume();
+#endif
+
+#if defined(__sparc)
if (cpr_suspend_succeeded && (boothowto & RB_DEBUG))
kdi_dvec_cpr_restart();
+#endif
+
+
+#if defined(__x86)
+rb_mp_paused:
+ PT(PT_RMPO);
+ PMD(PMD_SX, ("resume aux cpus\n"))
+
+ if (cpr_suspend_succeeded) {
+ cpr_resume_cpus();
+ } else {
+ cpr_unpause_cpus();
+ }
+#endif
/*
* let the tmp callout catch up.
*/
+ PMD(PMD_SX, ("cpr_resume: CPR CALLOUT callback\n"))
(void) callb_execute_class(CB_CL_CPR_CALLOUT, CB_CODE_CPR_RESUME);
i_cpr_enable_intr();
- mon_clock_stop();
- mon_clock_share();
-
mutex_enter(&cpu_lock);
+ PMD(PMD_SX, ("cpr_resume: cyclic resume\n"))
cyclic_resume();
mutex_exit(&cpu_lock);
- mon_clock_start();
-
+ PMD(PMD_SX, ("cpr_resume: handle xc\n"))
i_cpr_handle_xc(0); /* turn it off to allow xc assertion */
+ PMD(PMD_SX, ("cpr_resume: CPR POST KERNEL callback\n"))
(void) callb_execute_class(CB_CL_CPR_POST_KERNEL, CB_CODE_CPR_RESUME);
/*
@@ -701,7 +1053,8 @@ rb_dump:
cpr_convert_promtime(&pwron_tv);
ctp = &cpr_term.tm_shutdown;
- CPR_STAT_EVENT_END_TMZ(" write statefile", ctp);
+ if (sleeptype == CPR_TODISK)
+ CPR_STAT_EVENT_END_TMZ(" write statefile", ctp);
CPR_STAT_EVENT_END_TMZ("Suspend Total", ctp);
CPR_STAT_EVENT_START_TMZ("Resume Total", &pwron_tv);
@@ -726,62 +1079,116 @@ rb_stop_kernel_threads:
* disabled before starting kernel threads, we don't want
* modunload thread to start changing device tree underneath.
*/
+ PMD(PMD_SX, ("cpr_resume: modunload disable\n"))
modunload_disable();
+ PMD(PMD_SX, ("cpr_resume: start kernel threads\n"))
cpr_start_kernel_threads();
rb_suspend_devices:
CPR_DEBUG(CPR_DEBUG1, "resuming devices...");
CPR_STAT_EVENT_START(" start drivers");
+ PMD(PMD_SX,
+ ("cpr_resume: rb_suspend_devices: cpr_resume_uniproc = %d\n",
+ cpr_resume_uniproc))
+
+#if defined(__x86)
+ /*
+ * If cpr_resume_uniproc is set, then pause all the other cpus
+ * apart from the current cpu, so that broken drivers that think
+ * that they are on a uniprocessor machine will resume
+ */
+ if (cpr_resume_uniproc) {
+ mutex_enter(&cpu_lock);
+ pause_cpus(NULL);
+ mutex_exit(&cpu_lock);
+ }
+#endif
+
/*
* The policy here is to continue resume everything we can if we did
* not successfully finish suspend; and panic if we are coming back
* from a fully suspended system.
*/
+ PMD(PMD_SX, ("cpr_resume: resume devices\n"))
rc = cpr_resume_devices(ddi_root_node(), 0);
cpr_sae(0);
str = "Failed to resume one or more devices.";
- if (rc && CPR->c_substate == C_ST_DUMP)
- cpr_err(CE_PANIC, str);
- else if (rc)
- cpr_err(CE_WARN, str);
+
+ if (rc) {
+ if (CPR->c_substate == C_ST_DUMP ||
+ (sleeptype == CPR_TORAM &&
+ CPR->c_substate == C_ST_NODUMP)) {
+ if (cpr_test_point == FORCE_SUSPEND_TO_RAM) {
+ PMD(PMD_SX, ("cpr_resume: resume device "
+ "warn\n"))
+ cpr_err(CE_WARN, str);
+ } else {
+ PMD(PMD_SX, ("cpr_resume: resume device "
+ "panic\n"))
+ cpr_err(CE_PANIC, str);
+ }
+ } else {
+ PMD(PMD_SX, ("cpr_resume: resume device warn\n"))
+ cpr_err(CE_WARN, str);
+ }
+ }
+
CPR_STAT_EVENT_END(" start drivers");
CPR_DEBUG(CPR_DEBUG1, "done\n");
+#if defined(__x86)
+ /*
+ * If cpr_resume_uniproc is set, then unpause all the processors
+ * that were paused before resuming the drivers
+ */
+ if (cpr_resume_uniproc) {
+ mutex_enter(&cpu_lock);
+ start_cpus();
+ mutex_exit(&cpu_lock);
+ }
+#endif
+
/*
* If we had disabled modunloading in this cpr resume cycle (i.e. we
* resumed from a state earlier than C_ST_SUSPEND_DEVICES), re-enable
* modunloading now.
*/
- if (CPR->c_substate != C_ST_SUSPEND_DEVICES)
+ if (CPR->c_substate != C_ST_SUSPEND_DEVICES) {
+ PMD(PMD_SX, ("cpr_resume: modload enable\n"))
modunload_enable();
+ }
/*
* Hooks needed by lock manager prior to resuming.
* Refer to code for more comments.
*/
+ PMD(PMD_SX, ("cpr_resume: lock mgr\n"))
cpr_lock_mgr(lm_cprresume);
+#if defined(__sparc)
/*
* This is a partial (half) resume during cpr suspend, we
* haven't yet given up on the suspend. On return from here,
* cpr_suspend() will try to reallocate and retry the suspend.
*/
if (CPR->c_substate == C_ST_DUMP_NOSPC) {
- mon_clock_stop();
return (0);
}
+ if (sleeptype == CPR_TODISK) {
rb_statef_alloc:
- cpr_statef_close();
+ cpr_statef_close();
rb_disable_ufs_logging:
- /*
- * if ufs logging was disabled, re-enable
- */
- (void) cpr_ufs_logging(1);
+ /*
+ * if ufs logging was disabled, re-enable
+ */
+ (void) cpr_ufs_logging(1);
+ }
+#endif
rb_pm_reattach_noinvol:
/*
@@ -795,44 +1202,64 @@ rb_pm_reattach_noinvol:
CPR->c_substate == C_ST_STATEF_ALLOC ||
CPR->c_substate == C_ST_SUSPEND_DEVICES ||
CPR->c_substate == C_ST_STOP_KERNEL_THREADS) {
+ PMD(PMD_SX, ("cpr_resume: reattach noinvol fini\n"))
pm_reattach_noinvol_fini();
}
+ PMD(PMD_SX, ("cpr_resume: CPR POST USER callback\n"))
(void) callb_execute_class(CB_CL_CPR_POST_USER, CB_CODE_CPR_RESUME);
+ PMD(PMD_SX, ("cpr_resume: CPR PROMPRINTF callback\n"))
(void) callb_execute_class(CB_CL_CPR_PROMPRINTF, CB_CODE_CPR_RESUME);
+ PMD(PMD_SX, ("cpr_resume: restore direct levels\n"))
pm_restore_direct_levels();
rb_stop_user_threads:
CPR_DEBUG(CPR_DEBUG1, "starting user threads...");
+ PMD(PMD_SX, ("cpr_resume: starting user threads\n"))
cpr_start_user_threads();
CPR_DEBUG(CPR_DEBUG1, "done\n");
+ /*
+ * Ask Xorg to resume the frame buffer, and wait for it to happen
+ */
+ mutex_enter(&srn_clone_lock);
+ if (srn_signal) {
+ PMD(PMD_SX, ("cpr_suspend: (*srn_signal)(..., "
+ "SRN_NORMAL_RESUME)\n"))
+ srn_inuse = 1; /* because (*srn_signal) cv_waits */
+ (*srn_signal)(SRN_TYPE_APM, SRN_NORMAL_RESUME);
+ srn_inuse = 0;
+ } else {
+ PMD(PMD_SX, ("cpr_suspend: srn_signal NULL\n"))
+ }
+ mutex_exit(&srn_clone_lock);
+#if defined(__sparc)
rb_mp_offline:
if (cpr_mp_online())
cpr_err(CE_WARN, "Failed to online all the processors.");
+#endif
rb_others:
- pm_dispatch_to_dep_thread(PM_DEP_WK_CPR_RESUME, NULL, NULL, PM_DEP_WAIT,
- NULL, 0);
+ PMD(PMD_SX, ("cpr_resume: dep thread\n"))
+ pm_dispatch_to_dep_thread(PM_DEP_WK_CPR_RESUME, NULL, NULL,
+ PM_DEP_WAIT, NULL, 0);
+ PMD(PMD_SX, ("cpr_resume: CPR PM callback\n"))
(void) callb_execute_class(CB_CL_CPR_PM, CB_CODE_CPR_RESUME);
- /*
- * now that all the drivers are going, kernel kbd driver can
- * take over, turn off prom monitor clock
- */
- mon_clock_stop();
-
if (cpr_suspend_succeeded) {
- cpr_restore_time();
cpr_stat_record_events();
}
- if (!cpr_reusable_mode)
+#if defined(__sparc)
+ if (sleeptype == CPR_TODISK && !cpr_reusable_mode)
cpr_clear_definfo();
+#endif
+ i_cpr_free_cpus();
CPR_DEBUG(CPR_DEBUG1, "Sending SIGTHAW...");
+ PMD(PMD_SX, ("cpr_resume: SIGTHAW\n"))
cpr_signal_user(SIGTHAW);
CPR_DEBUG(CPR_DEBUG1, "done\n");
@@ -854,11 +1281,12 @@ rb_others:
CPR_STAT_EVENT_PRINT();
#endif /* CPR_STAT */
+ PMD(PMD_SX, ("cpr_resume returns %x\n", rc))
return (rc);
}
static void
-cpr_suspend_init(void)
+cpr_suspend_init(int sleeptype)
{
cpr_time_t *ctp;
@@ -880,15 +1308,93 @@ cpr_suspend_init(void)
ctp = &cpr_term.tm_cprboot_end;
bzero(ctp, sizeof (*ctp));
+ if (sleeptype == CPR_TODISK) {
+ /*
+ * Lookup the physical address of our thread structure.
+ * This should never be invalid and the entire thread structure
+ * is expected to reside within the same pfn.
+ */
+ curthreadpfn = hat_getpfnum(kas.a_hat, (caddr_t)curthread);
+ ASSERT(curthreadpfn != PFN_INVALID);
+ ASSERT(curthreadpfn == hat_getpfnum(kas.a_hat,
+ (caddr_t)curthread + sizeof (kthread_t) - 1));
+ }
+
+ cpr_suspend_succeeded = 0;
+}
+
+/*
+ * bring all the offline cpus online
+ */
+static int
+cpr_all_online(void)
+{
+ int rc = 0;
+
+#ifdef __sparc
/*
- * Lookup the physical address of our thread structure. This should
- * never be invalid and the entire thread structure is expected
- * to reside within the same pfn.
+ * do nothing
*/
- curthreadpfn = hat_getpfnum(kas.a_hat, (caddr_t)curthread);
- ASSERT(curthreadpfn != PFN_INVALID);
- ASSERT(curthreadpfn == hat_getpfnum(kas.a_hat,
- (caddr_t)curthread + sizeof (kthread_t) - 1));
+#else
+
+ cpu_t *cp;
+
+ ASSERT(MUTEX_HELD(&cpu_lock));
+
+ cp = cpu_list;
+ do {
+ cp->cpu_cpr_flags &= ~CPU_CPR_ONLINE;
+ if (!CPU_ACTIVE(cp)) {
+ if ((rc = cpu_online(cp)) != 0)
+ break;
+ CPU_SET_CPR_FLAGS(cp, CPU_CPR_ONLINE);
+ }
+ } while ((cp = cp->cpu_next) != cpu_list);
+
+ if (rc) {
+ /*
+ * an online operation failed so offline the cpus
+ * that were onlined above to restore the system
+ * to its original state
+ */
+ cpr_restore_offline();
+ }
+#endif
+ return (rc);
+}
+
+/*
+ * offline all the cpus that were brought online by cpr_all_online()
+ */
+static void
+cpr_restore_offline(void)
+{
+
+#ifdef __sparc
+ /*
+ * do nothing
+ */
+#else
+
+ cpu_t *cp;
+ int rc = 0;
+
+ ASSERT(MUTEX_HELD(&cpu_lock));
+
+ cp = cpu_list;
+ do {
+ if (CPU_CPR_IS_ONLINE(cp)) {
+ rc = cpu_offline(cp, 0);
+ /*
+ * this offline should work, since the cpu was
+ * offline originally and was successfully onlined
+ * by cpr_all_online()
+ */
+ ASSERT(rc == 0);
+ cp->cpu_cpr_flags &= ~CPU_CPR_ONLINE;
+ }
+ } while ((cp = cp->cpu_next) != cpu_list);
+
+#endif
- cpr_suspend_succeeded = 0;
}
diff --git a/usr/src/uts/common/cpr/cpr_misc.c b/usr/src/uts/common/cpr/cpr_misc.c
index 936e3e9565..1ec0452c81 100644
--- a/usr/src/uts/common/cpr/cpr_misc.c
+++ b/usr/src/uts/common/cpr/cpr_misc.c
@@ -38,6 +38,7 @@
#include <sys/kmem.h>
#include <sys/cpr.h>
#include <sys/conf.h>
+#include <sys/machclock.h>
/*
* CPR miscellaneous support routines
@@ -61,11 +62,14 @@ extern char *cpr_pagedata;
extern int cpr_bufs_allocated;
extern int cpr_bitmaps_allocated;
+#if defined(__sparc)
static struct cprconfig cprconfig;
static int cprconfig_loaded = 0;
static int cpr_statefile_ok(vnode_t *, int);
static int cpr_p_online(cpu_t *, int);
static void cpr_save_mp_state(void);
+#endif
+
int cpr_is_ufs(struct vfs *);
char cpr_default_path[] = CPR_DEFAULT;
@@ -112,6 +116,10 @@ cpr_init(int fcn)
CPR->c_flags |= C_REUSABLE;
else
CPR->c_flags |= C_SUSPENDING;
+ if (fcn == AD_SUSPEND_TO_RAM || fcn == DEV_SUSPEND_TO_RAM) {
+ return (0);
+ }
+#if defined(__sparc)
if (fcn != AD_CPR_NOCOMPRESS && fcn != AD_CPR_TESTNOZ)
CPR->c_flags |= C_COMPRESSING;
/*
@@ -126,6 +134,7 @@ cpr_init(int fcn)
if (cpr_debug & CPR_DEBUG3)
cpr_err(CE_CONT, "Reserved virtual range from 0x%p for writing "
"kas\n", (void *)CPR->c_mapping_area);
+#endif
return (0);
}
@@ -157,6 +166,7 @@ cpr_done(void)
}
+#if defined(__sparc)
/*
* reads config data into cprconfig
*/
@@ -815,6 +825,7 @@ cpr_get_reusable_mode(void)
return (0);
}
+#endif
/*
* clock/time related routines
@@ -828,7 +839,7 @@ cpr_tod_get(cpr_time_t *ctp)
timestruc_t ts;
mutex_enter(&tod_lock);
- ts = tod_get();
+ ts = TODOP_GET(tod_ops);
mutex_exit(&tod_lock);
ctp->tv_sec = (time32_t)ts.tv_sec;
ctp->tv_nsec = (int32_t)ts.tv_nsec;
@@ -857,6 +868,7 @@ cpr_restore_time(void)
clkset(cpr_time_stamp);
}
+#if defined(__sparc)
/*
* CPU ONLINE/OFFLINE CODE
*/
@@ -1104,20 +1116,20 @@ cpr_reusable_mount_check(void)
}
/*
- * Force a fresh read of the cprinfo per uadmin 3 call
+ * return statefile offset in DEV_BSIZE units
*/
-void
-cpr_forget_cprconfig(void)
+int
+cpr_statefile_offset(void)
{
- cprconfig_loaded = 0;
+ return (cpr_statefile_is_spec() ? btod(CPR_SPEC_OFFSET) : 0);
}
-
/*
- * return statefile offset in DEV_BSIZE units
+ * Force a fresh read of the cprinfo per uadmin 3 call
*/
-int
-cpr_statefile_offset(void)
+void
+cpr_forget_cprconfig(void)
{
- return (cpr_statefile_is_spec() ? btod(CPR_SPEC_OFFSET) : 0);
+ cprconfig_loaded = 0;
}
+#endif
diff --git a/usr/src/uts/common/cpr/cpr_mod.c b/usr/src/uts/common/cpr/cpr_mod.c
index 365f102a2b..9358a6ab3a 100644
--- a/usr/src/uts/common/cpr/cpr_mod.c
+++ b/usr/src/uts/common/cpr/cpr_mod.c
@@ -42,16 +42,23 @@
#include <sys/autoconf.h>
#include <sys/machsystm.h>
-extern int i_cpr_is_supported(void);
+extern int i_cpr_is_supported(int sleeptype);
extern int cpr_is_ufs(struct vfs *);
extern int cpr_check_spec_statefile(void);
extern int cpr_reusable_mount_check(void);
-extern void cpr_forget_cprconfig(void);
extern int i_cpr_reusable_supported(void);
extern int i_cpr_reusefini(void);
-
extern struct mod_ops mod_miscops;
+extern int cpr_init(int);
+extern void cpr_done(void);
+extern void i_cpr_stop_other_cpus(void);
+extern int i_cpr_power_down();
+
+#if defined(__sparc)
+extern void cpr_forget_cprconfig(void);
+#endif
+
static struct modlmisc modlmisc = {
&mod_miscops, "checkpoint resume"
};
@@ -68,6 +75,9 @@ kmutex_t cpr_slock; /* cpr serial lock */
cpr_t cpr_state;
int cpr_debug;
int cpr_test_mode; /* true if called via uadmin testmode */
+int cpr_test_point = LOOP_BACK_NONE; /* cpr test point */
+int cpr_mp_enable = 0; /* set to 1 to enable MP suspend */
+major_t cpr_device = 0; /* major number for S3 on one device */
/*
* All the loadable module related code follows
@@ -100,9 +110,25 @@ _info(struct modinfo *modinfop)
return (mod_info(&modlinkage, modinfop));
}
+static
+int
+atoi(char *p)
+{
+ int i;
+
+ i = (*p++ - '0');
+
+ while (*p != '\0')
+ i = 10 * i + (*p++ - '0');
+
+ return (i);
+}
+
int
-cpr(int fcn)
+cpr(int fcn, void *mdep)
{
+
+#if defined(__sparc)
static const char noswapstr[] = "reusable statefile requires "
"that no swap area be configured.\n";
static const char blockstr[] = "reusable statefile must be "
@@ -112,11 +138,71 @@ cpr(int fcn)
"use uadmin A_FREEZE AD_REUSEFINI (uadmin %d %d) "
"to exit reusable statefile mode.\n";
static const char modefmt[] = "%s in reusable mode.\n";
+#endif
register int rc = 0;
- extern int cpr_init(int);
- extern void cpr_done(void);
+ int cpr_sleeptype;
/*
+ * First, reject commands that we don't (yet) support on this arch.
+ * This is easier to understand broken out like this than grotting
+ * through the second switch below.
+ */
+
+ switch (fcn) {
+#if defined(__sparc)
+ case AD_CHECK_SUSPEND_TO_RAM:
+ case AD_SUSPEND_TO_RAM:
+ return (ENOTSUP);
+ case AD_CHECK_SUSPEND_TO_DISK:
+ case AD_SUSPEND_TO_DISK:
+ case AD_CPR_REUSEINIT:
+ case AD_CPR_NOCOMPRESS:
+ case AD_CPR_FORCE:
+ case AD_CPR_REUSABLE:
+ case AD_CPR_REUSEFINI:
+ case AD_CPR_TESTZ:
+ case AD_CPR_TESTNOZ:
+ case AD_CPR_TESTHALT:
+ case AD_CPR_SUSP_DEVICES:
+ cpr_sleeptype = CPR_TODISK;
+ break;
+#endif
+#if defined(__x86)
+ case AD_CHECK_SUSPEND_TO_DISK:
+ case AD_SUSPEND_TO_DISK:
+ case AD_CPR_REUSEINIT:
+ case AD_CPR_NOCOMPRESS:
+ case AD_CPR_FORCE:
+ case AD_CPR_REUSABLE:
+ case AD_CPR_REUSEFINI:
+ case AD_CPR_TESTZ:
+ case AD_CPR_TESTNOZ:
+ case AD_CPR_TESTHALT:
+ case AD_CPR_PRINT:
+ return (ENOTSUP);
+ /* The DEV_* values need to be removed after sys-syspend is fixed */
+ case DEV_CHECK_SUSPEND_TO_RAM:
+ case DEV_SUSPEND_TO_RAM:
+ case AD_CPR_SUSP_DEVICES:
+ case AD_CHECK_SUSPEND_TO_RAM:
+ case AD_SUSPEND_TO_RAM:
+ case AD_LOOPBACK_SUSPEND_TO_RAM_PASS:
+ case AD_LOOPBACK_SUSPEND_TO_RAM_FAIL:
+ case AD_FORCE_SUSPEND_TO_RAM:
+ case AD_DEVICE_SUSPEND_TO_RAM:
+ /*
+ * if MP then do not support suspend to RAM, however override
+ * the MP restriction if cpr_mp_enable has been set
+ */
+ if (ncpus > 1 && cpr_mp_enable == 0)
+ return (ENOTSUP);
+ else
+ cpr_sleeptype = CPR_TORAM;
+ break;
+#endif
+ }
+#if defined(__sparc)
+ /*
* Need to know if we're in reusable mode, but we will likely have
* rebooted since REUSEINIT, so we have to get the info from the
* file system
@@ -125,8 +211,11 @@ cpr(int fcn)
cpr_reusable_mode = cpr_get_reusable_mode();
cpr_forget_cprconfig();
+#endif
+
switch (fcn) {
+#if defined(__sparc)
case AD_CPR_REUSEINIT:
if (!i_cpr_reusable_supported())
return (ENOTSUP);
@@ -188,7 +277,7 @@ cpr(int fcn)
break;
case AD_CPR_CHECK:
- if (!i_cpr_is_supported() || cpr_reusable_mode)
+ if (!i_cpr_is_supported(cpr_sleeptype) || cpr_reusable_mode)
return (ENOTSUP);
return (0);
@@ -196,6 +285,7 @@ cpr(int fcn)
CPR_STAT_EVENT_END("POST CPR DELAY");
cpr_stat_event_print();
return (0);
+#endif
case AD_CPR_DEBUG0:
cpr_debug = 0;
@@ -215,13 +305,55 @@ cpr(int fcn)
cpr_debug |= CPR_DEBUG6;
return (0);
+ /* The DEV_* values need to be removed after sys-syspend is fixed */
+ case DEV_CHECK_SUSPEND_TO_RAM:
+ case DEV_SUSPEND_TO_RAM:
+ case AD_CHECK_SUSPEND_TO_RAM:
+ case AD_SUSPEND_TO_RAM:
+ cpr_test_point = LOOP_BACK_NONE;
+ break;
+
+ case AD_LOOPBACK_SUSPEND_TO_RAM_PASS:
+ cpr_test_point = LOOP_BACK_PASS;
+ break;
+
+ case AD_LOOPBACK_SUSPEND_TO_RAM_FAIL:
+ cpr_test_point = LOOP_BACK_FAIL;
+ break;
+
+ case AD_FORCE_SUSPEND_TO_RAM:
+ cpr_test_point = FORCE_SUSPEND_TO_RAM;
+ break;
+
+ case AD_DEVICE_SUSPEND_TO_RAM:
+ cpr_test_point = DEVICE_SUSPEND_TO_RAM;
+ cpr_device = (major_t)atoi((char *)mdep);
+ break;
+
+ case AD_CPR_SUSP_DEVICES:
+ cpr_test_point = FORCE_SUSPEND_TO_RAM;
+ if (cpr_suspend_devices(ddi_root_node()) != DDI_SUCCESS)
+ cmn_err(CE_WARN,
+ "Some devices did not suspend "
+ "and may be unusable");
+ (void) cpr_resume_devices(ddi_root_node(), 0);
+ return (0);
+
default:
return (ENOTSUP);
}
- if (!i_cpr_is_supported() || !cpr_is_ufs(rootvfs))
+ if (!i_cpr_is_supported(cpr_sleeptype) ||
+ (cpr_sleeptype == CPR_TODISK && !cpr_is_ufs(rootvfs)))
return (ENOTSUP);
+ if (fcn == AD_CHECK_SUSPEND_TO_RAM ||
+ fcn == DEV_CHECK_SUSPEND_TO_RAM) {
+ ASSERT(i_cpr_is_supported(cpr_sleeptype));
+ return (0);
+ }
+
+#if defined(__sparc)
if (fcn == AD_CPR_REUSEINIT) {
if (mutex_tryenter(&cpr_slock) == 0)
return (EBUSY);
@@ -247,6 +379,7 @@ cpr(int fcn)
mutex_exit(&cpr_slock);
return (rc);
}
+#endif
/*
* acquire cpr serial lock and init cpr state structure.
@@ -254,23 +387,39 @@ cpr(int fcn)
if (rc = cpr_init(fcn))
return (rc);
+#if defined(__sparc)
if (fcn == AD_CPR_REUSABLE) {
if ((rc = i_cpr_check_cprinfo()) != 0) {
mutex_exit(&cpr_slock);
return (rc);
}
}
+#endif
/*
* Call the main cpr routine. If we are successful, we will be coming
* down from the resume side, otherwise we are still in suspend.
*/
cpr_err(CE_CONT, "System is being suspended");
- if (rc = cpr_main()) {
+ if (rc = cpr_main(cpr_sleeptype)) {
CPR->c_flags |= C_ERROR;
+ PMD(PMD_SX, ("cpr: Suspend operation failed.\n"))
cpr_err(CE_NOTE, "Suspend operation failed.");
} else if (CPR->c_flags & C_SUSPENDING) {
- extern void cpr_power_down();
+
+ /*
+ * In the suspend to RAM case, by the time we get
+ * control back we're already resumed
+ */
+ if (cpr_sleeptype == CPR_TORAM) {
+ PMD(PMD_SX, ("cpr: cpr CPR_TORAM done\n"))
+ cpr_done();
+ return (rc);
+ }
+
+#if defined(__sparc)
+
+ PMD(PMD_SX, ("cpr: Suspend operation succeeded.\n"))
/*
* Back from a successful checkpoint
*/
@@ -280,6 +429,7 @@ cpr(int fcn)
}
/* make sure there are no more changes to the device tree */
+ PMD(PMD_SX, ("cpr: dev tree freeze\n"))
devtree_freeze();
/*
@@ -288,7 +438,9 @@ cpr(int fcn)
* for us to be preempted, we're essentially single threaded
* from here on out.
*/
- stop_other_cpus();
+ PMD(PMD_SX, ("cpr: stop other cpus\n"))
+ i_cpr_stop_other_cpus();
+ PMD(PMD_SX, ("cpr: spl6\n"))
(void) spl6();
/*
@@ -296,24 +448,27 @@ cpr(int fcn)
* be called when there are no other threads that could be
* accessing devices
*/
+ PMD(PMD_SX, ("cpr: reset leaves\n"))
reset_leaves();
/*
- * If cpr_power_down() succeeds, it'll not return.
+ * If i_cpr_power_down() succeeds, it'll not return
*
* Drives with write-cache enabled need to flush
* their cache.
*/
- if (fcn != AD_CPR_TESTHALT)
- cpr_power_down();
-
+ if (fcn != AD_CPR_TESTHALT) {
+ PMD(PMD_SX, ("cpr: power down\n"))
+ (void) i_cpr_power_down(cpr_sleeptype);
+ }
+ ASSERT(cpr_sleeptype == CPR_TODISK);
+ /* currently CPR_TODISK comes back via a boot path */
CPR_DEBUG(CPR_DEBUG1, "(Done. Please Switch Off)\n");
halt(NULL);
/* NOTREACHED */
+#endif
}
- /*
- * For resuming: release resources and the serial lock.
- */
+ PMD(PMD_SX, ("cpr: cpr done\n"))
cpr_done();
return (rc);
}
diff --git a/usr/src/uts/common/cpr/cpr_stat.c b/usr/src/uts/common/cpr/cpr_stat.c
index 264bb4c9c7..9992f23c82 100644
--- a/usr/src/uts/common/cpr/cpr_stat.c
+++ b/usr/src/uts/common/cpr/cpr_stat.c
@@ -28,7 +28,6 @@
#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/pte.h>
-#include <sys/intreg.h>
#include <sys/cpr.h>
/*
@@ -111,7 +110,7 @@ cpr_stat_event_end(char *name, cpr_time_t *ctp)
cep->ce_sec.etime = tv.tv_sec;
cep->ce_sec.ltime = cep->ce_sec.etime - cep->ce_sec.stime;
cep->ce_sec.mtime = ((cep->ce_sec.mtime * (cep->ce_ntests - 1)) +
- cep->ce_sec.ltime) / cep->ce_ntests;
+ cep->ce_sec.ltime) / cep->ce_ntests;
/*
* calculate 100*milliseconds
@@ -158,10 +157,10 @@ cpr_stat_record_events()
STAT->cs_real_statefsz = cpr_term.real_statef_size;
cur_comprate = ((longlong_t)((longlong_t)
- STAT->cs_nocomp_statefsz*100)/
- STAT->cs_real_statefsz);
+ STAT->cs_nocomp_statefsz*100)/
+ STAT->cs_real_statefsz);
if (STAT->cs_min_comprate == 0 ||
- (STAT->cs_min_comprate > cur_comprate))
+ (STAT->cs_min_comprate > cur_comprate))
STAT->cs_min_comprate = cur_comprate;
}
}
@@ -203,25 +202,25 @@ cpr_stat_event_print()
*/
printf("\nMISCELLANEOUS STATISTICS INFORMATION (units in KBytes)\n\n");
printf("\tUser Pages w/o Swapspace:\t%8lu (%lu pages)\n",
- cp->cs_nosw_pages*PAGESIZE/1000, cp->cs_nosw_pages);
+ cp->cs_nosw_pages*PAGESIZE/1000, cp->cs_nosw_pages);
printf("\tTotal Upages Saved to Statefile:%8d (%d pages)\n",
- cp->cs_upage2statef*PAGESIZE/1000, cp->cs_upage2statef);
+ cp->cs_upage2statef*PAGESIZE/1000, cp->cs_upage2statef);
if (cp->cs_mclustsz)
printf("\tAverage Cluster Size:\t\t%8d (%d.%1d%1d pages)\n\n",
- cp->cs_mclustsz/1000, cp->cs_mclustsz/PAGESIZE,
- ((cp->cs_mclustsz%PAGESIZE)*10/PAGESIZE),
- ((cp->cs_mclustsz%PAGESIZE)*100/PAGESIZE)%10);
+ cp->cs_mclustsz/1000, cp->cs_mclustsz/PAGESIZE,
+ ((cp->cs_mclustsz%PAGESIZE)*10/PAGESIZE),
+ ((cp->cs_mclustsz%PAGESIZE)*100/PAGESIZE)%10);
printf("\tKernel Memory Size:\t\t%8lu\n", cp->cs_nocomp_statefsz/1000);
printf("\tEstimated Statefile Size:\t%8lu\n", cp->cs_est_statefsz/1000);
printf("\tActual Statefile Size:\t\t%8lu\n", cp->cs_real_statefsz/1000);
if (cp->cs_real_statefsz) {
int min = cp->cs_min_comprate;
int new = ((longlong_t)((longlong_t)
- cp->cs_nocomp_statefsz*100)/cp->cs_real_statefsz);
+ cp->cs_nocomp_statefsz*100)/cp->cs_real_statefsz);
printf("\tCompression Ratio:\t\t%5d.%1d%1d (worst %d.%1d%1d)\n",
- new/100, (new%100)/10, new%10,
- min/100, (min%100)/10, min%10);
+ new/100, (new%100)/10, new%10,
+ min/100, (min%100)/10, min%10);
}
}
diff --git a/usr/src/uts/common/cpr/cpr_uthread.c b/usr/src/uts/common/cpr/cpr_uthread.c
index 49ea1dfb1f..e2da80d5b8 100644
--- a/usr/src/uts/common/cpr/cpr_uthread.c
+++ b/usr/src/uts/common/cpr/cpr_uthread.c
@@ -59,7 +59,7 @@ cpr_signal_user(int sig)
for (p = practive; p; p = p->p_next) {
/* only user threads */
if (p->p_exec == NULL || p->p_stat == SZOMB ||
- p == proc_init || p == ttoproc(curthread))
+ p == proc_init || p == ttoproc(curthread))
continue;
mutex_enter(&p->p_lock);
@@ -87,7 +87,7 @@ cpr_stop_user_threads()
return (ESRCH);
cpr_stop_user(count * count * CPR_UTSTOP_WAIT);
} while (cpr_check_user_threads() &&
- (count < CPR_UTSTOP_RETRY || CPR->c_fcn != AD_CPR_FORCE));
+ (count < CPR_UTSTOP_RETRY || CPR->c_fcn != AD_CPR_FORCE));
return (0);
}
@@ -194,11 +194,11 @@ cpr_check_user_threads()
CPR_DEBUG(CPR_DEBUG1, "Suspend failed: "
"cannot stop uthread\n");
cpr_err(CE_WARN, "Suspend cannot stop "
- "process %s (%p:%x).",
- ttoproc(tp)->p_user.u_psargs, (void *)tp,
- tp->t_state);
+ "process %s (%p:%x).",
+ ttoproc(tp)->p_user.u_psargs, (void *)tp,
+ tp->t_state);
cpr_err(CE_WARN, "Process may be waiting for"
- " network request, please try again.");
+ " network request, please try again.");
}
CPR_DEBUG(CPR_DEBUG2, "cant stop t=%p state=%x pfg=%x "
@@ -284,8 +284,6 @@ int
cpr_stop_kernel_threads(void)
{
caddr_t name;
- kthread_id_t tp;
- proc_t *p;
callb_lock_table(); /* Note: we unlock the table in resume. */
@@ -298,6 +296,25 @@ cpr_stop_kernel_threads(void)
return (EBUSY);
}
+ CPR_DEBUG(CPR_DEBUG1, ("done\n"));
+ return (0);
+}
+
+/*
+ * Check to see that kernel threads are stopped.
+ * This should be called while CPU's are paused, and the caller is
+ * effectively running single user, or else we are virtually guaranteed
+ * to fail. The routine should not ASSERT on the paused state or spl
+ * level, as there may be a use for this to verify that things are running
+ * again.
+ */
+int
+cpr_threads_are_stopped(void)
+{
+ caddr_t name;
+ kthread_id_t tp;
+ proc_t *p;
+
/*
* We think we stopped all the kernel threads. Just in case
* someone is not playing by the rules, take a spin through
@@ -320,8 +337,7 @@ cpr_stop_kernel_threads(void)
return (EBUSY);
}
} while ((tp = tp->t_next) != curthread);
- mutex_exit(&pidlock);
- CPR_DEBUG(CPR_DEBUG1, "done\n");
+ mutex_exit(&pidlock);
return (0);
}
diff --git a/usr/src/uts/common/io/asy.c b/usr/src/uts/common/io/asy.c
index 1062cd28f8..12ff96c905 100644
--- a/usr/src/uts/common/io/asy.c
+++ b/usr/src/uts/common/io/asy.c
@@ -230,6 +230,15 @@ static const int standard_com_ports[] = {
static int *com_ports;
static uint_t num_com_ports;
+#ifdef DEBUG
+/*
+ * Set this to true to make the driver pretend to do a suspend. Useful
+ * for debugging suspend/resume code with a serial debugger.
+ */
+boolean_t asy_nosuspend = B_FALSE;
+#endif
+
+
/*
* Baud rate table. Indexed by #defines found in sys/termios.h
*/
@@ -272,6 +281,7 @@ ushort_t asyspdtab[] = {
static int asyrsrv(queue_t *q);
static int asyopen(queue_t *rq, dev_t *dev, int flag, int sflag, cred_t *cr);
static int asyclose(queue_t *q, int flag, cred_t *credp);
+static int asywputdo(queue_t *q, mblk_t *mp, boolean_t);
static int asywput(queue_t *q, mblk_t *mp);
struct module_info asy_info = {
@@ -392,7 +402,7 @@ _fini(void)
asy_addedsoft = 0;
/* free "motherboard-serial-ports" property if allocated */
if (com_ports != NULL && com_ports != (int *)standard_com_ports)
- ddi_prop_free(com_ports);
+ ddi_prop_free(com_ports);
com_ports = NULL;
mutex_destroy(&asy_soft_lock);
ddi_soft_state_fini(&asy_soft_state);
@@ -406,6 +416,59 @@ _info(struct modinfo *modinfop)
return (mod_info(&modlinkage, modinfop));
}
+void
+async_put_suspq(struct asycom *asy, mblk_t *mp)
+{
+ struct asyncline *async = asy->asy_priv;
+
+ ASSERT(mutex_owned(&asy->asy_excl));
+
+ if (async->async_suspqf == NULL)
+ async->async_suspqf = mp;
+ else
+ async->async_suspqb->b_next = mp;
+
+ async->async_suspqb = mp;
+}
+
+static mblk_t *
+async_get_suspq(struct asycom *asy)
+{
+ struct asyncline *async = asy->asy_priv;
+ mblk_t *mp;
+
+ ASSERT(mutex_owned(&asy->asy_excl));
+
+ if ((mp = async->async_suspqf) != NULL) {
+ async->async_suspqf = mp->b_next;
+ mp->b_next = NULL;
+ } else {
+ async->async_suspqb = NULL;
+ }
+ return (mp);
+}
+
+static void
+async_process_suspq(struct asycom *asy)
+{
+ struct asyncline *async = asy->asy_priv;
+ mblk_t *mp;
+
+ ASSERT(mutex_owned(&asy->asy_excl));
+
+ while ((mp = async_get_suspq(asy)) != NULL) {
+ queue_t *q;
+
+ q = async->async_ttycommon.t_writeq;
+ ASSERT(q != NULL);
+ mutex_exit(&asy->asy_excl);
+ (void) asywputdo(q, mp, B_FALSE);
+ mutex_enter(&asy->asy_excl);
+ }
+ async->async_flags &= ~ASYNC_DDI_SUSPENDED;
+ cv_broadcast(&async->async_flags_cv);
+}
+
static int
asy_get_bus_type(dev_info_t *devinfo)
{
@@ -494,7 +557,7 @@ asy_get_io_regnum_isa(dev_info_t *devi, struct asycom *asy)
if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
"reg", (caddr_t)&reglist, &reglen) != DDI_PROP_SUCCESS) {
cmn_err(CE_WARN, "asy_get_io_regnum: reg property not found "
- "in devices property list");
+ "in devices property list");
return (-1);
}
@@ -545,9 +608,6 @@ asydetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
struct asycom *asy;
struct asyncline *async;
- if (cmd != DDI_DETACH)
- return (DDI_FAILURE);
-
instance = ddi_get_instance(devi); /* find out which unit */
asy = ddi_get_soft_state(asy_soft_state, instance);
@@ -555,25 +615,104 @@ asydetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
return (DDI_FAILURE);
async = asy->asy_priv;
- DEBUGNOTE2(ASY_DEBUG_INIT, "asy%d: %s shutdown.",
- instance, asy_hw_name(asy));
+ switch (cmd) {
+ case DDI_DETACH:
+ DEBUGNOTE2(ASY_DEBUG_INIT, "asy%d: %s shutdown.",
+ instance, asy_hw_name(asy));
- /* cancel DTR hold timeout */
- if (async->async_dtrtid != 0) {
- (void) untimeout(async->async_dtrtid);
- async->async_dtrtid = 0;
- }
+ /* cancel DTR hold timeout */
+ if (async->async_dtrtid != 0) {
+ (void) untimeout(async->async_dtrtid);
+ async->async_dtrtid = 0;
+ }
+
+ /* remove all minor device node(s) for this device */
+ ddi_remove_minor_node(devi, NULL);
+
+ mutex_destroy(&asy->asy_excl);
+ mutex_destroy(&asy->asy_excl_hi);
+ cv_destroy(&async->async_flags_cv);
+ ddi_remove_intr(devi, 0, asy->asy_iblock);
+ ddi_regs_map_free(&asy->asy_iohandle);
+ asy_soft_state_free(asy);
+ DEBUGNOTE1(ASY_DEBUG_INIT, "asy%d: shutdown complete",
+ instance);
+ break;
+ case DDI_SUSPEND:
+ {
+ unsigned i;
+ uchar_t lsr;
+
+#ifdef DEBUG
+ if (asy_nosuspend)
+ return (DDI_SUCCESS);
+#endif
+ mutex_enter(&asy->asy_excl);
+
+ ASSERT(async->async_ops >= 0);
+ while (async->async_ops > 0)
+ cv_wait(&async->async_ops_cv, &asy->asy_excl);
+
+ async->async_flags |= ASYNC_DDI_SUSPENDED;
+
+ /* Wait for timed break and delay to complete */
+ while ((async->async_flags & (ASYNC_BREAK|ASYNC_DELAY))) {
+ if (cv_wait_sig(&async->async_flags_cv, &asy->asy_excl)
+ == 0) {
+ async_process_suspq(asy);
+ mutex_exit(&asy->asy_excl);
+ return (DDI_FAILURE);
+ }
+ }
+
+ /* Clear untimed break */
+ if (async->async_flags & ASYNC_OUT_SUSPEND)
+ async_resume_utbrk(async);
+
+ mutex_exit(&asy->asy_excl);
+
+ mutex_enter(&asy->asy_soft_sr);
+ mutex_enter(&asy->asy_excl);
+ if (async->async_wbufcid != 0) {
+ bufcall_id_t bcid = async->async_wbufcid;
+ async->async_wbufcid = 0;
+ async->async_flags |= ASYNC_RESUME_BUFCALL;
+ mutex_exit(&asy->asy_excl);
+ unbufcall(bcid);
+ mutex_enter(&asy->asy_excl);
+ }
+ mutex_enter(&asy->asy_excl_hi);
- /* remove all minor device node(s) for this device */
- ddi_remove_minor_node(devi, NULL);
+ /* Disable interrupts from chip */
+ ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + ICR, 0);
+ asy->asy_flags |= ASY_DDI_SUSPENDED;
+
+ /* Process remaining RX characters and RX errors, if any */
+ lsr = ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + LSR);
+ async_rxint(asy, lsr);
+
+ /* Wait for TX to drain */
+ for (i = 1000; i > 0; i--) {
+ lsr = ddi_get8(asy->asy_iohandle,
+ asy->asy_ioaddr + LSR);
+ if ((lsr & (XSRE | XHRE)) == (XSRE | XHRE))
+ break;
+ delay(drv_usectohz(10000));
+ }
+ if (i == 0)
+ cmn_err(CE_WARN,
+ "asy: transmitter wasn't drained before "
+ "driver was suspended");
+
+ mutex_exit(&asy->asy_excl_hi);
+ mutex_exit(&asy->asy_excl);
+ mutex_exit(&asy->asy_soft_sr);
+ break;
+ }
+ default:
+ return (DDI_FAILURE);
+ }
- mutex_destroy(&asy->asy_excl);
- mutex_destroy(&asy->asy_excl_hi);
- cv_destroy(&async->async_flags_cv);
- ddi_remove_intr(devi, 0, asy->asy_iblock);
- ddi_regs_map_free(&asy->asy_iohandle);
- asy_soft_state_free(asy);
- DEBUGNOTE1(ASY_DEBUG_INIT, "asy%d: shutdown complete", instance);
return (DDI_SUCCESS);
}
@@ -610,10 +749,73 @@ asyattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
DDI_STRICTORDER_ACC,
};
- if (cmd != DDI_ATTACH)
+ instance = ddi_get_instance(devi); /* find out which unit */
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ break;
+ case DDI_RESUME:
+ {
+ struct asyncline *async;
+
+#ifdef DEBUG
+ if (asy_nosuspend)
+ return (DDI_SUCCESS);
+#endif
+ asy = ddi_get_soft_state(asy_soft_state, instance);
+ if (asy == NULL)
+ return (DDI_FAILURE);
+
+ mutex_enter(&asy->asy_soft_sr);
+ mutex_enter(&asy->asy_excl);
+ mutex_enter(&asy->asy_excl_hi);
+
+ async = asy->asy_priv;
+ /* Disable interrupts */
+ ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + ICR, 0);
+ if (asy_identify_chip(devi, asy) != DDI_SUCCESS) {
+ mutex_exit(&asy->asy_excl_hi);
+ mutex_exit(&asy->asy_excl);
+ mutex_exit(&asy->asy_soft_sr);
+ cmn_err(CE_WARN, "Cannot identify UART chip at %p\n",
+ (void *)asy->asy_ioaddr);
+ return (DDI_FAILURE);
+ }
+ asy->asy_flags &= ~ASY_DDI_SUSPENDED;
+ if (async->async_flags & ASYNC_ISOPEN) {
+ asy_program(asy, ASY_INIT);
+ /* Kick off output */
+ if (async->async_ocnt > 0) {
+ async_resume(async);
+ } else {
+ mutex_exit(&asy->asy_excl_hi);
+ if (async->async_xmitblk)
+ freeb(async->async_xmitblk);
+ async->async_xmitblk = NULL;
+ async_start(async);
+ mutex_enter(&asy->asy_excl_hi);
+ }
+ ASYSETSOFT(asy);
+ }
+ mutex_exit(&asy->asy_excl_hi);
+ mutex_exit(&asy->asy_excl);
+ mutex_exit(&asy->asy_soft_sr);
+
+ mutex_enter(&asy->asy_excl);
+ if (async->async_flags & ASYNC_RESUME_BUFCALL) {
+ async->async_wbufcid = bufcall(async->async_wbufcds,
+ BPRI_HI, (void (*)(void *)) async_reioctl,
+ (void *)(intptr_t)async->async_common->asy_unit);
+ async->async_flags &= ~ASYNC_RESUME_BUFCALL;
+ }
+ async_process_suspq(asy);
+ mutex_exit(&asy->asy_excl);
+ return (DDI_SUCCESS);
+ }
+ default:
return (DDI_FAILURE);
+ }
- instance = ddi_get_instance(devi); /* find out which unit */
ret = ddi_soft_state_zalloc(asy_soft_state, instance);
if (ret != DDI_SUCCESS)
return (DDI_FAILURE);
@@ -773,7 +975,8 @@ asyattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
*/
mutex_init(&asy->asy_excl, NULL, MUTEX_DRIVER, asy_soft_iblock);
mutex_init(&asy->asy_excl_hi, NULL, MUTEX_DRIVER,
- (void *)asy->asy_iblock);
+ (void *)asy->asy_iblock);
+ mutex_init(&asy->asy_soft_sr, NULL, MUTEX_DRIVER, asy_soft_iblock);
mutex_enter(&asy->asy_excl);
mutex_enter(&asy->asy_excl_hi);
@@ -783,6 +986,7 @@ asyattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
mutex_exit(&asy->asy_excl);
mutex_destroy(&asy->asy_excl);
mutex_destroy(&asy->asy_excl_hi);
+ mutex_destroy(&asy->asy_soft_sr);
ddi_regs_map_free(&asy->asy_iohandle);
cmn_err(CE_CONT, "Cannot identify UART chip at %p\n",
(void *)asy->asy_ioaddr);
@@ -796,11 +1000,10 @@ asyattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + LCR, DLAB);
/* Set the baud rate to 9600 */
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + (DAT+DLL),
- asyspdtab[asy->asy_bidx] & 0xff);
+ asyspdtab[asy->asy_bidx] & 0xff);
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + (DAT+DLH),
- (asyspdtab[asy->asy_bidx] >> 8) & 0xff);
- ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + LCR,
- asy->asy_lcr);
+ (asyspdtab[asy->asy_bidx] >> 8) & 0xff);
+ ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + LCR, asy->asy_lcr);
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + MCR, mcr);
mutex_exit(&asy->asy_excl_hi);
@@ -821,12 +1024,12 @@ asyattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
ddi_regs_map_free(&asy->asy_iohandle);
mutex_exit(&asy_glob_lock);
cmn_err(CE_CONT,
- "Can not set soft interrupt for ASY driver\n");
+ "Can not set soft interrupt for ASY driver\n");
asy_soft_state_free(asy);
return (DDI_FAILURE);
}
mutex_init(&asy_soft_lock, NULL, MUTEX_DRIVER,
- (void *)asy->asy_iblock);
+ (void *)asy->asy_iblock);
asy_addedsoft++;
}
mutex_exit(&asy_glob_lock);
@@ -845,7 +1048,7 @@ asyattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
mutex_destroy(&asy->asy_excl_hi);
ddi_regs_map_free(&asy->asy_iohandle);
cmn_err(CE_CONT,
- "Can not set device interrupt for ASY driver\n");
+ "Can not set device interrupt for ASY driver\n");
asy_soft_state_free(asy);
return (DDI_FAILURE);
}
@@ -959,20 +1162,17 @@ asy_getproperty(dev_info_t *devi, struct asycom *asy, const char *property)
if (ret != DDI_PROP_SUCCESS) {
(void) sprintf(name, "com%c-%s", number, property);
len = sizeof (val);
- ret = GET_PROP(devi, name, DDI_PROP_CANSLEEP, val,
- &len);
+ ret = GET_PROP(devi, name, DDI_PROP_CANSLEEP, val, &len);
}
if (ret != DDI_PROP_SUCCESS) {
(void) sprintf(name, "tty0%c-%s", number, property);
len = sizeof (val);
- ret = GET_PROP(devi, name, DDI_PROP_CANSLEEP, val,
- &len);
+ ret = GET_PROP(devi, name, DDI_PROP_CANSLEEP, val, &len);
}
if (ret != DDI_PROP_SUCCESS) {
(void) sprintf(name, "port-%c-%s", letter, property);
len = sizeof (val);
- ret = GET_PROP(devi, name, DDI_PROP_CANSLEEP, val,
- &len);
+ ret = GET_PROP(devi, name, DDI_PROP_CANSLEEP, val, &len);
}
if (ret != DDI_PROP_SUCCESS)
return (-1); /* property non-existant */
@@ -1375,7 +1575,7 @@ again:
kmem_free(termiosp, len);
} else
cmn_err(CE_WARN,
- "asy: couldn't get ttymodes property!");
+ "asy: couldn't get ttymodes property!");
mutex_enter(&asy->asy_excl_hi);
/* eeprom mode support - respect properties */
@@ -1394,8 +1594,9 @@ again:
async->async_startc = CSTART;
async->async_stopc = CSTOP;
asy_program(asy, ASY_INIT);
- } else if ((async->async_ttycommon.t_flags & TS_XCLUDE) &&
- secpolicy_excl_open(cr) != 0) {
+ } else
+ if ((async->async_ttycommon.t_flags & TS_XCLUDE) &&
+ secpolicy_excl_open(cr) != 0) {
mutex_exit(&asy->asy_excl_hi);
mutex_exit(&asy->asy_excl);
return (EBUSY);
@@ -1427,17 +1628,18 @@ again:
mcr = ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + MCR);
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + MCR,
- mcr|(asy->asy_mcr&DTR));
+ mcr|(asy->asy_mcr&DTR));
DEBUGCONT3(ASY_DEBUG_INIT,
- "asy%dopen: \"Raise DTR on every open\": make mcr = %x, "
- "make TS_SOFTCAR = %s\n",
- unit, mcr|(asy->asy_mcr&DTR),
- (asy->asy_flags & ASY_IGNORE_CD) ? "ON" : "OFF");
+ "asy%dopen: \"Raise DTR on every open\": make mcr = %x, "
+ "make TS_SOFTCAR = %s\n",
+ unit, mcr|(asy->asy_mcr&DTR),
+ (asy->asy_flags & ASY_IGNORE_CD) ? "ON" : "OFF");
+
if (asy->asy_flags & ASY_IGNORE_CD) {
DEBUGCONT1(ASY_DEBUG_MODEM,
- "asy%dopen: ASY_IGNORE_CD set, set TS_SOFTCAR\n",
- unit);
+ "asy%dopen: ASY_IGNORE_CD set, set TS_SOFTCAR\n",
+ unit);
async->async_ttycommon.t_flags |= TS_SOFTCAR;
}
else
@@ -1448,10 +1650,11 @@ again:
*/
asy->asy_msr = ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + MSR);
DEBUGCONT3(ASY_DEBUG_INIT, "asy%dopen: TS_SOFTCAR is %s, "
- "MSR & DCD is %s\n",
- unit,
- (async->async_ttycommon.t_flags & TS_SOFTCAR) ? "set" : "clear",
- (asy->asy_msr & DCD) ? "set" : "clear");
+ "MSR & DCD is %s\n",
+ unit,
+ (async->async_ttycommon.t_flags & TS_SOFTCAR) ? "set" : "clear",
+ (asy->asy_msr & DCD) ? "set" : "clear");
+
if (asy->asy_msr & DCD)
async->async_flags |= ASYNC_CARR_ON;
else
@@ -1671,33 +1874,34 @@ nodrain:
* If line has HUPCL set or is incompletely opened fix up the modem
* lines.
*/
- DEBUGCONT1(ASY_DEBUG_MODEM,
- "asy%dclose: next check HUPCL flag\n", instance);
+ DEBUGCONT1(ASY_DEBUG_MODEM, "asy%dclose: next check HUPCL flag\n",
+ instance);
mutex_enter(&asy->asy_excl_hi);
if ((async->async_ttycommon.t_cflag & HUPCL) ||
(async->async_flags & ASYNC_WOPEN)) {
DEBUGCONT3(ASY_DEBUG_MODEM,
- "asy%dclose: HUPCL flag = %x, ASYNC_WOPEN flag = %x\n",
- instance,
- async->async_ttycommon.t_cflag & HUPCL,
- async->async_ttycommon.t_cflag & ASYNC_WOPEN);
+ "asy%dclose: HUPCL flag = %x, ASYNC_WOPEN flag = %x\n",
+ instance,
+ async->async_ttycommon.t_cflag & HUPCL,
+ async->async_ttycommon.t_cflag & ASYNC_WOPEN);
async->async_flags |= ASYNC_DTR_DELAY;
/* turn off DTR, RTS but NOT interrupt to 386 */
if (asy->asy_flags & (ASY_IGNORE_CD|ASY_RTS_DTR_OFF)) {
DEBUGCONT3(ASY_DEBUG_MODEM,
- "asy%dclose: ASY_IGNORE_CD flag = %x, "
- "ASY_RTS_DTR_OFF flag = %x\n",
- instance,
- asy->asy_flags & ASY_IGNORE_CD,
- asy->asy_flags & ASY_RTS_DTR_OFF);
- ddi_put8(asy->asy_iohandle,
- asy->asy_ioaddr + MCR, asy->asy_mcr|OUT2);
+ "asy%dclose: ASY_IGNORE_CD flag = %x, "
+ "ASY_RTS_DTR_OFF flag = %x\n",
+ instance,
+ asy->asy_flags & ASY_IGNORE_CD,
+ asy->asy_flags & ASY_RTS_DTR_OFF);
+
+ ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + MCR,
+ asy->asy_mcr|OUT2);
} else {
DEBUGCONT1(ASY_DEBUG_MODEM,
"asy%dclose: Dropping DTR and RTS\n", instance);
- ddi_put8(asy->asy_iohandle,
- asy->asy_ioaddr + MCR, OUT2);
+ ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + MCR,
+ OUT2);
}
async->async_dtrtid =
timeout((void (*)())async_dtr_free,
@@ -1707,10 +1911,9 @@ nodrain:
* If nobody's using it now, turn off receiver interrupts.
*/
if ((async->async_flags & (ASYNC_WOPEN|ASYNC_ISOPEN)) == 0) {
- icr = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + ICR);
+ icr = ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + ICR);
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + ICR,
- (icr & ~RIEN));
+ (icr & ~RIEN));
}
mutex_exit(&asy->asy_excl_hi);
out:
@@ -1750,9 +1953,12 @@ asy_isbusy(struct asycom *asy)
async = asy->asy_priv;
ASSERT(mutex_owned(&asy->asy_excl));
ASSERT(mutex_owned(&asy->asy_excl_hi));
+/*
+ * XXXX this should be recoded
+ */
return ((async->async_ocnt > 0) ||
- ((ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + LSR) & (XSRE|XHRE)) == 0));
+ ((ddi_get8(asy->asy_iohandle,
+ asy->asy_ioaddr + LSR) & (XSRE|XHRE)) == 0));
}
static void
@@ -1826,7 +2032,7 @@ asy_program(struct asycom *asy, int mode)
#ifdef DEBUG
instance = UNIT(async->async_dev);
DEBUGCONT2(ASY_DEBUG_PROCS,
- "asy%d_program: mode = 0x%08X, enter\n", instance, mode);
+ "asy%d_program: mode = 0x%08X, enter\n", instance, mode);
#endif
baudrate = BAUDINDEX(async->async_ttycommon.t_cflag);
@@ -1836,15 +2042,15 @@ asy_program(struct asycom *asy, int mode)
if (baudrate > CBAUD) {
async->async_ttycommon.t_cflag |= CIBAUDEXT;
async->async_ttycommon.t_cflag |=
- (((baudrate - CBAUD - 1) << IBSHIFT) & CIBAUD);
+ (((baudrate - CBAUD - 1) << IBSHIFT) & CIBAUD);
} else {
async->async_ttycommon.t_cflag &= ~CIBAUDEXT;
async->async_ttycommon.t_cflag |=
- ((baudrate << IBSHIFT) & CIBAUD);
+ ((baudrate << IBSHIFT) & CIBAUD);
}
c_flag = async->async_ttycommon.t_cflag &
- (CLOCAL|CREAD|CSTOPB|CSIZE|PARENB|PARODD|CBAUD|CBAUDEXT);
+ (CLOCAL|CREAD|CSTOPB|CSIZE|PARENB|PARODD|CBAUD|CBAUDEXT);
/* disable interrupts */
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + ICR, 0);
@@ -1855,7 +2061,7 @@ asy_program(struct asycom *asy, int mode)
(void) ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + ISR);
(void) ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + LSR);
asy->asy_msr = flush_reg = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + MSR);
+ asy->asy_ioaddr + MSR);
/*
* The device is programmed in the open sequence, if we
* have to hardware handshake, then this is a good time
@@ -1892,17 +2098,16 @@ asy_program(struct asycom *asy, int mode)
if (asy->asy_use_fifo == FIFO_ON) {
for (flush_reg = asy->asy_fifo_buf; flush_reg-- > 0; ) {
(void) ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + DAT);
+ asy->asy_ioaddr + DAT);
}
} else {
flush_reg = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + DAT);
+ asy->asy_ioaddr + DAT);
}
if (ocflags != (c_flag & ~CLOCAL) || mode == ASY_INIT) {
/* Set line control */
- lcr = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + LCR);
+ lcr = ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + LCR);
lcr &= ~(WLS0|WLS1|STB|PEN|EPS);
if (c_flag & CSTOPB)
@@ -1930,13 +2135,13 @@ asy_program(struct asycom *asy, int mode)
}
/* set the baud rate, unless it is "0" */
- ddi_put8(asy->asy_iohandle,
- asy->asy_ioaddr + LCR, DLAB);
+ ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + LCR, DLAB);
+
if (baudrate != 0) {
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + DAT,
- asyspdtab[baudrate] & 0xff);
+ asyspdtab[baudrate] & 0xff);
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + ICR,
- (asyspdtab[baudrate] >> 8) & 0xff);
+ (asyspdtab[baudrate] >> 8) & 0xff);
}
/* set the line control modes */
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + LCR, lcr);
@@ -1957,10 +2162,10 @@ asy_program(struct asycom *asy, int mode)
if (baudrate == 0)
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + MCR,
- (asy->asy_mcr & RTS) | OUT2);
+ (asy->asy_mcr & RTS) | OUT2);
else
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + MCR,
- asy->asy_mcr | OUT2);
+ asy->asy_mcr | OUT2);
/*
* Call the modem status interrupt handler to check for the carrier
@@ -1971,10 +2176,10 @@ asy_program(struct asycom *asy, int mode)
/* Set interrupt control */
DEBUGCONT3(ASY_DEBUG_MODM2,
- "asy%d_program: c_flag & CLOCAL = %x t_cflag & CRTSCTS = %x\n",
- instance,
- c_flag & CLOCAL,
- async->async_ttycommon.t_cflag & CRTSCTS);
+ "asy%d_program: c_flag & CLOCAL = %x t_cflag & CRTSCTS = %x\n",
+ instance, c_flag & CLOCAL,
+ async->async_ttycommon.t_cflag & CRTSCTS);
+
if ((c_flag & CLOCAL) && !(async->async_ttycommon.t_cflag & CRTSCTS))
/*
* direct-wired line ignores DCD, so we don't enable modem
@@ -2026,10 +2231,11 @@ asyintr(caddr_t argasy)
uchar_t interrupt_id, lsr;
interrupt_id = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + ISR) & 0x0F;
+ asy->asy_ioaddr + ISR) & 0x0F;
async = asy->asy_priv;
+
if ((async == NULL) || asy_addedsoft == 0 ||
- !(async->async_flags & (ASYNC_ISOPEN|ASYNC_WOPEN))) {
+ !(async->async_flags & (ASYNC_ISOPEN|ASYNC_WOPEN))) {
if (interrupt_id & NOINTERRUPT)
return (DDI_INTR_UNCLAIMED);
else {
@@ -2040,30 +2246,31 @@ asyintr(caddr_t argasy)
* reading modem status
*/
(void) ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + LSR);
+ asy->asy_ioaddr + LSR);
(void) ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + DAT);
+ asy->asy_ioaddr + DAT);
asy->asy_msr = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + MSR);
+ asy->asy_ioaddr + MSR);
return (DDI_INTR_CLAIMED);
}
}
+
mutex_enter(&asy->asy_excl_hi);
/*
* We will loop until the interrupt line is pulled low. asy
* interrupt is edge triggered.
*/
/* CSTYLED */
- for (;; interrupt_id = (ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + ISR) & 0x0F)) {
+ for (;; interrupt_id =
+ (ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + ISR) & 0x0F)) {
+
if (interrupt_id & NOINTERRUPT)
break;
ret_status = DDI_INTR_CLAIMED;
- DEBUGCONT1(ASY_DEBUG_INTR,
- "asyintr: interrupt_id = 0x%d\n", interrupt_id);
- lsr = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + LSR);
+ DEBUGCONT1(ASY_DEBUG_INTR, "asyintr: interrupt_id = 0x%d\n",
+ interrupt_id);
+ lsr = ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + LSR);
switch (interrupt_id) {
case RxRDY:
case RSTATUS:
@@ -2218,9 +2425,9 @@ async_rxint(struct asycom *asy, uchar_t lsr)
if (!(tp->t_cflag & CREAD)) {
while (lsr & (RCA|PARERR|FRMERR|BRKDET|OVRRUN)) {
(void) (ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + DAT) & 0xff);
+ asy->asy_ioaddr + DAT) & 0xff);
lsr = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + LSR);
+ asy->asy_ioaddr + LSR);
if (looplim-- < 0) /* limit loop */
break;
}
@@ -2232,7 +2439,7 @@ async_rxint(struct asycom *asy, uchar_t lsr)
s = 0; /* reset error status */
if (lsr & RCA) {
c = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + DAT) & 0xff;
+ asy->asy_ioaddr + DAT) & 0xff;
/*
* We handle XON/XOFF char if IXON is set,
@@ -2319,8 +2526,7 @@ async_rxint(struct asycom *asy, uchar_t lsr)
else
async->async_sw_overrun = 1;
check_looplim:
- lsr = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + LSR);
+ lsr = ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + LSR);
if (looplim-- < 0) /* limit loop */
break;
}
@@ -2355,19 +2561,19 @@ async_msint_retry:
/* this resets the interrupt */
msr = ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + MSR);
DEBUGCONT10(ASY_DEBUG_STATE,
- "async%d_msint call #%d:\n"
- " transition: %3s %3s %3s %3s\n"
- "current state: %3s %3s %3s %3s\n",
- instance,
- ++(asy->asy_msint_cnt),
- (msr & DCTS) ? "DCTS" : " ",
- (msr & DDSR) ? "DDSR" : " ",
- (msr & DRI) ? "DRI " : " ",
- (msr & DDCD) ? "DDCD" : " ",
- (msr & CTS) ? "CTS " : " ",
- (msr & DSR) ? "DSR " : " ",
- (msr & RI) ? "RI " : " ",
- (msr & DCD) ? "DCD " : " ");
+ "async%d_msint call #%d:\n"
+ " transition: %3s %3s %3s %3s\n"
+ "current state: %3s %3s %3s %3s\n",
+ instance,
+ ++(asy->asy_msint_cnt),
+ (msr & DCTS) ? "DCTS" : " ",
+ (msr & DDSR) ? "DDSR" : " ",
+ (msr & DRI) ? "DRI " : " ",
+ (msr & DDCD) ? "DDCD" : " ",
+ (msr & CTS) ? "CTS " : " ",
+ (msr & DSR) ? "DSR " : " ",
+ (msr & RI) ? "RI " : " ",
+ (msr & DCD) ? "DCD " : " ");
/* If CTS status is changed, do H/W output flow control */
if ((t_cflag & CRTSCTS) && (((asy->asy_msr ^ msr) & CTS) != 0))
@@ -2489,17 +2695,16 @@ begin:
async->async_ext = 0;
/* check for carrier up */
DEBUGCONT3(ASY_DEBUG_MODM2,
- "async%d_softint: asy_msr & DCD = %x, "
- "tp->t_flags & TS_SOFTCAR = %x\n",
- instance,
- asy->asy_msr & DCD,
- tp->t_flags & TS_SOFTCAR);
+ "async%d_softint: asy_msr & DCD = %x, "
+ "tp->t_flags & TS_SOFTCAR = %x\n",
+ instance, asy->asy_msr & DCD, tp->t_flags & TS_SOFTCAR);
+
if (asy->asy_msr & DCD) {
/* carrier present */
if ((async->async_flags & ASYNC_CARR_ON) == 0) {
DEBUGCONT1(ASY_DEBUG_MODM2,
- "async%d_softint: set ASYNC_CARR_ON\n",
- instance);
+ "async%d_softint: set ASYNC_CARR_ON\n",
+ instance);
async->async_flags |= ASYNC_CARR_ON;
if (async->async_flags & ASYNC_ISOPEN) {
mutex_exit(&asy->asy_excl_hi);
@@ -2517,9 +2722,9 @@ begin:
int flushflag;
DEBUGCONT1(ASY_DEBUG_MODEM,
- "async%d_softint: carrier dropped, "
- "so drop DTR\n",
- instance);
+ "async%d_softint: carrier dropped, "
+ "so drop DTR\n",
+ instance);
/*
* Carrier went away.
* Drop DTR, abort any output in
@@ -2528,60 +2733,62 @@ begin:
* notification upstream.
*/
val = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + MCR);
+ asy->asy_ioaddr + MCR);
ddi_put8(asy->asy_iohandle,
asy->asy_ioaddr + MCR, (val & ~DTR));
+
if (async->async_flags & ASYNC_BUSY) {
- DEBUGCONT0(ASY_DEBUG_BUSY,
+ DEBUGCONT0(ASY_DEBUG_BUSY,
"async_softint: "
"Carrier dropped. "
"Clearing async_ocnt\n");
- async->async_ocnt = 0;
+ async->async_ocnt = 0;
} /* if */
async->async_flags &= ~ASYNC_STOPPED;
if (async->async_flags & ASYNC_ISOPEN) {
- mutex_exit(&asy->asy_excl_hi);
- mutex_exit(&asy->asy_excl);
- (void) putctl(q, M_HANGUP);
- mutex_enter(&asy->asy_excl);
- DEBUGCONT1(ASY_DEBUG_MODEM,
- "async%d_softint: "
- "putctl(q, M_HANGUP)\n",
- instance);
- /*
- * Flush FIFO buffers
- * Any data left in there is invalid now
- */
- if (asy->asy_use_fifo == FIFO_ON)
- asy_reset_fifo(asy, FIFOTXFLSH);
- /*
- * Flush our write queue if we have one.
- *
- * If we're in the midst of close, then flush
- * everything. Don't leave stale ioctls lying
- * about.
- */
- flushflag = (async->async_flags &
- ASYNC_CLOSING) ? FLUSHALL : FLUSHDATA;
- flushq(tp->t_writeq, flushflag);
-
- bp = async->async_xmitblk; /* active msg */
- if (bp != NULL) {
- freeb(bp);
- async->async_xmitblk = NULL;
- }
+ mutex_exit(&asy->asy_excl_hi);
+ mutex_exit(&asy->asy_excl);
+ (void) putctl(q, M_HANGUP);
+ mutex_enter(&asy->asy_excl);
+ DEBUGCONT1(ASY_DEBUG_MODEM,
+ "async%d_softint: "
+ "putctl(q, M_HANGUP)\n",
+ instance);
+ /*
+ * Flush FIFO buffers
+ * Any data left in there is invalid now
+ */
+ if (asy->asy_use_fifo == FIFO_ON)
+ asy_reset_fifo(asy, FIFOTXFLSH);
+ /*
+ * Flush our write queue if we have one.
+ * If we're in the midst of close, then
+ * flush everything. Don't leave stale
+ * ioctls lying about.
+ */
+ flushflag = (async->async_flags &
+ ASYNC_CLOSING) ? FLUSHALL :
+ FLUSHDATA;
+ flushq(tp->t_writeq, flushflag);
+
+ /* active msg */
+ bp = async->async_xmitblk;
+ if (bp != NULL) {
+ freeb(bp);
+ async->async_xmitblk = NULL;
+ }
- mutex_enter(&asy->asy_excl_hi);
- async->async_flags &= ~ASYNC_BUSY;
- /*
- * This message warns of Carrier loss
- * with data left to transmit can hang the
- * system.
- */
- DEBUGCONT0(ASY_DEBUG_MODEM,
- "async_softint: Flushing to "
- "prevent HUPCL hanging\n");
+ mutex_enter(&asy->asy_excl_hi);
+ async->async_flags &= ~ASYNC_BUSY;
+ /*
+ * This message warns of Carrier loss
+ * with data left to transmit can hang
+ * the system.
+ */
+ DEBUGCONT0(ASY_DEBUG_MODEM,
+ "async_softint: Flushing to "
+ "prevent HUPCL hanging\n");
} /* if (ASYNC_ISOPEN) */
} /* if (ASYNC_CARR_ON && CLOCAL) */
async->async_flags &= ~ASYNC_CARR_ON;
@@ -2625,8 +2832,10 @@ begin:
IN_FLOW_STREAMS);
mutex_exit(&asy->asy_excl_hi);
}
- DEBUGCONT2(ASY_DEBUG_INPUT,
- "async%d_softint: %d char(s) in queue.\n", instance, cc);
+
+ DEBUGCONT2(ASY_DEBUG_INPUT, "async%d_softint: %d char(s) in queue.\n",
+ instance, cc);
+
if (!(bp = allocb(cc, BPRI_MED))) {
mutex_exit(&asy->asy_excl);
ttycommon_qfull(&async->async_ttycommon, q);
@@ -2648,7 +2857,7 @@ begin:
if (bp->b_wptr > bp->b_rptr) {
if (!canput(q)) {
asyerror(CE_NOTE, "asy%d: local queue full",
- instance);
+ instance);
freemsg(bp);
} else
(void) putq(q, bp);
@@ -2732,7 +2941,7 @@ rv:
mutex_exit(&asy->asy_excl_hi);
mutex_exit(&asy->asy_excl);
asyerror(CE_NOTE, "asy%d: ring buffer overflow",
- instance);
+ instance);
mutex_enter(&asy->asy_excl);
mutex_enter(&asy->asy_excl_hi);
}
@@ -2775,10 +2984,9 @@ async_restart(void *arg)
if ((async->async_flags & ASYNC_BREAK) &&
!(async->async_flags & ASYNC_OUT_SUSPEND)) {
mutex_enter(&asy->asy_excl_hi);
- lcr = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + LCR);
+ lcr = ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + LCR);
ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + LCR,
- (lcr & ~SETBREAK));
+ (lcr & ~SETBREAK));
mutex_exit(&asy->asy_excl_hi);
}
async->async_flags &= ~(ASYNC_DELAY|ASYNC_BREAK);
@@ -2831,9 +3039,9 @@ async_nstart(struct asyncline *async, int mode)
*/
if (async->async_flags & (ASYNC_BREAK|ASYNC_BUSY)) {
DEBUGCONT2((mode? ASY_DEBUG_OUT : 0),
- "async%d_nstart: start %s.\n",
- instance,
- async->async_flags & ASYNC_BREAK ? "break" : "busy");
+ "async%d_nstart: start %s.\n",
+ instance,
+ async->async_flags & ASYNC_BREAK ? "break" : "busy");
return;
}
@@ -2851,13 +3059,13 @@ async_nstart(struct asyncline *async, int mode)
*/
if (async->async_flags & ASYNC_DELAY) {
DEBUGCONT1((mode? ASY_DEBUG_OUT : 0),
- "async%d_nstart: start ASYNC_DELAY.\n", instance);
+ "async%d_nstart: start ASYNC_DELAY.\n", instance);
return;
}
if ((q = async->async_ttycommon.t_writeq) == NULL) {
DEBUGCONT1((mode? ASY_DEBUG_OUT : 0),
- "async%d_nstart: start writeq is null.\n", instance);
+ "async%d_nstart: start writeq is null.\n", instance);
return; /* not attached to a stream */
}
@@ -2882,9 +3090,9 @@ async_nstart(struct asyncline *async, int mode)
*/
mutex_enter(&asy->asy_excl_hi);
val = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + LCR);
- ddi_put8(asy->asy_iohandle,
- asy->asy_ioaddr + LCR, (val | SETBREAK));
+ asy->asy_ioaddr + LCR);
+ ddi_put8(asy->asy_iohandle, asy->asy_ioaddr + LCR,
+ (val | SETBREAK));
mutex_exit(&asy->asy_excl_hi);
async->async_flags |= ASYNC_BREAK;
(void) timeout(async_restart, (caddr_t)async,
@@ -2977,9 +3185,8 @@ async_nstart(struct asyncline *async, int mode)
if (didsome)
async->async_flags |= ASYNC_PROGRESS;
DEBUGCONT2(ASY_DEBUG_BUSY,
- "async%d_nstart: Set ASYNC_BUSY. async_ocnt=%d\n",
- instance,
- async->async_ocnt);
+ "async%d_nstart: Set ASYNC_BUSY. async_ocnt=%d\n",
+ instance, async->async_ocnt);
async->async_flags |= ASYNC_BUSY;
mutex_exit(&asy->asy_excl_hi);
}
@@ -3123,12 +3330,13 @@ async_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
* ioctls, so keep the others safe too.
*/
DEBUGCONT2(ASY_DEBUG_IOCTL, "async%d_ioctl: %s\n",
- instance,
- iocp->ioc_cmd == TIOCMGET ? "TIOCMGET" :
- iocp->ioc_cmd == TIOCMSET ? "TIOCMSET" :
- iocp->ioc_cmd == TIOCMBIS ? "TIOCMBIS" :
- iocp->ioc_cmd == TIOCMBIC ? "TIOCMBIC" :
- "other");
+ instance,
+ iocp->ioc_cmd == TIOCMGET ? "TIOCMGET" :
+ iocp->ioc_cmd == TIOCMSET ? "TIOCMSET" :
+ iocp->ioc_cmd == TIOCMBIS ? "TIOCMBIS" :
+ iocp->ioc_cmd == TIOCMBIC ? "TIOCMBIC" :
+ "other");
+
switch (iocp->ioc_cmd) {
case TIOCMGET:
case TIOCGPPS:
@@ -3330,14 +3538,15 @@ async_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
* clock / (baud * 16) * 16 * 2.
*/
index = BAUDINDEX(
- async->async_ttycommon.t_cflag);
+ async->async_ttycommon.t_cflag);
async->async_flags |= ASYNC_BREAK;
+
while ((ddi_get8(asy->asy_iohandle,
asy->asy_ioaddr + LSR) & XSRE) == 0) {
mutex_exit(&asy->asy_excl_hi);
mutex_exit(&asy->asy_excl);
drv_usecwait(
- 32*asyspdtab[index] & 0xfff);
+ 32*asyspdtab[index] & 0xfff);
mutex_enter(&asy->asy_excl);
mutex_enter(&asy->asy_excl_hi);
}
@@ -3348,23 +3557,23 @@ async_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
* "async_start" to grab the next message.
*/
val = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + LCR);
+ asy->asy_ioaddr + LCR);
ddi_put8(asy->asy_iohandle,
- asy->asy_ioaddr + LCR,
- (val | SETBREAK));
+ asy->asy_ioaddr + LCR,
+ (val | SETBREAK));
mutex_exit(&asy->asy_excl_hi);
(void) timeout(async_restart, (caddr_t)async,
drv_usectohz(1000000)/4);
} else {
DEBUGCONT1(ASY_DEBUG_OUT,
- "async%d_ioctl: wait for flush.\n",
- instance);
+ "async%d_ioctl: wait for flush.\n",
+ instance);
mutex_enter(&asy->asy_excl_hi);
asy_waiteot(asy);
mutex_exit(&asy->asy_excl_hi);
DEBUGCONT1(ASY_DEBUG_OUT,
- "async%d_ioctl: ldterm satisfied.\n",
- instance);
+ "async%d_ioctl: ldterm satisfied.\n",
+ instance);
}
break;
@@ -3409,7 +3618,7 @@ async_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
case TIOCMBIC:
if (iocp->ioc_count != TRANSPARENT) {
DEBUGCONT1(ASY_DEBUG_IOCTL, "async%d_ioctl: "
- "non-transparent\n", instance);
+ "non-transparent\n", instance);
error = miocpullup(mp, sizeof (int));
if (error != 0)
@@ -3417,14 +3626,14 @@ async_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
mutex_enter(&asy->asy_excl_hi);
(void) asymctl(asy,
- dmtoasy(*(int *)mp->b_cont->b_rptr),
- iocp->ioc_cmd);
+ dmtoasy(*(int *)mp->b_cont->b_rptr),
+ iocp->ioc_cmd);
mutex_exit(&asy->asy_excl_hi);
iocp->ioc_error = 0;
mp->b_datap->db_type = M_IOCACK;
} else {
DEBUGCONT1(ASY_DEBUG_IOCTL, "async%d_ioctl: "
- "transparent\n", instance);
+ "transparent\n", instance);
mcopyin(mp, NULL, sizeof (int), NULL);
}
break;
@@ -3442,12 +3651,11 @@ async_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
if (iocp->ioc_count == TRANSPARENT) {
DEBUGCONT1(ASY_DEBUG_IOCTL, "async%d_ioctl: "
- "transparent\n", instance);
- mcopyout(mp, NULL, sizeof (int), NULL,
- datamp);
+ "transparent\n", instance);
+ mcopyout(mp, NULL, sizeof (int), NULL, datamp);
} else {
DEBUGCONT1(ASY_DEBUG_IOCTL, "async%d_ioctl: "
- "non-transparent\n", instance);
+ "non-transparent\n", instance);
mioc2ack(mp, datamp, sizeof (int), 0);
}
break;
@@ -3458,7 +3666,7 @@ async_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
break;
*(struct cons_polledio **)mp->b_cont->b_rptr =
- &asy->polledio;
+ &asy->polledio;
mp->b_datap->db_type = M_IOCACK;
break;
@@ -3498,7 +3706,7 @@ async_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
*/
mcopyout(mp, NULL, sizeof (boolean_t), NULL, NULL);
*(boolean_t *)mp->b_cont->b_rptr =
- (asy->asy_flags & ASY_CONSOLE) != 0;
+ (asy->asy_flags & ASY_CONSOLE) != 0;
break;
default:
@@ -3534,7 +3742,16 @@ asyrsrv(queue_t *q)
}
/*
- * Put procedure for write queue.
+ * The ASYWPUTDO_NOT_SUSP macro indicates to asywputdo() whether it should
+ * handle messages as though the driver is operating normally or is
+ * suspended. In the suspended case, some or all of the processing may have
+ * to be delayed until the driver is resumed.
+ */
+#define ASYWPUTDO_NOT_SUSP(async, wput) \
+ !((wput) && ((async)->async_flags & ASYNC_DDI_SUSPENDED))
+
+/*
+ * Processing for write queue put procedure.
* Respond to M_STOP, M_START, M_IOCTL, and M_FLUSH messages here;
* set the flow control character for M_STOPI and M_STARTI messages;
* queue up M_BREAK, M_DELAY, and M_DATA messages for processing
@@ -3545,7 +3762,7 @@ asyrsrv(queue_t *q)
* as we do in ldterm.
*/
static int
-asywput(queue_t *q, mblk_t *mp)
+asywputdo(queue_t *q, mblk_t *mp, boolean_t wput)
{
struct asyncline *async;
struct asycom *asy;
@@ -3555,6 +3772,7 @@ asywput(queue_t *q, mblk_t *mp)
int error;
async = (struct asyncline *)q->q_ptr;
+
#ifdef DEBUG
instance = UNIT(async->async_dev);
#endif
@@ -3577,17 +3795,19 @@ asywput(queue_t *q, mblk_t *mp)
mutex_enter(&asy->asy_excl);
if (async->async_flags & ASYNC_STOPPED) {
async->async_flags &= ~ASYNC_STOPPED;
- /*
- * If an output operation is in progress,
- * resume it. Otherwise, prod the start
- * routine.
- */
- if (async->async_ocnt > 0) {
- mutex_enter(&asy->asy_excl_hi);
- async_resume(async);
- mutex_exit(&asy->asy_excl_hi);
- } else {
- async_start(async);
+ if (ASYWPUTDO_NOT_SUSP(async, wput)) {
+ /*
+ * If an output operation is in progress,
+ * resume it. Otherwise, prod the start
+ * routine.
+ */
+ if (async->async_ocnt > 0) {
+ mutex_enter(&asy->asy_excl_hi);
+ async_resume(async);
+ mutex_exit(&asy->asy_excl_hi);
+ } else {
+ async_start(async);
+ }
}
}
mutex_exit(&asy->asy_excl);
@@ -3606,19 +3826,21 @@ asywput(queue_t *q, mblk_t *mp)
if (*(int *)mp->b_cont->b_rptr != 0) {
DEBUGCONT1(ASY_DEBUG_OUT,
- "async%d_ioctl: flush request.\n",
- instance);
+ "async%d_ioctl: flush request.\n",
+ instance);
(void) putq(q, mp);
- mutex_enter(&asy->asy_excl);
- /*
- * If an TIOCSBRK is in progress,
- * clean it as TIOCCBRK does,
- * then kick off output.
- * If TIOCSBRK is not in progress,
- * just kick off output.
- */
- async_resume_utbrk(async);
+ mutex_enter(&asy->asy_excl);
+ if (ASYWPUTDO_NOT_SUSP(async, wput)) {
+ /*
+ * If an TIOCSBRK is in progress,
+ * clean it as TIOCCBRK does,
+ * then kick off output.
+ * If TIOCSBRK is not in progress,
+ * just kick off output.
+ */
+ async_resume_utbrk(async);
+ }
mutex_exit(&asy->asy_excl);
break;
}
@@ -3636,16 +3858,18 @@ asywput(queue_t *q, mblk_t *mp)
* start routine, just in case.
*/
(void) putq(q, mp);
- mutex_enter(&asy->asy_excl);
- /*
- * If an TIOCSBRK is in progress,
- * clean it as TIOCCBRK does.
- * then kick off output.
- * If TIOCSBRK is not in progress,
- * just kick off output.
- */
- async_resume_utbrk(async);
+ mutex_enter(&asy->asy_excl);
+ if (ASYWPUTDO_NOT_SUSP(async, wput)) {
+ /*
+ * If an TIOCSBRK is in progress,
+ * clean it as TIOCCBRK does.
+ * then kick off output.
+ * If TIOCSBRK is not in progress,
+ * just kick off output.
+ */
+ async_resume_utbrk(async);
+ }
mutex_exit(&asy->asy_excl);
break;
@@ -3653,7 +3877,14 @@ asywput(queue_t *q, mblk_t *mp)
/*
* Do it now.
*/
- async_ioctl(async, q, mp);
+ mutex_enter(&asy->asy_excl);
+ if (ASYWPUTDO_NOT_SUSP(async, wput)) {
+ mutex_exit(&asy->asy_excl);
+ async_ioctl(async, q, mp);
+ break;
+ }
+ async_put_suspq(asy, mp);
+ mutex_exit(&asy->asy_excl);
break;
}
break;
@@ -3667,13 +3898,20 @@ asywput(queue_t *q, mblk_t *mp)
*/
mutex_enter(&asy->asy_excl_hi);
if (async->async_flags & ASYNC_BUSY) {
- DEBUGCONT1(ASY_DEBUG_BUSY, "asy%dwput: "
+ DEBUGCONT1(ASY_DEBUG_BUSY, "asy%dwput: "
"Clearing async_ocnt, "
"leaving ASYNC_BUSY set\n",
instance);
async->async_ocnt = 0;
async->async_flags &= ~ASYNC_BUSY;
} /* if */
+
+ if (ASYWPUTDO_NOT_SUSP(async, wput)) {
+ /* Flush FIFO buffers */
+ if (asy->asy_use_fifo == FIFO_ON) {
+ asy_reset_fifo(asy, FIFOTXFLSH);
+ }
+ }
mutex_exit(&asy->asy_excl_hi);
/* Flush FIFO buffers */
@@ -3693,9 +3931,11 @@ asywput(queue_t *q, mblk_t *mp)
*mp->b_rptr &= ~FLUSHW; /* it has been flushed */
}
if (*mp->b_rptr & FLUSHR) {
- /* Flush FIFO buffers */
- if (asy->asy_use_fifo == FIFO_ON) {
- asy_reset_fifo(asy, FIFORXFLSH);
+ if (ASYWPUTDO_NOT_SUSP(async, wput)) {
+ /* Flush FIFO buffers */
+ if (asy->asy_use_fifo == FIFO_ON) {
+ asy_reset_fifo(asy, FIFORXFLSH);
+ }
}
flushq(RD(q), FLUSHDATA);
qreply(q, mp); /* give the read queues a crack at it */
@@ -3707,9 +3947,11 @@ asywput(queue_t *q, mblk_t *mp)
* We must make sure we process messages that survive the
* write-side flush.
*/
- mutex_enter(&asy->asy_excl);
- async_start(async);
- mutex_exit(&asy->asy_excl);
+ if (ASYWPUTDO_NOT_SUSP(async, wput)) {
+ mutex_enter(&asy->asy_excl);
+ async_start(async);
+ mutex_exit(&asy->asy_excl);
+ }
break;
case M_BREAK:
@@ -3720,44 +3962,64 @@ asywput(queue_t *q, mblk_t *mp)
* and poke the start routine.
*/
(void) putq(q, mp);
- mutex_enter(&asy->asy_excl);
- async_start(async);
- mutex_exit(&asy->asy_excl);
+ if (ASYWPUTDO_NOT_SUSP(async, wput)) {
+ mutex_enter(&asy->asy_excl);
+ async_start(async);
+ mutex_exit(&asy->asy_excl);
+ }
break;
case M_STOPI:
mutex_enter(&asy->asy_excl);
- mutex_enter(&asy->asy_excl_hi);
- if (!(async->async_inflow_source & IN_FLOW_USER)) {
- async_flowcontrol_hw_input(asy, FLOW_STOP,
- IN_FLOW_USER);
- (void) async_flowcontrol_sw_input(asy, FLOW_STOP,
- IN_FLOW_USER);
+ if (ASYWPUTDO_NOT_SUSP(async, wput)) {
+ mutex_enter(&asy->asy_excl_hi);
+ if (!(async->async_inflow_source & IN_FLOW_USER)) {
+ async_flowcontrol_hw_input(asy, FLOW_STOP,
+ IN_FLOW_USER);
+ (void) async_flowcontrol_sw_input(asy,
+ FLOW_STOP, IN_FLOW_USER);
+ }
+ mutex_exit(&asy->asy_excl_hi);
+ mutex_exit(&asy->asy_excl);
+ freemsg(mp);
+ break;
}
- mutex_exit(&asy->asy_excl_hi);
+ async_put_suspq(asy, mp);
mutex_exit(&asy->asy_excl);
- freemsg(mp);
break;
case M_STARTI:
mutex_enter(&asy->asy_excl);
- mutex_enter(&asy->asy_excl_hi);
- if (async->async_inflow_source & IN_FLOW_USER) {
- async_flowcontrol_hw_input(asy, FLOW_START,
- IN_FLOW_USER);
- (void) async_flowcontrol_sw_input(asy, FLOW_START,
- IN_FLOW_USER);
+ if (ASYWPUTDO_NOT_SUSP(async, wput)) {
+ mutex_enter(&asy->asy_excl_hi);
+ if (async->async_inflow_source & IN_FLOW_USER) {
+ async_flowcontrol_hw_input(asy, FLOW_START,
+ IN_FLOW_USER);
+ (void) async_flowcontrol_sw_input(asy,
+ FLOW_START, IN_FLOW_USER);
+ }
+ mutex_exit(&asy->asy_excl_hi);
+ mutex_exit(&asy->asy_excl);
+ freemsg(mp);
+ break;
}
- mutex_exit(&asy->asy_excl_hi);
+ async_put_suspq(asy, mp);
mutex_exit(&asy->asy_excl);
- freemsg(mp);
break;
case M_CTL:
if (MBLKL(mp) >= sizeof (struct iocblk) &&
((struct iocblk *)mp->b_rptr)->ioc_cmd == MC_POSIXQUERY) {
- ((struct iocblk *)mp->b_rptr)->ioc_cmd = MC_HAS_POSIX;
- qreply(q, mp);
+ mutex_enter(&asy->asy_excl);
+ if (ASYWPUTDO_NOT_SUSP(async, wput)) {
+ ((struct iocblk *)mp->b_rptr)->ioc_cmd =
+ MC_HAS_POSIX;
+ mutex_exit(&asy->asy_excl);
+ qreply(q, mp);
+ break;
+ } else {
+ async_put_suspq(asy, mp);
+ }
} else {
/*
* These MC_SERVICE type messages are used by upper
@@ -3784,7 +4046,14 @@ asywput(queue_t *q, mblk_t *mp)
break;
case M_IOCDATA:
- async_iocdata(q, mp);
+ mutex_enter(&asy->asy_excl);
+ if (ASYWPUTDO_NOT_SUSP(async, wput)) {
+ mutex_exit(&asy->asy_excl);
+ async_iocdata(q, mp);
+ break;
+ }
+ async_put_suspq(asy, mp);
+ mutex_exit(&asy->asy_excl);
break;
default:
@@ -3794,6 +4063,12 @@ asywput(queue_t *q, mblk_t *mp)
return (0);
}
+static int
+asywput(queue_t *q, mblk_t *mp)
+{
+ return (asywputdo(q, mp, B_TRUE));
+}
+
/*
* Retry an "ioctl", now that "bufcall" claims we may be able to allocate
* the buffer we need.
@@ -3853,11 +4128,11 @@ async_iocdata(queue_t *q, mblk_t *mp)
mutex_enter(&asy->asy_excl);
DEBUGCONT2(ASY_DEBUG_MODEM, "async%d_iocdata: case %s\n",
- instance,
- csp->cp_cmd == TIOCMGET ? "TIOCMGET" :
- csp->cp_cmd == TIOCMSET ? "TIOCMSET" :
- csp->cp_cmd == TIOCMBIS ? "TIOCMBIS" :
- "TIOCMBIC");
+ instance,
+ csp->cp_cmd == TIOCMGET ? "TIOCMGET" :
+ csp->cp_cmd == TIOCMSET ? "TIOCMSET" :
+ csp->cp_cmd == TIOCMBIS ? "TIOCMBIS" :
+ "TIOCMBIC");
switch (csp->cp_cmd) {
case TIOCMGET:
@@ -3876,9 +4151,8 @@ async_iocdata(queue_t *q, mblk_t *mp)
case TIOCMBIS:
case TIOCMBIC:
mutex_enter(&asy->asy_excl_hi);
- (void) asymctl(asy,
- dmtoasy(*(int *)mp->b_cont->b_rptr),
- csp->cp_cmd);
+ (void) asymctl(asy, dmtoasy(*(int *)mp->b_cont->b_rptr),
+ csp->cp_cmd);
mutex_exit(&asy->asy_excl_hi);
mioc2ack(mp, NULL, 0, 0);
break;
@@ -3927,8 +4201,8 @@ asyischar(cons_polledio_arg_t arg)
{
struct asycom *asy = (struct asycom *)arg;
- return ((ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + LSR) & RCA) != 0);
+ return ((ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + LSR) & RCA)
+ != 0);
}
/*
@@ -3941,8 +4215,7 @@ asygetchar(cons_polledio_arg_t arg)
while (!asyischar(arg))
drv_usecwait(10);
- return (ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + DAT));
+ return (ddi_get8(asy->asy_iohandle, asy->asy_ioaddr + DAT));
}
/*
@@ -3964,19 +4237,19 @@ asymctl(struct asycom *asy, int bits, int how)
case TIOCMSET:
DEBUGCONT2(ASY_DEBUG_MODEM,
- "asy%dmctl: TIOCMSET, bits = %x\n", instance, bits);
+ "asy%dmctl: TIOCMSET, bits = %x\n", instance, bits);
mcr_r = bits; /* Set bits */
break;
case TIOCMBIS:
DEBUGCONT2(ASY_DEBUG_MODEM, "asy%dmctl: TIOCMBIS, bits = %x\n",
- instance, bits);
+ instance, bits);
mcr_r |= bits; /* Mask in bits */
break;
case TIOCMBIC:
DEBUGCONT2(ASY_DEBUG_MODEM, "asy%dmctl: TIOCMBIC, bits = %x\n",
- instance, bits);
+ instance, bits);
mcr_r &= ~bits; /* Mask out bits */
break;
@@ -3990,17 +4263,17 @@ asymctl(struct asycom *asy, int bits, int how)
asy->asy_ioaddr + ICR) & MIEN) {
msr_r = asy->asy_msr;
DEBUGCONT2(ASY_DEBUG_MODEM,
- "asy%dmctl: TIOCMGET, read msr_r = %x\n",
- instance, msr_r);
+ "asy%dmctl: TIOCMGET, read msr_r = %x\n",
+ instance, msr_r);
} else {
msr_r = ddi_get8(asy->asy_iohandle,
- asy->asy_ioaddr + MSR);
+ asy->asy_ioaddr + MSR);
DEBUGCONT2(ASY_DEBUG_MODEM,
- "asy%dmctl: TIOCMGET, read MSR = %x\n",
- instance, msr_r);
+ "asy%dmctl: TIOCMGET, read MSR = %x\n",
+ instance, msr_r);
}
DEBUGCONT2(ASY_DEBUG_MODEM, "asy%dtodm: modem_lines = %x\n",
- instance, asytodm(mcr_r, msr_r));
+ instance, asytodm(mcr_r, msr_r));
return (asytodm(mcr_r, msr_r));
}
diff --git a/usr/src/uts/common/io/audio/sada/drv/audio810/audio810.c b/usr/src/uts/common/io/audio/sada/drv/audio810/audio810.c
index 19cc147773..5001635a43 100644
--- a/usr/src/uts/common/io/audio/sada/drv/audio810/audio810.c
+++ b/usr/src/uts/common/io/audio/sada/drv/audio810/audio810.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -160,6 +160,8 @@ static uint_t audio810_intr(caddr_t);
/*
* Local Routine Prototypes
*/
+static void audio810_set_busy(audio810_state_t *);
+static void audio810_set_idle(audio810_state_t *);
static int audio810_codec_sync(audio810_state_t *);
static int audio810_write_ac97(audio810_state_t *, int, uint16_t);
static int audio810_read_ac97(audio810_state_t *, int, uint16_t *);
@@ -569,17 +571,49 @@ audio810_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
switch (cmd) {
case DDI_ATTACH:
break;
-
- /*
- * now, no suspend/resume supported. we'll do it in the future.
- */
case DDI_RESUME:
ATRACE("I810_attach() DDI_RESUME", NULL);
- audio_sup_log(NULL, CE_WARN,
- "%s%d: i810_attach() resume is not supported yet",
- audio810_name, instance);
- return (DDI_FAILURE);
+ if ((statep = ddi_get_soft_state(audio810_statep, instance)) ==
+ NULL) {
+ audio_sup_log(NULL, CE_WARN,
+ "!attach() DDI_RESUME get soft state failed");
+ return (DDI_FAILURE);
+ }
+
+ ASSERT(dip == statep->dip);
+
+ mutex_enter(&statep->inst_lock);
+
+ ASSERT(statep->i810_suspended == I810_SUSPENDED);
+
+ statep->i810_suspended = I810_NOT_SUSPENDED;
+
+ /* Restore the audio810 chip's state */
+ if (audio810_chip_init(statep, I810_INIT_RESTORE) !=
+ AUDIO_SUCCESS) {
+ audio_sup_log(statep->audio_handle, CE_WARN,
+ "!attach() DDI_RESUME failed to init chip");
+ mutex_exit(&statep->inst_lock);
+ return (DDI_FAILURE);
+ }
+
+ mutex_exit(&statep->inst_lock);
+
+ /* Resume playing and recording, if required */
+ if (audio_sup_restore_state(statep->audio_handle,
+ AUDIO_ALL_DEVICES, AUDIO_BOTH) == AUDIO_FAILURE) {
+ audio_sup_log(statep->audio_handle, CE_WARN,
+ "!attach() DDI_RESUME audio restart failed");
+ }
+
+ mutex_enter(&statep->inst_lock);
+ cv_broadcast(&statep->i810_cv); /* let entry points continue */
+ mutex_exit(&statep->inst_lock);
+
+ ATRACE("audio810_attach() DDI_RESUME done", NULL);
+
+ return (DDI_SUCCESS);
default:
audio_sup_log(NULL, CE_WARN,
"!%s%d: attach() unknown command: 0x%x",
@@ -638,7 +672,7 @@ audio810_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
/* set PCI command register */
cmdreg = pci_config_get16(statep->pci_conf_handle, PCI_CONF_COMM);
pci_config_put16(statep->pci_conf_handle, PCI_CONF_COMM,
- cmdreg | PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);
+ cmdreg | PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);
if ((audio810_alloc_sample_buf(statep, I810_DMA_PCM_OUT,
statep->play_buf_size) == AUDIO_FAILURE) ||
@@ -700,6 +734,7 @@ error_unmap:
error_destroy:
ATRACE("audio810_attach() error_destroy", statep);
mutex_destroy(&statep->inst_lock);
+ cv_destroy(&statep->i810_cv);
error_audiosup:
ATRACE("audio810_attach() error_audiosup", statep);
@@ -751,17 +786,33 @@ audio810_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
switch (cmd) {
case DDI_DETACH:
break;
-
- /*
- * now, no suspend/resume supported. we'll do it in the future.
- */
case DDI_SUSPEND:
ATRACE("i810_detach() SUSPEND", statep);
- audio_sup_log(NULL, CE_WARN,
- "%s%d: i810_detach() suspend is not supported yet",
- audio810_name, instance);
- return (DDI_FAILURE);
+ mutex_enter(&statep->inst_lock);
+
+ ASSERT(statep->i810_suspended == I810_NOT_SUSPENDED);
+
+ statep->i810_suspended = I810_SUSPENDED; /* stop new ops */
+
+ /* wait for current operations to complete */
+ while (statep->i810_busy_cnt != 0)
+ cv_wait(&statep->i810_cv, &statep->inst_lock);
+
+ /* stop DMA engines */
+ audio810_stop_dma(statep);
+
+ if (audio_sup_save_state(statep->audio_handle,
+ AUDIO_ALL_DEVICES, AUDIO_BOTH) == AUDIO_FAILURE) {
+ audio_sup_log(statep->audio_handle, CE_WARN,
+ "!detach() DDI_SUSPEND audio save failed");
+ }
+
+ mutex_exit(&statep->inst_lock);
+
+ ATRACE("audio810_detach() DDI_SUSPEND successful", statep);
+
+ return (DDI_SUCCESS);
default:
ATRACE("i810_detach() unknown command", cmd);
audio_sup_log(statep->audio_handle, CE_WARN,
@@ -796,6 +847,7 @@ audio810_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
(void) audio_sup_unregister(statep->audio_handle);
mutex_destroy(&statep->inst_lock);
+ cv_destroy(&statep->i810_cv);
audio810_unmap_regs(statep);
@@ -835,6 +887,13 @@ audio810_intr(caddr_t arg)
statep = (audio810_state_t *)arg;
mutex_enter(&statep->inst_lock);
+
+ if (statep->i810_suspended == I810_SUSPENDED) {
+ ATRACE("audio810_intr() device suspended", NULL);
+ mutex_exit(&statep->inst_lock);
+ return (DDI_INTR_UNCLAIMED);
+ }
+
gsr = I810_BM_GET32(I810_REG_GSR);
/* check if device is interrupting */
@@ -883,6 +942,97 @@ audio810_intr(caddr_t arg)
} /* audio810_intr() */
+/*
+ * audio810_set_busy()
+ *
+ * Description:
+ * This routine is called whenever a routine needs to guarantee
+ * that it will not be suspended. It will also block any routine
+ * while a suspend is going on.
+ *
+ * CAUTION: This routine cannot be called by routines that will
+ * block. Otherwise DDI_SUSPEND will be blocked for a
+ * long time. And that is the wrong thing to do.
+ *
+ * Arguments:
+ * audio810_state_t *statep The device's state structure
+ *
+ * Returns:
+ * void
+ */
+static void
+audio810_set_busy(audio810_state_t *statep)
+{
+ ATRACE("in audio810_set_busy()", statep);
+
+ ASSERT(!mutex_owned(&statep->inst_lock));
+
+ /* get the lock so we are safe */
+ mutex_enter(&statep->inst_lock);
+
+ /* block if we are suspended */
+ while (statep->i810_suspended == I810_SUSPENDED) {
+ cv_wait(&statep->i810_cv, &statep->inst_lock);
+ }
+
+ /*
+ * Okay, we aren't suspended, so mark as busy.
+ * This will keep us from being suspended when we release the lock.
+ */
+ ASSERT(statep->i810_busy_cnt >= 0);
+ statep->i810_busy_cnt++;
+
+ mutex_exit(&statep->inst_lock);
+
+ ATRACE("audio810_set_busy() done", statep);
+
+ ASSERT(!mutex_owned(&statep->inst_lock));
+
+} /* audio810_set_busy() */
+
+/*
+ * audio810_set_idle()
+ *
+ * Description:
+ * This routine reduces the busy count. It then does a cv_broadcast()
+ * if the count is 0 so a waiting DDI_SUSPEND will continue forward.
+ *
+ * Arguments:
+ * audio810_state_t *statep The device's state structure
+ *
+ * Returns:
+ * void
+ */
+static void
+audio810_set_idle(audio810_state_t *statep)
+{
+ ATRACE("in audio810_set_idle()", statep);
+
+ ASSERT(!mutex_owned(&statep->inst_lock));
+
+ /* get the lock so we are safe */
+ mutex_enter(&statep->inst_lock);
+
+ ASSERT(statep->i810_suspended == I810_NOT_SUSPENDED);
+
+ /* decrement the busy count */
+ ASSERT(statep->i810_busy_cnt > 0);
+ statep->i810_busy_cnt--;
+
+ /* if no longer busy, then we wake up a waiting SUSPEND */
+ if (statep->i810_busy_cnt == 0) {
+ cv_broadcast(&statep->i810_cv);
+ }
+
+ /* we're done, so unlock */
+ mutex_exit(&statep->inst_lock);
+
+ ATRACE("audio810_set_idle() done", statep);
+
+ ASSERT(!mutex_owned(&statep->inst_lock));
+
+} /* audio810_set_idle() */
+
/* *********************** Mixer Entry Point Routines ******************* */
/*
* audio810_ad_set_config()
@@ -926,6 +1076,8 @@ audio810_ad_set_config(audiohdl_t ahandle, int stream, int command,
statep = audio_sup_get_private(ahandle);
ASSERT(statep);
+ audio810_set_busy(statep);
+
mutex_enter(&statep->inst_lock);
switch (command) {
case AM_SET_GAIN:
@@ -1030,6 +1182,8 @@ audio810_ad_set_config(audiohdl_t ahandle, int stream, int command,
}
mutex_exit(&statep->inst_lock);
+ audio810_set_idle(statep);
+
ATRACE_32("i810_ad_set_config() returning", rc);
return (rc);
@@ -1063,6 +1217,7 @@ audio810_ad_set_format(audiohdl_t ahandle, int stream, int dir,
{
audio810_state_t *statep;
uint16_t val;
+ int rc = AUDIO_FAILURE;
ASSERT(precision == AUDIO_PRECISION_16);
ASSERT(channels == AUDIO_CHANNELS_STEREO);
@@ -1083,6 +1238,8 @@ audio810_ad_set_format(audiohdl_t ahandle, int stream, int dir,
statep = audio_sup_get_private(ahandle);
ASSERT(statep);
+ audio810_set_busy(statep);
+
mutex_enter(&statep->inst_lock);
if (statep->var_sr == B_FALSE) {
@@ -1092,8 +1249,7 @@ audio810_ad_set_format(audiohdl_t ahandle, int stream, int dir,
audio_sup_log(statep->audio_handle, CE_NOTE,
"!ad_set_format() bad sample rate %d\n",
sample_rate);
- mutex_exit(&statep->inst_lock);
- return (AUDIO_FAILURE);
+ goto done;
}
} else {
switch (sample_rate) {
@@ -1111,8 +1267,7 @@ audio810_ad_set_format(audiohdl_t ahandle, int stream, int dir,
case I810_SAMPR48000: break;
default:
ATRACE_32("i810_ad_set_format() bad SR", sample_rate);
- mutex_exit(&statep->inst_lock);
- return (AUDIO_FAILURE);
+ goto done;
}
}
@@ -1140,8 +1295,7 @@ audio810_ad_set_format(audiohdl_t ahandle, int stream, int dir,
audio_sup_log(statep->audio_handle, CE_NOTE,
"!set_format() bad output sample rate %d",
sample_rate);
- mutex_exit(&statep->inst_lock);
- return (AUDIO_FAILURE);
+ goto done;
}
}
@@ -1170,8 +1324,7 @@ audio810_ad_set_format(audiohdl_t ahandle, int stream, int dir,
audio_sup_log(statep->audio_handle, CE_NOTE,
"!set_format() bad input sample rate %d",
sample_rate);
- mutex_exit(&statep->inst_lock);
- return (AUDIO_FAILURE);
+ goto done;
}
}
@@ -1180,12 +1333,15 @@ audio810_ad_set_format(audiohdl_t ahandle, int stream, int dir,
statep->i810_cprecision = precision;
}
+ rc = AUDIO_SUCCESS;
done:
mutex_exit(&statep->inst_lock);
- ATRACE_32("i810_ad_set_format() returning success", 0);
+ audio810_set_idle(statep);
- return (AUDIO_SUCCESS);
+ ATRACE_32("i810_ad_set_format() returning", rc);
+
+ return (rc);
} /* audio810_ad_set_format() */
@@ -1215,6 +1371,8 @@ audio810_ad_start_play(audiohdl_t ahandle, int stream)
statep = audio_sup_get_private(ahandle);
ASSERT(statep);
+ audio810_set_busy(statep);
+
mutex_enter(&statep->inst_lock);
if (statep->flags & I810_DMA_PLAY_PAUSED) {
@@ -1239,6 +1397,9 @@ audio810_ad_start_play(audiohdl_t ahandle, int stream)
done:
mutex_exit(&statep->inst_lock);
+
+ audio810_set_idle(statep);
+
return (rc);
} /* audio810_ad_start_play() */
@@ -1269,17 +1430,20 @@ audio810_ad_pause_play(audiohdl_t ahandle, int stream)
ATRACE("audio810_ad_pause_play() ", ahandle);
ATRACE_32("i810_ad_pause_play() stream", stream);
+ audio810_set_busy(statep);
+
mutex_enter(&statep->inst_lock);
- if ((statep->flags & I810_DMA_PLAY_STARTED) == 0) {
- mutex_exit(&statep->inst_lock);
- return;
- }
+ if ((statep->flags & I810_DMA_PLAY_STARTED) == 0)
+ goto done;
cr = I810_BM_GET8(I810_PCM_OUT_CR);
cr &= ~I810_BM_CR_RUN;
I810_BM_PUT8(I810_PCM_OUT_CR, cr);
statep->flags |= I810_DMA_PLAY_PAUSED;
+done:
mutex_exit(&statep->inst_lock);
+ audio810_set_idle(statep);
+
} /* audio810_ad_pause_play() */
/*
@@ -1308,6 +1472,8 @@ audio810_ad_stop_play(audiohdl_t ahandle, int stream)
statep = audio_sup_get_private(ahandle);
ASSERT(statep);
+ audio810_set_busy(statep);
+
mutex_enter(&statep->inst_lock);
/* pause bus master */
@@ -1323,6 +1489,8 @@ audio810_ad_stop_play(audiohdl_t ahandle, int stream)
mutex_exit(&statep->inst_lock);
+ audio810_set_idle(statep);
+
} /* audio810_ad_stop_play() */
/*
@@ -1344,25 +1512,28 @@ static int
audio810_ad_start_record(audiohdl_t ahandle, int stream)
{
audio810_state_t *statep;
- int rc;
+ int rc = AUDIO_SUCCESS;
ATRACE("audio810_ad_start_record() ", ahandle);
ATRACE_32("i810_ad_start_record() stream", stream);
statep = audio_sup_get_private(ahandle);
ASSERT(statep);
+ audio810_set_busy(statep);
+
mutex_enter(&statep->inst_lock);
- if (statep->flags & I810_DMA_RECD_STARTED) {
- mutex_exit(&statep->inst_lock);
- return (AUDIO_SUCCESS);
- }
+ if (statep->flags & I810_DMA_RECD_STARTED)
+ goto done;
rc = audio810_prepare_record_buf(statep);
if (rc == AUDIO_SUCCESS) {
statep->flags |= I810_DMA_RECD_STARTED;
}
+done:
mutex_exit(&statep->inst_lock);
+ audio810_set_idle(statep);
+
return (rc);
} /* audio810_ad_start_record() */
@@ -1393,6 +1564,8 @@ audio810_ad_stop_record(audiohdl_t ahandle, int stream)
statep = audio_sup_get_private(ahandle);
ASSERT(statep);
+ audio810_set_busy(statep);
+
mutex_enter(&statep->inst_lock);
statep->flags &= ~I810_DMA_RECD_STARTED;
@@ -1407,6 +1580,8 @@ audio810_ad_stop_record(audiohdl_t ahandle, int stream)
mutex_exit(&statep->inst_lock);
+ audio810_set_idle(statep);
+
} /* audio810_ad_stop_record() */
/* *********************** Local Routines *************************** */
@@ -1585,6 +1760,7 @@ audio810_init_state(audio810_state_t *statep, dev_info_t *dip)
return (AUDIO_FAILURE);
}
mutex_init(&statep->inst_lock, NULL, MUTEX_DRIVER, statep->intr_iblock);
+ cv_init(&statep->i810_cv, NULL, CV_DRIVER, NULL);
/* fill in device info strings */
(void) strcpy(statep->i810_dev_info.name, I810_DEV_NAME);
@@ -2419,6 +2595,14 @@ audio810_stop_dma(audio810_state_t *statep)
statep->flags = 0;
+/*
+ * XXXX Not sure what these declarations are for, but I brought them from
+ * the PM gate.
+ */
+ statep->play_buf.io_started = B_FALSE;
+
+ statep->record_buf.io_started = B_FALSE;
+
} /* audio810_stop_dma() */
/*
diff --git a/usr/src/uts/common/io/fdc.c b/usr/src/uts/common/io/fdc.c
index ab8d9b9c18..0e111658e0 100644
--- a/usr/src/uts/common/io/fdc.c
+++ b/usr/src/uts/common/io/fdc.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -472,7 +471,7 @@ fdc_probe(dev_info_t *dip)
}
FCERRPRINT(FDEP_L3, FDEM_ATTA, (CE_WARN, "fdc_probe: dip %p",
- (void*)dip));
+ (void*)dip));
if (get_ioaddr(dip, &ioaddr) != DDI_SUCCESS)
return (DDI_PROBE_FAILURE);
@@ -497,7 +496,7 @@ fdc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
char name[MAXNAMELEN];
FCERRPRINT(FDEP_L3, FDEM_ATTA, (CE_WARN, "fdc_attach: dip %p",
- (void*)dip));
+ (void*)dip));
switch (cmd) {
case DDI_ATTACH:
@@ -600,6 +599,10 @@ fdc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
ddi_report_dev(dip);
return (DDI_SUCCESS);
+ case DDI_RESUME:
+ return (DDI_SUCCESS);
+ /* break; */
+
default:
return (DDI_FAILURE);
}
@@ -826,11 +829,12 @@ static int
fdc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
struct fdcntlr *fcp;
+ struct fcu_obj *fjp;
int unit;
int rval = 0;
FCERRPRINT(FDEP_L3, FDEM_ATTA, (CE_WARN, "fdc_detach: dip %p",
- (void*)dip));
+ (void*)dip));
fcp = ddi_get_driver_private(dip);
@@ -847,8 +851,8 @@ fdc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
if (ddi_dmae_release(fcp->c_dip, fcp->c_dmachan) !=
DDI_SUCCESS)
cmn_err(CE_WARN, "fdc_detach: dma release failed, "
- "dip %p, dmachan %x\n",
- (void*)fcp->c_dip, fcp->c_dmachan);
+ "dip %p, dmachan %x\n",
+ (void*)fcp->c_dip, fcp->c_dmachan);
ddi_prop_remove_all(fcp->c_dip);
ddi_set_driver_private(fcp->c_dip, NULL);
@@ -858,6 +862,35 @@ fdc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
sema_destroy(&fcp->c_selsem);
ddi_soft_state_free(fdc_state_head, ddi_get_instance(dip));
break;
+
+ case DDI_SUSPEND:
+ /*
+ * Following code causes the fdc (floppy controller)
+ * to suspend as long as there are no floppy drives
+ * attached to it.
+ * At present the floppy driver does not support
+ * SUSPEND/RESUME.
+ *
+ * Check if any FD units are attached
+ *
+ * For now, SUSPEND/RESUME is not supported
+ * if a floppy drive is present.
+ * So if any FD unit is attached return DDI_FAILURE
+ */
+ for (unit = 0; unit < NFDUN; unit++) {
+ fjp = fcp->c_unit[unit];
+ if (fjp->fj_flags & FUNIT_DRVATCH) {
+ cmn_err(CE_WARN,
+ "fdc_detach: fd attached, failing SUSPEND");
+ return (DDI_FAILURE);
+ }
+ }
+
+ cmn_err(CE_NOTE, "fdc_detach: SUSPEND fdc");
+
+ rval = DDI_SUCCESS;
+ break;
+
default:
rval = EINVAL;
break;
@@ -892,9 +925,9 @@ fdc_abort(struct fcu_obj *fjp)
if (ddi_dmae_stop(fcp->c_dip, fcp->c_dmachan) !=
DDI_SUCCESS)
cmn_err(CE_WARN,
- "fdc_detach: dma release failed, "
- "dip %p, dmachan %x\n",
- (void*)fcp->c_dip, fcp->c_dmachan);
+ "fdc_detach: dma release failed, "
+ "dip %p, dmachan %x\n",
+ (void*)fcp->c_dip, fcp->c_dmachan);
}
mutex_exit(&fcp->c_lock);
drv_usecwait(500);
@@ -923,7 +956,7 @@ fdc_dkinfo(struct fcu_obj *fjp, struct dk_cinfo *dcp)
struct fdcntlr *fcp = fjp->fj_fdc;
(void) strncpy((char *)&dcp->dki_cname, ddi_get_name(fcp->c_dip),
- DK_DEVLEN);
+ DK_DEVLEN);
dcp->dki_ctype = DKC_UNKNOWN; /* no code for generic PC/AT fdc */
dcp->dki_flags = DKI_FMTTRK;
dcp->dki_addr = fcp->c_regbase;
@@ -931,7 +964,7 @@ fdc_dkinfo(struct fcu_obj *fjp, struct dk_cinfo *dcp)
dcp->dki_prio = fcp->c_intprio;
dcp->dki_vec = fcp->c_intvec;
(void) strncpy((char *)&dcp->dki_dname, ddi_driver_name(fjp->fj_dip),
- DK_DEVLEN);
+ DK_DEVLEN);
dcp->dki_slave = fjp->fj_unit & 3;
dcp->dki_maxtransfer = maxphys / DEV_BSIZE;
return (DDI_SUCCESS);
@@ -1149,7 +1182,7 @@ fdrw(struct fcu_obj *fjp, int funit, int rw, int cyl, int head,
dmar_flags |= (DDI_DMA_STREAMING | DDI_DMA_PARTIAL);
if (ddi_dma_alloc_handle(fcp->c_dip, &fdc_dma_attr, DDI_DMA_SLEEP,
- 0, &csb->csb_dmahandle) != DDI_SUCCESS) {
+ 0, &csb->csb_dmahandle) != DDI_SUCCESS) {
rval = EINVAL;
goto out;
}
@@ -1181,15 +1214,14 @@ fdrw(struct fcu_obj *fjp, int funit, int rw, int cyl, int head,
} else if (rval == DDI_DMA_PARTIAL_MAP) {
csb->csb_handle_bound = 1;
if (ddi_dma_numwin(csb->csb_dmahandle, &csb->csb_dmawincnt) !=
- DDI_SUCCESS) {
+ DDI_SUCCESS) {
cmn_err(CE_WARN, "fdrw: dma numwin failed\n");
rval = EINVAL;
goto out;
}
} else {
cmn_err(CE_WARN,
- "fdrw: dma addr bind handle failed, rval = %d\n",
- rval);
+ "fdrw: dma addr bind handle failed, rval = %d\n", rval);
rval = EINVAL;
goto out;
}
@@ -1269,7 +1301,7 @@ fdtrkformat(struct fcu_obj *fjp, int funit, int cyl, int head, int filldata)
fmdatlen = 4 * numsctr;
if (ddi_dma_alloc_handle(fcp->c_dip, &fdc_dma_attr, DDI_DMA_SLEEP,
- 0, &csb->csb_dmahandle) != DDI_SUCCESS) {
+ 0, &csb->csb_dmahandle) != DDI_SUCCESS) {
rval = EINVAL;
goto out;
}
@@ -1310,15 +1342,15 @@ fdtrkformat(struct fcu_obj *fjp, int funit, int cyl, int head, int filldata)
} else if (rval == DDI_DMA_PARTIAL_MAP) {
csb->csb_handle_bound = 1;
if (ddi_dma_numwin(csb->csb_dmahandle, &csb->csb_dmawincnt) !=
- DDI_SUCCESS) {
+ DDI_SUCCESS) {
cmn_err(CE_WARN, "fdtrkformat: dma numwin failed\n");
rval = EINVAL;
goto out;
}
} else {
cmn_err(CE_WARN,
- "fdtrkformat: dma buf bind handle failed, rval = %d\n",
- rval);
+ "fdtrkformat: dma buf bind handle failed, rval = %d\n",
+ rval);
rval = EINVAL;
goto out;
}
@@ -1579,15 +1611,15 @@ fdcquiesce(struct fdcntlr *fcp)
int unit;
FCERRPRINT(FDEP_L2, FDEM_RESE, (CE_NOTE, "fdcquiesce fcp %p",
- (void*)fcp));
+ (void*)fcp));
ASSERT(MUTEX_HELD(&fcp->c_lock));
mutex_enter(&fcp->c_dorlock);
if (ddi_dmae_stop(fcp->c_dip, fcp->c_dmachan) != DDI_SUCCESS)
cmn_err(CE_WARN, "fdcquiesce: dmae stop failed, "
- "dip %p, dmachan %x\n",
- (void*)fcp->c_dip, fcp->c_dmachan);
+ "dip %p, dmachan %x\n",
+ (void*)fcp->c_dip, fcp->c_dmachan);
fcp->c_digout = (fcp->c_digout & (FD_DMTREN | FD_DRSEL)) | FD_ENABLE;
outb(fcp->c_regbase + FCR_DOR, fcp->c_digout);
@@ -2158,7 +2190,7 @@ retry:
*/
mutex_enter(&fcp->c_dorlock);
(void) fdc_motorsm(fjp, FMI_RSTARTCMD,
- fjp->fj_drive->fdd_motoron);
+ fjp->fj_drive->fdd_motoron);
/*
* Return value ignored - fdcmotort deals with failure.
*/
@@ -2202,8 +2234,8 @@ retry:
if (ddi_dmae_prog(fcp->c_dip, &dmaereq, &csb->csb_dmacookie,
fcp->c_dmachan) != DDI_SUCCESS)
cmn_err(CE_WARN, "fdc_exec: dmae prog failed, "
- "dip %p, dmachan %x\n",
- (void*)fcp->c_dip, fcp->c_dmachan);
+ "dip %p, dmachan %x\n",
+ (void*)fcp->c_dip, fcp->c_dmachan);
}
if ((fdc_statemach(fcp) == FXS_DOWT) && !sleep) {
@@ -2400,7 +2432,9 @@ fdrecover(struct fdcntlr *fcp)
(void*)fcp->c_dip, fcp->c_dmachan,
residual);
FCERRPRINT(FDEP_L2, FDEM_RECO,
- (CE_NOTE, "fd unit %d: %s error: dma count=0x%lx residual=0x%x",
+ (CE_NOTE,
+ "fd unit %d: %s error: "
+ "dma count=0x%lx residual=0x%x",
csb->csb_drive,
fdcmds[*csb->csb_cmd & 0x1f].cmdname,
csb->csb_dmacookie.dmac_size, residual));
@@ -2412,8 +2446,10 @@ fdrecover(struct fdcntlr *fcp)
*/
if (++csb->csb_ourtrys <= OURUN_TRIES) {
FCERRPRINT(FDEP_L2, FDEM_RECO,
-(CE_NOTE, "fd unit %d: %s error: over/under-run",
-csb->csb_drive, fdcmds[*csb->csb_cmd & 0x1f].cmdname));
+ (CE_NOTE,
+ "fd unit %d: %s error: over/under-run",
+ csb->csb_drive,
+ fdcmds[*csb->csb_cmd & 0x1f].cmdname));
return (0);
} else
/*
@@ -2445,9 +2481,12 @@ csb->csb_drive, fdcmds[*csb->csb_cmd & 0x1f].cmdname));
if (csb->csb_opflags &
(CSB_OFDMARD | CSB_OFDMAWT)) {
FCERRPRINT(FDEP_L4, FDEM_RECO,
-(CE_WARN, "fd unit %d: %s error: st0=0x%x st1=0x%x st2=0x%x",
+ (CE_WARN,
+ "fd unit %d: %s error: "
+ "st0=0x%x st1=0x%x st2=0x%x",
csb->csb_drive,
-fdcmds[*csb->csb_cmd & 0x1f].cmdname,
+ fdcmds[*csb->csb_cmd &
+ 0x1f].cmdname,
*csb->csb_rslt, csb->csb_rslt[1],
csb->csb_rslt[2]));
}
@@ -2681,8 +2720,8 @@ fdwatch(void *arg)
if (fcp->c_flags & FCFLG_WAITING) {
if (ddi_dmae_stop(fcp->c_dip, fcp->c_dmachan) != DDI_SUCCESS)
cmn_err(CE_WARN, "fdwatch: dmae stop failed, "
- "dip %p, dmachan %x\n",
- (void*)fcp->c_dip, fcp->c_dmachan);
+ "dip %p, dmachan %x\n",
+ (void*)fcp->c_dip, fcp->c_dmachan);
csb = &fcp->c_csb;
FCERRPRINT(FDEP_L3, FDEM_WATC,
(CE_WARN, "fdcwatch unit %d: xstate = %d",
@@ -3168,7 +3207,7 @@ get_ioaddr(dev_info_t *dip, int *ioaddr)
} *reglist;
if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
- "reg", (caddr_t)&reglist, &reglen) != DDI_PROP_SUCCESS) {
+ "reg", (caddr_t)&reglist, &reglen) != DDI_PROP_SUCCESS) {
cmn_err(CE_WARN, "fdc: reg property not found");
return (DDI_FAILURE);
}
diff --git a/usr/src/uts/common/io/i8042.c b/usr/src/uts/common/io/i8042.c
index 42bcbb8d2a..9c9e568abc 100644
--- a/usr/src/uts/common/io/i8042.c
+++ b/usr/src/uts/common/io/i8042.c
@@ -382,6 +382,23 @@ static void i8042_send(struct i8042 *global, int reg, unsigned char cmd);
unsigned int i8042_unclaimed_interrupts = 0;
+static void
+i8042_discard_junk_data(struct i8042 *global)
+{
+ /* Discard any junk data that may have been left around */
+ for (;;) {
+ unsigned char stat;
+
+ stat = ddi_get8(global->io_handle,
+ global->io_addr + I8042_STAT);
+ if (! (stat & I8042_STAT_OUTBF))
+ break;
+ (void) ddi_get8(global->io_handle,
+ global->io_addr + I8042_DATA);
+
+ }
+}
+
static int
i8042_cleanup(struct i8042 *global)
{
@@ -508,7 +525,7 @@ i8042_purge_outbuf(struct i8042 *global)
if (i8042_wait_obf(global))
break;
(void) ddi_get8(global->io_handle,
- global->io_addr + I8042_DATA);
+ global->io_addr + I8042_DATA);
}
/*
@@ -537,10 +554,9 @@ i8042_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
switch (cmd) {
case DDI_RESUME:
-#ifdef __sparc
global = (struct i8042 *)ddi_get_driver_private(dip);
+ i8042_discard_junk_data(global);
i8042_write_command_byte(global, I8042_CMD_ENABLE_ALL);
-#endif
return (DDI_SUCCESS);
case DDI_ATTACH:
@@ -628,7 +644,7 @@ i8042_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
global->iblock_cookies = NULL;
mutex_init(&global->i8042_mutex, NULL, MUTEX_DRIVER,
- (global->nintrs > 0) ? global->iblock_cookies[0] : NULL);
+ (global->nintrs > 0) ? global->iblock_cookies[0] : NULL);
mutex_init(&global->i8042_out_mutex, NULL, MUTEX_DRIVER, NULL);
@@ -816,8 +832,8 @@ i8042_map(
}
if (rnumber < 0 || rnumber >= iprop_len) {
cmn_err(CE_WARN, "%s #%d: bad map request for %s@%s",
- DRIVER_NAME(dip), ddi_get_instance(dip),
- ddi_node_name(rdip), ddi_get_name_addr(rdip));
+ DRIVER_NAME(dip), ddi_get_instance(dip),
+ ddi_node_name(rdip), ddi_get_name_addr(rdip));
return (DDI_FAILURE);
}
#endif
@@ -838,9 +854,9 @@ i8042_map(
default:
#if defined(DEBUG)
cmn_err(CE_WARN, "%s #%d: unknown map type %d for %s@%s",
- DRIVER_NAME(dip), ddi_get_instance(dip),
- mp->map_type,
- ddi_node_name(rdip), ddi_get_name_addr(rdip));
+ DRIVER_NAME(dip), ddi_get_instance(dip),
+ mp->map_type,
+ ddi_node_name(rdip), ddi_get_name_addr(rdip));
#endif
return (DDI_FAILURE);
}
@@ -848,9 +864,9 @@ i8042_map(
#if defined(DEBUG)
if (offset != 0 || len != 0) {
cmn_err(CE_WARN,
- "%s #%d: partial mapping attempt for %s@%s ignored",
- DRIVER_NAME(dip), ddi_get_instance(dip),
- ddi_node_name(rdip), ddi_get_name_addr(rdip));
+ "%s #%d: partial mapping attempt for %s@%s ignored",
+ DRIVER_NAME(dip), ddi_get_instance(dip),
+ ddi_node_name(rdip), ddi_get_name_addr(rdip));
}
#endif
@@ -901,7 +917,7 @@ i8042_map(
default:
cmn_err(CE_WARN, "%s: map operation %d not supported",
- DRIVER_NAME(dip), mp->map_op);
+ DRIVER_NAME(dip), mp->map_op);
return (DDI_FAILURE);
}
}
@@ -986,7 +1002,7 @@ i8042_intr(caddr_t arg)
#if defined(DEBUG)
if (port->overruns % 50 == 1) {
cmn_err(CE_WARN, "i8042/%d: %d overruns\n",
- which_port, port->overruns);
+ which_port, port->overruns);
}
#endif
mutex_exit(&global->i8042_mutex);
@@ -1039,7 +1055,7 @@ i8042_send(struct i8042 *global, int reg, unsigned char val)
/*CONSTANTCONDITION*/
while (1) {
stat = ddi_get8(global->io_handle,
- global->io_addr + I8042_STAT);
+ global->io_addr + I8042_STAT);
if ((stat & I8042_STAT_INBF) == 0) {
ddi_put8(global->io_handle, global->io_addr+reg, val);
@@ -1113,7 +1129,7 @@ i8042_get8(ddi_acc_impl_t *handlep, uint8_t *addr)
} else {
#if defined(DEBUG)
cmn_err(CE_WARN,
- "i8042: Tried to read from empty buffer");
+ "i8042: Tried to read from empty buffer");
#endif
ret = 0;
}
@@ -1127,7 +1143,7 @@ i8042_get8(ddi_acc_impl_t *handlep, uint8_t *addr)
case I8042_INT_OUTPUT_DATA:
case I8042_POLL_OUTPUT_DATA:
cmn_err(CE_WARN, "i8042: read of write-only register 0x%p",
- (void *)addr);
+ (void *)addr);
ret = 0;
break;
#endif
@@ -1137,7 +1153,7 @@ i8042_get8(ddi_acc_impl_t *handlep, uint8_t *addr)
return (B_TRUE);
for (;;) {
stat = ddi_get8(global->io_handle,
- global->io_addr + I8042_STAT);
+ global->io_addr + I8042_STAT);
if ((stat & I8042_STAT_OUTBF) == 0)
return (B_FALSE);
switch (port->which) {
@@ -1151,13 +1167,13 @@ i8042_get8(ddi_acc_impl_t *handlep, uint8_t *addr)
break;
default:
cmn_err(CE_WARN, "data from unknown port: %d",
- port->which);
+ port->which);
}
/*
* Data for wrong port pending; discard it.
*/
(void) ddi_get8(global->io_handle,
- global->io_addr + I8042_DATA);
+ global->io_addr + I8042_DATA);
}
/* NOTREACHED */
@@ -1170,7 +1186,7 @@ i8042_get8(ddi_acc_impl_t *handlep, uint8_t *addr)
}
stat = ddi_get8(global->io_handle,
- global->io_addr + I8042_STAT);
+ global->io_addr + I8042_STAT);
if ((stat & I8042_STAT_OUTBF) == 0) {
#if defined(DEBUG)
prom_printf("I8042_POLL_INPUT_DATA: no data!\n");
@@ -1178,7 +1194,7 @@ i8042_get8(ddi_acc_impl_t *handlep, uint8_t *addr)
return (0);
}
ret = ddi_get8(global->io_handle,
- global->io_addr + I8042_DATA);
+ global->io_addr + I8042_DATA);
switch (port->which) {
case MAIN_PORT:
if ((stat & I8042_STAT_AUXBF) == 0)
@@ -1197,7 +1213,7 @@ i8042_get8(ddi_acc_impl_t *handlep, uint8_t *addr)
default:
#if defined(DEBUG)
cmn_err(CE_WARN, "i8042: read of undefined register 0x%p",
- (void *)addr);
+ (void *)addr);
#endif
ret = 0;
break;
@@ -1240,12 +1256,12 @@ i8042_put8(ddi_acc_impl_t *handlep, uint8_t *addr, uint8_t value)
case I8042_POLL_INPUT_AVAIL:
case I8042_POLL_INPUT_DATA:
cmn_err(CE_WARN, "i8042: write of read-only register 0x%p",
- (void *)addr);
+ (void *)addr);
break;
default:
cmn_err(CE_WARN, "i8042: read of undefined register 0x%p",
- (void *)addr);
+ (void *)addr);
break;
#endif
}
@@ -1391,7 +1407,7 @@ i8042_ctlops(dev_info_t *dip, dev_info_t *rdip,
(void) sprintf(name, "%d", which_port);
ddi_set_name_addr(child, name);
ddi_set_parent_data(child,
- (caddr_t)&global->i8042_ports[which_port]);
+ (caddr_t)&global->i8042_ports[which_port]);
return (DDI_SUCCESS);
case DDI_CTLOPS_UNINITCHILD:
@@ -1402,8 +1418,8 @@ i8042_ctlops(dev_info_t *dip, dev_info_t *rdip,
case DDI_CTLOPS_REPORTDEV:
cmn_err(CE_CONT, "?8042 device: %s@%s, %s # %d\n",
- ddi_node_name(rdip), ddi_get_name_addr(rdip),
- DRIVER_NAME(rdip), ddi_get_instance(rdip));
+ ddi_node_name(rdip), ddi_get_name_addr(rdip),
+ DRIVER_NAME(rdip), ddi_get_instance(rdip));
return (DDI_SUCCESS);
default:
diff --git a/usr/src/uts/common/io/kb8042/kb8042.c b/usr/src/uts/common/io/kb8042/kb8042.c
index 6799e6d75a..0e8369b076 100644
--- a/usr/src/uts/common/io/kb8042/kb8042.c
+++ b/usr/src/uts/common/io/kb8042/kb8042.c
@@ -431,6 +431,14 @@ kb8042_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
kb8042->w_init = 0;
kb8042_init(kb8042, B_TRUE);
kb8042_setled(kb8042, leds, B_FALSE);
+ mutex_enter(&kb8042->w_hw_mutex);
+ kb8042->suspended = B_FALSE;
+ if (kb8042->w_qp != NULL) {
+ enableok(WR(kb8042->w_qp));
+ qenable(WR(kb8042->w_qp));
+ }
+ cv_broadcast(&kb8042->suspend_cv);
+ mutex_exit(&kb8042->w_hw_mutex);
return (DDI_SUCCESS);
case DDI_ATTACH:
@@ -480,7 +488,8 @@ kb8042_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
}
mutex_init(&kb8042->w_hw_mutex, NULL, MUTEX_DRIVER, kb8042->w_iblock);
-
+ cv_init(&kb8042->ops_cv, NULL, CV_DRIVER, NULL);
+ cv_init(&kb8042->suspend_cv, NULL, CV_DRIVER, NULL);
kb8042->init_state |= KB8042_HW_MUTEX_INITTED;
kb8042_init(kb8042, B_FALSE);
@@ -552,6 +561,12 @@ kb8042_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
switch (cmd) {
case DDI_SUSPEND:
+ mutex_enter(&kb8042->w_hw_mutex);
+ ASSERT(kb8042->ops >= 0);
+ while (kb8042->ops > 0)
+ cv_wait(&kb8042->ops_cv, &kb8042->w_hw_mutex);
+ kb8042->suspended = B_TRUE;
+ mutex_exit(&kb8042->w_hw_mutex);
return (DDI_SUCCESS);
case DDI_DETACH:
@@ -606,8 +621,11 @@ kb8042_cleanup(struct kb8042 *kb8042)
{
ASSERT(kb8042_dip != NULL);
- if (kb8042->init_state & KB8042_HW_MUTEX_INITTED)
+ if (kb8042->init_state & KB8042_HW_MUTEX_INITTED) {
+ cv_destroy(&kb8042->suspend_cv);
+ cv_destroy(&kb8042->ops_cv);
mutex_destroy(&kb8042->w_hw_mutex);
+ }
if (kb8042->init_state & KB8042_INTR_ADDED)
ddi_remove_intr(kb8042_dip, 0, kb8042->w_iblock);
@@ -660,9 +678,19 @@ kb8042_open(queue_t *qp, dev_t *devp, int flag, int sflag, cred_t *credp)
kb8042 = &Kdws;
+ mutex_enter(&kb8042->w_hw_mutex);
+ while (kb8042->suspended) {
+ if (cv_wait_sig(&kb8042->suspend_cv, &kb8042->w_hw_mutex) ==
+ 0) {
+ mutex_exit(&kb8042->w_hw_mutex);
+ return (EINTR);
+ }
+ }
+
kb8042->w_dev = *devp;
if (qp->q_ptr) {
+ mutex_exit(&kb8042->w_hw_mutex);
return (0);
}
qp->q_ptr = (caddr_t)kb8042;
@@ -670,6 +698,10 @@ kb8042_open(queue_t *qp, dev_t *devp, int flag, int sflag, cred_t *credp)
if (!kb8042->w_qp)
kb8042->w_qp = qp;
+ ASSERT(kb8042->ops >= 0);
+ kb8042->ops++;
+ mutex_exit(&kb8042->w_hw_mutex);
+
kb8042_get_initial_leds(kb8042, &initial_leds, &initial_led_mask);
err = kbtrans_streams_init(qp, sflag, credp,
(struct kbtrans_hardware *)kb8042, &kb8042_callbacks,
@@ -700,6 +732,13 @@ kb8042_open(queue_t *qp, dev_t *devp, int flag, int sflag, cred_t *credp)
kbtrans_streams_enable(kb8042->hw_kbtrans);
+ mutex_enter(&kb8042->w_hw_mutex);
+ ASSERT(kb8042->ops > 0);
+ kb8042->ops--;
+ if (kb8042->ops == 0)
+ cv_broadcast(&kb8042->ops_cv);
+ mutex_exit(&kb8042->w_hw_mutex);
+
return (0);
}
@@ -714,11 +753,31 @@ kb8042_close(queue_t *qp, int flag, cred_t *credp)
kb8042 = (struct kb8042 *)qp->q_ptr;
+ mutex_enter(&kb8042->w_hw_mutex);
+ while (kb8042->suspended) {
+ if (cv_wait_sig(&kb8042->suspend_cv, &kb8042->w_hw_mutex) ==
+ 0) {
+ mutex_exit(&kb8042->w_hw_mutex);
+ return (EINTR);
+ }
+ }
+
+ ASSERT(kb8042->ops >= 0);
+ kb8042->ops++;
+ mutex_exit(&kb8042->w_hw_mutex);
+
(void) kbtrans_streams_fini(kb8042->hw_kbtrans);
kb8042->w_qp = (queue_t *)NULL;
qprocsoff(qp);
+ mutex_enter(&kb8042->w_hw_mutex);
+ ASSERT(kb8042->ops > 0);
+ kb8042->ops--;
+ if (kb8042->ops == 0)
+ cv_broadcast(&kb8042->ops_cv);
+ mutex_exit(&kb8042->w_hw_mutex);
+
return (0);
}
@@ -728,10 +787,27 @@ kb8042_wsrv(queue_t *qp)
struct kb8042 *kb8042;
mblk_t *mp;
+ boolean_t suspended;
kb8042 = (struct kb8042 *)qp->q_ptr;
+ mutex_enter(&kb8042->w_hw_mutex);
+ suspended = kb8042->suspended;
+ ASSERT(kb8042->ops >= 0);
+ if (!suspended)
+ kb8042->ops++;
+ mutex_exit(&kb8042->w_hw_mutex);
+
+#ifdef NO_KB_DEBUG
+ while (!suspended && (mp = getq(qp)) != NULL) {
+#else
+ /*
+ * Not taking keyboard input while suspending can make debugging
+ * difficult. However, we still do the ops counting so that we
+ * don't suspend at a bad time.
+ */
while ((mp = getq(qp))) {
+#endif
switch (kbtrans_streams_message(kb8042->hw_kbtrans, mp)) {
case KBTRANS_MESSAGE_HANDLED:
continue;
@@ -765,6 +841,16 @@ kb8042_wsrv(queue_t *qp)
continue;
}
}
+
+ mutex_enter(&kb8042->w_hw_mutex);
+ if (!suspended) {
+ ASSERT(kb8042->ops > 0);
+ kb8042->ops--;
+ if (kb8042->ops == 0)
+ cv_broadcast(&kb8042->ops_cv);
+ }
+ mutex_exit(&kb8042->w_hw_mutex);
+
return (0);
}
diff --git a/usr/src/uts/common/io/kb8042/kb8042.h b/usr/src/uts/common/io/kb8042/kb8042.h
index 33d849669e..6232d38279 100644
--- a/usr/src/uts/common/io/kb8042/kb8042.h
+++ b/usr/src/uts/common/io/kb8042/kb8042.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -19,12 +18,13 @@
*
* CDDL HEADER END
*/
+
/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
/* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */
/* All Rights Reserved */
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -111,6 +111,10 @@ struct kb8042 {
int simulated_kbd_type;
uint32_t init_state;
int break_received;
+ boolean_t suspended;
+ int ops;
+ kcondvar_t suspend_cv;
+ kcondvar_t ops_cv;
};
#define KB_COMMAND_STATE_IDLE 0
diff --git a/usr/src/uts/common/io/pci-ide/pci-ide.c b/usr/src/uts/common/io/pci-ide/pci-ide.c
index c47157dd31..924aec38d7 100644
--- a/usr/src/uts/common/io/pci-ide/pci-ide.c
+++ b/usr/src/uts/common/io/pci-ide/pci-ide.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -47,6 +47,7 @@
#include <sys/pci_intr_lib.h>
int pciide_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
+int pciide_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
#define PCIIDE_NATIVE_MODE(dip) \
(!ddi_prop_exists(DDI_DEV_T_ANY, (dip), DDI_PROP_DONTPASS, \
@@ -154,7 +155,7 @@ struct dev_ops pciide_ops = {
nulldev, /* identify */
nulldev, /* probe */
pciide_attach, /* attach */
- nodev, /* detach */
+ pciide_detach, /* detach */
nodev, /* reset */
(struct cb_ops *)0, /* driver operations */
&pciide_bus_ops /* bus operations */
@@ -203,9 +204,8 @@ pciide_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
ddi_acc_handle_t conf_hdl = NULL;
int rc;
-
- if (cmd == DDI_ATTACH) {
-
+ switch (cmd) {
+ case DDI_ATTACH:
/*
* Make sure bus-mastering is enabled, even if
* BIOS didn't.
@@ -225,13 +225,55 @@ pciide_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
cmdreg | PCI_COMM_ME);
}
pci_config_teardown(&conf_hdl);
+ return (DDI_SUCCESS);
+ case DDI_RESUME:
+ /* Restore our PCI configuration header */
+ if (pci_restore_config_regs(dip) != DDI_SUCCESS) {
+ /*
+ * XXXX
+ * This is a pretty bad thing. However, for some
+ * reason it always happens. To further complicate
+			 * things, it appears that if we just ignore this,
+			 * we resume properly. For now, all I want to do is
+			 * generate this message so that it doesn't get
+ * forgotten.
+ */
+ cmn_err(CE_WARN,
+ "Couldn't restore PCI config regs for %s(%p)",
+ ddi_node_name(dip), (void *) dip);
+ }
+#ifdef DEBUG
+ /* Bus mastering should still be enabled */
+ if (pci_config_setup(dip, &conf_hdl) != DDI_SUCCESS)
+ return (DDI_FAILURE);
+ cmdreg = pci_config_get16(conf_hdl, PCI_CONF_COMM);
+ ASSERT((cmdreg & PCI_COMM_ME) != 0);
+ pci_config_teardown(&conf_hdl);
+#endif
return (DDI_SUCCESS);
- } else {
- return (DDI_FAILURE);
}
+
+ return (DDI_FAILURE);
}
+/*ARGSUSED*/
+int
+pciide_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ switch (cmd) {
+ case DDI_DETACH:
+ return (DDI_SUCCESS);
+ case DDI_SUSPEND:
+ /* Save our PCI configuration header */
+ if (pci_save_config_regs(dip) != DDI_SUCCESS) {
+ /* Don't suspend if we cannot save config regs */
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+ }
+ return (DDI_FAILURE);
+}
/*ARGSUSED*/
static int
@@ -295,9 +337,9 @@ pciide_ddi_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
old_rnumber = rnumber;
new_rnumber
- = pciide_pre26_rnumber_map(dip, old_rnumber);
+ = pciide_pre26_rnumber_map(dip, old_rnumber);
PDBG(("pciide rnumber old %d new %d\n",
- old_rnumber, new_rnumber));
+ old_rnumber, new_rnumber));
rnumber = new_rnumber;
}
@@ -454,7 +496,7 @@ pciide_initchild(dev_info_t *mydip, dev_info_t *cdip)
* property in the ata.conf file.
*/
vec = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
- "interrupts", -1);
+ "interrupts", -1);
if (vec == -1) {
/* setup compatibility mode interrupts */
if (dev == 0) {
@@ -530,7 +572,7 @@ pciide_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
old_rnumber = mp->map_obj.rnumber;
new_rnumber = pciide_pre26_rnumber_map(dip, old_rnumber);
PDBG(("pciide rnumber old %d new %d\n",
- old_rnumber, new_rnumber));
+ old_rnumber, new_rnumber));
mp->map_obj.rnumber = new_rnumber;
}
@@ -545,7 +587,7 @@ pciide_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
*/
pdip = ddi_get_parent(dip);
rc = ((*(DEVI(pdip)->devi_ops->devo_bus_ops->bus_map))
- (pdip, dip, mp, offset, len, vaddrp));
+ (pdip, dip, mp, offset, len, vaddrp));
PDBG(("pciide_bus_map %s\n", rc == DDI_SUCCESS ? "okay" : "!ok"));
@@ -751,7 +793,7 @@ pciide_compat_setup(dev_info_t *mydip, dev_info_t *cdip, int dev)
if ((dev == 0 && !(class_code & PCI_IDE_IF_NATIVE_PRI)) ||
(dev == 1 && !(class_code & PCI_IDE_IF_NATIVE_SEC))) {
rc = ddi_prop_update_int(DDI_DEV_T_NONE, cdip,
- "compatibility-mode", 1);
+ "compatibility-mode", 1);
if (rc != DDI_PROP_SUCCESS)
cmn_err(CE_WARN,
"pciide prop error %d compat-mode", rc);
@@ -765,9 +807,9 @@ pciide_compat_setup(dev_info_t *mydip, dev_info_t *cdip, int dev)
*/
class_code &= 0x00ffff00;
class_code |= PCI_IDE_IF_BM_CAP_MASK |
- PCI_IDE_IF_NATIVE_PRI | PCI_IDE_IF_NATIVE_SEC;
+ PCI_IDE_IF_NATIVE_PRI | PCI_IDE_IF_NATIVE_SEC;
rc = ddi_prop_update_int(DDI_DEV_T_NONE, mydip,
- "class-code", class_code);
+ "class-code", class_code);
if (rc != DDI_PROP_SUCCESS)
cmn_err(CE_WARN,
"pciide prop error %d class-code", rc);
@@ -783,7 +825,7 @@ pciide_pre26_rnumber_map(dev_info_t *mydip, int rnumber)
int class_code;
class_code = ddi_prop_get_int(DDI_DEV_T_ANY, mydip, DDI_PROP_DONTPASS,
- "class-code", 0);
+ "class-code", 0);
pri_native = (class_code & PCI_IDE_IF_NATIVE_PRI) ? TRUE : FALSE;
sec_native = (class_code & PCI_IDE_IF_NATIVE_SEC) ? TRUE : FALSE;
diff --git a/usr/src/uts/common/io/pm.c b/usr/src/uts/common/io/pm.c
index d4cb7f0da0..42847eecf7 100644
--- a/usr/src/uts/common/io/pm.c
+++ b/usr/src/uts/common/io/pm.c
@@ -56,10 +56,10 @@
#include <sys/policy.h>
/*
- * Minor number is instance<<8 + clone minor from range 1-255; (0 reserved
- * for "original"
+ * Minor number is instance<<8 + clone minor from range 1-254; (0 reserved
+ * for "original")
*/
-#define PM_MINOR_TO_CLONE(minor) ((minor) & (PM_MAX_CLONE - 1))
+#define PM_MINOR_TO_CLONE(minor) ((minor) & (PM_MAX_CLONE -1))
#define PM_NUMCMPTS(dip) (DEVI(dip)->devi_pm_num_components)
#define PM_IS_CFB(dip) (DEVI(dip)->devi_pm_flags & PMC_CONSOLE_FB)
@@ -67,6 +67,8 @@
#define PM_RELE(dip) ddi_release_devi(dip)
#define PM_IDLEDOWN_TIME 10
+#define MAXSMBIOSSTRLEN 64 /* from SMBIOS spec */
+#define MAXCOPYBUF (MAXSMBIOSSTRLEN + 1)
extern kmutex_t pm_scan_lock; /* protects autopm_enable, pm_scans_disabled */
extern kmutex_t pm_clone_lock; /* protects pm_clones array */
@@ -77,6 +79,19 @@ extern int pm_system_idle_threshold;
extern int pm_cpu_idle_threshold;
extern kcondvar_t pm_clones_cv[PM_MAX_CLONE];
extern uint_t pm_poll_cnt[PM_MAX_CLONE];
+extern int autoS3_enabled;
+extern void pm_record_thresh(pm_thresh_rec_t *);
+extern void pm_register_watcher(int, dev_info_t *);
+extern int pm_get_current_power(dev_info_t *, int, int *);
+extern int pm_interest_registered(int);
+extern void pm_all_to_default_thresholds(void);
+extern int pm_current_threshold(dev_info_t *, int, int *);
+extern void pm_deregister_watcher(int, dev_info_t *);
+extern void pm_unrecord_threshold(char *);
+extern int pm_S3_enabled;
+extern int pm_ppm_searchlist(pm_searchargs_t *);
+extern psce_t *pm_psc_clone_to_direct(int);
+extern psce_t *pm_psc_clone_to_interest(int);
/*
* The soft state of the power manager. Since there will only
@@ -181,7 +196,7 @@ pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
pmstp->pm_instance = ddi_get_instance(dip);
if (ddi_create_minor_node(dip, "pm", S_IFCHR,
(pmstp->pm_instance << 8) + 0,
- DDI_PSEUDO, 0) != DDI_SUCCESS) {
+ DDI_PSEUDO, 0) != DDI_SUCCESS) {
return (DDI_FAILURE);
}
pmstp->pm_dip = dip; /* pm_init and getinfo depend on it */
@@ -271,6 +286,7 @@ pm_close_direct_pm_device(dev_info_t *dip, void *arg)
#define NODEP 5
#define DEP 6
#define PM_PSC 7
+#define PM_SRCH 8
#define CHECKPERMS 0x001
#define SU 0x002
@@ -405,6 +421,8 @@ static struct pm_cmd_info pmci[] = {
{PM_RESET_DEVICE_THRESHOLD, "PM_RESET_DEVICE_THRESHOLD", 1, PM_REQ,
INWHO, DIP, NODEP, SU},
{PM_GET_PM_STATE, "PM_GET_PM_STATE", 1, NOSTRUCT},
+ {PM_GET_AUTOS3_STATE, "PM_GET_AUTOS3_STATE", 1, NOSTRUCT},
+ {PM_GET_S3_SUPPORT_STATE, "PM_GET_S3_SUPPORT_STATE", 1, NOSTRUCT},
{PM_GET_DEVICE_TYPE, "PM_GET_DEVICE_TYPE", 1, PM_REQ, INWHO,
DIP, NODEP},
{PM_SET_COMPONENT_THRESHOLDS, "PM_SET_COMPONENT_THRESHOLDS", 1, PM_REQ,
@@ -431,6 +449,14 @@ static struct pm_cmd_info pmci[] = {
{PM_SET_CPU_THRESHOLD, "PM_SET_CPU_THRESHOLD", 1, NOSTRUCT,
0, 0, 0, SU},
{PM_GET_CPUPM_STATE, "PM_GET_CPUPM_STATE", 1, NOSTRUCT},
+ {PM_START_AUTOS3, "PM_START_AUTOS3", 1, NOSTRUCT, 0, 0, 0, SU},
+ {PM_STOP_AUTOS3, "PM_STOP_AUTOS3", 1, NOSTRUCT, 0, 0, 0, SU},
+ {PM_ENABLE_S3, "PM_ENABLE_S3", 1, NOSTRUCT, 0, 0, 0, SU},
+ {PM_DISABLE_S3, "PM_DISABLE_S3", 1, NOSTRUCT, 0, 0, 0, SU},
+ {PM_ENTER_S3, "PM_ENTER_S3", 1, NOSTRUCT, 0, 0, 0, SU},
+ {PM_SEARCH_LIST, "PM_SEARCH_LIST", 1, PM_SRCH, 0, 0, 0, SU},
+ {PM_GET_CMD_NAME, "PM_GET_CMD_NAME", 1, PM_REQ, INDATAOUT, NODIP,
+ NODEP, 0},
{0, NULL}
};
@@ -729,8 +755,6 @@ static void
pm_discard_entries(int clone)
{
psce_t *pscep;
- psce_t *pm_psc_clone_to_direct(int);
- psce_t *pm_psc_clone_to_interest(int);
int direct = 0;
mutex_enter(&pm_clone_lock);
@@ -901,26 +925,21 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
size_t wholen; /* copyinstr length */
size_t deplen = MAXNAMELEN;
char *dep, i_dep_buf[MAXNAMELEN];
- char *pathbuf;
+ char pathbuf[MAXNAMELEN];
struct pm_component *cp;
#ifdef _MULTI_DATAMODEL
pm_state_change32_t *pscp32;
pm_state_change32_t psc32;
+ pm_searchargs32_t psa32;
size_t copysize32;
#endif
pm_state_change_t *pscp;
pm_state_change_t psc;
+ pm_searchargs_t psa;
+ char listname[MAXCOPYBUF];
+ char manufacturer[MAXCOPYBUF];
+ char product[MAXCOPYBUF];
size_t copysize;
- extern void pm_record_thresh(pm_thresh_rec_t *);
- psce_t *pm_psc_clone_to_direct(int);
- psce_t *pm_psc_clone_to_interest(int);
- extern void pm_register_watcher(int, dev_info_t *);
- extern int pm_get_current_power(dev_info_t *, int, int *);
- extern int pm_interest_registered(int);
- extern void pm_all_to_default_thresholds(void);
- extern int pm_current_threshold(dev_info_t *, int, int *);
- extern void pm_deregister_watcher(int, dev_info_t *);
- extern void pm_unrecord_threshold(char *);
PMD(PMD_IOCTL, ("ioctl: %s: begin\n", cmdstr))
@@ -955,6 +974,7 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
}
switch (pcip->str_type) {
case PM_REQ:
+ {
#ifdef _MULTI_DATAMODEL
if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
pm_req32_t req32;
@@ -979,9 +999,9 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
break;
}
req.physpath = who;
+ PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n",
+ cmdstr, req.physpath))
}
- PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n", cmdstr,
- req.physpath))
if (pcip->inargs & INDATA) {
req.data = (void *)(uintptr_t)req32.data;
req.datasize = req32.datasize;
@@ -1053,9 +1073,8 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
ASSERT(!(pcip->inargs & INDATAINT));
ASSERT(pcip->deptype == DEP);
if (req32.data != NULL) {
- size_t dummy;
if (copyinstr((void *)(uintptr_t)
- req32.data, dep, deplen, &dummy)) {
+ req32.data, dep, deplen, NULL)) {
PMD(PMD_ERROR, ("ioctl: %s: "
"0x%p dep size %lx, EFAULT"
"\n", cmdstr,
@@ -1096,9 +1115,9 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
break;
}
req.physpath = who;
+ PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n",
+ cmdstr, req.physpath))
}
- PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n", cmdstr,
- req.physpath))
if (!(pcip->inargs & INDATA)) {
req.data = NULL;
req.datasize = 0;
@@ -1154,9 +1173,8 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
ASSERT(!(pcip->inargs & INDATAINT));
ASSERT(pcip->deptype == DEP);
if (req.data != NULL) {
- size_t dummy;
if (copyinstr((caddr_t)req.data,
- dep, deplen, &dummy)) {
+ dep, deplen, NULL)) {
PMD(PMD_ERROR, ("ioctl: %s: "
"0x%p dep size %lu, "
"EFAULT\n", cmdstr,
@@ -1222,6 +1240,7 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
}
case PM_GET_DEVICE_THRESHOLD:
+ {
PM_LOCK_DIP(dip);
if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip)) {
PM_UNLOCK_DIP(dip);
@@ -1234,6 +1253,7 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
PM_UNLOCK_DIP(dip);
ret = 0;
break;
+ }
case PM_DIRECT_PM:
{
@@ -1248,11 +1268,9 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
	 * Check to see if there is a dependency on
* this kept device, if so, return EBUSY.
*/
- pathbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
(void) ddi_pathname(dip, pathbuf);
pm_dispatch_to_dep_thread(PM_DEP_WK_CHECK_KEPT,
NULL, pathbuf, PM_DEP_WAIT, &has_dep, 0);
- kmem_free(pathbuf, MAXPATHLEN);
if (has_dep) {
PMD(PMD_ERROR | PMD_DPM, ("%s EBUSY\n",
cmdstr))
@@ -1301,11 +1319,9 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
info->pmi_dev_pm_state &= ~PM_DIRECT;
PM_UNLOCK_DIP(dip);
/* Bring ourselves up if there is a keeper. */
- pathbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
(void) ddi_pathname(dip, pathbuf);
pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF,
NULL, pathbuf, PM_DEP_WAIT, NULL, 0);
- kmem_free(pathbuf, MAXPATHLEN);
pm_discard_entries(clone);
pm_deregister_watcher(clone, dip);
/*
@@ -1426,6 +1442,7 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
}
case PM_GET_CURRENT_POWER:
+ {
if (pm_get_current_power(dip, req.component,
rval_p) != DDI_SUCCESS) {
PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s "
@@ -1440,6 +1457,7 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
else
ret = 0;
break;
+ }
case PM_GET_TIME_IDLE:
{
@@ -1629,11 +1647,14 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
}
case PM_GET_NUM_COMPONENTS:
+ {
ret = 0;
*rval_p = PM_NUMCMPTS(dip);
break;
+ }
case PM_GET_DEVICE_TYPE:
+ {
ret = 0;
if ((info = PM_GET_PM_INFO(dip)) == NULL) {
PMD(PMD_ERROR, ("ioctl: %s: "
@@ -1647,6 +1668,7 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
*rval_p = PM_AUTOPM;
}
break;
+ }
case PM_SET_COMPONENT_THRESHOLDS:
{
@@ -1981,6 +2003,8 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
cmdstr, PM_DEVICE(dip),
(void *)req.data))
ASSERT(!dipheld);
+ kmem_free(timestamp,
+ comps * sizeof (time_t));
return (EFAULT);
}
rvaddr = (caddr_t)req.data;
@@ -1994,7 +2018,24 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
break;
}
+ case PM_GET_CMD_NAME:
+ {
+ PMD(PMD_IOCTL, ("%s: %s\n", cmdstr,
+ pm_decode_cmd(req.value)))
+ if (ret = copyoutstr(pm_decode_cmd(req.value),
+ (char *)req.data, req.datasize, &lencopied)) {
+ PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
+ "copyoutstr %p failed--EFAULT\n", cmdstr,
+ PM_DEVICE(dip), (void *)req.data))
+ break;
+ }
+ *rval_p = lencopied;
+ ret = 0;
+ break;
+ }
+
case PM_GET_COMPONENT_NAME:
+ {
ASSERT(dip);
if (!e_pm_valid_comp(dip, req.component, &cp)) {
PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
@@ -2014,6 +2055,7 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
*rval_p = lencopied;
ret = 0;
break;
+ }
case PM_GET_POWER_NAME:
{
@@ -2118,6 +2160,7 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
case PM_GET_NUM_POWER_LEVELS:
+ {
if (!e_pm_valid_comp(dip, req.component, &cp)) {
PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
"component %d > numcmpts - 1 %d--EINVAL\n",
@@ -2129,8 +2172,10 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
*rval_p = cp->pmc_comp.pmc_numlevels;
ret = 0;
break;
+ }
case PM_GET_DEVICE_THRESHOLD_BASIS:
+ {
ret = 0;
PM_LOCK_DIP(dip);
if ((info = PM_GET_PM_INFO(dip)) == NULL) {
@@ -2172,9 +2217,23 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
PM_UNLOCK_DIP(dip);
break;
}
+ default:
+ /*
+ * Internal error, invalid ioctl description
+ * force debug entry even if pm_debug not set
+ */
+#ifdef DEBUG
+ pm_log("invalid diptype %d for cmd %d (%s)\n",
+ pcip->diptype, cmd, pcip->name);
+#endif
+ ASSERT(0);
+ return (EIO);
+ }
break;
+ }
case PM_PSC:
+ {
/*
* Commands that require pm_state_change_t as arg
*/
@@ -2461,7 +2520,7 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
if (ddi_copyout(&psc32.component,
&pscp32->component, copysize32, mode)
- != 0) {
+ != 0) {
PMD(PMD_ERROR, ("ioctl: %s: copyout "
"failed--EFAULT\n", cmdstr))
ret = EFAULT;
@@ -2482,14 +2541,128 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
break;
}
default:
+ /*
+ * Internal error, invalid ioctl description
+ * force debug entry even if pm_debug not set
+ */
+#ifdef DEBUG
+ pm_log("invalid diptype %d for cmd %d (%s)\n",
+ pcip->diptype, cmd, pcip->name);
+#endif
ASSERT(0);
+ return (EIO);
}
break;
+ }
+
+ case PM_SRCH: /* command that takes a pm_searchargs_t arg */
+ {
+ /*
+ * If no ppm, then there is nothing to search.
+ */
+ if (DEVI(ddi_root_node())->devi_pm_ppm == NULL) {
+ ret = ENODEV;
+ break;
+ }
+
+#ifdef _MULTI_DATAMODEL
+ if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
+ if (ddi_copyin((caddr_t)arg, &psa32,
+ sizeof (psa32), mode) != 0) {
+ PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
+ "EFAULT\n\n", cmdstr))
+ return (EFAULT);
+ }
+ if (copyinstr((void *)(uintptr_t)psa32.pms_listname,
+ listname, MAXCOPYBUF, NULL)) {
+ PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
+ "%d, " "EFAULT\n", cmdstr,
+ (void *)(uintptr_t)psa32.pms_listname,
+ MAXCOPYBUF))
+ ret = EFAULT;
+ break;
+ }
+ if (copyinstr((void *)(uintptr_t)psa32.pms_manufacturer,
+ manufacturer, MAXCOPYBUF, NULL)) {
+ PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
+ "%d, " "EFAULT\n", cmdstr,
+ (void *)(uintptr_t)psa32.pms_manufacturer,
+ MAXCOPYBUF))
+ ret = EFAULT;
+ break;
+ }
+ if (copyinstr((void *)(uintptr_t)psa32.pms_product,
+ product, MAXCOPYBUF, NULL)) {
+ PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
+ "%d, " "EFAULT\n", cmdstr,
+ (void *)(uintptr_t)psa32.pms_product,
+ MAXCOPYBUF))
+ ret = EFAULT;
+ break;
+ }
+ } else
+#endif /* _MULTI_DATAMODEL */
+ {
+ if (ddi_copyin((caddr_t)arg, &psa,
+ sizeof (psa), mode) != 0) {
+ PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
+ "EFAULT\n\n", cmdstr))
+ return (EFAULT);
+ }
+ if (copyinstr(psa.pms_listname,
+ listname, MAXCOPYBUF, NULL)) {
+ PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
+ "%d, " "EFAULT\n", cmdstr,
+ (void *)psa.pms_listname, MAXCOPYBUF))
+ ret = EFAULT;
+ break;
+ }
+ if (copyinstr(psa.pms_manufacturer,
+ manufacturer, MAXCOPYBUF, NULL)) {
+ PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
+ "%d, " "EFAULT\n", cmdstr,
+ (void *)psa.pms_manufacturer, MAXCOPYBUF))
+ ret = EFAULT;
+ break;
+ }
+ if (copyinstr(psa.pms_product,
+ product, MAXCOPYBUF, NULL)) {
+ PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
+ "%d, " "EFAULT\n", cmdstr,
+ (void *)psa.pms_product, MAXCOPYBUF))
+ ret = EFAULT;
+ break;
+ }
+ }
+ psa.pms_listname = listname;
+ psa.pms_manufacturer = manufacturer;
+ psa.pms_product = product;
+ switch (cmd) {
+ case PM_SEARCH_LIST:
+ ret = pm_ppm_searchlist(&psa);
+ break;
+
+ default:
+ /*
+ * Internal error, invalid ioctl description
+ * force debug entry even if pm_debug not set
+ */
+#ifdef DEBUG
+ pm_log("invalid diptype %d for cmd %d (%s)\n",
+ pcip->diptype, cmd, pcip->name);
+#endif
+ ASSERT(0);
+ return (EIO);
+ }
+ break;
+ }
case NOSTRUCT:
+ {
switch (cmd) {
case PM_START_PM:
case PM_START_CPUPM:
+ {
mutex_enter(&pm_scan_lock);
if ((cmd == PM_START_PM && autopm_enabled) ||
(cmd == PM_START_CPUPM && PM_CPUPM_ENABLED)) {
@@ -2500,13 +2673,14 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
break;
}
if (cmd == PM_START_PM)
- autopm_enabled = 1;
+ autopm_enabled = 1;
else
- cpupm = PM_CPUPM_ENABLE;
+ cpupm = PM_CPUPM_ENABLE;
mutex_exit(&pm_scan_lock);
ddi_walk_devs(ddi_root_node(), pm_start_pm_walk, &cmd);
ret = 0;
break;
+ }
case PM_RESET_PM:
case PM_STOP_PM:
@@ -2523,13 +2697,16 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
ret = EINVAL;
break;
}
- if (cmd == PM_STOP_PM)
- autopm_enabled = 0;
- else if (cmd == PM_STOP_CPUPM)
- cpupm = PM_CPUPM_DISABLE;
- else {
- autopm_enabled = 0;
- cpupm = PM_CPUPM_NOTSET;
+ if (cmd == PM_STOP_PM) {
+ autopm_enabled = 0;
+ pm_S3_enabled = 0;
+ autoS3_enabled = 0;
+ } else if (cmd == PM_STOP_CPUPM) {
+ cpupm = PM_CPUPM_DISABLE;
+ } else {
+ autopm_enabled = 0;
+ autoS3_enabled = 0;
+ cpupm = PM_CPUPM_NOTSET;
}
mutex_exit(&pm_scan_lock);
@@ -2553,22 +2730,29 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
}
case PM_GET_SYSTEM_THRESHOLD:
+ {
*rval_p = pm_system_idle_threshold;
ret = 0;
break;
+ }
case PM_GET_DEFAULT_SYSTEM_THRESHOLD:
+ {
*rval_p = pm_default_idle_threshold;
ret = 0;
break;
+ }
case PM_GET_CPU_THRESHOLD:
+ {
*rval_p = pm_cpu_idle_threshold;
ret = 0;
break;
+ }
case PM_SET_SYSTEM_THRESHOLD:
case PM_SET_CPU_THRESHOLD:
+ {
if ((int)arg < 0) {
PMD(PMD_ERROR, ("ioctl: %s: arg 0x%x < 0"
"--EINVAL\n", cmdstr, (int)arg))
@@ -2583,20 +2767,24 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
pm_cpu_idle_threshold = (int)arg;
}
ddi_walk_devs(ddi_root_node(), pm_set_idle_thresh_walk,
- (void *) &cmd);
+ (void *) &cmd);
ret = 0;
break;
+ }
case PM_IDLE_DOWN:
+ {
if (pm_timeout_idledown() != 0) {
ddi_walk_devs(ddi_root_node(),
pm_start_idledown, (void *)PMID_IOC);
}
ret = 0;
break;
+ }
case PM_GET_PM_STATE:
+ {
if (autopm_enabled) {
*rval_p = PM_SYSTEM_PM_ENABLED;
} else {
@@ -2604,8 +2792,10 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
}
ret = 0;
break;
+ }
case PM_GET_CPUPM_STATE:
+ {
if (PM_CPUPM_ENABLED)
*rval_p = PM_CPU_PM_ENABLED;
else if (PM_CPUPM_DISABLED)
@@ -2615,7 +2805,96 @@ pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
ret = 0;
break;
}
+
+ case PM_GET_AUTOS3_STATE:
+ {
+ if (autoS3_enabled) {
+ *rval_p = PM_AUTOS3_ENABLED;
+ } else {
+ *rval_p = PM_AUTOS3_DISABLED;
+ }
+ ret = 0;
+ break;
+ }
+
+ case PM_GET_S3_SUPPORT_STATE:
+ {
+ if (pm_S3_enabled) {
+ *rval_p = PM_S3_SUPPORT_ENABLED;
+ } else {
+ *rval_p = PM_S3_SUPPORT_DISABLED;
+ }
+ ret = 0;
+ break;
+ }
+
+ /*
+ * pmconfig tells us if the platform supports S3
+ */
+ case PM_ENABLE_S3:
+ {
+ mutex_enter(&pm_scan_lock);
+ if (pm_S3_enabled) {
+ mutex_exit(&pm_scan_lock);
+ PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
+ cmdstr))
+ ret = EBUSY;
+ break;
+ }
+ pm_S3_enabled = 1;
+ mutex_exit(&pm_scan_lock);
+ ret = 0;
+ break;
+ }
+
+ case PM_DISABLE_S3:
+ {
+ mutex_enter(&pm_scan_lock);
+ pm_S3_enabled = 0;
+ mutex_exit(&pm_scan_lock);
+ ret = 0;
+ break;
+ }
+
+ case PM_START_AUTOS3:
+ {
+ mutex_enter(&pm_scan_lock);
+ if (autoS3_enabled) {
+ mutex_exit(&pm_scan_lock);
+ PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
+ cmdstr))
+ ret = EBUSY;
+ break;
+ }
+ autoS3_enabled = 1;
+ mutex_exit(&pm_scan_lock);
+ ret = 0;
+ break;
+ }
+
+ case PM_STOP_AUTOS3:
+ {
+ mutex_enter(&pm_scan_lock);
+ autoS3_enabled = 0;
+ mutex_exit(&pm_scan_lock);
+ ret = 0;
+ break;
+ }
+
+ default:
+ /*
+ * Internal error, invalid ioctl description
+ * force debug entry even if pm_debug not set
+ */
+#ifdef DEBUG
+ pm_log("invalid diptype %d for cmd %d (%s)\n",
+ pcip->diptype, cmd, pcip->name);
+#endif
+ ASSERT(0);
+ return (EIO);
+ }
break;
+ }
default:
/*
diff --git a/usr/src/uts/common/io/ppm/ppm.c b/usr/src/uts/common/io/ppm/ppm.c
index b35a2e369d..e52ff63b78 100644
--- a/usr/src/uts/common/io/ppm/ppm.c
+++ b/usr/src/uts/common/io/ppm/ppm.c
@@ -206,8 +206,10 @@ int
_init(void)
{
if (ddi_soft_state_init(
- &ppm_statep, sizeof (ppm_unit_t), 1) != DDI_SUCCESS)
+ &ppm_statep, sizeof (ppm_unit_t), 1) != DDI_SUCCESS) {
+ PPMD(D_INIT, ("ppm: soft state init\n"))
return (DDI_FAILURE);
+ }
if (mod_install(&modlinkage) != DDI_SUCCESS) {
ddi_soft_state_fini(&ppm_statep);
@@ -220,7 +222,12 @@ _init(void)
int
_fini(void)
{
- return (mod_remove(&modlinkage));
+ int error;
+
+ if ((error = mod_remove(&modlinkage)) == DDI_SUCCESS)
+ ddi_soft_state_fini(&ppm_statep);
+
+ return (error);
}
@@ -679,7 +686,7 @@ err_bydom:
STRUCT_INIT(norm, mode);
ret = ddi_copyin((caddr_t)arg, STRUCT_BUF(norm),
- STRUCT_SIZE(norm), mode);
+ STRUCT_SIZE(norm), mode);
if (ret != 0)
return (EFAULT);
@@ -755,6 +762,10 @@ ppm_ctlops(dev_info_t *dip, dev_info_t *rdip,
ppm_owned_t *owned;
int mode;
int ret = DDI_SUCCESS;
+ static int ppm_manage_sx(s3a_t *, int);
+ static int ppm_search_list(pm_searchargs_t *);
+ int *res = (int *)result;
+ s3a_t s3args;
#ifdef DEBUG
char *str = "ppm_ctlops";
@@ -765,8 +776,9 @@ ppm_ctlops(dev_info_t *dip, dev_info_t *rdip,
str, ddi_binding_name(rdip), ctlstr))
#endif
- if (ctlop != DDI_CTLOPS_POWER)
+ if (ctlop != DDI_CTLOPS_POWER) {
return (DDI_FAILURE);
+ }
unitp = (ppm_unit_t *)ddi_get_soft_state(ppm_statep, ppm_inst);
@@ -779,8 +791,6 @@ ppm_ctlops(dev_info_t *dip, dev_info_t *rdip,
ppm_manage_led(PPM_LED_BLINKING);
else
ppm_manage_led(PPM_LED_SOLIDON);
- PPMD(D_LOWEST, ("%s: %sall devices are at lowest power \n",
- str, mode ? "" : "not "))
return (DDI_SUCCESS);
/* undo the claiming of 'rdip' at attach time */
@@ -984,9 +994,37 @@ ppm_ctlops(dev_info_t *dip, dev_info_t *rdip,
return (DDI_FAILURE);
}
+ case PMR_PPM_ENTER_SX:
+ case PMR_PPM_EXIT_SX:
+ s3args.s3a_state = reqp->req.ppm_power_enter_sx_req.sx_state;
+ s3args.s3a_test_point =
+ reqp->req.ppm_power_enter_sx_req.test_point;
+ s3args.s3a_wakephys = reqp->req.ppm_power_enter_sx_req.wakephys;
+ s3args.s3a_psr = reqp->req.ppm_power_enter_sx_req.psr;
+ ret = ppm_manage_sx(&s3args,
+ reqp->request_type == PMR_PPM_ENTER_SX);
+ if (ret) {
+ PPMD(D_CPR, ("ppm_manage_sx returns %d\n", ret))
+ return (DDI_FAILURE);
+ } else {
+ return (DDI_SUCCESS);
+ }
+
+ case PMR_PPM_SEARCH_LIST:
+ ret = ppm_search_list(reqp->req.ppm_search_list_req.searchlist);
+ reqp->req.ppm_search_list_req.result = ret;
+ *res = ret;
+ if (ret) {
+ PPMD(D_CPR, ("ppm_search_list returns %d\n", ret))
+ return (DDI_FAILURE);
+ } else {
+ PPMD(D_CPR, ("ppm_search_list returns %d\n", ret))
+ return (DDI_SUCCESS);
+ }
+
default:
cmn_err(CE_WARN, "ppm_ctlops: unrecognized ctlops req(%d)",
- reqp->request_type);
+ reqp->request_type);
return (DDI_FAILURE);
}
}
@@ -1246,7 +1284,7 @@ ppm_bringup_domains()
}
mutex_exit(&domp->lock);
}
- PPMD(D_CPR, ("%s[%d]: exit, ret=%d\n", str, ppmbringup, ret))
+ PPMD(D_CPR, ("%s[%d]: exit\n", str, ppmbringup))
return (ret);
}
@@ -1275,6 +1313,15 @@ ppm_sync_bookkeeping()
mutex_exit(&domp->lock);
continue;
}
+
+ /*
+	 * Skip domains whose .devlist is NULL; such a domain may host
+	 * a PCI device that cannot tolerate having its clock turned
+	 * off, or that does not participate in PM at all.
+ */
+ if (domp->devlist == NULL)
+ continue;
+
switch (domp->model) {
case PPMD_FET:
ret = ppm_fetset(domp, PPMD_OFF);
@@ -1291,7 +1338,7 @@ ppm_sync_bookkeeping()
}
mutex_exit(&domp->lock);
}
- PPMD(D_CPR, ("%s[%d]: exit, ret=%d\n", str, ppmsyncbp, ret))
+ PPMD(D_CPR, ("%s[%d]: exit\n", str, ppmsyncbp))
return (ret);
}
@@ -1655,14 +1702,14 @@ ppm_fetset(ppm_domain_t *domp, uint8_t value)
* we might wait for longer than required
*/
PPMD(D_FET, ("%s : waiting %lu micro seconds "
- "before on\n", domp->name,
- delay - temp))
+ "before on\n", domp->name,
+ delay - temp));
drv_usecwait(delay - temp);
}
}
}
switch (dc->method) {
-#if !defined(__x86)
+#ifdef sun4u
case PPMDC_I2CKIO: {
i2c_gpio_t i2c_req;
i2c_req.reg_mask = dc->m_un.i2c.mask;
@@ -1739,7 +1786,7 @@ ppm_fetget(ppm_domain_t *domp, uint8_t *lvl)
}
switch (dc->method) {
-#if !defined(__x86)
+#ifdef sun4u
case PPMDC_I2CKIO: {
i2c_gpio_t i2c_req;
i2c_req.reg_mask = dc->m_un.i2c.mask;
@@ -1773,7 +1820,7 @@ ppm_fetget(ppm_domain_t *domp, uint8_t *lvl)
}
off_val = (dc->cmd == PPMDC_FET_OFF) ? dc->m_un.kio.val :
- dc->next->m_un.kio.val;
+ dc->next->m_un.kio.val;
*lvl = (kio_val == off_val) ? PPMD_OFF : PPMD_ON;
PPMD(D_FET, ("%s: %s domain FET %s\n", str, domp->name,
@@ -2187,7 +2234,7 @@ ppm_gpioset(ppm_domain_t *domp, int key)
}
switch (dc->method) {
-#if !defined(__x86)
+#ifdef sun4u
case PPMDC_I2CKIO: {
i2c_gpio_t i2c_req;
ppm_dev_t *pdev;
@@ -2223,6 +2270,7 @@ ppm_gpioset(ppm_domain_t *domp, int key)
break;
}
#endif
+
case PPMDC_KIO:
ret = ldi_ioctl(dc->lh, dc->m_un.kio.iowr,
(intptr_t)&(dc->m_un.kio.val), FWRITE | FKIOCTL, kcred,
@@ -2666,3 +2714,100 @@ ppm_power_down_domain(dev_info_t *dip)
mutex_exit(&domp->lock);
return (ret);
}
+
+static int
+ppm_manage_sx(s3a_t *s3ap, int enter)
+{
+ ppm_domain_t *domp = ppm_lookup_domain("domain_estar");
+ ppm_dc_t *dc;
+ int ret = 0;
+
+ if (domp == NULL) {
+ PPMD(D_CPR, ("ppm_manage_sx: can't find estar domain\n"))
+ return (ENODEV);
+ }
+ PPMD(D_CPR, ("ppm_manage_sx %x, enter %d\n", s3ap->s3a_state,
+ enter))
+ switch (s3ap->s3a_state) {
+ case S3:
+ if (enter) {
+ dc = ppm_lookup_dc(domp, PPMDC_ENTER_S3);
+ } else {
+ dc = ppm_lookup_dc(domp, PPMDC_EXIT_S3);
+ }
+ ASSERT(dc && dc->method == PPMDC_KIO);
+ PPMD(D_CPR,
+ ("ppm_manage_sx: calling acpi driver (handle %p)"
+ " with %x\n", (void *)dc->lh, dc->m_un.kio.iowr))
+ ret = ldi_ioctl(dc->lh, dc->m_un.kio.iowr,
+ (intptr_t)s3ap, FWRITE | FKIOCTL, kcred, NULL);
+ break;
+
+ case S4:
+ /* S4 is not supported yet */
+ return (EINVAL);
+ default:
+ ASSERT(0);
+ }
+ return (ret);
+}
+
+/*
+ * Search enable/disable lists, which are encoded in ppm.conf as an array
+ * of char strings.
+ */
+static int
+ppm_search_list(pm_searchargs_t *sl)
+{
+ int i;
+ int flags = DDI_PROP_DONTPASS;
+ ppm_unit_t *unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
+ char **pp;
+ char *starp;
+ uint_t nelements;
+ char *manuf = sl->pms_manufacturer;
+ char *prod = sl->pms_product;
+
+ if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, unitp->dip, flags,
+ sl->pms_listname, &pp, &nelements) != DDI_PROP_SUCCESS) {
+ PPMD(D_CPR, ("ppm_search_list prop lookup %s failed--EINVAL\n",
+ sl->pms_listname))
+ return (EINVAL);
+ }
+ ASSERT((nelements & 1) == 0); /* must be even */
+
+ PPMD(D_CPR, ("ppm_search_list looking for %s, %s\n", manuf, prod))
+
+ for (i = 0; i < nelements; i += 2) {
+ PPMD(D_CPR, ("checking %s, %s", pp[i], pp[i+1]))
+ /* we support only a trailing '*' pattern match */
+ if ((starp = strchr(pp[i], '*')) != NULL && *(starp + 1) == 0) {
+ /* LINTED - ptrdiff overflow */
+ if (strncmp(manuf, pp[i], (starp - pp[i])) != 0) {
+ PPMD(D_CPR, (" no match %s with %s\n",
+ manuf, pp[i + 1]))
+ continue;
+ }
+ }
+ if ((starp = strchr(pp[i + 1], '*')) != NULL &&
+ *(starp + 1) == 0) {
+ if (strncmp(prod,
+ /* LINTED - ptrdiff overflow */
+ pp[i + 1], (starp - pp[i + 1])) != 0) {
+ PPMD(D_CPR, (" no match %s with %s\n",
+ prod, pp[i + 1]))
+ continue;
+ }
+ }
+ if (strcmp(manuf, pp[i]) == 0 &&
+ (strcmp(prod, pp[i + 1]) == 0)) {
+ PPMD(D_CPR, (" match\n"))
+ ddi_prop_free(pp);
+ return (0);
+ }
+ PPMD(D_CPR, (" no match %s with %s or %s with %s\n",
+ manuf, pp[i], prod, pp[i + 1]))
+ }
+ ddi_prop_free(pp);
+ return (ENODEV);
+}
diff --git a/usr/src/uts/common/io/ppm/ppm_subr.c b/usr/src/uts/common/io/ppm/ppm_subr.c
index 4bcd24c877..617ac3a380 100644
--- a/usr/src/uts/common/io/ppm/ppm_subr.c
+++ b/usr/src/uts/common/io/ppm/ppm_subr.c
@@ -207,6 +207,7 @@ ppm_lookup_hndl(int model, ppm_dc_t *key_dc)
#define PPM_CTRL_PROP_SUFFIX "-control"
struct ppm_domit ppm_domit_data[] = {
+ "SX", PPMD_SX, 0, PPMD_ON,
"CPU", PPMD_CPU, PPMD_LOCK_ALL, PPMD_ON,
"FET", PPMD_FET, PPMD_LOCK_ONE, PPMD_ON,
"PCI", PPMD_PCI, PPMD_LOCK_ONE, PPMD_ON,
@@ -401,11 +402,19 @@ ppm_lookup_dev(dev_info_t *dip)
PPM_GET_PATHNAME(dip, path);
for (domp = ppm_domain_p; domp; domp = domp->next) {
- if (PPM_DOMAIN_UP(domp))
+ if (PPM_DOMAIN_UP(domp)) {
for (dbp = domp->conflist; dbp; dbp = dbp->next) {
+ /*
+ * allow claiming root without knowing
+ * its full name
+ */
+ if (dip == ddi_root_node() &&
+ strcmp(dbp->name, "/") == 0)
+ return (domp);
if (ppm_match_devs(path, dbp) == 0)
return (domp);
}
+ }
}
return (NULL);
@@ -514,7 +523,8 @@ ppm_match_devs(char *dev_path, ppm_db_t *dbp)
/* "<exact match>*" */
if (dbp->name[dbp->wcpos[0] + 1] == 0) {
cp = path + dbp->wcpos[0];
- while (*cp && (*cp++ != '/'));
+ while (*cp && (*cp++ != '/'))
+ ;
return ((*cp == 0) ? 0 : -1);
}
@@ -761,6 +771,22 @@ ppm_init_cb(dev_info_t *dip)
for (domp = ppm_domain_p; domp != NULL; domp = domp->next) {
for (dc = domp->dc; dc; dc = dc->next) {
+ /*
+ * Warning: This code is rather confusing.
+ *
+ * It intends to ensure that ppm_init_lyr() is only
+ * called ONCE for a device that may be associated
+ * with more than one domain control.
+ * So, what it does is first to check to see if
+ * there is a handle, and then if not it goes on
+ * to call the init_lyr() routine.
+ *
+ * The non-obvious thing is that the ppm_init_lyr()
+ * routine, in addition to opening the device
+ * associated with the dc (domain control) in
+ * question, has the side-effect of creating the
+ * handle for that dc as well.
+ */
if (ppm_lookup_hndl(domp->model, dc) != NULL)
continue;
@@ -979,6 +1005,8 @@ struct ppm_confdefs {
char *sym;
int val;
} ppm_confdefs_table[] = {
+ "ENTER_S3", PPMDC_ENTER_S3,
+ "EXIT_S3", PPMDC_EXIT_S3,
"CPU_NEXT", PPMDC_CPU_NEXT,
"PRE_CHNG", PPMDC_PRE_CHNG,
"CPU_GO", PPMDC_CPU_GO,
@@ -991,7 +1019,9 @@ struct ppm_confdefs {
"LED_OFF", PPMDC_LED_OFF,
"KIO", PPMDC_KIO,
"VCORE", PPMDC_VCORE,
+#ifdef sun4u
"I2CKIO", PPMDC_I2CKIO,
+#endif
"CPUSPEEDKIO", PPMDC_CPUSPEEDKIO,
"PRE_PWR_OFF", PPMDC_PRE_PWR_OFF,
"PRE_PWR_ON", PPMDC_PRE_PWR_ON,
@@ -1103,14 +1133,20 @@ ppm_parse_dc(char **dc_namep, ppm_dc_t *dc)
(void) ppm_stoi(dclist[i], &dc->m_un.cpu.speeds);
continue;
}
+#ifdef sun4u
if (strstr(dclist[i], "mask=")) {
(void) ppm_stoi(dclist[i], &dc->m_un.i2c.mask);
continue;
}
+#endif
/* This must be before the if statement for delay */
if (strstr(dclist[i], "post_delay=")) {
+#ifdef sun4u
ASSERT(dc->method == PPMDC_KIO ||
dc->method == PPMDC_I2CKIO);
+#else
+ ASSERT(dc->method == PPMDC_KIO);
+#endif
/*
* all delays are uint_t type instead of clock_t.
* If the delay is too long, it might get truncated.
@@ -1119,13 +1155,15 @@ ppm_parse_dc(char **dc_namep, ppm_dc_t *dc)
switch (dc->method) {
case PPMDC_KIO:
(void) ppm_stoi(dclist[i],
- &dc->m_un.kio.post_delay);
+ &dc->m_un.kio.post_delay);
break;
+#ifdef sun4u
case PPMDC_I2CKIO:
(void) ppm_stoi(dclist[i],
- &dc->m_un.i2c.post_delay);
+ &dc->m_un.i2c.post_delay);
break;
+#endif
default:
break;
@@ -1133,9 +1171,14 @@ ppm_parse_dc(char **dc_namep, ppm_dc_t *dc)
continue;
}
if (strstr(dclist[i], "delay=")) {
+#ifdef sun4u
+ ASSERT(dc->method == PPMDC_VCORE ||
+ dc->method == PPMDC_KIO ||
+ dc->method == PPMDC_I2CKIO);
+#else
ASSERT(dc->method == PPMDC_VCORE ||
- dc->method == PPMDC_KIO ||
- dc->method == PPMDC_I2CKIO);
+ dc->method == PPMDC_KIO);
+#endif
/*
* all delays are uint_t type instead of clock_t.
@@ -1148,9 +1191,11 @@ ppm_parse_dc(char **dc_namep, ppm_dc_t *dc)
(void) ppm_stoi(dclist[i], &dc->m_un.kio.delay);
break;
+#ifdef sun4u
case PPMDC_I2CKIO:
(void) ppm_stoi(dclist[i], &dc->m_un.i2c.delay);
break;
+#endif
case PPMDC_VCORE:
(void) ppm_stoi(dclist[i], &dc->m_un.cpu.delay);
@@ -1227,6 +1272,8 @@ ppm_lookup_dc(ppm_domain_t *domp, int cmd)
case PPMDC_PWR_ON:
case PPMDC_RESET_OFF:
case PPMDC_RESET_ON:
+ case PPMDC_ENTER_S3:
+ case PPMDC_EXIT_S3:
break;
default:
PPMD(D_PPMDC, ("%s: cmd(%d) unrecognized\n", str, cmd))
@@ -1234,9 +1281,11 @@ ppm_lookup_dc(ppm_domain_t *domp, int cmd)
}
for (dc = domp->dc; dc; dc = dc->next) {
- if (dc->cmd == cmd)
+ if (dc->cmd == cmd) {
return (dc);
+ }
}
+
return (NULL);
}
@@ -1315,6 +1364,7 @@ ppm_get_ctlstr(int ctlop, uint_t mask)
FLINTSTR(D_LOCKS, PMR_PPM_UNLOCK_POWER),
FLINTSTR(D_LOCKS, PMR_PPM_TRY_LOCK_POWER),
FLINTSTR(D_LOCKS, PMR_PPM_POWER_LOCK_OWNER),
+ FLINTSTR(D_CTLOPS1 | D_CTLOPS2, PMR_PPM_ENTER_SX),
FLINTSTR(D_CTLOPS1 | D_CTLOPS2, PMR_UNKNOWN),
};
@@ -1334,13 +1384,15 @@ ppm_print_dc(ppm_dc_t *dc)
PPMD(D_PPMDC, ("\nAdds ppm_dc: path(%s),\n cmd(%x), "
"method(%x), ", d->path, d->cmd, d->method))
- if (d->method == PPMDC_I2CKIO) {
+ if (d->method == PPMDC_KIO) {
+ PPMD(D_PPMDC, ("kio.iowr(%x), kio.val(0x%X)",
+ d->m_un.kio.iowr, d->m_un.kio.val))
+#ifdef sun4u
+ } else if (d->method == PPMDC_I2CKIO) {
PPMD(D_PPMDC, ("i2c.iowr(%x), i2c.val(0x%X), "
"i2c.mask(0x%X)", d->m_un.i2c.iowr,
d->m_un.i2c.val, d->m_un.i2c.mask))
- } else if (d->method == PPMDC_KIO) {
- PPMD(D_PPMDC, ("kio.iowr(%x), kio.val(0x%X)",
- d->m_un.kio.iowr, d->m_un.kio.val))
+#endif
} else if (d->method == PPMDC_VCORE) {
PPMD(D_PPMDC, ("cpu: .iord(%x), .iowr(%x), .val(0x%X), "
".delay(0x%x)",
diff --git a/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c b/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c
index fa303ca9b5..f6d5870c5c 100644
--- a/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c
+++ b/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c
@@ -138,6 +138,7 @@ static void nv_read_signature(nv_port_t *nvp);
static void mcp55_set_intr(nv_port_t *nvp, int flag);
static void mcp04_set_intr(nv_port_t *nvp, int flag);
static void nv_resume(nv_port_t *nvp);
+static void nv_suspend(nv_port_t *nvp);
static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason);
static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
@@ -400,7 +401,7 @@ nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
static int
nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
- int status, attach_state, intr_types, bar, i;
+ int status, attach_state, intr_types, bar, i, command;
int inst = ddi_get_instance(dip);
ddi_acc_handle_t pci_conf_handle;
nv_ctl_t *nvc;
@@ -439,6 +440,20 @@ nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
+ /*
+ * If a device is attached after a suspend/resume, sometimes
+ * the command register is zero, as it might not be set by
+ * BIOS or a parent. Set it again here.
+ */
+ command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
+
+ if (command == 0) {
+ cmn_err(CE_WARN, "nv_sata%d: restoring PCI command"
+ " register", inst);
+ pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
+ PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
+ }
+
subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
if (subclass & PCI_MASS_RAID) {
@@ -585,7 +600,6 @@ nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
NVLOG((NVDBG_INIT, nvc, NULL,
"nv_attach(): DDI_RESUME inst %d", inst));
-
nvc->nvc_state &= ~NV_CTRL_SUSPEND;
for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
@@ -716,6 +730,11 @@ nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
* the current state.
*/
NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
+
+ for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
+ nv_suspend(&(nvc->nvc_port[i]));
+ }
+
nvc->nvc_state |= NV_CTRL_SUSPEND;
return (DDI_SUCCESS);
@@ -1226,7 +1245,7 @@ nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
- " done % reason %d", ret));
+ " done % reason %d", ret));
return (ret);
}
@@ -2725,7 +2744,7 @@ mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
nv_cmn_err(CE_PANIC, nvc, nvp,
- "this is should not be executed at all until NCQ");
+ "this is should not be executed at all until NCQ");
mutex_enter(&nvp->nvp_mutex);
@@ -4576,6 +4595,38 @@ nv_resume(nv_port_t *nvp)
* nv_reset(nvp);
*/
+ nv_reset(nvp);
+
+ mutex_exit(&nvp->nvp_mutex);
+}
+
+/*
+ * The PM functions for suspend and resume are incomplete and need additional
+ * work. It may or may not work in the current state.
+ */
+static void
+nv_suspend(nv_port_t *nvp)
+{
+	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
+
+	mutex_enter(&nvp->nvp_mutex);
+
+	/* nothing to quiesce on an inactive port */
+	if (nvp->nvp_state & NV_PORT_INACTIVE) {
+		mutex_exit(&nvp->nvp_mutex);
+
+		return;
+	}
+
+	/* disable per-port interrupts while suspended */
+	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_DISABLE);
+
+	/*
+	 * NOTE(review): the text below appears copied from nv_resume();
+	 * a reset on the *suspend* path looks out of place -- the
+	 * nv_reset() call is deliberately left commented out here.
+	 * nv_reset(nvp);
+	 */
+
+	mutex_exit(&nvp->nvp_mutex);
+}
diff --git a/usr/src/uts/common/io/srn.c b/usr/src/uts/common/io/srn.c
new file mode 100755
index 0000000000..cb2888871d
--- /dev/null
+++ b/usr/src/uts/common/io/srn.c
@@ -0,0 +1,563 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * srn Provide apm-like interfaces to Xorg
+ */
+
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/modctl.h>
+#include <sys/conf.h> /* driver flags and functions */
+#include <sys/open.h> /* OTYP_CHR definition */
+#include <sys/stat.h> /* S_IFCHR definition */
+#include <sys/pathname.h> /* name -> dev_info xlation */
+#include <sys/kmem.h> /* memory alloc stuff */
+#include <sys/debug.h>
+#include <sys/pm.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/epm.h>
+#include <sys/vfs.h>
+#include <sys/mode.h>
+#include <sys/mkdev.h>
+#include <sys/promif.h>
+#include <sys/consdev.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/poll.h>
+#include <sys/note.h>
+#include <sys/taskq.h>
+#include <sys/policy.h>
+#include <sys/srn.h>
+
+/*
+ * Minor number is instance<<8 + clone minor from range 1-255;
+ * But only one will be allocated
+ */
+#define SRN_MINOR_TO_CLONE(minor) ((minor) & (SRN_MAX_CLONE - 1))
+#define SU 0x002
+#define SG 0x004
+
+extern kmutex_t srn_clone_lock; /* protects srn_clones array */
+extern kcondvar_t srn_clones_cv[SRN_MAX_CLONE];
+extern uint_t srn_poll_cnt[SRN_MAX_CLONE];
+
+/*
+ * The soft state of the srn driver. Since there will only be
+ * one of these, just reference it through a static struct.
+ */
+static struct srnstate {
+ dev_info_t *srn_dip; /* ptr to our dev_info node */
+ int srn_instance; /* for ddi_get_instance() */
+ uchar_t srn_clones[SRN_MAX_CLONE]; /* unique opens */
+ struct cred *srn_cred[SRN_MAX_CLONE]; /* cred for each open */
+ int srn_type[SRN_MAX_CLONE]; /* type of handshake */
+ int srn_delivered[SRN_MAX_CLONE];
+ srn_event_info_t srn_pending[SRN_MAX_CLONE];
+} srn = { NULL, -1};
+typedef struct srnstate *srn_state_t;
+
+kcondvar_t srn_clones_cv[SRN_MAX_CLONE];
+uint_t srn_poll_cnt[SRN_MAX_CLONE]; /* count of events for poll */
+int srn_apm_count;
+int srn_autosx_count;
+struct pollhead srn_pollhead[SRN_MAX_CLONE];
+
+static int srn_open(dev_t *, int, int, cred_t *);
+static int srn_close(dev_t, int, int, cred_t *);
+static int srn_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
+static int srn_chpoll(dev_t, short, int, short *, struct pollhead **);
+
+static struct cb_ops srn_cb_ops = {
+ srn_open, /* open */
+ srn_close, /* close */
+ nodev, /* strategy */
+ nodev, /* print */
+ nodev, /* dump */
+ nodev, /* read */
+ nodev, /* write */
+ srn_ioctl, /* ioctl */
+ nodev, /* devmap */
+ nodev, /* mmap */
+ nodev, /* segmap */
+ srn_chpoll, /* poll */
+ ddi_prop_op, /* prop_op */
+ NULL, /* streamtab */
+ D_NEW | D_MP /* driver compatibility flag */
+};
+
+static int srn_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
+ void **result);
+static int srn_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
+static int srn_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
+static void srn_notify(int type, int event);
+
+static struct dev_ops srn_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* refcnt */
+ srn_getinfo, /* info */
+ nulldev, /* identify */
+ nulldev, /* probe */
+ srn_attach, /* attach */
+ srn_detach, /* detach */
+ nodev, /* reset */
+ &srn_cb_ops, /* driver operations */
+ NULL, /* bus operations */
+ NULL /* power */
+};
+
+static struct modldrv modldrv = {
+ &mod_driverops,
+ "srn driver v1.4",
+ &srn_ops
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, &modldrv, 0
+};
+
+/* Local functions */
+
+/*
+ * Loadable-module entry points: plain mod_install/mod_remove/mod_info
+ * wrappers; all driver state setup happens in attach(9E).
+ */
+int
+_init(void)
+{
+	return (mod_install(&modlinkage));
+}
+
+int
+_fini(void)
+{
+	return (mod_remove(&modlinkage));
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+	return (mod_info(&modlinkage, modinfop));
+}
+
+/*
+ * attach(9E): single-instance pseudo driver.  Creates the "srn" minor
+ * node (minor = instance << 8), initializes the per-clone condition
+ * variables, and installs srn_notify as the kernel's srn_signal hook.
+ */
+static int
+srn_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+	int i;
+	extern void (*srn_signal)(int, int);
+
+	switch (cmd) {
+
+	case DDI_ATTACH:
+		if (srn.srn_instance != -1)	/* Only allow one instance */
+			return (DDI_FAILURE);
+		srn.srn_instance = ddi_get_instance(dip);
+		if (ddi_create_minor_node(dip, "srn", S_IFCHR,
+		    (srn.srn_instance << 8) + 0, DDI_PSEUDO, 0)
+		    != DDI_SUCCESS) {
+			return (DDI_FAILURE);
+		}
+		srn.srn_dip = dip;	/* srn_init and getinfo depend on it */
+
+		for (i = 0; i < SRN_MAX_CLONE; i++)
+			cv_init(&srn_clones_cv[i], NULL, CV_DEFAULT, NULL);
+
+		/* hook into the pm framework's signal dispatch */
+		mutex_enter(&srn_clone_lock);
+		srn_signal = srn_notify;
+		mutex_exit(&srn_clone_lock);
+		ddi_report_dev(dip);
+		return (DDI_SUCCESS);
+
+	default:
+		return (DDI_FAILURE);
+	}
+}
+
+/* ARGSUSED */
+static int
+srn_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+	int i;
+	extern int srn_inuse;	/* presumably set while srn_signal runs */
+	extern void (*srn_signal)(int, int);
+
+	switch (cmd) {
+	case DDI_DETACH:
+
+		/*
+		 * Spin (dropping the lock each pass) until no thread is
+		 * inside the srn_signal hook, then unhook ourselves so the
+		 * pm framework stops calling into this driver.
+		 */
+		mutex_enter(&srn_clone_lock);
+		while (srn_inuse) {
+			mutex_exit(&srn_clone_lock);
+			delay(1);
+			mutex_enter(&srn_clone_lock);
+		}
+		srn_signal = NULL;
+		mutex_exit(&srn_clone_lock);
+
+		for (i = 0; i < SRN_MAX_CLONE; i++)
+			cv_destroy(&srn_clones_cv[i]);
+
+		ddi_remove_minor_node(dip, NULL);
+		srn.srn_instance = -1;	/* allow a future attach */
+		return (DDI_SUCCESS);
+
+	default:
+		return (DDI_FAILURE);
+	}
+}
+
+
+#ifdef DEBUG
+char *srn_cmd_string;
+int srn_cmd;
+#endif
+
+/*
+ * Returns true (nonzero) if the credentials grant permission:
+ * SU -- caller passes the power-management privilege policy check;
+ * SG -- caller's gid is 0.
+ * XXX
+ */
+static int
+srn_perms(int perm, cred_t *cr)
+{
+	if ((perm & SU) && secpolicy_power_mgmt(cr) == 0)	/* privileged? */
+		return (1);
+	if ((perm & SG) && (crgetgid(cr) == 0))	/* group 0 is ok */
+		return (1);
+	return (0);
+}
+
+/*
+ * chpoll(9E): report POLLIN/POLLRDNORM when an event is pending for
+ * this clone (srn_poll_cnt[clone] is set by srn_notify()); otherwise
+ * hand back our pollhead so the caller can wait.
+ */
+static int
+srn_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
+    struct pollhead **phpp)
+{
+	extern struct pollhead srn_pollhead[];	/* defined above in this file */
+	int clone;
+
+	clone = SRN_MINOR_TO_CLONE(getminor(dev));
+	if ((events & (POLLIN | POLLRDNORM)) && srn_poll_cnt[clone]) {
+		*reventsp |= (POLLIN | POLLRDNORM);
+	} else {
+		*reventsp = 0;
+		if (!anyyet) {
+			*phpp = &srn_pollhead[clone];
+		}
+	}
+	return (0);
+}
+
+/*
+ * getinfo(9E): minors encode (instance << 8) | clone, so the instance
+ * is recovered by shifting the minor number right by 8.
+ */
+/*ARGSUSED*/
+static int
+srn_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
+{
+	dev_t	dev;
+	int	instance;
+
+	switch (infocmd) {
+	case DDI_INFO_DEVT2DEVINFO:
+		if (srn.srn_instance == -1)
+			return (DDI_FAILURE);
+		*result = srn.srn_dip;
+		return (DDI_SUCCESS);
+
+	case DDI_INFO_DEVT2INSTANCE:
+		dev = (dev_t)arg;
+		instance = getminor(dev) >> 8;
+		*result = (void *)(uintptr_t)instance;
+		return (DDI_SUCCESS);
+
+	default:
+		return (DDI_FAILURE);
+	}
+}
+
+
+/*
+ * open(9E): allocate a free clone slot (1..SRN_MAX_CLONE-1; slot 0 is
+ * reserved), record the opener's credentials for later permission
+ * checks, and start the clone in APM handshake mode.
+ */
+/*ARGSUSED1*/
+static int
+srn_open(dev_t *devp, int flag, int otyp, cred_t *cr)
+{
+	int clone;
+
+	if (otyp != OTYP_CHR)
+		return (EINVAL);
+
+	mutex_enter(&srn_clone_lock);
+	/*
+	 * Scan the whole table; the previous upper bound of
+	 * SRN_MAX_CLONE - 1 skipped the last slot and made the
+	 * "table full" test below unreachable.
+	 */
+	for (clone = 1; clone < SRN_MAX_CLONE; clone++)
+		if (!srn.srn_clones[clone])
+			break;
+
+	if (clone == SRN_MAX_CLONE) {
+		mutex_exit(&srn_clone_lock);
+		return (ENXIO);
+	}
+	ASSERT(srn_apm_count >= 0);
+	srn_apm_count++;
+	srn.srn_type[clone] = SRN_TYPE_APM;
+
+	*devp = makedevice(getmajor(*devp), (srn.srn_instance << 8) +
+	    clone);
+	srn.srn_clones[clone] = 1;
+	/*
+	 * Hold the cred exactly once; srn_close() does one crfree().
+	 * (The original held it twice, leaking a cred reference per open.)
+	 */
+	srn.srn_cred[clone] = cr;
+	crhold(cr);
+	mutex_exit(&srn_clone_lock);
+	PMD(PMD_SX, ("srn open OK\n"))
+	return (0);
+}
+
+/*
+ * close(9E): release the clone's cred, discard any pending or
+ * undelivered event (waking srn_notify() if it is blocked on this
+ * clone's ack), and decrement the handshake-type counter.
+ */
+/*ARGSUSED1*/
+static int
+srn_close(dev_t dev, int flag, int otyp, cred_t *cr)
+{
+	int clone;
+
+	if (otyp != OTYP_CHR)
+		return (EINVAL);
+
+	clone = SRN_MINOR_TO_CLONE(getminor(dev));
+	PMD(PMD_SX, ("srn_close: minor %x, clone %x\n", getminor(dev),
+	    clone))
+	mutex_enter(&srn_clone_lock);
+	crfree(srn.srn_cred[clone]);	/* matches crhold() in srn_open() */
+	srn.srn_cred[clone] = 0;
+	srn_poll_cnt[clone] = 0;
+	if (srn.srn_pending[clone].ae_type || srn.srn_delivered[clone]) {
+		srn.srn_pending[clone].ae_type = 0;
+		srn.srn_delivered[clone] = 0;
+		cv_signal(&srn_clones_cv[clone]);
+	}
+	switch (srn.srn_type[clone]) {
+	case SRN_TYPE_AUTOSX:
+		ASSERT(srn_autosx_count);
+		srn_autosx_count--;
+		break;
+	case SRN_TYPE_APM:
+		ASSERT(srn_apm_count);
+		srn_apm_count--;
+		break;
+	default:
+		ASSERT(0);
+		/* must not return with srn_clone_lock held */
+		mutex_exit(&srn_clone_lock);
+		return (EINVAL);
+	}
+	srn.srn_clones[clone] = 0;
+	mutex_exit(&srn_clone_lock);
+	return (0);
+}
+
+/*
+ * ioctl(9E) entry point.  Commands:
+ *   SRN_IOC_AUTOSX    -- switch this clone from APM to AUTOSX handshaking
+ *   SRN_IOC_NEXTEVENT -- copy out the pending event after a poll wakeup
+ *   SRN_IOC_SUSPEND   -- ack a delivered SRN_SUSPEND_REQ event
+ *   SRN_IOC_RESUME    -- ack a delivered SRN_NORMAL_RESUME event
+ * Permission is checked against the cred saved at open time.
+ */
+/*ARGSUSED*/
+static int
+srn_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
+{
+	int clone = SRN_MINOR_TO_CLONE(getminor(dev));
+
+	PMD(PMD_SX, ("ioctl: %x: begin\n", cmd))
+
+	/* reject unknown commands before the permission check */
+	switch (cmd) {
+	case SRN_IOC_NEXTEVENT:
+	case SRN_IOC_SUSPEND:
+	case SRN_IOC_RESUME:
+	case SRN_IOC_AUTOSX:
+		break;
+	default:
+		return (ENOTTY);
+	}
+
+	if (!srn_perms(SU | SG, srn.srn_cred[clone])) {
+		return (EPERM);
+	}
+	switch (cmd) {
+	case SRN_IOC_AUTOSX:
+		/* one-way transition of this clone from APM to AUTOSX */
+		PMD(PMD_SX, ("SRN_IOC_AUTOSX entered\n"))
+		mutex_enter(&srn_clone_lock);
+		if (!srn.srn_clones[clone]) {
+			PMD(PMD_SX, (" ioctl !srn_clones--EINVAL\n"))
+			mutex_exit(&srn_clone_lock);
+			return (EINVAL);
+		}
+		if (srn.srn_pending[clone].ae_type) {
+			PMD(PMD_SX, ("AUTOSX while pending--EBUSY\n"))
+			mutex_exit(&srn_clone_lock);
+			return (EBUSY);
+		}
+		if (srn.srn_type[clone] == SRN_TYPE_AUTOSX) {
+			PMD(PMD_SX, ("AUTOSX already--EBUSY\n"))
+			mutex_exit(&srn_clone_lock);
+			return (EBUSY);
+		}
+		ASSERT(srn.srn_type[clone] == SRN_TYPE_APM);
+		srn.srn_type[clone] = SRN_TYPE_AUTOSX;
+		srn_apm_count--;
+		ASSERT(srn_apm_count >= 0);
+		ASSERT(srn_autosx_count >= 0);
+		srn_autosx_count++;
+		mutex_exit(&srn_clone_lock);
+		PMD(PMD_SX, ("SRN_IOC_AUTOSX returns success\n"))
+		return (0);
+
+	case SRN_IOC_NEXTEVENT:
+		/*
+		 * Return the next suspend or resume event; there should
+		 * be one, because we only get here after poll(2) has
+		 * reported data ready.  For APM clones, remember what was
+		 * delivered so the matching ack ioctl can be validated.
+		 */
+		PMD(PMD_SX, ("SRN_IOC_NEXTEVENT entered\n"))
+		mutex_enter(&srn_clone_lock);
+		if (srn_poll_cnt[clone] == 0) {
+			mutex_exit(&srn_clone_lock);
+			PMD(PMD_SX, ("SRN_IOC_NEXTEVENT clone %d "
+			    "EWOULDBLOCK\n", clone))
+			return (EWOULDBLOCK);
+		}
+		ASSERT(srn.srn_pending[clone].ae_type);
+		if (ddi_copyout(&srn.srn_pending[clone], (void *)arg,
+		    sizeof (srn_event_info_t), mode) != 0) {
+			mutex_exit(&srn_clone_lock);
+			PMD(PMD_SX, ("SRN_IOC_NEXTEVENT clone %d EFAULT\n",
+			    clone))
+			return (EFAULT);
+		}
+		if (srn.srn_type[clone] == SRN_TYPE_APM)
+			srn.srn_delivered[clone] =
+			    srn.srn_pending[clone].ae_type;
+		PMD(PMD_SX, ("SRN_IOC_NEXTEVENT clone %d delivered %x\n",
+		    clone, srn.srn_pending[clone].ae_type))
+		srn_poll_cnt[clone] = 0;
+		mutex_exit(&srn_clone_lock);
+		return (0);
+
+	case SRN_IOC_SUSPEND:
+		/* ack suspend; only valid after SRN_SUSPEND_REQ delivery */
+		PMD(PMD_SX, ("SRN_IOC_SUSPEND entered clone %d\n", clone))
+		mutex_enter(&srn_clone_lock);
+		if (srn.srn_delivered[clone] != SRN_SUSPEND_REQ) {
+			mutex_exit(&srn_clone_lock);
+			PMD(PMD_SX, ("SRN_IOC_SUSPEND EINVAL\n"))
+			return (EINVAL);
+		}
+		srn.srn_delivered[clone] = 0;
+		srn.srn_pending[clone].ae_type = 0;
+		/* notify the kernel suspend thread to continue */
+		PMD(PMD_SX, ("SRN_IOC_SUSPEND clone %d ok\n", clone))
+		cv_signal(&srn_clones_cv[clone]);
+		mutex_exit(&srn_clone_lock);
+		return (0);
+
+	case SRN_IOC_RESUME:
+		/* ack resume; only valid after SRN_NORMAL_RESUME delivery */
+		PMD(PMD_SX, ("SRN_IOC_RESUME entered clone %d\n", clone))
+		mutex_enter(&srn_clone_lock);
+		if (srn.srn_delivered[clone] != SRN_NORMAL_RESUME) {
+			mutex_exit(&srn_clone_lock);
+			PMD(PMD_SX, ("SRN_IOC_RESUME EINVAL\n"))
+			return (EINVAL);
+		}
+		srn.srn_delivered[clone] = 0;
+		srn.srn_pending[clone].ae_type = 0;
+		/* notify the kernel resume thread to continue */
+		PMD(PMD_SX, ("SRN_IOC_RESUME ok for clone %d\n", clone))
+		cv_signal(&srn_clones_cv[clone]);
+		mutex_exit(&srn_clone_lock);
+		return (0);
+
+	default:
+		/* unreachable: filtered by the switch above */
+		PMD(PMD_SX, ("srn_ioctl unknown cmd EINVAL\n"))
+		return (EINVAL);
+	}
+}
+/*
+ * Deliver a suspend/resume event to every open clone of the requested
+ * type (SRN_TYPE_APM or SRN_TYPE_AUTOSX).  The handshake is very
+ * simple -- only one outstanding event at a time.  For APM clients the
+ * OS blocks here until every clone acks via ioctl; for AUTOSX clients
+ * it delivers the event and drives on.
+ * Installed as the srn_signal hook; caller must hold srn_clone_lock.
+ */
+void
+srn_notify(int type, int event)
+{
+	int clone, count;
+	PMD(PMD_SX, ("srn_notify entered with type %d, event 0x%x\n",
+	    type, event));
+	ASSERT(mutex_owned(&srn_clone_lock));
+	switch (type) {
+	case SRN_TYPE_APM:
+		if (srn_apm_count == 0) {
+			PMD(PMD_SX, ("no apm types\n"))
+			return;
+		}
+		count = srn_apm_count;
+		break;
+	case SRN_TYPE_AUTOSX:
+		if (srn_autosx_count == 0) {
+			PMD(PMD_SX, ("no autosx types\n"))
+			return;
+		}
+		count = srn_autosx_count;
+		break;
+	default:
+		ASSERT(0);
+		break;
+	}
+	ASSERT(count > 0);
+	PMD(PMD_SX, ("count %d\n", count))
+	/*
+	 * NOTE(review): delivery scans from clone 0, but srn_open()
+	 * allocates clones starting at 1, so slot 0 is never matched.
+	 */
+	for (clone = 0; clone < SRN_MAX_CLONE; clone++) {
+		if (srn.srn_type[clone] == type) {
+			if (type == SRN_TYPE_APM) {
+				ASSERT(srn.srn_pending[clone].ae_type == 0);
+				ASSERT(srn_poll_cnt[clone] == 0);
+				ASSERT(srn.srn_delivered[clone] == 0);
+			}
+			srn.srn_pending[clone].ae_type = event;
+			srn_poll_cnt[clone] = 1;
+			PMD(PMD_SX, ("pollwake %d\n", clone))
+			pollwakeup(&srn_pollhead[clone], (POLLRDNORM | POLLIN));
+			count--;
+			if (count == 0)
+				break;
+		}
+	}
+	if (type == SRN_TYPE_AUTOSX) {	/* we don't wait */
+		PMD(PMD_SX, ("Not waiting for AUTOSX ack\n"))
+		return;
+	}
+	ASSERT(type == SRN_TYPE_APM);
+	/* otherwise wait for acks */
+restart:
+	/*
+	 * We wait until all of the pending events are cleared.
+	 * We have to start over every time we do a cv_wait because
+	 * we give up the mutex and can be re-entered
+	 */
+	for (clone = 1; clone < SRN_MAX_CLONE; clone++) {
+		if (srn.srn_clones[clone] == 0 ||
+		    srn.srn_type[clone] != SRN_TYPE_APM)
+			continue;
+		if (srn.srn_pending[clone].ae_type) {
+			PMD(PMD_SX, ("srn_notify waiting for ack for clone %d, "
+			    "event %x\n", clone, event))
+			cv_wait(&srn_clones_cv[clone], &srn_clone_lock);
+			goto restart;
+		}
+	}
+	PMD(PMD_SX, ("srn_notify done with %x\n", event))
+}
diff --git a/usr/src/uts/common/io/srn.conf b/usr/src/uts/common/io/srn.conf
new file mode 100755
index 0000000000..7db6545647
--- /dev/null
+++ b/usr/src/uts/common/io/srn.conf
@@ -0,0 +1,27 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+name="srn" parent="pseudo" instance=0;
diff --git a/usr/src/uts/common/io/usb/hcd/ehci/ehci.c b/usr/src/uts/common/io/usb/hcd/ehci/ehci.c
index 3b986e1723..93b7815a7d 100644
--- a/usr/src/uts/common/io/usb/hcd/ehci/ehci.c
+++ b/usr/src/uts/common/io/usb/hcd/ehci/ehci.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -65,6 +65,11 @@ uint_t ehci_errmask = (uint_t)PRINT_MASK_ALL;
uint_t ehci_errlevel = USB_LOG_L2;
uint_t ehci_instance_debug = (uint_t)-1;
+/*
+ * Tunable to ensure host controller goes off even if a keyboard is attached.
+ */
+int force_ehci_off = 1;
+
/* Enable all workarounds for VIA VT62x2 */
uint_t ehci_vt62x2_workaround = EHCI_VIA_WORKAROUNDS;
diff --git a/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c b/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c
index bc760adfc8..44e6688a4b 100644
--- a/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c
+++ b/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c
@@ -54,6 +54,7 @@ extern void *ehci_statep;
extern void ehci_handle_endpoint_reclaimation(ehci_state_t *);
extern uint_t ehci_vt62x2_workaround;
+extern int force_ehci_off;
/* Adjustable variables for the size of the pools */
int ehci_qh_pool_size = EHCI_QH_POOL_SIZE;
@@ -2006,7 +2007,7 @@ ehci_cpr_suspend(ehci_state_t *ehcip)
* Stop the ehci host controller
* if usb keyboard is not connected.
*/
- if (ehcip->ehci_polled_kbd_count == 0) {
+ if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) {
Set_OpReg(ehci_command,
Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
}
diff --git a/usr/src/uts/common/io/usb/hcd/openhci/ohci.c b/usr/src/uts/common/io/usb/hcd/openhci/ohci.c
index 2cb62f57ee..bbfef3dff2 100644
--- a/usr/src/uts/common/io/usb/hcd/openhci/ohci.c
+++ b/usr/src/uts/common/io/usb/hcd/openhci/ohci.c
@@ -47,6 +47,8 @@
/* Pointer to the state structure */
static void *ohci_statep;
+int force_ohci_off = 1;
+
/* Number of instances */
#define OHCI_INSTS 1
@@ -2353,7 +2355,7 @@ ohci_cpr_suspend(ohci_state_t *ohcip)
* Suspend the ohci host controller
* if usb keyboard is not connected.
*/
- if (ohcip->ohci_polled_kbd_count == 0) {
+ if (ohcip->ohci_polled_kbd_count == 0 || force_ohci_off != 0) {
Set_OpReg(hcr_control, HCR_CONTROL_SUSPD);
}
diff --git a/usr/src/uts/common/os/callb.c b/usr/src/uts/common/os/callb.c
index 819851e7b9..b0a9264762 100644
--- a/usr/src/uts/common/os/callb.c
+++ b/usr/src/uts/common/os/callb.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -255,7 +254,7 @@ callb_execute_class(int class, int code)
#ifdef CALLB_DEBUG
printf("callb_execute: name=%s func=%p arg=%p\n",
- cp->c_name, (void *)cp->c_func, (void *)cp->c_arg);
+ cp->c_name, (void *)cp->c_func, (void *)cp->c_arg);
#endif /* CALLB_DEBUG */
mutex_exit(&ct->ct_lock);
@@ -294,12 +293,14 @@ callb_generic_cpr(void *arg, int code)
switch (code) {
case CB_CODE_CPR_CHKPT:
cp->cc_events |= CALLB_CPR_START;
+#ifdef CPR_NOT_THREAD_SAFE
while (!(cp->cc_events & CALLB_CPR_SAFE))
/* cv_timedwait() returns -1 if it times out. */
if ((ret = cv_timedwait(&cp->cc_callb_cv,
cp->cc_lockp,
lbolt + callb_timeout_sec * hz)) == -1)
break;
+#endif
break;
case CB_CODE_CPR_RESUME:
diff --git a/usr/src/uts/common/os/cpu.c b/usr/src/uts/common/os/cpu.c
index d99d9403cd..ef306f2979 100644
--- a/usr/src/uts/common/os/cpu.c
+++ b/usr/src/uts/common/os/cpu.c
@@ -161,6 +161,9 @@ static struct _cpu_pause_info {
static kmutex_t pause_free_mutex;
static kcondvar_t pause_free_cv;
+void *(*cpu_pause_func)(void *) = NULL;
+
+
static struct cpu_sys_stats_ks_data {
kstat_named_t cpu_ticks_idle;
kstat_named_t cpu_ticks_user;
@@ -738,10 +741,12 @@ weakbinding_start(void)
* which is a good trade off.
*/
static void
-cpu_pause(volatile char *safe)
+cpu_pause(int index)
{
int s;
struct _cpu_pause_info *cpi = &cpu_pause_info;
+ volatile char *safe = &safe_list[index];
+ long lindex = index;
ASSERT((curthread->t_bound_cpu != NULL) || (*safe == PAUSE_DIE));
@@ -766,6 +771,16 @@ cpu_pause(volatile char *safe)
* setbackdq/setfrontdq.
*/
s = splhigh();
+ /*
+ * if cpu_pause_func() has been set then call it using
+ * index as the argument, currently only used by
+ * cpr_suspend_cpus(). This function is used as the
+ * code to execute on the "paused" cpu's when a machine
+ * comes out of a sleep state and CPU's were powered off.
+ * (could also be used for hotplugging CPU's).
+ */
+ if (cpu_pause_func != NULL)
+ (*cpu_pause_func)((void *)lindex);
mach_cpu_pause(safe);
@@ -809,13 +824,13 @@ static void
cpu_pause_alloc(cpu_t *cp)
{
kthread_id_t t;
- int cpun = cp->cpu_id;
+ long cpun = cp->cpu_id;
/*
* Note, v.v_nglobpris will not change value as long as I hold
* cpu_lock.
*/
- t = thread_create(NULL, 0, cpu_pause, (caddr_t)&safe_list[cpun],
+ t = thread_create(NULL, 0, cpu_pause, (void *)cpun,
0, &p0, TS_STOPPED, v.v_nglobpris - 1);
thread_lock(t);
t->t_bound_cpu = cp;
diff --git a/usr/src/uts/common/os/sunpci.c b/usr/src/uts/common/os/sunpci.c
index ff89c017d6..5bc4c71474 100644
--- a/usr/src/uts/common/os/sunpci.c
+++ b/usr/src/uts/common/os/sunpci.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -334,10 +334,12 @@ pci_save_config_regs(dev_info_t *dip)
off_t offset = 0;
uint8_t cap_ptr, cap_id;
int pcie = 0;
+ PMD(PMD_SX, ("pci_save_config_regs %s:%d\n", ddi_driver_name(dip),
+ ddi_get_instance(dip)))
if (pci_config_setup(dip, &confhdl) != DDI_SUCCESS) {
cmn_err(CE_WARN, "%s%d can't get config handle",
- ddi_driver_name(dip), ddi_get_instance(dip));
+ ddi_driver_name(dip), ddi_get_instance(dip));
return (DDI_FAILURE);
}
@@ -364,7 +366,7 @@ pci_save_config_regs(dev_info_t *dip)
if (pcie) {
/* PCI express device. Can have data in all 4k space */
regbuf = (uint32_t *)kmem_zalloc((size_t)PCIE_CONF_HDR_SIZE,
- KM_SLEEP);
+ KM_SLEEP);
p = regbuf;
/*
* Allocate space for mask.
@@ -406,12 +408,12 @@ pci_save_config_regs(dev_info_t *dip)
kmem_free(regbuf, (size_t)PCIE_CONF_HDR_SIZE);
} else {
regbuf = (uint32_t *)kmem_zalloc((size_t)PCI_CONF_HDR_SIZE,
- KM_SLEEP);
+ KM_SLEEP);
chsp = (pci_config_header_state_t *)regbuf;
chsp->chs_command = pci_config_get16(confhdl, PCI_CONF_COMM);
chsp->chs_header_type = pci_config_get8(confhdl,
- PCI_CONF_HEADER);
+ PCI_CONF_HEADER);
if ((chsp->chs_header_type & PCI_HEADER_TYPE_M) ==
PCI_HEADER_ONE)
chsp->chs_bridge_control =
@@ -767,3 +769,274 @@ restoreconfig_err:
pci_config_teardown(&confhdl);
return (DDI_FAILURE);
}
+
+/*ARGSUSED*/
+static int
+pci_lookup_pmcap(dev_info_t *dip, ddi_acc_handle_t conf_hdl,
+ uint16_t *pmcap_offsetp)
+{
+ uint8_t cap_ptr;
+ uint8_t cap_id;
+ uint8_t header_type;
+ uint16_t status;
+
+ header_type = pci_config_get8(conf_hdl, PCI_CONF_HEADER);
+ header_type &= PCI_HEADER_TYPE_M;
+
+ /* we don't deal with bridges, etc here */
+ if (header_type != PCI_HEADER_ZERO) {
+ return (DDI_FAILURE);
+ }
+
+ status = pci_config_get16(conf_hdl, PCI_CONF_STAT);
+ if ((status & PCI_STAT_CAP) == 0) {
+ return (DDI_FAILURE);
+ }
+
+ cap_ptr = pci_config_get8(conf_hdl, PCI_CONF_CAP_PTR);
+
+ /*
+ * Walk the capabilities searching for a PM entry.
+ */
+ while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
+ cap_id = pci_config_get8(conf_hdl, cap_ptr + PCI_CAP_ID);
+ if (cap_id == PCI_CAP_ID_PM) {
+ break;
+ }
+ cap_ptr = pci_config_get8(conf_hdl,
+ cap_ptr + PCI_CAP_NEXT_PTR);
+ }
+
+ if (cap_ptr == PCI_CAP_NEXT_PTR_NULL) {
+ return (DDI_FAILURE);
+ }
+ *pmcap_offsetp = cap_ptr;
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Do common pci-specific suspend actions:
+ * - enable wakeup if appropriate for the device
+ * - put device in lowest D-state that supports wakeup, or D3 if none
+ * - turn off bus mastering in control register
+ * For lack of per-dip storage (parent private date is pretty busy)
+ * we use properties to store the necessary context
+ * To avoid grotting through pci config space on every suspend,
+ * we leave the prop in existence after resume, cause we know that
+ * the detach framework code will dispose of it for us.
+ */
+
+typedef struct pci_pm_context {
+ int ppc_flags;
+ uint16_t ppc_cap_offset; /* offset in config space to pm cap */
+ uint16_t ppc_pmcsr; /* need this too */
+ uint16_t ppc_suspend_level;
+} pci_pm_context_t;
+
+#define SAVED_PM_CONTEXT "pci-pm-context"
+
+/* values for ppc_flags */
+#define PPCF_NOPMCAP 1
+
+/*
+ * Handle pci-specific suspend processing
+ * PM CSR and PCI CMD are saved by pci_save_config_regs().
+ * If device can wake up system via PME, enable it to do so
+ * Set device power level to lowest that can generate PME, or D3 if none can
+ * Turn off bus master enable in pci command register
+ */
+#if defined(__x86)
+extern int acpi_ddi_setwake(dev_info_t *dip, int level);
+#endif
+
+int
+pci_post_suspend(dev_info_t *dip)
+{
+ pci_pm_context_t *p;
+ uint16_t pmcap, pmcsr, pcicmd;
+ uint_t length;
+ int ret;
+ int fromprop = 1; /* source of memory *p */
+ ddi_acc_handle_t hdl;
+
+ PMD(PMD_SX, ("pci_post_suspend %s:%d\n",
+ ddi_driver_name(dip), ddi_get_instance(dip)))
+
+ if (pci_save_config_regs(dip) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+
+ if (pci_config_setup(dip, &hdl) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
+ SAVED_PM_CONTEXT, (uchar_t **)&p, &length) != DDI_PROP_SUCCESS) {
+ p = (pci_pm_context_t *)kmem_zalloc(sizeof (*p), KM_SLEEP);
+ fromprop = 0;
+ if (pci_lookup_pmcap(dip, hdl,
+ &p->ppc_cap_offset) != DDI_SUCCESS) {
+ p->ppc_flags |= PPCF_NOPMCAP;
+ ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip,
+ SAVED_PM_CONTEXT, (uchar_t *)p,
+ sizeof (pci_pm_context_t));
+ if (ret != DDI_PROP_SUCCESS) {
+ (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
+ SAVED_PM_CONTEXT);
+ ret = DDI_FAILURE;
+ } else {
+ ret = DDI_SUCCESS;
+ }
+ kmem_free(p, sizeof (*p));
+ pci_config_teardown(&hdl);
+ return (DDI_SUCCESS);
+ }
+ /*
+ * Upon suspend, set the power level to the lowest that can
+ * wake the system. If none can, then set to lowest.
+ * XXX later we will need to check policy to see if this
+ * XXX device has had wakeup disabled
+ */
+ pmcap = pci_config_get16(hdl, p->ppc_cap_offset + PCI_PMCAP);
+ if ((pmcap & PCI_PMCAP_D3COLD_PME) != 0)
+ p->ppc_suspend_level =
+ (PCI_PMCSR_PME_EN | PCI_PMCSR_D3HOT);
+ else if ((pmcap & (PCI_PMCAP_D3HOT_PME | PCI_PMCAP_D2_PME)) !=
+ 0)
+ p->ppc_suspend_level = PCI_PMCSR_PME_EN | PCI_PMCSR_D2;
+ else if ((pmcap & PCI_PMCAP_D1_PME) != 0)
+ p->ppc_suspend_level = PCI_PMCSR_PME_EN | PCI_PMCSR_D1;
+ else if ((pmcap & PCI_PMCAP_D0_PME) != 0)
+ p->ppc_suspend_level = PCI_PMCSR_PME_EN | PCI_PMCSR_D0;
+ else
+ p->ppc_suspend_level = PCI_PMCSR_D3HOT;
+
+ /*
+ * we defer updating the property to catch the saved
+ * register values as well
+ */
+ }
+ /* If we set this in kmem_zalloc'd memory, we already returned above */
+ if ((p->ppc_flags & PPCF_NOPMCAP) != 0) {
+ ddi_prop_free(p);
+ pci_config_teardown(&hdl);
+ return (DDI_SUCCESS);
+ }
+
+
+ /*
+ * Turn off (Bus) Master Enable, since acpica will be turning off
+ * bus master aribitration
+ */
+ pcicmd = pci_config_get16(hdl, PCI_CONF_COMM);
+ pcicmd &= ~PCI_COMM_ME;
+ pci_config_put16(hdl, PCI_CONF_COMM, pcicmd);
+
+ /*
+ * set pm csr
+ */
+ pmcsr = pci_config_get16(hdl, p->ppc_cap_offset + PCI_PMCSR);
+ p->ppc_pmcsr = pmcsr;
+ pmcsr &= (PCI_PMCSR_STATE_MASK);
+ pmcsr |= (PCI_PMCSR_PME_STAT | p->ppc_suspend_level);
+ pci_config_put16(hdl, p->ppc_cap_offset + PCI_PMCSR, pmcsr);
+
+#if defined(__x86)
+ /*
+ * Arrange for platform wakeup enabling
+ */
+ if ((p->ppc_suspend_level & PCI_PMCSR_PME_EN) != 0) {
+ int retval;
+
+ retval = acpi_ddi_setwake(dip, 3); /* XXX 3 for now */
+ if (retval) {
+ PMD(PMD_SX, ("pci_post_suspend, setwake %s@%s rets "
+ "%x\n", PM_NAME(dip), PM_ADDR(dip), retval));
+ }
+ }
+#endif
+
+ /*
+ * Push out saved register values
+ */
+ ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, SAVED_PM_CONTEXT,
+ (uchar_t *)p, sizeof (pci_pm_context_t));
+ if (ret == DDI_PROP_SUCCESS) {
+ if (fromprop)
+ ddi_prop_free(p);
+ else
+ kmem_free(p, sizeof (*p));
+ pci_config_teardown(&hdl);
+ return (DDI_SUCCESS);
+ }
+ /* Failed; put things back the way we found them */
+ (void) pci_restore_config_regs(dip);
+ if (fromprop)
+ ddi_prop_free(p);
+ else
+ kmem_free(p, sizeof (*p));
+ (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, SAVED_PM_CONTEXT);
+ pci_config_teardown(&hdl);
+ return (DDI_FAILURE);
+}
+
+/*
+ * The inverse of pci_post_suspend; handle pci-specific resume processing
+ * First, turn device back on, then restore config space.
+ */
+
+int
+pci_pre_resume(dev_info_t *dip)
+{
+ ddi_acc_handle_t hdl;
+ pci_pm_context_t *p;
+ /* E_FUNC_SET_NOT_USED */
+ uint16_t pmcap, pmcsr;
+ int flags;
+ uint_t length;
+ clock_t drv_usectohz(clock_t microsecs);
+#if defined(__x86)
+ uint16_t suspend_level;
+#endif
+
+ PMD(PMD_SX, ("pci_pre_resume %s:%d\n", ddi_driver_name(dip),
+ ddi_get_instance(dip)))
+ if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
+ SAVED_PM_CONTEXT, (uchar_t **)&p, &length) != DDI_PROP_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ flags = p->ppc_flags;
+ pmcap = p->ppc_cap_offset;
+ pmcsr = p->ppc_pmcsr;
+#if defined(__x86)
+ suspend_level = p->ppc_suspend_level;
+#endif
+ ddi_prop_free(p);
+ if ((flags & PPCF_NOPMCAP) != 0) {
+ return (DDI_SUCCESS);
+ }
+#if defined(__x86)
+ /*
+ * Turn platform wake enable back off
+ */
+ if ((suspend_level & PCI_PMCSR_PME_EN) != 0) {
+ int retval;
+
+ retval = acpi_ddi_setwake(dip, 0); /* 0 for now */
+ if (retval) {
+ PMD(PMD_SX, ("pci_pre_resume, setwake %s@%s rets "
+ "%x\n", PM_NAME(dip), PM_ADDR(dip), retval));
+ }
+ }
+#endif
+ if (pci_config_setup(dip, &hdl) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ pci_config_put16(hdl, pmcap + PCI_PMCSR, pmcsr);
+ delay(drv_usectohz(10000)); /* PCI PM spec D3->D0 (10ms) */
+ pci_config_teardown(&hdl);
+ (void) pci_restore_config_regs(dip); /* fudges D-state! */
+ return (DDI_SUCCESS);
+}
diff --git a/usr/src/uts/common/os/sunpm.c b/usr/src/uts/common/os/sunpm.c
index 9c89cf3637..40338e4fcf 100644
--- a/usr/src/uts/common/os/sunpm.c
+++ b/usr/src/uts/common/os/sunpm.c
@@ -174,6 +174,11 @@
#include <sys/disp.h>
#include <sys/sobject.h>
#include <sys/sunmdi.h>
+#include <sys/systm.h>
+#include <sys/cpuvar.h>
+#include <sys/cyclic.h>
+#include <sys/uadmin.h>
+#include <sys/srn.h>
/*
@@ -341,6 +346,37 @@ int autopm_enabled;
pm_cpupm_t cpupm = PM_CPUPM_NOTSET;
/*
+ * AutoS3 depends on autopm being enabled, and must be enabled by
+ * PM_START_AUTOS3 command.
+ */
+int autoS3_enabled;
+
+#if !defined(__sparc)
+/*
+ * on sparc these live in fillsysinfo.c
+ *
+ * If this variable is non-zero, cpr should return "not supported" when
+ * it is queried even though it would normally be supported on this platform.
+ */
+int cpr_supported_override;
+
+/*
+ * Some platforms may need to support CPR even in the absence of
+ * having the correct platform id information. If this
+ * variable is non-zero, cpr should proceed even in the absence
+ * of otherwise being qualified.
+ */
+int cpr_platform_enable = 0;
+
+#endif
+
+/*
+ * pm_S3_enabled indicates that we believe the platform can support S3,
+ * which we get from pmconfig(1M)
+ */
+int pm_S3_enabled;
+
+/*
* This flag is true while processes are stopped for a checkpoint/resume.
* Controlling processes of direct pm'd devices are not available to
* participate in power level changes, so we bypass them when this is set.
@@ -352,6 +388,7 @@ static int pm_processes_stopped;
/*
* see common/sys/epm.h for PMD_* values
*/
+
uint_t pm_debug = 0;
/*
@@ -364,6 +401,7 @@ uint_t pm_debug = 0;
* deadlocks and decremented at the end of pm_set_power()
*/
uint_t pm_divertdebug = 1;
+volatile uint_t pm_debug_to_console = 0;
kmutex_t pm_debug_lock; /* protects pm_divertdebug */
void prdeps(char *);
@@ -410,6 +448,13 @@ uint_t pm_poll_cnt[PM_MAX_CLONE]; /* count of events for poll */
unsigned char pm_interest[PM_MAX_CLONE];
struct pollhead pm_pollhead;
+/*
+ * Data structures shared with common/io/srn.c
+ */
+kmutex_t srn_clone_lock; /* protects srn_signal, srn_inuse */
+void (*srn_signal)(int type, int event);
+int srn_inuse; /* stop srn detach */
+
extern int hz;
extern char *platform_module_list[];
@@ -447,7 +492,6 @@ pscc_t *pm_pscc_direct;
#define PM_IS_NEXUS(dip) NEXUS_DRV(devopsp[PM_MAJOR(dip)])
#define POWERING_ON(old, new) ((old) == 0 && (new) != 0)
#define POWERING_OFF(old, new) ((old) != 0 && (new) == 0)
-#define PPM(dip) ((dev_info_t *)DEVI(dip)->devi_pm_ppm)
#define PM_INCR_NOTLOWEST(dip) { \
mutex_enter(&pm_compcnt_lock); \
@@ -510,14 +554,14 @@ typedef struct lock_loan {
static lock_loan_t lock_loan_head; /* list head is a dummy element */
#ifdef DEBUG
-#ifdef PMDDEBUG
+#ifdef PMDDEBUG
#define PMD_FUNC(func, name) char *(func) = (name);
-#else
+#else /* !PMDDEBUG */
#define PMD_FUNC(func, name)
-#endif
-#else
+#endif /* PMDDEBUG */
+#else /* !DEBUG */
#define PMD_FUNC(func, name)
-#endif
+#endif /* DEBUG */
/*
@@ -607,7 +651,7 @@ static boolean_t
pm_halt_callb(void *arg, int code)
{
_NOTE(ARGUNUSED(arg, code))
- return (B_TRUE); /* XXX for now */
+ return (B_TRUE);
}
/*
@@ -2057,6 +2101,25 @@ pm_ppm_notify_all_lowest(dev_info_t *dip, int mode)
(void) pm_ctlops((dev_info_t *)ppmcp->ppmc_dip, dip,
DDI_CTLOPS_POWER, &power_req, &result);
mutex_exit(&ppm_lock);
+ if (mode == PM_ALL_LOWEST) {
+ if (autoS3_enabled) {
+ PMD(PMD_SX, ("pm_ppm_notify_all_lowest triggering "
+ "autos3\n"))
+ mutex_enter(&srn_clone_lock);
+ if (srn_signal) {
+ srn_inuse++;
+ PMD(PMD_SX, ("(*srn_signal)(AUTOSX, 3)\n"))
+ (*srn_signal)(SRN_TYPE_AUTOSX, 3);
+ srn_inuse--;
+ } else {
+ PMD(PMD_SX, ("srn_signal NULL\n"))
+ }
+ mutex_exit(&srn_clone_lock);
+ } else {
+ PMD(PMD_SX, ("pm_ppm_notify_all_lowest autos3 "
+ "disabled\n"));
+ }
+ }
}
static void
@@ -3161,10 +3224,11 @@ pm_register_ppm(int (*func)(dev_info_t *), dev_info_t *dip)
if (i >= MAX_PPM_HANDLERS)
return (DDI_FAILURE);
while ((dip = ddi_get_parent(dip)) != NULL) {
- if (PM_GET_PM_INFO(dip) == NULL)
+ if (dip != ddi_root_node() && PM_GET_PM_INFO(dip) == NULL)
continue;
pm_ppm_claim(dip);
- if (pm_ppm_claimed(dip)) {
+ /* don't bother with the not power-manageable nodes */
+ if (pm_ppm_claimed(dip) && PM_GET_PM_INFO(dip)) {
/*
* Tell ppm about this.
*/
@@ -7549,7 +7613,7 @@ pm_cfb_setup(const char *stdout_path)
* IF console is fb and is power managed, don't do prom_printfs from
* pm debug macro
*/
- if (pm_cfb_enabled) {
+ if (pm_cfb_enabled && !pm_debug_to_console) {
if (pm_debug)
prom_printf("pm debug output will be to log only\n");
pm_divertdebug++;
@@ -7652,14 +7716,16 @@ pm_cfb_setup_intr(void)
extern void prom_set_outfuncs(void (*)(void), void (*)(void));
void pm_cfb_check_and_powerup(void);
+ mutex_init(&pm_cfb_lock, NULL, MUTEX_SPIN, (void *)ipltospl(SPL8));
+#ifdef PMDDEBUG
+ mutex_init(&pm_debug_lock, NULL, MUTEX_SPIN, (void *)ipltospl(SPL8));
+#endif
+
if (!stdout_is_framebuffer) {
PMD(PMD_CFB, ("%s: console not fb\n", pmf))
return;
}
- mutex_init(&pm_cfb_lock, NULL, MUTEX_SPIN, (void *)ipltospl(SPL8));
-#ifdef DEBUG
- mutex_init(&pm_debug_lock, NULL, MUTEX_SPIN, (void *)ipltospl(SPL8));
-#endif
+
/*
* setup software interrupt handler
*/
@@ -7811,14 +7877,26 @@ pm_path_to_major(char *path)
}
#ifdef DEBUG
+#ifndef sparc
+clock_t pt_sleep = 1;
+#endif
-char *pm_msgp;
-char *pm_bufend;
-char *pm_msgbuf = NULL;
-int pm_logpages = 2;
+char *pm_msgp;
+char *pm_bufend;
+char *pm_msgbuf = NULL;
+int pm_logpages = 0x100;
+#include <sys/sunldi.h>
+#include <sys/uio.h>
+clock_t pm_log_sleep = 1000;
+int pm_extra_cr = 1;
+volatile int pm_tty = 1;
#define PMLOGPGS pm_logpages
+#if defined(__x86)
+void pm_printf(char *s);
+#endif
+
/*PRINTFLIKE1*/
void
pm_log(const char *fmt, ...)
@@ -7841,15 +7919,30 @@ pm_log(const char *fmt, ...)
(void) vsnprintf(pm_msgbuf, size, fmt, adx);
if (!pm_divertdebug)
prom_printf("%s", pm_msgp);
+#if defined(__x86)
+ if (pm_tty) {
+ pm_printf(pm_msgp);
+ if (pm_extra_cr)
+ pm_printf("\r");
+ }
+#endif
pm_msgp = pm_msgbuf + size;
} else {
(void) vsnprintf(pm_msgp, size, fmt, adx);
+#if defined(__x86)
+ if (pm_tty) {
+ pm_printf(pm_msgp);
+ if (pm_extra_cr)
+ pm_printf("\r");
+ }
+#endif
if (!pm_divertdebug)
prom_printf("%s", pm_msgp);
pm_msgp += size;
}
va_end(adx);
mutex_exit(&pm_debug_lock);
+ drv_usecwait((clock_t)pm_log_sleep);
}
#endif /* DEBUG */
@@ -9108,16 +9201,19 @@ pm_desc_pwrchk_walk(dev_info_t *dip, void *arg)
PMD_FUNC(pmf, "desc_pwrchk")
pm_desc_pwrchk_t *pdpchk = (pm_desc_pwrchk_t *)arg;
pm_info_t *info = PM_GET_PM_INFO(dip);
- int i, curpwr, ce_level;
+ int i;
+ /* LINTED */
+ int curpwr, ce_level;
if (!info)
return (DDI_WALK_CONTINUE);
PMD(PMD_SET, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip)))
for (i = 0; i < PM_NUMCMPTS(dip); i++) {
- curpwr = PM_CURPOWER(dip, i);
- if (curpwr == 0)
+ /* LINTED */
+ if ((curpwr = PM_CURPOWER(dip, i)) == 0)
continue;
+ /* E_FUNC_SET_NOT_USED */
ce_level = (pdpchk->pdpc_par_involved == 0) ? CE_PANIC :
CE_WARN;
PMD(PMD_SET, ("%s: %s@%s(%s#%d) is powered off while desc "
@@ -9170,3 +9266,58 @@ pm_return_lock(void)
mutex_exit(&pm_loan_lock);
kmem_free(cur, sizeof (*cur));
}
+
+#if defined(__x86)
+
+#define CPR_RXR 0x1
+#define CPR_TXR 0x20
+#define CPR_DATAREG 0x3f8
+#define CPR_LSTAT 0x3fd
+#define CPR_INTRCTL 0x3f9
+
+char
+pm_getchar(void)
+{
+ while ((inb(CPR_LSTAT) & CPR_RXR) != CPR_RXR)
+ drv_usecwait(10);
+
+ return (inb(CPR_DATAREG));
+
+}
+
+void
+pm_putchar(char c)
+{
+ while ((inb(CPR_LSTAT) & CPR_TXR) == 0)
+ drv_usecwait(10);
+
+ outb(CPR_DATAREG, c);
+}
+
+void
+pm_printf(char *s)
+{
+ while (*s) {
+ pm_putchar(*s++);
+ }
+}
+
+#endif
+
+int
+pm_ppm_searchlist(pm_searchargs_t *sp)
+{
+ power_req_t power_req;
+ int result = 0;
+ /* LINTED */
+ int ret;
+
+ power_req.request_type = PMR_PPM_SEARCH_LIST;
+ power_req.req.ppm_search_list_req.searchlist = sp;
+ ASSERT(DEVI(ddi_root_node())->devi_pm_ppm);
+ ret = pm_ctlops((dev_info_t *)DEVI(ddi_root_node())->devi_pm_ppm,
+ ddi_root_node(), DDI_CTLOPS_POWER, &power_req, &result);
+ PMD(PMD_SX, ("pm_ppm_searchlist returns %d, result %d\n",
+ ret, result))
+ return (result);
+}
diff --git a/usr/src/uts/common/sys/Makefile b/usr/src/uts/common/sys/Makefile
index 75060a7a15..4cd97b5c3a 100644
--- a/usr/src/uts/common/sys/Makefile
+++ b/usr/src/uts/common/sys/Makefile
@@ -459,6 +459,7 @@ CHKHDRS= \
sockio.h \
squeue.h \
squeue_impl.h \
+ srn.h \
sservice.h \
stat.h \
statfs.h \
diff --git a/usr/src/uts/common/sys/asy.h b/usr/src/uts/common/sys/asy.h
index 491a222cd0..3b3d0c2a63 100644
--- a/usr/src/uts/common/sys/asy.h
+++ b/usr/src/uts/common/sys/asy.h
@@ -275,6 +275,15 @@ struct asycom {
ddi_iblock_cookie_t asy_iblock;
kmutex_t asy_excl; /* asy adaptive mutex */
kmutex_t asy_excl_hi; /* asy spinlock mutex */
+
+ /*
+ * The asy_soft_sr mutex should only be taken by the soft interrupt
+ * handler and the driver DDI_SUSPEND/DDI_RESUME code. It
+ * shouldn't be taken by any code that may get called indirectly
+ * by the soft interrupt handler (e.g. as a result of a put or
+ * putnext call).
+ */
+ kmutex_t asy_soft_sr; /* soft int suspend/resume mutex */
uchar_t asy_msr; /* saved modem status */
uchar_t asy_mcr; /* soft carrier bits */
uchar_t asy_lcr; /* console lcr bits */
@@ -300,11 +309,13 @@ struct asycom {
struct asyncline {
int async_flags; /* random flags */
kcondvar_t async_flags_cv; /* condition variable for flags */
+ kcondvar_t async_ops_cv; /* condition variable for async_ops */
dev_t async_dev; /* device major/minor numbers */
mblk_t *async_xmitblk; /* transmit: active msg block */
struct asycom *async_common; /* device common data */
tty_common_t async_ttycommon; /* tty driver common data */
bufcall_id_t async_wbufcid; /* id for pending write-side bufcall */
+ size_t async_wbufcds; /* Buffer size requested in bufcall */
timeout_id_t async_polltid; /* softint poll timeout id */
timeout_id_t async_dtrtid; /* delaying DTR turn on */
timeout_id_t async_utbrktid; /* hold minimum untimed break time id */
@@ -343,6 +354,10 @@ struct asyncline {
short async_ext; /* modem status change count */
short async_work; /* work to do flag */
timeout_id_t async_timer; /* close drain progress timer */
+
+ mblk_t *async_suspqf; /* front of suspend queue */
+ mblk_t *async_suspqb; /* back of suspend queue */
+ int async_ops; /* active operations counter */
};
/* definitions for async_flags field */
@@ -372,6 +387,8 @@ struct asyncline {
#define ASYNC_OUT_FLW_RESUME 0x00100000 /* output need to be resumed */
/* because of transition of flow */
/* control from stop to start */
+#define ASYNC_DDI_SUSPENDED 0x00200000 /* suspended by DDI */
+#define ASYNC_RESUME_BUFCALL 0x00400000 /* call bufcall when resumed by DDI */
/* asy_hwtype definitions */
#define ASY8250A 0x2 /* 8250A or 16450 */
@@ -389,6 +406,7 @@ struct asyncline {
#define ASY_RTS_DTR_OFF 0x00000020
#define ASY_IGNORE_CD 0x00000040
#define ASY_CONSOLE 0x00000080
+#define ASY_DDI_SUSPENDED 0x00000100 /* suspended by DDI */
/* definitions for asy_flags2 field */
#define ASY2_NO_LOOPBACK 0x00000001 /* Device doesn't support loopback */
diff --git a/usr/src/uts/common/sys/audio/impl/audio810_impl.h b/usr/src/uts/common/sys/audio/impl/audio810_impl.h
index 7846685221..05baf89564 100644
--- a/usr/src/uts/common/sys/audio/impl/audio810_impl.h
+++ b/usr/src/uts/common/sys/audio/impl/audio810_impl.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -19,8 +18,9 @@
*
* CDDL HEADER END
*/
+
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -64,6 +64,9 @@ extern "C" {
#define I810_BSIZE (8*1024)
+#define I810_NOT_SUSPENDED (0)
+#define I810_SUSPENDED (~I810_NOT_SUSPENDED)
+
#define I810_MAX_CHANNELS (200) /* force max # chs */
#define I810_MAX_HW_CHANNELS (32)
#define I810_MAX_IN_CHANNELS (1)
@@ -279,6 +282,9 @@ struct audio810_state {
int i810_psamples; /* pcm-out samples/intr */
uint32_t i810_res_flags; /* resource flags */
+ int i810_suspended; /* suspend/resume state */
+ int i810_busy_cnt; /* device busy count */
+ kcondvar_t i810_cv; /* suspend/resume cond. var */
};
typedef struct audio810_state audio810_state_t;
diff --git a/usr/src/uts/common/sys/cpr.h b/usr/src/uts/common/sys/cpr.h
index 337cbf9986..6fd5438cc0 100644
--- a/usr/src/uts/common/sys/cpr.h
+++ b/usr/src/uts/common/sys/cpr.h
@@ -178,6 +178,9 @@ struct cprconfig {
extern int cpr_debug;
+#define errp prom_printf
+#define DPRINT
+
/*
* CPR_DEBUG1 displays the main flow of CPR. Use it to identify which
* sub-module of CPR causes problems.
@@ -393,6 +396,7 @@ typedef struct cpr_terminator ctrm_t;
#define AD_CPR_TESTZ 8 /* test mode, auto-restart compress */
#define AD_CPR_PRINT 9 /* print out stats */
#define AD_CPR_NOCOMPRESS 10 /* store state file uncompressed */
+#define AD_CPR_SUSP_DEVICES 11 /* Only suspend resume devices */
#define AD_CPR_DEBUG0 100 /* clear debug flag */
#define AD_CPR_DEBUG1 101 /* display CPR main flow via prom */
#define AD_CPR_DEBUG2 102 /* misc small/mid size loops */
@@ -404,6 +408,31 @@ typedef struct cpr_terminator ctrm_t;
#define AD_CPR_DEBUG9 109 /* display stat data on console */
/*
+ * Suspend to RAM test points.
+ * Probably belong above, but are placed here for now.
+ */
+/* S3 leave hardware on and return success */
+#define AD_LOOPBACK_SUSPEND_TO_RAM_PASS 22
+
+/* S3 leave hardware on and return failure */
+#define AD_LOOPBACK_SUSPEND_TO_RAM_FAIL 23
+
+/* S3 ignored devices that fail to suspend */
+#define AD_FORCE_SUSPEND_TO_RAM 24
+
+/* S3 on a specified device */
+#define AD_DEVICE_SUSPEND_TO_RAM 25
+
+
+
+/*
+ * Temporary definition of the Suspend to RAM development subcommands
+ * so that non-ON apps will work after initial integration.
+ */
+#define DEV_SUSPEND_TO_RAM 200
+#define DEV_CHECK_SUSPEND_TO_RAM 201
+
+/*
* cprboot related information and definitions.
* The statefile names are hardcoded for now.
*/
@@ -506,6 +535,8 @@ extern cpr_t cpr_state;
#define C_ST_SETPROPS_0 10
#define C_ST_DUMP_NOSPC 11
#define C_ST_REUSABLE 12
+#define C_ST_NODUMP 13
+#define C_ST_MP_PAUSED 14
#define cpr_set_substate(a) (CPR->c_substate = (a))
@@ -547,18 +578,25 @@ struct cpr_walkinfo {
*/
#define DCF_CPR_SUSPENDED 0x1 /* device went through cpr_suspend */
+/*
+ * Values used to differentiate between suspend to disk and suspend to ram
+ * in cpr_suspend and cpr_resume
+ */
+
+#define CPR_TORAM 3
+#define CPR_TODISK 4
+
#ifndef _ASM
extern char *cpr_build_statefile_path(void);
extern char *cpr_enumerate_promprops(char **, size_t *);
extern char *cpr_get_statefile_prom_path(void);
-extern int cpr_clrbit(pfn_t, int);
extern int cpr_contig_pages(vnode_t *, int);
extern int cpr_default_setup(int);
extern int cpr_dump(vnode_t *);
extern int cpr_get_reusable_mode(void);
extern int cpr_isset(pfn_t, int);
-extern int cpr_main(void);
+extern int cpr_main(int);
extern int cpr_mp_offline(void);
extern int cpr_mp_online(void);
extern int cpr_nobit(pfn_t, int);
@@ -570,10 +608,10 @@ extern int cpr_read_phys_page(int, uint_t, int *);
extern int cpr_read_terminator(int, ctrm_t *, caddr_t);
extern int cpr_resume_devices(dev_info_t *, int);
extern int cpr_set_properties(int);
-extern int cpr_setbit(pfn_t, int);
extern int cpr_statefile_is_spec(void);
extern int cpr_statefile_offset(void);
extern int cpr_stop_kernel_threads(void);
+extern int cpr_threads_are_stopped(void);
extern int cpr_stop_user_threads(void);
extern int cpr_suspend_devices(dev_info_t *);
extern int cpr_validate_definfo(int);
@@ -585,9 +623,7 @@ extern int i_cpr_dump_sensitive_kpages(vnode_t *);
extern int i_cpr_save_sensitive_kpages(void);
extern pgcnt_t cpr_count_kpages(int, bitfunc_t);
extern pgcnt_t cpr_count_pages(caddr_t, size_t, int, bitfunc_t, int);
-extern pgcnt_t cpr_count_seg_pages(int, bitfunc_t);
extern pgcnt_t cpr_count_volatile_pages(int, bitfunc_t);
-extern pgcnt_t cpr_scan_kvseg(int, bitfunc_t, struct seg *);
extern pgcnt_t i_cpr_count_sensitive_kpages(int, bitfunc_t);
extern pgcnt_t i_cpr_count_special_kpages(int, bitfunc_t);
extern pgcnt_t i_cpr_count_storage_pages(int, bitfunc_t);
@@ -607,6 +643,9 @@ extern void cpr_stat_record_events(void);
extern void cpr_tod_get(cpr_time_t *ctp);
extern void cpr_tod_fault_reset(void);
extern void i_cpr_bitmap_cleanup(void);
+extern void i_cpr_stop_other_cpus(void);
+extern void i_cpr_alloc_cpus(void);
+extern void i_cpr_free_cpus(void);
/*PRINTFLIKE2*/
extern void cpr_err(int, const char *, ...) __KPRINTFLIKE(2);
diff --git a/usr/src/uts/common/sys/cpuvar.h b/usr/src/uts/common/sys/cpuvar.h
index 3093034ceb..4785796781 100644
--- a/usr/src/uts/common/sys/cpuvar.h
+++ b/usr/src/uts/common/sys/cpuvar.h
@@ -512,6 +512,7 @@ extern cpuset_t cpu_seqid_inuse;
#define CPU_CPR_OFFLINE 0x0
#define CPU_CPR_ONLINE 0x1
#define CPU_CPR_IS_OFFLINE(cpu) (((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE) == 0)
+#define CPU_CPR_IS_ONLINE(cpu) ((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE)
#define CPU_SET_CPR_FLAGS(cpu, flag) ((cpu)->cpu_cpr_flags |= flag)
#if defined(_KERNEL) || defined(_KMEMUSER)
diff --git a/usr/src/uts/common/sys/dktp/cmdk.h b/usr/src/uts/common/sys/dktp/cmdk.h
index 05dff32d9a..11b1bd9d78 100644
--- a/usr/src/uts/common/sys/dktp/cmdk.h
+++ b/usr/src/uts/common/sys/dktp/cmdk.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -19,8 +18,9 @@
*
* CDDL HEADER END
*/
+
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -73,10 +73,26 @@ struct cmdk {
uint32_t dk_altused; /* num entries in V_ALTSCTR */
uint32_t *dk_slc_cnt; /* entries per slice */
struct alts_ent **dk_slc_ent; /* link to remap data */
+
+ /*
+ * for power management
+ */
+ kmutex_t dk_pm_mutex;
+ kcondvar_t dk_suspend_cv;
+ uint32_t dk_pm_level;
+ uint32_t dk_pm_is_enabled;
};
+/*
+ * Power Management definitions
+ */
+#define CMDK_SPINDLE_UNINIT ((uint_t)(-1))
+#define CMDK_SPINDLE_OFF 0x0
+#define CMDK_SPINDLE_ON 0x1
+
/* common disk flags definitions */
#define CMDK_OPEN 0x1
+#define CMDK_SUSPEND 0x2
#define CMDK_TGDK_OPEN 0x4
#define CMDKUNIT(dev) (getminor((dev)) >> CMDK_UNITSHF)
diff --git a/usr/src/uts/common/sys/dktp/dadk.h b/usr/src/uts/common/sys/dktp/dadk.h
index 0f07c5a89f..f5c990e7c0 100644
--- a/usr/src/uts/common/sys/dktp/dadk.h
+++ b/usr/src/uts/common/sys/dktp/dadk.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -63,6 +63,8 @@ struct dadk {
uchar_t dad_thread_cnt; /* reference count on removable */
/* - disk state watcher thread */
kstat_t *dad_errstats; /* error stats */
+ kmutex_t dad_cmd_mutex;
+ int dad_cmd_count;
};
#define DAD_SECSIZ dad_phyg.g_secsiz
@@ -132,6 +134,8 @@ static void dadk_watch_thread(struct dadk *dadkp);
int dadk_inquiry(opaque_t objp, opaque_t *inqpp);
void dadk_cleanup(struct tgdk_obj *dkobjp);
+int dadk_getcmds(opaque_t objp);
+
#ifdef __cplusplus
}
#endif
diff --git a/usr/src/uts/common/sys/epm.h b/usr/src/uts/common/sys/epm.h
index 5443eee3e2..35b656409b 100644
--- a/usr/src/uts/common/sys/epm.h
+++ b/usr/src/uts/common/sys/epm.h
@@ -34,6 +34,12 @@
#include <sys/ddi_impldefs.h>
#include <sys/taskq.h>
+/*
+ * XXXX
+ * Do we really need this include? It may be leftover from early CPUPM code.
+ * #include <sys/processor.h>
+ */
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -225,6 +231,8 @@ typedef enum pm_cpupm
PM_CPUPM_DISABLE /* do not power manage CPU devices */
} pm_cpupm_t;
+#define PPM(dip) ((dev_info_t *)DEVI(dip)->devi_pm_ppm)
+
/*
* The power request struct uses for the DDI_CTLOPS_POWER busctl.
*
@@ -258,7 +266,10 @@ typedef enum {
PMR_PPM_TRY_LOCK_POWER, /* ppm try lock power */
PMR_PPM_INIT_CHILD, /* ppm init child notify */
PMR_PPM_UNINIT_CHILD, /* ppm uninit child notify */
- PMR_PPM_POWER_LOCK_OWNER /* ppm power lock owner's address */
+ PMR_PPM_POWER_LOCK_OWNER, /* ppm power lock owner's address */
+ PMR_PPM_ENTER_SX, /* ppm: enter ACPI S[2-4] state */
+ PMR_PPM_EXIT_SX, /* ppm: exit ACPI S[2-4] state */
+ PMR_PPM_SEARCH_LIST /* ppm: search tuple list */
} pm_request_type;
/*
@@ -386,9 +397,47 @@ typedef struct power_req {
dev_info_t *who;
kthread_t *owner;
} ppm_power_lock_owner_req;
+ /*
+ * PMR_PPM_ENTER_SX
+ */
+ struct ppm_power_enter_sx_req {
+ int sx_state; /* S3, S4 */
+ int test_point; /* test point */
+ uint64_t wakephys; /* restart vector phys addr */
+ void *psr; /* PSM (apic) state buffer */
+ } ppm_power_enter_sx_req;
+ /*
+ * PMR_PPM_SEARCH_LIST
+ */
+ struct ppm_search_list {
+ pm_searchargs_t *searchlist;
+ int result;
+ } ppm_search_list_req;
} req;
} power_req_t;
+#define S3 3
+#define S4 4
+
+extern int cpr_test_point;
+extern major_t cpr_device;
+
+#define LOOP_BACK_NONE 0
+#define LOOP_BACK_PASS 1
+#define LOOP_BACK_FAIL 2
+#define FORCE_SUSPEND_TO_RAM 3
+#define DEVICE_SUSPEND_TO_RAM 4
+
+/*
+ * Struct passed as arg to appm_ioctl
+ */
+typedef struct s3_args {
+ int s3a_state; /* S3, S4 */
+ int s3a_test_point; /* test point */
+ uint64_t s3a_wakephys; /* restart vector physical addr */
+ void *s3a_psr; /* apic state save buffer */
+} s3a_t;
+
/*
* Structure used by the following bus_power operations:
*
@@ -624,25 +673,86 @@ typedef struct pm_thresh_rec {
#define PMD_PIL 0x20000000 /* print out PIL when calling power */
#define PMD_PHC 0x40000000 /* pm_power_has_changed messages */
#define PMD_LOCK 0x80000000
+#define PMD_SX 0x80000000 /* ACPI S[1234] states */
+#define PMD_PROTO PMD_SX /* and other Prototype stuff */
extern uint_t pm_debug;
extern uint_t pm_divertdebug;
/*PRINTFLIKE1*/
extern void pm_log(const char *fmt, ...) __KPRINTFLIKE(1);
+#if !defined(__sparc)
+/*
+ * On non-sparc machines, PMDDEBUG isn't as big a deal as Sparc, so we
+ * define PMDDEBUG here for use on non-sparc platforms.
+ */
+#define PMDDEBUG
+#endif /* !__sparc */
+
#ifdef PMDDEBUG
#define PMD(level, arglist) { \
if (pm_debug & (level)) { \
pm_log arglist; \
} \
}
-#else
+#else /* !PMDDEBUG */
#define PMD(level, arglist) ((void)0);
+#endif /* PMDDEBUG */
+#ifndef sparc
+extern clock_t pt_sleep;
+/* code is char hex number to display on POST LED */
+#define PT(code) {outb(0x80, (char)code); drv_usecwait(pt_sleep); }
+#else
+#define PT(code)
#endif
-
#else
#define PMD(level, arglist)
+#define PT(code)
#endif
+/*
+ * Code Value Indication
+ *
+ */
+#define PT_SPL7 0x01 /* pm_suspend spl7 */
+#define PT_PMSRET 0x02 /* pm_suspend returns */
+#define PT_PPMCTLOP 0x03 /* invoking ppm_ctlops */
+#define PT_ACPISDEV 0x04 /* acpi suspend devices */
+#define PT_IC 0x05 /* acpi intr_clear */
+#define PT_1to1 0x06 /* 1:1 mapping */
+#define PT_SC 0x07 /* save context */
+#define PT_SWV 0x08 /* set waking vector */
+#define PT_SWV_FAIL 0x09 /* set waking vector failed */
+#define PT_EWE 0x0a /* enable wake events */
+#define PT_EWE_FAIL 0x0b /* enable wake events failed */
+#define PT_RTCW 0x0c /* setting rtc wakeup */
+#define PT_RTCW_FAIL 0x0d /* setting rtc wakeup failed */
+#define PT_TOD 0x0e /* setting tod */
+#define PT_SXP 0x0f /* sx prep */
+#define PT_SXE 0x10 /* sx enter */
+#define PT_SXE_FAIL 0x11 /* sx enter failed */
+#define PT_INSOM 0x12 /* insomnia label */
+#define PT_WOKE 0x20 /* woke up */
+#define PT_UNDO1to1 0x21 /* Undo 1:1 mapping */
+#define PT_LSS 0x22 /* leave sleep state */
+#define PT_LSS_FAIL 0x23 /* leave sleep state failed */
+#define PT_DPB 0x24 /* disable power button */
+#define PT_DPB_FAIL 0x25 /* disable power button failed */
+#define PT_DRTC_FAIL 0x26 /* disable rtc fails */
+#define PT_ACPIREINIT 0x27 /* reinit apic */
+#define PT_ACPIRESTORE 0x28 /* restore apic */
+#define PT_INTRRESTORE 0x28 /* restore interrupts */
+#define PT_RESDEV 0x2a /* resume acpi devices */
+#define PT_CPU 0x2b /* init_cpu_syscall */
+#define PT_PRESUME 0x30 /* pm_resume entered */
+#define PT_RSUS 0x31 /* pm_resume "suspended" */
+#define PT_RKERN 0x32 /* pm_resume "kernel" */
+#define PT_RDRV 0x33 /* pm_resume "driver" */
+#define PT_RDRV_FAIL 0x34 /* pm_resume "driver" failed */
+#define PT_RRNOINVOL 0x35 /* pm_resume "reattach_noinvol" */
+#define PT_RUSER 0x36 /* pm_resume "user" */
+#define PT_RAPMSIG 0x37 /* pm_resume APM/SRN signal */
+#define PT_RMPO 0x38 /* pm_resume "mp_online" */
+#define PT_RDONE 0x39 /* pm_resume done */
extern void pm_detaching(dev_info_t *);
extern void pm_detach_failed(dev_info_t *);
@@ -721,7 +831,7 @@ extern int pm_is_cfb(dev_info_t *);
extern int pm_cfb_is_up(void);
#endif
-#ifdef DEBUG
+#ifdef DIPLOCKDEBUG
#define PM_LOCK_DIP(dip) { PMD(PMD_LOCK, ("dip lock %s@%s(%s#%d) " \
"%s %d\n", PM_DEVICE(dip), \
__FILE__, __LINE__)) \
diff --git a/usr/src/uts/common/sys/pm.h b/usr/src/uts/common/sys/pm.h
index a65075b3a5..8be171fef1 100644
--- a/usr/src/uts/common/sys/pm.h
+++ b/usr/src/uts/common/sys/pm.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -95,7 +95,16 @@ typedef enum {
PM_STOP_CPUPM,
PM_GET_CPU_THRESHOLD,
PM_SET_CPU_THRESHOLD,
- PM_GET_CPUPM_STATE
+ PM_GET_CPUPM_STATE,
+ PM_ENABLE_S3, /* allow pm to go to S3 state */
+ PM_DISABLE_S3, /* do not allow pm to go to S3 state */
+ PM_ENTER_S3, /* obsolete, not supported */
+ PM_START_AUTOS3,
+ PM_STOP_AUTOS3,
+ PM_SEARCH_LIST, /* search S3 enable/disable list */
+ PM_GET_AUTOS3_STATE,
+ PM_GET_S3_SUPPORT_STATE,
+ PM_GET_CMD_NAME
} pm_cmds;
/*
@@ -129,6 +138,17 @@ typedef struct pm_req {
} pm_req_t;
/*
+ * PM_SEARCH_LIST requires a list name, manufacturer and product name
+ * Searches the named list for a matching tuple.
+ * NOTE: This structure may be removed in a later release.
+ */
+typedef struct pm_searchargs {
+ char *pms_listname; /* name of list to search */
+ char *pms_manufacturer; /* 1st element of tuple */
+ char *pms_product; /* 2nd element of tuple */
+} pm_searchargs_t;
+
+/*
* Use these for PM_ADD_DEPENDENT and PM_ADD_DEPENDENT_PROPERTY
*/
#define pmreq_keeper physpath /* keeper in the physpath field */
@@ -208,6 +228,13 @@ typedef struct pm_state_change32 {
size32_t size; /* size of buffer physpath points to */
} pm_state_change32_t;
+typedef struct pm_searchargs32_t {
+ caddr32_t pms_listname;
+ caddr32_t pms_manufacturer;
+ caddr32_t pms_product;
+} pm_searchargs32_t;
+
+
#endif
/*
@@ -228,10 +255,13 @@ typedef enum {
PM_CPU_THRESHOLD,
PM_CPU_PM_ENABLED,
PM_CPU_PM_DISABLED,
- PM_CPU_PM_NOTSET
+ PM_CPU_PM_NOTSET,
+ PM_AUTOS3_ENABLED,
+ PM_AUTOS3_DISABLED,
+ PM_S3_SUPPORT_ENABLED,
+ PM_S3_SUPPORT_DISABLED
} pm_states;
-
#ifdef __cplusplus
}
#endif
diff --git a/usr/src/uts/common/sys/ppmvar.h b/usr/src/uts/common/sys/ppmvar.h
index 87d2a68086..1b8562a423 100644
--- a/usr/src/uts/common/sys/ppmvar.h
+++ b/usr/src/uts/common/sys/ppmvar.h
@@ -135,6 +135,12 @@ struct ppm_dc {
ldi_handle_t lh; /* layered (ldi) handle */
char *path; /* control device prom pathname */
uint_t cmd; /* search key: op to be performed */
+ /* one of: PPMDC_CPU_NEXT */
+ /* PPMDC_CPU_GO, PPMDC_FET_ON, */
+ /* PPMDC_FET_OFF, PPMDC_LED_ON, */
+ /* PPMDC_LED_OFF, PPMDC_PCI_ON, */
+ /* PPMDC_ENTER_S3, PPMDC_PCI_OFF */
+ /* PPMDC_EXIT_S3 commands */
uint_t method; /* control method / union selector */
/* one of PPMDC_KIO, PPMDC_I2CKIO, */
/* PPMDC_CPUSPEEDKIO */
@@ -157,6 +163,7 @@ struct ppm_dc {
uint_t post_delay; /* post delay, if any */
} kio;
+#ifdef sun4u
/* PPMDC_I2CKIO: KIO requires 'arg' as struct i2c_gpio */
/* (defined in i2c_client.h) */
struct m_i2ckio {
@@ -169,6 +176,7 @@ struct ppm_dc {
/* operation can be carried out */
uint_t post_delay; /* post delay, if any */
} i2c;
+#endif
/* PPMDC_CPUSPEEDKIO, PPMDC_VCORE: cpu estar related */
/* simple KIO */
@@ -203,6 +211,8 @@ typedef struct ppm_dc ppm_dc_t;
#define PPMDC_PWR_ON 16
#define PPMDC_RESET_OFF 17
#define PPMDC_RESET_ON 18
+#define PPMDC_ENTER_S3 19
+#define PPMDC_EXIT_S3 20
/*
* ppm_dc.method field - select union element
@@ -210,7 +220,9 @@ typedef struct ppm_dc ppm_dc_t;
#define PPMDC_KIO 1 /* simple ioctl with val as arg */
#define PPMDC_CPUSPEEDKIO 2 /* ioctl with speed index arg */
#define PPMDC_VCORE 3 /* CPU Vcore change operation */
+#ifdef sun4u
#define PPMDC_I2CKIO 4 /* ioctl with i2c_gpio_t as arg */
+#endif
/*
* devices that are powered by the same source
@@ -245,6 +257,7 @@ typedef struct ppm_domain ppm_domain_t;
#define PPMD_PCI 4 /* PCI pm model */
#define PPMD_PCI_PROP 5 /* PCI_PROP pm model */
#define PPMD_PCIE 6 /* PCI Express pm model */
+#define PPMD_SX 7 /* ACPI Sx pm model */
#define PPMD_IS_PCI(model) \
((model) == PPMD_PCI || (model) == PPMD_PCI_PROP)
diff --git a/usr/src/uts/common/sys/rtc.h b/usr/src/uts/common/sys/rtc.h
index 330f41ee69..1db2925542 100644
--- a/usr/src/uts/common/sys/rtc.h
+++ b/usr/src/uts/common/sys/rtc.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -19,8 +18,9 @@
*
* CDDL HEADER END
*/
+
/*
- * Copyright 1999 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -90,7 +90,7 @@ extern "C" {
#define RTC_VRT 0x80 /* Valid RAM and time bit */
#define RTC_NREG 0x0e /* number of RTC registers */
-#define RTC_NREGP 0x0a /* number of RTC registers to set time */
+#define RTC_NREGP 0x0c /* number of RTC registers to set time */
#define RTC_CENTURY 0x32 /* not included in RTC_NREG(P) */
/*
@@ -117,6 +117,8 @@ struct rtc_t { /* registers 0x0 to 0xD, 0x32 */
unsigned char rtc_statusc;
unsigned char rtc_statusd;
unsigned char rtc_century; /* register 0x32 */
+ unsigned char rtc_adom; /* ACPI-provided day alarm */
+ unsigned char rtc_amon; /* ACPI-provided mon alarm */
};
#ifdef __cplusplus
diff --git a/usr/src/uts/common/sys/srn.h b/usr/src/uts/common/sys/srn.h
new file mode 100644
index 0000000000..6145a98108
--- /dev/null
+++ b/usr/src/uts/common/sys/srn.h
@@ -0,0 +1,79 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_SRN_H
+#define _SYS_SRN_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The following ioctl commands and structures may not exist
+ * or may have a different interpretation in a future release.
+ */
+
+
+#define SRN_STANDBY_REQ 0xa01
+#define SRN_SUSPEND_REQ 0xa02
+#define SRN_NORMAL_RESUME 0xa03
+#define SRN_CRIT_RESUME 0xa04
+#define SRN_BATTERY_LOW 0xa05
+#define SRN_POWER_CHANGE 0xa06
+#define SRN_UPDATE_TIME 0xa07
+#define SRN_CRIT_SUSPEND_REQ 0xa08
+#define SRN_USER_STANDBY_REQ 0xa09
+#define SRN_USER_SUSPEND_REQ 0xa0a
+#define SRN_SYS_STANDBY_RESUME 0xa0b
+#define SRN_IOC_NEXTEVENT 0xa0c
+#define SRN_IOC_RESUME 0xa0d
+#define SRN_IOC_SUSPEND 0xa0e
+#define SRN_IOC_STANDBY 0xa0f
+#define SRN_IOC_AUTOSX 0xa10 /* change behavior of driver */
+
+typedef struct srn_event_info
+{
+ int ae_type;
+
+} srn_event_info_t;
+
+#ifdef _KERNEL
+
+#define SRN_MAX_CLONE 8 /* only two known consumers */
+
+#define SRN_TYPE_APM 1
+#define SRN_TYPE_AUTOSX 2
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SRN_H */
diff --git a/usr/src/uts/common/sys/sunddi.h b/usr/src/uts/common/sys/sunddi.h
index 351a75bcd4..82cb130bd9 100644
--- a/usr/src/uts/common/sys/sunddi.h
+++ b/usr/src/uts/common/sys/sunddi.h
@@ -1919,6 +1919,12 @@ pci_target_enqueue(uint64_t, char *, char *, uint64_t);
void
pci_targetq_init(void);
+int
+pci_post_suspend(dev_info_t *dip);
+
+int
+pci_pre_resume(dev_info_t *dip);
+
/*
* the prototype for the C Language Type Model inquiry.
*/
diff --git a/usr/src/uts/common/sys/uadmin.h b/usr/src/uts/common/sys/uadmin.h
index d88f247a93..e307462896 100644
--- a/usr/src/uts/common/sys/uadmin.h
+++ b/usr/src/uts/common/sys/uadmin.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -58,12 +58,27 @@ extern "C" {
#define AD_POWEROFF 6 /* software poweroff */
#define AD_NOSYNC 7 /* do not sync filesystems on next A_DUMP */
+
/*
* Functions reserved for A_FREEZE (may not be available on all platforms)
+ * Note: AD_COMPRESS, AD_CHECK and AD_FORCE are now obsolete
+ * The first two are succeeded by AD_SUSPEND_TO_DISK and
+ * AD_CHECK_SUSPEND_TO_DISK respectively.
+ * AD_FORCE should not be used by any new application
+ *
+ * We maintain compatibility with the earlier interfaces:
+ * AD_COMPRESS and AD_CHECK, by preserving those values
+ * in the corresponding new interfaces
*/
+
#define AD_COMPRESS 0 /* store state file compressed during CPR */
#define AD_FORCE 1 /* force to do AD_COMPRESS */
#define AD_CHECK 2 /* test if CPR module is available */
+#define AD_SUSPEND_TO_DISK AD_COMPRESS /* A_FREEZE, CPR or ACPI S4 */
+#define AD_CHECK_SUSPEND_TO_DISK AD_CHECK /* A_FREEZE, CPR/S4 capable? */
+#define AD_SUSPEND_TO_RAM 20 /* A_FREEZE, S3 */
+#define AD_CHECK_SUSPEND_TO_RAM 21 /* A_FREEZE, S3 capable? */
+
/*
* NOTE: the following defines comprise an Unstable interface. Their semantics
* may change or they may be removed completely in a later release
diff --git a/usr/src/uts/common/syscall/uadmin.c b/usr/src/uts/common/syscall/uadmin.c
index df00b4e9d0..a5f92268f2 100644
--- a/usr/src/uts/common/syscall/uadmin.c
+++ b/usr/src/uts/common/syscall/uadmin.c
@@ -309,12 +309,15 @@ kadmin(int cmd, int fcn, void *mdep, cred_t *credp)
case A_FREEZE:
{
- /* XXX: declare in some header file */
- extern int cpr(int);
+ /*
+ * This is the entrypoint for all suspend/resume actions.
+ */
+ extern int cpr(int, void *);
if (modload("misc", "cpr") == -1)
return (ENOTSUP);
- error = cpr(fcn);
+ /* Let the CPR module decide what to do with mdep */
+ error = cpr(fcn, mdep);
break;
}
@@ -387,7 +390,8 @@ uadmin(int cmd, int fcn, uintptr_t mdep)
* a boot string. We pull that in as bootargs, if applicable.
*/
if (mdep != NULL &&
- (cmd == A_SHUTDOWN || cmd == A_REBOOT || cmd == A_DUMP)) {
+ (cmd == A_SHUTDOWN || cmd == A_REBOOT || cmd == A_DUMP ||
+ cmd == A_FREEZE)) {
bootargs = kmem_zalloc(BOOTARGS_MAX, KM_SLEEP);
if ((error = copyinstr((const char *)mdep, bootargs,
BOOTARGS_MAX, &nbytes)) != 0) {