summaryrefslogtreecommitdiff
path: root/usr
diff options
context:
space:
mode:
authoreota <none@none>2007-09-20 14:16:47 -0700
committereota <none@none>2007-09-20 14:16:47 -0700
commitdd4eeefdb8e4583c47e28a7f315db6087931ef06 (patch)
tree42d484ac74ab436d78007be666c66d144c55890b /usr
parent55c4b5faaa3d3ff8bea0d08505e7ea0850b949e8 (diff)
downloadillumos-joyent-dd4eeefdb8e4583c47e28a7f315db6087931ef06.tar.gz
PSARC 2007/402 Driver Periodic Timeouts
4977787 Official DDI interface is required for periodic timeout requests
Diffstat (limited to 'usr')
-rw-r--r--usr/src/pkgdefs/SUNWhea/prototype_com1
-rw-r--r--usr/src/uts/common/Makefile.files1
-rw-r--r--usr/src/uts/common/io/avintr.c29
-rw-r--r--usr/src/uts/common/io/bge/bge_impl.h5
-rw-r--r--usr/src/uts/common/io/bge/bge_main2.c26
-rw-r--r--usr/src/uts/common/io/bscv.c315
-rwxr-xr-xusr/src/uts/common/io/rge/rge.h4
-rwxr-xr-xusr/src/uts/common/io/rge/rge_main.c43
-rw-r--r--usr/src/uts/common/os/clock.c13
-rw-r--r--usr/src/uts/common/os/ddi_timer.c936
-rw-r--r--usr/src/uts/common/os/main.c2
-rw-r--r--usr/src/uts/common/os/sunddi.c120
-rw-r--r--usr/src/uts/common/sys/Makefile1
-rw-r--r--usr/src/uts/common/sys/avintr.h8
-rw-r--r--usr/src/uts/common/sys/bscv_impl.h4
-rw-r--r--usr/src/uts/common/sys/ddi_timer.h151
-rw-r--r--usr/src/uts/common/sys/dditypes.h5
-rw-r--r--usr/src/uts/common/sys/sunddi.h20
-rw-r--r--usr/src/uts/i86pc/io/pcplusmp/apic.c35
-rw-r--r--usr/src/uts/i86pc/os/startup.c13
-rw-r--r--usr/src/uts/i86pc/sys/machsystm.h1
-rw-r--r--usr/src/uts/sun4/os/intr.c55
-rw-r--r--usr/src/uts/sun4u/io/dmfe/dmfe_main.c132
-rw-r--r--usr/src/uts/sun4u/io/rmc_comm.c47
-rw-r--r--usr/src/uts/sun4u/io/todds1337.c44
-rw-r--r--usr/src/uts/sun4u/lw2plus/io/lombus.c100
-rw-r--r--usr/src/uts/sun4u/snowbird/io/todds1307/todds1307.c72
-rw-r--r--usr/src/uts/sun4u/snowbird/sys/todds1307.h10
-rw-r--r--usr/src/uts/sun4u/sys/dmfe_impl.h3
-rw-r--r--usr/src/uts/sun4u/sys/machsystm.h1
-rw-r--r--usr/src/uts/sun4u/sys/rmc_comm.h4
-rw-r--r--usr/src/uts/sun4u/sys/todds1337.h10
-rw-r--r--usr/src/uts/sun4v/sys/machsystm.h1
33 files changed, 1722 insertions, 490 deletions
diff --git a/usr/src/pkgdefs/SUNWhea/prototype_com b/usr/src/pkgdefs/SUNWhea/prototype_com
index a726754cda..c8bc84cebf 100644
--- a/usr/src/pkgdefs/SUNWhea/prototype_com
+++ b/usr/src/pkgdefs/SUNWhea/prototype_com
@@ -722,6 +722,7 @@ f none usr/include/sys/ddi_intr.h 644 root bin
f none usr/include/sys/ddi_intr_impl.h 644 root bin
f none usr/include/sys/ddi_isa.h 644 root bin
f none usr/include/sys/ddi_obsolete.h 644 root bin
+f none usr/include/sys/ddi_timer.h 644 root bin
f none usr/include/sys/ddidevmap.h 644 root bin
f none usr/include/sys/ddidmareq.h 644 root bin
f none usr/include/sys/ddimapreq.h 644 root bin
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index aa2c152665..2440d06952 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -119,6 +119,7 @@ GENUNIX_OBJS += \
ddi_intr_impl.o \
ddi_nodeid.o \
ddi_strtol.o \
+ ddi_timer.o \
devcfg.o \
devcache.o \
device.o \
diff --git a/usr/src/uts/common/io/avintr.c b/usr/src/uts/common/io/avintr.c
index 6f3549aa8b..4cf39ac851 100644
--- a/usr/src/uts/common/io/avintr.c
+++ b/usr/src/uts/common/io/avintr.c
@@ -88,10 +88,26 @@ struct autovec *nmivect = NULL;
struct av_head autovect[MAX_VECT];
struct av_head softvect[LOCK_LEVEL + 1];
kmutex_t av_lock;
+/*
+ * These are software interrupt handlers dedicated to ddi timer.
+ * The interrupt levels up to 10 are supported, but high interrupts
+ * must not be used there.
+ */
+ddi_softint_hdl_impl_t softlevel_hdl[DDI_IPL_10] = {
+ {0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 1 */
+ {0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 2 */
+ {0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 3 */
+ {0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 4 */
+ {0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 5 */
+ {0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 6 */
+ {0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 7 */
+ {0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 8 */
+ {0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 9 */
+ {0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 10 */
+};
ddi_softint_hdl_impl_t softlevel1_hdl =
{0, NULL, NULL, NULL, 0, NULL, NULL, NULL};
-
/*
* clear/check softint pending flag corresponding for
* the current CPU
@@ -578,10 +594,21 @@ kdi_siron(void)
void
siron(void)
{
+ /* Level 1 software interrupt */
(*setsoftint)(1, softlevel1_hdl.ih_pending);
}
/*
+ * Trigger software interrupts dedicated to ddi timer.
+ */
+void
+sir_on(int level)
+{
+ ASSERT(level >= DDI_IPL_1 && level <= DDI_IPL_10);
+ (*setsoftint)(level, softlevel_hdl[level-1].ih_pending);
+}
+
+/*
* The handler which is executed on the target CPU.
*/
/*ARGSUSED*/
diff --git a/usr/src/uts/common/io/bge/bge_impl.h b/usr/src/uts/common/io/bge/bge_impl.h
index f87f337f4a..8b2bbaebc7 100644
--- a/usr/src/uts/common/io/bge/bge_impl.h
+++ b/usr/src/uts/common/io/bge/bge_impl.h
@@ -52,7 +52,6 @@ extern "C" {
#include <sys/dlpi.h>
#include <sys/devops.h>
#include <sys/debug.h>
-#include <sys/cyclic.h>
#include <sys/conf.h>
#include <netinet/ip6.h>
@@ -63,7 +62,7 @@ extern "C" {
#include <inet/nd.h>
#include <sys/pattr.h>
-#include <sys/dditypes.h>
+#include <sys/disp.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
@@ -770,7 +769,7 @@ typedef struct bge {
ddi_acc_handle_t cfg_handle; /* DDI I/O handle */
ddi_acc_handle_t io_handle; /* DDI I/O handle */
void *io_regs; /* mapped registers */
- cyclic_id_t cyclic_id; /* cyclic callback */
+ ddi_periodic_t periodic_id; /* periodical callback */
ddi_softintr_t factotum_id; /* factotum callback */
ddi_softintr_t drain_id; /* reschedule callback */
diff --git a/usr/src/uts/common/io/bge/bge_main2.c b/usr/src/uts/common/io/bge/bge_main2.c
index 4ed230297b..088d0a5aaf 100644
--- a/usr/src/uts/common/io/bge/bge_main2.c
+++ b/usr/src/uts/common/io/bge/bge_main2.c
@@ -33,7 +33,7 @@
* This is the string displayed by modinfo, etc.
* Make sure you keep the version ID up to date!
*/
-static char bge_ident[] = "Broadcom Gb Ethernet v0.59";
+static char bge_ident[] = "Broadcom Gb Ethernet v0.60";
/*
* Property names
@@ -525,7 +525,6 @@ bge_m_start(void *arg)
if (bgep->asf_enabled) {
if ((bgep->asf_status == ASF_STAT_RUN) &&
(bgep->asf_pseudostop)) {
-
bgep->bge_mac_state = BGE_MAC_STARTED;
mutex_exit(bgep->genlock);
return (0);
@@ -2333,10 +2332,9 @@ bge_unattach(bge_t *bgep)
* Quiesce the PHY and MAC (leave it reset but still powered).
* Clean up and free all BGE data structures
*/
- if (bgep->cyclic_id) {
- mutex_enter(&cpu_lock);
- cyclic_remove(bgep->cyclic_id);
- mutex_exit(&cpu_lock);
+ if (bgep->periodic_id != NULL) {
+ ddi_periodic_delete(bgep->periodic_id);
+ bgep->periodic_id = NULL;
}
if (bgep->progress & PROGRESS_KSTATS)
bge_fini_kstats(bgep);
@@ -2484,8 +2482,6 @@ bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
bge_t *bgep; /* Our private data */
mac_register_t *macp;
chip_id_t *cidp;
- cyc_handler_t cychand;
- cyc_time_t cyctime;
caddr_t regs;
int instance;
int err;
@@ -2873,14 +2869,12 @@ bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
if (err != 0)
goto attach_fail;
- cychand.cyh_func = bge_chip_cyclic;
- cychand.cyh_arg = bgep;
- cychand.cyh_level = CY_LOCK_LEVEL;
- cyctime.cyt_when = 0;
- cyctime.cyt_interval = BGE_CYCLIC_PERIOD;
- mutex_enter(&cpu_lock);
- bgep->cyclic_id = cyclic_add(&cychand, &cyctime);
- mutex_exit(&cpu_lock);
+ /*
+ * Register a periodical handler.
+ * bge_chip_cyclic() is invoked in kernel context.
+ */
+ bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
+ BGE_CYCLIC_PERIOD, DDI_IPL_0);
bgep->progress |= PROGRESS_READY;
ASSERT(bgep->bge_guard == BGE_GUARD);
diff --git a/usr/src/uts/common/io/bscv.c b/usr/src/uts/common/io/bscv.c
index 9353af7323..396b0a7d5c 100644
--- a/usr/src/uts/common/io/bscv.c
+++ b/usr/src/uts/common/io/bscv.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -42,7 +42,6 @@
#include <sys/stream.h>
#include <sys/systm.h>
#include <sys/conf.h>
-#include <sys/cyclic.h>
#include <sys/reboot.h>
#include <sys/modctl.h>
#include <sys/mkdev.h>
@@ -52,7 +51,6 @@
#include <sys/consdev.h>
#include <sys/file.h>
#include <sys/stat.h>
-#include <sys/time.h>
#include <sys/disp.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
@@ -223,7 +221,6 @@ static boolean_t bscv_panic_callback(void *, int);
static void bscv_watchdog_cyclic_add(bscv_soft_state_t *);
static void bscv_watchdog_cyclic_remove(bscv_soft_state_t *);
-extern kmutex_t cpu_lock; /* needed for cyclics */
static uint8_t wdog_reset_on_timeout = 1;
#define WDOG_ON 1
@@ -623,7 +620,7 @@ bscv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
ssp->cssp_prog = B_FALSE;
ssp->task_flags = 0;
ssp->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, "debug", 0);
+ DDI_PROP_DONTPASS, "debug", 0);
ssp->majornum = ddi_driver_major(dip);
ssp->minornum = BSCV_INST_TO_MINOR(instance);
#if defined(__i386) || defined(__amd64)
@@ -703,7 +700,7 @@ bscv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
if (bscv_watchdog_enable && (boothowto & RB_DEBUG)) {
bscv_watchdog_available = 0;
cmn_err(CE_WARN, "bscv: kernel debugger "
- "detected: hardware watchdog disabled");
+ "detected: hardware watchdog disabled");
}
/*
@@ -856,7 +853,7 @@ bscv_map_regs(bscv_soft_state_t *ssp)
* of elements of the regs property array.
*/
retval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, ssp->dip,
- DDI_PROP_DONTPASS, "reg", &props, &nelements);
+ DDI_PROP_DONTPASS, "reg", &props, &nelements);
/* We don't need props anymore. Free memory if it was allocated */
if (retval == DDI_PROP_SUCCESS)
@@ -890,8 +887,8 @@ bscv_map_regs(bscv_soft_state_t *ssp)
for (i = 0; i < ssp->nchannels; i++) {
retval = ddi_regs_map_setup(ssp->dip, i,
- (caddr_t *)&ssp->channel[i].regs,
- 0, 0, &ssp->attr, &ssp->channel[i].handle);
+ (caddr_t *)&ssp->channel[i].regs,
+ 0, 0, &ssp->attr, &ssp->channel[i].handle);
if (retval != DDI_SUCCESS) {
bscv_trace(ssp, 'A', "bscv_map_regs", "map failure"
" 0x%x on space %d", retval, i);
@@ -988,9 +985,9 @@ bscv_full_stop(bscv_soft_state_t *ssp)
#if defined(__i386) || defined(__amd64)
if (ddi_in_panic()) {
- bscv_inform_bsc(ssp, BSC_INFORM_PANIC);
+ bscv_inform_bsc(ssp, BSC_INFORM_PANIC);
} else {
- bscv_inform_bsc(ssp, BSC_INFORM_OFFLINE);
+ bscv_inform_bsc(ssp, BSC_INFORM_OFFLINE);
}
#endif /* __i386 || __amd64 */
@@ -1009,7 +1006,7 @@ bscv_full_stop(bscv_soft_state_t *ssp)
break;
}
bscv_setclear8_volatile(ssp, chan_general,
- EBUS_IDX_ALARM, bits2set, bits2clear);
+ EBUS_IDX_ALARM, bits2set, bits2clear);
bscv_exit(ssp);
}
@@ -1367,10 +1364,10 @@ bscv_rep_rw8(bscv_soft_state_t *ssp, int chan, uint8_t *host_addr,
} else {
if (is_write) {
bscv_put8_once(ssp, chan,
- dev_addr, *host_addr++);
+ dev_addr, *host_addr++);
} else {
*host_addr++ = bscv_get8_once(ssp, chan,
- dev_addr);
+ dev_addr);
}
/* We need this because _once routines don't do it */
if (ssp->command_error != 0) {
@@ -1514,7 +1511,7 @@ bscv_put8_once(bscv_soft_state_t *ssp, int chan, bscv_addr_t addr, uint8_t val)
/* Do the access and get fault code - may take a long time */
ddi_put8(ssp->channel[chan].handle,
- &ssp->channel[chan].regs[addr], val);
+ &ssp->channel[chan].regs[addr], val);
fault = ddi_get32(ssp->channel[chan].handle,
(uint32_t *)BSC_NEXUS_ADDR(ssp, chan, 0, LOMBUS_FAULT_REG));
@@ -1572,7 +1569,7 @@ bscv_get8_once(bscv_soft_state_t *ssp, int chan, bscv_addr_t addr)
/* Do the access and get fault code - may take a long time */
val = ddi_get8(ssp->channel[chan].handle,
- &ssp->channel[chan].regs[addr]);
+ &ssp->channel[chan].regs[addr]);
fault = ddi_get32(ssp->channel[chan].handle,
(uint32_t *)BSC_NEXUS_ADDR(ssp, chan, 0, LOMBUS_FAULT_REG));
ssp->command_error = fault;
@@ -1659,9 +1656,9 @@ bscv_resync_comms(bscv_soft_state_t *ssp, int chan)
return;
}
if (command_error >= LOMBUS_ERR_BASE &&
- command_error != LOMBUS_ERR_REG_NUM &&
- command_error != LOMBUS_ERR_REG_SIZE &&
- command_error != LOMBUS_ERR_TIMEOUT) {
+ command_error != LOMBUS_ERR_REG_NUM &&
+ command_error != LOMBUS_ERR_REG_SIZE &&
+ command_error != LOMBUS_ERR_TIMEOUT) {
/* Resync here to make sure that the lom is talking */
cmn_err(CE_WARN, "!bscv_resync_comms: "
"Attempting comms resync after comms fault 0x%x",
@@ -1670,7 +1667,7 @@ bscv_resync_comms(bscv_soft_state_t *ssp, int chan)
/* Probe */
fault = ddi_get32(ssp->channel[chan].handle,
(uint32_t *)BSC_NEXUS_ADDR(ssp, chan, 0,
- LOMBUS_PROBE_REG));
+ LOMBUS_PROBE_REG));
if (fault == 0) {
break;
@@ -1719,9 +1716,9 @@ bscv_window_setup(bscv_soft_state_t *ssp)
return (ssp->eeinfo_valid);
}
ssp->eeprom_size =
- bscv_get8(ssp, chan_general, EBUS_IDX_EEPROM_SIZE_KB) * 1024;
+ bscv_get8(ssp, chan_general, EBUS_IDX_EEPROM_SIZE_KB) * 1024;
ssp->eventlog_start = bscv_get16(ssp, chan_general,
- EBUS_IDX_LOG_START_HI);
+ EBUS_IDX_LOG_START_HI);
/*
* The log does not run to the end of the EEPROM because it is a
@@ -2056,11 +2053,11 @@ bscv_ioc_dogstate(bscv_soft_state_t *ssp, intptr_t arg, int mode)
dogstate.dog_enable = (dogval & EBUS_WDOG_ENABLE) ? 1 : 0;
dogstate.reset_enable = (dogval & EBUS_WDOG_RST) ? 1 : 0;
dogstate.dog_timeout = bscv_get8_locked(ssp, chan_general,
- EBUS_IDX_WDOG_TIME, &res);
+ EBUS_IDX_WDOG_TIME, &res);
if ((res == 0) &&
(ddi_copyout((caddr_t)&dogstate,
- (caddr_t)arg, sizeof (dogstate), mode) < 0)) {
+ (caddr_t)arg, sizeof (dogstate), mode) < 0)) {
res = EFAULT;
}
return (res);
@@ -2082,7 +2079,7 @@ bscv_ioc_psustate(bscv_soft_state_t *ssp, intptr_t arg, int mode)
for (i = 0; i < MAX_PSUS; i++) {
psustat = bscv_get8_locked(ssp, chan_general,
- EBUS_IDX_PSU1_STAT + i, &res);
+ EBUS_IDX_PSU1_STAT + i, &res);
psudata.fitted[i] = psustat & EBUS_PSU_PRESENT;
psudata.output[i] = psustat & EBUS_PSU_OUTPUT;
psudata.supplyb[i] = psustat & EBUS_PSU_INPUTB;
@@ -2153,7 +2150,7 @@ bscv_ioc_fledstate(bscv_soft_state_t *ssp, intptr_t arg, int mode)
}
if ((res == 0) &&
(ddi_copyout((caddr_t)&fled_info, (caddr_t)arg,
- sizeof (fled_info), mode) < 0)) {
+ sizeof (fled_info), mode) < 0)) {
res = EFAULT;
}
return (res);
@@ -2226,7 +2223,7 @@ bscv_ioc_ledstate(bscv_soft_state_t *ssp, intptr_t arg, int mode)
/* copy out lom_state */
if ((res == 0) &&
(ddi_copyout((caddr_t)&led_state, (caddr_t)arg,
- sizeof (lom_led_state_t), mode) < 0)) {
+ sizeof (lom_led_state_t), mode) < 0)) {
res = EFAULT;
}
return (res);
@@ -2248,15 +2245,15 @@ bscv_ioc_info(bscv_soft_state_t *ssp, intptr_t arg, int mode)
int res = 0;
info.ser_char = bscv_get8_locked(ssp, chan_general, EBUS_IDX_ESCAPE,
- &res);
+ &res);
info.a3mode = WATCHDOG;
info.fver = bscv_get8_locked(ssp, chan_general, EBUS_IDX_FW_REV, &res);
csum = bscv_get8_locked(ssp, chan_general, EBUS_IDX_CHECK_HI, &res)
- << 8;
+ << 8;
csum |= bscv_get8_locked(ssp, chan_general, EBUS_IDX_CHECK_LO, &res);
info.fchksum = csum;
info.prod_rev = bscv_get8_locked(ssp, chan_general, EBUS_IDX_MODEL_REV,
- &res);
+ &res);
for (i = 0; i < sizeof (info.prod_id); i++) {
info.prod_id[i] = bscv_get8_locked(ssp,
chan_general, EBUS_IDX_MODEL_ID1 + i, &res);
@@ -2292,9 +2289,9 @@ bscv_ioc_mread(bscv_soft_state_t *ssp, intptr_t arg, int mode)
EBUS_IDX_MODEL_ID1 + i, &res);
}
mprog.mod_rev = bscv_get8_locked(ssp, chan_general, EBUS_IDX_MODEL_REV,
- &res);
+ &res);
mprog.config = bscv_get8_locked(ssp, chan_general, EBUS_IDX_CONFIG,
- &res);
+ &res);
/* Read the fan calibration values */
fanz = sizeof (mprog.fanhz) / sizeof (mprog.fanhz[0]);
@@ -2324,8 +2321,8 @@ bscv_ioc_volts(bscv_soft_state_t *ssp, intptr_t arg, int mode)
int res = 0;
supply = (bscv_get8_locked(ssp, chan_general, EBUS_IDX_SUPPLY_HI, &res)
- << 8) | bscv_get8_locked(ssp, chan_general, EBUS_IDX_SUPPLY_LO,
- &res);
+ << 8) | bscv_get8_locked(ssp, chan_general, EBUS_IDX_SUPPLY_LO,
+ &res);
for (i = 0; i < ssp->volts.num; i++) {
ssp->volts.status[i] = (supply >> i) & 1;
@@ -2333,7 +2330,7 @@ bscv_ioc_volts(bscv_soft_state_t *ssp, intptr_t arg, int mode)
if ((res == 0) &&
(ddi_copyout((caddr_t)&ssp->volts, (caddr_t)arg,
- sizeof (ssp->volts), mode) < 0)) {
+ sizeof (ssp->volts), mode) < 0)) {
res = EFAULT;
}
return (res);
@@ -2350,14 +2347,14 @@ bscv_ioc_stats(bscv_soft_state_t *ssp, intptr_t arg, int mode)
int res = 0;
status = bscv_get8_locked(ssp, chan_general, EBUS_IDX_CBREAK_STATUS,
- &res);
+ &res);
for (i = 0; i < ssp->sflags.num; i++) {
ssp->sflags.status[i] = (int)((status >> i) & 1);
}
if ((res == 0) &&
(ddi_copyout((caddr_t)&ssp->sflags, (caddr_t)arg,
- sizeof (ssp->sflags), mode) < 0)) {
+ sizeof (ssp->sflags), mode) < 0)) {
res = EFAULT;
}
return (res);
@@ -2392,7 +2389,7 @@ bscv_ioc_temp(bscv_soft_state_t *ssp, intptr_t arg, int mode)
bcopy(ssp->temps.name_ov, temps.name_ov, sizeof (temps.name_ov));
temps.num_ov = ssp->temps.num_ov;
status_ov = bscv_get8_locked(ssp, chan_general, EBUS_IDX_OTEMP_STATUS,
- &res);
+ &res);
for (i = 0; i < ssp->temps.num_ov; i++) {
ssp->temps.status_ov[i] = (status_ov >> i) & 1;
}
@@ -2530,16 +2527,16 @@ bscv_ioc_eventlog2(bscv_soft_state_t *ssp, intptr_t arg, int mode)
if (level <= eventlog2->level) {
/* Arggh why the funny byte ordering 3, 2, 0, 1 */
eventlog2->code[events_recorded] =
- ((unsigned)event.ev_event |
- ((unsigned)event.ev_subsys << 8) |
- ((unsigned)event.ev_resource << 16) |
- ((unsigned)event.ev_detail << 24));
+ ((unsigned)event.ev_event |
+ ((unsigned)event.ev_subsys << 8) |
+ ((unsigned)event.ev_resource << 16) |
+ ((unsigned)event.ev_detail << 24));
eventlog2->time[events_recorded] =
- ((unsigned)event.ev_data[0] |
- ((unsigned)event.ev_data[1] << 8) |
- ((unsigned)event.ev_data[3] << 16) |
- ((unsigned)event.ev_data[2] << 24));
+ ((unsigned)event.ev_data[0] |
+ ((unsigned)event.ev_data[1] << 8) |
+ ((unsigned)event.ev_data[3] << 16) |
+ ((unsigned)event.ev_data[2] << 24));
bscv_build_eventstring(ssp,
&event, eventlog2->string[events_recorded],
@@ -2555,7 +2552,7 @@ bscv_ioc_eventlog2(bscv_soft_state_t *ssp, intptr_t arg, int mode)
if ((res == 0) &&
(ddi_copyout((caddr_t)eventlog2, (caddr_t)arg,
- sizeof (lom_eventlog2_t), mode) < 0)) {
+ sizeof (lom_eventlog2_t), mode) < 0)) {
res = EFAULT;
}
@@ -2583,17 +2580,17 @@ bscv_ioc_info2(bscv_soft_state_t *ssp, intptr_t arg, int mode)
info2.fver = bscv_get8_locked(ssp, chan_general, EBUS_IDX_FW_REV, &res);
csum = bscv_get8_locked(ssp, chan_general, EBUS_IDX_CHECK_HI, &res)
- << 8;
+ << 8;
csum |= bscv_get8_locked(ssp, chan_general, EBUS_IDX_CHECK_LO, &res);
info2.fchksum = csum;
info2.prod_rev = bscv_get8_locked(ssp, chan_general,
- EBUS_IDX_MODEL_REV, &res);
+ EBUS_IDX_MODEL_REV, &res);
for (i = 0; i < sizeof (info2.prod_id); i++) {
info2.prod_id[i] = bscv_get8_locked(ssp, chan_general,
EBUS_IDX_MODEL_ID1 + i, &res);
}
info2.serial_config = bscv_get8_locked(ssp, chan_general,
- EBUS_IDX_SER_TIMEOUT, &res);
+ EBUS_IDX_SER_TIMEOUT, &res);
if (bscv_get8_locked(ssp, chan_general, EBUS_IDX_CONFIG_MISC, &res) &
EBUS_CONFIG_MISC_SECURITY_ENABLED) {
info2.serial_config |= LOM_SER_SECURITY;
@@ -2607,14 +2604,14 @@ bscv_ioc_info2(bscv_soft_state_t *ssp, intptr_t arg, int mode)
info2.serial_config |= LOM_DISABLE_WDOG_BREAK;
}
info2.baud_rate = bscv_get8_locked(ssp, chan_general,
- EBUS_IDX_SER_BAUD, &res);
+ EBUS_IDX_SER_BAUD, &res);
info2.serial_hw_config =
- ((int)bscv_get8_locked(ssp, chan_general,
- EBUS_IDX_SER_CHARMODE, &res) |
- ((int)bscv_get8_locked(ssp, chan_general,
- EBUS_IDX_SER_FLOWCTL, &res) << 8) |
- ((int)bscv_get8_locked(ssp, chan_general,
- EBUS_IDX_SER_MODEMTYPE, &res) << 16));
+ ((int)bscv_get8_locked(ssp, chan_general,
+ EBUS_IDX_SER_CHARMODE, &res) |
+ ((int)bscv_get8_locked(ssp, chan_general,
+ EBUS_IDX_SER_FLOWCTL, &res) << 8) |
+ ((int)bscv_get8_locked(ssp, chan_general,
+ EBUS_IDX_SER_MODEMTYPE, &res) << 16));
/*
* There is no phone home support on the blade platform. We hardcode
@@ -2686,7 +2683,7 @@ bscv_ioc_test(bscv_soft_state_t *ssp, intptr_t arg, int mode)
* and wait for the return code.
*/
bscv_put8(ssp, chan_general,
- EBUS_IDX_SELFTEST0 + testnum, testarg);
+ EBUS_IDX_SELFTEST0 + testnum, testarg);
if (bscv_faulty(ssp)) {
res = EIO;
} else {
@@ -2740,7 +2737,7 @@ bscv_ioc_mprog2(bscv_soft_state_t *ssp, intptr_t arg, int mode)
base_addr = (mprog2.addr_space - 240) * data_size;
eeprom_size = bscv_get8(ssp, chan_general, EBUS_IDX_EEPROM_SIZE_KB) *
- 1024;
+ 1024;
if (bscv_faulty(ssp)) {
bscv_exit(ssp);
@@ -2815,7 +2812,7 @@ bscv_ioc_mread2(bscv_soft_state_t *ssp, intptr_t arg, int mode)
data_size = 255;
base_addr = (mprog2.addr_space - 240) * data_size;
eeprom_size = bscv_get8(ssp, chan_general, EBUS_IDX_EEPROM_SIZE_KB) *
- 1024;
+ 1024;
if (bscv_faulty(ssp)) {
bscv_exit(ssp);
@@ -3007,7 +3004,7 @@ bscv_event_daemon(void *arg)
utsname.nodename);
ssp->nodename_change = B_TRUE;
(void) strncpy(ssp->last_nodename, utsname.nodename,
- sizeof (ssp->last_nodename));
+ sizeof (ssp->last_nodename));
/* enforce null termination */
ssp->last_nodename[sizeof (ssp->last_nodename) - 1] =
'\0';
@@ -3017,12 +3014,12 @@ bscv_event_daemon(void *arg)
if (((ssp->task_flags & TASK_PAUSE_FLG) == 0) &&
fault_cnt == 0 && ssp->cssp_prog == B_FALSE &&
(ssp->event_waiting || ssp->status_change ||
- ssp->nodename_change || ssp->watchdog_change)) {
+ ssp->nodename_change || ssp->watchdog_change)) {
do_events = ssp->event_waiting;
ssp->event_waiting = B_FALSE;
ssp->task_flags |= do_events ?
- TASK_EVENT_PENDING_FLG : 0;
+ TASK_EVENT_PENDING_FLG : 0;
do_status = ssp->status_change;
ssp->status_change = B_FALSE;
do_nodename = ssp->nodename_change;
@@ -3119,7 +3116,7 @@ bscv_event_daemon(void *arg)
ASSERT(!(ssp->task_flags & TASK_EVENT_PENDING_FLG));
ssp->task_flags &=
- ~(TASK_STOP_FLG | TASK_ALIVE_FLG | TASK_EVENT_CONSUMER_FLG);
+ ~(TASK_STOP_FLG | TASK_ALIVE_FLG | TASK_EVENT_CONSUMER_FLG);
mutex_exit(&ssp->task_mu);
bscv_trace(ssp, 'D', "bscv_event_daemon",
@@ -3161,8 +3158,8 @@ bscv_stop_event_daemon(bscv_soft_state_t *ssp)
/* Wait for task daemon to stop running. */
for (try = 0;
- ((ssp->task_flags & TASK_ALIVE_FLG) && try < 10);
- try++) {
+ ((ssp->task_flags & TASK_ALIVE_FLG) && try < 10);
+ try++) {
/* Signal that the task daemon should stop */
ssp->task_flags |= TASK_STOP_FLG;
cv_signal(&ssp->task_cv);
@@ -3211,10 +3208,10 @@ bscv_pause_event_daemon(bscv_soft_state_t *ssp)
/* Wait for task daemon to pause. */
for (try = 0;
- (!(ssp->task_flags & TASK_SLEEPING_FLG) &&
- (ssp->task_flags & TASK_ALIVE_FLG) &&
- try < 10);
- try++) {
+ (!(ssp->task_flags & TASK_SLEEPING_FLG) &&
+ (ssp->task_flags & TASK_ALIVE_FLG) &&
+ try < 10);
+ try++) {
/* Paranoia */
ssp->task_flags |= TASK_PAUSE_FLG;
cv_signal(&ssp->task_cv);
@@ -3392,7 +3389,7 @@ bscv_event_validate(bscv_soft_state_t *ssp, uint32_t newptr, uint8_t unread)
(newptr >= (ssp->eventlog_start + ssp->eventlog_size))) {
if (!ssp->event_fault_reported) {
cmn_err(CE_WARN, "Event pointer out of range. "
- "Cannot read events.");
+ "Cannot read events.");
ssp->event_fault_reported = B_TRUE;
}
return (-1);
@@ -3531,8 +3528,8 @@ bscv_event_snprintgmttime(char *buf, size_t bufsz, todinfo_t t)
year = 1900 + t.tod_year;
return (snprintf(buf, bufsz, "%04d-%02d-%02d %02d:%02d:%02dZ",
- year, t.tod_month, t.tod_day, t.tod_hour,
- t.tod_min, t.tod_sec));
+ year, t.tod_month, t.tod_day, t.tod_hour,
+ t.tod_min, t.tod_sec));
}
/*
@@ -3569,19 +3566,19 @@ bscv_build_eventstring(bscv_soft_state_t *ssp, lom_event_t *event,
/* time */
bsctm = (((uint32_t)event->ev_data[0]) << 24) |
- (((uint32_t)event->ev_data[1]) << 16) |
- (((uint32_t)event->ev_data[2]) << 8) |
- ((uint32_t)event->ev_data[3]);
+ (((uint32_t)event->ev_data[1]) << 16) |
+ (((uint32_t)event->ev_data[2]) << 8) |
+ ((uint32_t)event->ev_data[3]);
if (bsctm < BSC_TIME_SANITY) {
/* offset */
buf += snprintf(buf, bufend-buf, "+P%dd%02dh%02dm%02ds",
- (int)(bsctm/86400), (int)(bsctm/3600%24),
- (int)(bsctm/60%60), (int)(bsctm%60));
+ (int)(bsctm/86400), (int)(bsctm/3600%24),
+ (int)(bsctm/60%60), (int)(bsctm%60));
} else {
/* absolute time */
mutex_enter(&tod_lock);
buf += bscv_event_snprintgmttime(buf, bufend-buf,
- utc_to_tod(bsctm));
+ utc_to_tod(bsctm));
mutex_exit(&tod_lock);
}
buf += snprintf(buf, bufend-buf, " ");
@@ -3640,7 +3637,7 @@ bscv_build_eventstring(bscv_soft_state_t *ssp, lom_event_t *event,
switch (subsystem) {
case EVENT_SUBSYS_TEMP:
if ((eventtype != EVENT_RECOVERED) &&
- eventtype != EVENT_DEVICE_INACCESSIBLE) {
+ eventtype != EVENT_DEVICE_INACCESSIBLE) {
buf += snprintf(buf, bufend - buf, " - %d degC",
(int8_t)event->ev_detail);
}
@@ -3680,7 +3677,7 @@ bscv_build_eventstring(bscv_soft_state_t *ssp, lom_event_t *event,
buf += snprintf(buf, bufend - buf, " by user");
break;
case LOM_RESET_DETAIL_REPROGRAMMING:
- buf += snprintf(buf, bufend - buf,
+ buf += snprintf(buf, bufend - buf,
" after flash download");
break;
default:
@@ -3960,7 +3957,7 @@ bscv_status(bscv_soft_state_t *ssp, uint8_t state_chng, uint8_t dev_no)
if ((state_chng & EBUS_STATE_FAN) && ((dev_no - 1) < MAX_FANS)) {
fanspeed = bscv_get8(ssp, chan_general,
- EBUS_IDX_FAN1_SPEED + dev_no - 1);
+ EBUS_IDX_FAN1_SPEED + dev_no - 1);
/*
* Only remember fanspeeds which are real values or
* NOT PRESENT values.
@@ -3973,7 +3970,7 @@ bscv_status(bscv_soft_state_t *ssp, uint8_t state_chng, uint8_t dev_no)
if ((state_chng & EBUS_STATE_PSU) && ((dev_no - 1) < MAX_PSUS)) {
(void) bscv_get8(ssp, chan_general,
- EBUS_IDX_PSU1_STAT + dev_no - 1);
+ EBUS_IDX_PSU1_STAT + dev_no - 1);
}
if (state_chng & EBUS_STATE_GP) {
@@ -3987,7 +3984,7 @@ bscv_status(bscv_soft_state_t *ssp, uint8_t state_chng, uint8_t dev_no)
if ((state_chng & EBUS_STATE_TEMPERATURE) &&
((dev_no - 1) < MAX_TEMPS)) {
temp = bscv_get8(ssp, chan_general,
- EBUS_IDX_TEMP1 + dev_no - 1);
+ EBUS_IDX_TEMP1 + dev_no - 1);
/*
* Only remember temperatures which are real values or
* a NOT PRESENT value.
@@ -4105,77 +4102,77 @@ bscv_sysevent(bscv_soft_state_t *ssp, lom_event_t *event)
/* Map ev_subsys to sysevent class/sub-class */
switch (EVENT_DECODE_SUBSYS(event->ev_subsys)) {
- case EVENT_SUBSYS_NONE:
+ case EVENT_SUBSYS_NONE:
break;
- case EVENT_SUBSYS_ALARM:
+ case EVENT_SUBSYS_ALARM:
break;
- case EVENT_SUBSYS_TEMP:
+ case EVENT_SUBSYS_TEMP:
class = EC_ENV, subclass = ESC_ENV_TEMP;
res_id = bscv_get_label(ssp->temps.name, ssp->temps.num,
event->ev_resource - 1);
switch (event->ev_event) {
- case EVENT_SEVERE_OVERHEAT:
+ case EVENT_SEVERE_OVERHEAT:
fru_state = ENV_FAILED;
break;
- case EVENT_OVERHEAT:
+ case EVENT_OVERHEAT:
fru_state = ENV_WARNING;
break;
- case EVENT_NO_OVERHEAT:
+ case EVENT_NO_OVERHEAT:
fru_state = ENV_OK;
break;
- default:
+ default:
return;
}
break;
- case EVENT_SUBSYS_OVERTEMP:
+ case EVENT_SUBSYS_OVERTEMP:
break;
- case EVENT_SUBSYS_FAN:
+ case EVENT_SUBSYS_FAN:
class = EC_ENV, subclass = ESC_ENV_FAN;
res_id = bscv_get_label(ssp->fan_names, ssp->num_fans,
event->ev_resource - 1);
switch (event->ev_event) {
- case EVENT_FAILED:
+ case EVENT_FAILED:
fru_state = ENV_FAILED;
break;
- case EVENT_RECOVERED:
+ case EVENT_RECOVERED:
fru_state = ENV_OK;
break;
- default:
+ default:
return;
}
break;
- case EVENT_SUBSYS_SUPPLY:
+ case EVENT_SUBSYS_SUPPLY:
class = EC_ENV, subclass = ESC_ENV_POWER;
res_id = bscv_get_label(ssp->sflags.name, ssp->sflags.num,
event->ev_resource - 1);
switch (event->ev_event) {
- case EVENT_FAILED:
+ case EVENT_FAILED:
fru_state = ENV_FAILED;
break;
- case EVENT_RECOVERED:
+ case EVENT_RECOVERED:
fru_state = ENV_OK;
break;
- default:
+ default:
return;
}
break;
- case EVENT_SUBSYS_BREAKER:
+ case EVENT_SUBSYS_BREAKER:
break;
- case EVENT_SUBSYS_PSU:
+ case EVENT_SUBSYS_PSU:
break;
- case EVENT_SUBSYS_USER:
+ case EVENT_SUBSYS_USER:
break;
- case EVENT_SUBSYS_PHONEHOME:
+ case EVENT_SUBSYS_PHONEHOME:
break;
- case EVENT_SUBSYS_LOM:
+ case EVENT_SUBSYS_LOM:
break;
- case EVENT_SUBSYS_HOST:
+ case EVENT_SUBSYS_HOST:
break;
- case EVENT_SUBSYS_EVENTLOG:
+ case EVENT_SUBSYS_EVENTLOG:
break;
- case EVENT_SUBSYS_EXTRA:
+ case EVENT_SUBSYS_EXTRA:
break;
- case EVENT_SUBSYS_LED:
+ case EVENT_SUBSYS_LED:
if (event->ev_event != EVENT_FAULT_LED &&
event->ev_event != EVENT_STATE_CHANGE)
return;
@@ -4194,31 +4191,31 @@ bscv_sysevent(bscv_soft_state_t *ssp, lom_event_t *event)
event->ev_resource - 1);
switch (event->ev_detail) {
- case LOM_LED_STATE_ON_STEADY:
+ case LOM_LED_STATE_ON_STEADY:
fru_state = ENV_LED_ON;
break;
- case LOM_LED_STATE_ON_FLASHING:
- case LOM_LED_STATE_ON_SLOWFLASH:
+ case LOM_LED_STATE_ON_FLASHING:
+ case LOM_LED_STATE_ON_SLOWFLASH:
fru_state = ENV_LED_BLINKING;
break;
- case LOM_LED_STATE_OFF:
+ case LOM_LED_STATE_OFF:
fru_state = ENV_LED_OFF;
break;
- case LOM_LED_STATE_INACCESSIBLE:
+ case LOM_LED_STATE_INACCESSIBLE:
fru_state = ENV_LED_INACCESSIBLE;
break;
- case LOM_LED_STATE_STANDBY:
+ case LOM_LED_STATE_STANDBY:
fru_state = ENV_LED_STANDBY;
break;
- case LOM_LED_STATE_NOT_PRESENT:
+ case LOM_LED_STATE_NOT_PRESENT:
fru_state = ENV_LED_NOT_PRESENT;
break;
- default:
+ default:
fru_state = ENV_LED_INACCESSIBLE;
break;
}
break;
- default :
+ default :
break;
}
@@ -4300,7 +4297,7 @@ bscv_prog(bscv_soft_state_t *ssp, intptr_t arg, int mode)
} else {
if (ssp->image == NULL) {
ssp->image = (uint8_t *)kmem_zalloc(
- BSC_IMAGE_MAX_SIZE, KM_SLEEP);
+ BSC_IMAGE_MAX_SIZE, KM_SLEEP);
}
res = bscv_prog_receive_image(ssp, prog,
ssp->image, BSC_IMAGE_MAX_SIZE);
@@ -4352,7 +4349,7 @@ bscv_get_pagesize(bscv_soft_state_t *ssp)
ASSERT(bscv_held(ssp));
pagesize = bscv_get32(ssp, chan_prog,
- BSCVA(EBUS_CMD_SPACE_PROGRAM, EBUS_PROGRAM_PAGE0));
+ BSCVA(EBUS_CMD_SPACE_PROGRAM, EBUS_PROGRAM_PAGE0));
bscv_trace(ssp, 'U', "bscv_get_pagesize", "pagesize 0x%x", pagesize);
@@ -4376,7 +4373,7 @@ bscv_set_pagesize(bscv_soft_state_t *ssp, uint32_t pagesize)
* changes it.
*/
bscv_put32(ssp, chan_prog,
- BSCVA(EBUS_CMD_SPACE_PROGRAM, EBUS_PROGRAM_PSIZ0), pagesize);
+ BSCVA(EBUS_CMD_SPACE_PROGRAM, EBUS_PROGRAM_PSIZ0), pagesize);
return (old_pagesize);
}
@@ -4389,11 +4386,11 @@ bscv_enter_programming_mode(bscv_soft_state_t *ssp)
ASSERT(bscv_held(ssp));
bscv_put8(ssp, chan_prog,
- BSCVA(EBUS_CMD_SPACE_PROGRAM, EBUS_PROGRAM_PCSR),
- EBUS_PROGRAM_PCR_PRGMODE_ON);
+ BSCVA(EBUS_CMD_SPACE_PROGRAM, EBUS_PROGRAM_PCSR),
+ EBUS_PROGRAM_PCR_PRGMODE_ON);
retval = bscv_get8(ssp, chan_prog, BSCVA(EBUS_CMD_SPACE_PROGRAM,
- EBUS_PROGRAM_PCSR));
+ EBUS_PROGRAM_PCSR));
return (retval);
}
@@ -4415,7 +4412,7 @@ bscv_leave_programming_mode(bscv_soft_state_t *ssp, boolean_t with_jmp)
}
bscv_put8(ssp, chan_prog,
- BSCVA(EBUS_CMD_SPACE_PROGRAM, EBUS_PROGRAM_PCSR), reg);
+ BSCVA(EBUS_CMD_SPACE_PROGRAM, EBUS_PROGRAM_PCSR), reg);
}
@@ -4425,7 +4422,7 @@ bscv_set_jump_to_addr(bscv_soft_state_t *ssp, uint32_t loadaddr)
ASSERT(bscv_held(ssp));
bscv_put32(ssp, chan_prog,
- BSCVA(EBUS_CMD_SPACE_PROGRAM, EBUS_PROGRAM_PADR0), loadaddr);
+ BSCVA(EBUS_CMD_SPACE_PROGRAM, EBUS_PROGRAM_PADR0), loadaddr);
bscv_trace(ssp, 'U', "bscv_set_jump_to_addr",
"set jump to loadaddr 0x%x", loadaddr);
@@ -4555,7 +4552,7 @@ bscv_do_page_data_once(bscv_soft_state_t *ssp, uint32_t index,
for (i = size; i < pagesize; i++) {
bscv_put8(ssp, chan_prog,
BSCVA(EBUS_CMD_SPACE_PROGRAM,
- EBUS_PROGRAM_DATA),
+ EBUS_PROGRAM_DATA),
0);
}
}
@@ -4564,7 +4561,7 @@ bscv_do_page_data_once(bscv_soft_state_t *ssp, uint32_t index,
chksum = 0;
for (i = 0; i < size; i++) {
chksum = ((chksum << 3) | (chksum >> 13)) ^
- *(imagep + index + i);
+ *(imagep + index + i);
}
/* Cope with non-pagesize sized bufers */
for (; i < pagesize; i++) {
@@ -4578,7 +4575,7 @@ bscv_do_page_data_once(bscv_soft_state_t *ssp, uint32_t index,
EBUS_PROGRAM_PCR_PROGRAM);
retval = bscv_get8(ssp, chan_prog,
- BSCVA(EBUS_CMD_SPACE_PROGRAM, EBUS_PROGRAM_PCSR));
+ BSCVA(EBUS_CMD_SPACE_PROGRAM, EBUS_PROGRAM_PCSR));
*calcd_chksum = chksum;
return (retval);
@@ -4645,7 +4642,7 @@ bscv_do_pages(bscv_soft_state_t *ssp, uint32_t loadaddr, uint32_t image_size,
for (index = 0; index < image_size; index += pagesize) {
retval = bscv_do_page(ssp, loadaddr, index, image_size,
- pagesize, imagep, is_image2);
+ pagesize, imagep, is_image2);
if (bscv_faulty(ssp) || !PSR_SUCCESS(retval)) {
bscv_trace(ssp, 'U', "bscv_do_pages",
"Failed to program lom (status 0x%x)", retval);
@@ -4710,7 +4707,7 @@ bscv_prog_image(bscv_soft_state_t *ssp, boolean_t is_image2,
(void) bscv_set_pagesize(ssp, pagesize);
retval = bscv_do_pages(ssp, loadaddr, image_size, pagesize, imagep,
- is_image2);
+ is_image2);
if (bscv_faulty(ssp) || !PSR_SUCCESS(retval)) {
bscv_trace(ssp, 'U', "bscv_prog_image",
"Failed to program lom (status 0x%x)", retval);
@@ -4823,7 +4820,7 @@ bscv_prog_receive_image(bscv_soft_state_t *ssp, lom_prog_t *prog,
ssp->image2_processing = !ssp->image2_processing;
} else if ((ssp->image_ptr < sizeof (*prog_data)) ||
(prog_data->platform.bscv.size !=
- (ssp->image_ptr - sizeof (*prog_data)))) {
+ (ssp->image_ptr - sizeof (*prog_data)))) {
/* Image too small for new style image */
cmn_err(CE_WARN, "image too small");
res = EINVAL;
@@ -5129,9 +5126,7 @@ bscv_cleanup(bscv_soft_state_t *ssp)
}
#if defined(__i386) || defined(__amd64)
- mutex_enter(&cpu_lock);
bscv_watchdog_cyclic_remove(ssp);
- mutex_exit(&cpu_lock);
#endif /* __i386 || __amd64 */
ddi_soft_state_free(bscv_statep, instance);
@@ -5209,7 +5204,7 @@ static int bscv_probe_check(bscv_soft_state_t *ssp)
* sync so lets try the read again.
*/
probeval = bscv_get8(ssp, chan_general,
- EBUS_IDX_PROBEAA);
+ EBUS_IDX_PROBEAA);
if (bscv_faulty(ssp)) {
bscv_trace(ssp, 'A', "bscv_probe_check",
"Init readAA1 failed");
@@ -5603,7 +5598,7 @@ static void bscv_setup_watchdog(bscv_soft_state_t *ssp)
/* Set the timeout */
bscv_put8(ssp, chan_general,
- EBUS_IDX_WDOG_TIME, ssp->watchdog_timeout);
+ EBUS_IDX_WDOG_TIME, ssp->watchdog_timeout);
/* Set whether to reset the system on timeout */
if (ssp->watchdog_reset_on_timeout) {
@@ -5627,9 +5622,7 @@ static void bscv_setup_watchdog(bscv_soft_state_t *ssp)
#if defined(__i386) || defined(__amd64)
/* start the cyclic based watchdog patter */
- mutex_enter(&cpu_lock);
bscv_watchdog_cyclic_add(ssp);
- mutex_exit(&cpu_lock);
#endif /* __i386 || __amd64 */
ssp->progress |= BSCV_WDOG_CFG;
}
@@ -5778,7 +5771,7 @@ bscv_write_hostname(bscv_soft_state_t *ssp,
needretry = 0;
for (i = 0; i < length; i++) {
bscv_put8_once(ssp, chan_general,
- EBUS_IDX_HNAME_CHAR, host_nodename[i]);
+ EBUS_IDX_HNAME_CHAR, host_nodename[i]);
/* Retry on any error */
if (bscv_retcode(ssp) != 0) {
needretry = 1;
@@ -5832,7 +5825,7 @@ bscv_setup_static_info(bscv_soft_state_t *ssp)
*/
bzero(&ssp->volts, sizeof (lom_volts_t));
ssp->volts.num = EBUS_CONFIG2_NSUPPLY_DEC(
- bscv_get8(ssp, chan_general, EBUS_IDX_CONFIG2));
+ bscv_get8(ssp, chan_general, EBUS_IDX_CONFIG2));
if (ssp->volts.num > MAX_VOLTS) {
cmn_err(CE_WARN,
"lom: firmware reported too many voltage lines. ");
@@ -5857,7 +5850,7 @@ bscv_setup_static_info(bscv_soft_state_t *ssp)
for (i = 0; i < ssp->volts.num; i++) {
ssp->volts.shutdown_enabled[i] =
- (((mask >> i) & 1) == 0) ? 0 : 1;
+ (((mask >> i) & 1) == 0) ? 0 : 1;
}
/*
@@ -5869,7 +5862,7 @@ bscv_setup_static_info(bscv_soft_state_t *ssp)
bzero(&ssp->temps, sizeof (lom_temp_t));
ssp->temps.num = EBUS_CONFIG2_NTEMP_DEC(
- bscv_get8(ssp, chan_general, EBUS_IDX_CONFIG2));
+ bscv_get8(ssp, chan_general, EBUS_IDX_CONFIG2));
if (ssp->temps.num > MAX_TEMPS) {
cmn_err(CE_WARN,
"lom: firmware reported too many temperatures being "
@@ -5879,7 +5872,7 @@ bscv_setup_static_info(bscv_soft_state_t *ssp)
ssp->temps.num = MAX_TEMPS;
}
ssp->temps.num_ov = EBUS_CONFIG3_NOTEMP_DEC(
- bscv_get8(ssp, chan_general, EBUS_IDX_CONFIG3));
+ bscv_get8(ssp, chan_general, EBUS_IDX_CONFIG3));
if (ssp->temps.num_ov > MAX_TEMPS) {
cmn_err(CE_WARN,
"lom: firmware reported too many over temperatures being "
@@ -5973,7 +5966,7 @@ bscv_setup_static_info(bscv_soft_state_t *ssp)
* To get the fan static info we need address space 5
*/
ssp->num_fans = EBUS_CONFIG_NFAN_DEC(
- bscv_get8(ssp, chan_general, EBUS_IDX_CONFIG));
+ bscv_get8(ssp, chan_general, EBUS_IDX_CONFIG));
if (ssp->num_fans > MAX_FANS) {
cmn_err(CE_WARN,
"lom: firmware reported too many fans. ");
@@ -5985,7 +5978,7 @@ bscv_setup_static_info(bscv_soft_state_t *ssp)
for (i = 0; i < ssp->num_fans; i++) {
fanspeed = bscv_get8(ssp, chan_general,
- EBUS_IDX_FAN1_SPEED + i);
+ EBUS_IDX_FAN1_SPEED + i);
if ((fanspeed <= LOM_FAN_MAX_SPEED) ||
(fanspeed == LOM_FAN_NOT_PRESENT)) {
/*
@@ -6131,7 +6124,7 @@ bscv_setup_events(bscv_soft_state_t *ssp)
bits2set |= EBUS_ALARM_NOEVENTS;
}
bscv_setclear8_volatile(ssp, chan_general, EBUS_IDX_ALARM,
- bits2set, bits2clear);
+ bits2set, bits2clear);
}
#ifdef __sparc
@@ -6260,7 +6253,7 @@ bscv_set_watchdog_timer(bscv_soft_state_t *ssp, uint_t timeoutval)
*/
if (bscv_watchdog_available && (!watchdog_activated ||
(watchdog_activated &&
- (timeoutval != bscv_watchdog_timeout_seconds)))) {
+ (timeoutval != bscv_watchdog_timeout_seconds)))) {
bscv_watchdog_timeout_seconds = timeoutval;
bscv_watchdog_cfg_request(ssp, WDOG_ON);
return (bscv_watchdog_timeout_seconds);
@@ -6321,23 +6314,12 @@ bscv_panic_callback(void *arg, int code)
static void
bscv_watchdog_cyclic_add(bscv_soft_state_t *ssp)
{
- cyc_handler_t hdlr;
- cyc_time_t when;
-
- ASSERT(MUTEX_HELD(&cpu_lock)); /* for cyclic_add */
-
- if (ssp->cyclic_id != CYCLIC_NONE) {
+ if (ssp->periodic_id != NULL) {
return;
}
- hdlr.cyh_level = CY_LOCK_LEVEL;
- hdlr.cyh_func = (cyc_func_t)bscv_watchdog_pat_request;
- hdlr.cyh_arg = (void *)ssp;
-
- when.cyt_when = 0;
- when.cyt_interval = WATCHDOG_PAT_INTERVAL;
-
- ssp->cyclic_id = cyclic_add(&hdlr, &when);
+ ssp->periodic_id = ddi_periodic_add(bscv_watchdog_pat_request, ssp,
+ WATCHDOG_PAT_INTERVAL, DDI_IPL_10);
bscv_trace(ssp, 'X', "bscv_watchdog_cyclic_add:",
"cyclic added");
@@ -6352,14 +6334,11 @@ bscv_watchdog_cyclic_add(bscv_soft_state_t *ssp)
static void
bscv_watchdog_cyclic_remove(bscv_soft_state_t *ssp)
{
- ASSERT(MUTEX_HELD(&cpu_lock)); /* for cyclic_remove */
-
- if (ssp->cyclic_id == CYCLIC_NONE) {
+ if (ssp->periodic_id == NULL) {
return;
}
-
- cyclic_remove(ssp->cyclic_id);
- ssp->cyclic_id = CYCLIC_NONE;
+ ddi_periodic_delete(ssp->periodic_id);
+ ssp->periodic_id = NULL;
bscv_trace(ssp, 'X', "bscv_watchdog_cyclic_remove:",
"cyclic removed");
}
@@ -6383,7 +6362,7 @@ bscv_trace(bscv_soft_state_t *ssp, char code, const char *caller,
if (ssp->debug & (1 << (code-'@'))) {
p = buf;
(void) snprintf(p, sizeof (buf) - (p - buf),
- "%s/%s: ", MYNAME, caller);
+ "%s/%s: ", MYNAME, caller);
p += strlen(p);
va_start(va, fmt);
diff --git a/usr/src/uts/common/io/rge/rge.h b/usr/src/uts/common/io/rge/rge.h
index 585870de4e..199190097f 100755
--- a/usr/src/uts/common/io/rge/rge.h
+++ b/usr/src/uts/common/io/rge/rge.h
@@ -47,7 +47,6 @@ extern "C" {
#include <sys/dlpi.h>
#include <sys/devops.h>
#include <sys/debug.h>
-#include <sys/cyclic.h>
#include <sys/conf.h>
#include <netinet/ip6.h>
@@ -57,7 +56,6 @@ extern "C" {
#include <inet/nd.h>
#include <sys/pattr.h>
-#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
@@ -375,7 +373,7 @@ typedef struct rge {
ddi_acc_handle_t cfg_handle; /* DDI I/O handle */
ddi_acc_handle_t io_handle; /* DDI I/O handle */
caddr_t io_regs; /* mapped registers */
- cyclic_id_t cyclic_id; /* cyclic callback */
+ ddi_periodic_t periodic_id; /* periodical callback */
ddi_softint_handle_t resched_hdl; /* reschedule callback */
ddi_softint_handle_t factotum_hdl; /* factotum callback */
uint_t soft_pri;
diff --git a/usr/src/uts/common/io/rge/rge_main.c b/usr/src/uts/common/io/rge/rge_main.c
index c85b0f347e..82d5dfc34f 100755
--- a/usr/src/uts/common/io/rge/rge_main.c
+++ b/usr/src/uts/common/io/rge/rge_main.c
@@ -145,7 +145,7 @@ rge_alloc_dma_mem(rge_t *rgep, size_t memsize, ddi_dma_attr_t *dma_attr_p,
* Allocate handle
*/
err = ddi_dma_alloc_handle(rgep->devinfo, dma_attr_p,
- DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
+ DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
if (err != DDI_SUCCESS) {
dma_p->dma_hdl = NULL;
return (DDI_FAILURE);
@@ -1177,7 +1177,7 @@ rge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
* Error, reply with a NAK and EINVAL or the specified error
*/
miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
- EINVAL : iocp->ioc_error);
+ EINVAL : iocp->ioc_error);
break;
case IOC_DONE:
@@ -1200,7 +1200,7 @@ rge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
* OK, send prepared reply as ACK or NAK
*/
mp->b_datap->db_type = iocp->ioc_error == 0 ?
- M_IOCACK : M_IOCNAK;
+ M_IOCACK : M_IOCNAK;
qreply(wq, mp);
break;
}
@@ -1410,10 +1410,9 @@ rge_unattach(rge_t *rgep)
* Quiesce the PHY and MAC (leave it reset but still powered).
* Clean up and free all RGE data structures
*/
- if (rgep->cyclic_id) {
- mutex_enter(&cpu_lock);
- cyclic_remove(rgep->cyclic_id);
- mutex_exit(&cpu_lock);
+ if (rgep->periodic_id != NULL) {
+ ddi_periodic_delete(rgep->periodic_id);
+ rgep->periodic_id = NULL;
}
if (rgep->progress & PROGRESS_KSTATS)
@@ -1511,8 +1510,6 @@ rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
rge_t *rgep; /* Our private data */
mac_register_t *macp;
chip_id_t *cidp;
- cyc_handler_t cychand;
- cyc_time_t cyctime;
int intr_types;
caddr_t regs;
int instance;
@@ -1530,7 +1527,7 @@ rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
instance = ddi_get_instance(devinfo);
RGE_GTRACE(("rge_attach($%p, %d) instance %d",
- (void *)devinfo, cmd, instance));
+ (void *)devinfo, cmd, instance));
RGE_BRKPT(NULL, "rge_attach");
switch (cmd) {
@@ -1553,13 +1550,13 @@ rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
*/
rgep->rge_mac_state = RGE_MAC_ATTACH;
rgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
- DDI_PROP_DONTPASS, debug_propname, rge_debug);
+ DDI_PROP_DONTPASS, debug_propname, rge_debug);
rgep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
- DDI_PROP_DONTPASS, mtu_propname, ETHERMTU);
+ DDI_PROP_DONTPASS, mtu_propname, ETHERMTU);
rgep->msi_enable = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
- DDI_PROP_DONTPASS, msi_propname, B_TRUE);
+ DDI_PROP_DONTPASS, msi_propname, B_TRUE);
(void) snprintf(rgep->ifname, sizeof (rgep->ifname), "%s%d",
- RGE_DRIVER_NAME, instance);
+ RGE_DRIVER_NAME, instance);
/*
* Map config space registers
@@ -1637,14 +1634,14 @@ rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
* interrupts ...
*/
err = ddi_intr_add_softint(devinfo, &rgep->resched_hdl,
- DDI_INTR_SOFTPRI_MIN, rge_reschedule, (caddr_t)rgep);
+ DDI_INTR_SOFTPRI_MIN, rge_reschedule, (caddr_t)rgep);
if (err != DDI_SUCCESS) {
rge_problem(rgep, "ddi_intr_add_softint() failed");
goto attach_fail;
}
rgep->progress |= PROGRESS_RESCHED;
err = ddi_intr_add_softint(devinfo, &rgep->factotum_hdl,
- DDI_INTR_SOFTPRI_MIN, rge_chip_factotum, (caddr_t)rgep);
+ DDI_INTR_SOFTPRI_MIN, rge_chip_factotum, (caddr_t)rgep);
if (err != DDI_SUCCESS) {
rge_problem(rgep, "ddi_intr_add_softint() failed");
goto attach_fail;
@@ -1771,14 +1768,12 @@ rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
if (err != 0)
goto attach_fail;
- cychand.cyh_func = rge_chip_cyclic;
- cychand.cyh_arg = rgep;
- cychand.cyh_level = CY_LOCK_LEVEL;
- cyctime.cyt_when = 0;
- cyctime.cyt_interval = RGE_CYCLIC_PERIOD;
- mutex_enter(&cpu_lock);
- rgep->cyclic_id = cyclic_add(&cychand, &cyctime);
- mutex_exit(&cpu_lock);
+	/*
+	 * Register a periodic handler.
+	 * rge_chip_cyclic() is invoked in kernel context.
+	 */
+ rgep->periodic_id = ddi_periodic_add(rge_chip_cyclic, rgep,
+ RGE_CYCLIC_PERIOD, DDI_IPL_0);
rgep->progress |= PROGRESS_READY;
return (DDI_SUCCESS);
diff --git a/usr/src/uts/common/os/clock.c b/usr/src/uts/common/os/clock.c
index 669c8d6886..290506962e 100644
--- a/usr/src/uts/common/os/clock.c
+++ b/usr/src/uts/common/os/clock.c
@@ -67,6 +67,7 @@
#include <sys/rctl.h>
#include <sys/task.h>
#include <sys/sdt.h>
+#include <sys/ddi_timer.h>
/*
* for NTP support
@@ -1045,6 +1046,18 @@ clock_init(void)
mutex_enter(&cpu_lock);
clock_cyclic = cyclic_add(&hdlr, &when);
mutex_exit(&cpu_lock);
+
+ /*
+ * cyclic_timer is dedicated to the ddi interface, which
+ * uses the same clock resolution as the system one.
+ */
+ hdlr.cyh_func = (cyc_func_t)cyclic_timer;
+ hdlr.cyh_level = CY_LOCK_LEVEL;
+ hdlr.cyh_arg = NULL;
+
+ mutex_enter(&cpu_lock);
+	(void) cyclic_add(&hdlr, &when);
+ mutex_exit(&cpu_lock);
}
/*
diff --git a/usr/src/uts/common/os/ddi_timer.c b/usr/src/uts/common/os/ddi_timer.c
new file mode 100644
index 0000000000..0f60567d25
--- /dev/null
+++ b/usr/src/uts/common/os/ddi_timer.c
@@ -0,0 +1,936 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/atomic.h>
+#include <sys/callb.h>
+#include <sys/conf.h>
+#include <sys/cmn_err.h>
+#include <sys/taskq.h>
+#include <sys/dditypes.h>
+#include <sys/ddi_timer.h>
+#include <sys/disp.h>
+#include <sys/kobj.h>
+#include <sys/note.h>
+#include <sys/param.h>
+#include <sys/sysmacros.h>
+#include <sys/systm.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+/*
+ * global variables for timeout request
+ */
+static kmem_cache_t *req_cache; /* kmem cache for timeout request */
+
+/*
+ * taskq for timer
+ */
+static int timer_taskq_num = 16; /* initial thread number */
+static taskq_t *tm_taskq; /* taskq thread pool */
+static kthread_t *tm_work_thread; /* work thread invoking taskq */
+
+/*
+ * timer variables
+ */
+static cyc_timer_t *ddi_timer; /* ddi timer based on the cyclic */
+static volatile hrtime_t timer_hrtime; /* current tick time on the timer */
+
+/*
+ * Variable used for the suspend/resume.
+ */
+static volatile boolean_t timer_suspended;
+
+/*
+ * Kernel taskq queue to ddi timer
+ */
+static list_t kern_queue; /* kernel thread request queue */
+static kcondvar_t kern_cv; /* condition variable for taskq queue */
+
+/*
+ * Software interrupt queue dedicated to ddi timer
+ */
+static list_t intr_queue; /* software interrupt request queue */
+static uint_t intr_state; /* software interrupt state */
+
+/*
+ * This lock is used to protect the intr_queue and kern_queue.
+ * It's also used to protect the intr_state which represents the software
+ * interrupt state for the timer.
+ */
+static kmutex_t disp_req_lock;
+
+/*
+ * the periodic timer interrupt priority level
+ */
+enum {
+ TM_IPL_0 = 0, /* kernel context */
+ TM_IPL_1, TM_IPL_2, TM_IPL_3, /* level 1-3 */
+ TM_IPL_4, TM_IPL_5, TM_IPL_6, /* level 4-6 */
+ TM_IPL_7, TM_IPL_8, TM_IPL_9, /* level 7-9 */
+ TM_IPL_10 /* level 10 */
+};
+
+/*
+ * A callback handler used by CPR to stop and resume callouts.
+ * Since the taskq uses TASKQ_CPR_SAFE, the function just sets the boolean
+ * flag timer_suspended here.
+ */
+/*ARGSUSED*/
+static boolean_t
+timer_cpr_callb(void *arg, int code)
+{
+ timer_suspended = (code == CB_CODE_CPR_CHKPT);
+ return (B_TRUE);
+}
+
+/*
+ * Return a proposed timeout request id. add_req() determines whether
+ * or not the proposed one is used. If it's not suitable, add_req()
+ * recalls get_req_cnt(). To reduce the lock contention between the
+ * timer and i_untimeout(), the atomic instruction should be used here.
+ */
+static timeout_t
+get_req_cnt(void)
+{
+ static volatile ulong_t timeout_cnt = 0;
+ return ((timeout_t)atomic_inc_ulong_nv(&timeout_cnt));
+}
+
+/*
+ * Get the system resolution.
+ * Note. currently there is a restriction about the system resolution, and
+ * the 10ms tick (the default clock resolution) is only supported now.
+ */
+static hrtime_t
+i_get_res(void)
+{
+ return ((hrtime_t)10000000); /* 10ms tick only */
+}
+
+/*
+ * Return the value for the cog of the timing wheel.
+ * TICK_FACTOR is used to gain a finer cog on the clock resolution.
+ */
+static hrtime_t
+tw_tick(hrtime_t time)
+{
+ return ((time << TICK_FACTOR) / ddi_timer->res);
+}
+
+/*
+ * Calculate the expiration time for the timeout request.
+ */
+static hrtime_t
+expire_tick(tm_req_t *req)
+{
+ return (tw_tick(req->exp_time));
+}
+
+/*
+ * Register a timeout request to the timer. This function is used
+ * in i_timeout().
+ */
+static timeout_t
+add_req(tm_req_t *req)
+{
+ timer_tw_t *tid, *tw;
+ tm_req_t *next;
+ timeout_t id;
+
+retry:
+ /*
+ * Retrieve a timeout request id. Since i_timeout() needs to return
+	 * a non-zero value, retry if zero is returned.
+ */
+ if ((id = get_req_cnt()) == 0)
+ id = get_req_cnt();
+
+ /*
+ * Check if the id is not used yet. Since the framework now deals
+ * with the periodic timeout requests, we cannot assume the id
+ * allocated (long) before doesn't exist any more when it will
+ * be re-assigned again (especially on 32bit) but need to handle
+ * this case to solve the conflicts. If it's used already, retry
+ * another.
+ */
+ tid = &ddi_timer->idhash[TM_HASH((uintptr_t)id)];
+ mutex_enter(&tid->lock);
+ for (next = list_head(&tid->req); next != NULL;
+ next = list_next(&tid->req, next)) {
+ if (next->id == id) {
+ mutex_exit(&tid->lock);
+ goto retry;
+ }
+ }
+ /* Nobody uses this id yet */
+ req->id = id;
+
+ /*
+ * Register this request to the timer.
+ * The list operation must be list_insert_head().
+ * Other operations can degrade performance.
+ */
+ list_insert_head(&tid->req, req);
+ mutex_exit(&tid->lock);
+
+ tw = &ddi_timer->exhash[TM_HASH(expire_tick(req))];
+ mutex_enter(&tw->lock);
+ /*
+ * Other operations than list_insert_head() can
+ * degrade performance here.
+ */
+ list_insert_head(&tw->req, req);
+ mutex_exit(&tw->lock);
+
+ return (id);
+}
+
+/*
+ * Periodic timeout requests cannot be removed until they are canceled
+ * explicitly. Until then, they need to be re-registered after they are
+ * fired. transfer_req() re-registers the requests for the next fires.
+ * Note. transfer_req() sends the cv_signal to timeout_execute(), which
+ * runs in interrupt context. Make sure this function will not be blocked,
+ * otherwise the deadlock situation can occur.
+ */
+static void
+transfer_req(tm_req_t *req, timer_tw_t *tw)
+{
+ timer_tw_t *new_tw;
+ hrtime_t curr_time;
+ ASSERT(tw && MUTEX_HELD(&tw->lock));
+
+ /* Calculate the next expiration time by interval */
+ req->exp_time += req->interval;
+ curr_time = gethrtime();
+
+ /*
+ * If a long time (more than 1 clock resolution) has already
+ * passed for some reason (e.g. debugger or high interrupt),
+ * round up the next expiration to the appropriate one
+	 * since this request is periodic and never catches up with it.
+ */
+ if (curr_time - req->exp_time >= ddi_timer->res) {
+ req->exp_time = roundup(curr_time + req->interval,
+ ddi_timer->res);
+ }
+
+ /*
+ * Re-register this request.
+ * Note. since it is guaranteed that the timer is invoked on only
+ * one CPU at any time (by the cyclic subsystem), a deadlock
+ * cannot occur regardless of the lock order here.
+ */
+ new_tw = &ddi_timer->exhash[TM_HASH(expire_tick(req))];
+
+ /*
+ * If it's on the timer cog already, there is nothing
+ * to do. Just return.
+ */
+ if (new_tw == tw)
+ return;
+
+ /* Remove this request from the timer */
+ list_remove(&tw->req, req);
+
+ /* Re-register this request to the timer */
+ mutex_enter(&new_tw->lock);
+
+ /*
+ * Other operations than list_insert_head() can
+ * degrade performance here.
+ */
+ list_insert_head(&new_tw->req, req);
+ mutex_exit(&new_tw->lock);
+
+ /*
+	 * Set the TM_TRANSFER flag and notify that the request is transferred
+ * completely. This prevents a race in the case that this request
+ * is serviced on another CPU already.
+ */
+ mutex_enter(&req->lock);
+ req->flags |= TM_TRANSFER;
+ cv_signal(&req->cv);
+ mutex_exit(&req->lock);
+}
+
+/*
+ * Execute timeout requests.
+ * Note. since timeout_execute() can run in interrupt context and block
+ * on condition variables, there are restrictions on the timer code that
+ * signals these condition variables (see i_untimeout(), transfer_req(),
+ * and condvar(9F)). Functions that signal these cvs must ensure that
+ * they will not be blocked (for memory allocations or any other reason)
+ * since condition variables don't support priority inheritance.
+ */
+static void
+timeout_execute(void *arg)
+{
+ tm_req_t *req = (tm_req_t *)arg;
+ ASSERT(req->flags & TM_INVOKING && !(req->flags & TM_EXECUTING));
+
+ for (;;) {
+ /*
+ * Check if this request is canceled. If it's canceled, do not
+ * execute this request.
+ */
+ mutex_enter(&req->lock);
+ if (!(req->flags & TM_CANCEL)) {
+ /*
+ * Set the current thread to prevent a dead lock
+ * situation in case that this timeout request is
+ * canceled in the handler being invoked now.
+ * (this doesn't violate the spec) Set TM_EXECUTING
+ * to show this handler is invoked soon.
+ */
+ req->h_thread = curthread;
+ req->flags |= TM_EXECUTING;
+ mutex_exit(&req->lock);
+
+ /* The handler is invoked without holding any locks */
+ (*req->handler)(req->arg);
+
+ /*
+ * Set TM_COMPLETE and notify the request is complete
+ * now.
+ */
+ mutex_enter(&req->lock);
+ req->flags |= TM_COMPLETE;
+ if (req->flags & TM_COMPWAIT)
+ cv_signal(&req->cv);
+ }
+
+ /*
+ * The handler is invoked at this point. If this request
+ * is not canceled, prepare for the next fire.
+ */
+ if (req->flags & TM_CANCEL) {
+ timer_tw_t *tw;
+ /*
+ * Wait until the timer finishes all things for
+ * this request.
+ */
+ while (!(req->flags & TM_TRANSFER))
+ cv_wait(&req->cv, &req->lock);
+ mutex_exit(&req->lock);
+ ASSERT(req->flags & TM_TRANSFER);
+
+ /* Remove this request from the timer */
+ tw = &ddi_timer->exhash[TM_HASH(expire_tick(req))];
+ mutex_enter(&tw->lock);
+ list_remove(&tw->req, req);
+ mutex_exit(&tw->lock);
+
+ /*
+ * Wait until i_untimeout() can go ahead.
+ * This prevents the request from being freed before
+ * i_untimeout() is complete.
+ */
+ mutex_enter(&req->lock);
+ while (req->flags & TM_COMPWAIT)
+ cv_wait(&req->cv, &req->lock);
+ mutex_exit(&req->lock);
+ ASSERT(!(req->flags & TM_COMPWAIT));
+
+ /* Free this request */
+ kmem_cache_free(req_cache, req);
+ return;
+ }
+ ASSERT(req->flags & TM_EXECUTING);
+
+ /*
+ * TM_EXECUTING must be set at this point.
+ * Unset the flag.
+ */
+ req->flags &= ~(TM_EXECUTING | TM_TRANSFER);
+
+ /*
+		 * Decrease the request cnt. The request cnt shows
+		 * how many times this request is executed now.
+		 * If this counter becomes zero, drop TM_INVOKING
+		 * to show there are no requests to do now.
+ */
+ req->cnt--;
+ if (req->cnt == 0) {
+ req->flags &= ~TM_INVOKING;
+ mutex_exit(&req->lock);
+ return;
+ }
+ mutex_exit(&req->lock);
+ }
+}
+
+/*
+ * Timeout worker thread for processing task queue.
+ */
+static void
+timeout_taskq_thread(void *arg)
+{
+ _NOTE(ARGUNUSED(arg));
+ tm_req_t *kern_req;
+ callb_cpr_t cprinfo;
+
+ CALLB_CPR_INIT(&cprinfo, &disp_req_lock, callb_generic_cpr,
+ "timeout_taskq_thread");
+
+ /*
+	 * This thread is woken up when a new request is added to
+ * the queue. Then pick up all requests and dispatch them
+ * via taskq_dispatch().
+ */
+ for (;;) {
+ /*
+ * Check the queue and pick up a request if the queue
+ * is not NULL.
+ */
+ mutex_enter(&disp_req_lock);
+ while ((kern_req = list_head(&kern_queue)) == NULL) {
+ CALLB_CPR_SAFE_BEGIN(&cprinfo);
+ cv_wait(&kern_cv, &disp_req_lock);
+ CALLB_CPR_SAFE_END(&cprinfo, &disp_req_lock);
+ }
+ list_remove(&kern_queue, kern_req);
+ mutex_exit(&disp_req_lock);
+
+ /* Execute the timeout request via the taskq thread */
+ (void) taskq_dispatch(tm_taskq, timeout_execute,
+ (void *)kern_req, TQ_SLEEP);
+ }
+}
+
+/*
+ * Dispatch the timeout request based on the level specified.
+ * If the level is equal to zero, notify the worker thread to
+ * call taskq_dispatch() in kernel context. If the level is bigger
+ * than zero, add a software interrupt request to the queue and raise
+ * the interrupt level to the specified one.
+ */
+static void
+timeout_dispatch(tm_req_t *req)
+{
+ int level = req->level;
+ extern void sir_on(int);
+
+ if (level == TM_IPL_0) {
+ /* Add a new request to the tail */
+ mutex_enter(&disp_req_lock);
+ list_insert_tail(&kern_queue, req);
+ mutex_exit(&disp_req_lock);
+
+ /*
+ * notify the worker thread that this request
+ * is newly added to the queue.
+ * Note. this cv_signal() can be called after the
+ * mutex_lock.
+ */
+ cv_signal(&kern_cv);
+ } else {
+ /* Add a new request to the tail */
+ mutex_enter(&disp_req_lock);
+ list_insert_tail(&intr_queue, req);
+
+ /* Issue the software interrupt */
+ if (intr_state & TM_INTR_START(level)) {
+ /*
+ * timer_softintr() is already running; no need to
+ * raise a siron. Due to lock protection of
+ * the intr_queue and intr_state, we know that
+ * timer_softintr() will see the new addition to
+ * the intr_queue.
+ */
+ mutex_exit(&disp_req_lock);
+ } else {
+ intr_state |= TM_INTR_SET(level);
+ mutex_exit(&disp_req_lock);
+
+ /* Raise an interrupt to execute timeout requests */
+ sir_on(level);
+ }
+ }
+}
+
+/*
+ * Check the software interrupt queue and invoke requests at the specified
+ * interrupt level.
+ * Note that the queue may change during call so that the disp_req_lock
+ * and the intr_state are used to protect it.
+ * The software interrupts supported here are up to the level 10. Higher
+ * than 10 interrupts cannot be supported.
+ */
+void
+timer_softintr(int level)
+{
+ tm_req_t *intr_req;
+ ASSERT(level >= TM_IPL_1 && level <= TM_IPL_10);
+
+ /* Check if we are asked to process the softcall list */
+ mutex_enter(&disp_req_lock);
+ if (!(intr_state & TM_INTR_SET(level))) {
+ mutex_exit(&disp_req_lock);
+ return;
+ }
+
+ /* Notify this software interrupt request will be executed soon */
+ intr_state |= TM_INTR_START(level);
+ intr_state &= ~TM_INTR_SET(level);
+
+ /* loop the link until there is no requests */
+ for (intr_req = list_head(&intr_queue); intr_req != NULL;
+ /* Nothing */) {
+
+ /* Check the interrupt level */
+ if (intr_req->level != level) {
+ intr_req = list_next(&intr_queue, intr_req);
+ continue;
+ }
+ list_remove(&intr_queue, intr_req);
+ mutex_exit(&disp_req_lock);
+
+ /* Execute the software interrupt request */
+ timeout_execute(intr_req);
+
+ mutex_enter(&disp_req_lock);
+ /* Restart the loop since new requests might be added */
+ intr_req = list_head(&intr_queue);
+ }
+
+ /* reset the interrupt state */
+ intr_state &= ~TM_INTR_START(level);
+ mutex_exit(&disp_req_lock);
+}
+
+/*
+ * void
+ * cyclic_timer(void)
+ *
+ * Overview
+ * cyclic_timer() is a function invoked periodically by the cyclic
+ * subsystem.
+ *
+ * The function calls timeout_dispatch() with timeout requests whose
+ * expiration time is already reached.
+ *
+ * Arguments
+ * Nothing
+ *
+ * Return value
+ * Nothing
+ */
+void
+cyclic_timer(void)
+{
+ tm_req_t *req;
+ timer_tw_t *tw;
+ hrtime_t curr_tick, curr;
+
+ /* If the system is suspended, just return */
+ if (timer_suspended)
+ return;
+
+ /* Get the current time */
+ timer_hrtime = ddi_timer->tick_time = curr = gethrtime();
+ curr_tick = tw_tick(ddi_timer->tick_time);
+
+restart:
+ /*
+ * Check the timer cogs to see if there are timeout requests
+	 * that have reached the expiration time. Call timeout_dispatch() to
+ * the requests, then.
+ */
+ while (curr_tick >= ddi_timer->tick) {
+ tm_req_t *next;
+ tw = &ddi_timer->exhash[TM_HASH(ddi_timer->tick)];
+ mutex_enter(&tw->lock);
+ for (req = list_head(&tw->req); req != NULL; req = next) {
+ next = list_next(&tw->req, req);
+ /*
+ * If this request is already obsolete, free
+ * it here.
+ */
+ if (req->flags & TM_UTMCOMP) {
+ /*
+ * Remove this request from the timer,
+ * then free it.
+ */
+ list_remove(&tw->req, req);
+ kmem_cache_free(req_cache, req);
+ } else if (curr >= req->exp_time) {
+ mutex_enter(&req->lock);
+ /*
+ * Check if this request is canceled, but not
+ * being executed now.
+ */
+ if (req->flags & TM_CANCEL &&
+ !(req->flags & TM_INVOKING)) {
+ mutex_exit(&req->lock);
+ continue;
+ }
+ /*
+ * Record how many times timeout_execute()
+ * must be invoked.
+ */
+ req->cnt++;
+ /*
+ * Invoke timeout_execute() via taskq or
+ * software interrupt.
+ */
+ if (req->flags & TM_INVOKING) {
+ /*
+ * If it's already invoked,
+ * There is nothing to do.
+ */
+ mutex_exit(&req->lock);
+ } else {
+ req->flags |= TM_INVOKING;
+ mutex_exit(&req->lock);
+ /*
+ * Dispatch this timeout request.
+ * timeout_dispatch() chooses either
+ * a software interrupt or taskq thread
+ * based on the level.
+ */
+ timeout_dispatch(req);
+ }
+ /*
+ * Periodic timeout requests must prepare for
+ * the next fire.
+ */
+ transfer_req(req, tw);
+ }
+ }
+ mutex_exit(&tw->lock);
+ ddi_timer->tick++;
+ }
+
+ /*
+ * Check the current time. If we spend some amount of time,
+ * double-check if some of the requests reaches the expiration
+ * time during the work.
+ */
+ curr = gethrtime();
+ curr_tick = tw_tick(curr);
+ if (curr_tick >= ddi_timer->tick) {
+ ddi_timer->tick -= 1;
+ goto restart;
+ }
+ /* Adjustment for the next rolling */
+ ddi_timer->tick -= 1;
+}
+
+/*
+ * void
+ * timer_init(void)
+ *
+ * Overview
+ * timer_init() allocates the internal data structures used by
+ * i_timeout(), i_untimeout() and the timer.
+ *
+ * Arguments
+ * Nothing
+ *
+ * Return value
+ * Nothing
+ *
+ * Caller's context
+ * timer_init() can be called in kernel context only.
+ */
+void
+timer_init(void)
+{
+ int i;
+
+ /* Create kmem_cache for timeout requests */
+ req_cache = kmem_cache_create("timeout_request", sizeof (tm_req_t),
+ 0, NULL, NULL, NULL, NULL, NULL, 0);
+
+ /* Initialize the timer which is invoked by the cyclic subsystem */
+ ddi_timer = kmem_alloc(sizeof (cyc_timer_t), KM_SLEEP);
+ ddi_timer->res = nsec_per_tick;
+ ddi_timer->tick = tw_tick(gethrtime());
+ ddi_timer->tick_time = 0;
+
+ /* Initialize the timing wheel */
+ bzero((char *)&ddi_timer->idhash[0], TM_HASH_SZ * sizeof (timer_tw_t));
+ bzero((char *)&ddi_timer->exhash[0], TM_HASH_SZ * sizeof (timer_tw_t));
+
+ for (i = 0; i < TM_HASH_SZ; i++) {
+ list_create(&ddi_timer->idhash[i].req, sizeof (tm_req_t),
+ offsetof(tm_req_t, id_req));
+ mutex_init(&ddi_timer->idhash[i].lock, NULL, MUTEX_ADAPTIVE,
+ NULL);
+
+ list_create(&ddi_timer->exhash[i].req, sizeof (tm_req_t),
+ offsetof(tm_req_t, ex_req));
+ mutex_init(&ddi_timer->exhash[i].lock, NULL, MUTEX_ADAPTIVE,
+ NULL);
+ }
+
+ /* Create a taskq thread pool */
+ tm_taskq = taskq_create_instance("timeout_taskq", 0,
+ timer_taskq_num, MAXCLSYSPRI,
+ timer_taskq_num, 2 * timer_taskq_num,
+ TASKQ_PREPOPULATE | TASKQ_CPR_SAFE);
+
+ /*
+ * Initialize the taskq queue which is dedicated to this timeout
+ * interface/timer.
+ */
+ list_create(&kern_queue, sizeof (tm_req_t),
+ offsetof(tm_req_t, disp_req));
+
+ /* Create a worker thread to dispatch the taskq thread */
+ tm_work_thread = thread_create(NULL, 0, timeout_taskq_thread, NULL,
+ 0, &p0, TS_RUN, MAXCLSYSPRI);
+
+ /*
+ * Initialize the software interrupt queue which is dedicated to
+ * this timeout interface/timer.
+ */
+ list_create(&intr_queue, sizeof (tm_req_t),
+ offsetof(tm_req_t, disp_req));
+
+ /*
+ * Initialize the mutex lock used for both of kern_queue and
+ * intr_queue.
+ */
+ mutex_init(&disp_req_lock, NULL, MUTEX_ADAPTIVE, NULL);
+ cv_init(&kern_cv, NULL, CV_DEFAULT, NULL);
+
+ /* Register the callback handler for the system suspend/resume */
+ (void) callb_add(timer_cpr_callb, 0, CB_CL_CPR_CALLOUT, "cyclicTimer");
+}
+
+/*
+ * timeout_t
+ * i_timeout(void (*func)(void *), void *arg, hrtime_t interval,
+ * int level, int flags)
+ *
+ * Overview
+ * i_timeout() is an internal function scheduling the passed function
+ * to be invoked in the interval in nanoseconds. The callback function
+ * keeps invoked until the request is explicitly canceled by i_untimeout().
+ * This function is used for ddi_periodic_add(9F).
+ *
+ * Arguments
+ *
+ * func: the callback function
+ * the callback function will be invoked in kernel context if
+ * the level passed is the zero. Otherwise be invoked in interrupt
+ * context at the specified level by the argument "level".
+ *
+ * Note that it's guaranteed by the cyclic subsystem that the
+ * function is invoked on only one CPU and is never executed
+ * simultaneously even on an MP system.
+ *
+ * arg: the argument passed to the callback function
+ *
+ * interval: interval time in nanoseconds
+ * if the interval is the zero, the timer resolution is used.
+ *
+ * level : callback interrupt level
+ * If the value is 0 (the zero), the callback function is invoked
+ * in kernel context. If the value is more than 0 (the zero), but
+ * less than or equal to 10, the callback function is invoked in
+ * interrupt context at the specified interrupt level.
+ * This value must be in range of 0-10.
+ *
+ * Return value
+ * returns a non-zero opaque value (timeout_t) on success.
+ *
+ * Caller's context
+ * i_timeout() can be called in user, kernel or interrupt context.
+ * It cannot be called in high interrupt context.
+ *
+ * Note. This function is used by ddi_periodic_add(), which cannot
+ * be called in interrupt context. As a result, this function is called
+ * in user or kernel context only in practice.
+ *
+ */
+timeout_t
+i_timeout(void (*func)(void *), void *arg, hrtime_t interval, int level)
+{
+ hrtime_t start_time = gethrtime(), res;
+ tm_req_t *req = NULL;
+
+ /* Allocate and initialize the timeout request */
+ req = kmem_cache_alloc(req_cache, KM_SLEEP);
+ req->handler = func;
+ req->arg = arg;
+ req->h_thread = NULL;
+ req->level = level;
+ req->flags = 0;
+ req->cnt = 0;
+ mutex_init(&req->lock, NULL, MUTEX_ADAPTIVE, NULL);
+ cv_init(&req->cv, NULL, CV_DEFAULT, NULL);
+
+ /*
+ * The resolution must be finer than or equal to
+ * the requested interval. If it's not, set the resolution
+ * to the interval.
+ * Note. There is a restriction currently. Regardless of the
+ * clock resolution used here, 10ms is set as the timer resolution.
+ * Even on the 1ms resolution timer, the minimum interval is 10ms.
+ */
+ if ((res = i_get_res()) > interval) {
+ uintptr_t pc = (uintptr_t)req->handler;
+ ulong_t off;
+ cmn_err(CE_WARN,
+ "The periodic timeout (handler=%s, interval=%lld) "
+ "requests a finer interval than the supported resolution. "
+ "It rounds up to %lld\n", kobj_getsymname(pc, &off),
+ interval, res);
+ interval = res;
+ }
+
+ /*
+ * If the specified interval is already multiples of
+ * the resolution, use it as is. Otherwise, it rounds
+ * up to multiples of the timer resolution.
+ */
+ req->interval = roundup(interval, i_get_res());
+
+ /*
+ * For the periodic timeout requests, the first expiration time will
+ * be adjusted to the timer tick edge to take advantage of the cyclic
+ * subsystem. In that case, the first fire is likely not an expected
+ * one, but the fires later can be more accurate due to this.
+ */
+ req->exp_time = roundup(start_time + req->interval, i_get_res());
+
+ /* Add the request to the timer */
+ return (add_req(req));
+}
+
+/*
+ * void
+ * i_untimeout(timeout_t req)
+ *
+ * Overview
+ * i_untimeout() is an internal function canceling the i_timeout()
+ * request previously issued.
+ * This function is used for ddi_periodic_delete(9F).
+ *
+ * Argument
+ * req: timeout_t opaque value i_timeout() returned previously.
+ *
+ * Return value
+ * Nothing.
+ *
+ * Caller's context
+ * i_untimeout() can be called in user, kernel or interrupt context.
+ * It cannot be called in high interrupt context.
+ *
+ * Note. This function is used by ddi_periodic_delete(), which cannot
+ * be called in interrupt context. As a result, this function is called
+ * in user or kernel context only in practice. Also i_untimeout() sends
+ * the cv_signal to timeout_execute(), which runs in interrupt context.
+ * Make sure this function will not be blocked, otherwise the deadlock
+ * situation can occur. See timeout_execute().
+ */
+void
+i_untimeout(timeout_t timeout_req)
+{
+ timer_tw_t *tid;
+ tm_req_t *req;
+ timeout_t id;
+
+ /* Retrieve the id for this timeout request */
+ id = (timeout_t)timeout_req;
+ tid = &ddi_timer->idhash[TM_HASH((uintptr_t)id)];
+
+ mutex_enter(&tid->lock);
+ for (req = list_head(&tid->req); req != NULL;
+ req = list_next(&tid->req, req)) {
+ if (req->id == id)
+ break;
+ }
+ if (req == NULL) {
+		/* There are no requests with this id after all */
+ mutex_exit(&tid->lock);
+ return;
+ }
+ mutex_enter(&req->lock);
+
+ /* Unregister this request first */
+ list_remove(&tid->req, req);
+
+ /* Notify that this request is canceled */
+ req->flags |= TM_CANCEL;
+
+ /* Check if the handler is invoked */
+ if (req->flags & TM_INVOKING) {
+ /*
+ * If this request is not yet executed or is already finished
+ * then there is nothing to do but just return. Otherwise
+ * we'll have to wait for the callback execution being complete.
+ */
+ if (!(req->flags & TM_EXECUTING) || req->flags & TM_COMPLETE) {
+ /* There is nothing to do any more */
+ mutex_exit(&req->lock);
+ mutex_exit(&tid->lock);
+ return;
+ }
+
+ /*
+ * If this is the recursive call, there is nothing
+ * to do any more. This is the case that i_untimeout()
+ * is called in the handler.
+ */
+ if (req->h_thread == curthread) {
+ mutex_exit(&req->lock);
+ mutex_exit(&tid->lock);
+ return;
+ }
+
+ /*
+ * Notify that i_untimeout() is waiting until this request
+ * is complete.
+ */
+ req->flags |= TM_COMPWAIT;
+ mutex_exit(&tid->lock);
+
+ /*
+ * Wait for this timeout request being complete before
+ * the return.
+ */
+ while (!(req->flags & TM_COMPLETE))
+ cv_wait(&req->cv, &req->lock);
+ req->flags &= ~TM_COMPWAIT;
+ cv_signal(&req->cv);
+ mutex_exit(&req->lock);
+ return;
+ }
+ mutex_exit(&req->lock);
+ mutex_exit(&tid->lock);
+
+ /*
+ * Notify untimeout() is about to be finished, and this request
+ * can be freed.
+ */
+ atomic_or_uint(&req->flags, TM_UTMCOMP);
+}
diff --git a/usr/src/uts/common/os/main.c b/usr/src/uts/common/os/main.c
index b2ceda1e16..ca15fb5a31 100644
--- a/usr/src/uts/common/os/main.c
+++ b/usr/src/uts/common/os/main.c
@@ -56,6 +56,7 @@
#include <sys/modctl.h>
#include <sys/vm.h>
#include <sys/callb.h>
+#include <sys/ddi_timer.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cpuvar.h>
@@ -396,6 +397,7 @@ main(void)
segkmem_gc();
callb_init();
callout_init(); /* callout table MUST be init'd before clock starts */
+ timer_init(); /* timer must be initialized before cyclic starts */
cbe_init();
clock_init();
diff --git a/usr/src/uts/common/os/sunddi.c b/usr/src/uts/common/os/sunddi.c
index 662f69986f..e919e8b846 100644
--- a/usr/src/uts/common/os/sunddi.c
+++ b/usr/src/uts/common/os/sunddi.c
@@ -60,6 +60,7 @@
#include <sys/conf.h>
#include <sys/ddi_impldefs.h> /* include implementation structure defs */
#include <sys/ndi_impldefs.h> /* include prototypes */
+#include <sys/ddi_timer.h>
#include <sys/hwconf.h>
#include <sys/pathname.h>
#include <sys/modctl.h>
@@ -5302,6 +5303,125 @@ ddi_run_callback(uintptr_t *listid)
softcall(real_callback_run, listid);
}
+/*
+ * ddi_periodic_t
+ * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
+ * int level)
+ *
+ * INTERFACE LEVEL
+ * Solaris DDI specific (Solaris DDI)
+ *
+ * PARAMETERS
+ * func: the callback function
+ *
+ * The callback function will be invoked. The function is invoked
+ * in kernel context if the argument level passed is the zero.
+ * Otherwise it's invoked in interrupt context at the specified
+ * level.
+ *
+ * arg: the argument passed to the callback function
+ *
+ * interval: interval time
+ *
+ * level : callback interrupt level
+ *
+ * If the value is the zero, the callback function is invoked
+ * in kernel context. If the value is more than the zero, but
+ * less than or equal to ten, the callback function is invoked in
+ * interrupt context at the specified interrupt level, which may
+ * be used for real time applications.
+ *
+ * This value must be in range of 0-10, which can be a numeric
+ * number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
+ *
+ * DESCRIPTION
+ * ddi_periodic_add(9F) schedules the specified function to be
+ * periodically invoked in the interval time.
+ *
+ * As well as timeout(9F), the exact time interval over which the function
+ * takes effect cannot be guaranteed, but the value given is a close
+ * approximation.
+ *
+ * Drivers waiting on behalf of processes with real-time constraints must
+ * pass a non-zero value with the level argument to ddi_periodic_add(9F).
+ *
+ * RETURN VALUES
+ * ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
+ * which must be used for ddi_periodic_delete(9F) to specify the request.
+ *
+ * CONTEXT
+ * ddi_periodic_add(9F) can be called in user or kernel context, but
+ * it cannot be called in interrupt context, which is different from
+ * timeout(9F).
+ */
+ddi_periodic_t
+ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
+{
+ /*
+ * Sanity check of the argument level.
+ */
+ if (level < DDI_IPL_0 || level > DDI_IPL_10)
+ cmn_err(CE_PANIC,
+ "ddi_periodic_add: invalid interrupt level (%d).", level);
+
+ /*
+ * Sanity check of the context. ddi_periodic_add() cannot be
+ * called in either interrupt context or high interrupt context.
+ */
+ if (servicing_interrupt())
+ cmn_err(CE_PANIC,
+ "ddi_periodic_add: called in (high) interrupt context.");
+
+ return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
+}
+
+/*
+ * void
+ * ddi_periodic_delete(ddi_periodic_t req)
+ *
+ * INTERFACE LEVEL
+ * Solaris DDI specific (Solaris DDI)
+ *
+ * PARAMETERS
+ * req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
+ * previously.
+ *
+ * DESCRIPTION
+ * ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
+ * previously requested.
+ *
+ * ddi_periodic_delete(9F) will not return until the pending request
+ * is canceled or executed.
+ *
+ * As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
+ * timeout which is either running on another CPU, or has already
+ * completed causes no problems. However, unlike untimeout(9F), there are
+ * no restrictions on the lock which might be held across the call to
+ * ddi_periodic_delete(9F).
+ *
+ * Drivers should be structured with the understanding that the arrival of
+ * both an interrupt and a timeout for that interrupt can occasionally
+ * occur, in either order.
+ *
+ * CONTEXT
+ * ddi_periodic_delete(9F) can be called in user or kernel context, but
+ * it cannot be called in interrupt context, which is different from
+ * untimeout(9F).
+ */
+void
+ddi_periodic_delete(ddi_periodic_t req)
+{
+ /*
+ * Sanity check of the context. ddi_periodic_delete() cannot be
+ * called in either interrupt context or high interrupt context.
+ */
+ if (servicing_interrupt())
+ cmn_err(CE_PANIC,
+ "ddi_periodic_delete: called in (high) interrupt context.");
+
+ i_untimeout((timeout_t)req);
+}
+
dev_info_t *
nodevinfo(dev_t dev, int otyp)
{
diff --git a/usr/src/uts/common/sys/Makefile b/usr/src/uts/common/sys/Makefile
index ed9e8eedc5..9420af923f 100644
--- a/usr/src/uts/common/sys/Makefile
+++ b/usr/src/uts/common/sys/Makefile
@@ -160,6 +160,7 @@ CHKHDRS= \
ddi_impldefs.h \
ddi_implfuncs.h \
ddi_obsolete.h \
+ ddi_timer.h \
ddidevmap.h \
ddidmareq.h \
ddimapreq.h \
diff --git a/usr/src/uts/common/sys/avintr.h b/usr/src/uts/common/sys/avintr.h
index 9f907d58b1..05078c36cd 100644
--- a/usr/src/uts/common/sys/avintr.h
+++ b/usr/src/uts/common/sys/avintr.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -87,6 +86,7 @@ struct softint {
#ifdef _KERNEL
extern kmutex_t av_lock;
+extern ddi_softint_hdl_impl_t softlevel_hdl[];
extern ddi_softint_hdl_impl_t softlevel1_hdl;
extern int add_avintr(void *intr_id, int lvl, avfunc xxintr, char *name,
int vect, caddr_t arg1, caddr_t arg2, uint64_t *, dev_info_t *);
diff --git a/usr/src/uts/common/sys/bscv_impl.h b/usr/src/uts/common/sys/bscv_impl.h
index da4654393d..e11f92f6b5 100644
--- a/usr/src/uts/common/sys/bscv_impl.h
+++ b/usr/src/uts/common/sys/bscv_impl.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -292,7 +292,7 @@ struct {
#endif /* __sparc */
uint8_t last_event[8]; /* last event read and reported */
#if defined(__i386) || defined(__amd64)
- cyclic_id_t cyclic_id; /* watchdog patter cyclic timer */
+ ddi_periodic_t periodic_id; /* watchdog patter periodical callback */
callb_id_t callb_id; /* Need to store the ID so we can */
/* unschedule the panic callback */
char last_nodename[128]; /* copy of last utsname.nodename */
diff --git a/usr/src/uts/common/sys/ddi_timer.h b/usr/src/uts/common/sys/ddi_timer.h
new file mode 100644
index 0000000000..035a68f74f
--- /dev/null
+++ b/usr/src/uts/common/sys/ddi_timer.h
@@ -0,0 +1,151 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_DDI_TIMER_H
+#define _SYS_DDI_TIMER_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/list.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _KERNEL
+
+/*
+ * Used by the new timeout functions
+ */
+typedef struct __timeout *timeout_t;
+
+/*
+ * Forward declarations.
+ */
+struct cyc_timer;
+struct tm_req;
+
+/*
+ * Timing wheel cog.
+ * Each cog has a timeout request queue which is guarded by the lock
+ * here.
+ */
+typedef struct timer_tw {
+ list_t req; /* timeout request queue */
+ kmutex_t lock; /* lock for this queue */
+} timer_tw_t;
+
+/*
+ * Timer based on the cyclic subsystem.
+ * For each resolution, this timer structure should be allocated.
+ * Note. currently only one timer is used for periodic timeout requests,
+ * which is based on the system clock resolution.
+ */
+typedef struct cyc_timer {
+ hrtime_t res; /* this cyclic resolution */
+ hrtime_t tick; /* tick of this cyclic */
+ hrtime_t tick_time; /* current time on this timer */
+/*
+ * The hash size might need to be tuned if the lock contention is
+ * observed. So far the current size (1024) is sufficient though.
+ */
+#define TM_HASH_SZ (1024) /* must be power of 2 */
+#define TM_HASH(x) ((x) & (TM_HASH_SZ -1))
+ timer_tw_t idhash[TM_HASH_SZ]; /* ID hash */
+ timer_tw_t exhash[TM_HASH_SZ]; /* expiration time hash */
+} cyc_timer_t;
+
+/*
+ * This value determines how many requests within 10ms can be allocated to
+ * different slots. This is an exponential number powered by 2.
+ * This value should be tuned with the hash size.
+ * Note. This value is fixed now, but can be adjusted by checking the number
+ * of CPUs when the timer structure is allocated.
+ */
+#define TICK_FACTOR (3)
+
+/*
+ * Timer request.
+ */
+typedef struct tm_req {
+ struct list_node id_req; /* request on ID hash */
+ struct list_node ex_req; /* request on expire hash */
+ struct list_node disp_req; /* request on dispatch queue */
+ hrtime_t interval; /* interval this request needs */
+ hrtime_t exp_time; /* time when the request executes */
+ void (*handler)(void *); /* timeout handler */
+ void *arg; /* timeout argument */
+ kthread_t *h_thread; /* handler thread */
+ kmutex_t lock; /* lock for setting counter and flag */
+ kcondvar_t cv; /* condition variable against the lock */
+ timeout_t id; /* this request id */
+ int level; /* interrupt level */
+ volatile uint_t flags; /* flags passed to ddi_timeout() */
+ /*
+ * State flags
+ * These are used internally.
+ */
+#define TM_INVOKING 0x00000001 /* cyclic is invoked now */
+#define TM_EXECUTING 0x00000002 /* timeout is executed now */
+#define TM_CANCEL 0x00000004 /* request is canceled */
+#define	TM_TRANSFER	0x00000008	/* request is transferred */
+#define TM_COMPLETE 0x00000010 /* request is complete */
+#define TM_COMPWAIT 0x00000020 /* wait request completion */
+#define TM_UTMCOMP 0x00000040 /* untimeout is complete */
+ uint_t cnt; /* invoke counter */
+} tm_req_t;
+
+/*
+ * Software interrupt intr_state:
+ *
+ * 31 16 15 0
+ * +------------------+------------------+
+ * | interrupt start | interrupt set |
+ * +------------------+------------------+
+ *
+ * Note. This structure can accommodate interrupts up to the level 15,
+ * but supported interrupts are up to the level 10 in practice because
+ * of the ddi timer restriction.
+ */
+#define TM_INTR_SET(l) (1 << (l))
+#define TM_INTR_START(l) (1 << ((l) + 16))
+
+/*
+ * internal functions for the ddi timeout
+ */
+void timer_init(void);
+void cyclic_timer(void);
+void timer_softintr(int);
+timeout_t i_timeout(void (*)(void *), void *, hrtime_t, int);
+void i_untimeout(timeout_t);
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_DDI_TIMER_H */
diff --git a/usr/src/uts/common/sys/dditypes.h b/usr/src/uts/common/sys/dditypes.h
index 52b6198972..3ee81052a6 100644
--- a/usr/src/uts/common/sys/dditypes.h
+++ b/usr/src/uts/common/sys/dditypes.h
@@ -292,6 +292,11 @@ typedef struct {
uint_t flags;
} peekpoke_ctlops_t;
+/*
+ * Used by the high resolution timeout functions
+ */
+typedef struct __ddi_periodic *ddi_periodic_t;
+
#endif /* !_ASM */
/*
diff --git a/usr/src/uts/common/sys/sunddi.h b/usr/src/uts/common/sys/sunddi.h
index 69b62b20c4..40f047c8d5 100644
--- a/usr/src/uts/common/sys/sunddi.h
+++ b/usr/src/uts/common/sys/sunddi.h
@@ -2077,6 +2077,26 @@ boolean_t ddi_taskq_suspended(ddi_taskq_t *tq);
*/
int ddi_parse(const char *, char *, uint_t *);
+/*
+ * DDI interrupt priority level
+ */
+#define DDI_IPL_0 (0) /* kernel context */
+#define DDI_IPL_1 (1) /* interrupt priority level 1 */
+#define DDI_IPL_2 (2) /* interrupt priority level 2 */
+#define DDI_IPL_3 (3) /* interrupt priority level 3 */
+#define DDI_IPL_4 (4) /* interrupt priority level 4 */
+#define DDI_IPL_5 (5) /* interrupt priority level 5 */
+#define DDI_IPL_6 (6) /* interrupt priority level 6 */
+#define DDI_IPL_7 (7) /* interrupt priority level 7 */
+#define DDI_IPL_8 (8) /* interrupt priority level 8 */
+#define DDI_IPL_9 (9) /* interrupt priority level 9 */
+#define DDI_IPL_10 (10) /* interrupt priority level 10 */
+
+/*
+ * DDI periodic timeout interface
+ */
+ddi_periodic_t ddi_periodic_add(void (*)(void *), void *, hrtime_t, int);
+void ddi_periodic_delete(ddi_periodic_t);
#endif /* _KERNEL */
#ifdef __cplusplus
diff --git a/usr/src/uts/i86pc/io/pcplusmp/apic.c b/usr/src/uts/i86pc/io/pcplusmp/apic.c
index 386f609376..f31aa2dbcc 100644
--- a/usr/src/uts/i86pc/io/pcplusmp/apic.c
+++ b/usr/src/uts/i86pc/io/pcplusmp/apic.c
@@ -61,11 +61,12 @@
#include <sys/cpuvar.h>
#include <sys/rm_platter.h>
#include <sys/privregs.h>
-#include <sys/cyclic.h>
#include <sys/note.h>
#include <sys/pci_intr_lib.h>
#include <sys/spl.h>
#include <sys/clock.h>
+#include <sys/dditypes.h>
+#include <sys/sunddi.h>
/*
* Local Function Prototypes
@@ -1754,23 +1755,19 @@ apic_timer_disable(void)
}
-cyclic_id_t apic_cyclic_id;
+ddi_periodic_t apic_periodic_id;
/*
- * If this module needs to be a consumer of cyclic subsystem, they
- * can be added here, since at this time kernel cyclic subsystem is initialized
- * argument is not currently used, and is reserved for future.
+ * If this module needs a periodic handler for the interrupt distribution, it
+ * can be added here. The argument to the periodic handler is not currently
+ * used, but is reserved for future use.
*/
static void
apic_post_cyclic_setup(void *arg)
{
_NOTE(ARGUNUSED(arg))
- cyc_handler_t hdlr;
- cyc_time_t when;
-
/* cpu_lock is held */
-
- /* set up cyclics for intr redistribution */
+ /* set up a periodic handler for intr redistribution */
/*
* In peridoc mode intr redistribution processing is done in
@@ -1778,16 +1775,14 @@ _NOTE(ARGUNUSED(arg))
*/
if (!apic_oneshot)
return;
-
- hdlr.cyh_level = CY_LOW_LEVEL;
- hdlr.cyh_func = (cyc_func_t)apic_redistribute_compute;
- hdlr.cyh_arg = NULL;
-
- when.cyt_when = 0;
- when.cyt_interval = apic_redistribute_sample_interval;
- apic_cyclic_id = cyclic_add(&hdlr, &when);
-
-
+ /*
+	 * Register a periodic handler for the redistribution processing.
+ * On X86, CY_LOW_LEVEL is mapped to the level 2 interrupt, so
+ * DDI_IPL_2 should be passed to ddi_periodic_add() here.
+ */
+ apic_periodic_id = ddi_periodic_add(
+ (void (*)(void *))apic_redistribute_compute, NULL,
+ apic_redistribute_sample_interval, DDI_IPL_2);
}
static void
diff --git a/usr/src/uts/i86pc/os/startup.c b/usr/src/uts/i86pc/os/startup.c
index 41478d4116..cad59a9c1c 100644
--- a/usr/src/uts/i86pc/os/startup.c
+++ b/usr/src/uts/i86pc/os/startup.c
@@ -112,6 +112,7 @@
#include <sys/cpu_module.h>
#include <sys/smbios.h>
#include <sys/debug_info.h>
+#include <sys/ddi_timer.h>
#ifdef __xpv
#include <sys/hypervisor.h>
@@ -1909,6 +1910,7 @@ load_tod_module(char *todmod)
static void
startup_end(void)
{
+ int i;
extern void setx86isalist(void);
PRM_POINT("startup_end() starting...");
@@ -1984,6 +1986,17 @@ startup_end(void)
(void) add_avsoftintr((void *)&softlevel1_hdl, 1, softlevel1,
"softlevel1", NULL, NULL); /* XXX to be moved later */
+ /*
+ * Register these software interrupts for ddi timer.
+ * Software interrupts up to the level 10 are supported.
+ */
+ for (i = DDI_IPL_1; i <= DDI_IPL_10; i++) {
+ char name[sizeof ("timer_softintr") + 2];
+ (void) sprintf(name, "timer_softintr%02d", i);
+ (void) add_avsoftintr((void *)&softlevel_hdl[i-1], i,
+ (avfunc)timer_softintr, name, (caddr_t)(uintptr_t)i, NULL);
+ }
+
PRM_POINT("startup_end() done");
}
diff --git a/usr/src/uts/i86pc/sys/machsystm.h b/usr/src/uts/i86pc/sys/machsystm.h
index 2e8f2f9af7..fdaa21d218 100644
--- a/usr/src/uts/i86pc/sys/machsystm.h
+++ b/usr/src/uts/i86pc/sys/machsystm.h
@@ -62,6 +62,7 @@ extern void setcpudelay(void);
extern void send_dirint(int, int);
extern void siron(void);
+extern void sir_on(int);
extern void return_instr(void);
diff --git a/usr/src/uts/sun4/os/intr.c b/usr/src/uts/sun4/os/intr.c
index 615b579c17..7f70424a45 100644
--- a/usr/src/uts/sun4/os/intr.c
+++ b/usr/src/uts/sun4/os/intr.c
@@ -33,6 +33,7 @@
#include <sys/membar.h>
#include <sys/kmem.h>
#include <sys/intr.h>
+#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
@@ -44,6 +45,7 @@
#include <sys/debug.h>
#include <sys/cyclic.h>
#include <sys/kdi_impl.h>
+#include <sys/ddi_timer.h>
#include <sys/cpu_sgnblk_defs.h>
@@ -55,12 +57,13 @@ static kmutex_t intr_dist_cpu_lock;
static struct intr_dist *intr_dist_head = NULL;
static struct intr_dist *intr_dist_whead = NULL;
-uint64_t siron_inum;
+static uint64_t siron_inum[DDI_IPL_10]; /* software interrupt numbers */
uint64_t *siron_cpu_inum = NULL;
uint64_t siron_poke_cpu_inum;
static int siron_cpu_setup(cpu_setup_t, int, void *);
extern uint_t softlevel1();
+static uint64_t siron1_inum; /* backward compatibility */
uint64_t poke_cpu_inum;
uint_t poke_cpu_intr(caddr_t arg1, caddr_t arg2);
uint_t siron_poke_cpu_intr(caddr_t arg1, caddr_t arg2);
@@ -84,7 +87,8 @@ uint_t siron_poke_cpu_intr(caddr_t arg1, caddr_t arg2);
* stripped of its gatekeeper task, retaining only its intr_init job, where
* it indicates that there is a pending need to call siron().
*/
-int siron_pending;
+static int siron_pending[DDI_IPL_10]; /* software interrupt pending flags */
+static int siron1_pending; /* backward compatibility */
int intr_policy = INTR_WEIGHTED_DIST; /* interrupt distribution policy */
int intr_dist_debug = 0;
@@ -100,6 +104,7 @@ int intr_dist_weight_maxfactor = 2;
void
intr_init(cpu_t *cp)
{
+ int i;
extern uint_t softlevel1();
init_ivintr();
@@ -110,7 +115,16 @@ intr_init(cpu_t *cp)
* the work is done when CPU is configured.
*/
siron_cpu_inum = kmem_zalloc(sizeof (uint64_t) * NCPU, KM_SLEEP);
- siron_inum = add_softintr(PIL_1, softlevel1, 0, SOFTINT_ST);
+ /*
+ * Register these software interrupts for ddi timer.
+ * Software interrupts up to the level 10 are supported.
+ */
+ for (i = DDI_IPL_1; i <= DDI_IPL_10; i++) {
+ siron_inum[i-1] = add_softintr(i, (softintrfunc)timer_softintr,
+ (caddr_t)(uintptr_t)(i), SOFTINT_ST);
+ }
+
+ siron1_inum = add_softintr(PIL_1, softlevel1, 0, SOFTINT_ST);
poke_cpu_inum = add_softintr(PIL_13, poke_cpu_intr, 0, SOFTINT_MT);
siron_poke_cpu_inum = add_softintr(PIL_13,
siron_poke_cpu_intr, 0, SOFTINT_MT);
@@ -125,8 +139,14 @@ intr_init(cpu_t *cp)
* init_intr(), so we have to wait until now before we can dispatch the
* pending soft interrupt (if any).
*/
- if (siron_pending) {
- siron_pending = 0;
+ for (i = DDI_IPL_1; i <= DDI_IPL_10; i++) {
+ if (siron_pending[i-1]) {
+ siron_pending[i-1] = 0;
+ sir_on(i);
+ }
+ }
+ if (siron1_pending) {
+ siron1_pending = 0;
siron();
}
}
@@ -144,6 +164,19 @@ poke_cpu_intr(caddr_t arg1, caddr_t arg2)
}
/*
+ * Trigger software interrupts dedicated to ddi timer.
+ */
+void
+sir_on(int level)
+{
+ ASSERT(level >= DDI_IPL_1 && level <= DDI_IPL_10);
+ if (siron_inum[level-1])
+ setsoftint(siron_inum[level-1]);
+ else
+ siron_pending[level-1] = 1;
+}
+
+/*
* kmdb uses siron (and thus setsoftint) while the world is stopped in order to
* inform its driver component that there's work to be done. We need to keep
* DTrace from instrumenting kmdb's siron and setsoftint. We duplicate siron,
@@ -157,10 +190,10 @@ poke_cpu_intr(caddr_t arg1, caddr_t arg2)
void
kdi_siron(void)
{
- if (siron_inum != 0)
- kdi_setsoftint(siron_inum);
+ if (siron1_inum != 0)
+ kdi_setsoftint(siron1_inum);
else
- siron_pending = 1;
+ siron1_pending = 1;
}
void
@@ -178,15 +211,15 @@ siron(void)
{
uint64_t inum;
- if (siron_inum != 0) {
+ if (siron1_inum != 0) {
if (siron_cpu_inum[CPU->cpu_id] != 0)
inum = siron_cpu_inum[CPU->cpu_id];
else
- inum = siron_inum;
+ inum = siron1_inum;
setsoftint(inum);
} else
- siron_pending = 1;
+ siron1_pending = 1;
}
/*
diff --git a/usr/src/uts/sun4u/io/dmfe/dmfe_main.c b/usr/src/uts/sun4u/io/dmfe/dmfe_main.c
index ca15897d23..cd3d27b86b 100644
--- a/usr/src/uts/sun4u/io/dmfe/dmfe_main.c
+++ b/usr/src/uts/sun4u/io/dmfe/dmfe_main.c
@@ -25,6 +25,8 @@
#pragma ident "%Z%%M% %I% %E% SMI"
+#include <sys/types.h>
+#include <sys/sunddi.h>
#include <sys/dmfe_impl.h>
/*
@@ -447,7 +449,7 @@ dmfe_init_rings(dmfe_t *dmfep)
* Set the base address of the RX descriptor list in CSR3
*/
DMFE_DEBUG(("RX descriptor VA: $%p (DVMA $%x)",
- descp->mem_va, descp->mem_dvma));
+ descp->mem_va, descp->mem_dvma));
dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma);
/*
@@ -481,7 +483,7 @@ dmfe_init_rings(dmfe_t *dmfep)
* Set the base address of the TX descrptor list in CSR4
*/
DMFE_DEBUG(("TX descriptor VA: $%p (DVMA $%x)",
- descp->mem_va, descp->mem_dvma));
+ descp->mem_va, descp->mem_dvma));
dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma);
}
@@ -507,7 +509,7 @@ dmfe_start_chip(dmfe_t *dmfep, int mode)
*/
dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX :
- mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
+ mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
}
/*
@@ -544,11 +546,11 @@ dmfe_enable_interrupts(dmfe_t *dmfep)
* Put 'the standard set of interrupts' in the interrupt mask register
*/
dmfep->imask = RX_PKTDONE_INT | TX_PKTDONE_INT |
- RX_STOPPED_INT | TX_STOPPED_INT |
- RX_UNAVAIL_INT | SYSTEM_ERR_INT;
+ RX_STOPPED_INT | TX_STOPPED_INT |
+ RX_UNAVAIL_INT | SYSTEM_ERR_INT;
dmfe_chip_put32(dmfep, INT_MASK_REG,
- NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
+ NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
dmfep->chip_state = CHIP_RUNNING;
DMFE_DEBUG(("dmfe_enable_interrupts: imask 0x%x", dmfep->imask));
@@ -691,7 +693,7 @@ dmfe_getp(dmfe_t *dmfep)
packet_length = (desc0 >> 16) & 0x3fff;
if (packet_length > DMFE_MAX_PKT_SIZE) {
DMFE_DEBUG(("dmfe_getp: dropping oversize packet, "
- "length %d", packet_length));
+ "length %d", packet_length));
goto skip;
} else if (packet_length < ETHERMIN) {
/*
@@ -703,7 +705,7 @@ dmfe_getp(dmfe_t *dmfep)
* since the hardware should drop RUNT frames.
*/
DMFE_DEBUG(("dmfe_getp: dropping undersize packet, "
- "length %d", packet_length));
+ "length %d", packet_length));
goto skip;
}
@@ -716,8 +718,8 @@ dmfe_getp(dmfe_t *dmfep)
* discard these here so they don't get sent upstream ...)
*/
(void) ddi_dma_sync(dmfep->rx_buff.dma_hdl,
- index*DMFE_BUF_SIZE, DMFE_BUF_SIZE,
- DDI_DMA_SYNC_FORKERNEL);
+ index*DMFE_BUF_SIZE, DMFE_BUF_SIZE,
+ DDI_DMA_SYNC_FORKERNEL);
rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE];
@@ -888,7 +890,7 @@ dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1)
collisions = ((desc0 >> 3) & 0x0f);
errsum = desc0 & TX_ERR_SUMMARY;
errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS |
- TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO);
+ TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO);
if ((errsum == 0) != (errbits == 0)) {
dmfe_log(dmfep, "dubious TX error status 0x%x", desc0);
desc0 |= TX_ERR_SUMMARY;
@@ -1194,8 +1196,8 @@ dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;
(void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
- index*DMFE_BUF_SIZE, DMFE_BUF_SIZE,
- DDI_DMA_SYNC_FORDEV);
+ index*DMFE_BUF_SIZE, DMFE_BUF_SIZE,
+ DDI_DMA_SYNC_FORDEV);
}
@@ -1441,7 +1443,7 @@ dmfe_m_unicst(void *arg, const uint8_t *macaddr)
*/
for (index = 0; index < ETHERADDRL; index += 2)
dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2,
- (macaddr[index+1] << 8) | macaddr[index]);
+ (macaddr[index+1] << 8) | macaddr[index]);
/*
* Finally, we're ready to "transmit" the setup frame
@@ -1528,7 +1530,7 @@ dmfe_start(dmfe_t *dmfep)
ASSERT(mutex_owned(dmfep->oplock));
ASSERT(dmfep->chip_state == CHIP_RESET ||
- dmfep->chip_state == CHIP_STOPPED);
+ dmfep->chip_state == CHIP_STOPPED);
/*
* Make opmode consistent with PHY duplex setting
@@ -1766,7 +1768,7 @@ static void
dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why)
{
DMFE_DEBUG(("dmfe_wake_factotum: %s [%d] flag %d",
- why, ks_id, dmfep->factotum_flag));
+ why, ks_id, dmfep->factotum_flag));
ASSERT(mutex_owned(dmfep->oplock));
DRV_KS_INC(dmfep, ks_id);
@@ -1832,7 +1834,7 @@ dmfe_tick_link_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
if (why != NULL) {
DMFE_DEBUG(("dmfe_%s: link %d phy %d utp %d",
- why, dmfep->link_state, phy_state, utp_state));
+ why, dmfep->link_state, phy_state, utp_state));
dmfe_wake_factotum(dmfep, ks_id, why);
}
}
@@ -1884,7 +1886,7 @@ dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
dmfe_log(dmfep, "TX stall detected "
"after %d ticks in state %d; "
"automatic recovery initiated",
- dmfep->tx_pending_tix, tx_state);
+ dmfep->tx_pending_tix, tx_state);
tx_stall = B_TRUE;
}
}
@@ -2065,7 +2067,7 @@ dmfe_interrupt(caddr_t arg)
if (warning_msg)
dmfe_warning(dmfep, "abnormal interrupt, "
- "status 0x%x: %s", istat, msg);
+ "status 0x%x: %s", istat, msg);
/*
* We don't want to run the entire reinitialisation
@@ -2084,7 +2086,7 @@ dmfe_interrupt(caddr_t arg)
* in case ...
*/
DMFE_DEBUG(("unexpected interrupt bits: 0x%x",
- istat));
+ istat));
}
}
@@ -2436,7 +2438,7 @@ dmfe_loop_ioctl(dmfe_t *dmfep, queue_t *wq, mblk_t *mp, int cmd)
* Select any of the various loopback modes
*/
DMFE_DEBUG(("dmfe_loop_ioctl: SET_LOOP_MODE %d",
- loop_req_p->loopback));
+ loop_req_p->loopback));
switch (loop_req_p->loopback) {
default:
return (IOC_INVAL);
@@ -2621,7 +2623,7 @@ dmfe_find_mac_address(dmfe_t *dmfep)
*/
bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr));
err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo,
- DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
+ DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
if (err == DDI_PROP_SUCCESS) {
if (propsize == ETHERADDRL)
ethaddr_copy(prop, dmfep->curr_addr);
@@ -2629,7 +2631,7 @@ dmfe_find_mac_address(dmfe_t *dmfep)
}
DMFE_DEBUG(("dmfe_setup_mac_address: factory %s",
- ether_sprintf((void *)dmfep->curr_addr)));
+ ether_sprintf((void *)dmfep->curr_addr)));
}
static int
@@ -2645,7 +2647,7 @@ dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
* Allocate handle
*/
err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr,
- DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
+ DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
if (err != DDI_SUCCESS)
return (DDI_FAILURE);
@@ -2653,9 +2655,9 @@ dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
* Allocate memory
*/
err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop,
- attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
- DDI_DMA_SLEEP, NULL,
- &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
+ attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
+ DDI_DMA_SLEEP, NULL,
+ &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
if (err != DDI_SUCCESS)
return (DDI_FAILURE);
@@ -2663,8 +2665,8 @@ dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
* Bind the two together
*/
err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
- dma_p->mem_va, dma_p->alength, dma_flags,
- DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
+ dma_p->mem_va, dma_p->alength, dma_flags,
+ DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
if (err != DDI_DMA_MAPPED)
return (DDI_FAILURE);
if ((dma_p->ncookies = ncookies) != 1)
@@ -2696,8 +2698,8 @@ dmfe_alloc_bufs(dmfe_t *dmfep)
*/
memsize = dmfep->tx.n_desc*sizeof (struct tx_desc_type);
err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP,
- &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
- &dmfep->tx_desc);
+ &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &dmfep->tx_desc);
if (err != DDI_SUCCESS)
return (DDI_FAILURE);
@@ -2705,9 +2707,9 @@ dmfe_alloc_bufs(dmfe_t *dmfep)
* Allocate memory & handles for TX buffers
*/
memsize = dmfep->tx.n_desc*DMFE_BUF_SIZE,
- err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
- &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
- &dmfep->tx_buff);
+ err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
+ &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
+ &dmfep->tx_buff);
if (err != DDI_SUCCESS)
return (DDI_FAILURE);
@@ -2716,8 +2718,8 @@ dmfe_alloc_bufs(dmfe_t *dmfep)
*/
memsize = dmfep->rx.n_desc*sizeof (struct rx_desc_type);
err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP,
- &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
- &dmfep->rx_desc);
+ &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &dmfep->rx_desc);
if (err != DDI_SUCCESS)
return (DDI_FAILURE);
@@ -2725,9 +2727,9 @@ dmfe_alloc_bufs(dmfe_t *dmfep)
* Allocate memory & handles for RX buffers
*/
memsize = dmfep->rx.n_desc*DMFE_BUF_SIZE,
- err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
- &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE,
- &dmfep->rx_buff);
+ err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
+ &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE,
+ &dmfep->rx_buff);
if (err != DDI_SUCCESS)
return (DDI_FAILURE);
@@ -2783,12 +2785,11 @@ dmfe_unattach(dmfe_t *dmfep)
/*
* Clean up and free all DMFE data structures
*/
- if (dmfep->cycid != CYCLIC_NONE) {
- mutex_enter(&cpu_lock);
- cyclic_remove(dmfep->cycid);
- mutex_exit(&cpu_lock);
- dmfep->cycid = CYCLIC_NONE;
+ if (dmfep->cycid != NULL) {
+ ddi_periodic_delete(dmfep->cycid);
+ dmfep->cycid = NULL;
}
+
if (dmfep->ksp_drv != NULL)
kstat_delete(dmfep->ksp_drv);
if (dmfep->progress & PROGRESS_HWINT) {
@@ -2834,7 +2835,7 @@ dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
- regval & ~(CFDD_SLEEP | CFDD_SNOOZE));
+ regval & ~(CFDD_SLEEP | CFDD_SNOOZE));
pci_config_teardown(&handle);
return (DDI_SUCCESS);
@@ -2881,12 +2882,12 @@ dmfe_init_kstats(dmfe_t *dmfep, int instance)
/* Create and initialise driver-defined kstats */
ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
- KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
+ KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
if (ksp != NULL) {
for (knp = ksp->ks_data, ksip = ks_drv_names;
- ksip->name != NULL; ++ksip) {
+ ksip->name != NULL; ++ksip) {
kstat_named_init(&knp[ksip->index], ksip->name,
- KSTAT_DATA_UINT64);
+ KSTAT_DATA_UINT64);
}
dmfep->ksp_drv = ksp;
dmfep->knp_drv = knp;
@@ -2943,8 +2944,6 @@ static int
dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
mac_register_t *macp;
- cyc_handler_t cychand;
- cyc_time_t cyctime;
dmfe_t *dmfep; /* Our private data */
uint32_t csr6;
int instance;
@@ -2974,11 +2973,11 @@ dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
*/
#if DMFEDEBUG
dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
- debug_propname, dmfe_debug);
+ debug_propname, dmfe_debug);
#endif /* DMFEDEBUG */
- dmfep->cycid = CYCLIC_NONE;
+ dmfep->cycid = NULL;
(void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
- instance);
+ instance);
dmfe_find_mac_address(dmfep);
/*
@@ -2987,7 +2986,7 @@ dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
*/
csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
- DDI_PROP_DONTPASS, opmode_propname, csr6);
+ DDI_PROP_DONTPASS, opmode_propname, csr6);
/*
* Read chip ID & set up config space command register(s)
@@ -3011,7 +3010,7 @@ dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
* Map operating registers
*/
err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
- &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
+ &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
if (err != DDI_SUCCESS) {
dmfe_error(dmfep, "ddi_regs_map_setup() failed");
goto attach_fail;
@@ -3035,7 +3034,7 @@ dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
*/
dmfep->link_poll_tix = factotum_start_tix;
if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
- NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
+ NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
dmfe_error(dmfep, "ddi_add_softintr() failed");
goto attach_fail;
}
@@ -3045,7 +3044,7 @@ dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
* Add the h/w interrupt handler & initialise mutexen
*/
if (ddi_add_intr(devinfo, 0, &dmfep->iblk, NULL,
- dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
+ dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
dmfe_error(dmfep, "ddi_add_intr() failed");
goto attach_fail;
}
@@ -3120,19 +3119,12 @@ dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
/*
* Install the cyclic callback that we use to check for link
- * status, transmit stall, etc. We ASSERT that this can't fail
- */
- cychand.cyh_func = dmfe_cyclic;
- cychand.cyh_arg = dmfep;
- cychand.cyh_level = CY_LOW_LEVEL;
- cyctime.cyt_when = 0;
- cyctime.cyt_interval = dmfe_tick_us*1000; /* ns */
- ASSERT(dmfep->cycid == CYCLIC_NONE);
- mutex_enter(&cpu_lock);
- dmfep->cycid = cyclic_add(&cychand, &cyctime);
- mutex_exit(&cpu_lock);
- ASSERT(dmfep->cycid != CYCLIC_NONE);
-
+ * status, transmit stall, etc. The cyclic callback (dmfe_cyclic())
+ * is invoked in kernel context then.
+ */
+ ASSERT(dmfep->cycid == NULL);
+ dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
+ dmfe_tick_us * 1000, DDI_IPL_0);
return (DDI_SUCCESS);
attach_fail:
diff --git a/usr/src/uts/sun4u/io/rmc_comm.c b/usr/src/uts/sun4u/io/rmc_comm.c
index f17ea1e849..f999268cd5 100644
--- a/usr/src/uts/sun4u/io/rmc_comm.c
+++ b/usr/src/uts/sun4u/io/rmc_comm.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* The "rmc_comm" driver provides access to the RMC so that its clients need
@@ -35,7 +35,6 @@
* Header files
*/
#include <sys/conf.h>
-#include <sys/cyclic.h>
#include <sys/membar.h>
#include <sys/modctl.h>
#include <sys/strlog.h>
@@ -237,7 +236,7 @@ static void
sio_check_fault_status(struct rmc_comm_state *rcs)
{
rcs->sd_state.sio_fault =
- ddi_check_acc_handle(rcs->sd_state.sio_handle) != DDI_SUCCESS;
+ ddi_check_acc_handle(rcs->sd_state.sio_handle) != DDI_SUCCESS;
}
boolean_t
@@ -305,7 +304,7 @@ rmc_comm_hi_intr(caddr_t arg)
uint_t claim;
claim = DDI_INTR_UNCLAIMED;
- if (rcs->sd_state.cycid != CYCLIC_NONE) {
+ if (rcs->sd_state.cycid != NULL) {
/*
* Handle the case where this interrupt fires during
* panic processing. If that occurs, then a thread
@@ -597,11 +596,9 @@ rmc_comm_online(struct rmc_comm_state *rcs, dev_info_t *dip)
int
rmc_comm_serdev_init(struct rmc_comm_state *rcs, dev_info_t *dip)
{
- cyc_handler_t cychand;
- cyc_time_t cyctime;
int err = DDI_SUCCESS;
- rcs->sd_state.cycid = CYCLIC_NONE;
+ rcs->sd_state.cycid = NULL;
/*
* Online the hardware ...
@@ -656,7 +653,7 @@ rmc_comm_serdev_init(struct rmc_comm_state *rcs, dev_info_t *dip)
if (rcs->sd_state.sio_handle != NULL) {
err = ddi_add_intr(dip, 0, &rcs->sd_state.hw_iblk, NULL,
- rmc_comm_hi_intr, (caddr_t)rcs);
+ rmc_comm_hi_intr, (caddr_t)rcs);
/*
* did we successfully install the h/w interrupt handler?
@@ -669,20 +666,11 @@ rmc_comm_serdev_init(struct rmc_comm_state *rcs, dev_info_t *dip)
}
}
-
/*
- * Start cyclic callbacks
+ * Start periodical callbacks
*/
-
- cychand.cyh_func = rmc_comm_cyclic;
- cychand.cyh_arg = rcs;
- cychand.cyh_level = CY_LOW_LEVEL;
- cyctime.cyt_when = 0; /* from the next second */
- cyctime.cyt_interval = 5*RMC_COMM_ONE_SEC; /* call at 5s intervals */
- mutex_enter(&cpu_lock);
- rcs->sd_state.cycid = cyclic_add(&cychand, &cyctime);
- mutex_exit(&cpu_lock);
-
+ rcs->sd_state.cycid = ddi_periodic_add(rmc_comm_cyclic, rcs,
+ 5 * RMC_COMM_ONE_SEC, DDI_IPL_1);
return (0);
}
@@ -696,10 +684,9 @@ rmc_comm_serdev_fini(struct rmc_comm_state *rcs, dev_info_t *dip)
{
rmc_comm_hw_reset(rcs);
- if (rcs->sd_state.cycid != CYCLIC_NONE) {
- mutex_enter(&cpu_lock);
- cyclic_remove(rcs->sd_state.cycid);
- mutex_exit(&cpu_lock);
+ if (rcs->sd_state.cycid != NULL) {
+ ddi_periodic_delete(rcs->sd_state.cycid);
+ rcs->sd_state.cycid = NULL;
if (rcs->sd_state.sio_handle != NULL)
ddi_remove_intr(dip, 0, rcs->sd_state.hw_iblk);
@@ -796,12 +783,12 @@ rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
mutex_exit(rcs->dp_state.dp_mutex);
current_sgn_p = (sig_state_t *)modgetsymvalue(
- "current_sgn", 0);
+ "current_sgn", 0);
if ((current_sgn_p != NULL) &&
- (current_sgn_p->state_t.sig != 0)) {
+ (current_sgn_p->state_t.sig != 0)) {
CPU_SIGNATURE(current_sgn_p->state_t.sig,
- current_sgn_p->state_t.state,
- current_sgn_p->state_t.sub_state, -1);
+ current_sgn_p->state_t.state,
+ current_sgn_p->state_t.sub_state, -1);
}
return (DDI_SUCCESS);
@@ -844,7 +831,7 @@ rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
*/
if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) ||
(rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX))
- rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;
+ rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;
/*
* initialize serial device
@@ -996,7 +983,7 @@ _init(void)
mutex_init(&rmc_comm_attach_lock, NULL, MUTEX_DRIVER, NULL);
err = ddi_soft_state_init(&rmc_comm_statep,
- sizeof (struct rmc_comm_state), 0);
+ sizeof (struct rmc_comm_state), 0);
if (err == DDI_SUCCESS)
if ((err = mod_install(&modlinkage)) != 0) {
ddi_soft_state_fini(&rmc_comm_statep);
diff --git a/usr/src/uts/sun4u/io/todds1337.c b/usr/src/uts/sun4u/io/todds1337.c
index 5de43c2c53..1a826fc279 100644
--- a/usr/src/uts/sun4u/io/todds1337.c
+++ b/usr/src/uts/sun4u/io/todds1337.c
@@ -43,7 +43,6 @@
#include <sys/poll.h>
#include <sys/pbio.h>
#include <sys/sysmacros.h>
-#include <sys/cyclic.h>
/* Added for prom interface */
#include <sys/promif.h>
@@ -157,7 +156,7 @@ _init(void)
if (strcmp(tod_module_name, "todds1337") == 0) {
if ((error = ddi_soft_state_init(&ds1337_statep,
- sizeof (ds1337_state_t), 0)) != DDI_SUCCESS) {
+ sizeof (ds1337_state_t), 0)) != DDI_SUCCESS) {
return (error);
}
@@ -233,9 +232,6 @@ todds1337_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
static ds1337_state_t *statep = NULL;
i2c_transfer_t *i2c_tp = NULL;
uint8_t tempVal = (uint8_t)0;
- cyc_handler_t cychand;
- cyc_time_t cyctime;
-
switch (cmd) {
case DDI_ATTACH:
break;
@@ -354,18 +350,12 @@ todds1337_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
(void) i2c_transfer_free(statep->ds1337_i2c_hdl, i2c_tp);
}
- /* Create a cyclic handler to read TOD */
- cychand.cyh_func = todds1337_cyclic;
- cychand.cyh_arg = &soft_rtc;
- cychand.cyh_level = CY_LOW_LEVEL;
- cyctime.cyt_when = 0;
- cyctime.cyt_interval = i2c_cyclic_timeout;
- ASSERT(statep->cycid == CYCLIC_NONE);
- mutex_enter(&cpu_lock);
- statep->cycid = cyclic_add(&cychand, &cyctime);
- mutex_exit(&cpu_lock);
- ASSERT(statep->cycid != CYCLIC_NONE);
-
+ /*
+ * Create a periodical handler to read TOD.
+ */
+ ASSERT(statep->cycid == NULL);
+ statep->cycid = ddi_periodic_add(todds1337_cyclic, &soft_rtc,
+ i2c_cyclic_timeout, DDI_IPL_1);
statep->state = TOD_ATTACHED;
todds1337_attach_done = 1;
ddi_report_dev(dip);
@@ -777,9 +767,9 @@ todds1337_read_rtc(struct rtc_t *rtc)
i2c_tp)) != I2C_SUCCESS) {
goto done;
}
- /* for first read, need to get valid data */
- while (tod_read[0] == -1 && counter > 0) {
- /* move data to static buffer */
+ /* for first read, need to get valid data */
+ while (tod_read[0] == -1 && counter > 0) {
+ /* move data to static buffer */
bcopy(i2c_tp->i2c_rbuf, tod_read, 7);
/* now read again */
@@ -789,22 +779,22 @@ todds1337_read_rtc(struct rtc_t *rtc)
i2c_tp->i2c_rlen = 7; /* Read 7 regs */
if ((i2c_cmd_status = i2c_transfer(statep->ds1337_i2c_hdl,
i2c_tp)) != I2C_SUCCESS) {
- goto done;
+ goto done;
}
/* if they are not the same, then read again */
if (bcmp(tod_read, i2c_tp->i2c_rbuf, 7) != 0) {
- tod_read[0] = -1;
- counter--;
+ tod_read[0] = -1;
+ counter--;
}
- }
+ }
} while (i2c_tp->i2c_rbuf[0] == 0x59 &&
/* if seconds register is 0x59 (BCD), add data should match */
- bcmp(&tod_read[1], &i2c_tp->i2c_rbuf[1], 6) != 0 &&
- counter-- > 0);
+ bcmp(&tod_read[1], &i2c_tp->i2c_rbuf[1], 6) != 0 &&
+ counter-- > 0);
if (counter < 0)
- cmn_err(CE_WARN, "i2ctod: TOD Chip failed ??");
+ cmn_err(CE_WARN, "i2ctod: TOD Chip failed ??");
/* move data to static buffer */
bcopy(i2c_tp->i2c_rbuf, tod_read, 7);
diff --git a/usr/src/uts/sun4u/lw2plus/io/lombus.c b/usr/src/uts/sun4u/lw2plus/io/lombus.c
index f2384e4a52..0ea088fb64 100644
--- a/usr/src/uts/sun4u/lw2plus/io/lombus.c
+++ b/usr/src/uts/sun4u/lw2plus/io/lombus.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* The "lombus" driver provides access to the LOMlite2 virtual registers,
@@ -41,7 +40,6 @@
#include <sys/types.h>
#include <sys/conf.h>
-#include <sys/cyclic.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/file.h>
@@ -265,7 +263,7 @@ struct lombus_state {
ddi_acc_handle_t sio_handle;
uint8_t *sio_regs;
ddi_softintr_t softid;
- cyclic_id_t cycid;
+ ddi_periodic_t cycid; /* periodical callback */
/*
* Parameters derived from .conf properties
@@ -358,7 +356,7 @@ lombus_trace(struct lombus_state *ssp, char code, const char *caller,
if (ssp->debug & (1 << (code-'@'))) {
p = buf;
snprintf(p, sizeof (buf) - (p - buf),
- "%s/%s: ", MYNAME, caller);
+ "%s/%s: ", MYNAME, caller);
p += strlen(p);
va_start(va, fmt);
@@ -522,7 +520,7 @@ sio_lom_ready(struct lombus_state *ssp)
rslt = (status & SIO_MSR_CTS) != 0 && !sio_faulty(ssp);
lombus_trace(ssp, 'R', "sio_lom_ready", "S $%02x R %d F %d",
- status, rslt, ssp->fake_cts);
+ status, rslt, ssp->fake_cts);
return (rslt || ssp->fake_cts);
}
@@ -549,7 +547,7 @@ sio_irq_pending(struct lombus_state *ssp)
rslt = (status & SIO_EIR_IPF) == 0 && !sio_faulty(ssp);
lombus_trace(ssp, 'I', "sio_irq_pending", "S $%02x R %d",
- status, rslt);
+ status, rslt);
/*
* To investigate whether we're getting any abnormal interrupts
@@ -622,7 +620,7 @@ lombus_hi_intr(caddr_t arg)
uint_t claim;
claim = DDI_INTR_UNCLAIMED;
- if (ssp->cycid != CYCLIC_NONE) {
+ if (ssp->cycid != NULL) {
mutex_enter(ssp->hw_mutex);
if (ssp->hw_int_enabled) {
lombus_set_irq(ssp, B_FALSE);
@@ -651,8 +649,8 @@ lombus_receive(struct lombus_state *ssp)
uint8_t tmp;
lombus_trace(ssp, 'S', "lombus_receive",
- "state %d; error $%x",
- ssp->cmdstate, ssp->error);
+ "state %d; error $%x",
+ ssp->cmdstate, ssp->error);
/*
* Check for access faults before starting the receive
@@ -680,12 +678,12 @@ lombus_receive(struct lombus_state *ssp)
}
lombus_trace(ssp, 'S', "lombus_receive",
- "rcvd %d: $%02x $%02x $%02x $%02x $%02x $%02x $%02x $%02x",
- rcvd,
- ssp->reply[0], ssp->reply[1],
- ssp->reply[2], ssp->reply[3],
- ssp->reply[4], ssp->reply[5],
- ssp->reply[6], ssp->reply[7]);
+ "rcvd %d: $%02x $%02x $%02x $%02x $%02x $%02x $%02x $%02x",
+ rcvd,
+ ssp->reply[0], ssp->reply[1],
+ ssp->reply[2], ssp->reply[3],
+ ssp->reply[4], ssp->reply[5],
+ ssp->reply[6], ssp->reply[7]);
if (ssp->cmdstate != LOMBUS_CMDSTATE_WAITING) {
/*
@@ -738,12 +736,12 @@ lombus_receive(struct lombus_state *ssp)
*/
if (ssp->allow_echo) {
lombus_trace(ssp, 'E', "lombus_receive",
- "echo $%02x $%02x $%02x $%02x "
- "$%02x $%02x $%02x $%02x",
- ssp->reply[0], ssp->reply[1],
- ssp->reply[2], ssp->reply[3],
- ssp->reply[4], ssp->reply[5],
- ssp->reply[6], ssp->reply[7]);
+ "echo $%02x $%02x $%02x $%02x "
+ "$%02x $%02x $%02x $%02x",
+ ssp->reply[0], ssp->reply[1],
+ ssp->reply[2], ssp->reply[3],
+ ssp->reply[4], ssp->reply[5],
+ ssp->reply[6], ssp->reply[7]);
ssp->index = 0;
} else {
ssp->cmdstate = LOMBUS_CMDSTATE_ERROR;
@@ -781,8 +779,8 @@ lombus_receive(struct lombus_state *ssp)
}
lombus_trace(ssp, 'T', "lombus_receive",
- "rcvd %d; last $%02x; state %d; error $%x; ready %d",
- rcvd, data, ssp->cmdstate, ssp->error, ready);
+ "rcvd %d; last $%02x; state %d; error $%x; ready %d",
+ rcvd, data, ssp->cmdstate, ssp->error, ready);
if (ready)
cv_broadcast(ssp->lo_cv);
@@ -1866,8 +1864,8 @@ lombus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
return (DDI_ME_INVAL);
return (lombus_map_handle(ssp, mp->map_op,
- rsp->lombus_space, VREG_TO_ADDR(rsp->lombus_base+off), len,
- mp->map_handlep, addrp));
+ rsp->lombus_space, VREG_TO_ADDR(rsp->lombus_base+off), len,
+ mp->map_handlep, addrp));
}
static int
@@ -1909,7 +1907,7 @@ lombus_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
*/
cdip = arg;
err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
- DDI_PROP_DONTPASS, "reg", &regs, &nregs);
+ DDI_PROP_DONTPASS, "reg", &regs, &nregs);
lombus_trace(ssp, 'C', "initchild",
"prop status %d size %d", err, nregs);
if (err != DDI_PROP_SUCCESS)
@@ -1960,7 +1958,7 @@ lombus_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
ddi_set_parent_data(cdip, lcip);
(void) snprintf(addr, sizeof (addr),
- "%x,%x", rsp[0].lombus_space, rsp[0].lombus_base);
+ "%x,%x", rsp[0].lombus_space, rsp[0].lombus_base);
ddi_set_name_addr(cdip, addr);
return (DDI_SUCCESS);
@@ -1979,8 +1977,8 @@ lombus_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
return (DDI_FAILURE);
cmn_err(CE_CONT, "?LOM device: %s@%s, %s#%d\n",
- ddi_node_name(rdip), ddi_get_name_addr(rdip),
- ddi_driver_name(dip), ddi_get_instance(dip));
+ ddi_node_name(rdip), ddi_get_name_addr(rdip),
+ ddi_driver_name(dip), ddi_get_instance(dip));
return (DDI_SUCCESS);
@@ -2011,10 +2009,9 @@ lombus_unattach(struct lombus_state *ssp, int instance)
{
if (ssp != NULL) {
lombus_hw_reset(ssp);
- if (ssp->cycid != CYCLIC_NONE) {
- mutex_enter(&cpu_lock);
- cyclic_remove(ssp->cycid);
- mutex_exit(&cpu_lock);
+ if (ssp->cycid != NULL) {
+ ddi_periodic_delete(ssp->cycid);
+ ssp->cycid = NULL;
if (ssp->sio_handle != NULL)
ddi_remove_intr(ssp->dip, 0, ssp->hw_iblk);
ddi_remove_softintr(ssp->softid);
@@ -2038,8 +2035,6 @@ static int
lombus_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
struct lombus_state *ssp = NULL;
- cyc_handler_t cychand;
- cyc_time_t cyctime;
int instance;
int err;
@@ -2072,20 +2067,20 @@ lombus_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
* Set various options from .conf properties
*/
ssp->allow_echo = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, "allow-lom-echo", 0) != 0;
+ DDI_PROP_DONTPASS, "allow-lom-echo", 0) != 0;
ssp->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, "baud-rate", 0);
+ DDI_PROP_DONTPASS, "baud-rate", 0);
ssp->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, "debug", 0);
+ DDI_PROP_DONTPASS, "debug", 0);
ssp->fake_cts = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, "fake-cts", 0) != 0;
+ DDI_PROP_DONTPASS, "fake-cts", 0) != 0;
/*
* Initialise current state & time
*/
ssp->cmdstate = LOMBUS_CMDSTATE_IDLE;
ssp->hw_last_pat = gethrtime();
- ssp->cycid = CYCLIC_NONE;
+ ssp->cycid = NULL;
/*
* Online the hardware ...
@@ -2101,26 +2096,23 @@ lombus_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
* Enable interrupts
*/
err = ddi_add_softintr(dip, DDI_SOFTINT_LOW, &ssp->softid,
- &ssp->lo_iblk, NULL, lombus_softint, (caddr_t)ssp);
+ &ssp->lo_iblk, NULL, lombus_softint, (caddr_t)ssp);
if (err != DDI_SUCCESS)
return (lombus_unattach(ssp, instance));
if (ssp->sio_handle != NULL)
err = ddi_add_intr(dip, 0, &ssp->hw_iblk, NULL,
- lombus_hi_intr, (caddr_t)ssp);
+ lombus_hi_intr, (caddr_t)ssp);
mutex_init(ssp->hw_mutex, NULL, MUTEX_DRIVER, ssp->hw_iblk);
mutex_init(ssp->lo_mutex, NULL, MUTEX_DRIVER, ssp->lo_iblk);
cv_init(ssp->lo_cv, NULL, CV_DRIVER, NULL);
- cychand.cyh_func = lombus_cyclic;
- cychand.cyh_arg = ssp;
- cychand.cyh_level = CY_LOW_LEVEL;
- cyctime.cyt_when = 0; /* from the next second */
- cyctime.cyt_interval = LOMBUS_ONE_SEC; /* call at 1s intervals */
- mutex_enter(&cpu_lock);
- ssp->cycid = cyclic_add(&cychand, &cyctime);
- mutex_exit(&cpu_lock);
+ /*
+ * Register a periodical handler.
+ */
+ ssp->cycid = ddi_periodic_add(lombus_cyclic, ssp, LOMBUS_ONE_SEC,
+ DDI_IPL_1);
/*
* Final check before enabling h/w interrupts - did
@@ -2273,7 +2265,7 @@ _init(void)
int err;
err = ddi_soft_state_init(&lombus_statep,
- sizeof (struct lombus_state), 0);
+ sizeof (struct lombus_state), 0);
if (err == DDI_SUCCESS)
if ((err = mod_install(&modlinkage)) != 0) {
ddi_soft_state_fini(&lombus_statep);
diff --git a/usr/src/uts/sun4u/snowbird/io/todds1307/todds1307.c b/usr/src/uts/sun4u/snowbird/io/todds1307/todds1307.c
index 8f8bdfa278..dbe082e0e2 100644
--- a/usr/src/uts/sun4u/snowbird/io/todds1307/todds1307.c
+++ b/usr/src/uts/sun4u/snowbird/io/todds1307/todds1307.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -44,7 +43,6 @@
#include <sys/poll.h>
#include <sys/pbio.h>
#include <sys/sysmacros.h>
-#include <sys/cyclic.h>
/* Added for prom interface */
#include <sys/promif.h>
@@ -164,7 +162,7 @@ _init(void)
if (strcmp(tod_module_name, "todds1307") == 0) {
if ((error = ddi_soft_state_init(&ds1307_statep,
- sizeof (ds1307_state_t), 0)) != DDI_SUCCESS) {
+ sizeof (ds1307_state_t), 0)) != DDI_SUCCESS) {
return (error);
}
@@ -172,7 +170,7 @@ _init(void)
tod_ops.tod_set = todds1307_set;
tod_ops.tod_set_watchdog_timer = todds1307_set_watchdog_timer;
tod_ops.tod_clear_watchdog_timer =
- todds1307_clear_watchdog_timer;
+ todds1307_clear_watchdog_timer;
tod_ops.tod_set_power_alarm = todds1307_set_power_alarm;
tod_ops.tod_clear_power_alarm = todds1307_clear_power_alarm;
}
@@ -236,9 +234,6 @@ todds1307_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
static ds1307_state_t *statep = NULL;
i2c_transfer_t *i2c_tp = NULL;
uint8_t tempVal = (uint8_t)0;
- cyc_handler_t cychand;
- cyc_time_t cyctime;
-
switch (cmd) {
case DDI_ATTACH:
@@ -278,7 +273,7 @@ todds1307_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
/* check and initialize the oscillator */
(void) i2c_transfer_alloc(statep->ds1307_i2c_hdl,
- &i2c_tp, 1, 1, I2C_SLEEP);
+ &i2c_tp, 1, 1, I2C_SLEEP);
i2c_tp->i2c_version = I2C_XFER_REV;
i2c_tp->i2c_flags = I2C_WR_RD;
i2c_tp->i2c_wbuf[0] = (uchar_t)0x00; /* Read 00h */
@@ -298,7 +293,7 @@ todds1307_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
if (tempVal & 0x80) { /* check Oscillator */
(void) i2c_transfer_alloc(statep->ds1307_i2c_hdl, &i2c_tp,
- 2, 1, I2C_SLEEP);
+ 2, 1, I2C_SLEEP);
i2c_tp->i2c_version = I2C_XFER_REV;
i2c_tp->i2c_flags = I2C_WR;
i2c_tp->i2c_wbuf[0] = 0x00;
@@ -307,26 +302,21 @@ todds1307_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
i2c_tp->i2c_wlen = 2;
/* Enable oscillator */
if ((i2c_transfer(statep->ds1307_i2c_hdl, i2c_tp))
- != I2C_SUCCESS) {
+ != I2C_SUCCESS) {
(void) i2c_transfer_free(statep->ds1307_i2c_hdl,
- i2c_tp);
+ i2c_tp);
ddi_soft_state_free(ds1307_statep, instance);
return (DDI_FAILURE);
}
(void) i2c_transfer_free(statep->ds1307_i2c_hdl, i2c_tp);
}
- /* Create a cyclic handler to read TOD */
- cychand.cyh_func = todds1307_cyclic;
- cychand.cyh_arg = &soft_rtc;
- cychand.cyh_level = CY_LOW_LEVEL;
- cyctime.cyt_when = 0;
- cyctime.cyt_interval = i2c_cyclic_timeout;
- ASSERT(statep->cycid == CYCLIC_NONE);
- mutex_enter(&cpu_lock);
- statep->cycid = cyclic_add(&cychand, &cyctime);
- mutex_exit(&cpu_lock);
- ASSERT(statep->cycid != CYCLIC_NONE);
+ /*
+ * Create a periodical handler to read TOD.
+ */
+ ASSERT(statep->cycid == NULL);
+ statep->cycid = ddi_periodic_add(todds1307_cyclic, &soft_rtc,
+ i2c_cyclic_timeout, DDI_IPL_1);
statep->state = TOD_ATTACHED;
todds1307_attach_done = 1;
@@ -509,7 +499,7 @@ todds1307_read_rtc(struct rtc_t *rtc)
* to accomodate sec, min, hrs, dayOfWeek, dayOfMonth, year
*/
if ((i2c_transfer_alloc(statep->ds1307_i2c_hdl, &i2c_tp, 1,
- 7, I2C_SLEEP)) != I2C_SUCCESS) {
+ 7, I2C_SLEEP)) != I2C_SUCCESS) {
mutex_exit(&todds1307_rd_lock);
return (DDI_FAILURE);
}
@@ -522,13 +512,13 @@ todds1307_read_rtc(struct rtc_t *rtc)
i2c_tp->i2c_rlen = 7; /* Read 7 regs */
if ((i2c_cmd_status = i2c_transfer(statep->ds1307_i2c_hdl,
- i2c_tp)) != I2C_SUCCESS) {
+ i2c_tp)) != I2C_SUCCESS) {
drv_usecwait(I2C_DELAY);
goto done;
}
- /* for first read, need to get valid data */
- while (tod_read[0] == -1 && counter > 0) {
- /* move data to static buffer */
+ /* for first read, need to get valid data */
+ while (tod_read[0] == -1 && counter > 0) {
+ /* move data to static buffer */
bcopy(i2c_tp->i2c_rbuf, tod_read, 7);
/* now read again */
@@ -537,16 +527,16 @@ todds1307_read_rtc(struct rtc_t *rtc)
i2c_tp->i2c_wlen = 1; /* Write one byte address */
i2c_tp->i2c_rlen = 7; /* Read 7 regs */
if ((i2c_cmd_status = i2c_transfer(statep->ds1307_i2c_hdl,
- i2c_tp)) != I2C_SUCCESS) {
- drv_usecwait(I2C_DELAY);
- goto done;
+ i2c_tp)) != I2C_SUCCESS) {
+ drv_usecwait(I2C_DELAY);
+ goto done;
}
/* if they are not the same, then read again */
if (bcmp(tod_read, i2c_tp->i2c_rbuf, 7) != 0) {
- tod_read[0] = -1;
- counter--;
+ tod_read[0] = -1;
+ counter--;
}
- }
+ }
} while (i2c_tp->i2c_rbuf[0] == 0x59 &&
/* if seconds register is 0x59 (BCD), add data should match */
@@ -554,7 +544,7 @@ todds1307_read_rtc(struct rtc_t *rtc)
counter-- > 0);
if (counter < 0)
- cmn_err(CE_WARN, "i2ctod: TOD Chip failed ??");
+ cmn_err(CE_WARN, "i2ctod: TOD Chip failed ??");
/* move data to static buffer */
bcopy(i2c_tp->i2c_rbuf, tod_read, 7);
@@ -594,7 +584,7 @@ todds1307_write_rtc(struct rtc_t *rtc)
}
if ((i2c_cmd_status = i2c_transfer_alloc(statep->ds1307_i2c_hdl,
- &i2c_tp, 8, 0, I2C_SLEEP)) != I2C_SUCCESS) {
+ &i2c_tp, 8, 0, I2C_SLEEP)) != I2C_SUCCESS) {
return (i2c_cmd_status);
}
@@ -611,7 +601,7 @@ todds1307_write_rtc(struct rtc_t *rtc)
i2c_tp->i2c_wlen = 8;
if ((i2c_cmd_status = i2c_transfer(statep->ds1307_i2c_hdl,
- i2c_tp)) != I2C_SUCCESS) {
+ i2c_tp)) != I2C_SUCCESS) {
(void) i2c_transfer_free(statep->ds1307_i2c_hdl, i2c_tp);
/* delay(drv_usectohz(I2C_DELAY)); */
drv_usecwait(I2C_DELAY);
@@ -680,7 +670,7 @@ todds1307_setup_prom()
char tod1307_devpath[MAXNAMELEN];
if ((todnode = prom_findnode_bydevtype(prom_rootnode(),
- DS1307_DEVICE_TYPE)) == OBP_NONODE)
+ DS1307_DEVICE_TYPE)) == OBP_NONODE)
return (DDI_FAILURE);
/*
@@ -688,7 +678,7 @@ todds1307_setup_prom()
* node and get the ihandle
*/
if (prom_phandle_to_path(todnode, tod1307_devpath,
- sizeof (tod1307_devpath)) < 0) {
+ sizeof (tod1307_devpath)) < 0) {
cmn_err(CE_WARN, "prom_phandle_to_path failed");
return (DDI_FAILURE);
}
diff --git a/usr/src/uts/sun4u/snowbird/sys/todds1307.h b/usr/src/uts/sun4u/snowbird/sys/todds1307.h
index 34cc98143d..dd006c174b 100644
--- a/usr/src/uts/sun4u/snowbird/sys/todds1307.h
+++ b/usr/src/uts/sun4u/snowbird/sys/todds1307.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2003 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -33,7 +32,6 @@
extern "C" {
#endif
-#include <sys/cyclic.h>
#include <sys/i2c/clients/i2c_client.h>
extern char *v_rtc_addr_reg;
@@ -85,7 +83,7 @@ typedef struct ds1307_state {
int instance;
dev_info_t *dip;
uint32_t state;
- cyclic_id_t cycid;
+ ddi_periodic_t cycid; /* periodical callback */
struct rtc_t rtc;
i2c_transfer_t *i2c_tp;
ddi_softintr_t soft_intr_id;
diff --git a/usr/src/uts/sun4u/sys/dmfe_impl.h b/usr/src/uts/sun4u/sys/dmfe_impl.h
index d4acfcf44a..2699b9dc51 100644
--- a/usr/src/uts/sun4u/sys/dmfe_impl.h
+++ b/usr/src/uts/sun4u/sys/dmfe_impl.h
@@ -43,7 +43,6 @@ extern "C" {
#include <sys/ethernet.h>
#include <sys/devops.h>
#include <sys/debug.h>
-#include <sys/cyclic.h>
#include <sys/conf.h>
#include <inet/common.h>
@@ -249,7 +248,7 @@ typedef struct {
dma_area_t rx_desc; /* receive descriptors */
dma_area_t rx_buff; /* receive buffers */
- cyclic_id_t cycid; /* cyclic callback id */
+ ddi_periodic_t cycid; /* periodical callback */
ddi_softintr_t factotum_id; /* identity of factotum */
ddi_iblock_cookie_t iblk;
diff --git a/usr/src/uts/sun4u/sys/machsystm.h b/usr/src/uts/sun4u/sys/machsystm.h
index 75302b3a9e..28d3f1a137 100644
--- a/usr/src/uts/sun4u/sys/machsystm.h
+++ b/usr/src/uts/sun4u/sys/machsystm.h
@@ -231,6 +231,7 @@ extern void send_dirint(int, int);
extern void setsoftint(uint64_t);
extern void setsoftint_tl1(uint64_t, uint64_t);
extern void siron(void);
+extern void sir_on(int);
extern uint64_t getidsr(void);
extern void intr_enqueue_req(uint_t pil, uint64_t inum);
extern void intr_dequeue_req(uint_t pil, uint64_t inum);
diff --git a/usr/src/uts/sun4u/sys/rmc_comm.h b/usr/src/uts/sun4u/sys/rmc_comm.h
index 50a4e792d8..397ba01b89 100644
--- a/usr/src/uts/sun4u/sys/rmc_comm.h
+++ b/usr/src/uts/sun4u/sys/rmc_comm.h
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -113,7 +113,7 @@ typedef struct rmc_comm_serdev_state {
ddi_acc_handle_t sio_handle;
uint8_t *sio_regs;
ddi_softintr_t softid;
- cyclic_id_t cycid;
+ ddi_periodic_t cycid; /* periodical callback */
/*
* Hardware mutex (initialised using <hw_iblk>),
diff --git a/usr/src/uts/sun4u/sys/todds1337.h b/usr/src/uts/sun4u/sys/todds1337.h
index 2fa0101708..39eb144f78 100644
--- a/usr/src/uts/sun4u/sys/todds1337.h
+++ b/usr/src/uts/sun4u/sys/todds1337.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -33,7 +32,6 @@
extern "C" {
#endif
-#include <sys/cyclic.h>
#include <sys/i2c/clients/i2c_client.h>
extern char *v_rtc_addr_reg;
@@ -115,7 +113,7 @@ typedef struct ds1337_state {
int instance;
dev_info_t *dip;
uint32_t state;
- cyclic_id_t cycid;
+ ddi_periodic_t cycid; /* periodical callback */
struct rtc_t rtc;
i2c_transfer_t *i2c_tp;
ddi_softintr_t soft_intr_id;
diff --git a/usr/src/uts/sun4v/sys/machsystm.h b/usr/src/uts/sun4v/sys/machsystm.h
index a7d31c6eca..84ef8b740f 100644
--- a/usr/src/uts/sun4v/sys/machsystm.h
+++ b/usr/src/uts/sun4v/sys/machsystm.h
@@ -229,6 +229,7 @@ extern void send_dirint(int, int);
extern void setsoftint(uint64_t);
extern void setsoftint_tl1(uint64_t, uint64_t);
extern void siron(void);
+extern void sir_on(int);
extern void intr_enqueue_req(uint_t pil, uint64_t inum);
extern void intr_dequeue_req(uint_t pil, uint64_t inum);
extern void wr_clr_softint(uint_t);