Diffstat (limited to 'usr')
-rw-r--r--  usr/src/cmd/dtrace/test/tst/i386/funcs/tst.badcopyin.d | 9
-rw-r--r--  usr/src/cmd/mdb/common/mdb/mdb_ks.h | 8
-rw-r--r--  usr/src/cmd/mdb/common/modules/conf/mapfile-extern | 2
-rw-r--r--  usr/src/cmd/mdb/common/modules/genunix/genunix.c | 68
-rw-r--r--  usr/src/cmd/mdb/common/modules/genunix/net.c | 3
-rw-r--r--  usr/src/cmd/mdb/common/modules/idm/idm.c | 14
-rw-r--r--  usr/src/cmd/mdb/common/modules/ip/ip.c | 4
-rw-r--r--  usr/src/cmd/mdb/common/modules/mdb_ks/mdb_ks.c | 79
-rw-r--r--  usr/src/cmd/mdb/common/modules/nca/nca.c | 21
-rw-r--r--  usr/src/lib/libzpool/common/kernel.c | 2
-rw-r--r--  usr/src/lib/libzpool/common/sys/zfs_context.h | 4
-rw-r--r--  usr/src/pkgdefs/etc/exception_list_i386 | 1
-rw-r--r--  usr/src/pkgdefs/etc/exception_list_sparc | 1
-rw-r--r--  usr/src/uts/common/avs/ns/rdc/rdc_svc.c | 6
-rw-r--r--  usr/src/uts/common/avs/ns/sdbc/sd_io.c | 8
-rw-r--r--  usr/src/uts/common/avs/ns/sdbc/sd_misc.c | 8
-rw-r--r--  usr/src/uts/common/brand/lx/procfs/lx_prvnops.c | 1
-rw-r--r--  usr/src/uts/common/c2/audit_io.c | 8
-rw-r--r--  usr/src/uts/common/conf/param.c | 47
-rw-r--r--  usr/src/uts/common/crypto/core/kcf_sched.c | 18
-rw-r--r--  usr/src/uts/common/crypto/io/dca.c | 4
-rw-r--r--  usr/src/uts/common/disp/cpucaps.c | 56
-rw-r--r--  usr/src/uts/common/disp/disp.c | 6
-rw-r--r--  usr/src/uts/common/disp/fss.c | 18
-rw-r--r--  usr/src/uts/common/disp/fx.c | 12
-rw-r--r--  usr/src/uts/common/disp/thread.c | 3
-rw-r--r--  usr/src/uts/common/disp/ts.c | 16
-rw-r--r--  usr/src/uts/common/fs/autofs/auto_subr.c | 6
-rw-r--r--  usr/src/uts/common/fs/cachefs/cachefs_resource.c | 32
-rw-r--r--  usr/src/uts/common/fs/cachefs/cachefs_subr.c | 168
-rw-r--r--  usr/src/uts/common/fs/dev/sdev_comm.c | 11
-rw-r--r--  usr/src/uts/common/fs/dnlc.c | 18
-rw-r--r--  usr/src/uts/common/fs/fsflush.c | 4
-rw-r--r--  usr/src/uts/common/fs/nfs/nfs4_client.c | 18
-rw-r--r--  usr/src/uts/common/fs/nfs/nfs4_db.c | 9
-rw-r--r--  usr/src/uts/common/fs/nfs/nfs4_deleg_ops.c | 6
-rw-r--r--  usr/src/uts/common/fs/nfs/nfs4_recovery.c | 4
-rw-r--r--  usr/src/uts/common/fs/nfs/nfs4_srv.c | 4
-rw-r--r--  usr/src/uts/common/fs/nfs/nfs4_stub_vnops.c | 2
-rw-r--r--  usr/src/uts/common/fs/nfs/nfs_client.c | 11
-rw-r--r--  usr/src/uts/common/fs/nfs/nfs_dump.c | 9
-rw-r--r--  usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c | 25
-rw-r--r--  usr/src/uts/common/fs/smbclnt/netsmb/smb_rq.c | 18
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_lock.c | 2
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_opipe.c | 2
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_session.c | 7
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_util.c | 11
-rw-r--r--  usr/src/uts/common/fs/sockfs/nl7chttp.c | 7
-rw-r--r--  usr/src/uts/common/fs/sockfs/nl7curi.c | 3
-rw-r--r--  usr/src/uts/common/fs/sockfs/sockcommon_subr.c | 19
-rw-r--r--  usr/src/uts/common/fs/sockfs/sockstr.c | 7
-rw-r--r--  usr/src/uts/common/fs/sockfs/socksyscalls.c | 6
-rw-r--r--  usr/src/uts/common/fs/ufs/lufs.c | 4
-rw-r--r--  usr/src/uts/common/fs/ufs/lufs_log.c | 4
-rw-r--r--  usr/src/uts/common/fs/ufs/lufs_thread.c | 8
-rw-r--r--  usr/src/uts/common/fs/ufs/ufs_alloc.c | 12
-rw-r--r--  usr/src/uts/common/fs/ufs/ufs_directio.c | 2
-rw-r--r--  usr/src/uts/common/fs/ufs/ufs_inode.c | 4
-rw-r--r--  usr/src/uts/common/fs/ufs/ufs_lockfs.c | 3
-rw-r--r--  usr/src/uts/common/fs/ufs/ufs_panic.c | 9
-rw-r--r--  usr/src/uts/common/fs/ufs/ufs_subr.c | 6
-rw-r--r--  usr/src/uts/common/fs/ufs/ufs_thread.c | 6
-rw-r--r--  usr/src/uts/common/fs/ufs/ufs_vfsops.c | 10
-rw-r--r--  usr/src/uts/common/fs/ufs/ufs_vnops.c | 16
-rw-r--r--  usr/src/uts/common/fs/zfs/arc.c | 40
-rw-r--r--  usr/src/uts/common/fs/zfs/dmu_zfetch.c | 6
-rw-r--r--  usr/src/uts/common/fs/zfs/dsl_scrub.c | 4
-rw-r--r--  usr/src/uts/common/fs/zfs/metaslab.c | 2
-rw-r--r--  usr/src/uts/common/fs/zfs/txg.c | 13
-rw-r--r--  usr/src/uts/common/fs/zfs/vdev_cache.c | 6
-rw-r--r--  usr/src/uts/common/fs/zfs/vdev_queue.c | 5
-rw-r--r--  usr/src/uts/common/fs/zfs/zil.c | 2
-rw-r--r--  usr/src/uts/common/fs/zfs/zio_inject.c | 5
-rw-r--r--  usr/src/uts/common/inet/ilb/ilb.c | 8
-rw-r--r--  usr/src/uts/common/inet/ilb/ilb_conn.c | 20
-rw-r--r--  usr/src/uts/common/inet/ip/conn_opt.c | 2
-rw-r--r--  usr/src/uts/common/inet/ip/icmp.c | 2
-rw-r--r--  usr/src/uts/common/inet/ip/ip.c | 12
-rw-r--r--  usr/src/uts/common/inet/ip/ip2mac.c | 4
-rw-r--r--  usr/src/uts/common/inet/ip/ip6.c | 2
-rw-r--r--  usr/src/uts/common/inet/ip/ip6_input.c | 2
-rw-r--r--  usr/src/uts/common/inet/ip/ip6_output.c | 6
-rw-r--r--  usr/src/uts/common/inet/ip/ip_dce.c | 13
-rw-r--r--  usr/src/uts/common/inet/ip/ip_ftable.c | 2
-rw-r--r--  usr/src/uts/common/inet/ip/ip_if.c | 12
-rw-r--r--  usr/src/uts/common/inet/ip/ip_input.c | 2
-rw-r--r--  usr/src/uts/common/inet/ip/ip_ire.c | 4
-rw-r--r--  usr/src/uts/common/inet/ip/ip_ndp.c | 10
-rw-r--r--  usr/src/uts/common/inet/ip/ip_output.c | 12
-rw-r--r--  usr/src/uts/common/inet/ip/ip_squeue.c | 2
-rw-r--r--  usr/src/uts/common/inet/kssl/ksslrec.c | 5
-rw-r--r--  usr/src/uts/common/inet/nca/nca.h | 4
-rw-r--r--  usr/src/uts/common/inet/sctp/sctp.c | 7
-rw-r--r--  usr/src/uts/common/inet/sctp/sctp_bind.c | 2
-rw-r--r--  usr/src/uts/common/inet/sctp/sctp_common.c | 5
-rw-r--r--  usr/src/uts/common/inet/sctp/sctp_conn.c | 4
-rw-r--r--  usr/src/uts/common/inet/sctp/sctp_cookie.c | 13
-rw-r--r--  usr/src/uts/common/inet/sctp/sctp_heartbeat.c | 6
-rw-r--r--  usr/src/uts/common/inet/sctp/sctp_impl.h | 2
-rw-r--r--  usr/src/uts/common/inet/sctp/sctp_input.c | 16
-rw-r--r--  usr/src/uts/common/inet/sctp/sctp_opt_data.c | 2
-rw-r--r--  usr/src/uts/common/inet/sctp/sctp_output.c | 8
-rw-r--r--  usr/src/uts/common/inet/sctp/sctp_shutdown.c | 2
-rw-r--r--  usr/src/uts/common/inet/sctp/sctp_timer.c | 3
-rw-r--r--  usr/src/uts/common/inet/squeue.c | 15
-rw-r--r--  usr/src/uts/common/inet/tcp/tcp.c | 78
-rw-r--r--  usr/src/uts/common/inet/udp/udp.c | 2
-rw-r--r--  usr/src/uts/common/io/bridge.c | 8
-rw-r--r--  usr/src/uts/common/io/bscbus.c | 7
-rw-r--r--  usr/src/uts/common/io/bscv.c | 5
-rw-r--r--  usr/src/uts/common/io/comstar/port/fcoet/fcoet.c | 5
-rw-r--r--  usr/src/uts/common/io/comstar/port/fct/discovery.c | 10
-rw-r--r--  usr/src/uts/common/io/comstar/port/iscsit/iscsit_isns.c | 4
-rw-r--r--  usr/src/uts/common/io/comstar/port/qlt/qlt.c | 15
-rw-r--r--  usr/src/uts/common/io/comstar/stmf/stmf.c | 15
-rw-r--r--  usr/src/uts/common/io/drm/drmP.h | 34
-rw-r--r--  usr/src/uts/common/io/drm/drm_lock.c | 2
-rw-r--r--  usr/src/uts/common/io/e1000g/e1000g_tx.c | 4
-rw-r--r--  usr/src/uts/common/io/ecpp.c | 8
-rw-r--r--  usr/src/uts/common/io/emul64_bsd.c | 6
-rw-r--r--  usr/src/uts/common/io/fcoe/fcoe_fc.c | 4
-rw-r--r--  usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c | 44
-rw-r--r--  usr/src/uts/common/io/fibre-channel/fca/qlc/ql_mbx.c | 17
-rw-r--r--  usr/src/uts/common/io/fibre-channel/ulp/fcp.c | 6
-rw-r--r--  usr/src/uts/common/io/ib/adapters/hermon/hermon_stats.c | 6
-rw-r--r--  usr/src/uts/common/io/ib/clients/rds/rdsib_buf.c | 4
-rw-r--r--  usr/src/uts/common/io/ib/mgt/ibdm/ibdm.c | 7
-rw-r--r--  usr/src/uts/common/io/idm/idm_impl.c | 7
-rw-r--r--  usr/src/uts/common/io/ipw/ipw2100.c | 5
-rw-r--r--  usr/src/uts/common/io/ipw/ipw2100_hw.c | 9
-rw-r--r--  usr/src/uts/common/io/iwi/ipw2200.c | 6
-rw-r--r--  usr/src/uts/common/io/iwi/ipw2200_hw.c | 10
-rw-r--r--  usr/src/uts/common/io/lvm/md/md_subr.c | 14
-rw-r--r--  usr/src/uts/common/io/lvm/raid/raid.c | 11
-rw-r--r--  usr/src/uts/common/io/mac/mac_sched.c | 28
-rw-r--r--  usr/src/uts/common/io/mii/mii.c | 5
-rw-r--r--  usr/src/uts/common/io/mms/dmd/dmd.c | 14
-rw-r--r--  usr/src/uts/common/io/net80211/net80211_ioctl.c | 6
-rw-r--r--  usr/src/uts/common/io/nxge/nxge_mac.c | 6
-rw-r--r--  usr/src/uts/common/io/pciex/hotplug/pcishpc.c | 4
-rw-r--r--  usr/src/uts/common/io/rsm/rsm.c | 23
-rw-r--r--  usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c | 4
-rw-r--r--  usr/src/uts/common/io/sata/impl/sata.c | 8
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/iscsi/iscsi_login.c | 1
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/iscsi/iscsi_thread.c | 4
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas_impl.c | 4
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c | 12
-rw-r--r--  usr/src/uts/common/io/scsi/impl/scsi_watch.c | 11
-rw-r--r--  usr/src/uts/common/io/sdcard/impl/sda_slot.c | 8
-rw-r--r--  usr/src/uts/common/io/softmac/softmac_pkt.c | 4
-rw-r--r--  usr/src/uts/common/io/usb/hcd/ehci/ehci_isoch.c | 12
-rw-r--r--  usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c | 4
-rw-r--r--  usr/src/uts/common/io/usb/hcd/ehci/ehci_xfer.c | 9
-rw-r--r--  usr/src/uts/common/io/usb/hcd/openhci/ohci.c | 17
-rw-r--r--  usr/src/uts/common/io/usb/hcd/uhci/uhciutil.c | 7
-rw-r--r--  usr/src/uts/common/io/usb/hwa/hwahc/hwahc_util.c | 6
-rw-r--r--  usr/src/uts/common/io/usb/usba/hubdi.c | 16
-rw-r--r--  usr/src/uts/common/io/usb/usba/wa.c | 4
-rw-r--r--  usr/src/uts/common/io/vscan/vscan_door.c | 8
-rw-r--r--  usr/src/uts/common/io/vscan/vscan_drv.c | 8
-rw-r--r--  usr/src/uts/common/io/vscan/vscan_svc.c | 14
-rw-r--r--  usr/src/uts/common/io/winlockio.c | 6
-rw-r--r--  usr/src/uts/common/os/acct.c | 6
-rw-r--r--  usr/src/uts/common/os/bio.c | 57
-rw-r--r--  usr/src/uts/common/os/callb.c | 10
-rw-r--r--  usr/src/uts/common/os/clock.c | 424
-rw-r--r--  usr/src/uts/common/os/clock_tick.c | 8
-rw-r--r--  usr/src/uts/common/os/condvar.c | 51
-rw-r--r--  usr/src/uts/common/os/damap.c | 4
-rw-r--r--  usr/src/uts/common/os/ddi.c | 8
-rw-r--r--  usr/src/uts/common/os/ddi_intr_irm.c | 7
-rw-r--r--  usr/src/uts/common/os/devcache.c | 8
-rw-r--r--  usr/src/uts/common/os/dumpsubr.c | 6
-rw-r--r--  usr/src/uts/common/os/fork.c | 2
-rw-r--r--  usr/src/uts/common/os/kstat_fr.c | 2
-rw-r--r--  usr/src/uts/common/os/logsubr.c | 5
-rw-r--r--  usr/src/uts/common/os/mem_cage.c | 6
-rw-r--r--  usr/src/uts/common/os/mem_config.c | 8
-rw-r--r--  usr/src/uts/common/os/modctl.c | 9
-rw-r--r--  usr/src/uts/common/os/panic.c | 10
-rw-r--r--  usr/src/uts/common/os/sched.c | 54
-rw-r--r--  usr/src/uts/common/os/sig.c | 4
-rw-r--r--  usr/src/uts/common/os/softint.c | 16
-rw-r--r--  usr/src/uts/common/os/strsubr.c | 25
-rw-r--r--  usr/src/uts/common/os/sunddi.c | 9
-rw-r--r--  usr/src/uts/common/os/sunmdi.c | 20
-rw-r--r--  usr/src/uts/common/os/sunpm.c | 22
-rw-r--r--  usr/src/uts/common/os/taskq.c | 2
-rw-r--r--  usr/src/uts/common/os/vm_pageout.c | 8
-rw-r--r--  usr/src/uts/common/os/zone.c | 6
-rw-r--r--  usr/src/uts/common/pcmcia/nexus/pcmcia.c | 161
-rw-r--r--  usr/src/uts/common/pcmcia/pem/pem.c | 8
-rw-r--r--  usr/src/uts/common/rpc/clnt_clts.c | 7
-rw-r--r--  usr/src/uts/common/rpc/clnt_cots.c | 44
-rw-r--r--  usr/src/uts/common/rpc/rpcib.c | 10
-rw-r--r--  usr/src/uts/common/rpc/rpcmod.c | 14
-rw-r--r--  usr/src/uts/common/rpc/svc.c | 4
-rw-r--r--  usr/src/uts/common/sys/Makefile | 1
-rw-r--r--  usr/src/uts/common/sys/clock_impl.h | 125
-rw-r--r--  usr/src/uts/common/sys/condvar.h | 31
-rw-r--r--  usr/src/uts/common/sys/cpucaps_impl.h | 6
-rw-r--r--  usr/src/uts/common/sys/cpuvar.h | 2
-rw-r--r--  usr/src/uts/common/sys/fcoe/fcoe_common.h | 2
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/mpt_sas/mptsas_var.h | 7
-rw-r--r--  usr/src/uts/common/sys/sunddi.h | 3
-rw-r--r--  usr/src/uts/common/sys/systm.h | 2
-rw-r--r--  usr/src/uts/common/sys/time.h | 3
-rw-r--r--  usr/src/uts/common/syscall/poll.c | 5
-rw-r--r--  usr/src/uts/common/syscall/times.c | 13
-rw-r--r--  usr/src/uts/common/syscall/uadmin.c | 5
-rw-r--r--  usr/src/uts/common/vm/seg_kp.c | 16
-rw-r--r--  usr/src/uts/common/vm/seg_spt.c | 4
-rw-r--r--  usr/src/uts/common/vm/vm_page.c | 16
-rw-r--r--  usr/src/uts/common/vm/vm_seg.c | 14
-rw-r--r--  usr/src/uts/common/xen/io/xdf.c | 5
-rw-r--r--  usr/src/uts/common/xen/io/xpvtap.c | 10
-rw-r--r--  usr/src/uts/i86pc/io/tzmon/tzmon.c | 4
-rw-r--r--  usr/src/uts/i86pc/os/graphics.c | 6
-rw-r--r--  usr/src/uts/i86pc/os/machdep.c | 23
-rw-r--r--  usr/src/uts/i86xpv/os/balloon.c | 4
-rw-r--r--  usr/src/uts/intel/io/heci/heci_init.c | 46
-rw-r--r--  usr/src/uts/intel/io/heci/heci_main.c | 21
-rw-r--r--  usr/src/uts/intel/io/heci/io_heci.c | 6
-rw-r--r--  usr/src/uts/intel/os/arch_kdi.c | 9
-rw-r--r--  usr/src/uts/sfmmu/vm/hat_sfmmu.h | 2
-rw-r--r--  usr/src/uts/sun/io/scsi/adapters/sf.c | 7
-rw-r--r--  usr/src/uts/sun/io/zs_async.c | 209
-rw-r--r--  usr/src/uts/sun4/os/machdep.c | 42
-rw-r--r--  usr/src/uts/sun4u/cpu/us3_common.c | 5
-rw-r--r--  usr/src/uts/sun4u/daktari/io/hpc3130_dak.c | 11
-rw-r--r--  usr/src/uts/sun4u/io/rmc_comm_drvintf.c | 27
-rw-r--r--  usr/src/uts/sun4u/lw2plus/io/lombus.c | 14
-rw-r--r--  usr/src/uts/sun4u/lw8/os/lw8_platmod.c | 9
-rw-r--r--  usr/src/uts/sun4u/ngdr/io/dr_mem.c | 356
-rw-r--r--  usr/src/uts/sun4u/opl/io/dm2s.c | 9
-rw-r--r--  usr/src/uts/sun4u/opl/io/dr_mem.c | 8
-rw-r--r--  usr/src/uts/sun4u/opl/io/mc-opl.c | 4
-rw-r--r--  usr/src/uts/sun4u/opl/io/oplkmdrv.c | 6
-rw-r--r--  usr/src/uts/sun4u/serengeti/io/sbdp_error.c | 8
-rw-r--r--  usr/src/uts/sun4u/serengeti/io/sbdp_mem.c | 43
-rw-r--r--  usr/src/uts/sun4u/serengeti/os/serengeti.c | 9
-rw-r--r--  usr/src/uts/sun4u/starcat/io/dman.c | 6
-rw-r--r--  usr/src/uts/sun4u/starcat/io/drmach.c | 739
-rw-r--r--  usr/src/uts/sun4u/starcat/io/iosram.c | 7
-rw-r--r--  usr/src/uts/sun4u/starcat/io/sckmdrv.c | 6
-rw-r--r--  usr/src/uts/sun4u/starcat/os/starcat.c | 9
-rw-r--r--  usr/src/uts/sun4u/starfire/io/idn.c | 15
-rw-r--r--  usr/src/uts/sun4u/starfire/io/idn_proto.c | 2303
-rw-r--r--  usr/src/uts/sun4u/starfire/io/idn_smr.c | 223
-rw-r--r--  usr/src/uts/sun4u/starfire/io/idn_xf.c | 339
-rw-r--r--  usr/src/uts/sun4u/starfire/os/starfire.c | 26
-rw-r--r--  usr/src/uts/sun4u/starfire/sys/idn.h | 6
-rw-r--r--  usr/src/uts/sun4v/io/glvc/glvc.c | 23
-rw-r--r--  usr/src/uts/sun4v/io/ntwdt.c | 7
-rw-r--r--  usr/src/uts/sun4v/io/vdc.c | 9
-rw-r--r--  usr/src/uts/sun4v/io/vldc.c | 6
-rw-r--r--  usr/src/uts/sun4v/io/vsw_ldc.c | 4
-rw-r--r--  usr/src/uts/sun4v/os/mach_cpu_states.c | 5
-rw-r--r--  usr/src/uts/sun4v/promif/promif_prop.c | 8
259 files changed, 4113 insertions, 3649 deletions
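
The conversions that follow repeat a small number of patterns: reads of the global lbolt/lbolt64 variables become calls to ddi_get_lbolt()/ddi_get_lbolt64(), and cv_timedwait()/cv_timedwait_sig() calls that built an absolute deadline from lbolt become cv_reltimedwait()/cv_reltimedwait_sig() calls taking a relative timeout plus a time-resolution hint. A minimal sketch of the before/after idiom, for orientation only (my_cv, my_lock and the timeout are illustrative names, not taken from any file below):

#include <sys/types.h>
#include <sys/ksynch.h>
#include <sys/sunddi.h>

/*
 * Old idiom (removed throughout this diff):
 *
 *	clock_t deadline = lbolt + drv_usectohz(timeout_us);
 *	(void) cv_timedwait(&my_cv, &my_lock, deadline);
 *
 * New idiom: no direct read of lbolt; the timeout is relative and
 * carries a resolution hint so the clock code may batch wakeups.
 */
static void
wait_for_event(kcondvar_t *my_cv, kmutex_t *my_lock, clock_t timeout_us)
{
	mutex_enter(my_lock);
	(void) cv_reltimedwait(my_cv, my_lock,
	    drv_usectohz(timeout_us), TR_CLOCK_TICK);
	mutex_exit(my_lock);
}

/* Code that needs the tick counter itself now goes through the DDI. */
static clock_t
ticks_since_boot(void)
{
	return (ddi_get_lbolt());	/* or ddi_get_lbolt64() for int64_t */
}
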
diff --git a/usr/src/cmd/dtrace/test/tst/i386/funcs/tst.badcopyin.d b/usr/src/cmd/dtrace/test/tst/i386/funcs/tst.badcopyin.d
index d5b8c24aa5..f14c8afc8c 100644
--- a/usr/src/cmd/dtrace/test/tst/i386/funcs/tst.badcopyin.d
+++ b/usr/src/cmd/dtrace/test/tst/i386/funcs/tst.badcopyin.d
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* ASSERTION:
* On IA/32, there is a single 32-bit address space that is partitioned
@@ -43,11 +41,12 @@
BEGIN
{
- lbolt = copyin((uintptr_t)&`lbolt, sizeof (int));
+ dtrace_zero = copyin((uintptr_t)&`dtrace_zero, sizeof (int));
exit(1);
}
ERROR
{
- exit(arg4 == DTRACEFLT_BADADDR && arg5 == (uint64_t)&`lbolt ? 0 : 1);
+ exit(arg4 == DTRACEFLT_BADADDR &&
+ arg5 == (uint64_t)&`dtrace_zero ? 0 : 1);
}
diff --git a/usr/src/cmd/mdb/common/mdb/mdb_ks.h b/usr/src/cmd/mdb/common/mdb/mdb_ks.h
index 3ea1343492..9d48535161 100644
--- a/usr/src/cmd/mdb/common/mdb/mdb_ks.h
+++ b/usr/src/cmd/mdb/common/mdb/mdb_ks.h
@@ -19,15 +19,13 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _MDB_KS_H
#define _MDB_KS_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/int_types.h>
#include <sys/stream.h>
@@ -67,6 +65,9 @@ extern int mdb_cpu2cpuid(uintptr_t);
extern int mdb_cpuset_find(uintptr_t);
+extern hrtime_t mdb_gethrtime(void);
+extern int64_t mdb_get_lbolt(void);
+
/*
* Returns a pointer to the top of the soft state struct for the instance
* specified, given the address of the global soft state pointer and size
@@ -91,7 +92,6 @@ extern int mdb_get_soft_state_byname(char *, uint_t, uintptr_t *, void *,
*/
extern char *mdb_ddi_pathname(uintptr_t, char *, size_t);
-
/*
* MDB Kernel STREAMS Subsystem:
*
diff --git a/usr/src/cmd/mdb/common/modules/conf/mapfile-extern b/usr/src/cmd/mdb/common/modules/conf/mapfile-extern
index 4fc9613e1f..84f8426ef7 100644
--- a/usr/src/cmd/mdb/common/modules/conf/mapfile-extern
+++ b/usr/src/cmd/mdb/common/modules/conf/mapfile-extern
@@ -93,6 +93,8 @@
mdb_gelf_destroy = EXTERN;
mdb_gelf_sect_by_name = EXTERN;
mdb_gelf_sect_load = EXTERN;
+ mdb_gethrtime = EXTERN;
+ mdb_get_lbolt = EXTERN;
mdb_inc_indent = EXTERN;
mdb_io_destroy = EXTERN;
mdb_iob_clrflags = EXTERN;
diff --git a/usr/src/cmd/mdb/common/modules/genunix/genunix.c b/usr/src/cmd/mdb/common/modules/genunix/genunix.c
index e6fe3f7dcf..1687c34809 100644
--- a/usr/src/cmd/mdb/common/modules/genunix/genunix.c
+++ b/usr/src/cmd/mdb/common/modules/genunix/genunix.c
@@ -3083,7 +3083,6 @@ cpu_walk_step(mdb_walk_state_t *wsp)
typedef struct cpuinfo_data {
intptr_t cid_cpu;
- uintptr_t cid_lbolt;
uintptr_t **cid_ithr;
char cid_print_head;
char cid_print_thr;
@@ -3210,13 +3209,8 @@ cpuinfo_walk_cpu(uintptr_t addr, const cpu_t *cpu, cpuinfo_data_t *cid)
cpu->cpu_kprunrun ? "yes" : "no");
if (cpu->cpu_last_swtch) {
- clock_t lbolt;
-
- if (mdb_vread(&lbolt, sizeof (lbolt), cid->cid_lbolt) == -1) {
- mdb_warn("failed to read lbolt at %p", cid->cid_lbolt);
- return (WALK_ERR);
- }
- mdb_printf("t-%-4d ", lbolt - cpu->cpu_last_swtch);
+ mdb_printf("t-%-4d ",
+ (clock_t)mdb_get_lbolt() - cpu->cpu_last_swtch);
} else {
mdb_printf("%-6s ", "-");
}
@@ -3395,8 +3389,6 @@ cpuinfo(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
uint_t verbose = FALSE;
cpuinfo_data_t cid;
- GElf_Sym sym;
- clock_t lbolt;
cid.cid_print_ithr = FALSE;
cid.cid_print_thr = FALSE;
@@ -3435,26 +3427,6 @@ cpuinfo(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
}
}
- if (mdb_lookup_by_name("panic_lbolt", &sym) == -1) {
- mdb_warn("failed to find panic_lbolt");
- return (DCMD_ERR);
- }
-
- cid.cid_lbolt = (uintptr_t)sym.st_value;
-
- if (mdb_vread(&lbolt, sizeof (lbolt), cid.cid_lbolt) == -1) {
- mdb_warn("failed to read panic_lbolt");
- return (DCMD_ERR);
- }
-
- if (lbolt == 0) {
- if (mdb_lookup_by_name("lbolt", &sym) == -1) {
- mdb_warn("failed to find lbolt");
- return (DCMD_ERR);
- }
- cid.cid_lbolt = (uintptr_t)sym.st_value;
- }
-
if (mdb_walk("cpu", (mdb_walk_cb_t)cpuinfo_walk_cpu, &cid) == -1) {
mdb_warn("can't walk cpus");
return (DCMD_ERR);
@@ -4175,6 +4147,41 @@ panicinfo(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
return (DCMD_OK);
}
+/*
+ * ::time dcmd, which will print a hires timestamp of when we entered the
+ * debugger, or the lbolt value if used with the -l option.
+ *
+ */
+/*ARGSUSED*/
+static int
+time(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ uint_t opt_lbolt = FALSE;
+
+ if (mdb_getopts(argc, argv, 'l', MDB_OPT_SETBITS, TRUE, &opt_lbolt,
+ NULL) != argc)
+ return (DCMD_USAGE);
+
+ if (opt_lbolt)
+ mdb_printf("%ld\n", mdb_get_lbolt());
+ else
+ mdb_printf("%lld\n", mdb_gethrtime());
+
+ return (DCMD_OK);
+}
+
+void
+time_help(void)
+{
+ mdb_printf("Prints the system time in nanoseconds.\n\n"
+ "::time will return the timestamp at which we dropped into, if "
+ "called from, kmdb(1); the core dump's high resolution time if "
+ "inspecting one; or the running hires time if we're inspecting "
+ "a live system.\n\n"
+ "Switches:\n"
+ " -l prints the number of clock ticks since system boot\n");
+}
+
static const mdb_dcmd_t dcmds[] = {
/* from genunix.c */
@@ -4216,6 +4223,7 @@ static const mdb_dcmd_t dcmds[] = {
"print sysevent subclass list", sysevent_subclass_list},
{ "system", NULL, "print contents of /etc/system file", sysfile },
{ "task", NULL, "display kernel task(s)", task },
+ { "time", "[-l]", "display system time", time, time_help },
{ "vnode2path", ":[-F]", "vnode address to pathname", vnode2path },
{ "vnode2smap", ":[offset]", "translate vnode to smap", vnode2smap },
{ "whereopen", ":", "given a vnode, dumps procs which have it open",
diff --git a/usr/src/cmd/mdb/common/modules/genunix/net.c b/usr/src/cmd/mdb/common/modules/genunix/net.c
index 23d6202fff..3f5f54059b 100644
--- a/usr/src/cmd/mdb/common/modules/genunix/net.c
+++ b/usr/src/cmd/mdb/common/modules/genunix/net.c
@@ -1559,8 +1559,7 @@ dladm_show_bridge(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
if (argc == 1)
args->name = argv[0].a_un.a_str;
- if (mdb_readvar(&args->lbolt,
- mdb_prop_postmortem ? "panic_lbolt" : "lbolt") == -1) {
+ if ((args->lbolt = mdb_get_lbolt()) == -1) {
mdb_warn("failed to read lbolt");
goto err;
}
diff --git a/usr/src/cmd/mdb/common/modules/idm/idm.c b/usr/src/cmd/mdb/common/modules/idm/idm.c
index fc8ea6fe1f..add67eac36 100644
--- a/usr/src/cmd/mdb/common/modules/idm/idm.c
+++ b/usr/src/cmd/mdb/common/modules/idm/idm.c
@@ -23,7 +23,9 @@
* Use is subject to license terms.
*/
-#include <sys/mdb_modapi.h>
+#include <mdb/mdb_modapi.h>
+#include <mdb/mdb_ks.h>
+
#include <sys/cpuvar.h>
#include <sys/conf.h>
#include <sys/file.h>
@@ -2284,7 +2286,6 @@ iscsi_isns_targets(iscsi_dcmd_ctrl_t *idc)
static int
iscsi_isns_servers_cb(uintptr_t addr, const void *walker_data, void *data)
{
- GElf_Sym sym;
iscsit_isns_svr_t server;
char server_addr[PORTAL_STR_LEN];
struct sockaddr_storage *ss;
@@ -2297,15 +2298,8 @@ iscsi_isns_servers_cb(uintptr_t addr, const void *walker_data, void *data)
return (WALK_ERR);
}
- if (mdb_lookup_by_name("lbolt", &sym) == -1) {
- mdb_warn("failed to find symbol 'lbolt'");
- return (DCMD_ERR);
- }
-
- if (mdb_vread(&lbolt, sizeof (clock_t), sym.st_value) !=
- sizeof (clock_t)) {
+ if ((lbolt = (clock_t)mdb_get_lbolt()) == -1)
return (WALK_ERR);
- }
mdb_printf("iSNS server %p:\n", addr);
mdb_inc_indent(4);
diff --git a/usr/src/cmd/mdb/common/modules/ip/ip.c b/usr/src/cmd/mdb/common/modules/ip/ip.c
index da94942eae..800e46552a 100644
--- a/usr/src/cmd/mdb/common/modules/ip/ip.c
+++ b/usr/src/cmd/mdb/common/modules/ip/ip.c
@@ -1621,8 +1621,8 @@ th_trace(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
} else {
thw.thw_match = B_TRUE;
thw.thw_matchkey = addr;
- if (mdb_readvar(&thw.thw_lbolt,
- mdb_prop_postmortem ? "panic_lbolt" : "lbolt") == -1) {
+
+ if ((thw.thw_lbolt = (clock_t)mdb_get_lbolt()) == -1) {
mdb_warn("failed to read lbolt");
return (DCMD_ERR);
}
diff --git a/usr/src/cmd/mdb/common/modules/mdb_ks/mdb_ks.c b/usr/src/cmd/mdb/common/modules/mdb_ks/mdb_ks.c
index fec3ff52e0..37051e60bf 100644
--- a/usr/src/cmd/mdb/common/modules/mdb_ks/mdb_ks.c
+++ b/usr/src/cmd/mdb/common/modules/mdb_ks/mdb_ks.c
@@ -52,6 +52,7 @@
#include <sys/refstr_impl.h>
#include <sys/cpuvar.h>
#include <sys/dlpi.h>
+#include <sys/clock_impl.h>
#include <errno.h>
#include <vm/seg_vn.h>
@@ -1585,3 +1586,81 @@ mdb_dlpi_prim(int prim)
default: return (NULL);
}
}
+
+/*
+ * mdb_gethrtime() returns the hires system time. This will be the timestamp at
+ * which we dropped into, if called from, kmdb(1); the core dump's hires time
+ * if inspecting one; or the running system's hires time if we're inspecting
+ * a live kernel.
+ */
+hrtime_t
+mdb_gethrtime(void)
+{
+ uintptr_t ptr;
+ lbolt_info_t lbi;
+ hrtime_t ts;
+
+#ifdef _KMDB
+ if (mdb_readvar(&ptr, "lb_info") == -1)
+ return (0);
+
+ if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
+ sizeof (lbolt_info_t))
+ return (0);
+
+ ts = lbi.lbi_debug_ts;
+#else
+ if (mdb_prop_postmortem) {
+ if (mdb_readvar(&ptr, "lb_info") == -1)
+ return (0);
+
+ if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
+ sizeof (lbolt_info_t))
+ return (0);
+
+ ts = lbi.lbi_debug_ts;
+ } else {
+ ts = gethrtime();
+ }
+#endif
+ return (ts);
+}
+
+/*
+ * mdb_get_lbolt() returns the number of clock ticks since system boot.
+ * Depending on the context in which it's called, the value will be derived
+ * from different sources per mdb_gethrtime(). If inspecting a panicked
+ * system, the routine returns the 'panic_lbolt64' variable from the core file.
+ */
+int64_t
+mdb_get_lbolt(void)
+{
+ lbolt_info_t lbi;
+ uintptr_t ptr;
+ int64_t pl;
+ hrtime_t ts;
+ int nsec;
+
+ if (mdb_readvar(&pl, "panic_lbolt64") != -1 && pl > 0)
+ return (pl);
+
+ /*
+ * Load the time spent in kmdb, if any.
+ */
+ if (mdb_readvar(&ptr, "lb_info") == -1)
+ return (0);
+
+ if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
+ sizeof (lbolt_info_t))
+ return (0);
+
+ if ((ts = mdb_gethrtime()) <= 0)
+ return (0);
+
+ if (mdb_readvar(&nsec, "nsec_per_tick") == -1 || nsec == 0) {
+ mdb_warn("failed to read 'nsec_per_tick'");
+ return (-1);
+ }
+
+ return ((ts/nsec) - lbi.lbi_debug_time);
+}
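
mdb_get_lbolt() above reconstructs the tick count from high-resolution time instead of reading a kernel variable: ticks = hrtime / nsec_per_tick, minus the ticks logged while the target sat in kmdb. A worked example with assumed numbers (hz = 100, so nsec_per_tick is 10,000,000):

/*
 * Assumed values, for illustration only:
 *	ts             = 120,340,000,000 ns	(hrtime of ~120.34 s)
 *	nsec           = 10,000,000 ns		(nsec_per_tick at hz = 100)
 *	lbi_debug_time = 400 ticks		(time spent stopped in kmdb)
 *
 *	ts / nsec    = 12,034 raw ticks
 *	12,034 - 400 = 11,634			(value mdb_get_lbolt() returns)
 */
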
diff --git a/usr/src/cmd/mdb/common/modules/nca/nca.c b/usr/src/cmd/mdb/common/modules/nca/nca.c
index fa0afc5d37..e10a42e0aa 100644
--- a/usr/src/cmd/mdb/common/modules/nca/nca.c
+++ b/usr/src/cmd/mdb/common/modules/nca/nca.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* NCA mdb module. Provides a collection of dcmds and walkers that
* operate on core NCA data structures. Dependencies on NCA internals
@@ -465,10 +462,8 @@ nca_timer(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
return (DCMD_ERR);
}
- if (mdb_readvar(&lbolt, "lbolt") == -1) {
- mdb_warn("cannot read symbol lbolt");
+ if ((lbolt = (clock_t)mdb_get_lbolt()) == -1)
return (DCMD_ERR);
- }
mdb_printf("%0*p %0*p", NCA_ADDR_WIDTH, addr, NCA_ADDR_WIDTH, ti.ep);
mdb_inc_indent(24);
@@ -537,7 +532,7 @@ nca_node(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
return (DCMD_USAGE);
if (mdb_getopts(argc, argv, 'v', MDB_OPT_SETBITS, TRUE, &verbose,
- 'r', MDB_OPT_SETBITS, TRUE, &request, 'p', NULL) != argc)
+ 'r', MDB_OPT_SETBITS, TRUE, &request, 'p', NULL) != argc)
return (DCMD_USAGE);
if (!DCMD_HDRSPEC(flags) && verbose)
@@ -545,9 +540,9 @@ nca_node(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
if (DCMD_HDRSPEC(flags) || verbose) {
mdb_printf("%<u>%-*s %4s %5s %8s %-*s %-*s %-*s %-*s%</u>\n",
- NCA_ADDR_WIDTH, "ADDR", "REF", "STATE", "DATASIZE",
- NCA_ADDR_WIDTH, "SQUEUE", NCA_ADDR_WIDTH, "REQUEST",
- NCA_ADDR_WIDTH, "PLRUN", NCA_ADDR_WIDTH, "VLRUN");
+ NCA_ADDR_WIDTH, "ADDR", "REF", "STATE", "DATASIZE",
+ NCA_ADDR_WIDTH, "SQUEUE", NCA_ADDR_WIDTH, "REQUEST",
+ NCA_ADDR_WIDTH, "PLRUN", NCA_ADDR_WIDTH, "VLRUN");
}
if (mdb_vread(&node, sizeof (node_t), addr) == -1) {
diff --git a/usr/src/lib/libzpool/common/kernel.c b/usr/src/lib/libzpool/common/kernel.c
index f248049cdd..9a9aa8f213 100644
--- a/usr/src/lib/libzpool/common/kernel.c
+++ b/usr/src/lib/libzpool/common/kernel.c
@@ -269,7 +269,7 @@ cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime)
clock_t delta;
top:
- delta = abstime - lbolt;
+ delta = abstime - ddi_get_lbolt();
if (delta <= 0)
return (-1);
diff --git a/usr/src/lib/libzpool/common/sys/zfs_context.h b/usr/src/lib/libzpool/common/sys/zfs_context.h
index 96e2adfd3d..7346907676 100644
--- a/usr/src/lib/libzpool/common/sys/zfs_context.h
+++ b/usr/src/lib/libzpool/common/sys/zfs_context.h
@@ -440,8 +440,8 @@ extern vnode_t *rootdir;
/*
* Random stuff
*/
-#define lbolt (gethrtime() >> 23)
-#define lbolt64 (gethrtime() >> 23)
+#define ddi_get_lbolt() (gethrtime() >> 23)
+#define ddi_get_lbolt64() (gethrtime() >> 23)
#define hz 119 /* frequency when using gethrtime() >> 23 for lbolt */
extern void delay(clock_t ticks);
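
The libzpool change above keeps the userland emulation of the tick counter as gethrtime() >> 23 and only renames the accessors; the existing hz value of 119 follows directly from that shift. For reference:

/*
 * 2^23 ns = 8,388,608 ns ~= 8.39 ms per emulated tick
 * NANOSEC / 2^23 = 1,000,000,000 / 8,388,608 ~= 119.2 ticks per second,
 * so hz is defined as 119 for the userland build.
 */
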
diff --git a/usr/src/pkgdefs/etc/exception_list_i386 b/usr/src/pkgdefs/etc/exception_list_i386
index c133bfb26e..ee820e58a0 100644
--- a/usr/src/pkgdefs/etc/exception_list_i386
+++ b/usr/src/pkgdefs/etc/exception_list_i386
@@ -52,6 +52,7 @@ usr/include/bsm/audit_private.h i386
usr/include/bsm/devalloc.h i386
usr/include/sys/ieeefp.h i386
usr/include/sys/winlockio.h i386
+usr/include/sys/clock_impl.h i386
usr/include/security/pam_impl.h i386
usr/include/passwdutil.h i386
#
diff --git a/usr/src/pkgdefs/etc/exception_list_sparc b/usr/src/pkgdefs/etc/exception_list_sparc
index 34ca25ea4a..f30707bd2c 100644
--- a/usr/src/pkgdefs/etc/exception_list_sparc
+++ b/usr/src/pkgdefs/etc/exception_list_sparc
@@ -43,6 +43,7 @@ usr/include/bsm/audit_private.h sparc
usr/include/bsm/devalloc.h sparc
usr/include/security/pam_impl.h sparc
usr/include/passwdutil.h sparc
+usr/include/sys/clock_impl.h sparc
#
# Private/Internal libraries of the Cryptographic Framework.
#
diff --git a/usr/src/uts/common/avs/ns/rdc/rdc_svc.c b/usr/src/uts/common/avs/ns/rdc/rdc_svc.c
index 3ce99afd96..109cad8bb2 100644
--- a/usr/src/uts/common/avs/ns/rdc/rdc_svc.c
+++ b/usr/src/uts/common/avs/ns/rdc/rdc_svc.c
@@ -123,9 +123,9 @@ _rdc_sync_event_notify(int operation, char *volume, char *group)
cv_signal(&rdc_sync_event.cv);
rdc_sync_event.kernel_waiting = 1;
- time = cv_timedwait_sig(&rdc_sync_event.done_cv,
- &rdc_sync_event.mutex,
- nsc_lbolt() + rdc_sync_event_timeout);
+ time = cv_reltimedwait_sig(&rdc_sync_event.done_cv,
+ &rdc_sync_event.mutex, rdc_sync_event_timeout,
+ TR_CLOCK_TICK);
if (time == (clock_t)0 || time == (clock_t)-1) {
/* signalled or timed out */
ack = 0;
diff --git a/usr/src/uts/common/avs/ns/sdbc/sd_io.c b/usr/src/uts/common/avs/ns/sdbc/sd_io.c
index b31fddc037..05884467a9 100644
--- a/usr/src/uts/common/avs/ns/sdbc/sd_io.c
+++ b/usr/src/uts/common/avs/ns/sdbc/sd_io.c
@@ -320,9 +320,6 @@ _sd_dealloc_dm(void)
int dealloc;
_dm_process_vars_t *ppvars;
- /* clock_t ticker; */
- unsigned long ticker;
-
int write_dealloc; /* remove after debugging */
ppvars = &dynmem_processing_dm;
@@ -384,10 +381,9 @@ _sd_dealloc_dm(void)
if (sd_dealloc_flag_dm == TIME_DELAY_LVL2)
tic_delay = sleep_tics_lvl3;
- (void) drv_getparm(LBOLT, &ticker);
mutex_enter(&ppvars->thread_dm_lock);
- (void) cv_timedwait(&ppvars->thread_dm_cv,
- &ppvars->thread_dm_lock, ticker+tic_delay);
+ (void) cv_reltimedwait(&ppvars->thread_dm_cv,
+ &ppvars->thread_dm_lock, tic_delay, TR_CLOCK_TICK);
mutex_exit(&ppvars->thread_dm_lock);
/* check for special directives on wakeup */
diff --git a/usr/src/uts/common/avs/ns/sdbc/sd_misc.c b/usr/src/uts/common/avs/ns/sdbc/sd_misc.c
index fc7362ef42..e63bf9dd4d 100644
--- a/usr/src/uts/common/avs/ns/sdbc/sd_misc.c
+++ b/usr/src/uts/common/avs/ns/sdbc/sd_misc.c
@@ -1212,15 +1212,9 @@ sdbcioctl(dev_t dev, int cmd, void *arg, int mode, cred_t *crp, int *rvp)
void
_sd_timed_block(clock_t ticks, kcondvar_t *cvp)
{
- clock_t ticker;
-
- if (drv_getparm(LBOLT, &ticker) != 0)
- cmn_err(CE_WARN, "!_sd_timed_block:failed to get current time");
-
mutex_enter(&_sd_block_lk);
- (void) cv_timedwait(cvp, &_sd_block_lk, ticks + ticker);
+ (void) cv_reltimedwait(cvp, &_sd_block_lk, ticks, TR_CLOCK_TICK);
mutex_exit(&_sd_block_lk);
-
}
diff --git a/usr/src/uts/common/brand/lx/procfs/lx_prvnops.c b/usr/src/uts/common/brand/lx/procfs/lx_prvnops.c
index a2365bd352..37e66ba807 100644
--- a/usr/src/uts/common/brand/lx/procfs/lx_prvnops.c
+++ b/usr/src/uts/common/brand/lx/procfs/lx_prvnops.c
@@ -64,7 +64,6 @@ extern kthread_t *prchoose(proc_t *);
#include "lx_proc.h"
extern pgcnt_t swapfs_minfree;
-extern volatile clock_t lbolt;
extern time_t boot_time;
/*
diff --git a/usr/src/uts/common/c2/audit_io.c b/usr/src/uts/common/c2/audit_io.c
index 04dab516c2..c062ca204e 100644
--- a/usr/src/uts/common/c2/audit_io.c
+++ b/usr/src/uts/common/c2/audit_io.c
@@ -21,7 +21,7 @@
/*
* Routines for writing audit records.
*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -547,9 +547,9 @@ au_door_upcall(au_kcontext_t *kctx, au_dbuf_t *aubuf)
return (rc);
mutex_enter(&(kctx->auk_eagain_mutex));
- (void) cv_timedwait(&(kctx->auk_eagain_cv),
- &(kctx->auk_eagain_mutex),
- lbolt + ticks_to_wait);
+ (void) cv_reltimedwait(&(kctx->auk_eagain_cv),
+ &(kctx->auk_eagain_mutex), ticks_to_wait,
+ TR_CLOCK_TICK);
mutex_exit(&(kctx->auk_eagain_mutex));
retry = 1;
diff --git a/usr/src/uts/common/conf/param.c b/usr/src/uts/common/conf/param.c
index ceecf32ee8..ac5d99a76f 100644
--- a/usr/src/uts/common/conf/param.c
+++ b/usr/src/uts/common/conf/param.c
@@ -67,6 +67,7 @@
#include <sys/vmem.h>
#include <sys/clock.h>
+#include <sys/clock_impl.h>
#include <sys/serializer.h>
/*
@@ -149,15 +150,35 @@ const unsigned int _nbpg = (unsigned int)MMU_PAGESIZE;
* of more than about 10 kHz seems utterly ridiculous, although
* this observation will no doubt seem quaintly amusing one day.
*/
-int hz = 100;
-int hires_hz = 1000;
+#define HIRES_HZ_DEFAULT 1000
+
+int hz = HZ_DEFAULT;
+int hires_hz = HIRES_HZ_DEFAULT;
+
int hires_tick = 0;
int cpu_decay_factor = 10; /* this is no longer tied to clock */
-int tick_per_msec; /* clock ticks per millisecond (zero if hz < 1000) */
-int msec_per_tick; /* millseconds per clock tick (zero if hz > 1000) */
-int usec_per_tick; /* microseconds per clock tick */
-int nsec_per_tick; /* nanoseconds per clock tick */
int max_hres_adj; /* maximum adjustment of hrtime per tick */
+int tick_per_msec; /* clock ticks per millisecond (zero if hz < 1000) */
+
+/*
+ * Milliseconds, Microseconds, and Nanoseconds per clock tick
+ *
+ * Note:
+ * msec_per_tick is zero if hz > 1000
+ */
+int msec_per_tick;
+int usec_per_tick;
+int nsec_per_tick;
+
+/*
+ * Time Resolution values. These are defined in condvar.h and initialized in
+ * param_init(). Consumers of cv_reltimedwait() and cv_reltimedwait_sig()
+ * need to specify how accurate the timeout argument should be through
+ * one of these values. The intention is to allow the underlying implementation
+ * to anticipate or defer the expiration of timeouts, preventing unnecessary
+ * wakeups by batch processing similarly expiring events.
+ */
+time_res_t time_res[TR_COUNT];
/*
* Setting "snooping" to a non-zero value will cause a deadman panic if
@@ -661,6 +682,20 @@ param_init(void)
usec_per_tick = MICROSEC / hz;
nsec_per_tick = NANOSEC / hz;
max_hres_adj = nsec_per_tick >> ADJ_SHIFT;
+
+ /*
+ * Consumers of relative timedwait functions must specify how accurately
+ * the given timeout must expire. This is currently TR_CLOCK_TICK for
+ * the vast majority of consumers, but nsec_per_tick becomes an
+ * artificial value in a tickless world. Each caller of such routines
+ * should re-evaluate their usage and specify the appropriate
+ * resolution.
+ */
+ time_res[TR_NANOSEC] = SEC;
+ time_res[TR_MICROSEC] = MILLISEC;
+ time_res[TR_MILLISEC] = MICROSEC;
+ time_res[TR_SEC] = NANOSEC;
+ time_res[TR_CLOCK_TICK] = nsec_per_tick;
}
/*
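
param_init() above fills time_res[] with each resolution expressed in nanoseconds; SEC, MILLISEC, MICROSEC and NANOSEC are the sys/time.h scaling constants (1, 10^3, 10^6, 10^9), so TR_NANOSEC maps to 1 ns, TR_MICROSEC to 1,000 ns, and so on, while TR_CLOCK_TICK tracks the configured hz. This diff does not show how the clock subsystem consumes the table; one plausible use, sketched purely as an assumption, is rounding a deadline up to its resolution boundary so that similarly accurate timeouts can expire together:

/*
 * Sketch only -- an assumption about how a resolution hint could be
 * applied, not code from this changeset.
 */
static hrtime_t
deadline_round(clock_t delta_ticks, time_res_t res)
{
	hrtime_t now = gethrtime();
	hrtime_t interval = (hrtime_t)delta_ticks * nsec_per_tick;
	hrtime_t quantum = (hrtime_t)time_res[res];	/* resolution in ns */

	/* defer expiration to the next resolution boundary */
	return ((now + interval + quantum - 1) / quantum * quantum);
}
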
diff --git a/usr/src/uts/common/crypto/core/kcf_sched.c b/usr/src/uts/common/crypto/core/kcf_sched.c
index 7bae858d30..e012aba412 100644
--- a/usr/src/uts/common/crypto/core/kcf_sched.c
+++ b/usr/src/uts/common/crypto/core/kcf_sched.c
@@ -1045,7 +1045,7 @@ kcf_svc_do_run(void)
{
int error = 0;
clock_t rv;
- clock_t timeout_val;
+ clock_t timeout_val = drv_usectohz(kcf_idlethr_timeout);
kcf_areq_node_t *req;
kcf_context_t *ictx;
kcf_provider_desc_t *pd;
@@ -1056,12 +1056,9 @@ kcf_svc_do_run(void)
mutex_enter(&gswq->gs_lock);
while ((req = kcf_dequeue()) == NULL) {
- timeout_val = ddi_get_lbolt() +
- drv_usectohz(kcf_idlethr_timeout);
-
KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
- rv = cv_timedwait_sig(&gswq->gs_cv, &gswq->gs_lock,
- timeout_val);
+ rv = cv_reltimedwait_sig(&gswq->gs_cv,
+ &gswq->gs_lock, timeout_val, TR_CLOCK_TICK);
KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);
switch (rv) {
@@ -1462,7 +1459,7 @@ int
kcf_svc_wait(int *nthrs)
{
clock_t rv;
- clock_t timeout_val;
+ clock_t timeout_val = drv_usectohz(kcf_idlethr_timeout);
if (kcfpool == NULL)
return (ENOENT);
@@ -1479,11 +1476,8 @@ kcf_svc_wait(int *nthrs)
/* Go to sleep, waiting for the signaled flag. */
while (!kcfpool->kp_signal_create_thread) {
- timeout_val = ddi_get_lbolt() +
- drv_usectohz(kcf_idlethr_timeout);
-
- rv = cv_timedwait_sig(&kcfpool->kp_user_cv,
- &kcfpool->kp_user_lock, timeout_val);
+ rv = cv_reltimedwait_sig(&kcfpool->kp_user_cv,
+ &kcfpool->kp_user_lock, timeout_val, TR_CLOCK_TICK);
switch (rv) {
case 0:
/* Interrupted, return to handle exit or signal */
diff --git a/usr/src/uts/common/crypto/io/dca.c b/usr/src/uts/common/crypto/io/dca.c
index 1968b93adf..1f94ba07dc 100644
--- a/usr/src/uts/common/crypto/io/dca.c
+++ b/usr/src/uts/common/crypto/io/dca.c
@@ -2699,8 +2699,8 @@ dca_drain(dca_t *dca)
/* give it up to a second to drain from the chip */
if (!QEMPTY(&wlp->dwl_runq)) {
- (void) cv_timedwait(&wlp->dwl_cv, &wlp->dwl_lock,
- ddi_get_time() + drv_usectohz(STALETIME));
+ (void) cv_reltimedwait(&wlp->dwl_cv, &wlp->dwl_lock,
+ drv_usectohz(STALETIME), TR_CLOCK_TICK);
if (!QEMPTY(&wlp->dwl_runq)) {
dca_error(dca, "unable to drain device");
diff --git a/usr/src/uts/common/disp/cpucaps.c b/usr/src/uts/common/disp/cpucaps.c
index 7f0905e7f8..46f53faab6 100644
--- a/usr/src/uts/common/disp/cpucaps.c
+++ b/usr/src/uts/common/disp/cpucaps.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/disp.h>
#include <sys/param.h>
#include <sys/systm.h>
@@ -383,13 +381,13 @@ cap_project_enable(kproject_t *kpj, hrtime_t value)
KSTAT_TYPE_NAMED,
sizeof (cap_kstat) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL)) != NULL) {
- cap->cap_kstat->ks_data_size +=
- strlen(cap->cap_zone->zone_name) + 1;
- cap->cap_kstat->ks_lock = &cap_kstat_lock;
- cap->cap_kstat->ks_data = &cap_kstat;
- cap->cap_kstat->ks_update = cap_kstat_update;
- cap->cap_kstat->ks_private = cap;
- kstat_install(cap->cap_kstat);
+ cap->cap_kstat->ks_data_size +=
+ strlen(cap->cap_zone->zone_name) + 1;
+ cap->cap_kstat->ks_lock = &cap_kstat_lock;
+ cap->cap_kstat->ks_data = &cap_kstat;
+ cap->cap_kstat->ks_update = cap_kstat_update;
+ cap->cap_kstat->ks_private = cap;
+ kstat_install(cap->cap_kstat);
}
}
}
@@ -437,13 +435,13 @@ cap_zone_enable(zone_t *zone, hrtime_t value)
KSTAT_TYPE_NAMED,
sizeof (cap_kstat) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL)) != NULL) {
- cap->cap_kstat->ks_data_size +=
- strlen(cap->cap_zone->zone_name) + 1;
- cap->cap_kstat->ks_lock = &cap_kstat_lock;
- cap->cap_kstat->ks_data = &cap_kstat;
- cap->cap_kstat->ks_update = cap_kstat_update;
- cap->cap_kstat->ks_private = cap;
- kstat_install(cap->cap_kstat);
+ cap->cap_kstat->ks_data_size +=
+ strlen(cap->cap_zone->zone_name) + 1;
+ cap->cap_kstat->ks_lock = &cap_kstat_lock;
+ cap->cap_kstat->ks_data = &cap_kstat;
+ cap->cap_kstat->ks_update = cap_kstat_update;
+ cap->cap_kstat->ks_private = cap;
+ kstat_install(cap->cap_kstat);
}
}
}
@@ -470,15 +468,18 @@ cap_zone_disable(zone_t *zone)
* Apply specified callback to all caps contained in the list `l'.
*/
static void
-cap_walk(list_t *l, void (*cb)(cpucap_t *))
+cap_walk(list_t *l, void (*cb)(cpucap_t *, int64_t))
{
+ static uint64_t cpucap_walk_gen;
cpucap_t *cap;
ASSERT(MUTEX_HELD(&caps_lock));
for (cap = list_head(l); cap != NULL; cap = list_next(l, cap)) {
- (*cb)(cap);
+ (*cb)(cap, cpucap_walk_gen);
}
+
+ atomic_inc_64(&cpucap_walk_gen);
}
/*
@@ -487,8 +488,9 @@ cap_walk(list_t *l, void (*cb)(cpucap_t *))
* is placed on the waitq right after the check, it will be picked up during the
* next invocation of cap_poke_waitq().
*/
+/* ARGSUSED */
static void
-cap_poke_waitq(cpucap_t *cap)
+cap_poke_waitq(cpucap_t *cap, int64_t gen)
{
ASSERT(MUTEX_HELD(&caps_lock));
@@ -511,7 +513,7 @@ cap_poke_waitq(cpucap_t *cap)
* Kick off a thread from the cap waitq if cap is not reached.
*/
static void
-cap_project_usage_walker(cpucap_t *cap)
+cap_project_usage_walker(cpucap_t *cap, int64_t gen)
{
zone_t *zone = cap->cap_zone;
hrtime_t cap_usage = cap->cap_usage;
@@ -525,7 +527,7 @@ cap_project_usage_walker(cpucap_t *cap)
* Set or clear the CAP_REACHED flag based on the current usage.
* Only projects having their own caps are ever marked as CAP_REACHED.
*/
- cap_poke_waitq(cap);
+ cap_poke_waitq(cap, 0);
/*
* Add project's CPU usage to our zone's CPU usage.
@@ -537,15 +539,15 @@ cap_project_usage_walker(cpucap_t *cap)
/*
* If we haven't reset this zone's usage during this clock tick
- * yet, then do it now. The cap_lbolt field is used to check
+ * yet, then do it now. The cap_gen field is used to check
* whether this is the first zone's project we see during this
* tick or a subsequent one.
*/
- if (zcap->cap_lbolt != lbolt64) {
+ if (zcap->cap_gen != gen) {
if (zcap->cap_usage > zcap->cap_maxusage)
zcap->cap_maxusage = zcap->cap_usage;
zcap->cap_usage = 0;
- zcap->cap_lbolt = lbolt64;
+ zcap->cap_gen = gen;
}
DTRACE_PROBE2(cpucaps__zusage, cpucap_t *, zcap,
hrtime_t, cap_usage);
@@ -1095,7 +1097,7 @@ cpucaps_enforce(kthread_t *t)
ASSERT(ttoproj(t)->kpj_cpucap != NULL);
t->t_schedflag &= ~TS_ANYWAITQ;
if (waitq_enqueue(&(ttoproj(t)->kpj_cpucap->cap_waitq),
- t)) {
+ t)) {
return (B_TRUE);
}
}
@@ -1103,7 +1105,7 @@ cpucaps_enforce(kthread_t *t)
ASSERT(ttozone(t)->zone_cpucap != NULL);
t->t_schedflag &= ~TS_ZONEWAITQ;
if (waitq_enqueue(&(ttozone(t)->zone_cpucap->cap_waitq),
- t)) {
+ t)) {
return (B_TRUE);
}
}
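
The cpucaps change above replaces the per-zone cap_lbolt stamp with a generation number handed to each walk, so detecting "first project of this zone seen on this pass" no longer requires reading a global clock. The same pattern in isolation, with illustrative names only:

#include <sys/types.h>
#include <sys/atomic.h>

typedef struct zone_usage {
	uint64_t	zu_gen;		/* generation of the last reset */
	int64_t		zu_usage;	/* usage accumulated this pass */
} zone_usage_t;

static uint64_t walk_gen;		/* bumped once per complete walk */

static void
visit(zone_usage_t *z, int64_t delta, uint64_t gen)
{
	if (z->zu_gen != gen) {		/* first visit during this pass */
		z->zu_usage = 0;
		z->zu_gen = gen;
	}
	z->zu_usage += delta;
}

static void
walk(zone_usage_t **zones, int nzones, int64_t *deltas)
{
	int i;

	for (i = 0; i < nzones; i++)
		visit(zones[i], deltas[i], walk_gen);

	atomic_inc_64(&walk_gen);	/* the next pass sees a new generation */
}
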
diff --git a/usr/src/uts/common/disp/disp.c b/usr/src/uts/common/disp/disp.c
index b3f6efeb2e..26d9d2d22b 100644
--- a/usr/src/uts/common/disp/disp.c
+++ b/usr/src/uts/common/disp/disp.c
@@ -912,7 +912,7 @@ swtch()
restore_mstate(next);
CPU_STATS_ADDQ(cp, sys, pswitch, 1);
- cp->cpu_last_swtch = t->t_disp_time = lbolt;
+ cp->cpu_last_swtch = t->t_disp_time = ddi_get_lbolt();
TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");
if (dtrace_vtime_active)
@@ -1073,7 +1073,7 @@ swtch_to(kthread_t *next)
cp->cpu_disp_flags &= ~CPU_DISP_DONTSTEAL;
/* record last execution time */
- cp->cpu_last_swtch = curthread->t_disp_time = lbolt;
+ cp->cpu_last_swtch = curthread->t_disp_time = ddi_get_lbolt();
/*
* If t was previously in the TS_ONPROC state, setfrontdq and setbackdq
@@ -1152,7 +1152,7 @@ cpu_resched(cpu_t *cp, pri_t tpri)
*/
#define THREAD_HAS_CACHE_WARMTH(thread) \
((thread == curthread) || \
- ((lbolt - thread->t_disp_time) <= rechoose_interval))
+ ((ddi_get_lbolt() - thread->t_disp_time) <= rechoose_interval))
/*
* Put the specified thread on the back of the dispatcher
* queue corresponding to its current priority.
diff --git a/usr/src/uts/common/disp/fss.c b/usr/src/uts/common/disp/fss.c
index e52a9d89aa..b051974669 100644
--- a/usr/src/uts/common/disp/fss.c
+++ b/usr/src/uts/common/disp/fss.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
@@ -1641,7 +1639,7 @@ fss_forkret(kthread_t *t, kthread_t *ct)
*/
fssproc->fss_flags &= ~FSSBACKQ;
- if (t->t_disp_time != lbolt)
+ if (t->t_disp_time != ddi_get_lbolt())
setbackdq(t);
else
setfrontdq(t);
@@ -1844,7 +1842,7 @@ fss_swapin(kthread_t *t, int flags)
if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
time_t swapout_time;
- swapout_time = (lbolt - t->t_stime) / hz;
+ swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
if (INHERITED(t) || (fssproc->fss_flags & FSSKPRI)) {
epri = (long)DISP_PRIO(t) + swapout_time;
} else {
@@ -1894,7 +1892,7 @@ fss_swapout(kthread_t *t, int flags)
ASSERT(t->t_state & (TS_SLEEP | TS_RUN));
- swapin_time = (lbolt - t->t_stime) / hz;
+ swapin_time = (ddi_get_lbolt() - t->t_stime) / hz;
if (flags == SOFTSWAP) {
if (t->t_state == TS_SLEEP && swapin_time > maxslp) {
@@ -2098,7 +2096,7 @@ fss_setrun(kthread_t *t)
if ((fssproc->fss_flags & FSSKPRI) == 0)
THREAD_CHANGE_PRI(t, fssproc->fss_umdpri);
- if (t->t_disp_time != lbolt)
+ if (t->t_disp_time != ddi_get_lbolt())
setbackdq(t);
else
setfrontdq(t);
@@ -2149,7 +2147,7 @@ fss_sleep(kthread_t *t)
if (DISP_MUST_SURRENDER(curthread))
cpu_surrender(t);
}
- t->t_stime = lbolt; /* time stamp for the swapper */
+ t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
}
/*
@@ -2300,7 +2298,7 @@ fss_wakeup(kthread_t *t)
fss_active(t);
- t->t_stime = lbolt; /* time stamp for the swapper */
+ t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
fssproc = FSSPROC(t);
fssproc->fss_flags &= ~FSSBACKQ;
@@ -2323,7 +2321,7 @@ fss_wakeup(kthread_t *t)
/*
* Otherwise, we recalculate the priority.
*/
- if (t->t_disp_time == lbolt) {
+ if (t->t_disp_time == ddi_get_lbolt()) {
setfrontdq(t);
} else {
fssproc->fss_timeleft = fss_quantum;
diff --git a/usr/src/uts/common/disp/fx.c b/usr/src/uts/common/disp/fx.c
index 08a67f671f..dd6295074c 100644
--- a/usr/src/uts/common/disp/fx.c
+++ b/usr/src/uts/common/disp/fx.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
@@ -1195,7 +1193,7 @@ fx_setrun(kthread_t *t)
ASSERT(THREAD_LOCK_HELD(t)); /* t should be in transition */
fxpp->fx_flags &= ~FXBACKQ;
- if (t->t_disp_time != lbolt)
+ if (t->t_disp_time != ddi_get_lbolt())
setbackdq(t);
else
setfrontdq(t);
@@ -1222,7 +1220,7 @@ fx_sleep(kthread_t *t)
if (FX_HAS_CB(fxpp)) {
FX_CB_SLEEP(FX_CALLB(fxpp), fxpp->fx_cookie);
}
- t->t_stime = lbolt; /* time stamp for the swapper */
+ t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
}
@@ -1394,7 +1392,7 @@ fx_wakeup(kthread_t *t)
ASSERT(THREAD_LOCK_HELD(t));
- t->t_stime = lbolt; /* time stamp for the swapper */
+ t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
if (FX_HAS_CB(fxpp)) {
clock_t new_quantum = (clock_t)fxpp->fx_pquantum;
pri_t newpri = fxpp->fx_pri;
@@ -1415,7 +1413,7 @@ fx_wakeup(kthread_t *t)
fxpp->fx_flags &= ~FXBACKQ;
- if (t->t_disp_time != lbolt)
+ if (t->t_disp_time != ddi_get_lbolt())
setbackdq(t);
else
setfrontdq(t);
diff --git a/usr/src/uts/common/disp/thread.c b/usr/src/uts/common/disp/thread.c
index 740d247672..1b39cf0d9e 100644
--- a/usr/src/uts/common/disp/thread.c
+++ b/usr/src/uts/common/disp/thread.c
@@ -24,7 +24,6 @@
* Use is subject to license terms.
*/
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
@@ -431,7 +430,7 @@ thread_create(
t->t_clfuncs = &sys_classfuncs.thread;
t->t_cid = syscid;
t->t_pri = pri;
- t->t_stime = lbolt;
+ t->t_stime = ddi_get_lbolt();
t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
t->t_bind_cpu = PBIND_NONE;
t->t_bindflag = (uchar_t)default_binding_mode;
diff --git a/usr/src/uts/common/disp/ts.c b/usr/src/uts/common/disp/ts.c
index 53612cf2bc..e8d1565276 100644
--- a/usr/src/uts/common/disp/ts.c
+++ b/usr/src/uts/common/disp/ts.c
@@ -20,15 +20,13 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
-#pragma ident "%Z%%M% %I% %E% SMI" /* from SVr4.0 1.23 */
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
@@ -1498,7 +1496,7 @@ ts_setrun(kthread_t *t)
else
setbackdq(t);
} else {
- if (t->t_disp_time != lbolt)
+ if (t->t_disp_time != ddi_get_lbolt())
setbackdq(t);
else
setfrontdq(t);
@@ -1560,7 +1558,7 @@ ts_sleep(kthread_t *t)
if (DISP_MUST_SURRENDER(curthread))
cpu_surrender(curthread);
}
- t->t_stime = lbolt; /* time stamp for the swapper */
+ t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
TRACE_2(TR_FAC_DISP, TR_SLEEP,
"sleep:tid %p old pri %d", t, old_pri);
}
@@ -1591,7 +1589,7 @@ ts_swapin(kthread_t *t, int flags)
if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
time_t swapout_time;
- swapout_time = (lbolt - t->t_stime) / hz;
+ swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
if (INHERITED(t) || (tspp->ts_flags & (TSKPRI | TSIASET)))
epri = (long)DISP_PRIO(t) + swapout_time;
else {
@@ -1659,7 +1657,7 @@ ts_swapout(kthread_t *t, int flags)
* We know that pri_t is a short.
* Be sure not to overrun its range.
*/
- swapin_time = (lbolt - t->t_stime) / hz;
+ swapin_time = (ddi_get_lbolt() - t->t_stime) / hz;
if (flags == SOFTSWAP) {
if (t->t_state == TS_SLEEP && swapin_time > maxslp) {
epri = 0;
@@ -1980,7 +1978,7 @@ ts_wakeup(kthread_t *t)
ASSERT(THREAD_LOCK_HELD(t));
- t->t_stime = lbolt; /* time stamp for the swapper */
+ t->t_stime = ddi_get_lbolt(); /* time stamp for the swapper */
if (tspp->ts_flags & TSKPRI) {
tspp->ts_flags &= ~TSBACKQ;
@@ -2017,7 +2015,7 @@ ts_wakeup(kthread_t *t)
else
setbackdq(t);
} else {
- if (t->t_disp_time != lbolt)
+ if (t->t_disp_time != ddi_get_lbolt())
setbackdq(t);
else
setfrontdq(t);
diff --git a/usr/src/uts/common/fs/autofs/auto_subr.c b/usr/src/uts/common/fs/autofs/auto_subr.c
index 522bbc38d7..f648b5ed50 100644
--- a/usr/src/uts/common/fs/autofs/auto_subr.c
+++ b/usr/src/uts/common/fs/autofs/auto_subr.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/errno.h>
@@ -2604,7 +2602,7 @@ auto_do_unmount(struct autofs_globals *fngp)
CALLB_CPR_SAFE_BEGIN(&cprinfo);
newthread:
mutex_exit(&fngp->fng_unmount_threads_lock);
- timeleft = zone_status_timedwait(zone, lbolt +
+ timeleft = zone_status_timedwait(zone, ddi_get_lbolt() +
autofs_unmount_thread_timer * hz, ZONE_IS_SHUTTING_DOWN);
mutex_enter(&fngp->fng_unmount_threads_lock);
diff --git a/usr/src/uts/common/fs/cachefs/cachefs_resource.c b/usr/src/uts/common/fs/cachefs/cachefs_resource.c
index 796d352237..7526a5f826 100644
--- a/usr/src/uts/common/fs/cachefs/cachefs_resource.c
+++ b/usr/src/uts/common/fs/cachefs/cachefs_resource.c
@@ -19,10 +19,9 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/param.h>
#include <sys/types.h>
@@ -257,7 +256,7 @@ cachefs_rl_alloc(struct cachefscache *cachep, rl_entry_t *valp, uint_t *entnop)
entno = lhp->rli_front;
if (entno == 0) {
if (cachep->c_rlinfo.rl_entries >=
- cachep->c_label.cl_maxinodes) {
+ cachep->c_label.cl_maxinodes) {
error = ENOMEM;
goto out;
}
@@ -807,7 +806,7 @@ cachefs_garbage_collect(cachefscache_t *cachep)
cachep->c_gc_after = cachefs_gc_front_atime(cachep);
CACHEFS_TIME_TO_CFS_TIME_COPY(cachep->c_gc_after,
- cachep->c_rlinfo.rl_gctime, error);
+ cachep->c_rlinfo.rl_gctime, error);
}
/*
@@ -978,7 +977,7 @@ cachefs_cachep_worker_thread(cachefscache_t *cachep)
struct flock64 fl;
callb_cpr_t cprinfo;
kmutex_t cpr_lock;
-
+ clock_t wakeup;
/* lock the lock file for exclusive write access */
fl.l_type = F_WRLCK;
@@ -987,8 +986,8 @@ cachefs_cachep_worker_thread(cachefscache_t *cachep)
fl.l_len = (offset_t)1024;
fl.l_sysid = 0;
fl.l_pid = 0;
- error = VOP_FRLOCK(cachep->c_lockvp, F_SETLK, &fl, FWRITE,
- (offset_t)0, NULL, kcred, NULL);
+ error = VOP_FRLOCK(cachep->c_lockvp, F_SETLK, &fl, FWRITE, (offset_t)0,
+ NULL, kcred, NULL);
if (error) {
cmn_err(CE_WARN,
"cachefs: Can't lock Cache Lock File(r); Error %d\n",
@@ -1000,17 +999,16 @@ cachefs_cachep_worker_thread(cachefscache_t *cachep)
mutex_enter(&cpr_lock);
mutex_enter(&cachep->c_contentslock);
+ wakeup = (clock_t)(cachefs_ppend_time * hz);
+
/* loop while the thread is allowed to run */
while ((cachep->c_flags & CACHE_CACHEW_THREADEXIT) == 0) {
- clock_t wakeup;
-
/* wait for a wakeup call */
cachep->c_flags &= ~CACHE_GARBAGE_COLLECT;
CALLB_CPR_SAFE_BEGIN(&cprinfo);
mutex_exit(&cpr_lock);
- wakeup = (clock_t)(lbolt + (cachefs_ppend_time * hz));
- (void) cv_timedwait(&cachep->c_cwcv,
- &cachep->c_contentslock, wakeup);
+ (void) cv_reltimedwait(&cachep->c_cwcv,
+ &cachep->c_contentslock, wakeup, TR_CLOCK_TICK);
mutex_enter(&cpr_lock);
CALLB_CPR_SAFE_END(&cprinfo, &cpr_lock);
@@ -1056,8 +1054,8 @@ cachefs_cachep_worker_thread(cachefscache_t *cachep)
fl.l_len = (offset_t)1024;
fl.l_sysid = 0;
fl.l_pid = 0;
- error = VOP_FRLOCK(cachep->c_lockvp, F_SETLK, &fl,
- FWRITE, (offset_t)0, NULL, kcred, NULL);
+ error = VOP_FRLOCK(cachep->c_lockvp, F_SETLK, &fl, FWRITE, (offset_t)0,
+ NULL, kcred, NULL);
if (error) {
cmn_err(CE_WARN, "cachefs: Can't unlock lock file\n");
}
@@ -1119,7 +1117,7 @@ cachefs_rl_debug_reclaim(void *cdrarg)
int error;
for (cachep = cachefs_cachelist; cachep != NULL;
- cachep = cachep->c_next) {
+ cachep = cachep->c_next) {
mutex_enter(&cachep->c_contentslock);
for (index = 0;
@@ -1147,8 +1145,8 @@ cachefs_rl_debug_save(rl_entry_t *rlent)
if (cachefs_rl_debug_cache == NULL)
cachefs_rl_debug_cache =
kmem_cache_create("cachefs_rl_debug",
- sizeof (rl_debug_t), 0,
- NULL, NULL, cachefs_rl_debug_reclaim, NULL, NULL, 0);
+ sizeof (rl_debug_t), 0,
+ NULL, NULL, cachefs_rl_debug_reclaim, NULL, NULL, 0);
rldb = kmem_cache_alloc(cachefs_rl_debug_cache, KM_SLEEP);
++cachefs_rl_debug_inuse;
diff --git a/usr/src/uts/common/fs/cachefs/cachefs_subr.c b/usr/src/uts/common/fs/cachefs/cachefs_subr.c
index 1f82e638e6..c1ab7f0b05 100644
--- a/usr/src/uts/common/fs/cachefs/cachefs_subr.c
+++ b/usr/src/uts/common/fs/cachefs/cachefs_subr.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
@@ -157,11 +155,11 @@ cachefs_cache_create(void)
void
cachefs_cache_destroy(cachefscache_t *cachep)
{
- clock_t tend;
int error = 0;
#ifdef CFSRLDEBUG
uint_t index;
#endif /* CFSRLDEBUG */
+ clock_t wakeup = (60 * hz);
/* stop async threads */
while (cachep->c_workq.wq_thread_count > 0)
@@ -172,9 +170,8 @@ cachefs_cache_destroy(cachefscache_t *cachep)
while (cachep->c_flags & CACHE_CACHEW_THREADRUN) {
cachep->c_flags |= CACHE_CACHEW_THREADEXIT;
cv_signal(&cachep->c_cwcv);
- tend = lbolt + (60 * hz);
- (void) cv_timedwait(&cachep->c_cwhaltcv,
- &cachep->c_contentslock, tend);
+ (void) cv_reltimedwait(&cachep->c_cwhaltcv,
+ &cachep->c_contentslock, wakeup, TR_CLOCK_TICK);
}
if ((cachep->c_flags & CACHE_ALLOC_PENDING) == 0) {
@@ -195,7 +192,7 @@ cachefs_cache_destroy(cachefscache_t *cachep)
rl_entry_t *rlent;
error = cachefs_rl_entry_get(cachep, index,
- rlent);
+ rlent);
/*
* Since we are destroying the cache,
* better to ignore and proceed
@@ -289,29 +286,29 @@ cachefs_cache_activate_ro(cachefscache_t *cachep, vnode_t *cdvp)
/* Get the lock file */
error = VOP_LOOKUP(cdvp, CACHEFS_LOCK_FILE, &lockvp, NULL, 0, NULL,
- kcred, NULL, NULL, NULL);
+ kcred, NULL, NULL, NULL);
if (error) {
cmn_err(CE_WARN, "cachefs: activate_a: cache corruption"
- " run fsck.\n");
+ " run fsck.\n");
goto out;
}
/* Get the label file */
error = VOP_LOOKUP(cdvp, CACHELABEL_NAME, &labelvp, NULL, 0, NULL,
- kcred, NULL, NULL, NULL);
+ kcred, NULL, NULL, NULL);
if (error) {
cmn_err(CE_WARN, "cachefs: activate_b: cache corruption"
- " run fsck.\n");
+ " run fsck.\n");
goto out;
}
/* read in the label */
error = vn_rdwr(UIO_READ, labelvp, (caddr_t)&cachep->c_label,
- sizeof (struct cache_label), 0LL, UIO_SYSSPACE,
- 0, (rlim64_t)0, kcred, NULL);
+ sizeof (struct cache_label), 0LL, UIO_SYSSPACE,
+ 0, (rlim64_t)0, kcred, NULL);
if (error) {
cmn_err(CE_WARN, "cachefs: activate_c: cache corruption"
- " run fsck.\n");
+ " run fsck.\n");
goto out;
}
@@ -327,17 +324,17 @@ cachefs_cache_activate_ro(cachefscache_t *cachep, vnode_t *cdvp)
NULL, NULL, NULL);
if (error) {
cmn_err(CE_WARN, "cachefs: activate_d: cache corruption"
- " run fsck.\n");
+ " run fsck.\n");
goto out;
}
/* Read the usage struct for this cache */
error = vn_rdwr(UIO_READ, rifvp, (caddr_t)&cachep->c_usage,
- sizeof (struct cache_usage), 0LL, UIO_SYSSPACE, 0,
- (rlim64_t)0, kcred, NULL);
+ sizeof (struct cache_usage), 0LL, UIO_SYSSPACE, 0,
+ (rlim64_t)0, kcred, NULL);
if (error) {
cmn_err(CE_WARN, "cachefs: activate_e: cache corruption"
- " run fsck.\n");
+ " run fsck.\n");
goto out;
}
@@ -350,11 +347,11 @@ cachefs_cache_activate_ro(cachefscache_t *cachep, vnode_t *cdvp)
/* Read the rlinfo for this cache */
error = vn_rdwr(UIO_READ, rifvp, (caddr_t)&cachep->c_rlinfo,
- sizeof (cachefs_rl_info_t), (offset_t)sizeof (struct cache_usage),
- UIO_SYSSPACE, 0, 0, kcred, NULL);
+ sizeof (cachefs_rl_info_t), (offset_t)sizeof (struct cache_usage),
+ UIO_SYSSPACE, 0, 0, kcred, NULL);
if (error) {
cmn_err(CE_WARN, "cachefs: activate_f: cache corruption"
- " run fsck.\n");
+ " run fsck.\n");
goto out;
}
@@ -363,7 +360,7 @@ cachefs_cache_activate_ro(cachefscache_t *cachep, vnode_t *cdvp)
NULL, 0, NULL, kcred, NULL, NULL, NULL);
if (error) {
cmn_err(CE_WARN, "cachefs: activate_g: cache corruption"
- " run fsck.\n");
+ " run fsck.\n");
goto out;
}
@@ -441,8 +438,8 @@ cachefs_stop_cache(cnode_t *cp)
cachefscache_t *cachep = fscp->fs_cache;
filegrp_t *fgp;
int i;
- clock_t tend;
int error = 0;
+ clock_t wakeup = (60 * hz);
/* XXX verify lock-ordering for this function */
@@ -497,7 +494,7 @@ cachefs_stop_cache(cnode_t *cp)
for (i = 0; i < CFS_FS_FGP_BUCKET_SIZE; i++) {
for (fgp = fscp->fs_filegrp[i]; fgp != NULL;
- fgp = fgp->fg_next) {
+ fgp = fgp->fg_next) {
mutex_enter(&fgp->fg_mutex);
ASSERT((fgp->fg_flags &
@@ -552,9 +549,8 @@ cachefs_stop_cache(cnode_t *cp)
while (cachep->c_flags & CACHE_CACHEW_THREADRUN) {
cachep->c_flags |= CACHE_CACHEW_THREADEXIT;
cv_signal(&cachep->c_cwcv);
- tend = lbolt + (60 * hz);
- (void) cv_timedwait(&cachep->c_cwhaltcv,
- &cachep->c_contentslock, tend);
+ (void) cv_reltimedwait(&cachep->c_cwhaltcv,
+ &cachep->c_contentslock, wakeup, TR_CLOCK_TICK);
}
if (cachep->c_resfilevp) {
@@ -677,7 +673,7 @@ cachefs_cache_dirty(struct cachefscache *cachep, int lockit)
if (error = vn_rdwr(UIO_WRITE, cachep->c_resfilevp,
(caddr_t)&cachep->c_usage, sizeof (struct cache_usage),
0LL, UIO_SYSSPACE, FSYNC, (rlim64_t)RLIM_INFINITY,
- kcred, NULL)) {
+ kcred, NULL)) {
cmn_err(CE_WARN,
"cachefs: clean flag write error: %d\n", error);
}
@@ -717,24 +713,25 @@ cachefs_cache_rssync(struct cachefscache *cachep)
(offset_t)((cachep->c_rl_window + 1) * MAXBSIZE),
UIO_SYSSPACE, FSYNC, RLIM_INFINITY, kcred, NULL);
if (error)
- cmn_err(CE_WARN, "cachefs: Can't Write rl entries Info\n");
+ cmn_err(CE_WARN,
+ "cachefs: Can't Write rl entries Info\n");
cachefs_kmem_free(cachep->c_rl_entries, MAXBSIZE);
cachep->c_rl_entries = NULL;
}
/* write the usage struct for this cache */
error = vn_rdwr(UIO_WRITE, cachep->c_resfilevp,
- (caddr_t)&cachep->c_usage, sizeof (struct cache_usage),
- 0LL, UIO_SYSSPACE, 0, (rlim64_t)RLIM_INFINITY, kcred, NULL);
+ (caddr_t)&cachep->c_usage, sizeof (struct cache_usage),
+ 0LL, UIO_SYSSPACE, 0, (rlim64_t)RLIM_INFINITY, kcred, NULL);
if (error) {
cmn_err(CE_WARN, "cachefs: Can't Write Cache Usage Info\n");
}
/* write the rlinfo for this cache */
error = vn_rdwr(UIO_WRITE, cachep->c_resfilevp,
- (caddr_t)&cachep->c_rlinfo, sizeof (cachefs_rl_info_t),
- (offset_t)sizeof (struct cache_usage), UIO_SYSSPACE,
- 0, (rlim64_t)RLIM_INFINITY, kcred, NULL);
+ (caddr_t)&cachep->c_rlinfo, sizeof (cachefs_rl_info_t),
+ (offset_t)sizeof (struct cache_usage), UIO_SYSSPACE,
+ 0, (rlim64_t)RLIM_INFINITY, kcred, NULL);
if (error) {
cmn_err(CE_WARN, "cachefs: Can't Write Cache RL Info\n");
}
@@ -822,7 +819,7 @@ cachefs_cache_sync(struct cachefscache *cachep)
done = 1;
} else {
cachep->c_usage.cu_flags |=
- CUSAGE_ACTIVE;
+ CUSAGE_ACTIVE;
}
} else {
done = 1;
@@ -860,7 +857,7 @@ cachefs_cache_unique(cachefscache_t *cachep)
mutex_enter(&cachep->c_contentslock);
if (cachep->c_usage.cu_flags & CUSAGE_NEED_ADJUST ||
- ++(cachep->c_unique) == 0) {
+ ++(cachep->c_unique) == 0) {
cachep->c_usage.cu_unique++;
if (cachep->c_unique == 0)
@@ -893,7 +890,7 @@ cachefs_createfrontfile(cnode_t *cp, struct filegrp *fgp)
#ifdef CFSDEBUG
CFS_DEBUG(CFSDEBUG_FRONT)
printf("c_createfrontfile: ENTER cp %p fgp %p\n",
- (void *)cp, (void *)fgp);
+ (void *)cp, (void *)fgp);
#endif
ASSERT(cp->c_frontvp == NULL);
@@ -1037,7 +1034,7 @@ out:
#ifdef CFSDEBUG
CFS_DEBUG(CFSDEBUG_FRONT)
printf("c_createfrontfile: EXIT error = %d name %s\n", error,
- name);
+ name);
#endif
return (error);
}
@@ -1082,7 +1079,7 @@ cachefs_removefrontfile(cachefs_metadata_t *mdp, cfs_cid_t *cidp,
}
mdp->md_flags &= ~(MD_FILE | MD_POPULATED | MD_ACL | MD_ACLDIR);
bzero(&mdp->md_allocinfo, mdp->md_allocents *
- sizeof (struct cachefs_allocmap));
+ sizeof (struct cachefs_allocmap));
cachefs_freefile(fgp->fg_fscp->fs_cache);
}
@@ -1157,9 +1154,8 @@ cachefs_getfrontfile(cnode_t *cp)
#ifdef CFSDEBUG
if (cp->c_frontvp != NULL)
CFS_DEBUG(CFSDEBUG_FRONT)
- printf(
- "c_getfrontfile: !MD_FILE and frontvp not null cp %p\n",
- (void *)cp);
+ printf("c_getfrontfile: !MD_FILE and frontvp "
+ "not null cp %p\n", (void *)cp);
#endif
if (CTOV(cp)->v_type == VDIR)
ASSERT((cp->c_metadata.md_flags & MD_POPULATED) == 0);
@@ -1174,14 +1170,14 @@ cachefs_getfrontfile(cnode_t *cp)
*/
if (fgp->fg_dirvp == NULL) {
cmn_err(CE_WARN, "cachefs: gff0: corrupted file system"
- " run fsck\n");
+ " run fsck\n");
cachefs_inval_object(cp);
cp->c_flags |= CN_NOCACHE;
error = ESTALE;
goto out;
}
error = VFS_VGET(fgp->fg_dirvp->v_vfsp, &cp->c_frontvp,
- &cp->c_metadata.md_fid);
+ &cp->c_metadata.md_fid);
if (error || (cp->c_frontvp == NULL)) {
#ifdef CFSDEBUG
CFS_DEBUG(CFSDEBUG_FRONT)
@@ -1213,7 +1209,7 @@ cachefs_getfrontfile(cnode_t *cp)
error = VOP_GETATTR(cp->c_frontvp, &va, 0, kcred, NULL);
if (error) {
cmn_err(CE_WARN, "cachefs: gff2: front file"
- " system error %d", error);
+ " system error %d", error);
cachefs_inval_object(cp);
error = (cp->c_flags & CN_NOCACHE) ? ESTALE : 0;
goto out;
@@ -1228,11 +1224,11 @@ cachefs_getfrontfile(cnode_t *cp)
sec = cp->c_metadata.md_timestamp.tv_sec;
nsec = cp->c_metadata.md_timestamp.tv_nsec;
printf("c_getfrontfile: timestamps don't"
- " match fileno %lld va %lx %lx"
- " meta %lx %lx\n",
- (u_longlong_t)cp->c_id.cid_fileno,
- va.va_mtime.tv_sec,
- va.va_mtime.tv_nsec, sec, nsec);
+ " match fileno %lld va %lx %lx"
+ " meta %lx %lx\n",
+ (u_longlong_t)cp->c_id.cid_fileno,
+ va.va_mtime.tv_sec,
+ va.va_mtime.tv_nsec, sec, nsec);
}
#endif
cachefs_inval_object(cp);
@@ -1240,7 +1236,6 @@ cachefs_getfrontfile(cnode_t *cp)
}
}
out:
-
#ifdef CFSDEBUG
CFS_DEBUG(CFSDEBUG_FRONT)
printf("c_getfrontfile: EXIT error = %d\n", error);
@@ -1258,7 +1253,7 @@ cachefs_inval_object(cnode_t *cp)
ASSERT(CFS_ISFS_BACKFS_NFSV4(C_TO_FSCACHE(cp)) == 0);
ASSERT(MUTEX_HELD(&cp->c_statelock));
ASSERT((cp->c_flags & CN_ASYNC_POP_WORKING) == 0 ||
- cp->c_popthrp == curthread);
+ cp->c_popthrp == curthread);
#if 0
CFS_DEBUG(CFSDEBUG_SUBR)
printf("c_inval_object: ENTER cp %p\n", (void *)cp);
@@ -1282,7 +1277,7 @@ cachefs_inval_object(cnode_t *cp)
if (cp->c_frontvp == NULL) {
error = VFS_VGET(fgp->fg_dirvp->v_vfsp, &cp->c_frontvp,
- &cp->c_metadata.md_fid);
+ &cp->c_metadata.md_fid);
if (error || (cp->c_frontvp == NULL)) {
#ifdef CFSDEBUG
CFS_DEBUG(CFSDEBUG_FRONT)
@@ -1463,7 +1458,7 @@ cachefs_check_allocmap(cnode_t *cp, u_offset_t off)
for (i = 0; i < cp->c_metadata.md_allocents; i++) {
struct cachefs_allocmap *allocp =
- cp->c_metadata.md_allocinfo + i;
+ cp->c_metadata.md_allocinfo + i;
if (off >= allocp->am_start_off) {
if ((off + size_to_look) <=
@@ -1508,7 +1503,7 @@ cachefs_coalesce_allocmap(struct cachefs_metadata *cmd)
allocp++;
for (i = 1; i < cmd->md_allocents; i++, allocp++) {
if (nallocp->am_start_off + nallocp->am_size ==
- allocp->am_start_off) {
+ allocp->am_start_off) {
nallocp->am_size += allocp->am_size;
reduced++;
} else {
@@ -1552,7 +1547,7 @@ again:
endoff = off + size;
if (endoff >= allocp->am_start_off) {
tmpendoff = allocp->am_start_off +
- allocp->am_size;
+ allocp->am_size;
if (endoff < tmpendoff)
endoff = tmpendoff;
allocp->am_size = endoff - off;
@@ -1657,7 +1652,7 @@ cachefs_populate(cnode_t *cp, u_offset_t off, size_t popsize, vnode_t *frontvp,
size = MAXBSIZE - n;
error = fbread(backvp, (offset_t)blkoff, n + size,
- S_OTHER, &fbp);
+ S_OTHER, &fbp);
if (CFS_TIMEOUT(C_TO_FSCACHE(cp), error))
goto out;
else if (error) {
@@ -1683,8 +1678,8 @@ cachefs_populate(cnode_t *cp, u_offset_t off, size_t popsize, vnode_t *frontvp,
}
resid = 0;
error = vn_rdwr(UIO_WRITE, frontvp, buf + n, size,
- (offset_t)from, UIO_SYSSPACE, 0,
- (rlim64_t)RLIM64_INFINITY, cr, &resid);
+ (offset_t)from, UIO_SYSSPACE, 0,
+ (rlim64_t)RLIM64_INFINITY, cr, &resid);
if (error) {
#ifdef CFSDEBUG
CFS_DEBUG(CFSDEBUG_FRONT)
@@ -1724,8 +1719,8 @@ out:
* occurred during large files project - XXX.
*/
void
-cachefs_cluster_allocmap(u_offset_t off, u_offset_t *popoffp,
- size_t *popsizep, size_t size, struct cnode *cp)
+cachefs_cluster_allocmap(u_offset_t off, u_offset_t *popoffp, size_t *popsizep,
+ size_t size, struct cnode *cp)
{
int i;
u_offset_t lastoff = 0;
@@ -1737,11 +1732,11 @@ cachefs_cluster_allocmap(u_offset_t off, u_offset_t *popoffp,
#ifdef CFSDEBUG
CFS_DEBUG(CFSDEBUG_SUBR)
printf("cachefs_cluster_allocmap: off %llx, size %llx, "
- "c_size %llx\n", off, size, (longlong_t)cp->c_size);
+ "c_size %llx\n", off, size, (longlong_t)cp->c_size);
#endif /* CFSDEBUG */
for (i = 0; i < cp->c_metadata.md_allocents; i++) {
struct cachefs_allocmap *allocp =
- cp->c_metadata.md_allocinfo + i;
+ cp->c_metadata.md_allocinfo + i;
if (allocp->am_start_off > off) {
if ((off + size) > allocp->am_start_off) {
@@ -1756,7 +1751,7 @@ cachefs_cluster_allocmap(u_offset_t off, u_offset_t *popoffp,
}
*popoffp = (off - backward_diff) & (offset_t)PAGEMASK;
*popsizep = ((off + forward_diff) - *popoffp) &
- (offset_t)PAGEMASK;
+ (offset_t)PAGEMASK;
return;
} else {
lastoff = allocp->am_start_off + allocp->am_size;
@@ -1765,7 +1760,7 @@ cachefs_cluster_allocmap(u_offset_t off, u_offset_t *popoffp,
if ((lastoff + size) > off) {
*popoffp = (lastoff & (offset_t)PAGEMASK);
} else {
- *popoffp = off & (offset_t)PAGEMASK;
+ *popoffp = off & (offset_t)PAGEMASK;
}
/*
@@ -1774,17 +1769,16 @@ cachefs_cluster_allocmap(u_offset_t off, u_offset_t *popoffp,
*/
if ((*popoffp + size) > cp->c_size)
*popsizep = (cp->c_size - *popoffp + PAGEOFFSET) &
- (offset_t)PAGEMASK;
+ (offset_t)PAGEMASK;
else if (size < PAGESIZE)
- *popsizep = (size + PAGEOFFSET) &
- (offset_t)PAGEMASK;
+ *popsizep = (size + PAGEOFFSET) & (offset_t)PAGEMASK;
else
*popsizep = size & (offset_t)PAGEMASK;
#ifdef CFSDEBUG
CFS_DEBUG(CFSDEBUG_SUBR)
printf("cachefs_cluster_allocmap: popoff %llx, popsize %llx\n",
- (u_longlong_t)(*popoffp), (u_longlong_t)(*popsizep));
+ (u_longlong_t)(*popoffp), (u_longlong_t)(*popsizep));
#endif /* CFSDEBUG */
}
@@ -1909,8 +1903,8 @@ cachefs_readlink_back(cnode_t *cp, cred_t *cr, caddr_t *bufp, int *buflenp)
/* get the link data */
CFS_DPRINT_BACKFS_NFSV4(fscp,
- ("cachefs_readlink (nfsv4): cnode %p, backvp %p\n",
- cp, cp->c_backvp));
+ ("cachefs_readlink (nfsv4): cnode %p, backvp %p\n",
+ cp, cp->c_backvp));
error = VOP_READLINK(cp->c_backvp, &uio, cr, NULL);
if (error) {
cachefs_kmem_free(buf, MAXPATHLEN);
@@ -2138,10 +2132,10 @@ cachefs_async_start(struct cachefs_workq *qp)
CALLB_CPR_SAFE_BEGIN(&cprinfo);
/* sleep until there is something to do */
- left = cv_timedwait(&qp->wq_req_cv,
- &qp->wq_queue_lock, CFS_ASYNC_TIMEOUT + lbolt);
- CALLB_CPR_SAFE_END(&cprinfo,
- &qp->wq_queue_lock);
+ left = cv_reltimedwait(&qp->wq_req_cv,
+ &qp->wq_queue_lock, CFS_ASYNC_TIMEOUT,
+ TR_CLOCK_TICK);
+ CALLB_CPR_SAFE_END(&cprinfo, &qp->wq_queue_lock);
if ((qp->wq_head == NULL) && (qp->wq_logwork == 0))
continue;
}
@@ -2188,7 +2182,6 @@ int
cachefs_async_halt(struct cachefs_workq *qp, int force)
{
int error = 0;
- clock_t tend;
mutex_enter(&qp->wq_queue_lock);
if (force)
@@ -2197,9 +2190,8 @@ cachefs_async_halt(struct cachefs_workq *qp, int force)
if (qp->wq_thread_count > 0) {
qp->wq_halt_request++;
cv_broadcast(&qp->wq_req_cv);
- tend = lbolt + (60 * hz);
- (void) cv_timedwait(&qp->wq_halt_cv,
- &qp->wq_queue_lock, tend);
+ (void) cv_reltimedwait(&qp->wq_halt_cv,
+ &qp->wq_queue_lock, (60 * hz), TR_CLOCK_TICK);
qp->wq_halt_request--;
if (qp->wq_thread_count > 0) {
if ((qp->wq_thread_count == 1) &&
@@ -2254,7 +2246,7 @@ cachefs_async_putpage(struct cachefs_putpage_req *prp, cred_t *cr)
ASSERT(CFS_ISFS_BACKFS_NFSV4(C_TO_FSCACHE(cp)) == 0);
(void) VOP_PUTPAGE(prp->cp_vp, prp->cp_off, prp->cp_len,
- prp->cp_flags, cr, NULL);
+ prp->cp_flags, cr, NULL);
mutex_enter(&cp->c_iomutex);
if (--cp->c_nio == 0)
@@ -2666,7 +2658,7 @@ cachefs_kmem_free(void *mp, size_t size)
ASSERT(n >= (size + 8));
front_kwp = (struct km_wrap *)((uintptr_t)mp - sizeof (struct km_wrap));
back_kwp = (struct km_wrap *)
- ((uintptr_t)front_kwp + n - sizeof (struct km_wrap));
+ ((uintptr_t)front_kwp + n - sizeof (struct km_wrap));
ASSERT(front_kwp->kw_other == back_kwp);
ASSERT(front_kwp->kw_size == n);
@@ -2711,21 +2703,21 @@ cachefs_stats_kstat_snapshot(kstat_t *ksp, void *buf, int rw)
bcopy(buf, &fscp->fs_stats, sizeof (fscp->fs_stats));
cachep->c_gc_count = fscp->fs_stats.st_gc_count;
CACHEFS_CFS_TIME_TO_TIME_COPY(fscp->fs_stats.st_gc_time,
- cachep->c_gc_time);
+ cachep->c_gc_time);
CACHEFS_CFS_TIME_TO_TIME_COPY(fscp->fs_stats.st_gc_before_atime,
- cachep->c_gc_before);
+ cachep->c_gc_before);
CACHEFS_CFS_TIME_TO_TIME_COPY(fscp->fs_stats.st_gc_after_atime,
- cachep->c_gc_after);
+ cachep->c_gc_after);
return (error);
}
fscp->fs_stats.st_gc_count = cachep->c_gc_count;
CACHEFS_TIME_TO_CFS_TIME_COPY(cachep->c_gc_time,
- fscp->fs_stats.st_gc_time, error);
+ fscp->fs_stats.st_gc_time, error);
CACHEFS_TIME_TO_CFS_TIME_COPY(cachep->c_gc_before,
- fscp->fs_stats.st_gc_before_atime, error);
+ fscp->fs_stats.st_gc_before_atime, error);
CACHEFS_TIME_TO_CFS_TIME_COPY(cachep->c_gc_after,
- fscp->fs_stats.st_gc_after_atime, error);
+ fscp->fs_stats.st_gc_after_atime, error);
bcopy(&fscp->fs_stats, buf, sizeof (fscp->fs_stats));
return (error);
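The cachefs hunks above are all instances of the same conversion: a wakeup time built as lbolt plus a delta and handed to cv_timedwait(9F) becomes the bare delta handed to cv_reltimedwait(9F), with TR_CLOCK_TICK as the requested resolution. A minimal sketch of the before/after shape, using a hypothetical condition variable and mutex (my_cv, my_lock) in place of the cachefs fields:

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

static kcondvar_t my_cv;        /* hypothetical, for illustration only */
static kmutex_t my_lock;

static void
wait_up_to_a_minute(void)
{
        mutex_enter(&my_lock);

        /* old form: absolute deadline derived from the lbolt global */
        /* (void) cv_timedwait(&my_cv, &my_lock, lbolt + (60 * hz)); */

        /* new form: relative timeout plus a resolution hint */
        (void) cv_reltimedwait(&my_cv, &my_lock, 60 * hz, TR_CLOCK_TICK);

        mutex_exit(&my_lock);
}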
diff --git a/usr/src/uts/common/fs/dev/sdev_comm.c b/usr/src/uts/common/fs/dev/sdev_comm.c
index 226e323149..2cf5b37db7 100644
--- a/usr/src/uts/common/fs/dev/sdev_comm.c
+++ b/usr/src/uts/common/fs/dev/sdev_comm.c
@@ -110,6 +110,7 @@ sdev_wait4lookup(struct sdev_node *dv, int cmd)
{
clock_t expire;
clock_t rv;
+ clock_t wakeup = drv_usectohz(2 * 1000000);
int rval = ENOENT;
int is_lookup = (cmd == SDEV_LOOKUP);
@@ -129,9 +130,8 @@ sdev_wait4lookup(struct sdev_node *dv, int cmd)
while (DEVNAME_DEVFSADM_IS_RUNNING(devfsadm_state) &&
!sdev_devfsadm_revoked()) {
/* wait 2 sec and check devfsadm completion */
- rv = cv_timedwait_sig(&dv->sdev_lookup_cv,
- &dv->sdev_lookup_lock, ddi_get_lbolt() +
- drv_usectohz(2 * 1000000));
+ rv = cv_reltimedwait_sig(&dv->sdev_lookup_cv,
+ &dv->sdev_lookup_lock, wakeup, TR_CLOCK_TICK);
if (is_lookup && (rv > 0)) {
/* was this node constructed ? */
@@ -233,9 +233,8 @@ sdev_open_upcall_door()
ASSERT(sdev_upcall_door == NULL);
- /* tick value at which wait expires */
- expire = ddi_get_lbolt() +
- drv_usectohz(dev_devfsadm_startup * 1000000);
+ /* timeout expires this many ticks in the future */
+ expire = ddi_get_lbolt() + drv_usectohz(dev_devfsadm_startup * 1000000);
if (sdev_door_upcall_filename == NULL) {
if ((error = sdev_start_devfsadmd()) != 0) {
diff --git a/usr/src/uts/common/fs/dnlc.c b/usr/src/uts/common/fs/dnlc.c
index ef44a25622..0941dfb9ac 100644
--- a/usr/src/uts/common/fs/dnlc.c
+++ b/usr/src/uts/common/fs/dnlc.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -36,8 +36,6 @@
* contributors.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
@@ -1166,7 +1164,7 @@ dnlc_dir_lookup(dcanchor_t *dcap, char *name, uint64_t *handle)
mutex_enter(&dcap->dca_lock);
dcp = (dircache_t *)dcap->dca_dircache;
if (VALID_DIR_CACHE(dcp)) {
- dcp->dc_actime = lbolt64;
+ dcp->dc_actime = ddi_get_lbolt64();
DNLC_DIR_HASH(name, hash, namlen);
dep = dcp->dc_namehash[hash & dcp->dc_nhash_mask];
while (dep != NULL) {
@@ -1385,7 +1383,7 @@ ok:
bcopy(name, dep->de_name, namlen);
dep->de_next = *hp;
*hp = dep;
- dcp->dc_actime = lbolt64;
+ dcp->dc_actime = ddi_get_lbolt64();
mutex_exit(&dcap->dca_lock);
ncs.ncs_dir_num_ents.value.ui64++;
return (DOK);
@@ -1474,7 +1472,7 @@ ok:
*/
dfp->df_handle = handle;
dfp->df_len = len;
- dcp->dc_actime = lbolt64;
+ dcp->dc_actime = ddi_get_lbolt64();
hp = &(dcp->dc_freehash[DDFHASH(handle, dcp)]);
dfp->df_next = *hp;
*hp = dfp;
@@ -1601,7 +1599,7 @@ dnlc_dir_rem_entry(dcanchor_t *dcap, char *name, uint64_t *handlep)
mutex_enter(&dcap->dca_lock);
dcp = (dircache_t *)dcap->dca_dircache;
if (VALID_DIR_CACHE(dcp)) {
- dcp->dc_actime = lbolt64;
+ dcp->dc_actime = ddi_get_lbolt64();
if (dcp->dc_nhash_mask > 0) { /* ie not minimum */
capacity = (dcp->dc_nhash_mask + 1) <<
dnlc_dir_hash_size_shift;
@@ -1677,7 +1675,7 @@ dnlc_dir_rem_space_by_len(dcanchor_t *dcap, uint_t len, uint64_t *handlep)
mutex_enter(&dcap->dca_lock);
dcp = (dircache_t *)dcap->dca_dircache;
if (VALID_DIR_CACHE(dcp)) {
- dcp->dc_actime = lbolt64;
+ dcp->dc_actime = ddi_get_lbolt64();
if (dcp->dc_fhash_mask > 0) { /* ie not minimum */
capacity = (dcp->dc_fhash_mask + 1) <<
dnlc_dir_hash_size_shift;
@@ -1740,7 +1738,7 @@ dnlc_dir_rem_space_by_handle(dcanchor_t *dcap, uint64_t handle)
mutex_enter(&dcap->dca_lock);
dcp = (dircache_t *)dcap->dca_dircache;
if (VALID_DIR_CACHE(dcp)) {
- dcp->dc_actime = lbolt64;
+ dcp->dc_actime = ddi_get_lbolt64();
if (dcp->dc_fhash_mask > 0) { /* ie not minimum */
capacity = (dcp->dc_fhash_mask + 1) <<
dnlc_dir_hash_size_shift;
@@ -1799,7 +1797,7 @@ dnlc_dir_update(dcanchor_t *dcap, char *name, uint64_t handle)
mutex_enter(&dcap->dca_lock);
dcp = (dircache_t *)dcap->dca_dircache;
if (VALID_DIR_CACHE(dcp)) {
- dcp->dc_actime = lbolt64;
+ dcp->dc_actime = ddi_get_lbolt64();
DNLC_DIR_HASH(name, hash, namlen);
dep = dcp->dc_namehash[hash & dcp->dc_nhash_mask];
while (dep != NULL) {
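Where code stamps a structure with the 64-bit tick counter, as dnlc does for dc_actime, the lbolt64 global is likewise replaced by the DDI accessor ddi_get_lbolt64(9F). A sketch of the idiom, with a hypothetical record and field standing in for the dircache:

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

typedef struct my_rec {                 /* hypothetical record, illustration only */
        int64_t r_actime;               /* last-access time, in ticks */
} my_rec_t;

static void
my_rec_touch(my_rec_t *rp)
{
        /* was: rp->r_actime = lbolt64; */
        rp->r_actime = ddi_get_lbolt64();
}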
diff --git a/usr/src/uts/common/fs/fsflush.c b/usr/src/uts/common/fs/fsflush.c
index f83409ac13..0e0fbe0c50 100644
--- a/usr/src/uts/common/fs/fsflush.c
+++ b/usr/src/uts/common/fs/fsflush.c
@@ -422,7 +422,7 @@ loop:
ASSERT(bp->b_flags & B_DELWRI);
if ((bp->b_flags & B_DELWRI) &&
- (lbolt - bp->b_start >= autoup) &&
+ (ddi_get_lbolt() - bp->b_start >= autoup) &&
sema_tryp(&bp->b_sem)) {
bp->b_flags |= B_ASYNC;
hp->b_length--;
@@ -461,7 +461,7 @@ loop:
* inode flushing until after fsflush_iflush_delay secs have elapsed.
*/
if ((boothowto & RB_SINGLE) == 0 &&
- (lbolt64 / hz) < fsflush_iflush_delay)
+ (ddi_get_lbolt64() / hz) < fsflush_iflush_delay)
goto loop;
/*
diff --git a/usr/src/uts/common/fs/nfs/nfs4_client.c b/usr/src/uts/common/fs/nfs/nfs4_client.c
index 7335f83616..9ed69f7eed 100644
--- a/usr/src/uts/common/fs/nfs/nfs4_client.c
+++ b/usr/src/uts/common/fs/nfs/nfs4_client.c
@@ -1412,8 +1412,9 @@ nfs4_async_start(struct vfs *vfsp)
zthread_exit();
/* NOTREACHED */
}
- time_left = cv_timedwait(&mi->mi_async_work_cv,
- &mi->mi_async_lock, nfs_async_timeout + lbolt);
+ time_left = cv_reltimedwait(&mi->mi_async_work_cv,
+ &mi->mi_async_lock, nfs_async_timeout,
+ TR_CLOCK_TICK);
CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);
@@ -2619,6 +2620,7 @@ void
nfs4_write_error(vnode_t *vp, int error, cred_t *cr)
{
mntinfo4_t *mi;
+ clock_t now = ddi_get_lbolt();
mi = VTOMI4(vp);
/*
@@ -2640,7 +2642,7 @@ nfs4_write_error(vnode_t *vp, int error, cred_t *cr)
* messages from the same file system.
*/
if ((error != ENOSPC && error != EDQUOT) ||
- lbolt - mi->mi_printftime > 0) {
+ now - mi->mi_printftime > 0) {
zoneid_t zoneid = mi->mi_zone->zone_id;
#ifdef DEBUG
@@ -2661,7 +2663,7 @@ nfs4_write_error(vnode_t *vp, int error, cred_t *cr)
crgetuid(curthread->t_cred),
crgetgid(curthread->t_cred));
}
- mi->mi_printftime = lbolt +
+ mi->mi_printftime = now +
nfs_write_error_interval * hz;
}
sfh4_printfhandle(VTOR4(vp)->r_fh);
@@ -3225,8 +3227,8 @@ nfs4_renew_lease_thread(nfs4_server_t *sp)
mutex_enter(&cpr_lock);
CALLB_CPR_SAFE_BEGIN(&cpr_info);
mutex_exit(&cpr_lock);
- time_left = cv_timedwait(&sp->cv_thread_exit,
- &sp->s_lock, tick_delay + lbolt);
+ time_left = cv_reltimedwait(&sp->cv_thread_exit,
+ &sp->s_lock, tick_delay, TR_CLOCK_TICK);
mutex_enter(&cpr_lock);
CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
mutex_exit(&cpr_lock);
@@ -3261,8 +3263,8 @@ nfs4_renew_lease_thread(nfs4_server_t *sp)
mutex_enter(&cpr_lock);
CALLB_CPR_SAFE_BEGIN(&cpr_info);
mutex_exit(&cpr_lock);
- time_left = cv_timedwait(&sp->cv_thread_exit, &sp->s_lock,
- tick_delay + lbolt);
+ time_left = cv_reltimedwait(&sp->cv_thread_exit, &sp->s_lock,
+ tick_delay, TR_CLOCK_TICK);
mutex_enter(&cpr_lock);
CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
mutex_exit(&cpr_lock);
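When a routine needs the tick value more than once, the conversion reads it into a local, as nfs4_write_error does with now, so that the comparison and the update of mi_printftime see the same instant and the accessor is called only once. A sketch of that shape, built around a hypothetical rate-limited warning helper:

#include <sys/types.h>
#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

static clock_t my_next_warn;            /* hypothetical throttle state */

static void
my_warn_throttled(const char *msg, int interval_secs)
{
        clock_t now = ddi_get_lbolt();  /* read the tick counter once */

        if (now - my_next_warn > 0) {
                cmn_err(CE_WARN, "%s", msg);
                my_next_warn = now + interval_secs * hz;
        }
}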
diff --git a/usr/src/uts/common/fs/nfs/nfs4_db.c b/usr/src/uts/common/fs/nfs/nfs4_db.c
index 13accb9eac..98685a5938 100644
--- a/usr/src/uts/common/fs/nfs/nfs4_db.c
+++ b/usr/src/uts/common/fs/nfs/nfs4_db.c
@@ -804,7 +804,7 @@ static void
reaper_thread(caddr_t *arg)
{
rfs4_table_t *table = (rfs4_table_t *)arg;
- clock_t rc, time;
+ clock_t rc, time, wakeup;
NFS4_DEBUG(table->dbt_debug,
(CE_NOTE, "rfs4_reaper_thread starting for %s", table->dbt_name));
@@ -813,12 +813,13 @@ reaper_thread(caddr_t *arg)
callb_generic_cpr, "nfsv4Reaper");
time = MIN(rfs4_reap_interval, table->dbt_max_cache_time);
+ wakeup = SEC_TO_TICK(time);
+
mutex_enter(&table->dbt_reaper_cv_lock);
do {
CALLB_CPR_SAFE_BEGIN(&table->dbt_reaper_cpr_info);
- rc = cv_timedwait_sig(&table->dbt_reaper_wait,
- &table->dbt_reaper_cv_lock,
- lbolt + SEC_TO_TICK(time));
+ rc = cv_reltimedwait_sig(&table->dbt_reaper_wait,
+ &table->dbt_reaper_cv_lock, wakeup, TR_CLOCK_TICK);
CALLB_CPR_SAFE_END(&table->dbt_reaper_cpr_info,
&table->dbt_reaper_cv_lock);
rfs4_dbe_reap(table, table->dbt_max_cache_time, 0);
diff --git a/usr/src/uts/common/fs/nfs/nfs4_deleg_ops.c b/usr/src/uts/common/fs/nfs/nfs4_deleg_ops.c
index 696e060c63..bc29ee8ae7 100644
--- a/usr/src/uts/common/fs/nfs/nfs4_deleg_ops.c
+++ b/usr/src/uts/common/fs/nfs/nfs4_deleg_ops.c
@@ -84,7 +84,7 @@ recall_all_delegations(rfs4_file_t *fp, bool_t trunc, caller_context_t *ct)
while (fp->rf_dinfo.rd_dtype != OPEN_DELEGATE_NONE) {
rc = rfs4_dbe_twait(fp->rf_dbe,
- lbolt + SEC_TO_TICK(rfs4_lease_time));
+ ddi_get_lbolt() + SEC_TO_TICK(rfs4_lease_time));
if (rc == -1) { /* timed out */
rfs4_dbe_unlock(fp->rf_dbe);
rfs4_recall_deleg(fp, trunc, NULL);
@@ -377,7 +377,7 @@ deleg_rd_vnevent(femarg_t *arg, vnevent_t vnevent, vnode_t *dvp, char *name,
rfs4_dbe_lock(fp->rf_dbe);
while (fp->rf_dinfo.rd_dtype != OPEN_DELEGATE_NONE) {
rc = rfs4_dbe_twait(fp->rf_dbe,
- lbolt + SEC_TO_TICK(rfs4_lease_time));
+ ddi_get_lbolt() + SEC_TO_TICK(rfs4_lease_time));
if (rc == -1) { /* timed out */
rfs4_dbe_unlock(fp->rf_dbe);
rfs4_recall_deleg(fp, trunc, NULL);
@@ -414,7 +414,7 @@ deleg_wr_vnevent(femarg_t *arg, vnevent_t vnevent, vnode_t *dvp, char *name,
rfs4_dbe_lock(fp->rf_dbe);
while (fp->rf_dinfo.rd_dtype != OPEN_DELEGATE_NONE) {
rc = rfs4_dbe_twait(fp->rf_dbe,
- lbolt + SEC_TO_TICK(rfs4_lease_time));
+ ddi_get_lbolt() + SEC_TO_TICK(rfs4_lease_time));
if (rc == -1) { /* timed out */
rfs4_dbe_unlock(fp->rf_dbe);
rfs4_recall_deleg(fp, trunc, NULL);
diff --git a/usr/src/uts/common/fs/nfs/nfs4_recovery.c b/usr/src/uts/common/fs/nfs/nfs4_recovery.c
index f107519eda..9d2ef14175 100644
--- a/usr/src/uts/common/fs/nfs/nfs4_recovery.c
+++ b/usr/src/uts/common/fs/nfs/nfs4_recovery.c
@@ -2479,8 +2479,8 @@ recov_openfiles(recov_info_t *recovp, nfs4_server_t *sp)
mutex_enter(&sp->s_lock);
if ((sp->s_flags & (N4S_CB_PINGED | N4S_CB_WAITER)) == 0) {
sp->s_flags |= N4S_CB_WAITER;
- (void) cv_timedwait(&sp->wait_cb_null, &sp->s_lock,
- (lbolt + drv_usectohz(N4S_CB_PAUSE_TIME)));
+ (void) cv_reltimedwait(&sp->wait_cb_null, &sp->s_lock,
+ drv_usectohz(N4S_CB_PAUSE_TIME), TR_CLOCK_TICK);
}
mutex_exit(&sp->s_lock);
diff --git a/usr/src/uts/common/fs/nfs/nfs4_srv.c b/usr/src/uts/common/fs/nfs/nfs4_srv.c
index 27eb457b05..d7f71fa380 100644
--- a/usr/src/uts/common/fs/nfs/nfs4_srv.c
+++ b/usr/src/uts/common/fs/nfs/nfs4_srv.c
@@ -597,7 +597,7 @@ void
rfs4_grace_start(rfs4_servinst_t *sip)
{
rw_enter(&sip->rwlock, RW_WRITER);
- sip->start_time = (time_t)TICK_TO_SEC(lbolt);
+ sip->start_time = (time_t)TICK_TO_SEC(ddi_get_lbolt());
sip->grace_period = rfs4_grace_period;
rw_exit(&sip->rwlock);
}
@@ -630,7 +630,7 @@ rfs4_servinst_in_grace(rfs4_servinst_t *sip)
grace_expiry = sip->start_time + sip->grace_period;
rw_exit(&sip->rwlock);
- return (((time_t)TICK_TO_SEC(lbolt)) < grace_expiry);
+ return (((time_t)TICK_TO_SEC(ddi_get_lbolt())) < grace_expiry);
}
int
diff --git a/usr/src/uts/common/fs/nfs/nfs4_stub_vnops.c b/usr/src/uts/common/fs/nfs/nfs4_stub_vnops.c
index e26f15264c..37bc502b0b 100644
--- a/usr/src/uts/common/fs/nfs/nfs4_stub_vnops.c
+++ b/usr/src/uts/common/fs/nfs/nfs4_stub_vnops.c
@@ -2370,7 +2370,7 @@ nfs4_ephemeral_harvester(nfs4_trigger_globals_t *ntg)
zone_t *zone = curproc->p_zone;
for (;;) {
- timeleft = zone_status_timedwait(zone, lbolt +
+ timeleft = zone_status_timedwait(zone, ddi_get_lbolt() +
nfs4_trigger_thread_timer * hz, ZONE_IS_SHUTTING_DOWN);
/*
diff --git a/usr/src/uts/common/fs/nfs/nfs_client.c b/usr/src/uts/common/fs/nfs/nfs_client.c
index 3c5d02fad9..a32d730ea7 100644
--- a/usr/src/uts/common/fs/nfs/nfs_client.c
+++ b/usr/src/uts/common/fs/nfs/nfs_client.c
@@ -2050,8 +2050,9 @@ nfs_async_start(struct vfs *vfsp)
zthread_exit();
/* NOTREACHED */
}
- time_left = cv_timedwait(&mi->mi_async_work_cv,
- &mi->mi_async_lock, nfs_async_timeout + lbolt);
+ time_left = cv_reltimedwait(&mi->mi_async_work_cv,
+ &mi->mi_async_lock, nfs_async_timeout,
+ TR_CLOCK_TICK);
CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);
@@ -2554,6 +2555,7 @@ void
nfs_write_error(vnode_t *vp, int error, cred_t *cr)
{
mntinfo_t *mi;
+ clock_t now;
mi = VTOMI(vp);
/*
@@ -2567,8 +2569,9 @@ nfs_write_error(vnode_t *vp, int error, cred_t *cr)
* No use in flooding the console with ENOSPC
* messages from the same file system.
*/
+ now = ddi_get_lbolt();
if ((error != ENOSPC && error != EDQUOT) ||
- lbolt - mi->mi_printftime > 0) {
+ now - mi->mi_printftime > 0) {
zoneid_t zoneid = mi->mi_zone->zone_id;
#ifdef DEBUG
@@ -2588,7 +2591,7 @@ nfs_write_error(vnode_t *vp, int error, cred_t *cr)
MSG("^User: userid=%d, groupid=%d\n"),
crgetuid(CRED()), crgetgid(CRED()));
}
- mi->mi_printftime = lbolt +
+ mi->mi_printftime = now +
nfs_write_error_interval * hz;
}
nfs_printfhandle(&VTOR(vp)->r_fh);
diff --git a/usr/src/uts/common/fs/nfs/nfs_dump.c b/usr/src/uts/common/fs/nfs/nfs_dump.c
index 98a69cca50..9b570716d8 100644
--- a/usr/src/uts/common/fs/nfs/nfs_dump.c
+++ b/usr/src/uts/common/fs/nfs/nfs_dump.c
@@ -19,16 +19,15 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Dump memory to NFS swap file after a panic.
* We have no timeouts, context switches, etc.
*/
+
#include <rpc/types.h>
#include <sys/param.h>
#include <sys/errno.h>
@@ -452,7 +451,7 @@ nd_get_reply(TIUSER *tiptr, XDR *xdrp, uint32_t call_xid, int *badmsg)
static int
nd_poll(TIUSER *tiptr, int retry, int *eventp)
{
- clock_t start_bolt = lbolt;
+ clock_t start_bolt = ddi_get_lbolt();
clock_t timout = TIMEOUT * (retry + 1);
int error;
@@ -460,7 +459,7 @@ nd_poll(TIUSER *tiptr, int retry, int *eventp)
*eventp = 0;
- while (!*eventp && ((lbolt - start_bolt) < timout)) {
+ while (!*eventp && ((ddi_get_lbolt() - start_bolt) < timout)) {
/*
* Briefly enable interrupts before checking for a reply;
* the network transports do not yet support do_polled_io.
diff --git a/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c b/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c
index 8f6e95de23..f6bba8380a 100644
--- a/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c
+++ b/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c
@@ -786,10 +786,11 @@ smb_iod_waitrq(struct smb_rq *rqp)
* start the timer(s) after the request is sent.
*/
if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
- tmo1 = lbolt + SEC_TO_TICK(smb_timo_notice);
+ tmo1 = SEC_TO_TICK(smb_timo_notice);
else
tmo1 = 0;
- tmo2 = lbolt + SEC_TO_TICK(rqp->sr_timo);
+
+ tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);
/*
* As above, we don't want to allow interrupt for some
@@ -802,11 +803,11 @@ smb_iod_waitrq(struct smb_rq *rqp)
*/
if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
if (rqp->sr_flags & SMBR_NOINTR_RECV)
- tr = cv_timedwait(&rqp->sr_cond,
- &rqp->sr_lock, tmo1);
+ tr = cv_reltimedwait(&rqp->sr_cond,
+ &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
else
- tr = cv_timedwait_sig(&rqp->sr_cond,
- &rqp->sr_lock, tmo1);
+ tr = cv_reltimedwait_sig(&rqp->sr_cond,
+ &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
if (tr == 0) {
error = EINTR;
goto out;
@@ -1064,7 +1065,7 @@ out:
int
smb_iod_vc_idle(struct smb_vc *vcp)
{
- clock_t tr, tmo;
+ clock_t tr, delta = SEC_TO_TICK(15);
int err = 0;
/*
@@ -1075,8 +1076,8 @@ smb_iod_vc_idle(struct smb_vc *vcp)
SMB_VC_LOCK(vcp);
while (vcp->vc_state == SMBIOD_ST_IDLE) {
- tmo = lbolt + SEC_TO_TICK(15);
- tr = cv_timedwait_sig(&vcp->iod_idle, &vcp->vc_lock, tmo);
+ tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
+ delta, TR_CLOCK_TICK);
if (tr == 0) {
err = EINTR;
break;
@@ -1103,7 +1104,7 @@ smb_iod_vc_idle(struct smb_vc *vcp)
int
smb_iod_vc_rcfail(struct smb_vc *vcp)
{
- clock_t tr, tmo;
+ clock_t tr;
int err = 0;
/*
@@ -1125,8 +1126,8 @@ smb_iod_vc_rcfail(struct smb_vc *vcp)
* (1) Give requests a chance to error out.
* (2) Prevent immediate retry.
*/
- tmo = lbolt + SEC_TO_TICK(5);
- tr = cv_timedwait_sig(&vcp->iod_idle, &vcp->vc_lock, tmo);
+ tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
+ SEC_TO_TICK(5), TR_CLOCK_TICK);
if (tr == 0)
err = EINTR;
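The interruptible waits in smb_iod follow the same rule: cv_timedwait_sig with an lbolt-based deadline becomes cv_reltimedwait_sig with just the delta, and a return of 0 still means the wait was broken by a signal. A sketch of the interruptible form, using a hypothetical cv/mutex pair and the same 15-second idle timeout as above:

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/errno.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

static kcondvar_t my_cv;                /* hypothetical, illustration only */
static kmutex_t my_lock;

static int
wait_idle_interruptible(void)
{
        clock_t tr;

        mutex_enter(&my_lock);
        tr = cv_reltimedwait_sig(&my_cv, &my_lock,
            SEC_TO_TICK(15), TR_CLOCK_TICK);
        mutex_exit(&my_lock);

        if (tr == 0)
                return (EINTR);         /* interrupted by a signal */
        return (0);                     /* signalled (> 0) or timed out (-1) */
}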
diff --git a/usr/src/uts/common/fs/smbclnt/netsmb/smb_rq.c b/usr/src/uts/common/fs/smbclnt/netsmb/smb_rq.c
index 5199127d47..0985d58ff4 100644
--- a/usr/src/uts/common/fs/smbclnt/netsmb/smb_rq.c
+++ b/usr/src/uts/common/fs/smbclnt/netsmb/smb_rq.c
@@ -261,11 +261,11 @@ smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
break;
SMBRQ_LOCK(rqp);
if (rqp->sr_share) {
- cv_timedwait(&rqp->sr_cond, &(rqp)->sr_lock,
- lbolt + (hz * SMB_RCNDELAY));
+ cv_reltimedwait(&rqp->sr_cond, &(rqp)->sr_lock,
+ (hz * SMB_RCNDELAY), TR_CLOCK_TICK);
} else {
- delay(lbolt + (hz * SMB_RCNDELAY));
+ delay(ddi_get_lbolt() + (hz * SMB_RCNDELAY));
}
SMBRQ_UNLOCK(rqp);
rqp->sr_rexmit--;
@@ -1409,10 +1409,10 @@ smb_t2_request(struct smb_t2rq *t2p)
break;
mutex_enter(&(t2p)->t2_lock);
if (t2p->t2_share) {
- cv_timedwait(&t2p->t2_cond, &(t2p)->t2_lock,
- lbolt + (hz * SMB_RCNDELAY));
+ cv_reltimedwait(&t2p->t2_cond, &(t2p)->t2_lock,
+ (hz * SMB_RCNDELAY), TR_CLOCK_TICK);
} else {
- delay(lbolt + (hz * SMB_RCNDELAY));
+ delay(ddi_get_lbolt() + (hz * SMB_RCNDELAY));
}
mutex_exit(&(t2p)->t2_lock);
}
@@ -1442,11 +1442,11 @@ smb_nt_request(struct smb_ntrq *ntp)
break;
mutex_enter(&(ntp)->nt_lock);
if (ntp->nt_share) {
- cv_timedwait(&ntp->nt_cond, &(ntp)->nt_lock,
- lbolt + (hz * SMB_RCNDELAY));
+ cv_reltimedwait(&ntp->nt_cond, &(ntp)->nt_lock,
+ (hz * SMB_RCNDELAY), TR_CLOCK_TICK);
} else {
- delay(lbolt + (hz * SMB_RCNDELAY));
+ delay(ddi_get_lbolt() + (hz * SMB_RCNDELAY));
}
mutex_exit(&(ntp)->nt_lock);
}
diff --git a/usr/src/uts/common/fs/smbsrv/smb_lock.c b/usr/src/uts/common/fs/smbsrv/smb_lock.c
index d74aa0be32..5032bac80b 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_lock.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_lock.c
@@ -725,7 +725,7 @@ smb_lock_create(
* Calculate the absolute end time so that we can use it
* in cv_timedwait.
*/
- lock->l_end_time = lbolt + MSEC_TO_TICK(timeout);
+ lock->l_end_time = ddi_get_lbolt() + MSEC_TO_TICK(timeout);
if (timeout == UINT_MAX)
lock->l_flags |= SMB_LOCK_FLAG_INDEFINITE;
diff --git a/usr/src/uts/common/fs/smbsrv/smb_opipe.c b/usr/src/uts/common/fs/smbsrv/smb_opipe.c
index 3048e034e7..092f726535 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_opipe.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_opipe.c
@@ -232,7 +232,7 @@ smb_opipe_fid(void)
mutex_enter(&smb_opipe_fid_mutex);
if (opipe_fid == 0)
- opipe_fid = lbolt << 11;
+ opipe_fid = ddi_get_lbolt() << 11;
do {
++opipe_fid;
diff --git a/usr/src/uts/common/fs/smbsrv/smb_session.c b/usr/src/uts/common/fs/smbsrv/smb_session.c
index 14971d067e..6d31a3d2a4 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_session.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_session.c
@@ -640,6 +640,7 @@ smb_session_create(ksocket_t new_so, uint16_t port, smb_server_t *sv,
socklen_t slen;
struct sockaddr_in6 sin6;
smb_session_t *session;
+ int64_t now;
session = kmem_cache_alloc(sv->si_cache_session, KM_SLEEP);
bzero(session, sizeof (smb_session_t));
@@ -649,12 +650,14 @@ smb_session_create(ksocket_t new_so, uint16_t port, smb_server_t *sv,
return (NULL);
}
+ now = ddi_get_lbolt64();
+
session->s_kid = SMB_NEW_KID();
session->s_state = SMB_SESSION_STATE_INITIALIZED;
session->native_os = NATIVE_OS_UNKNOWN;
- session->opentime = lbolt64;
+ session->opentime = now;
session->keep_alive = smb_keep_alive;
- session->activity_timestamp = lbolt64;
+ session->activity_timestamp = now;
smb_slist_constructor(&session->s_req_list, sizeof (smb_request_t),
offsetof(smb_request_t, sr_session_lnd));
diff --git a/usr/src/uts/common/fs/smbsrv/smb_util.c b/usr/src/uts/common/fs/smbsrv/smb_util.c
index cfc1264fb4..01d6fdd3bb 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_util.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_util.c
@@ -392,7 +392,7 @@ microtime(timestruc_t *tvp)
int32_t
clock_get_milli_uptime()
{
- return (TICK_TO_MSEC(lbolt));
+ return (TICK_TO_MSEC(ddi_get_lbolt()));
}
int /*ARGSUSED*/
@@ -1105,15 +1105,14 @@ static boolean_t
smb_thread_continue_timedwait_locked(smb_thread_t *thread, int ticks)
{
boolean_t result;
- clock_t finish_time = lbolt + ticks;
/* -1 means don't block */
if (ticks != -1 && !thread->sth_kill) {
if (ticks == 0) {
cv_wait(&thread->sth_cv, &thread->sth_mtx);
} else {
- (void) cv_timedwait(&thread->sth_cv, &thread->sth_mtx,
- finish_time);
+ (void) cv_reltimedwait(&thread->sth_cv,
+ &thread->sth_mtx, (clock_t)ticks, TR_CLOCK_TICK);
}
}
result = (thread->sth_kill == 0);
@@ -1258,8 +1257,8 @@ smb_rwx_rwwait(
rc = 1;
cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
} else {
- rc = cv_timedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
- lbolt + timeout);
+ rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
+ timeout, TR_CLOCK_TICK);
}
}
mutex_exit(&rwx->rwx_mutex);
diff --git a/usr/src/uts/common/fs/sockfs/nl7chttp.c b/usr/src/uts/common/fs/sockfs/nl7chttp.c
index 81dd8a99a5..8019295836 100644
--- a/usr/src/uts/common/fs/sockfs/nl7chttp.c
+++ b/usr/src/uts/common/fs/sockfs/nl7chttp.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1797,10 +1797,11 @@ done:
goto pass;
}
/* Have a valid expire and date so calc an lbolt expire */
- uri->expire = lbolt + SEC_TO_TICK(http->expire - http->date);
+ uri->expire = ddi_get_lbolt() + SEC_TO_TICK(http->expire -
+ http->date);
} else if (nl7c_uri_ttl != -1) {
/* No valid expire speced and we have a TTL */
- uri->expire = lbolt + SEC_TO_TICK(nl7c_uri_ttl);
+ uri->expire = ddi_get_lbolt() + SEC_TO_TICK(nl7c_uri_ttl);
}
chunked:
diff --git a/usr/src/uts/common/fs/sockfs/nl7curi.c b/usr/src/uts/common/fs/sockfs/nl7curi.c
index f0ba55de87..ae133979fa 100644
--- a/usr/src/uts/common/fs/sockfs/nl7curi.c
+++ b/usr/src/uts/common/fs/sockfs/nl7curi.c
@@ -821,7 +821,8 @@ nexthash:
* of requested URI, check for expire or request no cache
* purge.
*/
- if (uri->expire >= 0 && uri->expire <= lbolt || ruri->nocache) {
+ if (uri->expire >= 0 && uri->expire <= ddi_get_lbolt() ||
+ ruri->nocache) {
/*
* URI has expired or request specified to not use
* the cached version, unlink the URI from the hash
diff --git a/usr/src/uts/common/fs/sockfs/sockcommon_subr.c b/usr/src/uts/common/fs/sockfs/sockcommon_subr.c
index 9b806d0a4a..98cb194037 100644
--- a/usr/src/uts/common/fs/sockfs/sockcommon_subr.c
+++ b/usr/src/uts/common/fs/sockfs/sockcommon_subr.c
@@ -311,11 +311,8 @@ so_snd_wait_qnotfull_locked(struct sonode *so, boolean_t dontblock)
*/
error = cv_wait_sig(&so->so_snd_cv, &so->so_lock);
} else {
- clock_t now;
-
- time_to_wait(&now, so->so_sndtimeo);
- error = cv_timedwait_sig(&so->so_snd_cv, &so->so_lock,
- now);
+ error = cv_reltimedwait_sig(&so->so_snd_cv,
+ &so->so_lock, so->so_sndtimeo, TR_CLOCK_TICK);
}
if (error == 0)
return (EINTR);
@@ -971,10 +968,9 @@ try_again:
error = cv_wait_sig(&so->so_rcv_cv,
&so->so_lock);
} else {
- clock_t now;
- time_to_wait(&now, so->so_rcvtimeo);
- error = cv_timedwait_sig(&so->so_rcv_cv,
- &so->so_lock, now);
+ error = cv_reltimedwait_sig(
+ &so->so_rcv_cv, &so->so_lock,
+ so->so_rcvtimeo, TR_CLOCK_TICK);
}
so->so_rcv_wakeup = B_FALSE;
so->so_rcv_wanted = 0;
@@ -1558,6 +1554,7 @@ so_strioc_nread(struct sonode *so, intptr_t arg, int mode, int32_t *rvalp)
int retval;
int count = 0;
mblk_t *mp;
+ clock_t wakeup = drv_usectohz(10);
if (so->so_downcalls == NULL ||
so->so_downcalls->sd_recv_uio != NULL)
@@ -1575,8 +1572,8 @@ so_strioc_nread(struct sonode *so, intptr_t arg, int mode, int32_t *rvalp)
so->so_flag |= SOWANT;
/* Do a timed sleep, in case the reader goes to sleep. */
- (void) cv_timedwait(&so->so_state_cv, &so->so_lock,
- lbolt + drv_usectohz(10));
+ (void) cv_reltimedwait(&so->so_state_cv, &so->so_lock, wakeup,
+ TR_CLOCK_TICK);
}
/*
diff --git a/usr/src/uts/common/fs/sockfs/sockstr.c b/usr/src/uts/common/fs/sockfs/sockstr.c
index 26f3d4bd91..dc2af07a93 100644
--- a/usr/src/uts/common/fs/sockfs/sockstr.c
+++ b/usr/src/uts/common/fs/sockfs/sockstr.c
@@ -1078,11 +1078,8 @@ sowaitack(struct sonode *so, mblk_t **mpp, clock_t wait)
/*
* Only wait for the time limit.
*/
- clock_t now;
-
- time_to_wait(&now, wait);
- if (cv_timedwait(&sti->sti_ack_cv, &so->so_lock,
- now) == -1) {
+ if (cv_reltimedwait(&sti->sti_ack_cv, &so->so_lock,
+ wait, TR_CLOCK_TICK) == -1) {
eprintsoline(so, ETIME);
return (ETIME);
}
diff --git a/usr/src/uts/common/fs/sockfs/socksyscalls.c b/usr/src/uts/common/fs/sockfs/socksyscalls.c
index bab4753591..9a25a7362d 100644
--- a/usr/src/uts/common/fs/sockfs/socksyscalls.c
+++ b/usr/src/uts/common/fs/sockfs/socksyscalls.c
@@ -1986,7 +1986,6 @@ snf_async_thread(void)
snf_req_t *sr;
callb_cpr_t cprinfo;
clock_t time_left = 1;
- clock_t now;
CALLB_CPR_INIT(&cprinfo, &snfq->snfq_lock, callb_generic_cpr, "snfq");
@@ -2006,9 +2005,8 @@ snf_async_thread(void)
}
snfq->snfq_idle_cnt++;
- time_to_wait(&now, snfq_timeout);
- time_left = cv_timedwait(&snfq->snfq_cv,
- &snfq->snfq_lock, now);
+ time_left = cv_reltimedwait(&snfq->snfq_cv,
+ &snfq->snfq_lock, snfq_timeout, TR_CLOCK_TICK);
snfq->snfq_idle_cnt--;
CALLB_CPR_SAFE_END(&cprinfo, &snfq->snfq_lock);
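The sockfs hunks also retire the time_to_wait() helper: rather than turning a relative timeout into an absolute deadline in a local and passing that to cv_timedwait(_sig), the relative value (so_sndtimeo, so_rcvtimeo, snfq_timeout) is passed straight to cv_reltimedwait(_sig). A sketch with a hypothetical timeout already held in ticks:

#include <sys/types.h>
#include <sys/time.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

static kcondvar_t my_cv;                /* hypothetical, illustration only */
static kmutex_t my_lock;
static clock_t my_timeo;                /* relative timeout, in ticks */

static clock_t
wait_for_space(void)
{
        clock_t rv;

        mutex_enter(&my_lock);
        /*
         * Old: clock_t now; time_to_wait(&now, my_timeo);
         *      rv = cv_timedwait_sig(&my_cv, &my_lock, now);
         * New: hand over the relative timeout directly.
         */
        rv = cv_reltimedwait_sig(&my_cv, &my_lock, my_timeo, TR_CLOCK_TICK);
        mutex_exit(&my_lock);

        return (rv);
}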
diff --git a/usr/src/uts/common/fs/ufs/lufs.c b/usr/src/uts/common/fs/ufs/lufs.c
index 8d49b12ac0..301bb09c01 100644
--- a/usr/src/uts/common/fs/ufs/lufs.c
+++ b/usr/src/uts/common/fs/ufs/lufs.c
@@ -1299,7 +1299,7 @@ lufs_read_strategy(ml_unit_t *ul, buf_t *bp)
bp->b_error = EIO;
biodone(bp);
} else {
- ul->un_ufsvfs->vfs_iotstamp = lbolt;
+ ul->un_ufsvfs->vfs_iotstamp = ddi_get_lbolt();
logstats.ls_lreads.value.ui64++;
(void) bdev_strategy(bp);
lwp_stat_update(LWP_STAT_INBLK, 1);
@@ -1401,7 +1401,7 @@ lufs_write_strategy(ml_unit_t *ul, buf_t *bp)
!(matamap_overlap(ul->un_matamap, mof, nb) &&
((bp->b_flags & B_PHYS) == 0)));
- ul->un_ufsvfs->vfs_iotstamp = lbolt;
+ ul->un_ufsvfs->vfs_iotstamp = ddi_get_lbolt();
logstats.ls_lwrites.value.ui64++;
/* If snapshots are enabled, write through the snapshot driver */
diff --git a/usr/src/uts/common/fs/ufs/lufs_log.c b/usr/src/uts/common/fs/ufs/lufs_log.c
index faec6f915e..2ec3f7907c 100644
--- a/usr/src/uts/common/fs/ufs/lufs_log.c
+++ b/usr/src/uts/common/fs/ufs/lufs_log.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -205,7 +205,7 @@ ldl_strategy(ml_unit_t *ul, buf_t *pb)
} else {
if (read) {
logstats.ls_ldlreads.value.ui64++;
- ufsvfsp->vfs_iotstamp = lbolt;
+ ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
lwp_stat_update(LWP_STAT_INBLK, 1);
} else {
logstats.ls_ldlwrites.value.ui64++;
diff --git a/usr/src/uts/common/fs/ufs/lufs_thread.c b/usr/src/uts/common/fs/ufs/lufs_thread.c
index fba11199a6..da37db94df 100644
--- a/usr/src/uts/common/fs/ufs/lufs_thread.c
+++ b/usr/src/uts/common/fs/ufs/lufs_thread.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>
@@ -92,8 +90,8 @@ trans_roll_wait(mt_map_t *logmap, callb_cpr_t *cprinfop)
}
logmap->mtm_flags &= ~(MTM_FORCE_ROLL | MTM_ROLLING);
CALLB_CPR_SAFE_BEGIN(cprinfop);
- (void) cv_timedwait(&logmap->mtm_to_roll_cv, &logmap->mtm_mutex,
- lbolt + trans_roll_tics);
+ (void) cv_reltimedwait(&logmap->mtm_to_roll_cv, &logmap->mtm_mutex,
+ trans_roll_tics, TR_CLOCK_TICK);
CALLB_CPR_SAFE_END(cprinfop, &logmap->mtm_mutex);
logmap->mtm_flags |= MTM_ROLLING;
mutex_exit(&logmap->mtm_mutex);
diff --git a/usr/src/uts/common/fs/ufs/ufs_alloc.c b/usr/src/uts/common/fs/ufs/ufs_alloc.c
index d446ddc1e3..3e4d38a9b2 100644
--- a/usr/src/uts/common/fs/ufs/ufs_alloc.c
+++ b/usr/src/uts/common/fs/ufs/ufs_alloc.c
@@ -111,6 +111,7 @@ alloc(struct inode *ip, daddr_t bpref, int size, daddr_t *bnp, cred_t *cr)
int err;
char *errmsg = NULL;
size_t len;
+ clock_t now;
ufsvfsp = ip->i_ufsvfs;
fs = ufsvfsp->vfs_fs;
@@ -158,10 +159,11 @@ alloc(struct inode *ip, daddr_t bpref, int size, daddr_t *bnp, cred_t *cr)
(size_t *)NULL);
nospace:
+ now = ddi_get_lbolt();
mutex_enter(&ufsvfsp->vfs_lock);
- if ((lbolt - ufsvfsp->vfs_lastwhinetime) > (hz << 2) &&
+ if ((now - ufsvfsp->vfs_lastwhinetime) > (hz << 2) &&
(!(TRANS_ISTRANS(ufsvfsp)) || !(ip->i_flag & IQUIET))) {
- ufsvfsp->vfs_lastwhinetime = lbolt;
+ ufsvfsp->vfs_lastwhinetime = now;
cmn_err(CE_NOTE, "alloc: %s: file system full", fs->fs_fsmnt);
}
mutex_exit(&ufsvfsp->vfs_lock);
@@ -187,6 +189,7 @@ realloccg(struct inode *ip, daddr_t bprev, daddr_t bpref, int osize,
int err;
char *errmsg = NULL;
size_t len;
+ clock_t now;
ufsvfsp = ip->i_ufsvfs;
fs = ufsvfsp->vfs_fs;
@@ -260,10 +263,11 @@ realloccg(struct inode *ip, daddr_t bprev, daddr_t bpref, int osize,
(size_t *)NULL);
nospace:
+ now = ddi_get_lbolt();
mutex_enter(&ufsvfsp->vfs_lock);
- if ((lbolt - ufsvfsp->vfs_lastwhinetime) > (hz << 2) &&
+ if ((now - ufsvfsp->vfs_lastwhinetime) > (hz << 2) &&
(!(TRANS_ISTRANS(ufsvfsp)) || !(ip->i_flag & IQUIET))) {
- ufsvfsp->vfs_lastwhinetime = lbolt;
+ ufsvfsp->vfs_lastwhinetime = now;
cmn_err(CE_NOTE,
"realloccg %s: file system full", fs->fs_fsmnt);
}
diff --git a/usr/src/uts/common/fs/ufs/ufs_directio.c b/usr/src/uts/common/fs/ufs/ufs_directio.c
index 05afd6d25f..86f7f559eb 100644
--- a/usr/src/uts/common/fs/ufs/ufs_directio.c
+++ b/usr/src/uts/common/fs/ufs/ufs_directio.c
@@ -314,7 +314,7 @@ directio_start(struct ufsvfs *ufsvfsp, struct inode *ip, size_t nbytes,
/*
* Issue I/O request.
*/
- ufsvfsp->vfs_iotstamp = lbolt;
+ ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
if (ufsvfsp->vfs_snapshot)
fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
else
diff --git a/usr/src/uts/common/fs/ufs/ufs_inode.c b/usr/src/uts/common/fs/ufs/ufs_inode.c
index 760043a2d9..40e051005a 100644
--- a/usr/src/uts/common/fs/ufs/ufs_inode.c
+++ b/usr/src/uts/common/fs/ufs/ufs_inode.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1192,7 +1192,7 @@ ufs_itrunc(struct inode *oip, u_offset_t length, int flags, cred_t *cr)
if (flags & I_FREE) {
i_genrand *= 16843009; /* turns into shift and adds */
i_genrand++;
- oip->i_gen += ((i_genrand + lbolt) & 0xffff) + 1;
+ oip->i_gen += ((i_genrand + ddi_get_lbolt()) & 0xffff) + 1;
oip->i_flag |= ICHG |IUPD;
oip->i_seq++;
if (length == oip->i_size)
diff --git a/usr/src/uts/common/fs/ufs/ufs_lockfs.c b/usr/src/uts/common/fs/ufs/ufs_lockfs.c
index 78836c6335..bbcad8883b 100644
--- a/usr/src/uts/common/fs/ufs/ufs_lockfs.c
+++ b/usr/src/uts/common/fs/ufs/ufs_lockfs.c
@@ -277,7 +277,8 @@ ufs_quiesce(struct ulockfs *ulp)
if (!ulp->ul_vnops_cnt && !ULOCKFS_IS_FWLOCK(ulp))
goto out;
}
- if (!cv_timedwait_sig(&ulp->ul_cv, &ulp->ul_lock, lbolt + hz)) {
+ if (!cv_reltimedwait_sig(&ulp->ul_cv, &ulp->ul_lock, hz,
+ TR_CLOCK_TICK)) {
error = EINTR;
goto out;
}
diff --git a/usr/src/uts/common/fs/ufs/ufs_panic.c b/usr/src/uts/common/fs/ufs/ufs_panic.c
index 5dbb1add54..ced31d8a70 100644
--- a/usr/src/uts/common/fs/ufs/ufs_panic.c
+++ b/usr/src/uts/common/fs/ufs/ufs_panic.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
@@ -1089,8 +1087,9 @@ ufsfx_thread_fix_failures(void *ignored)
if (retry) {
mutex_enter(&ufs_fix.uq_mutex);
CALLB_CPR_SAFE_BEGIN(&cprinfo);
- (void) cv_timedwait(&ufs_fix.uq_cv,
- &ufs_fix.uq_mutex, lbolt + (hz * retry));
+ (void) cv_reltimedwait(&ufs_fix.uq_cv,
+ &ufs_fix.uq_mutex, (hz * retry),
+ TR_CLOCK_TICK);
CALLB_CPR_SAFE_END(&cprinfo,
&ufs_fix.uq_mutex);
mutex_exit(&ufs_fix.uq_mutex);
diff --git a/usr/src/uts/common/fs/ufs/ufs_subr.c b/usr/src/uts/common/fs/ufs/ufs_subr.c
index d8d4090ea8..0ef1f8280d 100644
--- a/usr/src/uts/common/fs/ufs/ufs_subr.c
+++ b/usr/src/uts/common/fs/ufs/ufs_subr.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -424,7 +424,7 @@ ufs_sync_inode(struct inode *ip, void *arg)
* flushed when I/Os start again.
*/
if (cheap && (ufsvfsp->vfs_dfritime & UFS_DFRATIME) &&
- (ufsvfsp->vfs_iotstamp + ufs_iowait < lbolt))
+ (ufsvfsp->vfs_iotstamp + ufs_iowait < ddi_get_lbolt()))
return (0);
/*
* an app issueing a sync() can take forever on a trans device
@@ -1103,7 +1103,7 @@ ufs_fbiwrite(struct fbuf *fbp, struct inode *ip, daddr_t bn, long bsize)
} else if (ufsvfsp->vfs_snapshot) {
fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
} else {
- ufsvfsp->vfs_iotstamp = lbolt;
+ ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
ub.ub_fbiwrites.value.ul++;
(void) bdev_strategy(bp);
lwp_stat_update(LWP_STAT_OUBLK, 1);
diff --git a/usr/src/uts/common/fs/ufs/ufs_thread.c b/usr/src/uts/common/fs/ufs/ufs_thread.c
index 1ea7d1f1b3..9ea1d8c980 100644
--- a/usr/src/uts/common/fs/ufs/ufs_thread.c
+++ b/usr/src/uts/common/fs/ufs/ufs_thread.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1154,8 +1154,8 @@ ufs_thread_hlock(void *ignore)
if (retry) {
mutex_enter(&ufs_hlock.uq_mutex);
CALLB_CPR_SAFE_BEGIN(&cprinfo);
- (void) cv_timedwait(&ufs_hlock.uq_cv,
- &ufs_hlock.uq_mutex, lbolt + hz);
+ (void) cv_reltimedwait(&ufs_hlock.uq_cv,
+ &ufs_hlock.uq_mutex, hz, TR_CLOCK_TICK);
CALLB_CPR_SAFE_END(&cprinfo,
&ufs_hlock.uq_mutex);
mutex_exit(&ufs_hlock.uq_mutex);
diff --git a/usr/src/uts/common/fs/ufs/ufs_vfsops.c b/usr/src/uts/common/fs/ufs/ufs_vfsops.c
index a2bfd32f52..b41947eeb1 100644
--- a/usr/src/uts/common/fs/ufs/ufs_vfsops.c
+++ b/usr/src/uts/common/fs/ufs/ufs_vfsops.c
@@ -36,7 +36,6 @@
* contributors.
*/
-
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
@@ -617,7 +616,7 @@ remountfs(struct vfs *vfsp, dev_t dev, void *raw_argsp, int args_len)
ufsvfsp->vfs_forcedirectio = 1;
else /* default is no direct I/O */
ufsvfsp->vfs_forcedirectio = 0;
- ufsvfsp->vfs_iotstamp = lbolt;
+ ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
/*
* set largefiles flag in ufsvfs equal to the
@@ -646,7 +645,8 @@ remountfs(struct vfs *vfsp, dev_t dev, void *raw_argsp, int args_len)
* XXX UFSMNT_ONERROR_RDONLY rather than UFSMNT_ONERROR_PANIC
*/
#define BOOT_TIME_LIMIT (180*hz)
- if (!(flags & UFSMNT_ONERROR_FLGMASK) && lbolt < BOOT_TIME_LIMIT) {
+ if (!(flags & UFSMNT_ONERROR_FLGMASK) &&
+ ddi_get_lbolt() < BOOT_TIME_LIMIT) {
cmn_err(CE_WARN, "%s is required to be mounted onerror=%s",
ufsvfsp->vfs_fs->fs_fsmnt, UFSMNT_ONERROR_PANIC_STR);
flags |= UFSMNT_ONERROR_PANIC;
@@ -1185,7 +1185,7 @@ mountfs(struct vfs *vfsp, enum whymountroot why, struct vnode *devvp,
ufsvfsp->vfs_forcedirectio = 1;
else if (flags & UFSMNT_NOFORCEDIRECTIO)
ufsvfsp->vfs_forcedirectio = 0;
- ufsvfsp->vfs_iotstamp = lbolt;
+ ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
ufsvfsp->vfs_nindiroffset = fsp->fs_nindir - 1;
ufsvfsp->vfs_nindirshift = highbit(ufsvfsp->vfs_nindiroffset);
@@ -1444,7 +1444,7 @@ ufs_unmount(struct vfs *vfsp, int fflag, struct cred *cr)
}
/* let all types of writes go through */
- ufsvfsp->vfs_iotstamp = lbolt;
+ ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
/* coordinate with global hlock thread */
if (TRANS_ISTRANS(ufsvfsp) && (ufsvfsp->vfs_validfs == UT_HLOCKING)) {
diff --git a/usr/src/uts/common/fs/ufs/ufs_vnops.c b/usr/src/uts/common/fs/ufs/ufs_vnops.c
index 03faaab8ec..ff4632806d 100644
--- a/usr/src/uts/common/fs/ufs/ufs_vnops.c
+++ b/usr/src/uts/common/fs/ufs/ufs_vnops.c
@@ -1351,6 +1351,7 @@ rdip(struct inode *ip, struct uio *uio, int ioflag, cred_t *cr)
int dofree, directio_status;
krw_t rwtype;
o_mode_t type;
+ clock_t now;
vp = ITOV(ip);
@@ -1419,7 +1420,8 @@ rdip(struct inode *ip, struct uio *uio, int ioflag, cred_t *cr)
/*
* We update smallfile2 and smallfile1 at most every second.
*/
- if (lbolt >= smallfile_update) {
+ now = ddi_get_lbolt();
+ if (now >= smallfile_update) {
uint64_t percpufreeb;
if (smallfile1_d == 0) smallfile1_d = SMALLFILE1_D;
if (smallfile2_d == 0) smallfile2_d = SMALLFILE2_D;
@@ -1429,7 +1431,7 @@ rdip(struct inode *ip, struct uio *uio, int ioflag, cred_t *cr)
smallfile1 = MAX(smallfile1, smallfile);
smallfile1 = MAX(smallfile1, smallfile64);
smallfile2 = MAX(smallfile1, smallfile2);
- smallfile_update = lbolt + hz;
+ smallfile_update = now + hz;
}
dofree = freebehind &&
@@ -4965,7 +4967,7 @@ ufs_getpage_miss(struct vnode *vp, u_offset_t off, size_t len, struct seg *seg,
} else if (ufsvfsp->vfs_snapshot) {
fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
} else {
- ufsvfsp->vfs_iotstamp = lbolt;
+ ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
ub.ub_getpages.value.ul++;
(void) bdev_strategy(bp);
lwp_stat_update(LWP_STAT_INBLK, 1);
@@ -5068,7 +5070,7 @@ ufs_getpage_ra(struct vnode *vp, u_offset_t off, struct seg *seg, caddr_t addr)
} else if (ufsvfsp->vfs_snapshot) {
fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
} else {
- ufsvfsp->vfs_iotstamp = lbolt;
+ ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
ub.ub_getras.value.ul++;
(void) bdev_strategy(bp);
lwp_stat_update(LWP_STAT_INBLK, 1);
@@ -5542,7 +5544,7 @@ ufs_putapage(
} else if (ufsvfsp->vfs_snapshot) {
fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
} else {
- ufsvfsp->vfs_iotstamp = lbolt;
+ ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
ub.ub_putasyncs.value.ul++;
(void) bdev_strategy(bp);
lwp_stat_update(LWP_STAT_OUBLK, 1);
@@ -5553,7 +5555,7 @@ ufs_putapage(
} else if (ufsvfsp->vfs_snapshot) {
fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
} else {
- ufsvfsp->vfs_iotstamp = lbolt;
+ ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
ub.ub_putsyncs.value.ul++;
(void) bdev_strategy(bp);
lwp_stat_update(LWP_STAT_OUBLK, 1);
@@ -6065,7 +6067,7 @@ ufs_pageio(struct vnode *vp, page_t *pp, u_offset_t io_off, size_t io_len,
bp->b_un.b_addr = (caddr_t)0;
bp->b_file = ip->i_vnode;
- ufsvfsp->vfs_iotstamp = lbolt;
+ ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
ub.ub_pageios.value.ul++;
if (ufsvfsp->vfs_snapshot)
fssnap_strategy(&(ufsvfsp->vfs_snapshot), bp);
diff --git a/usr/src/uts/common/fs/zfs/arc.c b/usr/src/uts/common/fs/zfs/arc.c
index 8cc845ffeb..9c4fb291ca 100644
--- a/usr/src/uts/common/fs/zfs/arc.c
+++ b/usr/src/uts/common/fs/zfs/arc.c
@@ -1580,7 +1580,8 @@ arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
if (HDR_IO_IN_PROGRESS(ab) ||
(spa && ab->b_spa != spa) ||
(ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
- lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
+ ddi_get_lbolt() - ab->b_arc_access <
+ arc_min_prefetch_lifespan)) {
skipped++;
continue;
}
@@ -2051,12 +2052,12 @@ arc_reclaim_thread(void)
}
/* reset the growth delay for every reclaim */
- growtime = lbolt + (arc_grow_retry * hz);
+ growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
arc_kmem_reap_now(last_reclaim);
arc_warm = B_TRUE;
- } else if (arc_no_grow && lbolt >= growtime) {
+ } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
arc_no_grow = FALSE;
}
@@ -2070,7 +2071,7 @@ arc_reclaim_thread(void)
/* block until needed, or one second, whichever is shorter */
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait(&arc_reclaim_thr_cv,
- &arc_reclaim_thr_lock, (lbolt + hz));
+ &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
}
@@ -2285,6 +2286,8 @@ out:
static void
arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
{
+ clock_t now;
+
ASSERT(MUTEX_HELD(hash_lock));
if (buf->b_state == arc_anon) {
@@ -2295,11 +2298,13 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
*/
ASSERT(buf->b_arc_access == 0);
- buf->b_arc_access = lbolt;
+ buf->b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
arc_change_state(arc_mru, buf, hash_lock);
} else if (buf->b_state == arc_mru) {
+ now = ddi_get_lbolt();
+
/*
* If this buffer is here because of a prefetch, then either:
* - clear the flag if this is a "referencing" read
@@ -2315,7 +2320,7 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
buf->b_flags &= ~ARC_PREFETCH;
ARCSTAT_BUMP(arcstat_mru_hits);
}
- buf->b_arc_access = lbolt;
+ buf->b_arc_access = now;
return;
}
@@ -2324,13 +2329,13 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
* but it is still in the cache. Move it to the MFU
* state.
*/
- if (lbolt > buf->b_arc_access + ARC_MINTIME) {
+ if (now > buf->b_arc_access + ARC_MINTIME) {
/*
* More than 125ms have passed since we
* instantiated this buffer. Move it to the
* most frequently used state.
*/
- buf->b_arc_access = lbolt;
+ buf->b_arc_access = now;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
arc_change_state(arc_mfu, buf, hash_lock);
}
@@ -2353,7 +2358,7 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
}
- buf->b_arc_access = lbolt;
+ buf->b_arc_access = ddi_get_lbolt();
arc_change_state(new_state, buf, hash_lock);
ARCSTAT_BUMP(arcstat_mru_ghost_hits);
@@ -2372,7 +2377,7 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
ASSERT(list_link_active(&buf->b_arc_node));
}
ARCSTAT_BUMP(arcstat_mfu_hits);
- buf->b_arc_access = lbolt;
+ buf->b_arc_access = ddi_get_lbolt();
} else if (buf->b_state == arc_mfu_ghost) {
arc_state_t *new_state = arc_mfu;
/*
@@ -2390,7 +2395,7 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
new_state = arc_mru;
}
- buf->b_arc_access = lbolt;
+ buf->b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
arc_change_state(new_state, buf, hash_lock);
@@ -2400,7 +2405,7 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
* This buffer is on the 2nd Level ARC.
*/
- buf->b_arc_access = lbolt;
+ buf->b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
arc_change_state(arc_mfu, buf, hash_lock);
} else {
@@ -3741,7 +3746,7 @@ l2arc_write_size(l2arc_dev_t *dev)
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
- clock_t interval, next;
+ clock_t interval, next, now;
/*
* If the ARC lists are busy, increase our write rate; if the
@@ -3754,7 +3759,8 @@ l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
else
interval = hz * l2arc_feed_secs;
- next = MAX(lbolt, MIN(lbolt + interval, began + interval));
+ now = ddi_get_lbolt();
+ next = MAX(now, MIN(now + interval, began + interval));
return (next);
}
@@ -4365,7 +4371,7 @@ l2arc_feed_thread(void)
l2arc_dev_t *dev;
spa_t *spa;
uint64_t size, wrote;
- clock_t begin, next = lbolt;
+ clock_t begin, next = ddi_get_lbolt();
CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
@@ -4376,7 +4382,7 @@ l2arc_feed_thread(void)
(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
next);
CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
- next = lbolt + hz;
+ next = ddi_get_lbolt() + hz;
/*
* Quick check for L2ARC devices.
@@ -4387,7 +4393,7 @@ l2arc_feed_thread(void)
continue;
}
mutex_exit(&l2arc_dev_mtx);
- begin = lbolt;
+ begin = ddi_get_lbolt();
/*
* This selects the next l2arc device to write to, and in
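Not every call switches APIs: in the ARC reclaim and L2ARC feed threads above (and in txg.c further down), cv_timedwait is kept and only the read of the tick counter changes from the lbolt global to ddi_get_lbolt(9F), apparently because the deadline (next, or began + interval) is already carried around as an absolute tick value. A sketch of that residual pattern, again with hypothetical my_cv/my_lock names:

#include <sys/types.h>
#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

static kcondvar_t my_cv;                /* hypothetical, illustration only */
static kmutex_t my_lock;

static void
wait_until_next_second(void)
{
        clock_t next;

        mutex_enter(&my_lock);
        /* was: next = lbolt + hz; */
        next = ddi_get_lbolt() + hz;    /* absolute deadline, one second out */
        (void) cv_timedwait(&my_cv, &my_lock, next);
        mutex_exit(&my_lock);
}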
diff --git a/usr/src/uts/common/fs/zfs/dmu_zfetch.c b/usr/src/uts/common/fs/zfs/dmu_zfetch.c
index c51ba2a0b6..37037c30f6 100644
--- a/usr/src/uts/common/fs/zfs/dmu_zfetch.c
+++ b/usr/src/uts/common/fs/zfs/dmu_zfetch.c
@@ -226,7 +226,7 @@ dmu_zfetch_dofetch(zfetch_t *zf, zstream_t *zs)
break;
}
zs->zst_ph_offset = prefetch_tail;
- zs->zst_last = lbolt;
+ zs->zst_last = ddi_get_lbolt();
}
void
@@ -577,7 +577,7 @@ dmu_zfetch_stream_reclaim(zfetch_t *zf)
for (zs = list_head(&zf->zf_stream); zs;
zs = list_next(&zf->zf_stream, zs)) {
- if (((lbolt - zs->zst_last) / hz) > zfetch_min_sec_reap)
+ if (((ddi_get_lbolt() - zs->zst_last)/hz) > zfetch_min_sec_reap)
break;
}
@@ -708,7 +708,7 @@ dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
newstream->zst_ph_offset = zst.zst_len + zst.zst_offset;
newstream->zst_cap = zst.zst_len;
newstream->zst_direction = ZFETCH_FORWARD;
- newstream->zst_last = lbolt;
+ newstream->zst_last = ddi_get_lbolt();
mutex_init(&newstream->zst_lock, NULL, MUTEX_DEFAULT, NULL);
diff --git a/usr/src/uts/common/fs/zfs/dsl_scrub.c b/usr/src/uts/common/fs/zfs/dsl_scrub.c
index d1fb3d2e52..d511bb841a 100644
--- a/usr/src/uts/common/fs/zfs/dsl_scrub.c
+++ b/usr/src/uts/common/fs/zfs/dsl_scrub.c
@@ -313,7 +313,7 @@ scrub_pause(dsl_pool_t *dp, const zbookmark_t *zb)
mintime = dp->dp_scrub_isresilver ? zfs_resilver_min_time :
zfs_scrub_min_time;
- elapsed_ticks = lbolt64 - dp->dp_scrub_start_time;
+ elapsed_ticks = ddi_get_lbolt64() - dp->dp_scrub_start_time;
if (elapsed_ticks > hz * zfs_txg_timeout ||
(elapsed_ticks > hz * mintime && txg_sync_waiting(dp))) {
dprintf("pausing at %llx/%llx/%llx/%llx\n",
@@ -836,7 +836,7 @@ dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
}
dp->dp_scrub_pausing = B_FALSE;
- dp->dp_scrub_start_time = lbolt64;
+ dp->dp_scrub_start_time = ddi_get_lbolt64();
dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0);
spa->spa_scrub_active = B_TRUE;
diff --git a/usr/src/uts/common/fs/zfs/metaslab.c b/usr/src/uts/common/fs/zfs/metaslab.c
index cdbed0144c..27dc2e4fd5 100644
--- a/usr/src/uts/common/fs/zfs/metaslab.c
+++ b/usr/src/uts/common/fs/zfs/metaslab.c
@@ -1040,7 +1040,7 @@ metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
/*
* For testing, make some blocks above a certain size be gang blocks.
*/
- if (psize >= metaslab_gang_bang && (lbolt & 3) == 0)
+ if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
return (ENOSPC);
/*
diff --git a/usr/src/uts/common/fs/zfs/txg.c b/usr/src/uts/common/fs/zfs/txg.c
index 344dcb7722..ceed1200ca 100644
--- a/usr/src/uts/common/fs/zfs/txg.c
+++ b/usr/src/uts/common/fs/zfs/txg.c
@@ -166,7 +166,8 @@ txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
CALLB_CPR_SAFE_BEGIN(cpr);
if (time)
- (void) cv_timedwait(cv, &tx->tx_sync_lock, lbolt + time);
+ (void) cv_timedwait(cv, &tx->tx_sync_lock,
+ ddi_get_lbolt() + time);
else
cv_wait(cv, &tx->tx_sync_lock);
@@ -377,7 +378,7 @@ txg_sync_thread(dsl_pool_t *dp)
dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
- delta = lbolt - start;
+ delta = ddi_get_lbolt() - start;
timer = (delta > timeout ? 0 : timeout - delta);
}
@@ -409,9 +410,9 @@ txg_sync_thread(dsl_pool_t *dp)
txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
mutex_exit(&tx->tx_sync_lock);
- start = lbolt;
+ start = ddi_get_lbolt();
spa_sync(dp->dp_spa, txg);
- delta = lbolt - start;
+ delta = ddi_get_lbolt() - start;
mutex_enter(&tx->tx_sync_lock);
tx->tx_synced_txg = txg;
@@ -478,7 +479,7 @@ void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
tx_state_t *tx = &dp->dp_tx;
- int timeout = lbolt + ticks;
+ int timeout = ddi_get_lbolt() + ticks;
/* don't delay if this txg could transition to quiescing immediately */
if (tx->tx_open_txg > txg ||
@@ -491,7 +492,7 @@ txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
return;
}
- while (lbolt < timeout &&
+ while (ddi_get_lbolt() < timeout &&
tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
(void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
timeout);
diff --git a/usr/src/uts/common/fs/zfs/vdev_cache.c b/usr/src/uts/common/fs/zfs/vdev_cache.c
index 9b3a9f5a26..688d541344 100644
--- a/usr/src/uts/common/fs/zfs/vdev_cache.c
+++ b/usr/src/uts/common/fs/zfs/vdev_cache.c
@@ -172,7 +172,7 @@ vdev_cache_allocate(zio_t *zio)
ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
ve->ve_offset = offset;
- ve->ve_lastused = lbolt;
+ ve->ve_lastused = ddi_get_lbolt();
ve->ve_data = zio_buf_alloc(VCBS);
avl_add(&vc->vc_offset_tree, ve);
@@ -189,9 +189,9 @@ vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
ASSERT(MUTEX_HELD(&vc->vc_lock));
ASSERT(ve->ve_fill_io == NULL);
- if (ve->ve_lastused != lbolt) {
+ if (ve->ve_lastused != ddi_get_lbolt()) {
avl_remove(&vc->vc_lastused_tree, ve);
- ve->ve_lastused = lbolt;
+ ve->ve_lastused = ddi_get_lbolt();
avl_add(&vc->vc_lastused_tree, ve);
}
diff --git a/usr/src/uts/common/fs/zfs/vdev_queue.c b/usr/src/uts/common/fs/zfs/vdev_queue.c
index d98278ddef..21e60ce843 100644
--- a/usr/src/uts/common/fs/zfs/vdev_queue.c
+++ b/usr/src/uts/common/fs/zfs/vdev_queue.c
@@ -40,7 +40,7 @@
int zfs_vdev_max_pending = 10;
int zfs_vdev_min_pending = 4;
-/* deadline = pri + (lbolt >> time_shift) */
+/* deadline = pri + (ddi_get_lbolt64() >> time_shift) */
int zfs_vdev_time_shift = 6;
/* exponential I/O issue ramp-up rate */
@@ -359,7 +359,8 @@ vdev_queue_io(zio_t *zio)
mutex_enter(&vq->vq_lock);
- zio->io_deadline = (lbolt64 >> zfs_vdev_time_shift) + zio->io_priority;
+ zio->io_deadline = (ddi_get_lbolt64() >> zfs_vdev_time_shift) +
+ zio->io_priority;
vdev_queue_io_add(vq, zio);
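For reference, the deadline expression in the hunk above buckets I/Os into coarse time windows: zfs_vdev_time_shift of 6 divides the 64-bit tick count by 2^6 = 64, so with hz = 100 (10 ms ticks, a common default) every I/O of a given priority issued within the same ~640 ms window gets the same deadline value. An illustrative calculation, not part of the patch:

	int64_t	ticks    = ddi_get_lbolt64();			/* say 123456 ticks */
	int64_t	deadline = (ticks >> zfs_vdev_time_shift)	/* 123456 >> 6 = 1929 */
	    + zio->io_priority;					/* priority 4 -> 1933 */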
diff --git a/usr/src/uts/common/fs/zfs/zil.c b/usr/src/uts/common/fs/zfs/zil.c
index dba690bbaf..40ba1a2d68 100644
--- a/usr/src/uts/common/fs/zfs/zil.c
+++ b/usr/src/uts/common/fs/zfs/zil.c
@@ -1647,7 +1647,7 @@ zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
txg_wait_synced(zilog->zl_dmu_pool, 0);
zilog->zl_replay = B_TRUE;
- zilog->zl_replay_time = lbolt;
+ zilog->zl_replay_time = ddi_get_lbolt();
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg);
diff --git a/usr/src/uts/common/fs/zfs/zio_inject.c b/usr/src/uts/common/fs/zfs/zio_inject.c
index 5c4a6c3df6..e8f8f7b723 100644
--- a/usr/src/uts/common/fs/zfs/zio_inject.c
+++ b/usr/src/uts/common/fs/zfs/zio_inject.c
@@ -318,7 +318,7 @@ zio_handle_ignored_writes(zio_t *zio)
*/
if (handler->zi_record.zi_timer == 0) {
if (handler->zi_record.zi_duration > 0)
- handler->zi_record.zi_timer = lbolt64;
+ handler->zi_record.zi_timer = ddi_get_lbolt64();
else
handler->zi_record.zi_timer = zio->io_txg;
}
@@ -355,7 +355,8 @@ spa_handle_ignored_writes(spa_t *spa)
if (handler->zi_record.zi_duration > 0) {
VERIFY(handler->zi_record.zi_timer == 0 ||
handler->zi_record.zi_timer +
- handler->zi_record.zi_duration * hz > lbolt64);
+ handler->zi_record.zi_duration * hz >
+ ddi_get_lbolt64());
} else {
/* duration is negative so the subtraction here adds */
VERIFY(handler->zi_record.zi_timer == 0 ||
diff --git a/usr/src/uts/common/inet/ilb/ilb.c b/usr/src/uts/common/inet/ilb/ilb.c
index 7e6a7829d8..747c3768ad 100644
--- a/usr/src/uts/common/inet/ilb/ilb.c
+++ b/usr/src/uts/common/inet/ilb/ilb.c
@@ -778,7 +778,8 @@ ilb_rule_del_common(ilb_stack_t *ilbs, ilb_rule_t *tmp_rule)
* by gc thread is intact.
*/
(void) atomic_swap_64(
- (uint64_t *)&server->iser_die_time, lbolt64 +
+ (uint64_t *)&server->iser_die_time,
+ ddi_get_lbolt64() +
SEC_TO_TICK(tmp_rule->ir_conn_drain_timeout));
}
while (server->iser_refcnt > 1)
@@ -1176,7 +1177,7 @@ ilb_server_toggle(ilb_stack_t *ilbs, zoneid_t zoneid, const char *rule_name,
if (rule->ir_conn_drain_timeout != 0) {
(void) atomic_swap_64(
(uint64_t *)&tmp_server->iser_die_time,
- lbolt64 + SEC_TO_TICK(
+ ddi_get_lbolt64() + SEC_TO_TICK(
rule->ir_conn_drain_timeout));
}
}
@@ -1499,7 +1500,8 @@ ilb_server_del(ilb_stack_t *ilbs, zoneid_t zoneid, const char *rule_name,
/* If there is a hard limit on when a server should die, set it. */
if (rule->ir_conn_drain_timeout != 0) {
(void) atomic_swap_64((uint64_t *)&server->iser_die_time,
- lbolt64 + SEC_TO_TICK(rule->ir_conn_drain_timeout));
+ ddi_get_lbolt64() +
+ SEC_TO_TICK(rule->ir_conn_drain_timeout));
}
if (server->iser_refcnt > 1) {
diff --git a/usr/src/uts/common/inet/ilb/ilb_conn.c b/usr/src/uts/common/inet/ilb/ilb_conn.c
index 6a7a75a150..450adab5f4 100644
--- a/usr/src/uts/common/inet/ilb/ilb_conn.c
+++ b/usr/src/uts/common/inet/ilb/ilb_conn.c
@@ -101,7 +101,7 @@ static int ilb_sticky_timeout = 15;
{ \
mutex_enter(&(s)->hash->sticky_lock); \
(s)->refcnt--; \
- (s)->atime = lbolt64; \
+ (s)->atime = ddi_get_lbolt64(); \
mutex_exit(&s->hash->sticky_lock); \
}
@@ -211,7 +211,7 @@ ilb_conn_cleanup(void *arg)
c2s_hash = ilbs->ilbs_c2s_conn_hash;
ASSERT(c2s_hash != NULL);
- now = lbolt64;
+ now = ddi_get_lbolt64();
for (i = timer->start; i < timer->end; i++) {
mutex_enter(&c2s_hash[i].ilb_conn_hash_lock);
if ((connp = c2s_hash[i].ilb_connp) == NULL) {
@@ -531,7 +531,7 @@ ilb_conn_add(ilb_stack_t *ilbs, ilb_rule_t *rule, ilb_server_t *server,
connp->conn_gc = B_FALSE;
connp->conn_expiry = rule->ir_nat_expiry;
- connp->conn_cr_time = lbolt64;
+ connp->conn_cr_time = ddi_get_lbolt64();
/* Client to server info. */
connp->conn_c2s_saddr = *src;
@@ -539,7 +539,7 @@ ilb_conn_add(ilb_stack_t *ilbs, ilb_rule_t *rule, ilb_server_t *server,
connp->conn_c2s_daddr = *dst;
connp->conn_c2s_dport = dport;
- connp->conn_c2s_atime = lbolt64;
+ connp->conn_c2s_atime = ddi_get_lbolt64();
/* The packet that triggers this creation should be counted */
connp->conn_c2s_pkt_cnt = 1;
connp->conn_c2s_tcp_fin_sent = B_FALSE;
@@ -641,7 +641,7 @@ ilb_conn_add(ilb_stack_t *ilbs, ilb_rule_t *rule, ilb_server_t *server,
break;
}
- connp->conn_s2c_atime = lbolt64;
+ connp->conn_s2c_atime = ddi_get_lbolt64();
connp->conn_s2c_pkt_cnt = 1;
connp->conn_s2c_tcp_fin_sent = B_FALSE;
connp->conn_s2c_tcp_fin_acked = B_FALSE;
@@ -766,7 +766,7 @@ ilb_find_conn(ilb_stack_t *ilbs, void *iph, void *tph, int l4, in6_addr_t *src,
connp->conn_c2s_sport == sport &&
IN6_ARE_ADDR_EQUAL(src, &connp->conn_c2s_saddr) &&
IN6_ARE_ADDR_EQUAL(dst, &connp->conn_c2s_daddr)) {
- connp->conn_c2s_atime = lbolt64;
+ connp->conn_c2s_atime = ddi_get_lbolt64();
connp->conn_c2s_pkt_cnt++;
*rule_cache = connp->conn_rule_cache;
*ip_sum = connp->conn_c2s_ip_sum;
@@ -785,7 +785,7 @@ ilb_find_conn(ilb_stack_t *ilbs, void *iph, void *tph, int l4, in6_addr_t *src,
connp->conn_s2c_sport == sport &&
IN6_ARE_ADDR_EQUAL(src, &connp->conn_s2c_saddr) &&
IN6_ARE_ADDR_EQUAL(dst, &connp->conn_s2c_daddr)) {
- connp->conn_s2c_atime = lbolt64;
+ connp->conn_s2c_atime = ddi_get_lbolt64();
connp->conn_s2c_pkt_cnt++;
*rule_cache = connp->conn_rule_cache;
*ip_sum = connp->conn_s2c_ip_sum;
@@ -966,7 +966,7 @@ ilb_check_icmp_conn(ilb_stack_t *ilbs, mblk_t *mp, int l3, void *out_iph,
connp->conn_c2s_sport == *dport &&
IN6_ARE_ADDR_EQUAL(in_dst_p, &connp->conn_c2s_saddr) &&
IN6_ARE_ADDR_EQUAL(in_src_p, &connp->conn_c2s_daddr)) {
- connp->conn_c2s_atime = lbolt64;
+ connp->conn_c2s_atime = ddi_get_lbolt64();
connp->conn_c2s_pkt_cnt++;
rule_cache = connp->conn_rule_cache;
adj_ip_sum = connp->conn_c2s_ip_sum;
@@ -1208,7 +1208,7 @@ ilb_sticky_add(ilb_sticky_hash_t *hash, ilb_rule_t *rule, ilb_server_t *server,
* zero. But just set it here for debugging purpose. The
* atime is set when a refrele is done on a sticky entry.
*/
- s->atime = lbolt64;
+ s->atime = ddi_get_lbolt64();
list_insert_head(&hash->sticky_head, s);
hash->sticky_cnt++;
@@ -1314,7 +1314,7 @@ ilb_sticky_cleanup(void *arg)
hash = ilbs->ilbs_sticky_hash;
ASSERT(hash != NULL);
- now = lbolt64;
+ now = ddi_get_lbolt64();
for (i = timer->start; i < timer->end; i++) {
mutex_enter(&hash[i].sticky_lock);
for (s = list_head(&hash[i].sticky_head); s != NULL;
diff --git a/usr/src/uts/common/inet/ip/conn_opt.c b/usr/src/uts/common/inet/ip/conn_opt.c
index a46d7c4cd0..8820f6d2ab 100644
--- a/usr/src/uts/common/inet/ip/conn_opt.c
+++ b/usr/src/uts/common/inet/ip/conn_opt.c
@@ -2881,7 +2881,7 @@ conn_inherit_parent(conn_t *lconnp, conn_t *econnp)
econnp->conn_cred = credp = lconnp->conn_cred;
crhold(credp);
econnp->conn_cpid = lconnp->conn_cpid;
- econnp->conn_open_time = lbolt64;
+ econnp->conn_open_time = ddi_get_lbolt64();
/*
* Cache things in the ixa without any refhold.
diff --git a/usr/src/uts/common/inet/ip/icmp.c b/usr/src/uts/common/inet/ip/icmp.c
index 8222c866d0..a070f1de35 100644
--- a/usr/src/uts/common/inet/ip/icmp.c
+++ b/usr/src/uts/common/inet/ip/icmp.c
@@ -1714,7 +1714,7 @@ rawip_do_open(int family, cred_t *credp, int *err, int flags)
crhold(credp);
connp->conn_cred = credp;
connp->conn_cpid = curproc->p_pid;
- connp->conn_open_time = lbolt64;
+ connp->conn_open_time = ddi_get_lbolt64();
/* Cache things in ixa without an extra refhold */
connp->conn_ixa->ixa_cred = connp->conn_cred;
connp->conn_ixa->ixa_cpid = connp->conn_cpid;
diff --git a/usr/src/uts/common/inet/ip/ip.c b/usr/src/uts/common/inet/ip/ip.c
index b59087e9b1..240d054d73 100644
--- a/usr/src/uts/common/inet/ip/ip.c
+++ b/usr/src/uts/common/inet/ip/ip.c
@@ -2130,7 +2130,7 @@ icmp_inbound_too_big_v4(icmph_t *icmph, ip_recv_attr_t *ira)
/* We now have a PMTU for sure */
dce->dce_flags |= DCEF_PMTU;
- dce->dce_last_change_time = TICK_TO_SEC(lbolt64);
+ dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
mutex_exit(&dce->dce_lock);
/*
* After dropping the lock the new value is visible to everyone.
@@ -3083,7 +3083,7 @@ icmp_pkt(mblk_t *mp, void *stuff, size_t len, ip_recv_attr_t *ira)
boolean_t
icmp_err_rate_limit(ip_stack_t *ipst)
{
- clock_t now = TICK_TO_MSEC(lbolt);
+ clock_t now = TICK_TO_MSEC(ddi_get_lbolt());
uint_t refilled; /* Number of packets refilled in tbf since last */
/* Guard against changes by loading into local variable */
uint_t err_interval = ipst->ips_ip_icmp_err_interval;
@@ -3893,14 +3893,14 @@ ip_get_pmtu(ip_xmit_attr_t *ixa)
/* Check if the PMTU is too old before we use it */
if ((dce->dce_flags & DCEF_PMTU) &&
- TICK_TO_SEC(lbolt64) - dce->dce_last_change_time >
+ TICK_TO_SEC(ddi_get_lbolt64()) - dce->dce_last_change_time >
ipst->ips_ip_pathmtu_interval) {
/*
* Older than 20 minutes. Drop the path MTU information.
*/
mutex_enter(&dce->dce_lock);
dce->dce_flags &= ~(DCEF_PMTU|DCEF_TOO_SMALL_PMTU);
- dce->dce_last_change_time = TICK_TO_SEC(lbolt64);
+ dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
mutex_exit(&dce->dce_lock);
dce_increment_generation(dce);
}
@@ -14773,7 +14773,7 @@ sendit:
* It should be o.k. to check the state without
* a lock here, at most we lose an advice.
*/
- ncec->ncec_last = TICK_TO_MSEC(lbolt64);
+ ncec->ncec_last = TICK_TO_MSEC(ddi_get_lbolt64());
if (ncec->ncec_state != ND_REACHABLE) {
mutex_enter(&ncec->ncec_lock);
ncec->ncec_state = ND_REACHABLE;
@@ -14792,7 +14792,7 @@ sendit:
return (0);
}
- delta = TICK_TO_MSEC(lbolt64) - ncec->ncec_last;
+ delta = TICK_TO_MSEC(ddi_get_lbolt64()) - ncec->ncec_last;
ip1dbg(("ip_xmit: delta = %" PRId64
" ill_reachable_time = %d \n", delta,
ill->ill_reachable_time));
diff --git a/usr/src/uts/common/inet/ip/ip2mac.c b/usr/src/uts/common/inet/ip/ip2mac.c
index 55a17f762a..7ee7504d28 100644
--- a/usr/src/uts/common/inet/ip/ip2mac.c
+++ b/usr/src/uts/common/inet/ip/ip2mac.c
@@ -238,7 +238,7 @@ ip2mac(uint_t op, ip2mac_t *ip2m, ip2mac_callback_t *cb, void *cbarg,
goto done;
}
ncec = nce->nce_common;
- delta = TICK_TO_MSEC(lbolt64) - ncec->ncec_last;
+ delta = TICK_TO_MSEC(ddi_get_lbolt64()) - ncec->ncec_last;
mutex_enter(&ncec->ncec_lock);
if (NCE_ISREACHABLE(ncec) &&
delta < (uint64_t)ill->ill_reachable_time) {
@@ -256,7 +256,7 @@ ip2mac(uint_t op, ip2mac_t *ip2m, ip2mac_callback_t *cb, void *cbarg,
}
}
ncec = nce->nce_common;
- delta = TICK_TO_MSEC(lbolt64) - ncec->ncec_last;
+ delta = TICK_TO_MSEC(ddi_get_lbolt64()) - ncec->ncec_last;
mutex_enter(&ncec->ncec_lock);
if (NCE_ISCONDEMNED(ncec)) {
ip2m->ip2mac_err = ESRCH;
diff --git a/usr/src/uts/common/inet/ip/ip6.c b/usr/src/uts/common/inet/ip/ip6.c
index ed54c08884..2794716bcd 100644
--- a/usr/src/uts/common/inet/ip/ip6.c
+++ b/usr/src/uts/common/inet/ip/ip6.c
@@ -739,7 +739,7 @@ icmp_inbound_too_big_v6(icmp6_t *icmp6, ip_recv_attr_t *ira)
}
/* We now have a PMTU for sure */
dce->dce_flags |= DCEF_PMTU;
- dce->dce_last_change_time = TICK_TO_SEC(lbolt64);
+ dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
mutex_exit(&dce->dce_lock);
/*
* After dropping the lock the new value is visible to everyone.
diff --git a/usr/src/uts/common/inet/ip/ip6_input.c b/usr/src/uts/common/inet/ip/ip6_input.c
index cee5344bf6..bda97c602b 100644
--- a/usr/src/uts/common/inet/ip/ip6_input.c
+++ b/usr/src/uts/common/inet/ip/ip6_input.c
@@ -1437,7 +1437,7 @@ ire_recv_local_v6(ire_t *ire, mblk_t *mp, void *iph_arg, ip_recv_attr_t *ira)
ill_t *ire_ill = ire->ire_ill;
/* Make a note for DAD that this address is in use */
- ire->ire_last_used_time = lbolt;
+ ire->ire_last_used_time = ddi_get_lbolt();
/* Only target the IRE_LOCAL with the right zoneid. */
ira->ira_zoneid = ire->ire_zoneid;
diff --git a/usr/src/uts/common/inet/ip/ip6_output.c b/usr/src/uts/common/inet/ip/ip6_output.c
index 3e06050781..31b7a54868 100644
--- a/usr/src/uts/common/inet/ip/ip6_output.c
+++ b/usr/src/uts/common/inet/ip/ip6_output.c
@@ -118,6 +118,7 @@ ip_output_simple_v6(mblk_t *mp, ip_xmit_attr_t *ixa)
boolean_t repeat = B_FALSE;
boolean_t multirt = B_FALSE;
uint_t ifindex;
+ int64_t now;
ip6h = (ip6_t *)mp->b_rptr;
ASSERT(IPH_HDR_VERSION(ip6h) == IPV6_VERSION);
@@ -237,14 +238,15 @@ repeat_ire:
* To avoid a periodic timer to increase the path MTU we
* look at dce_last_change_time each time we send a packet.
*/
- if (TICK_TO_SEC(lbolt64) - dce->dce_last_change_time >
+ now = ddi_get_lbolt64();
+ if (TICK_TO_SEC(now) - dce->dce_last_change_time >
ipst->ips_ip_pathmtu_interval) {
/*
* Older than 20 minutes. Drop the path MTU information.
*/
mutex_enter(&dce->dce_lock);
dce->dce_flags &= ~(DCEF_PMTU|DCEF_TOO_SMALL_PMTU);
- dce->dce_last_change_time = TICK_TO_SEC(lbolt64);
+ dce->dce_last_change_time = TICK_TO_SEC(now);
mutex_exit(&dce->dce_lock);
dce_increment_generation(dce);
ixa->ixa_fragsize = ip_get_base_mtu(nce->nce_ill, ire);
diff --git a/usr/src/uts/common/inet/ip/ip_dce.c b/usr/src/uts/common/inet/ip/ip_dce.c
index 839c5ae0d0..b7d7e38022 100644
--- a/usr/src/uts/common/inet/ip/ip_dce.c
+++ b/usr/src/uts/common/inet/ip/ip_dce.c
@@ -125,7 +125,7 @@ dcb_reclaim(dcb_t *dcb, ip_stack_t *ipst, uint_t fraction)
/* Clear DCEF_PMTU if the pmtu is too old */
mutex_enter(&dce->dce_lock);
if ((dce->dce_flags & DCEF_PMTU) &&
- TICK_TO_SEC(lbolt64) - dce->dce_last_change_time >
+ TICK_TO_SEC(ddi_get_lbolt64()) - dce->dce_last_change_time >
ipst->ips_ip_pathmtu_interval) {
dce->dce_flags &= ~DCEF_PMTU;
mutex_exit(&dce->dce_lock);
@@ -220,7 +220,8 @@ dce_stack_init(ip_stack_t *ipst)
bzero(ipst->ips_dce_default, sizeof (dce_t));
ipst->ips_dce_default->dce_flags = DCEF_DEFAULT;
ipst->ips_dce_default->dce_generation = DCE_GENERATION_INITIAL;
- ipst->ips_dce_default->dce_last_change_time = TICK_TO_SEC(lbolt64);
+ ipst->ips_dce_default->dce_last_change_time =
+ TICK_TO_SEC(ddi_get_lbolt64());
ipst->ips_dce_default->dce_refcnt = 1; /* Should never go away */
ipst->ips_dce_default->dce_ipst = ipst;
@@ -428,7 +429,7 @@ dce_lookup_and_add_v4(ipaddr_t dst, ip_stack_t *ipst)
dce->dce_v4addr = dst;
dce->dce_generation = DCE_GENERATION_INITIAL;
dce->dce_ipversion = IPV4_VERSION;
- dce->dce_last_change_time = TICK_TO_SEC(lbolt64);
+ dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
dce_refhold(dce); /* For the hash list */
/* Link into list */
@@ -493,7 +494,7 @@ dce_lookup_and_add_v6(const in6_addr_t *dst, uint_t ifindex, ip_stack_t *ipst)
dce->dce_ifindex = ifindex;
dce->dce_generation = DCE_GENERATION_INITIAL;
dce->dce_ipversion = IPV6_VERSION;
- dce->dce_last_change_time = TICK_TO_SEC(lbolt64);
+ dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
dce_refhold(dce); /* For the hash list */
/* Link into list */
@@ -560,7 +561,7 @@ dce_setuinfo(dce_t *dce, iulp_t *uinfo)
dce->dce_pmtu = MIN(uinfo->iulp_mtu, IP_MAXPACKET);
dce->dce_flags |= DCEF_PMTU;
}
- dce->dce_last_change_time = TICK_TO_SEC(lbolt64);
+ dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
}
if (uinfo->iulp_ssthresh != 0) {
if (dce->dce_uinfo.iulp_ssthresh != 0)
@@ -756,7 +757,7 @@ ip_snmp_get_mib2_ip_dce(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
int i;
uint64_t current_time;
- current_time = TICK_TO_SEC(lbolt64);
+ current_time = TICK_TO_SEC(ddi_get_lbolt64());
/*
* make a copy of the original message
diff --git a/usr/src/uts/common/inet/ip/ip_ftable.c b/usr/src/uts/common/inet/ip/ip_ftable.c
index 771dd9f62f..7dd350a42a 100644
--- a/usr/src/uts/common/inet/ip/ip_ftable.c
+++ b/usr/src/uts/common/inet/ip/ip_ftable.c
@@ -892,7 +892,7 @@ ire_round_robin(irb_t *irb_ptr, ire_ftable_args_t *margs, uint_t hash,
mutex_enter(&ire->ire_lock);
/* Look for stale ire_badcnt and clear */
if (ire->ire_badcnt != 0 &&
- (TICK_TO_SEC(lbolt64) - ire->ire_last_badcnt >
+ (TICK_TO_SEC(ddi_get_lbolt64()) - ire->ire_last_badcnt >
ipst->ips_ip_ire_badcnt_lifetime))
ire->ire_badcnt = 0;
mutex_exit(&ire->ire_lock);
diff --git a/usr/src/uts/common/inet/ip/ip_if.c b/usr/src/uts/common/inet/ip/ip_if.c
index 6066da35b4..cd5a98b7b5 100644
--- a/usr/src/uts/common/inet/ip/ip_if.c
+++ b/usr/src/uts/common/inet/ip/ip_if.c
@@ -2426,6 +2426,7 @@ ill_frag_prune(ill_t *ill, uint_t max_count)
ipfb_t *ipfb;
ipf_t *ipf;
size_t count;
+ clock_t now;
/*
* If we are here within ip_min_frag_prune_time msecs remove
@@ -2433,7 +2434,8 @@ ill_frag_prune(ill_t *ill, uint_t max_count)
* ill_frag_free_num_pkts.
*/
mutex_enter(&ill->ill_lock);
- if (TICK_TO_MSEC(lbolt - ill->ill_last_frag_clean_time) <=
+ now = ddi_get_lbolt();
+ if (TICK_TO_MSEC(now - ill->ill_last_frag_clean_time) <=
(ip_min_frag_prune_time != 0 ?
ip_min_frag_prune_time : msec_per_tick)) {
@@ -2442,7 +2444,7 @@ ill_frag_prune(ill_t *ill, uint_t max_count)
} else {
ill->ill_frag_free_num_pkts = 0;
}
- ill->ill_last_frag_clean_time = lbolt;
+ ill->ill_last_frag_clean_time = now;
mutex_exit(&ill->ill_lock);
/*
@@ -4994,7 +4996,7 @@ th_trace_rrecord(th_trace_t *th_trace)
lastref = 0;
th_trace->th_trace_lastref = lastref;
tr_buf = &th_trace->th_trbuf[lastref];
- tr_buf->tr_time = lbolt;
+ tr_buf->tr_time = ddi_get_lbolt();
tr_buf->tr_depth = getpcstack(tr_buf->tr_stack, TR_STACK_DEPTH);
}
@@ -6528,8 +6530,8 @@ ipsq_enter(ill_t *ill, boolean_t force, int type)
} else {
mutex_exit(&ipx->ipx_lock);
mutex_exit(&ipsq->ipsq_lock);
- (void) cv_timedwait(&ill->ill_cv,
- &ill->ill_lock, lbolt + ENTER_SQ_WAIT_TICKS);
+ (void) cv_reltimedwait(&ill->ill_cv,
+ &ill->ill_lock, ENTER_SQ_WAIT_TICKS, TR_CLOCK_TICK);
waited_enough = B_TRUE;
}
mutex_exit(&ill->ill_lock);
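The ipsq_enter() hunk above shows the other conversion this patch makes throughout: a cv_timedwait() on an absolute deadline computed from the current tick count becomes a cv_reltimedwait() on the relative number of ticks, with TR_CLOCK_TICK as the time-resolution argument. Sketched in isolation (xx_cv, xx_lock and XX_WAIT_TICKS are hypothetical names):

	/* before: absolute deadline derived from the tick counter */
	(void) cv_timedwait(&xx_cv, &xx_lock,
	    ddi_get_lbolt() + XX_WAIT_TICKS);

	/* after: relative wait; same -1-on-timeout return convention */
	(void) cv_reltimedwait(&xx_cv, &xx_lock,
	    XX_WAIT_TICKS, TR_CLOCK_TICK);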
diff --git a/usr/src/uts/common/inet/ip/ip_input.c b/usr/src/uts/common/inet/ip/ip_input.c
index d47670f85d..e9850f220c 100644
--- a/usr/src/uts/common/inet/ip/ip_input.c
+++ b/usr/src/uts/common/inet/ip/ip_input.c
@@ -1679,7 +1679,7 @@ ire_recv_local_v4(ire_t *ire, mblk_t *mp, void *iph_arg, ip_recv_attr_t *ira)
ill_t *ire_ill = ire->ire_ill;
/* Make a note for DAD that this address is in use */
- ire->ire_last_used_time = lbolt;
+ ire->ire_last_used_time = ddi_get_lbolt();
/* Only target the IRE_LOCAL with the right zoneid. */
ira->ira_zoneid = ire->ire_zoneid;
diff --git a/usr/src/uts/common/inet/ip/ip_ire.c b/usr/src/uts/common/inet/ip/ip_ire.c
index be0017cb62..c204870e60 100644
--- a/usr/src/uts/common/inet/ip/ip_ire.c
+++ b/usr/src/uts/common/inet/ip/ip_ire.c
@@ -2140,7 +2140,7 @@ ire_delete_reclaim(ire_t *ire, char *arg)
(ire->ire_type & IRE_IF_CLONE)) {
/* Pick a random number */
- rand = (uint_t)lbolt +
+ rand = (uint_t)ddi_get_lbolt() +
IRE_ADDR_HASH_V6(ire->ire_addr_v6, 256);
/* Use truncation */
@@ -3092,7 +3092,7 @@ ire_no_good(ire_t *ire)
*/
mutex_enter(&ire->ire_lock);
ire->ire_badcnt++;
- ire->ire_last_badcnt = TICK_TO_SEC(lbolt64);
+ ire->ire_last_badcnt = TICK_TO_SEC(ddi_get_lbolt64());
nce = ire->ire_nce_cache;
if (nce != NULL && nce->nce_is_condemned &&
nce->nce_common->ncec_state == ND_UNREACHABLE)
diff --git a/usr/src/uts/common/inet/ip/ip_ndp.c b/usr/src/uts/common/inet/ip/ip_ndp.c
index 97096bea99..76d84761f2 100644
--- a/usr/src/uts/common/inet/ip/ip_ndp.c
+++ b/usr/src/uts/common/inet/ip/ip_ndp.c
@@ -2677,7 +2677,7 @@ nce_update(ncec_t *ncec, uint16_t new_state, uchar_t *new_ll_addr)
ASSERT((int16_t)new_state <= ND_STATE_VALID_MAX);
need_stop_timer = B_TRUE;
if (new_state == ND_REACHABLE)
- ncec->ncec_last = TICK_TO_MSEC(lbolt64);
+ ncec->ncec_last = TICK_TO_MSEC(ddi_get_lbolt64());
else {
/* We force NUD in this case */
ncec->ncec_last = 0;
@@ -3392,7 +3392,7 @@ ncec_cache_reclaim(ncec_t *ncec, char *arg)
return;
}
- rand = (uint_t)lbolt +
+ rand = (uint_t)ddi_get_lbolt() +
NCE_ADDR_HASH_V6(ncec->ncec_addr, NCE_TABLE_SIZE);
if ((rand/fraction)*fraction == rand) {
IP_STAT(ipst, ip_nce_reclaim_deleted);
@@ -4557,12 +4557,12 @@ nce_add_common(ill_t *ill, uchar_t *hw_addr, uint_t hw_addr_len,
ncec->ncec_state = state;
if (state == ND_REACHABLE) {
- ncec->ncec_last = TICK_TO_MSEC(lbolt64);
- ncec->ncec_init_time = TICK_TO_MSEC(lbolt64);
+ ncec->ncec_last = ncec->ncec_init_time =
+ TICK_TO_MSEC(ddi_get_lbolt64());
} else {
ncec->ncec_last = 0;
if (state == ND_INITIAL)
- ncec->ncec_init_time = TICK_TO_MSEC(lbolt64);
+ ncec->ncec_init_time = TICK_TO_MSEC(ddi_get_lbolt64());
}
list_create(&ncec->ncec_cb, sizeof (ncec_cb_t),
offsetof(ncec_cb_t, ncec_cb_node));
diff --git a/usr/src/uts/common/inet/ip/ip_output.c b/usr/src/uts/common/inet/ip/ip_output.c
index a4940fd3e8..5196fdfe3a 100644
--- a/usr/src/uts/common/inet/ip/ip_output.c
+++ b/usr/src/uts/common/inet/ip/ip_output.c
@@ -140,6 +140,7 @@ conn_ip_output(mblk_t *mp, ip_xmit_attr_t *ixa)
ill_t *ill;
ip_stack_t *ipst = ixa->ixa_ipst;
int error;
+ int64_t now;
/* We defer ipIfStatsHCOutRequests until an error or we have an ill */
@@ -285,8 +286,9 @@ conn_ip_output(mblk_t *mp, ip_xmit_attr_t *ixa)
* To avoid a periodic timer to increase the path MTU we
* look at dce_last_change_time each time we send a packet.
*/
+ now = ddi_get_lbolt64();
if ((dce->dce_flags & DCEF_PMTU) &&
- (TICK_TO_SEC(lbolt64) - dce->dce_last_change_time >
+ (TICK_TO_SEC(now) - dce->dce_last_change_time >
ipst->ips_ip_pathmtu_interval)) {
/*
* Older than 20 minutes. Drop the path MTU information.
@@ -296,7 +298,7 @@ conn_ip_output(mblk_t *mp, ip_xmit_attr_t *ixa)
*/
mutex_enter(&dce->dce_lock);
dce->dce_flags &= ~(DCEF_PMTU|DCEF_TOO_SMALL_PMTU);
- dce->dce_last_change_time = TICK_TO_SEC(lbolt64);
+ dce->dce_last_change_time = TICK_TO_SEC(now);
mutex_exit(&dce->dce_lock);
dce_increment_generation(dce);
}
@@ -810,6 +812,7 @@ ip_output_simple_v4(mblk_t *mp, ip_xmit_attr_t *ixa)
ip_stack_t *ipst = ixa->ixa_ipst;
boolean_t repeat = B_FALSE;
boolean_t multirt = B_FALSE;
+ int64_t now;
ipha = (ipha_t *)mp->b_rptr;
ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION);
@@ -928,14 +931,15 @@ repeat_ire:
* To avoid a periodic timer to increase the path MTU we
* look at dce_last_change_time each time we send a packet.
*/
- if (TICK_TO_SEC(lbolt64) - dce->dce_last_change_time >
+ now = ddi_get_lbolt64();
+ if (TICK_TO_SEC(now) - dce->dce_last_change_time >
ipst->ips_ip_pathmtu_interval) {
/*
* Older than 20 minutes. Drop the path MTU information.
*/
mutex_enter(&dce->dce_lock);
dce->dce_flags &= ~(DCEF_PMTU|DCEF_TOO_SMALL_PMTU);
- dce->dce_last_change_time = TICK_TO_SEC(lbolt64);
+ dce->dce_last_change_time = TICK_TO_SEC(now);
mutex_exit(&dce->dce_lock);
dce_increment_generation(dce);
ixa->ixa_fragsize = ip_get_base_mtu(nce->nce_ill, ire);
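Where the same code path reads the tick counter more than once, the conn_ip_output() and ip_output_simple_v4() hunks above hoist a single ddi_get_lbolt64() call into a local now, so the comparison and the subsequent store see one consistent value and only one call is paid for. The shape of that change, reduced to a sketch (dce field names as in the hunks, the interval name is hypothetical):

	int64_t	now = ddi_get_lbolt64();

	if (TICK_TO_SEC(now) - dce->dce_last_change_time > pmtu_interval) {
		dce->dce_last_change_time = TICK_TO_SEC(now);
		/* ... drop the stale PMTU state ... */
	}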
diff --git a/usr/src/uts/common/inet/ip/ip_squeue.c b/usr/src/uts/common/inet/ip/ip_squeue.c
index 0955bfd3f2..33a2fa5935 100644
--- a/usr/src/uts/common/inet/ip/ip_squeue.c
+++ b/usr/src/uts/common/inet/ip/ip_squeue.c
@@ -686,7 +686,7 @@ ip_squeue_get(ill_rx_ring_t *ill_rx_ring)
squeue_t *sqp;
if ((ill_rx_ring == NULL) || ((sqp = ill_rx_ring->rr_sqp) == NULL))
- return (IP_SQUEUE_GET(lbolt));
+ return (IP_SQUEUE_GET(CPU_PSEUDO_RANDOM()));
return (sqp);
}
diff --git a/usr/src/uts/common/inet/kssl/ksslrec.c b/usr/src/uts/common/inet/kssl/ksslrec.c
index 6b7ce0ad42..109fc51b7f 100644
--- a/usr/src/uts/common/inet/kssl/ksslrec.c
+++ b/usr/src/uts/common/inet/kssl/ksslrec.c
@@ -678,7 +678,7 @@ kssl_cache_sid(sslSessionID *sid, kssl_entry_t *kssl_entry)
/* set the values before creating the cache entry */
sid->cached = B_TRUE;
- sid->time = lbolt;
+ sid->time = ddi_get_lbolt();
SET_HASH_INDEX(index, s, &sid->client_addr);
index %= kssl_entry->sid_cache_nentries;
@@ -747,7 +747,8 @@ kssl_lookup_sid(sslSessionID *sid, uchar_t *s, in6_addr_t *faddr,
return;
}
- if (TICK_TO_SEC(lbolt - csid->time) > kssl_entry->sid_cache_timeout) {
+ if (TICK_TO_SEC(ddi_get_lbolt() - csid->time) >
+ kssl_entry->sid_cache_timeout) {
csid->cached = B_FALSE;
mutex_exit(lock);
return;
diff --git a/usr/src/uts/common/inet/nca/nca.h b/usr/src/uts/common/inet/nca/nca.h
index 1670c49d5d..1dea4fa392 100644
--- a/usr/src/uts/common/inet/nca/nca.h
+++ b/usr/src/uts/common/inet/nca/nca.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -326,7 +326,7 @@ typedef struct tw_s {
} \
if ((_lbolt) != NCA_TW_NONE) { \
(twp)->tid = timeout((pfv_t)nca_tw_fire, (twp), \
- (twp)->lbolt3 - lbolt); \
+ (twp)->lbolt3 - ddi_get_lbolt()); \
} \
} \
}
diff --git a/usr/src/uts/common/inet/sctp/sctp.c b/usr/src/uts/common/inet/sctp/sctp.c
index d444e1f10e..6f1a83b34a 100644
--- a/usr/src/uts/common/inet/sctp/sctp.c
+++ b/usr/src/uts/common/inet/sctp/sctp.c
@@ -363,7 +363,8 @@ sctp_disconnect(sctp_t *sctp)
sctp->sctp_lingering = 1;
sctp->sctp_client_errno = 0;
- stoptime = lbolt + connp->conn_lingertime * hz;
+ stoptime = ddi_get_lbolt() +
+ connp->conn_lingertime * hz;
mutex_enter(&sctp->sctp_lock);
sctp->sctp_running = B_FALSE;
@@ -847,7 +848,7 @@ sctp_init_values(sctp_t *sctp, sctp_t *psctp, int sleep)
sctp->sctp_strikes = 0;
- sctp->sctp_last_mtu_probe = lbolt64;
+ sctp->sctp_last_mtu_probe = ddi_get_lbolt64();
sctp->sctp_mtu_probe_intvl = sctps->sctps_mtu_probe_interval;
sctp->sctp_sack_gaps = 0;
@@ -1485,7 +1486,7 @@ sctp_create(void *ulpd, sctp_t *parent, int family, int type, int flags,
* no IPCL_ZONEID
*/
connp->conn_ixa->ixa_zoneid = zoneid;
- connp->conn_open_time = lbolt64;
+ connp->conn_open_time = ddi_get_lbolt64();
connp->conn_cred = credp;
crhold(credp);
connp->conn_cpid = curproc->p_pid;
diff --git a/usr/src/uts/common/inet/sctp/sctp_bind.c b/usr/src/uts/common/inet/sctp/sctp_bind.c
index 9e0b0e7418..b80995628d 100644
--- a/usr/src/uts/common/inet/sctp/sctp_bind.c
+++ b/usr/src/uts/common/inet/sctp/sctp_bind.c
@@ -157,7 +157,7 @@ sctp_listen(sctp_t *sctp)
sctp->sctp_state = SCTPS_LISTEN;
(void) random_get_pseudo_bytes(sctp->sctp_secret, SCTP_SECRET_LEN);
- sctp->sctp_last_secret_update = lbolt64;
+ sctp->sctp_last_secret_update = ddi_get_lbolt64();
bzero(sctp->sctp_old_secret, SCTP_SECRET_LEN);
tf = &sctps->sctps_listen_fanout[SCTP_LISTEN_HASH(
ntohs(connp->conn_lport))];
diff --git a/usr/src/uts/common/inet/sctp/sctp_common.c b/usr/src/uts/common/inet/sctp/sctp_common.c
index b518eb3981..6400c2cc0f 100644
--- a/usr/src/uts/common/inet/sctp/sctp_common.c
+++ b/usr/src/uts/common/inet/sctp/sctp_common.c
@@ -619,7 +619,7 @@ sctp_redo_faddr_srcs(sctp_t *sctp)
void
sctp_faddr_alive(sctp_t *sctp, sctp_faddr_t *fp)
{
- int64_t now = lbolt64;
+ int64_t now = ddi_get_lbolt64();
fp->strikes = 0;
sctp->sctp_strikes = 0;
@@ -1779,7 +1779,7 @@ sctp_init_faddr(sctp_t *sctp, sctp_faddr_t *fp, in6_addr_t *addr,
fp->suna = 0;
fp->pba = 0;
fp->acked = 0;
- fp->lastactive = lbolt64;
+ fp->lastactive = fp->hb_expiry = ddi_get_lbolt64();
fp->timer_mp = timer_mp;
fp->hb_pending = B_FALSE;
fp->hb_enabled = B_TRUE;
@@ -1789,7 +1789,6 @@ sctp_init_faddr(sctp_t *sctp, sctp_faddr_t *fp, in6_addr_t *addr,
fp->T3expire = 0;
(void) random_get_pseudo_bytes((uint8_t *)&fp->hb_secret,
sizeof (fp->hb_secret));
- fp->hb_expiry = lbolt64;
fp->rxt_unacked = 0;
sctp_get_dest(sctp, fp);
diff --git a/usr/src/uts/common/inet/sctp/sctp_conn.c b/usr/src/uts/common/inet/sctp/sctp_conn.c
index 7dc048f919..6cf4d9af3e 100644
--- a/usr/src/uts/common/inet/sctp/sctp_conn.c
+++ b/usr/src/uts/common/inet/sctp/sctp_conn.c
@@ -115,7 +115,7 @@ sctp_accept_comm(sctp_t *listener, sctp_t *acceptor, mblk_t *cr_pkt,
bcopy(listener->sctp_secret, acceptor->sctp_secret, SCTP_SECRET_LEN);
bcopy(listener->sctp_old_secret, acceptor->sctp_old_secret,
SCTP_SECRET_LEN);
- acceptor->sctp_last_secret_update = lbolt64;
+ acceptor->sctp_last_secret_update = ddi_get_lbolt64();
/*
* After acceptor is inserted in the hash list, it can be found.
@@ -136,7 +136,7 @@ sctp_accept_comm(sctp_t *listener, sctp_t *acceptor, mblk_t *cr_pkt,
*/
/* XXXSCTP */
acceptor->sctp_state = SCTPS_ESTABLISHED;
- acceptor->sctp_assoc_start_time = (uint32_t)lbolt;
+ acceptor->sctp_assoc_start_time = (uint32_t)ddi_get_lbolt();
/*
* listener->sctp_rwnd should be the default window size or a
* window size changed via SO_RCVBUF option.
diff --git a/usr/src/uts/common/inet/sctp/sctp_cookie.c b/usr/src/uts/common/inet/sctp/sctp_cookie.c
index 4baf0a7147..a72df4c68e 100644
--- a/usr/src/uts/common/inet/sctp/sctp_cookie.c
+++ b/usr/src/uts/common/inet/sctp/sctp_cookie.c
@@ -736,7 +736,7 @@ sctp_send_initack(sctp_t *sctp, sctp_hdr_t *initsh, sctp_chunk_hdr_t *ch,
/* timestamp */
now = (int64_t *)(cookieph + 1);
- nowt = lbolt64;
+ nowt = ddi_get_lbolt64();
bcopy(&nowt, now, sizeof (*now));
/* cookie lifetime -- need configuration */
@@ -1279,7 +1279,7 @@ sctp_process_cookie(sctp_t *sctp, sctp_chunk_hdr_t *ch, mblk_t *cmp,
* So it is lbolt64 - (ts + *lt). If it is positive, it means
* that the Cookie has expired.
*/
- diff = lbolt64 - (ts + *lt);
+ diff = ddi_get_lbolt64() - (ts + *lt);
if (diff > 0 && (init->sic_inittag != sctp->sctp_fvtag ||
iack->sic_inittag != sctp->sctp_lvtag)) {
uint32_t staleness;
@@ -1343,7 +1343,8 @@ sctp_process_cookie(sctp_t *sctp, sctp_chunk_hdr_t *ch, mblk_t *cmp,
if (sctp->sctp_state < SCTPS_ESTABLISHED) {
sctp->sctp_state = SCTPS_ESTABLISHED;
- sctp->sctp_assoc_start_time = (uint32_t)lbolt;
+ sctp->sctp_assoc_start_time =
+ (uint32_t)ddi_get_lbolt();
}
dprint(1, ("sctp peer %x:%x:%x:%x (%d) restarted\n",
@@ -1371,7 +1372,8 @@ sctp_process_cookie(sctp_t *sctp, sctp_chunk_hdr_t *ch, mblk_t *cmp,
if (!sctp_initialize_params(sctp, init, iack))
return (-1); /* Drop? */
sctp->sctp_state = SCTPS_ESTABLISHED;
- sctp->sctp_assoc_start_time = (uint32_t)lbolt;
+ sctp->sctp_assoc_start_time =
+ (uint32_t)ddi_get_lbolt();
}
dprint(1, ("init collision with %x:%x:%x:%x (%d)\n",
@@ -1402,7 +1404,8 @@ sctp_process_cookie(sctp_t *sctp, sctp_chunk_hdr_t *ch, mblk_t *cmp,
if (!sctp_initialize_params(sctp, init, iack))
return (-1); /* Drop? */
sctp->sctp_state = SCTPS_ESTABLISHED;
- sctp->sctp_assoc_start_time = (uint32_t)lbolt;
+ sctp->sctp_assoc_start_time =
+ (uint32_t)ddi_get_lbolt();
}
return (0);
} else {
diff --git a/usr/src/uts/common/inet/sctp/sctp_heartbeat.c b/usr/src/uts/common/inet/sctp/sctp_heartbeat.c
index 2fbffee1c3..6069739f3c 100644
--- a/usr/src/uts/common/inet/sctp/sctp_heartbeat.c
+++ b/usr/src/uts/common/inet/sctp/sctp_heartbeat.c
@@ -158,7 +158,7 @@ sctp_send_heartbeat(sctp_t *sctp, sctp_faddr_t *fp)
* Copy the current time to the heartbeat and we can use it to
* calculate the RTT when we get it back in the heartbeat ACK.
*/
- now = lbolt64;
+ now = ddi_get_lbolt64();
t = (int64_t *)(hpp + 1);
bcopy(&now, t, sizeof (now));
@@ -209,7 +209,7 @@ sctp_validate_peer(sctp_t *sctp)
int64_t earliest_expiry;
sctp_stack_t *sctps = sctp->sctp_sctps;
- now = lbolt64;
+ now = ddi_get_lbolt64();
earliest_expiry = 0;
cnt = sctps->sctps_maxburst;
@@ -329,7 +329,7 @@ sctp_process_heartbeat(sctp_t *sctp, sctp_chunk_hdr_t *cp)
/* This address is now confirmed and alive. */
sctp_faddr_alive(sctp, fp);
- now = lbolt64;
+ now = ddi_get_lbolt64();
sctp_update_rtt(sctp, fp, now - sent);
/*
diff --git a/usr/src/uts/common/inet/sctp/sctp_impl.h b/usr/src/uts/common/inet/sctp/sctp_impl.h
index d84c3762f3..509cb76fce 100644
--- a/usr/src/uts/common/inet/sctp/sctp_impl.h
+++ b/usr/src/uts/common/inet/sctp/sctp_impl.h
@@ -340,7 +340,7 @@ typedef struct {
#define SCTP_MSG_TO_BE_ABANDONED(meta, mhdr, sctp) \
(((!SCTP_CHUNK_ISSENT((meta)->b_cont) && (mhdr)->smh_ttl > 0) || \
((sctp)->sctp_prsctp_aware && ((mhdr)->smh_flags & MSG_PR_SCTP))) && \
- ((lbolt64 - (mhdr)->smh_tob) > (mhdr)->smh_ttl))
+ ((ddi_get_lbolt64() - (mhdr)->smh_tob) > (mhdr)->smh_ttl))
/* SCTP association hash function. */
#define SCTP_CONN_HASH(sctps, ports) \
diff --git a/usr/src/uts/common/inet/sctp/sctp_input.c b/usr/src/uts/common/inet/sctp/sctp_input.c
index e4a5ef5c5b..7ff11a588d 100644
--- a/usr/src/uts/common/inet/sctp/sctp_input.c
+++ b/usr/src/uts/common/inet/sctp/sctp_input.c
@@ -1776,7 +1776,7 @@ sctp_sack(sctp_t *sctp, mblk_t *dups)
(void *)sctp->sctp_lastdata,
SCTP_PRINTADDR(sctp->sctp_lastdata->faddr)));
- sctp->sctp_active = lbolt64;
+ sctp->sctp_active = ddi_get_lbolt64();
BUMP_MIB(&sctps->sctps_mib, sctpOutAck);
@@ -1918,7 +1918,8 @@ sctp_cumack(sctp_t *sctp, uint32_t tsn, mblk_t **first_unacked)
xtsn == sctp->sctp_rtt_tsn) {
/* Got a new RTT measurement */
sctp_update_rtt(sctp, fp,
- lbolt64 - sctp->sctp_out_time);
+ ddi_get_lbolt64() -
+ sctp->sctp_out_time);
sctp->sctp_out_time = 0;
}
if (SCTP_CHUNK_ISACKED(mp))
@@ -3583,7 +3584,7 @@ sctp_input_data(sctp_t *sctp, mblk_t *mp, ip_recv_attr_t *ira)
gotdata = 0;
trysend = 0;
- now = lbolt64;
+ now = ddi_get_lbolt64();
/* Process the chunks */
do {
dprint(3, ("sctp_dispatch_rput: state=%d, chunk id=%d\n",
@@ -3918,7 +3919,8 @@ sctp_input_data(sctp_t *sctp, mblk_t *mp, ip_recv_attr_t *ira)
}
sctp->sctp_state = SCTPS_ESTABLISHED;
- sctp->sctp_assoc_start_time = (uint32_t)lbolt;
+ sctp->sctp_assoc_start_time =
+ (uint32_t)ddi_get_lbolt();
BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab);
if (sctp->sctp_cookie_mp) {
freemsg(sctp->sctp_cookie_mp);
@@ -3959,7 +3961,8 @@ sctp_input_data(sctp_t *sctp, mblk_t *mp, ip_recv_attr_t *ira)
if (sctp->sctp_unacked == 0)
sctp_stop_faddr_timers(sctp);
sctp->sctp_state = SCTPS_ESTABLISHED;
- sctp->sctp_assoc_start_time = (uint32_t)lbolt;
+ sctp->sctp_assoc_start_time =
+ (uint32_t)ddi_get_lbolt();
BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab);
BUMP_LOCAL(sctp->sctp_ibchunks);
if (sctp->sctp_cookie_mp) {
@@ -4000,7 +4003,8 @@ sctp_input_data(sctp_t *sctp, mblk_t *mp, ip_recv_attr_t *ira)
if (sctp->sctp_unacked == 0)
sctp_stop_faddr_timers(sctp);
sctp->sctp_state = SCTPS_ESTABLISHED;
- sctp->sctp_assoc_start_time = (uint32_t)lbolt;
+ sctp->sctp_assoc_start_time =
+ (uint32_t)ddi_get_lbolt();
BUMP_MIB(&sctps->sctps_mib, sctpActiveEstab);
if (sctp->sctp_cookie_mp) {
freemsg(sctp->sctp_cookie_mp);
diff --git a/usr/src/uts/common/inet/sctp/sctp_opt_data.c b/usr/src/uts/common/inet/sctp/sctp_opt_data.c
index ee5eb445af..d114434723 100644
--- a/usr/src/uts/common/inet/sctp/sctp_opt_data.c
+++ b/usr/src/uts/common/inet/sctp/sctp_opt_data.c
@@ -478,7 +478,7 @@ sctp_set_peer_addr_params(sctp_t *sctp, const void *invalp)
}
}
- now = lbolt64;
+ now = ddi_get_lbolt64();
if (fp != NULL) {
if (spp->spp_hbinterval == UINT32_MAX) {
/*
diff --git a/usr/src/uts/common/inet/sctp/sctp_output.c b/usr/src/uts/common/inet/sctp/sctp_output.c
index 1a50097260..f2b6084c26 100644
--- a/usr/src/uts/common/inet/sctp/sctp_output.c
+++ b/usr/src/uts/common/inet/sctp/sctp_output.c
@@ -257,7 +257,7 @@ sctp_sendmsg(sctp_t *sctp, mblk_t *mp, int flags)
sctp_msg_hdr->smh_ppid = ppid;
sctp_msg_hdr->smh_flags = msg_flags;
sctp_msg_hdr->smh_ttl = MSEC_TO_TICK(timetolive);
- sctp_msg_hdr->smh_tob = lbolt64;
+ sctp_msg_hdr->smh_tob = ddi_get_lbolt64();
for (; mp != NULL; mp = mp->b_cont)
msg_len += MBLKL(mp);
sctp_msg_hdr->smh_msglen = msg_len;
@@ -979,7 +979,7 @@ sctp_fast_rexmit(sctp_t *sctp)
sctp_set_iplen(sctp, head, fp->ixa);
(void) conn_ip_output(head, fp->ixa);
BUMP_LOCAL(sctp->sctp_opkts);
- sctp->sctp_active = fp->lastactive = lbolt64;
+ sctp->sctp_active = fp->lastactive = ddi_get_lbolt64();
}
void
@@ -998,7 +998,7 @@ sctp_output(sctp_t *sctp, uint_t num_pkt)
int32_t pad = 0;
int32_t pathmax;
int extra;
- int64_t now = lbolt64;
+ int64_t now = ddi_get_lbolt64();
sctp_faddr_t *fp;
sctp_faddr_t *lfp;
sctp_data_hdr_t *sdc;
@@ -2051,7 +2051,7 @@ restart_timer:
*/
SCTP_FADDR_TIMER_RESTART(sctp, fp, fp->rto);
- sctp->sctp_active = lbolt64;
+ sctp->sctp_active = ddi_get_lbolt64();
}
/*
diff --git a/usr/src/uts/common/inet/sctp/sctp_shutdown.c b/usr/src/uts/common/inet/sctp/sctp_shutdown.c
index ff835a60c0..017fed208a 100644
--- a/usr/src/uts/common/inet/sctp/sctp_shutdown.c
+++ b/usr/src/uts/common/inet/sctp/sctp_shutdown.c
@@ -75,7 +75,7 @@ sctp_send_shutdown(sctp_t *sctp, int rexmit)
* sending the shutdown, we can overload out_time
* to track how long we have waited.
*/
- sctp->sctp_out_time = lbolt64;
+ sctp->sctp_out_time = ddi_get_lbolt64();
}
/*
diff --git a/usr/src/uts/common/inet/sctp/sctp_timer.c b/usr/src/uts/common/inet/sctp/sctp_timer.c
index 24b46ad6f0..47ffe3d1fc 100644
--- a/usr/src/uts/common/inet/sctp/sctp_timer.c
+++ b/usr/src/uts/common/inet/sctp/sctp_timer.c
@@ -24,7 +24,6 @@
* Use is subject to license terms.
*/
-
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/stream.h>
@@ -416,7 +415,7 @@ sctp_heartbeat_timer(sctp_t *sctp)
return;
}
- now = lbolt64;
+ now = ddi_get_lbolt64();
earliest_expiry = 0;
cnt = sctps->sctps_maxburst;
diff --git a/usr/src/uts/common/inet/squeue.c b/usr/src/uts/common/inet/squeue.c
index db11ef79ae..a58b445158 100644
--- a/usr/src/uts/common/inet/squeue.c
+++ b/usr/src/uts/common/inet/squeue.c
@@ -311,7 +311,7 @@ squeue_worker_wakeup(squeue_t *sqp)
if (sqp->sq_wait == 0) {
ASSERT(tid == 0);
ASSERT(!(sqp->sq_state & SQS_TMO_PROG));
- sqp->sq_awaken = lbolt;
+ sqp->sq_awaken = ddi_get_lbolt();
cv_signal(&sqp->sq_worker_cv);
mutex_exit(&sqp->sq_lock);
return;
@@ -325,7 +325,8 @@ squeue_worker_wakeup(squeue_t *sqp)
/*
* Waiting for an enter() to process mblk(s).
*/
- clock_t waited = lbolt - sqp->sq_awaken;
+ clock_t now = ddi_get_lbolt();
+ clock_t waited = now - sqp->sq_awaken;
if (TICK_TO_MSEC(waited) >= sqp->sq_wait) {
/*
@@ -333,7 +334,7 @@ squeue_worker_wakeup(squeue_t *sqp)
* waiting for work, so schedule it.
*/
sqp->sq_tid = 0;
- sqp->sq_awaken = lbolt;
+ sqp->sq_awaken = now;
cv_signal(&sqp->sq_worker_cv);
mutex_exit(&sqp->sq_lock);
(void) untimeout(tid);
@@ -691,7 +692,7 @@ squeue_fire(void *arg)
sqp->sq_state &= ~SQS_TMO_PROG;
if (!(state & SQS_PROC)) {
- sqp->sq_awaken = lbolt;
+ sqp->sq_awaken = ddi_get_lbolt();
cv_signal(&sqp->sq_worker_cv);
}
mutex_exit(&sqp->sq_lock);
@@ -842,7 +843,7 @@ again:
goto again;
} else {
did_wakeup = B_TRUE;
- sqp->sq_awaken = lbolt;
+ sqp->sq_awaken = ddi_get_lbolt();
cv_signal(&sqp->sq_worker_cv);
}
}
@@ -1113,7 +1114,7 @@ poll_again:
*/
}
- sqp->sq_awaken = lbolt;
+ sqp->sq_awaken = ddi_get_lbolt();
/*
* Put the SQS_PROC_HELD on so the worker
* thread can distinguish where its called from. We
@@ -1464,7 +1465,7 @@ squeue_synch_exit(squeue_t *sqp, conn_t *connp)
* worker thread right away when there are outstanding
* requests.
*/
- sqp->sq_awaken = lbolt;
+ sqp->sq_awaken = ddi_get_lbolt();
cv_signal(&sqp->sq_worker_cv);
mutex_exit(&sqp->sq_lock);
}
diff --git a/usr/src/uts/common/inet/tcp/tcp.c b/usr/src/uts/common/inet/tcp/tcp.c
index bfa25ce300..2a1568c063 100644
--- a/usr/src/uts/common/inet/tcp/tcp.c
+++ b/usr/src/uts/common/inet/tcp/tcp.c
@@ -102,6 +102,8 @@
#include <rpc/pmap_prot.h>
#include <sys/callo.h>
+#include <sys/clock_impl.h>
+
/*
* TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
*
@@ -4437,7 +4439,7 @@ tcp_input_listener(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
* be to set the "tcp_syn_defense" flag now.
*/
TCP_STAT(tcps, tcp_listendropq0);
- listener->tcp_last_rcv_lbolt = lbolt64;
+ listener->tcp_last_rcv_lbolt = ddi_get_lbolt64();
if (!tcp_drop_q0(listener)) {
mutex_exit(&listener->tcp_eager_lock);
BUMP_MIB(&tcps->tcps_mib, tcpListenDropQ0);
@@ -6689,7 +6691,7 @@ tcp_init_values(tcp_t *tcp)
tcp->tcp_rto = tcps->tcps_rexmit_interval_min;
tcp->tcp_timer_backoff = 0;
tcp->tcp_ms_we_have_waited = 0;
- tcp->tcp_last_recv_time = lbolt;
+ tcp->tcp_last_recv_time = ddi_get_lbolt();
tcp->tcp_cwnd_max = tcps->tcps_cwnd_max_;
tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
tcp->tcp_snd_burst = TCP_CWND_INFINITE;
@@ -7221,7 +7223,7 @@ tcp_keepalive_killer(void *arg)
return;
}
- idletime = TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time);
+ idletime = TICK_TO_MSEC(ddi_get_lbolt() - tcp->tcp_last_recv_time);
/*
* If we have not heard from the other side for a long
* time, kill the connection unless the keepalive abort
@@ -7650,7 +7652,7 @@ tcp_create_common(cred_t *credp, boolean_t isv6, boolean_t issocket,
crhold(credp);
connp->conn_cred = credp;
connp->conn_cpid = curproc->p_pid;
- connp->conn_open_time = lbolt64;
+ connp->conn_open_time = ddi_get_lbolt64();
connp->conn_zoneid = zoneid;
/* conn_allzones can not be set this early, hence no IPCL_ZONEID */
@@ -9246,7 +9248,7 @@ tcp_sack_rxmit(tcp_t *tcp, uint_t *flags)
/*
* Update the send timestamp to avoid false retransmission.
*/
- snxt_mp->b_prev = (mblk_t *)lbolt;
+ snxt_mp->b_prev = (mblk_t *)ddi_get_lbolt();
BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs);
UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, seg_len);
@@ -9323,7 +9325,7 @@ tcp_ss_rexmit(tcp_t *tcp)
* Update the send timestamp to avoid false
* retransmission.
*/
- old_snxt_mp->b_prev = (mblk_t *)lbolt;
+ old_snxt_mp->b_prev = (mblk_t *)ddi_get_lbolt();
BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs);
UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, cnt);
@@ -9410,7 +9412,7 @@ tcp_process_options(tcp_t *tcp, tcpha_t *tcpha)
tcp->tcp_snd_ts_ok = B_TRUE;
tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
- tcp->tcp_last_rcv_lbolt = lbolt64;
+ tcp->tcp_last_rcv_lbolt = ddi_get_lbolt64();
ASSERT(OK_32PTR(tmp_tcph));
ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);
@@ -9648,7 +9650,7 @@ tcp_send_conn_ind(void *arg, mblk_t *mp, void *arg2)
if (listener->tcp_syn_defense &&
listener->tcp_syn_rcvd_timeout <=
(tcps->tcps_conn_req_max_q0 >> 5) &&
- 10*MINUTES < TICK_TO_MSEC(lbolt64 -
+ 10*MINUTES < TICK_TO_MSEC(ddi_get_lbolt64() -
listener->tcp_last_rcv_lbolt)) {
/*
* Turn off the defense mode if we
@@ -9893,7 +9895,7 @@ tcp_input_data(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
* But that should not cause any harm. And it is updated for
* all kinds of incoming segments, not only for data segments.
*/
- tcp->tcp_last_recv_time = lbolt;
+ tcp->tcp_last_recv_time = LBOLT_FASTPATH;
}
flags = (unsigned int)tcpha->tha_flags & 0xFF;
@@ -10638,7 +10640,7 @@ ok:;
TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) &&
SEQ_LEQ(seg_seq, tcp->tcp_rack)) {
tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
- tcp->tcp_last_rcv_lbolt = lbolt64;
+ tcp->tcp_last_rcv_lbolt = ddi_get_lbolt64();
}
if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) {
@@ -11660,7 +11662,7 @@ process_ack:
if (tcp->tcp_snd_ts_ok) {
/* Ignore zero timestamp echo-reply. */
if (tcpopt.tcp_opt_ts_ecr != 0) {
- tcp_set_rto(tcp, (int32_t)lbolt -
+ tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH -
(int32_t)tcpopt.tcp_opt_ts_ecr);
}
@@ -11682,7 +11684,7 @@ process_ack:
*/
if ((mp1->b_next != NULL) &&
SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next)))
- tcp_set_rto(tcp, (int32_t)lbolt -
+ tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH -
(int32_t)(intptr_t)mp1->b_prev);
else
BUMP_MIB(&tcps->tcps_mib, tcpRttNoUpdate);
@@ -11713,7 +11715,8 @@ process_ack:
*/
if (SEQ_GT(seg_ack,
(uint32_t)(uintptr_t)(mp1->b_next))) {
- mp1->b_prev = (mblk_t *)(uintptr_t)lbolt;
+ mp1->b_prev =
+ (mblk_t *)(uintptr_t)LBOLT_FASTPATH;
mp1->b_next = NULL;
}
break;
@@ -12211,7 +12214,8 @@ xmit_check:
B_TRUE);
if (mp1 != NULL) {
- tcp->tcp_xmit_head->b_prev = (mblk_t *)lbolt;
+ tcp->tcp_xmit_head->b_prev =
+ (mblk_t *)LBOLT_FASTPATH;
tcp->tcp_csuna = tcp->tcp_snxt;
BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs);
UPDATE_MIB(&tcps->tcps_mib,
@@ -12246,7 +12250,8 @@ xmit_check:
* limited transmitted segment's ACK gets back.
*/
if (tcp->tcp_xmit_head != NULL)
- tcp->tcp_xmit_head->b_prev = (mblk_t *)lbolt;
+ tcp->tcp_xmit_head->b_prev =
+ (mblk_t *)LBOLT_FASTPATH;
}
/* Anything more to do? */
@@ -12407,8 +12412,8 @@ tcp_paws_check(tcp_t *tcp, tcpha_t *tcpha, tcp_opt_t *tcpoptp)
if ((flags & TH_RST) == 0 &&
TSTMP_LT(tcpoptp->tcp_opt_ts_val,
tcp->tcp_ts_recent)) {
- if (TSTMP_LT(lbolt64, tcp->tcp_last_rcv_lbolt +
- PAWS_TIMEOUT)) {
+ if (TSTMP_LT(LBOLT_FASTPATH,
+ tcp->tcp_last_rcv_lbolt + PAWS_TIMEOUT)) {
/* This segment is not acceptable. */
return (B_FALSE);
} else {
@@ -13368,7 +13373,7 @@ tcp_timer(void *arg)
BUMP_MIB(&tcps->tcps_mib, tcpTimRetrans);
if (!tcp->tcp_xmit_head)
break;
- time_to_wait = lbolt -
+ time_to_wait = ddi_get_lbolt() -
(clock_t)tcp->tcp_xmit_head->b_prev;
time_to_wait = tcp->tcp_rto -
TICK_TO_MSEC(time_to_wait);
@@ -13536,7 +13541,7 @@ tcp_timer(void *arg)
* time...
*/
if ((tcp->tcp_zero_win_probe == 0) ||
- (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >
+ (TICK_TO_MSEC(ddi_get_lbolt() - tcp->tcp_last_recv_time) >
second_threshold)) {
BUMP_MIB(&tcps->tcps_mib, tcpTimRetransDrop);
/*
@@ -13558,8 +13563,8 @@ tcp_timer(void *arg)
/*
* Set tcp_ms_we_have_waited to second_threshold
* so that in next timeout, we will do the above
- * check (lbolt - tcp_last_recv_time). This is
- * also to avoid overflow.
+ * check (ddi_get_lbolt() - tcp_last_recv_time).
+ * This is also to avoid overflow.
*
* We don't need to decrement tcp_timer_backoff
* to avoid overflow because it will be decremented
@@ -13635,7 +13640,7 @@ tcp_timer(void *arg)
mss = tcp->tcp_swnd;
if ((mp = tcp->tcp_xmit_head) != NULL)
- mp->b_prev = (mblk_t *)lbolt;
+ mp->b_prev = (mblk_t *)ddi_get_lbolt();
mp = tcp_xmit_mp(tcp, mp, mss, NULL, NULL, tcp->tcp_suna, B_TRUE, &mss,
B_TRUE);
@@ -14003,7 +14008,8 @@ tcp_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
* Reinitialize tcp_cwnd after idle.
*/
if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
- (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) {
+ (TICK_TO_MSEC(ddi_get_lbolt() - tcp->tcp_last_recv_time) >=
+ tcp->tcp_rto)) {
SET_TCP_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
}
@@ -14064,7 +14070,7 @@ tcp_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
if ((mp1 = dupb(mp)) == 0)
goto no_memory;
- mp->b_prev = (mblk_t *)(uintptr_t)lbolt;
+ mp->b_prev = (mblk_t *)(uintptr_t)ddi_get_lbolt();
mp->b_next = (mblk_t *)(uintptr_t)snxt;
/* adjust tcp header information */
@@ -14119,7 +14125,9 @@ tcp_output(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
/* Fill in the timestamp option. */
if (tcp->tcp_snd_ts_ok) {
- U32_TO_BE32((uint32_t)lbolt,
+ uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
+
+ U32_TO_BE32(llbolt,
(char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
U32_TO_BE32(tcp->tcp_ts_recent,
(char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
@@ -15552,7 +15560,8 @@ data_null:
}
if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
- (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) {
+ (TICK_TO_MSEC((clock_t)LBOLT_FASTPATH - tcp->tcp_last_recv_time) >=
+ tcp->tcp_rto)) {
SET_TCP_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
}
if (tcpstate == TCPS_SYN_RCVD) {
@@ -15634,7 +15643,7 @@ data_null:
}
}
- local_time = (mblk_t *)lbolt;
+ local_time = (mblk_t *)LBOLT_FASTPATH;
/*
* "Our" Nagle Algorithm. This is not the same as in the old
@@ -16795,7 +16804,9 @@ tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq, uint32_t ack, int ctl)
}
if (ctl & TH_ACK) {
if (tcp->tcp_snd_ts_ok) {
- U32_TO_BE32(lbolt,
+ uint32_t llbolt = (uint32_t)ddi_get_lbolt();
+
+ U32_TO_BE32(llbolt,
(char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
U32_TO_BE32(tcp->tcp_ts_recent,
(char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
@@ -16838,7 +16849,7 @@ tcp_send_rst_chk(tcp_stack_t *tcps)
* limited.
*/
if (tcps->tcps_rst_sent_rate_enabled != 0) {
- now = lbolt;
+ now = ddi_get_lbolt();
/* lbolt can wrap around. */
if ((tcps->tcps_last_rst_intrvl > now) ||
(TICK_TO_MSEC(now - tcps->tcps_last_rst_intrvl) >
@@ -17482,7 +17493,8 @@ tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send, int32_t *offset,
flags = TH_SYN;
if (tcp->tcp_snd_ts_ok) {
- uint32_t llbolt = (uint32_t)lbolt;
+ uint32_t llbolt =
+ (uint32_t)ddi_get_lbolt();
wptr = mp1->b_wptr;
wptr[0] = TCPOPT_NOP;
@@ -17619,7 +17631,7 @@ tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send, int32_t *offset,
if (tcp->tcp_snd_ts_ok) {
if (tcp->tcp_state != TCPS_SYN_SENT) {
- uint32_t llbolt = (uint32_t)lbolt;
+ uint32_t llbolt = (uint32_t)ddi_get_lbolt();
U32_TO_BE32(llbolt,
(char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
@@ -17843,7 +17855,7 @@ tcp_ack_mp(tcp_t *tcp)
/* fill in timestamp option if in use */
if (tcp->tcp_snd_ts_ok) {
- uint32_t llbolt = (uint32_t)lbolt;
+ uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
U32_TO_BE32(llbolt,
(char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
@@ -19339,7 +19351,7 @@ tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp, uint32_t seg_seq,
TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) &&
SEQ_LEQ(seg_seq, tcp->tcp_rack)) {
tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
- tcp->tcp_last_rcv_lbolt = lbolt64;
+ tcp->tcp_last_rcv_lbolt = ddi_get_lbolt64();
}
if (seg_seq != tcp->tcp_rnxt && seg_len > 0) {
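tcp.c is the one file above that does not route every read through ddi_get_lbolt(): with <sys/clock_impl.h> newly included, reads on per-segment hot paths (tcp_input_data(), the RTT updates, timestamp options, tcp_ack_mp()) use the LBOLT_FASTPATH macro, while timers, keepalive, and connection setup keep the DDI calls. Judging only from this patch, LBOLT_FASTPATH is intended as a cheaper tick read for performance-critical paths; a sketch of the split as the hunks apply it:

	#include <sys/clock_impl.h>	/* LBOLT_FASTPATH */

	/* hot path: per-segment bookkeeping */
	tcp->tcp_last_recv_time = LBOLT_FASTPATH;

	/* slow path: timers and connection setup */
	tcp->tcp_last_recv_time = ddi_get_lbolt();
	connp->conn_open_time = ddi_get_lbolt64();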
diff --git a/usr/src/uts/common/inet/udp/udp.c b/usr/src/uts/common/inet/udp/udp.c
index e18fc57f40..de54c3a5ab 100644
--- a/usr/src/uts/common/inet/udp/udp.c
+++ b/usr/src/uts/common/inet/udp/udp.c
@@ -5148,7 +5148,7 @@ udp_do_open(cred_t *credp, boolean_t isv6, int flags)
crhold(credp);
connp->conn_cred = credp;
connp->conn_cpid = curproc->p_pid;
- connp->conn_open_time = lbolt64;
+ connp->conn_open_time = ddi_get_lbolt64();
/* Cache things in ixa without an extra refhold */
connp->conn_ixa->ixa_cred = connp->conn_cred;
connp->conn_ixa->ixa_cpid = connp->conn_cpid;
diff --git a/usr/src/uts/common/io/bridge.c b/usr/src/uts/common/io/bridge.c
index 1382dd9d0b..4d3110bdd1 100644
--- a/usr/src/uts/common/io/bridge.c
+++ b/usr/src/uts/common/io/bridge.c
@@ -429,7 +429,7 @@ bridge_ioc_listfwd(void *karg, intptr_t arg, int mode, cred_t *cred, int *rvalp)
bcopy(bfp->bf_dest, blf->blf_dest, ETHERADDRL);
blf->blf_trill_nick = bfp->bf_trill_nick;
blf->blf_ms_age =
- drv_hztousec(lbolt - bfp->bf_lastheard) / 1000;
+ drv_hztousec(ddi_get_lbolt() - bfp->bf_lastheard) / 1000;
blf->blf_is_local =
(bfp->bf_flags & BFF_LOCALADDR) != 0;
blf->blf_linkid = bfp->bf_links[0]->bl_linkid;
@@ -904,7 +904,7 @@ fwd_alloc(const uint8_t *addr, uint_t nlinks, uint16_t nick)
KM_NOSLEEP);
if (bfp != NULL) {
bcopy(addr, bfp->bf_dest, ETHERADDRL);
- bfp->bf_lastheard = lbolt;
+ bfp->bf_lastheard = ddi_get_lbolt();
bfp->bf_maxlinks = nlinks;
bfp->bf_links = (bridge_link_t **)(bfp + 1);
bfp->bf_trill_nick = nick;
@@ -1436,7 +1436,7 @@ bridge_timer(void *arg)
while ((bfp = bfnext) != NULL) {
bfnext = AVL_NEXT(&bip->bi_fwd, bfp);
if (!(bfp->bf_flags & BFF_LOCALADDR) &&
- (lbolt - bfp->bf_lastheard) > age_limit) {
+ (ddi_get_lbolt() - bfp->bf_lastheard) > age_limit) {
ASSERT(bfp->bf_flags & BFF_INTREE);
avl_remove(&bip->bi_fwd, bfp);
bfp->bf_flags &= ~BFF_INTREE;
@@ -1608,7 +1608,7 @@ bridge_learn(bridge_link_t *blp, const uint8_t *saddr, uint16_t ingress_nick,
if (bfp->bf_trill_nick == ingress_nick) {
for (i = 0; i < bfp->bf_nlinks; i++) {
if (bfp->bf_links[i] == blp) {
- bfp->bf_lastheard = lbolt;
+ bfp->bf_lastheard = ddi_get_lbolt();
fwd_unref(bfp);
return;
}
diff --git a/usr/src/uts/common/io/bscbus.c b/usr/src/uts/common/io/bscbus.c
index 188770f08f..b1f1ec9567 100644
--- a/usr/src/uts/common/io/bscbus.c
+++ b/usr/src/uts/common/io/bscbus.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* The "bscbus" driver provides access to the LOMlite2 virtual registers,
@@ -927,7 +927,6 @@ bscbus_cmd(HANDLE_TYPE *hdlp, ptrdiff_t vreg, uint_t val, uint_t cmd)
{
struct bscbus_channel_state *csp;
clock_t start;
- clock_t tick;
uint8_t status;
/*
@@ -1018,8 +1017,8 @@ bscbus_cmd(HANDLE_TYPE *hdlp, ptrdiff_t vreg, uint_t val, uint_t cmd)
(csp->cmdstate != BSCBUS_CMDSTATE_ERROR)) {
ASSERT(csp->cmdstate != BSCBUS_CMDSTATE_IDLE);
- tick = ddi_get_lbolt() + csp->poll_hz;
- if ((cv_timedwait(csp->lo_cv, csp->lo_mutex, tick) == -1) &&
+ if ((cv_reltimedwait(csp->lo_cv, csp->lo_mutex,
+ csp->poll_hz, TR_CLOCK_TICK) == -1) &&
csp->cmdstate != BSCBUS_CMDSTATE_READY &&
csp->cmdstate != BSCBUS_CMDSTATE_ERROR) {
if (!csp->interrupt_failed) {
diff --git a/usr/src/uts/common/io/bscv.c b/usr/src/uts/common/io/bscv.c
index 77de4d47b7..829268c822 100644
--- a/usr/src/uts/common/io/bscv.c
+++ b/usr/src/uts/common/io/bscv.c
@@ -3128,9 +3128,8 @@ bscv_event_daemon(void *arg)
if (ssp->event_sleep) {
ssp->task_flags |= TASK_SLEEPING_FLG;
/* Sleep until there is something to do */
- (void) cv_timedwait(&ssp->task_cv,
- &ssp->task_mu,
- poll_period + ddi_get_lbolt());
+ (void) cv_reltimedwait(&ssp->task_cv,
+ &ssp->task_mu, poll_period, TR_CLOCK_TICK);
ssp->task_flags &= ~TASK_SLEEPING_FLG;
ssp->event_sleep = B_FALSE;
}
diff --git a/usr/src/uts/common/io/comstar/port/fcoet/fcoet.c b/usr/src/uts/common/io/comstar/port/fcoet/fcoet.c
index 5478888c16..6271f35b86 100644
--- a/usr/src/uts/common/io/comstar/port/fcoet/fcoet.c
+++ b/usr/src/uts/common/io/comstar/port/fcoet/fcoet.c
@@ -759,9 +759,8 @@ fcoet_watchdog(void *arg)
}
atomic_or_32(&ss->ss_flags, SS_FLAG_DOG_WAITING);
- (void) cv_timedwait(&ss->ss_watch_cv,
- &ss->ss_watch_mutex, ddi_get_lbolt() +
- (clock_t)tmp_delay);
+ (void) cv_reltimedwait(&ss->ss_watch_cv, &ss->ss_watch_mutex,
+ (clock_t)tmp_delay, TR_CLOCK_TICK);
atomic_and_32(&ss->ss_flags, ~SS_FLAG_DOG_WAITING);
}
diff --git a/usr/src/uts/common/io/comstar/port/fct/discovery.c b/usr/src/uts/common/io/comstar/port/fct/discovery.c
index 96a80c2558..a9e16bb39d 100644
--- a/usr/src/uts/common/io/comstar/port/fct/discovery.c
+++ b/usr/src/uts/common/io/comstar/port/fct/discovery.c
@@ -180,8 +180,8 @@ fct_port_worker(void *arg)
}
atomic_or_32(&iport->iport_flags,
IPORT_WORKER_DOING_TIMEDWAIT);
- (void) cv_timedwait(&iport->iport_worker_cv,
- &iport->iport_worker_lock, ddi_get_lbolt() + dl);
+ (void) cv_reltimedwait(&iport->iport_worker_cv,
+ &iport->iport_worker_lock, dl, TR_CLOCK_TICK);
atomic_and_32(&iport->iport_flags,
~IPORT_WORKER_DOING_TIMEDWAIT);
} else {
@@ -192,9 +192,9 @@ fct_port_worker(void *arg)
if (tmp_delay < 0) {
tmp_delay = (int64_t)short_delay;
}
- (void) cv_timedwait(&iport->iport_worker_cv,
- &iport->iport_worker_lock, ddi_get_lbolt() +
- (clock_t)tmp_delay);
+ (void) cv_reltimedwait(&iport->iport_worker_cv,
+ &iport->iport_worker_lock, (clock_t)tmp_delay,
+ TR_CLOCK_TICK);
atomic_and_32(&iport->iport_flags,
~IPORT_WORKER_DOING_WAIT);
}
diff --git a/usr/src/uts/common/io/comstar/port/iscsit/iscsit_isns.c b/usr/src/uts/common/io/comstar/port/iscsit/iscsit_isns.c
index c74139877d..a8714f88a5 100644
--- a/usr/src/uts/common/io/comstar/port/iscsit/iscsit_isns.c
+++ b/usr/src/uts/common/io/comstar/port/iscsit/iscsit_isns.c
@@ -1101,8 +1101,8 @@ isnst_monitor(void *arg)
break;
DTRACE_PROBE(iscsit__isns__monitor__sleep);
- (void) cv_timedwait(&isns_idle_cv, &isns_monitor_mutex,
- ddi_get_lbolt() + monitor_idle_interval);
+ (void) cv_reltimedwait(&isns_idle_cv, &isns_monitor_mutex,
+ monitor_idle_interval, TR_CLOCK_TICK);
DTRACE_PROBE1(iscsit__isns__monitor__wakeup,
boolean_t, isns_monitor_thr_running);
}
diff --git a/usr/src/uts/common/io/comstar/port/qlt/qlt.c b/usr/src/uts/common/io/comstar/port/qlt/qlt.c
index ae1ed80a0b..4f4e078cd7 100644
--- a/usr/src/uts/common/io/comstar/port/qlt/qlt.c
+++ b/usr/src/uts/common/io/comstar/port/qlt/qlt.c
@@ -2446,8 +2446,8 @@ qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
qlt_mbox_wait_loop:;
/* Wait for mailbox command completion */
- if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
- + drv_usectohz(MBOX_TIMEOUT)) < 0) {
+ if (cv_reltimedwait(&qlt->mbox_cv, &qlt->mbox_lock,
+ drv_usectohz(MBOX_TIMEOUT), TR_CLOCK_TICK) < 0) {
(void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
"cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
info[79] = 0;
@@ -3147,9 +3147,9 @@ qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
qlt->rp_id_in_dereg = rp->rp_id;
qlt_submit_preq_entries(qlt, 1);
- dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
- if (cv_timedwait(&qlt->rp_dereg_cv,
- &qlt->preq_lock, dereg_req_timer) > 0) {
+ dereg_req_timer = drv_usectohz(DEREG_RP_TIMEOUT);
+ if (cv_reltimedwait(&qlt->rp_dereg_cv, &qlt->preq_lock,
+ dereg_req_timer, TR_CLOCK_TICK) > 0) {
ret = qlt->rp_dereg_status;
} else {
ret = FCT_BUSY;
@@ -4874,13 +4874,14 @@ qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
*/
mutex_enter(&qlt->mbox_lock);
if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
+ clock_t timeout = drv_usectohz(1000000);
/*
* Wait to grab the mailboxes
*/
for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
(qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
- (void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
- ddi_get_lbolt() + drv_usectohz(1000000));
+ (void) cv_reltimedwait(&qlt->mbox_cv, &qlt->mbox_lock,
+ timeout, TR_CLOCK_TICK);
if (retries > 5) {
mutex_exit(&qlt->mbox_lock);
EL(qlt, "can't drain out mailbox commands\n");
diff --git a/usr/src/uts/common/io/comstar/stmf/stmf.c b/usr/src/uts/common/io/comstar/stmf/stmf.c
index 0a89a6dc6b..3e67d5fc9e 100644
--- a/usr/src/uts/common/io/comstar/stmf/stmf.c
+++ b/usr/src/uts/common/io/comstar/stmf/stmf.c
@@ -5713,7 +5713,7 @@ stmf_worker_task(void *arg)
stmf_data_buf_t *dbuf;
stmf_lu_t *lu;
clock_t wait_timer = 0;
- clock_t wait_ticks;
+ clock_t wait_ticks, wait_delta = 0;
uint32_t old, new;
uint8_t curcmd;
uint8_t abort_free;
@@ -5739,6 +5739,7 @@ stmf_worker_loop:;
dec_qdepth = 0;
if (wait_timer && (ddi_get_lbolt() >= wait_timer)) {
wait_timer = 0;
+ wait_delta = 0;
if (w->worker_wait_head) {
ASSERT(w->worker_wait_tail);
if (w->worker_task_head == NULL)
@@ -5815,6 +5816,7 @@ out_itask_flag_loop:
w->worker_wait_tail = itask;
if (wait_timer == 0) {
wait_timer = ddi_get_lbolt() + wait_ticks;
+ wait_delta = wait_ticks;
}
} else if ((--(itask->itask_ncmds)) != 0) {
itask->itask_worker_next = NULL;
@@ -5900,12 +5902,15 @@ out_itask_flag_loop:
if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) {
if (w->worker_ref_count == 0)
goto stmf_worker_loop;
- else
+ else {
wait_timer = ddi_get_lbolt() + 1;
+ wait_delta = 1;
+ }
}
w->worker_flags &= ~STMF_WORKER_ACTIVE;
if (wait_timer) {
- (void) cv_timedwait(&w->worker_cv, &w->worker_lock, wait_timer);
+ (void) cv_reltimedwait(&w->worker_cv, &w->worker_lock,
+ wait_delta, TR_CLOCK_TICK);
} else {
cv_wait(&w->worker_cv, &w->worker_lock);
}
@@ -6902,8 +6907,8 @@ stmf_svc_loop:
}
stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE;
- (void) cv_timedwait(&stmf_state.stmf_cv, &stmf_state.stmf_lock,
- ddi_get_lbolt() + td);
+ (void) cv_reltimedwait(&stmf_state.stmf_cv,
+ &stmf_state.stmf_lock, td, TR_CLOCK_TICK);
stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE;
}
goto stmf_svc_loop;
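
The stmf.c worker is the case where both representations survive: the loop still polls an absolute deadline (wait_timer, built from ddi_get_lbolt()), so the change keeps that value for the comparison and adds wait_delta, the same interval in relative form, for cv_reltimedwait(). Roughly the following shape, using the headers from the sketch after the bscbus.c diff; worker_cv, worker_lock, need_retry and worker_loop are illustrative names, not the driver's own:

    static kcondvar_t worker_cv;
    static kmutex_t worker_lock;
    static boolean_t need_retry;            /* illustrative: some task wants a retry */

    static void
    worker_loop(void)
    {
            clock_t deadline = 0;           /* absolute ticks, for the expiry poll below */
            clock_t delta = 0;              /* same interval in relative form, for the wait */

            mutex_enter(&worker_lock);
            for (;;) {
                    if (deadline != 0 && ddi_get_lbolt() >= deadline) {
                            deadline = 0;
                            delta = 0;
                            /* requeue whatever was parked on the timer */
                    }

                    /* ... drain the task list; a task needing a retry sets need_retry ... */

                    if (need_retry && deadline == 0) {
                            delta = drv_usectohz(10000);    /* 10 ms, illustrative */
                            deadline = ddi_get_lbolt() + delta;
                    }

                    if (deadline != 0)
                            (void) cv_reltimedwait(&worker_cv, &worker_lock,
                                delta, TR_CLOCK_TICK);
                    else
                            cv_wait(&worker_cv, &worker_lock);
            }
    }
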
diff --git a/usr/src/uts/common/io/drm/drmP.h b/usr/src/uts/common/io/drm/drmP.h
index e04dc759c7..4c6934db87 100644
--- a/usr/src/uts/common/io/drm/drmP.h
+++ b/usr/src/uts/common/io/drm/drmP.h
@@ -222,22 +222,22 @@ typedef struct drm_wait_queue {
mutex_exit(&(q)->lock); \
}
-#define jiffies ddi_get_lbolt()
-#define DRM_WAIT_ON(ret, q, timeout, condition) \
-mutex_enter(&(q)->lock); \
-while (!(condition)) { \
- ret = cv_timedwait_sig(&(q)->cv, &(q)->lock, jiffies + timeout); \
- if (ret == -1) { \
- ret = EBUSY; \
- break; \
- } else if (ret == 0) { \
- ret = EINTR; \
- break; \
- } else { \
- ret = 0; \
- } \
-} \
-mutex_exit(&(q)->lock);
+#define DRM_WAIT_ON(ret, q, timeout, condition) \
+ mutex_enter(&(q)->lock); \
+ while (!(condition)) { \
+ ret = cv_reltimedwait_sig(&(q)->cv, &(q)->lock, timeout,\
+ TR_CLOCK_TICK); \
+ if (ret == -1) { \
+ ret = EBUSY; \
+ break; \
+ } else if (ret == 0) { \
+ ret = EINTR; \
+ break; \
+ } else { \
+ ret = 0; \
+ } \
+ } \
+ mutex_exit(&(q)->lock);
#define DRM_GETSAREA() \
{ \
@@ -397,7 +397,7 @@ typedef struct drm_lock_data {
/* Uniq. identifier of holding process */
kcondvar_t lock_cv; /* lock queue - SOLARIS Specific */
kmutex_t lock_mutex; /* lock - SOLARIS Specific */
- unsigned long lock_time; /* Time of last lock in jiffies */
+ unsigned long lock_time; /* Time of last lock in clock ticks */
} drm_lock_data_t;
/*
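
The rewritten DRM_WAIT_ON macro uses the signal-aware cv_reltimedwait_sig(), whose return value carries three cases rather than two: -1 when the interval expires, 0 when the wait is broken by a signal, and a positive value on a normal wakeup. A small sketch of that mapping outside the macro, with the same headers as the earlier sketches; wq, timeout and done are illustrative names:

    int ret = 0;
    clock_t r;

    mutex_enter(&wq->lock);
    while (!done) {
            r = cv_reltimedwait_sig(&wq->cv, &wq->lock, timeout, TR_CLOCK_TICK);
            if (r == -1) {                  /* interval expired */
                    ret = EBUSY;
                    break;
            } else if (r == 0) {            /* interrupted by a signal */
                    ret = EINTR;
                    break;
            }
            /* r > 0: woken by cv_signal()/cv_broadcast(); re-test the condition. */
    }
    mutex_exit(&wq->lock);
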
diff --git a/usr/src/uts/common/io/drm/drm_lock.c b/usr/src/uts/common/io/drm/drm_lock.c
index 69a43a7598..6930a47e03 100644
--- a/usr/src/uts/common/io/drm/drm_lock.c
+++ b/usr/src/uts/common/io/drm/drm_lock.c
@@ -140,7 +140,7 @@ drm_lock(DRM_IOCTL_ARGS)
for (;;) {
if (drm_lock_take(&dev->lock, lock.context)) {
dev->lock.filp = fpriv;
- dev->lock.lock_time = jiffies;
+ dev->lock.lock_time = ddi_get_lbolt();
break; /* Got lock */
}
ret = cv_wait_sig(&(dev->lock.lock_cv),
diff --git a/usr/src/uts/common/io/e1000g/e1000g_tx.c b/usr/src/uts/common/io/e1000g/e1000g_tx.c
index 1b752b8100..9d58d9b127 100644
--- a/usr/src/uts/common/io/e1000g/e1000g_tx.c
+++ b/usr/src/uts/common/io/e1000g/e1000g_tx.c
@@ -733,7 +733,7 @@ e1000g_fill_tx_ring(e1000g_tx_ring_t *tx_ring, LIST_DESCRIBER *pending_list,
first_packet = NULL;
}
- packet->tickstamp = lbolt64;
+ packet->tickstamp = ddi_get_lbolt64();
previous_packet = packet;
packet = (p_tx_sw_packet_t)
@@ -1053,7 +1053,7 @@ e1000g_recycle(e1000g_tx_ring_t *tx_ring)
* with then there is no reason to check the rest
* of the queue.
*/
- delta = lbolt64 - packet->tickstamp;
+ delta = ddi_get_lbolt64() - packet->tickstamp;
break;
}
}
diff --git a/usr/src/uts/common/io/ecpp.c b/usr/src/uts/common/io/ecpp.c
index d2aa9a05f1..c7c03826b5 100644
--- a/usr/src/uts/common/io/ecpp.c
+++ b/usr/src/uts/common/io/ecpp.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -774,9 +774,9 @@ ecpp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
* Wait if there's any activity on the port
*/
if ((pp->e_busy == ECPP_BUSY) || (pp->e_busy == ECPP_FLUSH)) {
- (void) cv_timedwait(&pp->pport_cv, &pp->umutex,
- ddi_get_lbolt() +
- SUSPEND_TOUT * drv_usectohz(1000000));
+ (void) cv_reltimedwait(&pp->pport_cv, &pp->umutex,
+ SUSPEND_TOUT * drv_usectohz(1000000),
+ TR_CLOCK_TICK);
if ((pp->e_busy == ECPP_BUSY) ||
(pp->e_busy == ECPP_FLUSH)) {
pp->suspended = FALSE;
diff --git a/usr/src/uts/common/io/emul64_bsd.c b/usr/src/uts/common/io/emul64_bsd.c
index 8077ec90da..3d91f696bf 100644
--- a/usr/src/uts/common/io/emul64_bsd.c
+++ b/usr/src/uts/common/io/emul64_bsd.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1369,8 +1369,8 @@ emul64_yield_check()
ticks = drv_usectohz(emul64_yield_length);
if (ticks == 0)
ticks = 1;
- (void) cv_timedwait(&emul64_yield_cv,
- &emul64_yield_mutex, ddi_get_lbolt() + ticks);
+ (void) cv_reltimedwait(&emul64_yield_cv, &emul64_yield_mutex,
+ ticks, TR_CLOCK_TICK);
emul64_io_count = 0;
emul64_waiting = FALSE;
diff --git a/usr/src/uts/common/io/fcoe/fcoe_fc.c b/usr/src/uts/common/io/fcoe/fcoe_fc.c
index ba7b6cbad5..1aab1e5128 100644
--- a/usr/src/uts/common/io/fcoe/fcoe_fc.c
+++ b/usr/src/uts/common/io/fcoe/fcoe_fc.c
@@ -210,8 +210,8 @@ tx_frame:
MAC_TX_NO_ENQUEUE, &ret_mblk);
if (ret_cookie != NULL) {
mutex_enter(&mac->fm_mutex);
- (void) cv_timedwait(&mac->fm_tx_cv, &mac->fm_mutex,
- ddi_get_lbolt() + drv_usectohz(100000));
+ (void) cv_reltimedwait(&mac->fm_tx_cv, &mac->fm_mutex,
+ drv_usectohz(100000), TR_CLOCK_TICK);
mutex_exit(&mac->fm_mutex);
if (mac->fm_state == FCOE_MAC_STATE_OFFLINE) {
diff --git a/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c b/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c
index 48e17432cc..fa7c2c44ce 100644
--- a/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c
+++ b/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c
@@ -2481,7 +2481,7 @@ ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
ql_adapter_state_t *ha;
int rval;
- clock_t timer;
+ clock_t timer = drv_usectohz(30000000);
ls_code_t els;
la_els_rjt_t rjt;
ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
@@ -2501,11 +2501,8 @@ ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
/* 30 seconds from now */
- timer = ddi_get_lbolt();
- timer += drv_usectohz(30000000);
-
- if (cv_timedwait(&ha->pha->cv_dr_suspended,
- &ha->pha->task_daemon_mutex, timer) == -1) {
+ if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
+ &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
/*
* The timeout time 'timer' was
* reached without the condition
@@ -2832,7 +2829,7 @@ static int
ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
{
ql_adapter_state_t *ha;
- clock_t timer;
+ clock_t timer = drv_usectohz(30000000);
int rval = FC_SUCCESS;
ha = ql_fca_handle_to_state(fca_handle);
@@ -2852,11 +2849,8 @@ ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
/* 30 seconds from now */
- timer = ddi_get_lbolt();
- timer += drv_usectohz(30000000);
-
- if (cv_timedwait(&ha->pha->cv_dr_suspended,
- &ha->pha->task_daemon_mutex, timer) == -1) {
+ if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
+ &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
/*
* The timeout time 'timer' was
* reached without the condition
@@ -11474,7 +11468,7 @@ static int
ql_dump_firmware(ql_adapter_state_t *vha)
{
int rval;
- clock_t timer;
+ clock_t timer = drv_usectohz(30000000);
ql_adapter_state_t *ha = vha->pha;
QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
@@ -11513,11 +11507,8 @@ ql_dump_firmware(ql_adapter_state_t *vha)
ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
/* 30 seconds from now */
- timer = ddi_get_lbolt();
- timer += drv_usectohz(30000000);
-
- if (cv_timedwait(&ha->cv_dr_suspended,
- &ha->task_daemon_mutex, timer) == -1) {
+ if (cv_reltimedwait(&ha->cv_dr_suspended,
+ &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
/*
* The timeout time 'timer' was
* reached without the condition
@@ -11593,18 +11584,15 @@ ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
if (lock_needed == TRUE) {
/* Acquire mailbox register lock. */
MBX_REGISTER_LOCK(ha);
-
+ timer = (ha->mcp->timeout + 2) * drv_usectohz(1000000);
/* Check for mailbox available, if not wait for signal. */
while (ha->mailbox_flags & MBX_BUSY_FLG) {
ha->mailbox_flags = (uint8_t)
(ha->mailbox_flags | MBX_WANT_FLG);
/* 30 seconds from now */
- timer = ddi_get_lbolt();
- timer += (ha->mcp->timeout + 2) *
- drv_usectohz(1000000);
- if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
- timer) == -1) {
+ if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
+ timer, TR_CLOCK_TICK) == -1) {
/*
* The timeout time 'timer' was
* reached without the condition
@@ -15619,7 +15607,7 @@ ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
- clock_t timer;
+ clock_t timer = 32 * drv_usectohz(1000000);
QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
@@ -15636,10 +15624,8 @@ ql_suspend_adapter(ql_adapter_state_t *ha)
(ha->mailbox_flags | MBX_WANT_FLG);
/* 30 seconds from now */
- timer = ddi_get_lbolt();
- timer += 32 * drv_usectohz(1000000);
- if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
- timer) == -1) {
+ if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
+ timer, TR_CLOCK_TICK) == -1) {
/* Release mailbox register lock. */
MBX_REGISTER_UNLOCK(ha);
diff --git a/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_mbx.c b/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_mbx.c
index 8771808a8d..db57b0408d 100644
--- a/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_mbx.c
+++ b/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_mbx.c
@@ -110,10 +110,9 @@ ql_mailbox_command(ql_adapter_state_t *vha, mbx_cmd_t *mcp)
}
/* Set timeout after command that is running. */
- timer = ddi_get_lbolt();
- timer += (mcp->timeout + 20) * drv_usectohz(1000000);
- cv_stat = cv_timedwait_sig(&ha->cv_mbx_wait,
- &ha->pha->mbx_mutex, timer);
+ timer = (mcp->timeout + 20) * drv_usectohz(1000000);
+ cv_stat = cv_reltimedwait_sig(&ha->cv_mbx_wait,
+ &ha->pha->mbx_mutex, timer, TR_CLOCK_TICK);
if (cv_stat == -1 || cv_stat == 0) {
/*
* The timeout time 'timer' was
@@ -162,14 +161,12 @@ ql_mailbox_command(ql_adapter_state_t *vha, mbx_cmd_t *mcp)
!(ha->task_daemon_flags & (TASK_THREAD_CALLED |
TASK_DAEMON_POWERING_DOWN)) &&
!ddi_in_panic()) {
+ timer = mcp->timeout * drv_usectohz(1000000);
while (!(ha->mailbox_flags & (MBX_INTERRUPT | MBX_ABORT)) &&
!(ha->task_daemon_flags & ISP_ABORT_NEEDED)) {
- /* 30 seconds from now */
- timer = ddi_get_lbolt();
- timer += mcp->timeout * drv_usectohz(1000000);
- if (cv_timedwait(&ha->cv_mbx_intr, &ha->pha->mbx_mutex,
- timer) == -1) {
+ if (cv_reltimedwait(&ha->cv_mbx_intr,
+ &ha->pha->mbx_mutex, timer, TR_CLOCK_TICK) == -1) {
/*
* The timeout time 'timer' was
* reached without the condition
@@ -3924,7 +3921,7 @@ ql_stop_firmware(ql_adapter_state_t *ha)
* ha: adapter state pointer.
* mem: pointer to dma memory object for command.
* dev: Device address (A0h or A2h).
- * addr: Data address on SFP EEPROM (0–255).
+ * addr: Data address on SFP EEPROM (0-255).
*
* Returns:
* ql local function return status code.
diff --git a/usr/src/uts/common/io/fibre-channel/ulp/fcp.c b/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
index 06ea627d6c..f423ccd4f6 100644
--- a/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
+++ b/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
@@ -10172,7 +10172,7 @@ fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
}
/* note the attach time */
- pptr->port_attach_time = lbolt64;
+ pptr->port_attach_time = ddi_get_lbolt64();
/* all done */
return (res);
@@ -15661,7 +15661,7 @@ fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
struct fcp_port *pptr = fcp_dip2port(parent);
reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
- (lbolt64 - pptr->port_attach_time);
+ (ddi_get_lbolt64() - pptr->port_attach_time);
if (reset_delay < 0) {
reset_delay = 0;
}
@@ -15706,7 +15706,7 @@ fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
ddi_get_lbolt() + (clock_t)reset_delay);
reset_delay =
(int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
- (lbolt64 - pptr->port_attach_time);
+ (ddi_get_lbolt64() - pptr->port_attach_time);
}
mutex_exit(&pptr->port_mutex);
/* drain taskq to make sure nodes are created */
diff --git a/usr/src/uts/common/io/ib/adapters/hermon/hermon_stats.c b/usr/src/uts/common/io/ib/adapters/hermon/hermon_stats.c
index 222d077eb5..bd35303075 100644
--- a/usr/src/uts/common/io/ib/adapters/hermon/hermon_stats.c
+++ b/usr/src/uts/common/io/ib/adapters/hermon/hermon_stats.c
@@ -729,6 +729,7 @@ hermon_kstat_perfcntr64_update_thread(void *arg)
hermon_state_t *state = (hermon_state_t *)arg;
hermon_ks_info_t *ksi = state->hs_ks_info;
uint_t i;
+ clock_t delta = drv_usectohz(1000000);
mutex_enter(&ksi->hki_perfcntr64_lock);
/*
@@ -743,9 +744,8 @@ hermon_kstat_perfcntr64_update_thread(void *arg)
}
}
/* sleep for a second */
- (void) cv_timedwait(&ksi->hki_perfcntr64_cv,
- &ksi->hki_perfcntr64_lock,
- ddi_get_lbolt() + drv_usectohz(1000000));
+ (void) cv_reltimedwait(&ksi->hki_perfcntr64_cv,
+ &ksi->hki_perfcntr64_lock, delta, TR_CLOCK_TICK);
}
ksi->hki_perfcntr64_flags = 0;
mutex_exit(&ksi->hki_perfcntr64_lock);
diff --git a/usr/src/uts/common/io/ib/clients/rds/rdsib_buf.c b/usr/src/uts/common/io/ib/clients/rds/rdsib_buf.c
index b8a0e2fb30..e2a7c43ada 100644
--- a/usr/src/uts/common/io/ib/clients/rds/rdsib_buf.c
+++ b/usr/src/uts/common/io/ib/clients/rds/rdsib_buf.c
@@ -963,8 +963,8 @@ rds_get_send_buf(rds_ep_t *ep, uint_t nbuf)
if (spool->pool_nfree < nbuf) {
/* wait for buffers to become available */
spool->pool_cv_count += nbuf;
- ret = cv_timedwait_sig(&spool->pool_cv, &spool->pool_lock,
- ddi_get_lbolt() + drv_usectohz(waittime));
+ ret = cv_reltimedwait_sig(&spool->pool_cv, &spool->pool_lock,
+ drv_usectohz(waittime), TR_CLOCK_TICK);
/* ret = cv_wait_sig(&spool->pool_cv, &spool->pool_lock); */
if (ret == 0) {
/* signal pending */
diff --git a/usr/src/uts/common/io/ib/mgt/ibdm/ibdm.c b/usr/src/uts/common/io/ib/mgt/ibdm/ibdm.c
index 8ac5b06efa..7f2abe7114 100644
--- a/usr/src/uts/common/io/ib/mgt/ibdm/ibdm.c
+++ b/usr/src/uts/common/io/ib/mgt/ibdm/ibdm.c
@@ -4702,13 +4702,14 @@ void
ibdm_ibnex_port_settle_wait(ib_guid_t hca_guid, int dft_wait)
{
time_t wait_time;
+ clock_t delta;
mutex_enter(&ibdm.ibdm_hl_mutex);
while ((wait_time = ibdm_get_waittime(hca_guid, dft_wait)) > 0) {
- (void) cv_timedwait(&ibdm.ibdm_port_settle_cv,
- &ibdm.ibdm_hl_mutex,
- ddi_get_lbolt() + drv_usectohz(wait_time * 1000000));
+ delta = drv_usectohz(wait_time * 1000000);
+ (void) cv_reltimedwait(&ibdm.ibdm_port_settle_cv,
+ &ibdm.ibdm_hl_mutex, delta, TR_CLOCK_TICK);
}
mutex_exit(&ibdm.ibdm_hl_mutex);
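
In ibdm the wait arrives as a count of seconds, so the conversion is a single drv_usectohz() call per loop iteration; that delta already covers the full interval and is passed straight to cv_reltimedwait() without any further scaling. A minimal sketch, reusing the headers from the earlier sketches; settle_cv, settle_lock and remaining_settle_time() are illustrative stand-ins:

    time_t wait_seconds;
    clock_t delta;

    mutex_enter(&settle_lock);
    while ((wait_seconds = remaining_settle_time()) > 0) {
            /* seconds -> microseconds -> ticks; the delta is the whole wait */
            delta = drv_usectohz(wait_seconds * 1000000);
            (void) cv_reltimedwait(&settle_cv, &settle_lock, delta,
                TR_CLOCK_TICK);
    }
    mutex_exit(&settle_lock);
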
diff --git a/usr/src/uts/common/io/idm/idm_impl.c b/usr/src/uts/common/io/idm/idm_impl.c
index 273465734a..a271c22036 100644
--- a/usr/src/uts/common/io/idm/idm_impl.c
+++ b/usr/src/uts/common/io/idm/idm_impl.c
@@ -1079,7 +1079,7 @@ void
idm_wd_thread(void *arg)
{
idm_conn_t *ic;
- clock_t wake_time;
+ clock_t wake_time = SEC_TO_TICK(IDM_WD_INTERVAL);
clock_t idle_time;
/* Record the thread id for thread_join() */
@@ -1164,9 +1164,8 @@ idm_wd_thread(void *arg)
mutex_exit(&ic->ic_state_mutex);
}
- wake_time = ddi_get_lbolt() + SEC_TO_TICK(IDM_WD_INTERVAL);
- (void) cv_timedwait(&idm.idm_wd_cv, &idm.idm_global_mutex,
- wake_time);
+ (void) cv_reltimedwait(&idm.idm_wd_cv, &idm.idm_global_mutex,
+ wake_time, TR_CLOCK_TICK);
}
mutex_exit(&idm.idm_global_mutex);
diff --git a/usr/src/uts/common/io/ipw/ipw2100.c b/usr/src/uts/common/io/ipw/ipw2100.c
index 4e22a3b195..8c6bdbbe6f 100644
--- a/usr/src/uts/common/io/ipw/ipw2100.c
+++ b/usr/src/uts/common/io/ipw/ipw2100.c
@@ -1071,13 +1071,14 @@ ipw2100_cmd(struct ipw2100_softc *sc, uint32_t type, void *buf, size_t len)
/*
* wait for command done
*/
+ clk = drv_usectohz(1000000); /* 1 second */
mutex_enter(&sc->sc_ilock);
while (sc->sc_done == 0) {
/*
* pending for the response
*/
- clk = ddi_get_lbolt() + drv_usectohz(1000000); /* 1 second */
- if (cv_timedwait(&sc->sc_cmd_cond, &sc->sc_ilock, clk) < 0)
+ if (cv_reltimedwait(&sc->sc_cmd_cond, &sc->sc_ilock,
+ clk, TR_CLOCK_TICK) < 0)
break;
}
mutex_exit(&sc->sc_ilock);
diff --git a/usr/src/uts/common/io/ipw/ipw2100_hw.c b/usr/src/uts/common/io/ipw/ipw2100_hw.c
index 9b0b34e800..f8c7d10583 100644
--- a/usr/src/uts/common/io/ipw/ipw2100_hw.c
+++ b/usr/src/uts/common/io/ipw/ipw2100_hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -30,8 +30,6 @@
* SUCH DAMAGE.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Intel Wireless PRO/2100 mini-PCI adapter driver
* ipw2100_hw.c is used to handle hardware operation and firmware operations.
@@ -483,12 +481,13 @@ ipw2100_load_fw(struct ipw2100_softc *sc)
/*
* wait for interrupt to notify fw initialization is done
*/
+ clk = drv_usectohz(5000000); /* 5 second */
while (!(sc->sc_flags & IPW2100_FLAG_FW_INITED)) {
/*
* wait longer for the fw initialized
*/
- clk = ddi_get_lbolt() + drv_usectohz(5000000); /* 5 second */
- if (cv_timedwait(&sc->sc_fw_cond, &sc->sc_ilock, clk) < 0)
+ if (cv_reltimedwait(&sc->sc_fw_cond, &sc->sc_ilock, clk,
+ TR_CLOCK_TICK) < 0)
break;
}
mutex_exit(&sc->sc_ilock);
diff --git a/usr/src/uts/common/io/iwi/ipw2200.c b/usr/src/uts/common/io/iwi/ipw2200.c
index 505a946a07..d52e069496 100644
--- a/usr/src/uts/common/io/iwi/ipw2200.c
+++ b/usr/src/uts/common/io/iwi/ipw2200.c
@@ -1014,12 +1014,12 @@ ipw2200_cmd(struct ipw2200_softc *sc,
/*
* Wait for command done
*/
+ clk = drv_usectohz(5000000);
mutex_enter(&sc->sc_ilock);
while (sc->sc_done[idx] == 0) {
/* pending */
- clk = ddi_get_lbolt() + drv_usectohz(5000000); /* 5 second */
- if (cv_timedwait(&sc->sc_cmd_status_cond, &sc->sc_ilock, clk)
- < 0)
+ if (cv_reltimedwait(&sc->sc_cmd_status_cond, &sc->sc_ilock,
+ clk, TR_CLOCK_TICK) < 0)
break;
}
mutex_exit(&sc->sc_ilock);
diff --git a/usr/src/uts/common/io/iwi/ipw2200_hw.c b/usr/src/uts/common/io/iwi/ipw2200_hw.c
index 8a25125d53..65f27024b4 100644
--- a/usr/src/uts/common/io/iwi/ipw2200_hw.c
+++ b/usr/src/uts/common/io/iwi/ipw2200_hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -30,8 +30,6 @@
* SUCH DAMAGE.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Intel Wireless PRO/2200 mini-PCI adapter driver
 * ipw2200_hw.c is used to handle hardware operations and firmware operations.
@@ -376,7 +374,7 @@ ipw2200_load_fw(struct ipw2200_softc *sc, uint8_t *buf, size_t size)
uint32_t src, dst, ctl, len, sum, off;
uint32_t sentinel;
int ntries, err, cnt, i;
- clock_t clk;
+ clock_t clk = drv_usectohz(5000000); /* 5 second */
ipw2200_imem_put32(sc, 0x3000a0, 0x27000);
@@ -500,8 +498,8 @@ ipw2200_load_fw(struct ipw2200_softc *sc, uint8_t *buf, size_t size)
/*
* There is an enhancement! we just change from 1s to 5s
*/
- clk = ddi_get_lbolt() + drv_usectohz(5000000); /* 5 second */
- if (cv_timedwait(&sc->sc_fw_cond, &sc->sc_ilock, clk) < 0)
+ if (cv_reltimedwait(&sc->sc_fw_cond, &sc->sc_ilock, clk,
+ TR_CLOCK_TICK) < 0)
break;
}
mutex_exit(&sc->sc_ilock);
diff --git a/usr/src/uts/common/io/lvm/md/md_subr.c b/usr/src/uts/common/io/lvm/md/md_subr.c
index 69ac3da188..5297c56dc5 100644
--- a/usr/src/uts/common/io/lvm/md/md_subr.c
+++ b/usr/src/uts/common/io/lvm/md/md_subr.c
@@ -1346,6 +1346,7 @@ callb_md_cpr(void *arg, int code)
{
callb_cpr_t *cp = (callb_cpr_t *)arg;
int ret = 0; /* assume success */
+ clock_t delta;
mutex_enter(cp->cc_lockp);
@@ -1368,10 +1369,11 @@ callb_md_cpr(void *arg, int code)
mutex_exit(&md_cpr_resync.md_resync_mutex);
cp->cc_events |= CALLB_CPR_START;
+ delta = CPR_KTHREAD_TIMEOUT_SEC * hz;
while (!(cp->cc_events & CALLB_CPR_SAFE))
- /* cv_timedwait() returns -1 if it times out. */
- if ((ret = cv_timedwait(&cp->cc_callb_cv, cp->cc_lockp,
- lbolt + CPR_KTHREAD_TIMEOUT_SEC * hz)) == -1)
+ /* cv_reltimedwait() returns -1 if it times out. */
+ if ((ret = cv_reltimedwait(&cp->cc_callb_cv,
+ cp->cc_lockp, delta, TR_CLOCK_TICK)) == -1)
break;
break;
@@ -4202,6 +4204,7 @@ callb_md_mrs_cpr(void *arg, int code)
{
callb_cpr_t *cp = (callb_cpr_t *)arg;
int ret = 0; /* assume success */
+ clock_t delta;
mutex_enter(cp->cc_lockp);
@@ -4214,10 +4217,11 @@ callb_md_mrs_cpr(void *arg, int code)
*/
md_mn_clear_commd_present();
cp->cc_events |= CALLB_CPR_START;
+ delta = CPR_KTHREAD_TIMEOUT_SEC * hz;
while (!(cp->cc_events & CALLB_CPR_SAFE))
/* cv_timedwait() returns -1 if it times out. */
- if ((ret = cv_timedwait(&cp->cc_callb_cv, cp->cc_lockp,
- lbolt + CPR_KTHREAD_TIMEOUT_SEC * hz)) == -1)
+ if ((ret = cv_reltimedwait(&cp->cc_callb_cv,
+ cp->cc_lockp, delta, TR_CLOCK_TICK)) == -1)
break;
break;
diff --git a/usr/src/uts/common/io/lvm/raid/raid.c b/usr/src/uts/common/io/lvm/raid/raid.c
index 22ae2f547c..2240e46aa9 100644
--- a/usr/src/uts/common/io/lvm/raid/raid.c
+++ b/usr/src/uts/common/io/lvm/raid/raid.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -959,7 +959,8 @@ raid_build_incore(void *p, int snarfing)
MD_RAID_COPY_RESYNC;
else
preserve_flags |=
- MD_RAID_REGEN_RESYNC;
+ /* CSTYLED */
+ MD_RAID_REGEN_RESYNC;
}
}
} else { /* no hot spares */
@@ -3307,7 +3308,6 @@ raid_write(mr_unit_t *un, md_raidcs_t *cs)
md_raidps_t *ps;
mdi_unit_t *ui;
minor_t mnum;
- clock_t timeout;
ASSERT(IO_READER_HELD(un));
ps = cs->cs_ps;
@@ -3333,11 +3333,10 @@ raid_write(mr_unit_t *un, md_raidcs_t *cs)
*/
while (raid_check_pw(cs)) {
mutex_enter(&un->un_mx);
- (void) drv_getparm(LBOLT, &timeout);
- timeout += md_wr_wait;
un->un_rflags |= MD_RFLAG_NEEDPW;
STAT_INC(raid_prewrite_waits);
- (void) cv_timedwait(&un->un_cv, &un->un_mx, timeout);
+ (void) cv_reltimedwait(&un->un_cv, &un->un_mx, md_wr_wait,
+ TR_CLOCK_TICK);
un->un_rflags &= ~MD_RFLAG_NEEDPW;
mutex_exit(&un->un_mx);
}
diff --git a/usr/src/uts/common/io/mac/mac_sched.c b/usr/src/uts/common/io/mac/mac_sched.c
index ff3cba81ed..b8ebe45a82 100644
--- a/usr/src/uts/common/io/mac/mac_sched.c
+++ b/usr/src/uts/common/io/mac/mac_sched.c
@@ -274,8 +274,9 @@ boolean_t mac_latency_optimize = B_TRUE;
ASSERT(MUTEX_HELD(&(mac_srs)->srs_lock)); \
ASSERT(((mac_srs)->srs_type & SRST_TX) || \
MUTEX_HELD(&(mac_srs)->srs_bw->mac_bw_lock)); \
- if ((mac_srs)->srs_bw->mac_bw_curr_time != lbolt) { \
- (mac_srs)->srs_bw->mac_bw_curr_time = lbolt; \
+ clock_t now = ddi_get_lbolt(); \
+ if ((mac_srs)->srs_bw->mac_bw_curr_time != now) { \
+ (mac_srs)->srs_bw->mac_bw_curr_time = now; \
(mac_srs)->srs_bw->mac_bw_used = 0; \
if ((mac_srs)->srs_bw->mac_bw_state & SRS_BW_ENFORCED) \
(mac_srs)->srs_bw->mac_bw_state &= ~SRS_BW_ENFORCED; \
@@ -1813,14 +1814,16 @@ mac_rx_srs_drain_bw(mac_soft_ring_set_t *mac_srs, uint_t proc_type)
int cnt = 0;
mac_client_impl_t *mcip = mac_srs->srs_mcip;
mac_srs_rx_t *srs_rx = &mac_srs->srs_rx;
+ clock_t now;
ASSERT(MUTEX_HELD(&mac_srs->srs_lock));
ASSERT(mac_srs->srs_type & SRST_BW_CONTROL);
again:
/* Check if we are doing B/W control */
mutex_enter(&mac_srs->srs_bw->mac_bw_lock);
- if (mac_srs->srs_bw->mac_bw_curr_time != lbolt) {
- mac_srs->srs_bw->mac_bw_curr_time = lbolt;
+ now = ddi_get_lbolt();
+ if (mac_srs->srs_bw->mac_bw_curr_time != now) {
+ mac_srs->srs_bw->mac_bw_curr_time = now;
mac_srs->srs_bw->mac_bw_used = 0;
if (mac_srs->srs_bw->mac_bw_state & SRS_BW_ENFORCED)
mac_srs->srs_bw->mac_bw_state &= ~SRS_BW_ENFORCED;
@@ -2860,6 +2863,7 @@ mac_tx_bw_mode(mac_soft_ring_set_t *mac_srs, mblk_t *mp_chain,
mblk_t *tail;
mac_tx_cookie_t cookie = NULL;
mac_srs_tx_t *srs_tx = &mac_srs->srs_tx;
+ clock_t now;
ASSERT(TX_BANDWIDTH_MODE(mac_srs));
ASSERT(mac_srs->srs_type & SRST_BW_CONTROL);
@@ -2886,8 +2890,9 @@ mac_tx_bw_mode(mac_soft_ring_set_t *mac_srs, mblk_t *mp_chain,
return (cookie);
}
MAC_COUNT_CHAIN(mac_srs, mp_chain, tail, cnt, sz);
- if (mac_srs->srs_bw->mac_bw_curr_time != lbolt) {
- mac_srs->srs_bw->mac_bw_curr_time = lbolt;
+ now = ddi_get_lbolt();
+ if (mac_srs->srs_bw->mac_bw_curr_time != now) {
+ mac_srs->srs_bw->mac_bw_curr_time = now;
mac_srs->srs_bw->mac_bw_used = 0;
} else if (mac_srs->srs_bw->mac_bw_used >
mac_srs->srs_bw->mac_bw_limit) {
@@ -2962,6 +2967,7 @@ mac_tx_srs_drain(mac_soft_ring_set_t *mac_srs, uint_t proc_type)
boolean_t is_subflow;
mac_tx_stats_t stats;
mac_srs_tx_t *srs_tx = &mac_srs->srs_tx;
+ clock_t now;
saved_pkt_count = 0;
ASSERT(mutex_owned(&mac_srs->srs_lock));
@@ -3028,8 +3034,9 @@ mac_tx_srs_drain(mac_soft_ring_set_t *mac_srs, uint_t proc_type)
mac_srs->srs_bw->mac_bw_limit)
continue;
- if (mac_srs->srs_bw->mac_bw_curr_time != lbolt) {
- mac_srs->srs_bw->mac_bw_curr_time = lbolt;
+ now = ddi_get_lbolt();
+ if (mac_srs->srs_bw->mac_bw_curr_time != now) {
+ mac_srs->srs_bw->mac_bw_curr_time = now;
mac_srs->srs_bw->mac_bw_used = sz;
continue;
}
@@ -3114,8 +3121,9 @@ mac_tx_srs_drain(mac_soft_ring_set_t *mac_srs, uint_t proc_type)
mac_srs->srs_bw->mac_bw_limit)
continue;
- if (mac_srs->srs_bw->mac_bw_curr_time != lbolt) {
- mac_srs->srs_bw->mac_bw_curr_time = lbolt;
+ now = ddi_get_lbolt();
+ if (mac_srs->srs_bw->mac_bw_curr_time != now) {
+ mac_srs->srs_bw->mac_bw_curr_time = now;
mac_srs->srs_bw->mac_bw_used = 0;
continue;
}
diff --git a/usr/src/uts/common/io/mii/mii.c b/usr/src/uts/common/io/mii/mii.c
index 2ce85886dc..5e95471f94 100644
--- a/usr/src/uts/common/io/mii/mii.c
+++ b/usr/src/uts/common/io/mii/mii.c
@@ -2139,9 +2139,8 @@ _mii_task(void *_mh)
break;
default:
- (void) cv_timedwait(&mh->m_cv, &mh->m_lock,
- ddi_get_lbolt() + drv_usectohz(wait));
- break;
+ (void) cv_reltimedwait(&mh->m_cv, &mh->m_lock,
+ drv_usectohz(wait), TR_CLOCK_TICK);
}
}
diff --git a/usr/src/uts/common/io/mms/dmd/dmd.c b/usr/src/uts/common/io/mms/dmd/dmd.c
index 30d38d7ad2..2b6db211b7 100644
--- a/usr/src/uts/common/io/mms/dmd/dmd.c
+++ b/usr/src/uts/common/io/mms/dmd/dmd.c
@@ -18,7 +18,7 @@
*
* CDDL HEADER END
*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -3349,7 +3349,6 @@ dmd_signal_drm(dmd_drm_t *drm)
drm_reply_t *rep = &drm->drm_shr_rep;
int inst = drm->drm_inst;
pid_t pid = drm->drm_cur_pid;
- clock_t cur_ticks;
clock_t to;
int rc = 0;
@@ -3399,13 +3398,14 @@ dmd_signal_drm(dmd_drm_t *drm)
*/
DMD_DEBUG((CE_CONT, "[%d:%d] Waiting for drive manager "
"to get request\n", inst, pid));
+
+ /* Seconds to wait for DM */
+ to = drv_usectohz(DMD_WAIT_DM_GET_SEC * 1000000);
+
while ((drm->drm_shr_flags & DRM_SHR_REQ_VALID) &&
(drm->drm_shr_flags & DRM_SHR_WAIT_RESUME)) {
- cur_ticks = ddi_get_lbolt();
- to = cur_ticks + drv_usectohz(DMD_WAIT_DM_GET_SEC * 1000000);
- /* Seconds to wait for DM */
- rc = cv_timedwait(&drm->drm_shr_res_cv,
- &drm->drm_shr_mutex, to);
+ rc = cv_reltimedwait(&drm->drm_shr_res_cv,
+ &drm->drm_shr_mutex, to, TR_CLOCK_TICK);
if (rc == -1) {
/* timedout */
DMD_DEBUG((CE_NOTE, "[%d:%d] dmd_signal_drm: "
diff --git a/usr/src/uts/common/io/net80211/net80211_ioctl.c b/usr/src/uts/common/io/net80211/net80211_ioctl.c
index f541e52902..93212719e3 100644
--- a/usr/src/uts/common/io/net80211/net80211_ioctl.c
+++ b/usr/src/uts/common/io/net80211/net80211_ioctl.c
@@ -686,11 +686,11 @@ static void
wifi_wait_scan(struct ieee80211com *ic)
{
ieee80211_impl_t *im = ic->ic_private;
+ clock_t delta = drv_usectohz(WAIT_SCAN_MAX * 1000);
while ((ic->ic_flags & (IEEE80211_F_SCAN | IEEE80211_F_ASCAN)) != 0) {
- if (cv_timedwait_sig(&im->im_scan_cv, &ic->ic_genlock,
- ddi_get_lbolt() + drv_usectohz(WAIT_SCAN_MAX * 1000)) !=
- 0) {
+ if (cv_reltimedwait_sig(&im->im_scan_cv, &ic->ic_genlock,
+ delta, TR_CLOCK_TICK) != 0) {
break;
}
}
diff --git a/usr/src/uts/common/io/nxge/nxge_mac.c b/usr/src/uts/common/io/nxge/nxge_mac.c
index 8a8c8652ca..12e79eb97e 100644
--- a/usr/src/uts/common/io/nxge/nxge_mac.c
+++ b/usr/src/uts/common/io/nxge/nxge_mac.c
@@ -5469,11 +5469,9 @@ nxge_link_monitor(p_nxge_t nxgep, link_mon_enable_t enable)
}
nxgep->poll_state = LINK_MONITOR_STOPPING;
- rv = cv_timedwait(&nxgep->poll_cv,
- &nxgep->poll_lock,
- ddi_get_lbolt() +
+ rv = cv_reltimedwait(&nxgep->poll_cv, &nxgep->poll_lock,
drv_usectohz(LM_WAIT_MULTIPLIER *
- LINK_MONITOR_PERIOD));
+ LINK_MONITOR_PERIOD), TR_CLOCK_TICK);
if (rv == -1) {
NXGE_DEBUG_MSG((nxgep, MAC_CTL,
"==> stopping port %d: "
diff --git a/usr/src/uts/common/io/pciex/hotplug/pcishpc.c b/usr/src/uts/common/io/pciex/hotplug/pcishpc.c
index cc92020517..272ea39a2d 100644
--- a/usr/src/uts/common/io/pciex/hotplug/pcishpc.c
+++ b/usr/src/uts/common/io/pciex/hotplug/pcishpc.c
@@ -1719,9 +1719,9 @@ pcishpc_attn_btn_handler(pcie_hp_slot_t *slot_p)
PCIE_HP_LED_BLINK);
/* wait for 5 seconds before taking any action */
- if (cv_timedwait(&slot_p->hs_attn_btn_cv,
+ if (cv_reltimedwait(&slot_p->hs_attn_btn_cv,
&slot_p->hs_ctrl->hc_mutex,
- ddi_get_lbolt() + SEC_TO_TICK(5)) == -1) {
+ SEC_TO_TICK(5), TR_CLOCK_TICK) == -1) {
/*
* It is a time out;
* make sure the ATTN pending flag is
diff --git a/usr/src/uts/common/io/rsm/rsm.c b/usr/src/uts/common/io/rsm/rsm.c
index 70840c0c60..a9512f8da3 100644
--- a/usr/src/uts/common/io/rsm/rsm.c
+++ b/usr/src/uts/common/io/rsm/rsm.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -3263,7 +3263,6 @@ rsm_unpublish(rsmseg_t *seg, int mode)
rsm_access_entry_t *rsmpi_acl;
int acl_len;
int e;
- clock_t ticks;
adapter_t *adapter;
DBG_DEFINE(category, RSM_KERNEL_AGENT | RSM_EXPORT);
@@ -3377,10 +3376,9 @@ rsm_unpublish(rsmseg_t *seg, int mode)
/* can be fine tuned when we have better numbers */
/* A long term fix would be to send cv_signal */
/* from the intr callback routine */
- (void) drv_getparm(LBOLT, &ticks);
- ticks += drv_usectohz(1000);
- /* currently nobody signals this wait */
- (void) cv_timedwait(&seg->s_cv, &seg->s_lock, ticks);
+ /* currently nobody signals this wait */
+ (void) cv_reltimedwait(&seg->s_cv, &seg->s_lock,
+ drv_usectohz(1000), TR_CLOCK_TICK);
DBG_PRINTF((category, RSM_ERR,
"rsm_unpublish: SEG_IN_USE\n"));
@@ -5294,7 +5292,6 @@ rsmipc_send(rsm_node_id_t dest, rsmipc_request_t *req, rsmipc_reply_t *reply)
int credit_check = 0;
int retry_cnt = 0;
int min_retry_cnt = 10;
- clock_t ticks;
rsm_send_t is;
rsmipc_slot_t *rslot;
adapter_t *adapter;
@@ -5646,10 +5643,8 @@ again:
}
/* wait for a reply signal, a SIGINT, or 5 sec. timeout */
- (void) drv_getparm(LBOLT, &ticks);
- ticks += drv_usectohz(5000000);
- e = cv_timedwait_sig(&rslot->rsmipc_cv, &rslot->rsmipc_lock,
- ticks);
+ e = cv_reltimedwait_sig(&rslot->rsmipc_cv, &rslot->rsmipc_lock,
+ drv_usectohz(5000000), TR_CLOCK_TICK);
if (e < 0) {
/* timed out - retry */
e = RSMERR_TIMEOUT;
@@ -5878,7 +5873,6 @@ rsmipc_send_controlmsg(path_t *path, int msgtype)
int e;
int retry_cnt = 0;
int min_retry_cnt = 10;
- clock_t timeout;
adapter_t *adapter;
rsm_send_t is;
rsm_send_q_handle_t ipc_handle;
@@ -5946,9 +5940,8 @@ rsmipc_send_controlmsg(path_t *path, int msgtype)
"rsmipc_send_controlmsg:rsm_send error=%d", e));
if (++retry_cnt == min_retry_cnt) { /* backoff before retry */
- timeout = ddi_get_lbolt() + drv_usectohz(10000);
- (void) cv_timedwait(&path->sendq_token.sendq_cv,
- &path->mutex, timeout);
+ (void) cv_reltimedwait(&path->sendq_token.sendq_cv,
+ &path->mutex, drv_usectohz(10000), TR_CLOCK_TICK);
retry_cnt = 0;
}
} while (path->state == RSMKA_PATH_ACTIVE);
diff --git a/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c b/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c
index fad9286e02..e597576cd3 100644
--- a/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c
+++ b/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c
@@ -6898,8 +6898,8 @@ nv_sgp_activity_led_ctl(void *arg)
mutex_enter(&cmn->nvs_tlock);
ticks = drv_usectohz(cmn->nvs_taskq_delay);
if (ticks > 0)
- (void) cv_timedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
- ddi_get_lbolt() + ticks);
+ (void) cv_reltimedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
+ ticks, TR_CLOCK_TICK);
mutex_exit(&cmn->nvs_tlock);
} while (ticks > 0);
}
diff --git a/usr/src/uts/common/io/sata/impl/sata.c b/usr/src/uts/common/io/sata/impl/sata.c
index eca21dde57..53af17ce53 100644
--- a/usr/src/uts/common/io/sata/impl/sata.c
+++ b/usr/src/uts/common/io/sata/impl/sata.c
@@ -16653,7 +16653,7 @@ sata_event_daemon(void *arg)
_NOTE(ARGUNUSED(arg))
#endif
sata_hba_inst_t *sata_hba_inst;
- clock_t lbolt;
+ clock_t delta;
SATADBG1(SATA_DBG_EVENTS_DAEMON, NULL,
"SATA event daemon started\n", NULL);
@@ -16715,11 +16715,11 @@ loop:
* wait timeout. Exit if there is a termination request (driver
* unload).
*/
+ delta = drv_usectohz(SATA_EVNT_DAEMON_SLEEP_TIME);
do {
- lbolt = ddi_get_lbolt();
- lbolt += drv_usectohz(SATA_EVNT_DAEMON_SLEEP_TIME);
mutex_enter(&sata_event_mutex);
- (void) cv_timedwait(&sata_event_cv, &sata_event_mutex, lbolt);
+ (void) cv_reltimedwait(&sata_event_cv, &sata_event_mutex,
+ delta, TR_CLOCK_TICK);
if (sata_event_thread_active != 0) {
mutex_exit(&sata_event_mutex);
diff --git a/usr/src/uts/common/io/scsi/adapters/iscsi/iscsi_login.c b/usr/src/uts/common/io/scsi/adapters/iscsi/iscsi_login.c
index 5f821deb95..d0aa21bf86 100644
--- a/usr/src/uts/common/io/scsi/adapters/iscsi/iscsi_login.c
+++ b/usr/src/uts/common/io/scsi/adapters/iscsi/iscsi_login.c
@@ -2111,6 +2111,7 @@ iscsi_login_connect(iscsi_conn_t *icp)
struct sockaddr *addr;
idm_conn_req_t cr;
idm_status_t rval;
+ clock_t lbolt;
ASSERT(icp != NULL);
isp = icp->conn_sess;
diff --git a/usr/src/uts/common/io/scsi/adapters/iscsi/iscsi_thread.c b/usr/src/uts/common/io/scsi/adapters/iscsi/iscsi_thread.c
index 976dca3ac9..6f99c98448 100644
--- a/usr/src/uts/common/io/scsi/adapters/iscsi/iscsi_thread.c
+++ b/usr/src/uts/common/io/scsi/adapters/iscsi/iscsi_thread.c
@@ -325,8 +325,8 @@ iscsi_thread_wait(
if (timeout == -1) {
cv_wait(&thread->sign.cdv, &thread->sign.mtx);
} else {
- rtn = cv_timedwait(&thread->sign.cdv, &thread->sign.mtx,
- (ddi_get_lbolt() + timeout));
+ rtn = cv_reltimedwait(&thread->sign.cdv, &thread->sign.mtx,
+ timeout, TR_CLOCK_TICK);
}
/* Check the signals. */
diff --git a/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas_impl.c b/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas_impl.c
index b0e2c4891d..5b32a808bc 100644
--- a/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas_impl.c
+++ b/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas_impl.c
@@ -1315,8 +1315,8 @@ mptsas_update_flash(mptsas_t *mpt, caddr_t ptrbuffer, uint32_t size,
MPTSAS_START_CMD(mpt, request_desc_low, 0);
rvalue = 0;
- (void) cv_timedwait(&mpt->m_fw_cv, &mpt->m_mutex,
- MPTSAS_CV_TIMEOUT(60));
+ (void) cv_reltimedwait(&mpt->m_fw_cv, &mpt->m_mutex,
+ drv_usectohz(60 * MICROSEC), TR_CLOCK_TICK);
if (!(cmd->cmd_flags & CFLAG_FINISHED)) {
if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
diff --git a/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c b/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c
index a942c35993..9e8be4b259 100644
--- a/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c
+++ b/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c
@@ -5182,9 +5182,9 @@ vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
"%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip));
while (svp->svp_cmds != 0) {
- if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex,
- ddi_get_lbolt() +
- drv_usectohz(vhci_path_quiesce_timeout * 1000000)) == -1) {
+ if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
+ drv_usectohz(vhci_path_quiesce_timeout * 1000000),
+ TR_CLOCK_TICK) == -1) {
/*
* The timeout time reached without the condition
* being signaled.
@@ -7369,9 +7369,9 @@ vhci_quiesce_lun(struct scsi_vhci_lun *vlun)
svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
mutex_enter(&svp->svp_mutex);
while (svp->svp_cmds != 0) {
- if (cv_timedwait(&svp->svp_cv, &svp->svp_mutex,
- ddi_get_lbolt() + drv_usectohz
- (vhci_path_quiesce_timeout * 1000000)) == -1) {
+ if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
+ drv_usectohz(vhci_path_quiesce_timeout * 1000000),
+ TR_CLOCK_TICK) == -1) {
mutex_exit(&svp->svp_mutex);
mdi_rele_path(pip);
VHCI_DEBUG(1, (CE_WARN, NULL,
diff --git a/usr/src/uts/common/io/scsi/impl/scsi_watch.c b/usr/src/uts/common/io/scsi/impl/scsi_watch.c
index 7a0ad8aed0..9cc8b6e3a2 100644
--- a/usr/src/uts/common/io/scsi/impl/scsi_watch.c
+++ b/usr/src/uts/common/io/scsi/impl/scsi_watch.c
@@ -426,7 +426,6 @@ void
scsi_watch_suspend(opaque_t token)
{
struct scsi_watch_request *swr = (struct scsi_watch_request *)NULL;
- clock_t now;
clock_t halfsec_delay = drv_usectohz(500000);
SW_DEBUG(0, sw_label, SCSI_DEBUG, "scsi_watch_suspend:\n");
@@ -454,9 +453,8 @@ scsi_watch_suspend(opaque_t token)
* till all outstanding cmds are complete
*/
swr->swr_what = SWR_SUSPEND_REQUESTED;
- now = ddi_get_lbolt();
- (void) cv_timedwait(&sw.sw_cv, &sw.sw_mutex,
- now + halfsec_delay);
+ (void) cv_reltimedwait(&sw.sw_cv, &sw.sw_mutex,
+ halfsec_delay, TR_CLOCK_TICK);
} else {
swr->swr_what = SWR_SUSPENDED;
break;
@@ -656,7 +654,6 @@ static void
scsi_watch_thread()
{
struct scsi_watch_request *swr, *next;
- clock_t now;
clock_t last_delay = 0;
clock_t next_delay = 0;
clock_t onesec = drv_usectohz(1000000);
@@ -819,7 +816,6 @@ head:
} else {
next_delay = exit_delay;
}
- now = ddi_get_lbolt();
mutex_enter(&cpr_mutex);
if (!sw_cmd_count) {
@@ -832,7 +828,8 @@ head:
* signalled, the delay is not accurate but that doesn't
* really matter
*/
- (void) cv_timedwait(&sw.sw_cv, &sw.sw_mutex, now + next_delay);
+ (void) cv_reltimedwait(&sw.sw_cv, &sw.sw_mutex, next_delay,
+ TR_CLOCK_TICK);
mutex_exit(&sw.sw_mutex);
mutex_enter(&cpr_mutex);
if (sw_cpr_flag == 1) {
diff --git a/usr/src/uts/common/io/sdcard/impl/sda_slot.c b/usr/src/uts/common/io/sdcard/impl/sda_slot.c
index 0e908ecd26..8de0539039 100644
--- a/usr/src/uts/common/io/sdcard/impl/sda_slot.c
+++ b/usr/src/uts/common/io/sdcard/impl/sda_slot.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -718,9 +718,9 @@ sda_slot_thread(void *arg)
if ((slot->s_xfrp != NULL) || (slot->s_reap)) {
/* Wait 3 sec (reap attempts). */
- (void) cv_timedwait(&slot->s_evcv,
- &slot->s_evlock,
- ddi_get_lbolt() + drv_usectohz(3000000));
+ (void) cv_reltimedwait(&slot->s_evcv,
+ &slot->s_evlock, drv_usectohz(3000000),
+ TR_CLOCK_TICK);
} else {
(void) cv_wait(&slot->s_evcv, &slot->s_evlock);
}
diff --git a/usr/src/uts/common/io/softmac/softmac_pkt.c b/usr/src/uts/common/io/softmac/softmac_pkt.c
index 01b8f70abc..4641fb3372 100644
--- a/usr/src/uts/common/io/softmac/softmac_pkt.c
+++ b/usr/src/uts/common/io/softmac/softmac_pkt.c
@@ -122,8 +122,8 @@ softmac_output(softmac_lower_t *slp, mblk_t *mp, t_uscalar_t dl_prim,
mutex_enter(&slp->sl_mutex);
while (slp->sl_pending_prim != DL_PRIM_INVAL) {
- if (cv_timedwait(&slp->sl_cv, &slp->sl_mutex,
- lbolt + ACKTIMEOUT) == -1)
+ if (cv_reltimedwait(&slp->sl_cv, &slp->sl_mutex, ACKTIMEOUT,
+ TR_CLOCK_TICK) == -1)
break;
}
diff --git a/usr/src/uts/common/io/usb/hcd/ehci/ehci_isoch.c b/usr/src/uts/common/io/usb/hcd/ehci/ehci_isoch.c
index e9c517300c..d1e85fe9b9 100644
--- a/usr/src/uts/common/io/usb/hcd/ehci/ehci_isoch.c
+++ b/usr/src/uts/common/io/usb/hcd/ehci/ehci_isoch.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -266,8 +266,6 @@ ehci_wait_for_isoc_completion(
ehci_state_t *ehcip,
ehci_pipe_private_t *pp)
{
- clock_t xfer_cmpl_time_wait;
-
ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
if (pp->pp_itw_head == NULL) {
@@ -275,12 +273,8 @@ ehci_wait_for_isoc_completion(
return;
}
- /* Get the number of clock ticks to wait */
- xfer_cmpl_time_wait = drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000);
-
- (void) cv_timedwait(&pp->pp_xfer_cmpl_cv,
- &ehcip->ehci_int_mutex,
- ddi_get_lbolt() + xfer_cmpl_time_wait);
+ (void) cv_reltimedwait(&pp->pp_xfer_cmpl_cv, &ehcip->ehci_int_mutex,
+ drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000), TR_CLOCK_TICK);
if (pp->pp_itw_head) {
USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
diff --git a/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c b/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c
index dc413417f2..76f333ccf6 100644
--- a/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c
+++ b/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c
@@ -1197,8 +1197,8 @@ ehci_init_check_status(ehci_state_t *ehcip)
ehcip->ehci_flags |= EHCI_CV_INTR;
/* We need to add a delay to allow the chip time to start running */
- (void) cv_timedwait(&ehcip->ehci_async_schedule_advance_cv,
- &ehcip->ehci_int_mutex, ddi_get_lbolt() + sof_time_wait);
+ (void) cv_reltimedwait(&ehcip->ehci_async_schedule_advance_cv,
+ &ehcip->ehci_int_mutex, sof_time_wait, TR_CLOCK_TICK);
/*
* Check EHCI host controller is running, otherwise return failure.
diff --git a/usr/src/uts/common/io/usb/hcd/ehci/ehci_xfer.c b/usr/src/uts/common/io/usb/hcd/ehci/ehci_xfer.c
index d956029c61..a6afec8afd 100644
--- a/usr/src/uts/common/io/usb/hcd/ehci/ehci_xfer.c
+++ b/usr/src/uts/common/io/usb/hcd/ehci/ehci_xfer.c
@@ -3734,7 +3734,6 @@ ehci_wait_for_transfers_completion(
ehci_pipe_private_t *pp)
{
ehci_trans_wrapper_t *next_tw = pp->pp_tw_head;
- clock_t xfer_cmpl_time_wait;
ehci_qtd_t *qtd;
USB_DPRINTF_L4(PRINT_MASK_LISTS,
@@ -3781,12 +3780,8 @@ ehci_wait_for_transfers_completion(
return;
}
- /* Get the number of clock ticks to wait */
- xfer_cmpl_time_wait = drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000);
-
- (void) cv_timedwait(&pp->pp_xfer_cmpl_cv,
- &ehcip->ehci_int_mutex,
- ddi_get_lbolt() + xfer_cmpl_time_wait);
+ (void) cv_reltimedwait(&pp->pp_xfer_cmpl_cv, &ehcip->ehci_int_mutex,
+ drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000), TR_CLOCK_TICK);
if (pp->pp_count_done_qtds) {
diff --git a/usr/src/uts/common/io/usb/hcd/openhci/ohci.c b/usr/src/uts/common/io/usb/hcd/openhci/ohci.c
index 8f5a21c573..b181fbb2c2 100644
--- a/usr/src/uts/common/io/usb/hcd/openhci/ohci.c
+++ b/usr/src/uts/common/io/usb/hcd/openhci/ohci.c
@@ -1687,8 +1687,8 @@ ohci_init_ctlr(ohci_state_t *ohcip)
ASSERT(Get_OpReg(hcr_intr_enable) & HCR_INTR_SOF);
- (void) cv_timedwait(&ohcip->ohci_SOF_cv,
- &ohcip->ohci_int_mutex, ddi_get_lbolt() + sof_time_wait);
+ (void) cv_reltimedwait(&ohcip->ohci_SOF_cv,
+ &ohcip->ohci_int_mutex, sof_time_wait, TR_CLOCK_TICK);
/* Wait for the SOF or timeout event */
if (ohcip->ohci_sof_flag == B_FALSE) {
@@ -10221,8 +10221,8 @@ ohci_wait_for_sof(ohci_state_t *ohcip)
ASSERT(Get_OpReg(hcr_intr_enable) & HCR_INTR_SOF);
/* Wait for the SOF or timeout event */
- rval = cv_timedwait(&ohcip->ohci_SOF_cv,
- &ohcip->ohci_int_mutex, ddi_get_lbolt() + sof_time_wait);
+ rval = cv_reltimedwait(&ohcip->ohci_SOF_cv,
+ &ohcip->ohci_int_mutex, sof_time_wait, TR_CLOCK_TICK);
/*
* Get the current usb frame number after woken up either
@@ -10412,7 +10412,6 @@ ohci_wait_for_transfers_completion(
{
ohci_trans_wrapper_t *head_tw = pp->pp_tw_head;
ohci_trans_wrapper_t *next_tw;
- clock_t xfer_cmpl_time_wait;
ohci_td_t *tailp, *headp, *nextp;
ohci_td_t *head_td, *next_td;
ohci_ed_t *ept = pp->pp_ept;
@@ -10488,12 +10487,8 @@ ohci_wait_for_transfers_completion(
return;
}
- /* Get the number of clock ticks to wait */
- xfer_cmpl_time_wait = drv_usectohz(OHCI_XFER_CMPL_TIMEWAIT * 1000000);
-
- (void) cv_timedwait(&pp->pp_xfer_cmpl_cv,
- &ohcip->ohci_int_mutex,
- ddi_get_lbolt() + xfer_cmpl_time_wait);
+ (void) cv_reltimedwait(&pp->pp_xfer_cmpl_cv, &ohcip->ohci_int_mutex,
+ drv_usectohz(OHCI_XFER_CMPL_TIMEWAIT * 1000000), TR_CLOCK_TICK);
if (pp->pp_count_done_tds) {
diff --git a/usr/src/uts/common/io/usb/hcd/uhci/uhciutil.c b/usr/src/uts/common/io/usb/hcd/uhci/uhciutil.c
index 27b4048506..9ba005f05b 100644
--- a/usr/src/uts/common/io/usb/hcd/uhci/uhciutil.c
+++ b/usr/src/uts/common/io/usb/hcd/uhci/uhciutil.c
@@ -4750,7 +4750,7 @@ uhci_wait_for_sof(uhci_state_t *uhcip)
int n, error;
ushort_t cmd_reg;
usb_frame_number_t before_frame_number, after_frame_number;
- clock_t time, rval;
+ clock_t rval;
USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
"uhci_wait_for_sof: uhcip = %p", (void *)uhcip);
@@ -4768,9 +4768,8 @@ uhci_wait_for_sof(uhci_state_t *uhcip)
SetTD_ioc(uhcip, uhcip->uhci_sof_td, 1);
uhcip->uhci_cv_signal = B_TRUE;
- time = ddi_get_lbolt() + UHCI_ONE_SECOND;
- rval = cv_timedwait(&uhcip->uhci_cv_SOF,
- &uhcip->uhci_int_mutex, time);
+ rval = cv_reltimedwait(&uhcip->uhci_cv_SOF,
+ &uhcip->uhci_int_mutex, UHCI_ONE_SECOND, TR_CLOCK_TICK);
after_frame_number = uhci_get_sw_frame_number(uhcip);
if ((rval == -1) &&
diff --git a/usr/src/uts/common/io/usb/hwa/hwahc/hwahc_util.c b/usr/src/uts/common/io/usb/hwa/hwahc/hwahc_util.c
index 318faae8d8..65d828afeb 100644
--- a/usr/src/uts/common/io/usb/hwa/hwahc/hwahc_util.c
+++ b/usr/src/uts/common/io/usb/hwa/hwahc/hwahc_util.c
@@ -159,7 +159,6 @@ static void
hwahc_wait_for_xfer_completion(hwahc_state_t *hwahcp, hwahc_pipe_private_t *pp)
{
wusb_wa_rpipe_hdl_t *hdl = pp->pp_rp;
- clock_t xfer_cmpl_time_wait;
mutex_enter(&hdl->rp_mutex);
if (hdl->rp_state != WA_RPIPE_STATE_ACTIVE) {
@@ -170,9 +169,8 @@ hwahc_wait_for_xfer_completion(hwahc_state_t *hwahcp, hwahc_pipe_private_t *pp)
mutex_exit(&hdl->rp_mutex);
/* wait 3s */
- xfer_cmpl_time_wait = drv_usectohz(3000000);
- (void) cv_timedwait(&pp->pp_xfer_cmpl_cv, &hwahcp->hwahc_mutex,
- ddi_get_lbolt() + xfer_cmpl_time_wait);
+ (void) cv_reltimedwait(&pp->pp_xfer_cmpl_cv, &hwahcp->hwahc_mutex,
+ drv_usectohz(3000000), TR_CLOCK_TICK);
mutex_enter(&hdl->rp_mutex);
if (hdl->rp_state == WA_RPIPE_STATE_ACTIVE) {
diff --git a/usr/src/uts/common/io/usb/usba/hubdi.c b/usr/src/uts/common/io/usb/usba/hubdi.c
index a9cf1e714f..8a04aca273 100644
--- a/usr/src/uts/common/io/usb/usba/hubdi.c
+++ b/usr/src/uts/common/io/usb/usba/hubdi.c
@@ -4609,7 +4609,7 @@ hubd_reset_port(hubd_t *hubd, usb_port_t port)
uint16_t status;
uint16_t change;
int i;
- clock_t current_time;
+ clock_t delta;
USB_DPRINTF_L4(DPRINT_MASK_PORT, hubd->h_log_handle,
"hubd_reset_port: port=%d", port);
@@ -4646,6 +4646,7 @@ hubd_reset_port(hubd_t *hubd, usb_port_t port)
/*
* wait for port status change event
*/
+ delta = drv_usectohz(hubd_device_delay / 10);
for (i = 0; i < hubd_retry_enumerate; i++) {
/*
* start polling ep1 for receiving notification on
@@ -4657,12 +4658,9 @@ hubd_reset_port(hubd_t *hubd, usb_port_t port)
* sleep a max of 100ms for reset completion
* notification to be received
*/
- current_time = ddi_get_lbolt();
if (hubd->h_port_reset_wait & port_mask) {
- rval = cv_timedwait(&hubd->h_cv_reset_port,
- &hubd->h_mutex,
- current_time +
- drv_usectohz(hubd_device_delay / 10));
+ rval = cv_reltimedwait(&hubd->h_cv_reset_port,
+ &hubd->h_mutex, delta, TR_CLOCK_TICK);
if ((rval <= 0) &&
(hubd->h_port_reset_wait & port_mask)) {
/* we got woken up because of a timeout */
@@ -8606,7 +8604,7 @@ usba_hubdi_decr_power_budget(dev_info_t *dip, usba_device_t *child_ud)
static int
hubd_wait_for_hotplug_exit(hubd_t *hubd)
{
- clock_t until = ddi_get_lbolt() + drv_usectohz(1000000);
+ clock_t until = drv_usectohz(1000000);
int rval;
ASSERT(mutex_owned(HUBD_MUTEX(hubd)));
@@ -8614,8 +8612,8 @@ hubd_wait_for_hotplug_exit(hubd_t *hubd)
if (hubd->h_hotplug_thread) {
USB_DPRINTF_L3(DPRINT_MASK_HOTPLUG, hubd->h_log_handle,
"waiting for hubd hotplug thread exit");
- rval = cv_timedwait(&hubd->h_cv_hotplug_dev,
- &hubd->h_mutex, until);
+ rval = cv_reltimedwait(&hubd->h_cv_hotplug_dev,
+ &hubd->h_mutex, until, TR_CLOCK_TICK);
if ((rval <= 0) && (hubd->h_hotplug_thread)) {
diff --git a/usr/src/uts/common/io/usb/usba/wa.c b/usr/src/uts/common/io/usb/usba/wa.c
index 535f67f2c2..24da7f7ced 100644
--- a/usr/src/uts/common/io/usb/usba/wa.c
+++ b/usr/src/uts/common/io/usb/usba/wa.c
@@ -2432,8 +2432,8 @@ wusb_wa_xfer_timeout_handler(void *arg)
* aborted, wait it.
*/
if ((wr->wr_has_aborted == 0) &&
- (cv_timedwait(&wr->wr_cv, &hdl->rp_mutex,
- ddi_get_lbolt() + drv_usectohz(100 * 1000))
+ (cv_reltimedwait(&wr->wr_cv, &hdl->rp_mutex,
+ drv_usectohz(100 * 1000), TR_CLOCK_TICK)
>= 0)) {
/* 100ms, random number, long enough? */
diff --git a/usr/src/uts/common/io/vscan/vscan_door.c b/usr/src/uts/common/io/vscan/vscan_door.c
index 674a61f382..efd5ca083d 100644
--- a/usr/src/uts/common/io/vscan/vscan_door.c
+++ b/usr/src/uts/common/io/vscan/vscan_door.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -105,9 +105,9 @@ vscan_door_close(void)
/* wait for any in-progress requests to complete */
time_left = SEC_TO_TICK(vs_door_close_timeout);
while ((vscan_door_call_count > 0) && (time_left > 0)) {
- timeout = lbolt + time_left;
- time_left = cv_timedwait(&vscan_door_cv,
- &vscan_door_mutex, timeout);
+ timeout = time_left;
+ time_left = cv_reltimedwait(&vscan_door_cv, &vscan_door_mutex,
+ timeout, TR_CLOCK_TICK);
}
if (time_left == -1)
diff --git a/usr/src/uts/common/io/vscan/vscan_drv.c b/usr/src/uts/common/io/vscan/vscan_drv.c
index 989c835d83..6247eea5f7 100644
--- a/usr/src/uts/common/io/vscan/vscan_drv.c
+++ b/usr/src/uts/common/io/vscan/vscan_drv.c
@@ -20,11 +20,10 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
@@ -519,10 +518,9 @@ vscan_drv_close(dev_t dev, int flag, int otyp, cred_t *credp)
static void
vscan_drv_delayed_disable(void)
{
- clock_t timeout = lbolt + SEC_TO_TICK(vs_reconnect_timeout);
-
mutex_enter(&vscan_drv_mutex);
- (void) cv_timedwait(&vscan_drv_cv, &vscan_drv_mutex, timeout);
+ (void) cv_reltimedwait(&vscan_drv_cv, &vscan_drv_mutex,
+ SEC_TO_TICK(vs_reconnect_timeout), TR_CLOCK_TICK);
if (vscan_drv_state == VS_DRV_DELAYED_DISABLE) {
vscan_svc_disable();
diff --git a/usr/src/uts/common/io/vscan/vscan_svc.c b/usr/src/uts/common/io/vscan/vscan_svc.c
index bb07530321..e26fa9e292 100644
--- a/usr/src/uts/common/io/vscan/vscan_svc.c
+++ b/usr/src/uts/common/io/vscan/vscan_svc.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
@@ -502,9 +500,9 @@ vscan_svc_scan_file(vnode_t *vp, cred_t *cr, int async)
++(req->vsr_refcnt);
time_left = SEC_TO_TICK(vs_scan_wait);
while ((time_left > 0) && (req->vsr_state != VS_SVC_REQ_COMPLETE)) {
- timeout = lbolt + time_left;
- time_left = cv_timedwait_sig(&(req->vsr_cv),
- &vscan_svc_mutex, timeout);
+ timeout = time_left;
+ time_left = cv_reltimedwait_sig(&(req->vsr_cv),
+ &vscan_svc_mutex, timeout, TR_CLOCK_TICK);
}
if (time_left == -1) {
@@ -589,8 +587,8 @@ vscan_svc_reql_handler(void)
DTRACE_PROBE2(vscan__req__counts, char *, "handler wait",
vscan_svc_counts_t *, &vscan_svc_counts);
- (void) cv_timedwait(&vscan_svc_reql_cv, &vscan_svc_mutex,
- lbolt + SEC_TO_TICK(VS_REQL_HANDLER_TIMEOUT));
+ (void) cv_reltimedwait(&vscan_svc_reql_cv, &vscan_svc_mutex,
+ SEC_TO_TICK(VS_REQL_HANDLER_TIMEOUT), TR_CLOCK_TICK);
DTRACE_PROBE2(vscan__req__counts, char *, "handler wake",
vscan_svc_counts_t *, &vscan_svc_counts);
diff --git a/usr/src/uts/common/io/winlockio.c b/usr/src/uts/common/io/winlockio.c
index 454bd0f60e..67ff79046a 100644
--- a/usr/src/uts/common/io/winlockio.c
+++ b/usr/src/uts/common/io/winlockio.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1402,8 +1402,8 @@ seglock_lockfault(devmap_cookie_t dhp, SegProc *sdp, SegLock *lp, uint_t rw)
* locksleep cv and thus kick the other timed waiters
* and cause everyone to restart in a new timedwait
*/
- rval = cv_timedwait_sig(&lp->locksleep,
- &lp->mutex, ddi_get_lbolt() + lp->timeout);
+ rval = cv_reltimedwait_sig(&lp->locksleep,
+ &lp->mutex, lp->timeout, TR_CLOCK_TICK);
}
/*
diff --git a/usr/src/uts/common/os/acct.c b/usr/src/uts/common/os/acct.c
index a8da2f8fb0..1e968ef12f 100644
--- a/usr/src/uts/common/os/acct.c
+++ b/usr/src/uts/common/os/acct.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -27,8 +27,6 @@
/* All Rights Reserved */
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
@@ -406,7 +404,7 @@ acct(char st)
ag->acctbuf.ac_utime = acct_compress(NSEC_TO_TICK(p->p_acct[LMS_USER]));
ag->acctbuf.ac_stime = acct_compress(
NSEC_TO_TICK(p->p_acct[LMS_SYSTEM] + p->p_acct[LMS_TRAP]));
- ag->acctbuf.ac_etime = acct_compress(lbolt - ua->u_ticks);
+ ag->acctbuf.ac_etime = acct_compress(ddi_get_lbolt() - ua->u_ticks);
ag->acctbuf.ac_mem = acct_compress((ulong_t)ua->u_mem);
ag->acctbuf.ac_io = acct_compress((ulong_t)p->p_ru.ioch);
ag->acctbuf.ac_rw = acct_compress((ulong_t)(p->p_ru.inblock +
diff --git a/usr/src/uts/common/os/bio.c b/usr/src/uts/common/os/bio.c
index a828f28272..0db01f80d7 100644
--- a/usr/src/uts/common/os/bio.c
+++ b/usr/src/uts/common/os/bio.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -36,8 +36,6 @@
* contributors.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/sysmacros.h>
@@ -231,7 +229,7 @@ bread_common(void *arg, dev_t dev, daddr_t blkno, long bsize)
/* ufs && snapshots */
(*bio_snapshot_strategy)(&ufsvfsp->vfs_snapshot, bp);
} else {
- ufsvfsp->vfs_iotstamp = lbolt;
+ ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
ub.ub_breads.value.ul++; /* ufs && !logging */
(void) bdev_strategy(bp);
}
@@ -374,7 +372,7 @@ bdwrite(struct buf *bp)
ASSERT(SEMA_HELD(&bp->b_sem));
CPU_STATS_ADD_K(sys, lwrite, 1);
if ((bp->b_flags & B_DELWRI) == 0)
- bp->b_start = lbolt;
+ bp->b_start = ddi_get_lbolt();
/*
* B_DONE allows others to use the buffer, B_DELWRI causes the
* buffer to be written before being reused, and setting b_resid
@@ -443,7 +441,7 @@ brelse(struct buf *bp)
if ((bp->b_flags & (B_READ | B_RETRYWRI)) == B_RETRYWRI) {
bp->b_flags |= B_DELWRI;
/* keep fsflush from trying continuously to flush */
- bp->b_start = lbolt;
+ bp->b_start = ddi_get_lbolt();
} else
bp->b_flags |= B_AGE|B_STALE;
bp->b_flags &= ~B_ERROR;
@@ -559,7 +557,7 @@ struct buf *
getblk(dev_t dev, daddr_t blkno, long bsize)
{
return (getblk_common(/* ufsvfsp */ NULL, dev,
- blkno, bsize, /* errflg */ 0));
+ blkno, bsize, /* errflg */ 0));
}
/*
@@ -1147,9 +1145,9 @@ binit(void)
* Invalid user specified value, emit a warning.
*/
cmn_err(CE_WARN, "binit: bufhwm_pct(%d) out of \
- range(1..%d). Using %d as default.",
- bufhwm_pct,
- 100 / BIO_MAX_PERCENT, 100 / BIO_BUF_PERCENT);
+ range(1..%d). Using %d as default.",
+ bufhwm_pct,
+ 100 / BIO_MAX_PERCENT, 100 / BIO_BUF_PERCENT);
}
bio_default_hwm = MIN(physmem / pct,
@@ -1165,10 +1163,10 @@ binit(void)
* Invalid user specified value, emit a warning.
*/
cmn_err(CE_WARN,
- "binit: bufhwm(%d) out \
- of range(%d..%lu). Using %lu as default",
- bufhwm,
- BIO_MIN_HWM, bio_max_hwm, bio_max_hwm);
+ "binit: bufhwm(%d) out \
+ of range(%d..%lu). Using %lu as default",
+ bufhwm,
+ BIO_MIN_HWM, bio_max_hwm, bio_max_hwm);
}
/*
@@ -1243,9 +1241,9 @@ biodone_tnf_probe(struct buf *bp)
{
/* Kernel probe */
TNF_PROBE_3(biodone, "io blockio", /* CSTYLED */,
- tnf_device, device, bp->b_edev,
- tnf_diskaddr, block, bp->b_lblkno,
- tnf_opaque, buf, bp);
+ tnf_device, device, bp->b_edev,
+ tnf_diskaddr, block, bp->b_lblkno,
+ tnf_opaque, buf, bp);
}
/*
@@ -1329,23 +1327,22 @@ pageio_setup(struct page *pp, size_t len, struct vnode *vp, int flags)
CPU_STATS_ADDQ(cpup, vm, maj_fault, 1);
/* Kernel probe */
TNF_PROBE_2(major_fault, "vm pagefault", /* CSTYLED */,
- tnf_opaque, vnode, pp->p_vnode,
- tnf_offset, offset, pp->p_offset);
+ tnf_opaque, vnode, pp->p_vnode,
+ tnf_offset, offset, pp->p_offset);
}
/*
* Update statistics for pages being paged in
*/
if (pp != NULL && pp->p_vnode != NULL) {
if (IS_SWAPFSVP(pp->p_vnode)) {
- CPU_STATS_ADDQ(cpup, vm, anonpgin,
- btopr(len));
+ CPU_STATS_ADDQ(cpup, vm, anonpgin, btopr(len));
} else {
if (pp->p_vnode->v_flag & VVMEXEC) {
CPU_STATS_ADDQ(cpup, vm, execpgin,
- btopr(len));
+ btopr(len));
} else {
CPU_STATS_ADDQ(cpup, vm, fspgin,
- btopr(len));
+ btopr(len));
}
}
}
@@ -1354,9 +1351,9 @@ pageio_setup(struct page *pp, size_t len, struct vnode *vp, int flags)
"page_ws_in:pp %p", pp);
/* Kernel probe */
TNF_PROBE_3(pagein, "vm pageio io", /* CSTYLED */,
- tnf_opaque, vnode, pp->p_vnode,
- tnf_offset, offset, pp->p_offset,
- tnf_size, size, len);
+ tnf_opaque, vnode, pp->p_vnode,
+ tnf_offset, offset, pp->p_offset,
+ tnf_size, size, len);
}
bp = kmem_zalloc(sizeof (struct buf), KM_SLEEP);
@@ -1872,7 +1869,7 @@ top:
*/
mutex_enter(&bfree_lock);
bfreelist.b_flags |= B_WANTED;
- (void) cv_timedwait(&bio_mem_cv, &bfree_lock, lbolt+hz);
+ (void) cv_reltimedwait(&bio_mem_cv, &bfree_lock, hz, TR_CLOCK_TICK);
mutex_exit(&bfree_lock);
goto top;
}
@@ -1985,7 +1982,7 @@ biomodified(struct buf *bp)
while (npf > 0) {
ppattr = hat_pagesync(pp, HAT_SYNC_DONTZERO |
- HAT_SYNC_STOPON_MOD);
+ HAT_SYNC_STOPON_MOD);
if (ppattr & P_MOD)
return (1);
pp = pp->p_next;
@@ -2058,7 +2055,7 @@ bioclone(struct buf *bp, off_t off, size_t len, dev_t dev, daddr_t blkno,
ASSERT(bp->b_flags & B_PHYS);
bufp->b_shadow = bp->b_shadow +
- btop(((uintptr_t)bp->b_un.b_addr & PAGEOFFSET) + off);
+ btop(((uintptr_t)bp->b_un.b_addr & PAGEOFFSET) + off);
bufp->b_un.b_addr = (caddr_t)((uintptr_t)bp->b_un.b_addr + off);
if (bp->b_flags & B_REMAPPED)
bufp->b_proc = NULL;
@@ -2077,7 +2074,7 @@ bioclone(struct buf *bp, off_t off, size_t len, dev_t dev, daddr_t blkno,
bufp->b_un.b_addr = (caddr_t)(o & PAGEOFFSET);
} else {
bufp->b_un.b_addr =
- (caddr_t)((uintptr_t)bp->b_un.b_addr + off);
+ (caddr_t)((uintptr_t)bp->b_un.b_addr + off);
if (bp->b_flags & B_REMAPPED)
bufp->b_proc = NULL;
}
diff --git a/usr/src/uts/common/os/callb.c b/usr/src/uts/common/os/callb.c
index b0a9264762..5c98caac90 100644
--- a/usr/src/uts/common/os/callb.c
+++ b/usr/src/uts/common/os/callb.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
@@ -296,9 +294,9 @@ callb_generic_cpr(void *arg, int code)
#ifdef CPR_NOT_THREAD_SAFE
while (!(cp->cc_events & CALLB_CPR_SAFE))
/* cv_timedwait() returns -1 if it times out. */
- if ((ret = cv_timedwait(&cp->cc_callb_cv,
- cp->cc_lockp,
- lbolt + callb_timeout_sec * hz)) == -1)
+ if ((ret = cv_reltimedwait(&cp->cc_callb_cv,
+ cp->cc_lockp, (callb_timeout_sec * hz),
+ TR_CLOCK_TICK)) == -1)
break;
#endif
break;
diff --git a/usr/src/uts/common/os/clock.c b/usr/src/uts/common/os/clock.c
index 384e17b57d..c0c581a215 100644
--- a/usr/src/uts/common/os/clock.c
+++ b/usr/src/uts/common/os/clock.c
@@ -21,13 +21,11 @@
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
-
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
@@ -76,6 +74,9 @@
#include <sys/timex.h>
#include <sys/inttypes.h>
+#include <sys/sunddi.h>
+#include <sys/clock_impl.h>
+
/*
* clock() is called straight from the clock cyclic; see clock_init().
*
@@ -239,11 +240,73 @@ int32_t pps_calcnt = 0; /* calibration intervals */
int32_t pps_errcnt = 0; /* calibration errors */
int32_t pps_stbcnt = 0; /* stability limit exceeded */
-/* The following variables require no explicit locking */
-volatile clock_t lbolt; /* time in Hz since last boot */
-volatile int64_t lbolt64; /* lbolt64 won't wrap for 2.9 billion yrs */
-
kcondvar_t lbolt_cv;
+
+/*
+ * Hybrid lbolt implementation:
+ *
+ * The service historically provided by the lbolt and lbolt64 variables has
+ * been replaced by the ddi_get_lbolt() and ddi_get_lbolt64() routines, and the
+ * original symbols removed from the system. The once clock driven variables are
+ * now implemented in an event driven fashion, backed by gethrtime() coarsened to
+ * the appropriate clock resolution. The default event driven implementation is
+ * complemented by a cyclic driven one, active only during periods of intense
+ * activity around the DDI lbolt routines, when a lbolt specific cyclic is
+ * reprogrammed to fire at a clock tick interval to serve consumers of lbolt who
+ * rely on the original low cost of consulting a memory position.
+ *
+ * The implementation uses the number of calls to these routines and the
+ * frequency of these to determine when to transition from event to cyclic
+ * driven and vice-versa. These values are kept on a per CPU basis for
+ * scalability reasons and to prevent CPUs from constantly invalidating a single
+ * cache line when modifying a global variable. The transition from event to
+ * cyclic mode happens once the thresholds are crossed, and activity on any CPU
+ * can cause such a transition.
+ *
+ * The lbolt_hybrid function pointer is called by ddi_get_lbolt() and
+ * ddi_get_lbolt64(), and will point to lbolt_event_driven() or
+ * lbolt_cyclic_driven() according to the current mode. When the thresholds
+ * are exceeded, lbolt_event_driven() will reprogram the lbolt cyclic to
+ * fire at a nsec_per_tick interval and increment an internal variable at
+ * each firing. lbolt_hybrid will then point to lbolt_cyclic_driven(), which
+ * will simply return the value of such a variable. lbolt_cyclic() will attempt
+ * to shut itself off at each threshold interval (sampling period for calls
+ * to the DDI lbolt routines), and return to the event driven mode, but will
+ * be prevented from doing so if lbolt_cyclic_driven() is being heavily used.
+ *
+ * lbolt_bootstrap is used during boot to serve lbolt consumers who don't wait
+ * for the cyclic subsystem to be initialized.
+ *
+ */
+static int64_t lbolt_bootstrap(void);
+int64_t lbolt_event_driven(void);
+int64_t lbolt_cyclic_driven(void);
+int64_t (*lbolt_hybrid)(void) = lbolt_bootstrap;
+uint_t lbolt_ev_to_cyclic(caddr_t, caddr_t);
+
+/*
+ * lbolt's cyclic, installed by clock_init().
+ */
+static void lbolt_cyclic(void);
+
+/*
+ * Tunable to keep lbolt in cyclic driven mode. This will prevent the system
+ * from switching back to event driven, once it reaches cyclic mode.
+ */
+static boolean_t lbolt_cyc_only = B_FALSE;
+
+/*
+ * Cache aligned, per CPU structure with lbolt usage statistics.
+ */
+static lbolt_cpu_t *lb_cpu;
+
+/*
+ * Single, cache aligned, structure with all the information required by
+ * the lbolt implementation.
+ */
+lbolt_info_t *lb_info;
+
+
int one_sec = 1; /* turned on once every second */
static int fsflushcnt; /* counter for t_fsflushr */
int dosynctodr = 1; /* patchable; enable/disable sync to TOD chip */
@@ -322,6 +385,8 @@ void (*cpucaps_clock_callout)() = NULL;
extern clock_t clock_tick_proc_max;
+static int64_t deadman_counter = 0;
+
static void
clock(void)
{
@@ -338,6 +403,7 @@ clock(void)
int s;
int do_lgrp_load;
int i;
+ clock_t now = LBOLT_NO_ACCOUNT; /* current tick */
if (panicstr)
return;
@@ -405,8 +471,10 @@ clock(void)
do_lgrp_load = 1;
}
- if (one_sec)
+ if (one_sec) {
loadavg_update();
+ deadman_counter++;
+ }
/*
* First count the threads waiting on kpreempt queues in each
@@ -544,15 +612,6 @@ clock(void)
clock_tick_schedule(one_sec);
/*
- * bump time in ticks
- *
- * We rely on there being only one clock thread and hence
- * don't need a lock to protect lbolt.
- */
- lbolt++;
- atomic_add_64((uint64_t *)&lbolt64, (int64_t)1);
-
- /*
* Check for a callout that needs be called from the clock
* thread to support the membership protocol in a clustered
* system. Copy the function pointer so that we can reset
@@ -753,7 +812,7 @@ clock(void)
* the clock; record that.
*/
clock_adj_hist[adj_hist_entry++ %
- CLOCK_ADJ_HIST_SIZE] = lbolt64;
+ CLOCK_ADJ_HIST_SIZE] = now;
s = hr_clock_lock();
timedelta = (int64_t)drift*NANOSEC;
hr_clock_unlock(s);
@@ -882,30 +941,84 @@ clock(void)
void
clock_init(void)
{
- cyc_handler_t hdlr;
- cyc_time_t when;
-
- hdlr.cyh_func = (cyc_func_t)clock;
- hdlr.cyh_level = CY_LOCK_LEVEL;
- hdlr.cyh_arg = NULL;
+ cyc_handler_t clk_hdlr, timer_hdlr, lbolt_hdlr;
+ cyc_time_t clk_when, lbolt_when;
+ int i, sz;
+ intptr_t buf;
- when.cyt_when = 0;
- when.cyt_interval = nsec_per_tick;
+ /*
+ * Setup handler and timer for the clock cyclic.
+ */
+ clk_hdlr.cyh_func = (cyc_func_t)clock;
+ clk_hdlr.cyh_level = CY_LOCK_LEVEL;
+ clk_hdlr.cyh_arg = NULL;
- mutex_enter(&cpu_lock);
- clock_cyclic = cyclic_add(&hdlr, &when);
- mutex_exit(&cpu_lock);
+ clk_when.cyt_when = 0;
+ clk_when.cyt_interval = nsec_per_tick;
/*
* cyclic_timer is dedicated to the ddi interface, which
* uses the same clock resolution as the system one.
*/
- hdlr.cyh_func = (cyc_func_t)cyclic_timer;
- hdlr.cyh_level = CY_LOCK_LEVEL;
- hdlr.cyh_arg = NULL;
+ timer_hdlr.cyh_func = (cyc_func_t)cyclic_timer;
+ timer_hdlr.cyh_level = CY_LOCK_LEVEL;
+ timer_hdlr.cyh_arg = NULL;
+ /*
+ * Setup the necessary structures for the lbolt cyclic and add the
+ * soft interrupt which will switch from event to cyclic mode when
+ * under high pil.
+ */
+ lbolt_hdlr.cyh_func = (cyc_func_t)lbolt_cyclic;
+ lbolt_hdlr.cyh_level = CY_LOCK_LEVEL;
+ lbolt_hdlr.cyh_arg = NULL;
+
+ lbolt_when.cyt_interval = nsec_per_tick;
+
+ if (lbolt_cyc_only) {
+ lbolt_when.cyt_when = 0;
+ lbolt_hybrid = lbolt_cyclic_driven;
+ } else {
+ lbolt_when.cyt_when = CY_INFINITY;
+ lbolt_hybrid = lbolt_event_driven;
+ }
+
+ /*
+ * Allocate cache line aligned space for the per CPU lbolt data and
+ * lb_info structure. We also initialize these structures with their
+ * default values and install the softint to change from event to
+ * cyclic driven mode.
+ */
+ sz = sizeof (lbolt_info_t) + CPU_CACHE_COHERENCE_SIZE;
+ buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
+ lb_info = (lbolt_info_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);
+
+ if (hz != HZ_DEFAULT)
+ lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL *
+ hz/HZ_DEFAULT;
+ else
+ lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL;
+
+ lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS;
+
+ sz = (sizeof (lbolt_info_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
+ buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
+ lb_cpu = (lbolt_cpu_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);
+
+ for (i = 0; i < max_ncpus; i++)
+ lb_cpu[i].lbc_counter = lb_info->lbi_thresh_calls;
+
+ lbolt_softint_add();
+
+ /*
+ * Grab cpu_lock and install all three cyclics.
+ */
mutex_enter(&cpu_lock);
- ddi_timer_cyclic = cyclic_add(&hdlr, &when);
+
+ clock_cyclic = cyclic_add(&clk_hdlr, &clk_when);
+ ddi_timer_cyclic = cyclic_add(&timer_hdlr, &clk_when);
+ lb_info->lbi_cyclic_id = cyclic_add(&lbolt_hdlr, &lbolt_when);
+
mutex_exit(&cpu_lock);
}
@@ -1631,8 +1744,8 @@ delay_common(clock_t ticks)
return;
}
- deadline = lbolt + ticks;
- while ((timeleft = deadline - lbolt) > 0) {
+ deadline = ddi_get_lbolt() + ticks;
+ while ((timeleft = deadline - ddi_get_lbolt()) > 0) {
mutex_enter(&t->t_delay_lock);
id = timeout_default(delay_wakeup, t, timeleft);
cv_wait(&t->t_delay_cv, &t->t_delay_lock);
@@ -1686,7 +1799,7 @@ delay_sig(clock_t ticks)
return (0);
}
- deadline = lbolt + ticks;
+ deadline = ddi_get_lbolt() + ticks;
mutex_enter(&t->t_delay_lock);
do {
rc = cv_timedwait_sig(&t->t_delay_cv,
@@ -1807,15 +1920,6 @@ deadman(void)
if (CPU->cpu_id != panic_cpu.cpu_id)
return;
- /*
- * If we're panicking, the deadman cyclic continues to increase
- * lbolt in case the dump device driver relies on this for
- * timeouts. Note that we rely on deadman() being invoked once
- * per second, and credit lbolt and lbolt64 with hz ticks each.
- */
- lbolt += hz;
- lbolt64 += hz;
-
if (!deadman_panic_timers)
return; /* allow all timers to be manually disabled */
@@ -1840,8 +1944,8 @@ deadman(void)
return;
}
- if (lbolt != CPU->cpu_deadman_lbolt) {
- CPU->cpu_deadman_lbolt = lbolt;
+ if (deadman_counter != CPU->cpu_deadman_counter) {
+ CPU->cpu_deadman_counter = deadman_counter;
CPU->cpu_deadman_countdown = deadman_seconds;
return;
}
@@ -1879,7 +1983,7 @@ deadman(void)
static void
deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
- cpu->cpu_deadman_lbolt = 0;
+ cpu->cpu_deadman_counter = 0;
cpu->cpu_deadman_countdown = deadman_seconds;
hdlr->cyh_func = (cyc_func_t)deadman;
@@ -1892,9 +1996,6 @@ deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
* more likely that only one CPU will panic in case of a
* timeout. This is (strictly speaking) an aesthetic, not a
* technical consideration.
- *
- * The interval must be one second in accordance with the
- * code in deadman() above to increase lbolt during panic.
*/
when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
when->cyt_interval = NANOSEC;
@@ -2184,3 +2285,226 @@ calcloadavg(int nrun, uint64_t *hp_ave)
hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
}
}
+
+/*
+ * lbolt_hybrid() is used by ddi_get_lbolt() and ddi_get_lbolt64() to
+ * calculate the value of lbolt according to the current mode. In the event
+ * driven mode (the default), lbolt is calculated by dividing the current hires
+ * time by the number of nanoseconds per clock tick. In the cyclic driven mode
+ * an internal variable is incremented at each firing of the lbolt cyclic
+ * and returned by lbolt_cyclic_driven().
+ *
+ * The system will transition from event to cyclic driven mode when the number
+ * of calls to lbolt_event_driven() exceeds the (per CPU) threshold within a
+ * window of time. It does so by reprogramming lbolt_cyclic from CY_INFINITY to
+ * nsec_per_tick. The lbolt cyclic will remain ON while at least one CPU is
+ * causing enough activity to cross the thresholds.
+ */
+static int64_t
+lbolt_bootstrap(void)
+{
+ return (0);
+}
+
+/* ARGSUSED */
+uint_t
+lbolt_ev_to_cyclic(caddr_t arg1, caddr_t arg2)
+{
+ hrtime_t ts, exp;
+ int ret;
+
+ ASSERT(lbolt_hybrid != lbolt_cyclic_driven);
+
+ kpreempt_disable();
+
+ ts = gethrtime();
+ lb_info->lbi_internal = (ts/nsec_per_tick);
+
+ /*
+ * Align the next expiration to a clock tick boundary.
+ */
+ exp = ts + nsec_per_tick - 1;
+ exp = (exp/nsec_per_tick) * nsec_per_tick;
+
+ ret = cyclic_reprogram(lb_info->lbi_cyclic_id, exp);
+ ASSERT(ret);
+
+ lbolt_hybrid = lbolt_cyclic_driven;
+ lb_info->lbi_cyc_deactivate = B_FALSE;
+ lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
+
+ kpreempt_enable();
+
+ ret = atomic_dec_32_nv(&lb_info->lbi_token);
+ ASSERT(ret == 0);
+
+ return (1);
+}
+
+int64_t
+lbolt_event_driven(void)
+{
+ hrtime_t ts;
+ int64_t lb;
+ int ret, cpu = CPU->cpu_seqid;
+
+ ts = gethrtime();
+ ASSERT(ts > 0);
+
+ ASSERT(nsec_per_tick > 0);
+ lb = (ts/nsec_per_tick);
+
+ /*
+ * Switch to cyclic mode if the number of calls to this routine
+ * has reached the threshold within the interval.
+ */
+ if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {
+
+ if (--lb_cpu[cpu].lbc_counter == 0) {
+ /*
+ * Reached the threshold within the interval, reset
+ * the usage statistics.
+ */
+ lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
+ lb_cpu[cpu].lbc_cnt_start = lb;
+
+ /*
+ * Make sure only one thread reprograms the
+ * lbolt cyclic and changes the mode.
+ */
+ if (panicstr == NULL &&
+ atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {
+
+ if (lbolt_hybrid == lbolt_cyclic_driven) {
+ ret = atomic_dec_32_nv(
+ &lb_info->lbi_token);
+ ASSERT(ret == 0);
+ return (lb);
+ }
+
+ lbolt_softint_post();
+ }
+ }
+ } else {
+ /*
+ * Exceeded the interval, reset the usage statistics.
+ */
+ lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
+ lb_cpu[cpu].lbc_cnt_start = lb;
+ }
+
+ ASSERT(lb >= lb_info->lbi_debug_time);
+
+ return (lb - lb_info->lbi_debug_time);
+}
+
+int64_t
+lbolt_cyclic_driven(void)
+{
+ int64_t lb = lb_info->lbi_internal;
+ int cpu = CPU->cpu_seqid;
+
+ if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {
+
+ if (lb_cpu[cpu].lbc_counter == 0)
+ /*
+ * Reached the threshold within the interval,
+ * prevent the lbolt cyclic from turning itself
+ * off.
+ */
+ lb_info->lbi_cyc_deactivate = B_FALSE;
+ else
+ lb_cpu[cpu].lbc_counter--;
+ } else {
+ /*
+ * Only reset the usage statistics when the interval has
+ * been exceeded.
+ */
+ lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
+ lb_cpu[cpu].lbc_cnt_start = lb;
+ }
+
+ ASSERT(lb >= lb_info->lbi_debug_time);
+
+ return (lb - lb_info->lbi_debug_time);
+}
+
+/*
+ * The lbolt_cyclic() routine will fire at a nsec_per_tick rate to satisfy
+ * performance needs of ddi_get_lbolt() and ddi_get_lbolt64() consumers.
+ * It is inactive by default, and will be activated when switching from event
+ * to cyclic driven lbolt. The cyclic will turn itself off unless signaled
+ * by lbolt_cyclic_driven().
+ */
+static void
+lbolt_cyclic(void)
+{
+ int ret;
+
+ lb_info->lbi_internal++;
+
+ if (!lbolt_cyc_only) {
+
+ if (lb_info->lbi_cyc_deactivate) {
+ /*
+ * Switching from cyclic to event driven mode.
+ */
+ if (atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {
+
+ if (lbolt_hybrid == lbolt_event_driven) {
+ ret = atomic_dec_32_nv(
+ &lb_info->lbi_token);
+ ASSERT(ret == 0);
+ return;
+ }
+
+ kpreempt_disable();
+
+ lbolt_hybrid = lbolt_event_driven;
+ ret = cyclic_reprogram(lb_info->lbi_cyclic_id,
+ CY_INFINITY);
+ ASSERT(ret);
+
+ kpreempt_enable();
+
+ ret = atomic_dec_32_nv(&lb_info->lbi_token);
+ ASSERT(ret == 0);
+ }
+ }
+
+ /*
+ * The lbolt cyclic should not try to deactivate itself before
+ * the sampling period has elapsed.
+ */
+ if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >=
+ lb_info->lbi_thresh_interval) {
+ lb_info->lbi_cyc_deactivate = B_TRUE;
+ lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
+ }
+ }
+}
+
+/*
+ * Since the lbolt service was historically cyclic driven, it must be 'stopped'
+ * when the system drops into the kernel debugger. lbolt_debug_entry() is
+ * called by the KDI system claim callbacks to record a hires timestamp at
+ * debug enter time. lbolt_debug_return() is called by the system release
+ * callbacks to account for the time spent in the debugger. The value is then
+ * accumulated in the lb_info structure and used by lbolt_event_driven() and
+ * lbolt_cyclic_driven(), as well as the mdb_get_lbolt() routine.
+ */
+void
+lbolt_debug_entry(void)
+{
+ lb_info->lbi_debug_ts = gethrtime();
+}
+
+void
+lbolt_debug_return(void)
+{
+ if (nsec_per_tick > 0)
+ lb_info->lbi_debug_time +=
+ ((gethrtime() - lb_info->lbi_debug_ts)/nsec_per_tick);
+
+ lb_info->lbi_debug_ts = 0;
+}
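Editor's note: the clock.c changes above replace the memory-resident lbolt with a function-pointer dispatch. The following is a minimal, hypothetical userland sketch (not part of this patch) of the idea: the event-driven path just divides a monotonic hires timestamp by the tick length, and the cyclic-driven path reads a counter bumped once per tick. clock_gettime(CLOCK_MONOTONIC) stands in for the kernel's gethrtime(), HZ is an assumed example rate, and the per-CPU call accounting, softint hand-off, and debug-time adjustment are omitted.

	/* sketch_lbolt.c: illustrative only; all names here are placeholders */
	#include <stdio.h>
	#include <stdint.h>
	#include <time.h>

	#define	NANOSEC		1000000000LL
	#define	HZ		100			/* example tick rate */
	#define	NSEC_PER_TICK	(NANOSEC / HZ)

	static int64_t
	hires_now(void)
	{
		struct timespec ts;

		(void) clock_gettime(CLOCK_MONOTONIC, &ts);
		return ((int64_t)ts.tv_sec * NANOSEC + ts.tv_nsec);
	}

	/* Event-driven mode: derive the tick count from the hires clock on demand. */
	static int64_t
	sketch_lbolt_event_driven(void)
	{
		return (hires_now() / NSEC_PER_TICK);
	}

	/* Cyclic-driven mode: return a counter a periodic handler would increment. */
	static int64_t sketch_lbolt_internal;

	static int64_t
	sketch_lbolt_cyclic_driven(void)
	{
		return (sketch_lbolt_internal);
	}

	/* ddi_get_lbolt()/ddi_get_lbolt64() dispatch through a pointer like this one. */
	static int64_t (*sketch_lbolt_hybrid)(void) = sketch_lbolt_event_driven;

	int
	main(void)
	{
		printf("event driven:  %lld\n", (long long)sketch_lbolt_hybrid());

		/* Simulate the switch to cyclic-driven mode. */
		sketch_lbolt_internal = sketch_lbolt_event_driven();
		sketch_lbolt_hybrid = sketch_lbolt_cyclic_driven;
		printf("cyclic driven: %lld\n", (long long)sketch_lbolt_hybrid());
		return (0);
	}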
diff --git a/usr/src/uts/common/os/clock_tick.c b/usr/src/uts/common/os/clock_tick.c
index b1b94720ff..9d2005660d 100644
--- a/usr/src/uts/common/os/clock_tick.c
+++ b/usr/src/uts/common/os/clock_tick.c
@@ -33,6 +33,7 @@
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/clock_tick.h>
+#include <sys/clock_impl.h>
#include <sys/sysmacros.h>
#include <vm/rm.h>
@@ -274,7 +275,7 @@ clock_tick_schedule_one(clock_tick_set_t *csp, int pending, processorid_t cid)
*/
ctp = clock_tick_cpu[cid];
mutex_enter(&ctp->ct_lock);
- ctp->ct_lbolt = lbolt;
+ ctp->ct_lbolt = (clock_t)LBOLT_NO_ACCOUNT;
ctp->ct_pending += pending;
ctp->ct_start = csp->ct_start;
ctp->ct_end = csp->ct_end;
@@ -441,7 +442,8 @@ clock_tick_schedule(int one_sec)
if (clock_tick_scan >= end)
clock_tick_scan = 0;
- clock_tick_execute_common(0, clock_tick_scan, end, lbolt, 1);
+ clock_tick_execute_common(0, clock_tick_scan, end,
+ (clock_t)LBOLT_NO_ACCOUNT, 1);
return;
}
@@ -474,7 +476,7 @@ clock_tick_schedule(int one_sec)
* we want to handle this before we block on anything and allow
* the pinned thread below the current thread to escape.
*/
- clock_tick_process(CPU, lbolt, clock_tick_pending);
+ clock_tick_process(CPU, (clock_t)LBOLT_NO_ACCOUNT, clock_tick_pending);
mutex_enter(&clock_tick_lock);
diff --git a/usr/src/uts/common/os/condvar.c b/usr/src/uts/common/os/condvar.c
index 18406bea26..f6f6a4dba3 100644
--- a/usr/src/uts/common/os/condvar.c
+++ b/usr/src/uts/common/os/condvar.c
@@ -224,14 +224,36 @@ clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
hrtime_t hrtim;
+ clock_t now = ddi_get_lbolt();
- if (tim <= lbolt)
+ if (tim <= now)
return (-1);
- hrtim = TICK_TO_NSEC(tim - lbolt);
+ hrtim = TICK_TO_NSEC(tim - now);
return (cv_timedwait_hires(cvp, mp, hrtim, nsec_per_tick, 0));
}
+/*
+ * Same as cv_timedwait() except that the third argument is a relative
+ * timeout value, as opposed to an absolute one. There is also a fourth
+ * argument that specifies how accurately the timeout must be implemented.
+ */
+clock_t
+cv_reltimedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t delta, time_res_t res)
+{
+ hrtime_t exp;
+
+ ASSERT(TIME_RES_VALID(res));
+
+ if (delta <= 0)
+ return (-1);
+
+ if ((exp = TICK_TO_NSEC(delta)) < 0)
+ exp = CY_INFINITY;
+
+ return (cv_timedwait_hires(cvp, mp, exp, time_res[res], 0));
+}
+
clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
hrtime_t res, int flag)
@@ -445,11 +467,30 @@ cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
hrtime_t hrtim;
- hrtim = TICK_TO_NSEC(tim - lbolt);
+ hrtim = TICK_TO_NSEC(tim - ddi_get_lbolt());
return (cv_timedwait_sig_hires(cvp, mp, hrtim, nsec_per_tick, 0));
}
/*
+ * Same as cv_timedwait_sig() except that the third argument is a relative
+ * timeout value, as opposed to an absolute one. There is also a fourth
+ * argument that specifies how accurately the timeout must be implemented.
+ */
+clock_t
+cv_reltimedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t delta,
+ time_res_t res)
+{
+ hrtime_t exp;
+
+ ASSERT(TIME_RES_VALID(res));
+
+ if ((exp = TICK_TO_NSEC(delta)) < 0)
+ exp = CY_INFINITY;
+
+ return (cv_timedwait_sig_hires(cvp, mp, exp, time_res[res], 0));
+}
+
+/*
* Like cv_wait_sig_swap but allows the caller to indicate (with a
* non-NULL sigret) that they will take care of signalling the cv
* after wakeup, if necessary. This is a vile hack that should only
@@ -607,10 +648,10 @@ cv_wait_stop(kcondvar_t *cvp, kmutex_t *mp, int wakeup_time)
/*
* Wakeup in wakeup_time milliseconds, i.e., human time.
*/
- tim = lbolt + MSEC_TO_TICK(wakeup_time);
+ tim = ddi_get_lbolt() + MSEC_TO_TICK(wakeup_time);
mutex_enter(&t->t_wait_mutex);
id = realtime_timeout_default((void (*)(void *))cv_wakeup, t,
- tim - lbolt);
+ tim - ddi_get_lbolt());
thread_lock(t); /* lock the thread */
cv_block((condvar_impl_t *)cvp);
thread_unlock_nopreempt(t);
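Editor's note: the cv_reltimedwait()/cv_reltimedwait_sig() entry points added above are what the rest of this patch converts callers to. A hypothetical before/after fragment (some_cv and some_lock are placeholders, not taken from any file in this diff) shows the mechanical pattern:

	/* Before: build an absolute deadline from the current lbolt value. */
	(void) cv_timedwait(&some_cv, &some_lock,
	    ddi_get_lbolt() + drv_usectohz(100000));

	/* After: pass the relative tick count plus the required resolution. */
	(void) cv_reltimedwait(&some_cv, &some_lock,
	    drv_usectohz(100000), TR_CLOCK_TICK);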
diff --git a/usr/src/uts/common/os/damap.c b/usr/src/uts/common/os/damap.c
index b25860e20e..0fe2279c7c 100644
--- a/usr/src/uts/common/os/damap.c
+++ b/usr/src/uts/common/os/damap.c
@@ -975,7 +975,7 @@ dam_addr_stable_cb(void *arg)
int tpend = 0;
int64_t next_tmov = mapp->dam_stabletmo;
int64_t tmo_delta;
- int64_t ts = lbolt64;
+ int64_t ts = ddi_get_lbolt64();
DTRACE_PROBE1(damap__addr__stable__cb__entry, dam_t *, mapp);
DAM_LOCK(mapp, MAP_LOCK);
@@ -1129,7 +1129,7 @@ dam_add_report(dam_t *mapp, dam_da_t *passp, id_t addrid, int report)
passp->da_last_report = gethrtime();
mapp->dam_last_update = gethrtime();
passp->da_report_cnt++;
- passp->da_deadline = lbolt64 + mapp->dam_stabletmo;
+ passp->da_deadline = ddi_get_lbolt64() + mapp->dam_stabletmo;
if (report == RPT_ADDR_DEL)
passp->da_flags |= DA_RELE;
else if (report == RPT_ADDR_ADD)
diff --git a/usr/src/uts/common/os/ddi.c b/usr/src/uts/common/os/ddi.c
index ee817d9c8d..a37d91e92a 100644
--- a/usr/src/uts/common/os/ddi.c
+++ b/usr/src/uts/common/os/ddi.c
@@ -23,12 +23,10 @@
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* UNIX Device Driver Interface functions
*
@@ -305,7 +303,7 @@ drv_getparm(unsigned int parm, void *valuep)
mutex_exit(&p->p_lock);
break;
case LBOLT:
- *(clock_t *)valuep = lbolt;
+ *(clock_t *)valuep = ddi_get_lbolt();
break;
case TIME:
if ((now = gethrestime_sec()) == 0) {
@@ -1220,5 +1218,5 @@ kvtoppid(caddr_t addr)
void
time_to_wait(clock_t *now, clock_t time)
{
- *now = lbolt + time;
+ *now = ddi_get_lbolt() + time;
}
diff --git a/usr/src/uts/common/os/ddi_intr_irm.c b/usr/src/uts/common/os/ddi_intr_irm.c
index e2ef224286..c20987c55c 100644
--- a/usr/src/uts/common/os/ddi_intr_irm.c
+++ b/usr/src/uts/common/os/ddi_intr_irm.c
@@ -607,7 +607,7 @@ i_ddi_irm_set_cb(dev_info_t *dip, boolean_t has_cb_flag)
static void
irm_balance_thread(ddi_irm_pool_t *pool_p)
{
- clock_t interval, wakeup;
+ clock_t interval;
DDI_INTR_IRMDBG((CE_CONT, "irm_balance_thread: pool_p %p\n",
(void *)pool_p));
@@ -637,9 +637,8 @@ irm_balance_thread(ddi_irm_pool_t *pool_p)
if ((interval > 0) &&
!(pool_p->ipool_flags & DDI_IRM_FLAG_WAITERS) &&
!(pool_p->ipool_flags & DDI_IRM_FLAG_EXIT)) {
- wakeup = ddi_get_lbolt() + interval;
- (void) cv_timedwait(&pool_p->ipool_cv,
- &pool_p->ipool_lock, wakeup);
+ (void) cv_reltimedwait(&pool_p->ipool_cv,
+ &pool_p->ipool_lock, interval, TR_CLOCK_TICK);
}
/* Check if awakened to exit */
diff --git a/usr/src/uts/common/os/devcache.c b/usr/src/uts/common/os/devcache.c
index 1e32e5c950..a96d178d66 100644
--- a/usr/src/uts/common/os/devcache.c
+++ b/usr/src/uts/common/os/devcache.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1051,9 +1051,9 @@ nvpflush_daemon(void)
for (;;) {
CALLB_CPR_SAFE_BEGIN(&cprinfo);
while (do_nvpflush == 0) {
- clk = cv_timedwait(&nvpflush_cv, &nvpflush_lock,
- ddi_get_lbolt() +
- (nvpdaemon_idle_time * TICKS_PER_SECOND));
+ clk = cv_reltimedwait(&nvpflush_cv, &nvpflush_lock,
+ (nvpdaemon_idle_time * TICKS_PER_SECOND),
+ TR_CLOCK_TICK);
if ((clk == -1 && do_nvpflush == 0 &&
nvpflush_timer_busy == 0) || sys_shutdown) {
/*
diff --git a/usr/src/uts/common/os/dumpsubr.c b/usr/src/uts/common/os/dumpsubr.c
index d7c93ada2e..e8d5a166e3 100644
--- a/usr/src/uts/common/os/dumpsubr.c
+++ b/usr/src/uts/common/os/dumpsubr.c
@@ -69,6 +69,7 @@
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
+#include <sys/clock_impl.h>
#include <bzip2/bzlib.h>
@@ -2553,6 +2554,11 @@ dumpsys(void)
}
/*
+ * Store a hires timestamp so we can look it up during debugging.
+ */
+ lbolt_debug_entry();
+
+ /*
* Leave room for the message and ereport save areas and terminal dump
* header.
*/
diff --git a/usr/src/uts/common/os/fork.c b/usr/src/uts/common/os/fork.c
index be432f73bc..52d41ac4b9 100644
--- a/usr/src/uts/common/os/fork.c
+++ b/usr/src/uts/common/os/fork.c
@@ -1112,7 +1112,7 @@ getproc(proc_t **cpp, int kernel)
flist_fork(P_FINFO(pp), P_FINFO(cp));
gethrestime(&uarea->u_start);
- uarea->u_ticks = lbolt;
+ uarea->u_ticks = ddi_get_lbolt();
uarea->u_mem = rm_asrss(pp->p_as);
uarea->u_acflag = AFORK;
diff --git a/usr/src/uts/common/os/kstat_fr.c b/usr/src/uts/common/os/kstat_fr.c
index 07ceb0c30d..308fa30837 100644
--- a/usr/src/uts/common/os/kstat_fr.c
+++ b/usr/src/uts/common/os/kstat_fr.c
@@ -845,7 +845,7 @@ system_misc_kstat_update(kstat_t *ksp, int rw)
if (curproc->p_zone->zone_id == 0) {
zone_boot_time = boot_time;
- zone_lbolt = lbolt;
+ zone_lbolt = ddi_get_lbolt();
} else {
struct timeval tvp;
hrt2tv(curproc->p_zone->zone_zsched->p_mstart, &tvp);
diff --git a/usr/src/uts/common/os/logsubr.c b/usr/src/uts/common/os/logsubr.c
index a473e10ae9..2f01d90320 100644
--- a/usr/src/uts/common/os/logsubr.c
+++ b/usr/src/uts/common/os/logsubr.c
@@ -362,7 +362,8 @@ log_conswitch(log_t *src, log_t *dst)
tmp = hmp->b_next;
hmp->b_next = NULL;
hlc = (log_ctl_t *)hmp->b_rptr;
- hlc->ttime = gethrestime_sec() - (lbolt - hlc->ltime) / hz;
+ hlc->ttime = gethrestime_sec() -
+ (ddi_get_lbolt() - hlc->ltime) / hz;
(void) putq(dst->log_q, hmp);
hmp = tmp;
}
@@ -611,7 +612,7 @@ log_sendmsg(mblk_t *mp, zoneid_t zoneid)
* that contain good data.
*
*/
- lc->ltime = lbolt;
+ lc->ltime = ddi_get_lbolt();
if (timechanged) {
lc->ttime = gethrestime_sec();
} else {
diff --git a/usr/src/uts/common/os/mem_cage.c b/usr/src/uts/common/os/mem_cage.c
index c539e09635..4bfe673595 100644
--- a/usr/src/uts/common/os/mem_cage.c
+++ b/usr/src/uts/common/os/mem_cage.c
@@ -170,7 +170,7 @@ static struct kcage_stats_scan kcage_stats_scan_zero;
KCAGE_STAT_SETZ(scans[kcage_stats.scan_index].m, v)
#define KCAGE_STAT_INC_SCAN_INDEX \
- KCAGE_STAT_SET_SCAN(scan_lbolt, lbolt); \
+ KCAGE_STAT_SET_SCAN(scan_lbolt, ddi_get_lbolt()); \
KCAGE_STAT_SET_SCAN(scan_id, kcage_stats.scan_index); \
kcage_stats.scan_index = \
(kcage_stats.scan_index + 1) % KCAGE_STATS_NSCANS; \
@@ -1721,7 +1721,7 @@ loop:
last_pass = 0;
#ifdef KCAGE_STATS
- scan_start = lbolt;
+ scan_start = ddi_get_lbolt();
#endif
again:
@@ -1943,7 +1943,7 @@ again:
KCAGE_STAT_SET_SCAN(kt_freemem_end, freemem);
KCAGE_STAT_SET_SCAN(kt_kcage_freemem_end, kcage_freemem);
- KCAGE_STAT_SET_SCAN(kt_ticks, lbolt - scan_start);
+ KCAGE_STAT_SET_SCAN(kt_ticks, ddi_get_lbolt() - scan_start);
KCAGE_STAT_INC_SCAN_INDEX;
goto loop;
}
diff --git a/usr/src/uts/common/os/mem_config.c b/usr/src/uts/common/os/mem_config.c
index ac7844da9c..b2ff63eec5 100644
--- a/usr/src/uts/common/os/mem_config.c
+++ b/usr/src/uts/common/os/mem_config.c
@@ -1629,8 +1629,8 @@ delthr_get_freemem(struct mem_handle *mhp)
cv_signal(&proc_pageout->p_cv);
mutex_enter(&mhp->mh_mutex);
- (void) cv_timedwait(&mhp->mh_cv, &mhp->mh_mutex,
- (lbolt + DEL_FREE_WAIT_TICKS));
+ (void) cv_reltimedwait(&mhp->mh_cv, &mhp->mh_mutex,
+ DEL_FREE_WAIT_TICKS, TR_CLOCK_TICK);
mutex_exit(&mhp->mh_mutex);
page_needfree(-(spgcnt_t)free_get);
@@ -2249,8 +2249,8 @@ delete_memory_thread(caddr_t amhp)
*/
MDSTAT_INCR(mhp, ndelay);
CALLB_CPR_SAFE_BEGIN(&cprinfo);
- (void) cv_timedwait(&mhp->mh_cv, &mhp->mh_mutex,
- (lbolt + DEL_BUSY_WAIT_TICKS));
+ (void) cv_reltimedwait(&mhp->mh_cv, &mhp->mh_mutex,
+ DEL_BUSY_WAIT_TICKS, TR_CLOCK_TICK);
CALLB_CPR_SAFE_END(&cprinfo, &mhp->mh_mutex);
}
}
diff --git a/usr/src/uts/common/os/modctl.c b/usr/src/uts/common/os/modctl.c
index 1ddec74d87..93d620d0cd 100644
--- a/usr/src/uts/common/os/modctl.c
+++ b/usr/src/uts/common/os/modctl.c
@@ -3875,7 +3875,7 @@ void
mod_uninstall_daemon(void)
{
callb_cpr_t cprinfo;
- clock_t ticks = 0;
+ clock_t ticks;
mod_aul_thread = curthread;
@@ -3890,10 +3890,9 @@ mod_uninstall_daemon(void)
* the default for a non-DEBUG kernel.
*/
if (mod_uninstall_interval) {
- ticks = ddi_get_lbolt() +
- drv_usectohz(mod_uninstall_interval * 1000000);
- (void) cv_timedwait(&mod_uninstall_cv,
- &mod_uninstall_lock, ticks);
+ ticks = drv_usectohz(mod_uninstall_interval * 1000000);
+ (void) cv_reltimedwait(&mod_uninstall_cv,
+ &mod_uninstall_lock, ticks, TR_CLOCK_TICK);
} else {
cv_wait(&mod_uninstall_cv, &mod_uninstall_lock);
}
diff --git a/usr/src/uts/common/os/panic.c b/usr/src/uts/common/os/panic.c
index 26d4b98e82..ec4bec5c37 100644
--- a/usr/src/uts/common/os/panic.c
+++ b/usr/src/uts/common/os/panic.c
@@ -142,6 +142,7 @@
#include <sys/errorq.h>
#include <sys/panic.h>
#include <sys/fm/util.h>
+#include <sys/clock_impl.h>
/*
* Panic variables which are set once during the QUIESCE state by the
@@ -221,6 +222,11 @@ panicsys(const char *format, va_list alist, struct regs *rp, int on_panic_stack)
t->t_bound_cpu = cp;
t->t_preempt++;
+ /*
+ * Switch lbolt to event driven mode.
+ */
+ lbolt_hybrid = lbolt_event_driven;
+
panic_enter_hw(s);
/*
@@ -275,8 +281,8 @@ panicsys(const char *format, va_list alist, struct regs *rp, int on_panic_stack)
panicstr = (char *)format;
va_copy(panicargs, alist);
- panic_lbolt = lbolt;
- panic_lbolt64 = lbolt64;
+ panic_lbolt = (clock_t)LBOLT_NO_ACCOUNT;
+ panic_lbolt64 = LBOLT_NO_ACCOUNT;
panic_hrestime = hrestime;
panic_hrtime = gethrtime_waitfree();
panic_thread = t;
diff --git a/usr/src/uts/common/os/sched.c b/usr/src/uts/common/os/sched.c
index fa3aa5ba95..c1d6569f11 100644
--- a/usr/src/uts/common/os/sched.c
+++ b/usr/src/uts/common/os/sched.c
@@ -20,16 +20,13 @@
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
-
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
@@ -339,7 +336,7 @@ top:
* we are conservative.
*/
divisor = 1;
- swapout_time = (lbolt - swapin_proc_time) / hz;
+ swapout_time = (ddi_get_lbolt() - swapin_proc_time) / hz;
if (swapout_time > maxslp / 2)
divisor = 2;
@@ -572,10 +569,10 @@ top:
stack_pages = btopr(stack_size);
/* Kernel probe */
TNF_PROBE_4(swapin_lwp, "vm swap swapin", /* CSTYLED */,
- tnf_pid, pid, pp->p_pid,
- tnf_lwpid, lwpid, tp->t_tid,
- tnf_kthread_id, tid, tp,
- tnf_ulong, page_count, stack_pages);
+ tnf_pid, pid, pp->p_pid,
+ tnf_lwpid, lwpid, tp->t_tid,
+ tnf_kthread_id, tid, tp,
+ tnf_ulong, page_count, stack_pages);
rw_enter(&kas.a_lock, RW_READER);
err = segkp_fault(segkp->s_as->a_hat, segkp,
@@ -604,7 +601,8 @@ top:
tp->t_schedflag |= TS_LOAD;
dq_sruninc(tp);
- tp->t_stime = lbolt; /* set swapin time */
+ /* set swapin time */
+ tp->t_stime = ddi_get_lbolt();
thread_unlock(tp);
nswapped--;
@@ -688,7 +686,7 @@ top:
* sleeping.
*/
if (tp->t_state != TS_SLEEP)
- tp->t_stime = lbolt;
+ tp->t_stime = ddi_get_lbolt();
thread_unlock(tp);
nswapped++;
@@ -707,13 +705,13 @@ top:
ws_pages += stack_pages;
/* Kernel probe */
TNF_PROBE_4(swapout_lwp,
- "vm swap swapout",
- /* CSTYLED */,
- tnf_pid, pid, pp->p_pid,
- tnf_lwpid, lwpid, tp->t_tid,
- tnf_kthread_id, tid, tp,
- tnf_ulong, page_count,
- stack_pages);
+ "vm swap swapout",
+ /* CSTYLED */,
+ tnf_pid, pid, pp->p_pid,
+ tnf_lwpid, lwpid, tp->t_tid,
+ tnf_kthread_id, tid, tp,
+ tnf_ulong, page_count,
+ stack_pages);
rw_enter(&kas.a_lock, RW_READER);
err = segkp_fault(segkp->s_as->a_hat,
@@ -764,8 +762,8 @@ top:
"swapout: pp %p pages_pushed %lu", pp, ws_pages);
/* Kernel probe */
TNF_PROBE_2(swapout_process, "vm swap swapout", /* CSTYLED */,
- tnf_pid, pid, pp->p_pid,
- tnf_ulong, page_count, ws_pages);
+ tnf_pid, pid, pp->p_pid,
+ tnf_ulong, page_count, ws_pages);
}
*swrss = ws_pages;
return (swapped_lwps);
@@ -868,7 +866,7 @@ process_swap_queue(void)
ASSERT(tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ));
tp->t_schedflag &= ~(TS_LOAD | TS_ON_SWAPQ);
- tp->t_stime = lbolt; /* swapout time */
+ tp->t_stime = ddi_get_lbolt(); /* swapout time */
disp_lock_exit(&swapped_lock);
lock_clear(&tp->t_lock);
@@ -881,10 +879,10 @@ process_swap_queue(void)
/* Kernel probe */
TNF_PROBE_4(swapout_lwp, "vm swap swapout", /* CSTYLED */,
- tnf_pid, pid, pp->p_pid,
- tnf_lwpid, lwpid, tp->t_tid,
- tnf_kthread_id, tid, tp,
- tnf_ulong, page_count, stack_pages);
+ tnf_pid, pid, pp->p_pid,
+ tnf_lwpid, lwpid, tp->t_tid,
+ tnf_kthread_id, tid, tp,
+ tnf_ulong, page_count, stack_pages);
rw_enter(&kas.a_lock, RW_READER);
err = segkp_fault(segkp->s_as->a_hat, segkp, tp->t_swap,
@@ -930,9 +928,9 @@ process_swap_queue(void)
pp, ws_pages);
/* Kernel probe */
TNF_PROBE_2(swapout_process, "vm swap swapout",
- /* CSTYLED */,
- tnf_pid, pid, pp->p_pid,
- tnf_ulong, page_count, ws_pages);
+ /* CSTYLED */,
+ tnf_pid, pid, pp->p_pid,
+ tnf_ulong, page_count, ws_pages);
}
pp->p_swrss += ws_pages;
disp_lock_enter(&swapped_lock);
diff --git a/usr/src/uts/common/os/sig.c b/usr/src/uts/common/os/sig.c
index 763214a60d..932b4db606 100644
--- a/usr/src/uts/common/os/sig.c
+++ b/usr/src/uts/common/os/sig.c
@@ -1240,8 +1240,8 @@ utstop_timedwait(clock_t ticks)
{
mutex_enter(&thread_stop_lock);
if (num_utstop > 0)
- (void) cv_timedwait(&utstop_cv, &thread_stop_lock,
- ticks + lbolt);
+ (void) cv_reltimedwait(&utstop_cv, &thread_stop_lock, ticks,
+ TR_CLOCK_TICK);
mutex_exit(&thread_stop_lock);
}
diff --git a/usr/src/uts/common/os/softint.c b/usr/src/uts/common/os/softint.c
index 8a9600cabd..ecdb038c79 100644
--- a/usr/src/uts/common/os/softint.c
+++ b/usr/src/uts/common/os/softint.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
@@ -192,7 +190,7 @@ softcall_init(void)
mutex_init(&softcall_lock, NULL, MUTEX_SPIN,
(void *)ipltospl(SPL8));
softcall_state = SOFT_IDLE;
- softcall_tick = lbolt;
+ softcall_tick = ddi_get_lbolt();
/*
* Since softcall_delay is expressed as 1 = 10 milliseconds.
@@ -345,11 +343,11 @@ softcall(void (*func)(void *), void *arg)
intr:
if (softcall_state & SOFT_IDLE) {
softcall_state = SOFT_PEND;
- softcall_tick = lbolt;
+ softcall_tick = ddi_get_lbolt();
mutex_exit(&softcall_lock);
siron();
} else if (softcall_state & (SOFT_DRAIN|SOFT_PEND)) {
- now = lbolt;
+ now = ddi_get_lbolt();
w = now - softcall_tick;
if (w <= softcall_delay || ncpus == 1) {
mutex_exit(&softcall_lock);
@@ -379,7 +377,7 @@ intr:
*/
softcall_pokecount = 0;
}
- softcall_lastpoke = lbolt;
+ softcall_lastpoke = now;
if (!(softcall_state & SOFT_STEAL)) {
softcall_state |= SOFT_STEAL;
@@ -387,7 +385,7 @@ intr:
* We want to give some more chance before
* fishing around again.
*/
- softcall_tick = lbolt;
+ softcall_tick = now;
}
/* softcall_lock will be released by this routine */
@@ -472,7 +470,7 @@ softint(void)
CPUSET_ADD(*softcall_cpuset, cpu_id);
for (;;) {
- softcall_tick = lbolt;
+ softcall_tick = ddi_get_lbolt();
if ((sc = softhead) != NULL) {
func = sc->sc_func;
arg = sc->sc_arg;
diff --git a/usr/src/uts/common/os/strsubr.c b/usr/src/uts/common/os/strsubr.c
index 22bdc86e03..3f64f77cff 100644
--- a/usr/src/uts/common/os/strsubr.c
+++ b/usr/src/uts/common/os/strsubr.c
@@ -205,8 +205,6 @@ static void *str_stack_init(netstackid_t stackid, netstack_t *ns);
static void str_stack_shutdown(netstackid_t stackid, void *arg);
static void str_stack_fini(netstackid_t stackid, void *arg);
-extern void time_to_wait(clock_t *, clock_t);
-
/*
* run_queues is no longer used, but is kept in case some 3rd party
* module/driver decides to use it.
@@ -521,7 +519,7 @@ struct qinit passthru_winit = {
*qpp = qp; \
} \
qp->q_sqflags |= Q_SQQUEUED; \
- qp->q_sqtstamp = lbolt; \
+ qp->q_sqtstamp = ddi_get_lbolt(); \
sq->sq_nqueues++; \
} \
}
@@ -3689,14 +3687,11 @@ streams_bufcall_service(void)
mutex_enter(&strbcall_lock);
}
if (strbcalls.bc_head != NULL) {
- clock_t wt, tick;
-
STRSTAT(bcwaits);
/* Wait for memory to become available */
CALLB_CPR_SAFE_BEGIN(&cprinfo);
- tick = SEC_TO_TICK(60);
- time_to_wait(&wt, tick);
- (void) cv_timedwait(&memavail_cv, &strbcall_lock, wt);
+ (void) cv_reltimedwait(&memavail_cv, &strbcall_lock,
+ SEC_TO_TICK(60), TR_CLOCK_TICK);
CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
}
@@ -3879,7 +3874,7 @@ sqenable(syncq_t *sq)
}
}
- sq->sq_tstamp = lbolt;
+ sq->sq_tstamp = ddi_get_lbolt();
STRSTAT(sqenables);
/* Attempt a taskq dispatch */
@@ -7866,7 +7861,7 @@ link_rempassthru(queue_t *passq)
clock_t
str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
{
- clock_t ret, now, tick;
+ clock_t ret;
if (tim < 0) {
if (nosigs) {
@@ -7879,12 +7874,12 @@ str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
/*
* convert milliseconds to clock ticks
*/
- tick = MSEC_TO_TICK_ROUNDUP(tim);
- time_to_wait(&now, tick);
if (nosigs) {
- ret = cv_timedwait(cvp, mp, now);
+ ret = cv_reltimedwait(cvp, mp,
+ MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
} else {
- ret = cv_timedwait_sig(cvp, mp, now);
+ ret = cv_reltimedwait_sig(cvp, mp,
+ MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
}
} else {
ret = -1;
@@ -8160,7 +8155,7 @@ qenable_locked(queue_t *q)
return;
/* Record the time of qenable */
- q->q_qtstamp = lbolt;
+ q->q_qtstamp = ddi_get_lbolt();
/*
* Put the queue in the stp list and schedule it for background
diff --git a/usr/src/uts/common/os/sunddi.c b/usr/src/uts/common/os/sunddi.c
index 2aed02e371..8c1b4144d0 100644
--- a/usr/src/uts/common/os/sunddi.c
+++ b/usr/src/uts/common/os/sunddi.c
@@ -83,6 +83,7 @@
#include <net/if.h>
#include <sys/rctl.h>
#include <sys/zone.h>
+#include <sys/clock_impl.h>
#include <sys/ddi.h>
extern pri_t minclsyspri;
@@ -5630,7 +5631,13 @@ ddi_get_cred(void)
clock_t
ddi_get_lbolt(void)
{
- return (lbolt);
+ return ((clock_t)lbolt_hybrid());
+}
+
+int64_t
+ddi_get_lbolt64(void)
+{
+ return (lbolt_hybrid());
}
time_t
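Editor's note: with ddi_get_lbolt() now backed by lbolt_hybrid() and a 64-bit ddi_get_lbolt64() exported above, long-lived timestamps are better kept in the 64-bit form, since the 32-bit clock_t value can wrap while lbolt64 effectively cannot. A hypothetical driver-side fragment (variable names are placeholders, not from this patch):

	int64_t	start, elapsed;

	start = ddi_get_lbolt64();		/* wrap-safe tick stamp */
	/* ... perform the timed work ... */
	elapsed = ddi_get_lbolt64() - start;	/* elapsed time in ticks */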
diff --git a/usr/src/uts/common/os/sunmdi.c b/usr/src/uts/common/os/sunmdi.c
index 76422e1678..7ac92f096d 100644
--- a/usr/src/uts/common/os/sunmdi.c
+++ b/usr/src/uts/common/os/sunmdi.c
@@ -3199,9 +3199,9 @@ mdi_pi_free(mdi_pathinfo_t *pip, int flags)
"!%d cmds still pending on path: %s %p",
MDI_PI(pip)->pi_ref_cnt,
mdi_pi_spathname(pip), (void *)pip));
- if (cv_timedwait(&MDI_PI(pip)->pi_ref_cv,
- &MDI_PI(pip)->pi_mutex,
- ddi_get_lbolt() + drv_usectohz(60 * 1000000)) == -1) {
+ if (cv_reltimedwait(&MDI_PI(pip)->pi_ref_cv,
+ &MDI_PI(pip)->pi_mutex, drv_usectohz(60 * 1000000),
+ TR_CLOCK_TICK) == -1) {
/*
* The timeout time reached without ref_cnt being zero
* being signaled.
@@ -3875,9 +3875,9 @@ i_mdi_pi_offline(mdi_pathinfo_t *pip, int flags)
"!%d cmds still pending on path %s %p",
MDI_PI(pip)->pi_ref_cnt, mdi_pi_spathname(pip),
(void *)pip));
- if (cv_timedwait(&MDI_PI(pip)->pi_ref_cv,
- &MDI_PI(pip)->pi_mutex,
- ddi_get_lbolt() + drv_usectohz(60 * 1000000)) == -1) {
+ if (cv_reltimedwait(&MDI_PI(pip)->pi_ref_cv,
+ &MDI_PI(pip)->pi_mutex, drv_usectohz(60 * 1000000),
+ TR_CLOCK_TICK) == -1) {
/*
* The timeout time reached without ref_cnt being zero
* being signaled.
@@ -8051,7 +8051,7 @@ lookup_vhcache_client(mdi_vhci_cache_t *vhcache, char *ct_name, char *ct_addr,
(mod_hash_key_t)name_addr, &hv) == 0) {
if (token) {
token->lt_cct = (mdi_vhcache_client_t *)hv;
- token->lt_cct_lookup_time = lbolt64;
+ token->lt_cct_lookup_time = ddi_get_lbolt64();
}
} else {
if (token) {
@@ -9097,7 +9097,7 @@ vhcache_do_discovery(mdi_vhci_config_t *vhc)
* stale /dev/[r]dsk links.
*/
if (mdi_path_discovery_interval != -1 &&
- lbolt64 >= vhc->vhc_path_discovery_cutoff_time)
+ ddi_get_lbolt64() >= vhc->vhc_path_discovery_cutoff_time)
goto out;
rv = 0;
@@ -9127,7 +9127,7 @@ vhcache_discover_paths(mdi_vhci_t *vh)
NDI_NO_EVENT, BUS_CONFIG_ALL, DDI_MAJOR_T_NONE);
mutex_enter(&vhc->vhc_lock);
- vhc->vhc_path_discovery_cutoff_time = lbolt64 +
+ vhc->vhc_path_discovery_cutoff_time = ddi_get_lbolt64() +
mdi_path_discovery_interval * TICKS_PER_SECOND;
mutex_exit(&vhc->vhc_lock);
rv = 1;
@@ -9339,7 +9339,7 @@ clean_vhcache(mdi_vhci_config_t *vhc)
free_vhcache_phci(cphci);
}
- vhcache->vhcache_clean_time = lbolt64;
+ vhcache->vhcache_clean_time = ddi_get_lbolt64();
rw_exit(&vhcache->vhcache_lock);
vhcache_dirty(vhc);
}
diff --git a/usr/src/uts/common/os/sunpm.c b/usr/src/uts/common/os/sunpm.c
index 8f585d218c..d709c547e1 100644
--- a/usr/src/uts/common/os/sunpm.c
+++ b/usr/src/uts/common/os/sunpm.c
@@ -547,7 +547,7 @@ static dev_info_t *cfb_dip = 0;
static dev_info_t *cfb_dip_detaching = 0;
uint_t cfb_inuse = 0;
static ddi_softintr_t pm_soft_id;
-static clock_t pm_soft_pending;
+static boolean_t pm_soft_pending;
int pm_scans_disabled = 0;
/*
@@ -7732,7 +7732,7 @@ pm_cfb_softint(caddr_t int_handler_arg)
/* acquired in debug_enter before calling pm_cfb_trigger */
pm_cfb_rele();
mutex_enter(&pm_cfb_lock);
- pm_soft_pending = 0;
+ pm_soft_pending = B_FALSE;
mutex_exit(&pm_cfb_lock);
rval = DDI_INTR_CLAIMED;
} else
@@ -7859,23 +7859,19 @@ pm_cfb_trigger(void)
mutex_enter(&pm_cfb_lock);
/*
- * If machine appears to be hung, pulling the keyboard connector of
+ * If the machine appears to be hung, pulling the keyboard connector of
* the console will cause a high level interrupt and go to debug_enter.
* But, if the fb is powered down, this routine will be called to bring
- * it up (by generating a softint to do the work). If soft interrupts
- * are not running, and the keyboard connector is pulled again, the
- * following code detects this condition and calls panic which allows
- * the fb to be brought up from high level.
- *
- * If two nearly simultaneous calls to debug_enter occur (both from
- * high level) the code described above will cause a panic.
+ * it up (by generating a softint to do the work). If a second attempt
+ * at triggering this softint happens before the first one completes,
+ * we panic as softints are most likely not being handled.
*/
- if (lbolt <= pm_soft_pending) {
- panicstr = "pm_cfb_trigger: lbolt not advancing";
+ if (pm_soft_pending) {
+ panicstr = "pm_cfb_trigger: failed to enter the debugger";
panic(panicstr); /* does a power up at any intr level */
/* NOTREACHED */
}
- pm_soft_pending = lbolt;
+ pm_soft_pending = B_TRUE;
mutex_exit(&pm_cfb_lock);
ddi_trigger_softintr(pm_soft_id);
}
diff --git a/usr/src/uts/common/os/taskq.c b/usr/src/uts/common/os/taskq.c
index 93faf53dbc..27c220686c 100644
--- a/usr/src/uts/common/os/taskq.c
+++ b/usr/src/uts/common/os/taskq.c
@@ -1254,7 +1254,7 @@ taskq_thread_wait(taskq_t *tq, kmutex_t *mx, kcondvar_t *cv,
if (timeout < 0)
cv_wait(cv, mx);
else
- ret = cv_timedwait(cv, mx, lbolt + timeout);
+ ret = cv_reltimedwait(cv, mx, timeout, TR_CLOCK_TICK);
if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
CALLB_CPR_SAFE_END(cprinfo, mx);
diff --git a/usr/src/uts/common/os/vm_pageout.c b/usr/src/uts/common/os/vm_pageout.c
index 2a521fdb5d..28045763e5 100644
--- a/usr/src/uts/common/os/vm_pageout.c
+++ b/usr/src/uts/common/os/vm_pageout.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -36,8 +36,6 @@
* contributors.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
@@ -808,7 +806,7 @@ loop:
} else {
nscan_limit = desscan;
}
- pageout_lbolt = lbolt;
+ pageout_lbolt = ddi_get_lbolt();
sample_start = gethrtime();
/*
@@ -830,7 +828,7 @@ loop:
* just every once in a while.
*/
if ((pcount & PAGES_POLL_MASK) == PAGES_POLL_MASK) {
- pageout_cycle_ticks = lbolt - pageout_lbolt;
+ pageout_cycle_ticks = ddi_get_lbolt() - pageout_lbolt;
if (pageout_cycle_ticks >= pageout_ticks) {
++pageout_timeouts;
break;
diff --git a/usr/src/uts/common/os/zone.c b/usr/src/uts/common/os/zone.c
index 61ecbf509a..c2d878f91c 100644
--- a/usr/src/uts/common/os/zone.c
+++ b/usr/src/uts/common/os/zone.c
@@ -2299,7 +2299,7 @@ zone_status_timedwait(zone_t *zone, clock_t tim, zone_status_t status)
clock_t
zone_status_timedwait_sig(zone_t *zone, clock_t tim, zone_status_t status)
{
- clock_t timeleft = tim - lbolt;
+ clock_t timeleft = tim - ddi_get_lbolt();
ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
@@ -4169,8 +4169,8 @@ zone_empty(zone_t *zone)
* which can be called from the exit path.
*/
ASSERT(MUTEX_NOT_HELD(&zonehash_lock));
- while ((waitstatus = zone_status_timedwait_sig(zone, lbolt + hz,
- ZONE_IS_EMPTY)) == -1) {
+ while ((waitstatus = zone_status_timedwait_sig(zone,
+ ddi_get_lbolt() + hz, ZONE_IS_EMPTY)) == -1) {
killall(zone->zone_id);
}
/*
diff --git a/usr/src/uts/common/pcmcia/nexus/pcmcia.c b/usr/src/uts/common/pcmcia/nexus/pcmcia.c
index ed4e21fc55..d60fc96ba8 100644
--- a/usr/src/uts/common/pcmcia/nexus/pcmcia.c
+++ b/usr/src/uts/common/pcmcia/nexus/pcmcia.c
@@ -2379,7 +2379,7 @@ pcmcia_1275_name(int socket, struct pcm_device_info *info,
void
pcmcia_vers1_name(int socket, struct pcm_device_info *info,
- client_handle_t handle)
+ client_handle_t handle)
{
cistpl_vers_1_t vers1;
tuple_t tuple;
@@ -2396,6 +2396,7 @@ pcmcia_vers1_name(int socket, struct pcm_device_info *info,
(i = csx_GetFirstTuple(handle, &tuple)) == SUCCESS) {
i = csx_Parse_CISTPL_VERS_1(handle, &tuple, &vers1);
if (i == SUCCESS) {
+ /* BEGIN CSTYLED */
for (i = 0, len = 0, space = 0; i < vers1.ns; i++) {
if ((space + len + strlen(info->pd_vers1_name)) >=
sizeof (info->pd_vers1_name))
@@ -2412,6 +2413,7 @@ pcmcia_vers1_name(int socket, struct pcm_device_info *info,
len--;
space = 1;
}
+ /* END CSTYLED */
info->pd_vers1_name[len] = '\0';
info->pd_flags |= PCM_NAME_VERS1;
}
@@ -2673,34 +2675,33 @@ pcmcia_get_mem_regs(struct pcm_regs *regs, struct pcm_device_info *info,
&device);
if (ret == CS_SUCCESS) {
- curr_base = 0;
- for (ret = 0; ret < device.num_devices;
- ret++) {
- /* need to order these for real mem first */
- if (device.devnode[ret].type !=
- CISTPL_DEVICE_DTYPE_NULL) {
- /* how to represent types??? */
- regs[num_regs].phys_hi =
- PC_REG_PHYS_HI(0, 0,
+ curr_base = 0;
+ for (ret = 0; ret < device.num_devices; ret++) {
+ /* need to order these for real mem first */
+ if (device.devnode[ret].type !=
+ CISTPL_DEVICE_DTYPE_NULL) {
+ /* how to represent types??? */
+ regs[num_regs].phys_hi =
+ PC_REG_PHYS_HI(0, 0,
pctype,
space,
info->pd_socket,
info->pd_function,
0);
- regs[num_regs].phys_lo = curr_base;
- len = device.devnode[ret].size_in_bytes;
- curr_base += len;
- regs[num_regs].phys_len = len;
- num_regs++;
- } else {
- /*
- * NULL device is a "hole"
- */
- curr_base +=
+ regs[num_regs].phys_lo = curr_base;
+ len = device.devnode[ret].size_in_bytes;
+ curr_base += len;
+ regs[num_regs].phys_len = len;
+ num_regs++;
+ } else {
+ /*
+ * NULL device is a "hole"
+ */
+ curr_base +=
device.devnode[ret].size_in_bytes;
- }
+ }
}
- }
+ }
}
return (num_regs);
}
@@ -2729,22 +2730,25 @@ pcmcia_get_io_regs(struct pcm_regs *regs, struct pcm_device_info *info,
len = 0;
if (csx_GetFirstTuple(info->pd_handle, &tuple) == CS_SUCCESS) {
- if (csx_Parse_CISTPL_CONFIG(info->pd_handle,
- &tuple, &config) != CS_SUCCESS) {
- info->pd_flags |= PCM_NO_CONFIG; /* must be memory */
- return (0);
- }
- curr = 0;
+ if (csx_Parse_CISTPL_CONFIG(info->pd_handle,
+ &tuple, &config) != CS_SUCCESS) {
+ info->pd_flags |= PCM_NO_CONFIG; /* must be memory */
+ return (0);
+ }
+ curr = 0;
+
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ tuple.Socket = info->pd_socket;
+ tuple.Attributes = 0;
+ bzero(tmp, sizeof (tmp));
- tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- tuple.Socket = info->pd_socket;
- tuple.Attributes = 0;
- bzero(tmp, sizeof (tmp));
while (csx_GetNextTuple(info->pd_handle, &tuple) == CS_SUCCESS) {
- bzero(&cftable, sizeof (cftable));
- if (csx_Parse_CISTPL_CFTABLE_ENTRY(info->pd_handle,
- &tuple, &cftable) ==
- CS_SUCCESS) {
+ bzero(&cftable, sizeof (cftable));
+
+ if (csx_Parse_CISTPL_CFTABLE_ENTRY(info->pd_handle,
+ &tuple, &cftable) == CS_SUCCESS) {
+
+ /* BEGIN CSTYLED */
if (cftable.flags & CISTPL_CFTABLE_TPCE_FS_IO) {
/* we have an I/O entry */
if (cftable.io.flags &
@@ -2808,7 +2812,8 @@ pcmcia_get_io_regs(struct pcm_regs *regs, struct pcm_device_info *info,
if (config.last == cftable.index)
break;
}
- }
+ /* END CSTYLED */
+ }
}
if (found == 1) {
/*
@@ -3086,50 +3091,50 @@ pcmcia_create_dev_info(int socket)
card_info.pd_flags |= PCM_MULTI_FUNCTION;
}
for (i = 0; i < functions; i++) {
- register int flags;
- lsocket = CS_MAKE_SOCKET_NUMBER(socket, i);
- card_info.pd_socket = socket;
- card_info.pd_function = i;
+ register int flags;
+ lsocket = CS_MAKE_SOCKET_NUMBER(socket, i);
+ card_info.pd_socket = socket;
+ card_info.pd_function = i;
/*
* new name construction
*/
- if (functions != 1) {
- /* need per function handle */
- card_info.pd_function = i;
- /* get new handle */
- }
- pcmcia_1275_name(lsocket, &card_info,
+ if (functions != 1) {
+ /* need per function handle */
+ card_info.pd_function = i;
+ /* get new handle */
+ }
+ pcmcia_1275_name(lsocket, &card_info,
card_info.pd_handle);
- pcmcia_vers1_name(lsocket, &card_info,
+ pcmcia_vers1_name(lsocket, &card_info,
card_info.pd_handle);
- pcmcia_generic_name(lsocket, &card_info,
+ pcmcia_generic_name(lsocket, &card_info,
card_info.pd_handle);
- flags = card_info.pd_flags;
- if (!(flags & PCM_NAME_1275)) {
- if (flags & PCM_NAME_VERS1) {
- (void) strcpy(card_info.pd_bind_name,
- PCMDEV_NAMEPREF);
- card_info.pd_bind_name[sizeof (PCMDEV_NAMEPREF)] =
- ',';
- (void) strncpy(card_info.pd_bind_name +
- sizeof (PCMDEV_NAMEPREF),
- card_info.pd_vers1_name,
- MODMAXNAMELEN -
- sizeof (PCMDEV_NAMEPREF));
- pcmcia_fix_string(card_info.pd_bind_name);
- } else {
- /*
- * have a CIS but not the right info
- * so treat as generic "pccard"
- */
- (void) strcpy(card_info.pd_generic_name,
- "pccard,memory");
- card_info.pd_flags |= PCM_NAME_GENERIC;
- (void) strcpy(card_info.pd_bind_name,
- "pccard,memory");
+ flags = card_info.pd_flags;
+ if (!(flags & PCM_NAME_1275)) {
+ if (flags & PCM_NAME_VERS1) {
+ (void) strcpy(card_info.pd_bind_name,
+ PCMDEV_NAMEPREF);
+ card_info.pd_bind_name[
+ sizeof (PCMDEV_NAMEPREF)] = ',';
+ (void) strncpy(card_info.pd_bind_name +
+ sizeof (PCMDEV_NAMEPREF),
+ card_info.pd_vers1_name,
+ MODMAXNAMELEN -
+ sizeof (PCMDEV_NAMEPREF));
+ pcmcia_fix_string(card_info.pd_bind_name);
+ } else {
+ /*
+ * have a CIS but not the right info
+ * so treat as generic "pccard"
+ */
+ (void) strcpy(card_info.pd_generic_name,
+ "pccard,memory");
+ card_info.pd_flags |= PCM_NAME_GENERIC;
+ (void) strcpy(card_info.pd_bind_name,
+ "pccard,memory");
+ }
}
- }
- pcmcia_init_devinfo(pdip, &card_info);
+ pcmcia_init_devinfo(pdip, &card_info);
}
return;
}
@@ -4276,7 +4281,6 @@ void
pcmcia_wait_insert(dev_info_t *dip)
{
int i, f, tries, done;
- clock_t tm;
struct pcmcia_adapter *adapt = NULL;
anp_t *nexus;
@@ -4313,10 +4317,9 @@ pcmcia_wait_insert(dev_info_t *dip)
}
}
if (!done) {
- tm = ddi_get_lbolt();
- (void) cv_timedwait(&pcmcia_condvar,
- &pcmcia_global_lock,
- tm + drv_usectohz(100000));
+ (void) cv_reltimedwait(&pcmcia_condvar,
+ &pcmcia_global_lock, drv_usectohz(100000),
+ TR_CLOCK_TICK);
} else {
tries = 0;
}
diff --git a/usr/src/uts/common/pcmcia/pem/pem.c b/usr/src/uts/common/pcmcia/pem/pem.c
index 5a6842ef61..882a290cba 100644
--- a/usr/src/uts/common/pcmcia/pem/pem.c
+++ b/usr/src/uts/common/pcmcia/pem/pem.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -311,6 +311,7 @@ pem_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
return (DDI_FAILURE);
mutex_enter(&pem_intr_lock);
(void) pcmcia_set_em_handler(NULL, NULL, 0, 0x1234, NULL, NULL);
+ tm = drv_usectohz(100000);
while (pem_softint_posted > 0) {
/*
@@ -318,9 +319,8 @@ pem_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
* interrupts to be processed before removing the
* soft interrupt handler.
*/
- tm = ddi_get_lbolt();
- (void) cv_timedwait(&pem_condvar, &pem_intr_lock,
- tm + drv_usectohz(100000));
+ (void) cv_reltimedwait(&pem_condvar, &pem_intr_lock,
+ tm, TR_CLOCK_TICK);
}
ddi_remove_softintr(pem_intr_id);
diff --git a/usr/src/uts/common/rpc/clnt_clts.c b/usr/src/uts/common/rpc/clnt_clts.c
index b4386df80c..202615b9e7 100644
--- a/usr/src/uts/common/rpc/clnt_clts.c
+++ b/usr/src/uts/common/rpc/clnt_clts.c
@@ -579,7 +579,7 @@ call_again:
goto done;
}
- round_trip = lbolt;
+ round_trip = ddi_get_lbolt();
error = clnt_clts_dispatch_send(p->cku_endpnt->e_wq, mp,
&p->cku_addr, call, p->cku_xid, p->cku_cred);
@@ -628,7 +628,7 @@ tryread:
if (lwp != NULL)
lwp->lwp_nostop++;
- cv_timout += lbolt;
+ cv_timout += ddi_get_lbolt();
if (h->cl_nosignal)
while ((cv_wait_ret =
@@ -771,7 +771,7 @@ getresponse:
resp = tmp;
}
- round_trip = lbolt - round_trip;
+ round_trip = ddi_get_lbolt() - round_trip;
/*
* Van Jacobson timer algorithm here, only if NOT a retransmission.
*/
@@ -1006,7 +1006,6 @@ done:
* and try again.
*/
(void) delay(hz/10);
- /* (void) sleep((caddr_t)&lbolt, PZERO-4); */
}
if (stries-- > 0) {
RCSTAT_INCR(p->cku_stats, rcretrans);
diff --git a/usr/src/uts/common/rpc/clnt_cots.c b/usr/src/uts/common/rpc/clnt_cots.c
index 2cce0e918d..362abfe01b 100644
--- a/usr/src/uts/common/rpc/clnt_cots.c
+++ b/usr/src/uts/common/rpc/clnt_cots.c
@@ -792,7 +792,7 @@ clnt_cots_kcallit(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args,
enum clnt_stat status;
struct timeval cwait;
bool_t delay_first = FALSE;
- clock_t ticks;
+ clock_t ticks, now;
RPCLOG(2, "clnt_cots_kcallit, procnum %u\n", procnum);
COTSRCSTAT_INCR(p->cku_stats, rccalls);
@@ -1154,7 +1154,7 @@ dispatch_again:
p->cku_recv_attempts = 1;
#ifdef RPCDEBUG
- time_sent = lbolt;
+ time_sent = ddi_get_lbolt();
#endif
/*
@@ -1182,7 +1182,7 @@ read_again:
if (lwp != NULL)
lwp->lwp_nostop++;
- oldlbolt = lbolt;
+ oldlbolt = ddi_get_lbolt();
timout = wait.tv_sec * drv_usectohz(1000000) +
drv_usectohz(wait.tv_usec) + oldlbolt;
/*
@@ -1267,19 +1267,20 @@ read_again:
if (status != RPC_SUCCESS) {
switch (status) {
case RPC_TIMEDOUT:
+ now = ddi_get_lbolt();
if (interrupted) {
COTSRCSTAT_INCR(p->cku_stats, rcintrs);
p->cku_err.re_status = RPC_INTR;
p->cku_err.re_errno = EINTR;
RPCLOG(1, "clnt_cots_kcallit: xid 0x%x",
p->cku_xid);
- RPCLOG(1, "signal interrupted at %ld", lbolt);
+ RPCLOG(1, "signal interrupted at %ld", now);
RPCLOG(1, ", was sent at %ld\n", time_sent);
} else {
COTSRCSTAT_INCR(p->cku_stats, rctimeouts);
p->cku_err.re_errno = ETIMEDOUT;
RPCLOG(1, "clnt_cots_kcallit: timed out at %ld",
- lbolt);
+ now);
RPCLOG(1, ", was sent at %ld\n", time_sent);
}
break;
@@ -1715,7 +1716,7 @@ connmgr_cwait(struct cm_xprt *cm_entry, const struct timeval *waitp,
*/
timout = waitp->tv_sec * drv_usectohz(1000000) +
- drv_usectohz(waitp->tv_usec) + lbolt;
+ drv_usectohz(waitp->tv_usec) + ddi_get_lbolt();
if (nosignal) {
while ((cv_stat = cv_timedwait(&cm_entry->x_conn_cv,
@@ -1933,7 +1934,7 @@ use_new_conn:
bcopy(lru_entry->x_src.buf, srcaddr->buf, srcaddr->len);
RPCLOG(2, "connmgr_get: call going out on %p\n",
(void *)lru_entry);
- lru_entry->x_time = lbolt;
+ lru_entry->x_time = ddi_get_lbolt();
CONN_HOLD(lru_entry);
if ((i > 1) && (prev != &cm_hd)) {
@@ -2052,7 +2053,7 @@ start_retry_loop:
} else {
CONN_HOLD(cm_entry);
- cm_entry->x_time = lbolt;
+ cm_entry->x_time = ddi_get_lbolt();
mutex_exit(&connmgr_lock);
RPCLOG(2, "connmgr_get: found old "
"transport %p for retry\n",
@@ -2250,7 +2251,7 @@ start_retry_loop:
cm_entry->x_src.len = cm_entry->x_src.maxlen = srcaddr->len;
cm_entry->x_tiptr = tiptr;
- cm_entry->x_time = lbolt;
+ cm_entry->x_time = ddi_get_lbolt();
if (tiptr->tp_info.servtype == T_COTS_ORD)
cm_entry->x_ordrel = TRUE;
@@ -2281,7 +2282,7 @@ start_retry_loop:
*/
cm_entry->x_early_disc = FALSE;
cm_entry->x_needdis = (cm_entry->x_connected == FALSE);
- cm_entry->x_ctime = lbolt;
+ cm_entry->x_ctime = ddi_get_lbolt();
/*
* Notify any threads waiting that the connection attempt is done.
@@ -2407,7 +2408,7 @@ connmgr_wrapconnect(
* actually timed out. So ensure that before the next
* connection attempt we do a disconnect.
*/
- cm_entry->x_ctime = lbolt;
+ cm_entry->x_ctime = ddi_get_lbolt();
cm_entry->x_thread = FALSE;
cv_broadcast(&cm_entry->x_conn_cv);
@@ -2436,7 +2437,7 @@ connmgr_wrapconnect(
}
bcopy(cm_entry->x_src.buf, srcaddr->buf, srcaddr->len);
}
- cm_entry->x_time = lbolt;
+ cm_entry->x_time = ddi_get_lbolt();
mutex_exit(&connmgr_lock);
return (cm_entry);
}
@@ -2462,16 +2463,13 @@ connmgr_dis_and_wait(struct cm_xprt *cm_entry)
}
if (cm_entry->x_waitdis == TRUE) {
- clock_t curlbolt;
clock_t timout;
RPCLOG(8, "connmgr_dis_and_wait waiting for "
"T_DISCON_REQ's ACK for connection %p\n",
(void *)cm_entry);
- curlbolt = ddi_get_lbolt();
- timout = clnt_cots_min_conntout *
- drv_usectohz(1000000) + curlbolt;
+ timout = clnt_cots_min_conntout * drv_usectohz(1000000);
/*
* The TPI spec says that the T_DISCON_REQ
@@ -2479,8 +2477,8 @@ connmgr_dis_and_wait(struct cm_xprt *cm_entry)
* the ACK may never get sent. So don't
* block forever.
*/
- (void) cv_timedwait(&cm_entry->x_dis_cv,
- &connmgr_lock, timout);
+ (void) cv_reltimedwait(&cm_entry->x_dis_cv,
+ &connmgr_lock, timout, TR_CLOCK_TICK);
}
/*
* If we got the ACK, break. If we didn't,
@@ -3711,13 +3709,13 @@ waitforack(calllist_t *e, t_scalar_t ack_prim, const struct timeval *waitp,
while (e->call_reply == NULL) {
if (waitp != NULL) {
timout = waitp->tv_sec * drv_usectohz(MICROSEC) +
- drv_usectohz(waitp->tv_usec) + lbolt;
+ drv_usectohz(waitp->tv_usec);
if (nosignal)
- cv_stat = cv_timedwait(&e->call_cv,
- &clnt_pending_lock, timout);
+ cv_stat = cv_reltimedwait(&e->call_cv,
+ &clnt_pending_lock, timout, TR_CLOCK_TICK);
else
- cv_stat = cv_timedwait_sig(&e->call_cv,
- &clnt_pending_lock, timout);
+ cv_stat = cv_reltimedwait_sig(&e->call_cv,
+ &clnt_pending_lock, timout, TR_CLOCK_TICK);
} else {
if (nosignal)
cv_wait(&e->call_cv, &clnt_pending_lock);
diff --git a/usr/src/uts/common/rpc/rpcib.c b/usr/src/uts/common/rpc/rpcib.c
index 080771852e..3d3635f001 100644
--- a/usr/src/uts/common/rpc/rpcib.c
+++ b/usr/src/uts/common/rpc/rpcib.c
@@ -2419,7 +2419,7 @@ rib_send_resp(CONN *conn, struct clist *cl, uint32_t msgid)
{
rdma_stat ret = RDMA_SUCCESS;
struct rdma_done_list *rd;
- clock_t timout, cv_wait_ret;
+ clock_t cv_wait_ret;
caddr_t *wid = NULL;
rib_qp_t *qp = ctoqp(conn);
@@ -2435,11 +2435,9 @@ rib_send_resp(CONN *conn, struct clist *cl, uint32_t msgid)
/*
* Wait for RDMA_DONE from remote end
*/
- timout =
- drv_usectohz(REPLY_WAIT_TIME * 1000000) + ddi_get_lbolt();
- cv_wait_ret = cv_timedwait(&rd->rdma_done_cv,
- &qp->rdlist_lock,
- timout);
+ cv_wait_ret = cv_reltimedwait(&rd->rdma_done_cv,
+ &qp->rdlist_lock, drv_usectohz(REPLY_WAIT_TIME * 1000000),
+ TR_CLOCK_TICK);
rdma_done_rm(qp, rd);
diff --git a/usr/src/uts/common/rpc/rpcmod.c b/usr/src/uts/common/rpc/rpcmod.c
index 97b9c2a805..cab50d67cd 100644
--- a/usr/src/uts/common/rpc/rpcmod.c
+++ b/usr/src/uts/common/rpc/rpcmod.c
@@ -1617,7 +1617,7 @@ mir_rput(queue_t *q, mblk_t *mp)
* is used in mir_timer().
*/
mir->mir_clntreq = 1;
- mir->mir_use_timestamp = lbolt;
+ mir->mir_use_timestamp = ddi_get_lbolt();
} else {
freemsg(head_mp);
}
@@ -2260,6 +2260,7 @@ mir_timer(void *arg)
queue_t *wq = (queue_t *)arg;
mir_t *mir = (mir_t *)wq->q_ptr;
boolean_t notify;
+ clock_t now;
mutex_enter(&mir->mir_mutex);
@@ -2298,17 +2299,18 @@ mir_timer(void *arg)
* The timer interval can be changed for individual
* streams with the ND variable "mir_idle_timeout".
*/
+ now = ddi_get_lbolt();
if (mir->mir_clntreq > 0 && mir->mir_use_timestamp +
- MSEC_TO_TICK(mir->mir_idle_timeout) - lbolt >= 0) {
+ MSEC_TO_TICK(mir->mir_idle_timeout) - now >= 0) {
clock_t tout;
tout = mir->mir_idle_timeout -
- TICK_TO_MSEC(lbolt - mir->mir_use_timestamp);
+ TICK_TO_MSEC(now - mir->mir_use_timestamp);
if (tout < 0)
tout = 1000;
#if 0
printf("mir_timer[%d < %d + %d]: reset client timer "
- "to %d (ms)\n", TICK_TO_MSEC(lbolt),
+ "to %d (ms)\n", TICK_TO_MSEC(now),
TICK_TO_MSEC(mir->mir_use_timestamp),
mir->mir_idle_timeout, tout);
#endif
@@ -2318,7 +2320,7 @@ mir_timer(void *arg)
return;
}
#if 0
-printf("mir_timer[%d]: doing client timeout\n", lbolt / hz);
+printf("mir_timer[%d]: doing client timeout\n", now / hz);
#endif
/*
* We are disconnecting, but not necessarily
@@ -2441,7 +2443,7 @@ mir_wput(queue_t *q, mblk_t *mp)
* connection is active.
*/
mir->mir_clntreq = 1;
- mir->mir_use_timestamp = lbolt;
+ mir->mir_use_timestamp = ddi_get_lbolt();
}
/*
diff --git a/usr/src/uts/common/rpc/svc.c b/usr/src/uts/common/rpc/svc.c
index 6252978211..7c2f7f2f5e 100644
--- a/usr/src/uts/common/rpc/svc.c
+++ b/usr/src/uts/common/rpc/svc.c
@@ -2044,8 +2044,8 @@ svc_poll(SVCPOOL *pool, SVCMASTERXPRT *xprt, SVCXPRT *clone_xprt)
* for suspend and wait for a request.
*/
pool->p_asleep++;
- timeleft = cv_timedwait_sig(&pool->p_req_cv, &pool->p_req_lock,
- pool->p_timeout + lbolt);
+ timeleft = cv_reltimedwait_sig(&pool->p_req_cv,
+ &pool->p_req_lock, pool->p_timeout, TR_CLOCK_TICK);
/*
* If the drowsy flag is on this means that
diff --git a/usr/src/uts/common/sys/Makefile b/usr/src/uts/common/sys/Makefile
index 9ffc1d4a44..63c569a5c1 100644
--- a/usr/src/uts/common/sys/Makefile
+++ b/usr/src/uts/common/sys/Makefile
@@ -105,6 +105,7 @@ CHKHDRS= \
cladm.h \
class.h \
clconf.h \
+ clock_impl.h \
cmlb.h \
cmn_err.h \
compress.h \
diff --git a/usr/src/uts/common/sys/clock_impl.h b/usr/src/uts/common/sys/clock_impl.h
new file mode 100644
index 0000000000..85f26a16c6
--- /dev/null
+++ b/usr/src/uts/common/sys/clock_impl.h
@@ -0,0 +1,125 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_CLOCK_IMPL_H
+#define _SYS_CLOCK_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (defined(_KERNEL) || defined(_KMEMUSER))
+#include <sys/types.h>
+#include <sys/cpuvar.h>
+#include <sys/cyclic.h>
+#include <sys/time.h>
+
+/*
+ * Default clock rate in Hz.
+ */
+#define HZ_DEFAULT 100
+
+/*
+ * Thresholds over which we switch between event- and cyclic-driven lbolt. The
+ * current default values were derived experimentally; they keep the system in
+ * event-driven mode when idle and switch it to cyclic mode in response to
+ * activity around the lbolt DDI functions.
+ */
+#define LBOLT_THRESH_CALLS (75)
+#define LBOLT_THRESH_INTERVAL (1)
+
+/*
+ * Both lbolt_cpu_t and lbolt_info_t are cache-line sized and aligned;
+ * please take that into consideration when modifying them.
+ */
+typedef struct lbolt_cpu {
+ int64_t lbc_counter; /* number of calls to the DDI lbolt routines */
+	int64_t lbc_cnt_start;	/* beginning of the cnt interval (in ticks) */
+ char lbc_pad[CPU_CACHE_COHERENCE_SIZE - (2 * sizeof (int64_t))];
+} lbolt_cpu_t;
+
+typedef struct lbolt_info {
+ cyclic_id_t lbi_cyclic_id; /* lbolt's cyclic id */
+ int64_t lbi_internal; /* lbolt source when on cyclic mode */
+ int64_t lbi_debug_time; /* time spent in the debugger */
+ int64_t lbi_debug_ts; /* last time we dropped into kmdb */
+ int64_t lbi_thresh_calls; /* max calls per interval */
+ int64_t lbi_thresh_interval; /* interval window for the # of calls */
+ uint32_t lbi_token; /* synchronize cyclic mode switch */
+ boolean_t lbi_cyc_deactivate; /* lbolt_cyclic self deactivation */
+ int64_t lbi_cyc_deac_start; /* deactivation interval */
+} lbolt_info_t;
+
+extern int64_t lbolt_event_driven(void);
+extern int64_t lbolt_cyclic_driven(void);
+extern int64_t (*lbolt_hybrid)(void);
+extern uint_t lbolt_ev_to_cyclic(caddr_t, caddr_t);
+
+extern void lbolt_softint_add(void);
+extern void lbolt_softint_post(void);
+
+extern void lbolt_debug_entry(void);
+extern void lbolt_debug_return(void);
+
+extern lbolt_info_t *lb_info;
+
+/*
+ * LBOLT_WAITFREE provides a non-waiting version of lbolt.
+ */
+#define LBOLT_WAITFREE \
+ (lbolt_hybrid == lbolt_event_driven ? \
+ ((gethrtime_waitfree()/nsec_per_tick) - \
+ lb_info->lbi_debug_time) : \
+ (lb_info->lbi_internal - lb_info->lbi_debug_time))
+
+/*
+ * LBOLT_FASTPATH should *only* be used where the cost of calling
+ * ddi_get_lbolt() affects performance. This is currently only used by the
+ * TCP/IP code and will be removed once it's no longer required.
+ */
+#define LBOLT_FASTPATH \
+ (lbolt_hybrid == lbolt_event_driven ? ddi_get_lbolt() : \
+ (clock_t)(lb_info->lbi_internal - lb_info->lbi_debug_time))
+
+/*
+ * LBOLT_NO_ACCOUNT is used by lbolt consumers that fire at a periodic rate,
+ * such as clock(), for which the lbolt usage statistics are not updated.
+ * This is especially important for consumers whose rate may be modified by
+ * the user, resulting in an unaccounted-for increase in activity around the
+ * lbolt routines that could cause a mode switch.
+ */
+#define LBOLT_NO_ACCOUNT \
+ (lbolt_hybrid == lbolt_event_driven ? \
+ ((gethrtime()/nsec_per_tick) - lb_info->lbi_debug_time) : \
+ (lb_info->lbi_internal - lb_info->lbi_debug_time))
+
+#endif /* _KERNEL || _KMEMUSER */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_CLOCK_IMPL_H */
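For orientation, here is a speculative sketch of how the hybrid dispatch declared in the new clock_impl.h is meant to be consumed. The declarations (lbolt_hybrid, lbolt_event_driven, lbolt_cyclic_driven, LBOLT_WAITFREE) come from the header above, but the wrapper function itself is hypothetical; the real ddi_get_lbolt() implementation is not part of this hunk.

#include <sys/types.h>
#include <sys/clock_impl.h>

/*
 * Hypothetical wrapper, for illustration only: lbolt_hybrid points at
 * either lbolt_event_driven() or lbolt_cyclic_driven(), so a tick read
 * goes through a single indirect call regardless of the current mode.
 * LBOLT_WAITFREE is the macro intended for contexts that must not block.
 */
static clock_t
demo_read_ticks(void)
{
	return ((clock_t)lbolt_hybrid());
}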
diff --git a/usr/src/uts/common/sys/condvar.h b/usr/src/uts/common/sys/condvar.h
index 5d618e6284..be2240a8ed 100644
--- a/usr/src/uts/common/sys/condvar.h
+++ b/usr/src/uts/common/sys/condvar.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -35,8 +34,6 @@
#ifndef _SYS_CONDVAR_H
#define _SYS_CONDVAR_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifndef _ASM
#include <sys/types.h>
#include <sys/time.h>
@@ -64,6 +61,25 @@ typedef enum {
CV_DRIVER
} kcv_type_t;
+/*
+ * Time resolution values used in cv_reltimedwait() and cv_reltimedwait_sig()
+ * to specify how accurately a relative timeout must expire, that is, whether
+ * it can be anticipated or deferred.
+ */
+enum time_res {
+ TR_NANOSEC,
+ TR_MICROSEC,
+ TR_MILLISEC,
+ TR_SEC,
+ TR_CLOCK_TICK,
+ TR_COUNT
+};
+
+typedef enum time_res time_res_t;
+
+extern time_res_t time_res[];
+
+#define TIME_RES_VALID(tr) (tr >= TR_NANOSEC && tr < TR_COUNT)
#if defined(_KERNEL)
@@ -76,8 +92,11 @@ extern void cv_destroy(kcondvar_t *);
extern void cv_wait(kcondvar_t *, kmutex_t *);
extern void cv_wait_stop(kcondvar_t *, kmutex_t *, int);
extern clock_t cv_timedwait(kcondvar_t *, kmutex_t *, clock_t);
+extern clock_t cv_reltimedwait(kcondvar_t *, kmutex_t *, clock_t, time_res_t);
extern int cv_wait_sig(kcondvar_t *, kmutex_t *);
extern clock_t cv_timedwait_sig(kcondvar_t *, kmutex_t *, clock_t);
+extern clock_t cv_reltimedwait_sig(kcondvar_t *, kmutex_t *, clock_t,
+ time_res_t);
extern int cv_wait_sig_swap(kcondvar_t *, kmutex_t *);
extern int cv_wait_sig_swap_core(kcondvar_t *, kmutex_t *, int *);
extern void cv_signal(kcondvar_t *);
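Most of this patch is the mechanical conversion enabled by the two new prototypes above. A minimal sketch of the pattern, assuming an already-initialized condvar/mutex pair held by the caller (the helper name is hypothetical):

#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/sunddi.h>

/* Wait up to 100ms for a wakeup; mp is held on entry and on return. */
static clock_t
demo_wait_100ms(kcondvar_t *cvp, kmutex_t *mp)
{
	/*
	 * Old form, built on an absolute lbolt deadline:
	 *   (void) cv_timedwait(cvp, mp,
	 *       ddi_get_lbolt() + drv_usectohz(100000));
	 * New form, a relative timeout plus a time_res_t hint:
	 */
	return (cv_reltimedwait(cvp, mp, drv_usectohz(100000),
	    TR_CLOCK_TICK));
}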
diff --git a/usr/src/uts/common/sys/cpucaps_impl.h b/usr/src/uts/common/sys/cpucaps_impl.h
index ba4132993f..95afd21827 100644
--- a/usr/src/uts/common/sys/cpucaps_impl.h
+++ b/usr/src/uts/common/sys/cpucaps_impl.h
@@ -20,15 +20,13 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_CPUCAPS_IMPL_H
#define _SYS_CPUCAPS_IMPL_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -67,7 +65,7 @@ typedef struct cpucap {
struct zone *cap_zone; /* zone for the cap */
waitq_t cap_waitq; /* waitq for capped threads */
kstat_t *cap_kstat; /* cpucaps specific kstat */
- int64_t cap_lbolt; /* zone cap specific */
+ int64_t cap_gen; /* zone cap specific */
hrtime_t cap_value; /* scaled CPU usage cap */
hrtime_t cap_usage; /* current CPU usage */
disp_lock_t cap_usagelock; /* protects cap_usage above */
diff --git a/usr/src/uts/common/sys/cpuvar.h b/usr/src/uts/common/sys/cpuvar.h
index 1493aa6a33..aece259a35 100644
--- a/usr/src/uts/common/sys/cpuvar.h
+++ b/usr/src/uts/common/sys/cpuvar.h
@@ -168,7 +168,7 @@ typedef struct cpu {
ftrace_data_t cpu_ftrace; /* per cpu ftrace data */
- clock_t cpu_deadman_lbolt; /* used by deadman() */
+ clock_t cpu_deadman_counter; /* used by deadman() */
uint_t cpu_deadman_countdown; /* used by deadman() */
kmutex_t cpu_cpc_ctxlock; /* protects context for idle thread */
diff --git a/usr/src/uts/common/sys/fcoe/fcoe_common.h b/usr/src/uts/common/sys/fcoe/fcoe_common.h
index 6ab5ea2cbf..efb763a196 100644
--- a/usr/src/uts/common/sys/fcoe/fcoe_common.h
+++ b/usr/src/uts/common/sys/fcoe/fcoe_common.h
@@ -371,7 +371,7 @@ typedef struct fcoe_fcp_xfer_rdy {
/*
 * timestamp (global variable in sys/systm.h)
*/
-#define CURRENT_CLOCK lbolt
+#define CURRENT_CLOCK (ddi_get_lbolt())
#define FCOE_SEC2TICK(x_sec) (drv_usectohz((x_sec) * 1000000))
/*
diff --git a/usr/src/uts/common/sys/scsi/adapters/mpt_sas/mptsas_var.h b/usr/src/uts/common/sys/scsi/adapters/mpt_sas/mptsas_var.h
index 657d3a31e4..7dafe2ec2d 100644
--- a/usr/src/uts/common/sys/scsi/adapters/mpt_sas/mptsas_var.h
+++ b/usr/src/uts/common/sys/scsi/adapters/mpt_sas/mptsas_var.h
@@ -967,13 +967,6 @@ typedef struct mptsas_dma_alloc_state
#define MPTSAS_PASS_THRU_TIME_DEFAULT 60 /* 60 seconds */
/*
- * macro for getting value in micro-seconds since last boot to be used as
- * timeout in cv_timedwait call.
- */
-#define MPTSAS_CV_TIMEOUT(timeout) (ddi_get_lbolt() + \
- drv_usectohz(timeout * MICROSEC))
-
-/*
* macro to return the effective address of a given per-target field
*/
#define EFF_ADDR(start, offset) ((start) + (offset))
diff --git a/usr/src/uts/common/sys/sunddi.h b/usr/src/uts/common/sys/sunddi.h
index 1ba098608c..1534fe4caf 100644
--- a/usr/src/uts/common/sys/sunddi.h
+++ b/usr/src/uts/common/sys/sunddi.h
@@ -1511,9 +1511,6 @@ ddivoid();
cred_t *
ddi_get_cred(void);
-clock_t
-ddi_get_lbolt(void);
-
time_t
ddi_get_time(void);
diff --git a/usr/src/uts/common/sys/systm.h b/usr/src/uts/common/sys/systm.h
index 4e529b9cce..84ccfb9991 100644
--- a/usr/src/uts/common/sys/systm.h
+++ b/usr/src/uts/common/sys/systm.h
@@ -61,8 +61,6 @@ typedef uintptr_t pc_t;
extern int hz; /* system clock rate */
extern struct vnode *rootdir; /* pointer to vnode of root directory */
extern struct vnode *devicesdir; /* pointer to /devices vnode */
-extern volatile clock_t lbolt; /* time in HZ since last boot */
-extern volatile int64_t lbolt64; /* lbolt computed as 64-bit value */
extern int interrupts_unleashed; /* set after the spl0() in main() */
extern char runin; /* scheduling flag */
diff --git a/usr/src/uts/common/sys/time.h b/usr/src/uts/common/sys/time.h
index 860ec10383..fcb9a290fe 100644
--- a/usr/src/uts/common/sys/time.h
+++ b/usr/src/uts/common/sys/time.h
@@ -359,6 +359,9 @@ extern void timevalsub(struct timeval *, struct timeval *);
extern void timevalfix(struct timeval *);
extern void dtrace_hres_tick(void);
+extern clock_t ddi_get_lbolt(void);
+extern int64_t ddi_get_lbolt64(void);
+
#if defined(_SYSCALL32)
extern void hrt2ts32(hrtime_t, timestruc32_t *);
#endif
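With the lbolt and lbolt64 globals removed from systm.h, timestamp arithmetic throughout the rest of this patch goes through the two DDI routines now declared in sys/time.h. A small sketch of that pattern (the helper name is hypothetical):

#include <sys/time.h>

/* Ticks elapsed since 'start', which was itself taken via ddi_get_lbolt(). */
static clock_t
demo_elapsed(clock_t start)
{
	/* was: lbolt - start */
	return (ddi_get_lbolt() - start);
}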
diff --git a/usr/src/uts/common/syscall/poll.c b/usr/src/uts/common/syscall/poll.c
index f1002f69b4..bed14f800a 100644
--- a/usr/src/uts/common/syscall/poll.c
+++ b/usr/src/uts/common/syscall/poll.c
@@ -323,10 +323,11 @@ poll_common(pollfd_t *fds, nfds_t nfds, timespec_t *tsp, k_sigset_t *ksetp)
t->t_hold = *ksetp;
t->t_flag |= T_TOMASK;
/*
- * Call cv_timedwait_sig() just to check for signals.
+ * Call cv_reltimedwait_sig() just to check for signals.
* We will return immediately with either 0 or -1.
*/
- if (!cv_timedwait_sig(&t->t_delay_cv, &p->p_lock, lbolt)) {
+ if (!cv_reltimedwait_sig(&t->t_delay_cv, &p->p_lock, 0,
+ TR_CLOCK_TICK)) {
mutex_exit(&p->p_lock);
error = EINTR;
goto pollout;
diff --git a/usr/src/uts/common/syscall/times.c b/usr/src/uts/common/syscall/times.c
index cefa942d57..cd268c0c0f 100644
--- a/usr/src/uts/common/syscall/times.c
+++ b/usr/src/uts/common/syscall/times.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -28,8 +27,6 @@
/* All Rights Reserved */
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
@@ -65,7 +62,7 @@ times(struct tms *tp)
if (copyout(&p_time, tp, sizeof (p_time)))
return (set_errno(EFAULT));
- ret_lbolt = lbolt;
+ ret_lbolt = ddi_get_lbolt();
return (ret_lbolt == -1 ? 0 : ret_lbolt);
}
@@ -95,7 +92,7 @@ times32(struct tms32 *tp)
if (copyout(&p_time, tp, sizeof (p_time)))
return (set_errno(EFAULT));
- ret_lbolt = (clock32_t)lbolt;
+ ret_lbolt = (clock32_t)ddi_get_lbolt();
return (ret_lbolt == (clock32_t)-1 ? 0 : ret_lbolt);
}
diff --git a/usr/src/uts/common/syscall/uadmin.c b/usr/src/uts/common/syscall/uadmin.c
index a9aaff7892..1bdfbbfd0b 100644
--- a/usr/src/uts/common/syscall/uadmin.c
+++ b/usr/src/uts/common/syscall/uadmin.c
@@ -24,7 +24,6 @@
* Use is subject to license terms.
*/
-
#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
@@ -113,8 +112,8 @@ killall(zoneid_t zoneid)
} else {
sigtoproc(p, NULL, SIGKILL);
mutex_exit(&p->p_lock);
- (void) cv_timedwait(&p->p_srwchan_cv, &pidlock,
- lbolt + hz);
+ (void) cv_reltimedwait(&p->p_srwchan_cv,
+ &pidlock, hz, TR_CLOCK_TICK);
p = practive;
}
} else {
diff --git a/usr/src/uts/common/vm/seg_kp.c b/usr/src/uts/common/vm/seg_kp.c
index d65b3062bc..af684f4c06 100644
--- a/usr/src/uts/common/vm/seg_kp.c
+++ b/usr/src/uts/common/vm/seg_kp.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -31,8 +31,6 @@
* under license from the Regents of the University of California.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* segkp is a segment driver that administers the allocation and deallocation
* of pageable variable size chunks of kernel virtual address space. Each
@@ -133,12 +131,12 @@ ulong_t *segkp_bitmap;
* then the stack situation has become quite serious; if much more stack
* is consumed, we have the potential of scrogging the next thread/LWP
* structure. To help debug the "can't happen" panics which may
- * result from this condition, we record lbolt and the calling thread
- * in red_deep_lbolt and red_deep_thread respectively.
+ * result from this condition, we record hrestime and the calling thread
+ * in red_deep_hires and red_deep_thread respectively.
*/
#define RED_DEEP_THRESHOLD 2000
-clock_t red_deep_lbolt;
+hrtime_t red_deep_hires;
kthread_t *red_deep_thread;
uint32_t red_nmapped;
@@ -854,10 +852,10 @@ segkp_map_red(void)
* LWP structure. That situation could result in
* a very hard-to-debug panic, so, in the spirit of
* recording the name of one's killer in one's own
- * blood, we're going to record lbolt and the calling
+ * blood, we're going to record hrestime and the calling
* thread.
*/
- red_deep_lbolt = lbolt;
+ red_deep_hires = hrestime.tv_nsec;
red_deep_thread = curthread;
}
@@ -1325,7 +1323,7 @@ segkp_find(struct seg *seg, caddr_t vaddr)
mutex_enter(&segkp_lock);
do {
for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
- kpd = kpd->kp_next) {
+ kpd = kpd->kp_next) {
if (vaddr >= kpd->kp_base &&
vaddr < kpd->kp_base + kpd->kp_len) {
mutex_exit(&segkp_lock);
diff --git a/usr/src/uts/common/vm/seg_spt.c b/usr/src/uts/common/vm/seg_spt.c
index 99ff54dc2c..94ea959f3a 100644
--- a/usr/src/uts/common/vm/seg_spt.c
+++ b/usr/src/uts/common/vm/seg_spt.c
@@ -2832,14 +2832,14 @@ segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
mutex_enter(&sptd->spt_lock);
- end_lbolt = lbolt + (hz * spt_pcache_wait);
+ end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
/*
* Try to wait for pages to get kicked out of the seg_pcache.
*/
while (sptd->spt_gen == gen &&
(sptd->spt_flags & DISM_PPA_CHANGED) &&
- lbolt < end_lbolt) {
+ ddi_get_lbolt() < end_lbolt) {
if (!cv_timedwait_sig(&sptd->spt_cv,
&sptd->spt_lock, end_lbolt)) {
break;
diff --git a/usr/src/uts/common/vm/vm_page.c b/usr/src/uts/common/vm/vm_page.c
index 43f153f19f..fbf616bd4a 100644
--- a/usr/src/uts/common/vm/vm_page.c
+++ b/usr/src/uts/common/vm/vm_page.c
@@ -6440,7 +6440,8 @@ page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap)
if (pc_cb[cb_index].duration == -1) {
bp1->expires = (clock_t)-1;
} else {
- bp1->expires = lbolt + pc_cb[cb_index].duration;
+ bp1->expires = ddi_get_lbolt() +
+ pc_cb[cb_index].duration;
}
} else {
/* There's no callback registered so don't add to the hash */
@@ -6878,7 +6879,7 @@ page_capture_take_action(page_t *pp, uint_t flags, void *datap)
* Check for expiration time first as we can just free it up if it's
* expired.
*/
- if (lbolt > bp1->expires && bp1->expires != -1) {
+ if (ddi_get_lbolt() > bp1->expires && bp1->expires != -1) {
kmem_free(bp1, sizeof (*bp1));
return (ret);
}
@@ -7217,7 +7218,8 @@ page_capture_async()
bp1 = page_capture_hash[i].lists[0].next;
while (bp1 != &page_capture_hash[i].lists[0]) {
/* Check expiration time */
- if ((lbolt > bp1->expires && bp1->expires != -1) ||
+ if ((ddi_get_lbolt() > bp1->expires &&
+ bp1->expires != -1) ||
page_deleted(bp1->pp)) {
page_capture_hash[i].lists[0].next = bp1->next;
bp1->next->prev =
@@ -7372,13 +7374,13 @@ page_capture_thread(void)
if (outstanding) {
page_capture_handle_outstanding();
CALLB_CPR_SAFE_BEGIN(&c);
- (void) cv_timedwait(&pc_cv, &pc_thread_mutex,
- lbolt + pc_thread_shortwait);
+ (void) cv_reltimedwait(&pc_cv, &pc_thread_mutex,
+ pc_thread_shortwait, TR_CLOCK_TICK);
CALLB_CPR_SAFE_END(&c, &pc_thread_mutex);
} else {
CALLB_CPR_SAFE_BEGIN(&c);
- (void) cv_timedwait(&pc_cv, &pc_thread_mutex,
- lbolt + pc_thread_longwait);
+ (void) cv_reltimedwait(&pc_cv, &pc_thread_mutex,
+ pc_thread_longwait, TR_CLOCK_TICK);
CALLB_CPR_SAFE_END(&c, &pc_thread_mutex);
}
}
diff --git a/usr/src/uts/common/vm/vm_seg.c b/usr/src/uts/common/vm/vm_seg.c
index 2791f7b29b..65c6c5ecdc 100644
--- a/usr/src/uts/common/vm/vm_seg.c
+++ b/usr/src/uts/common/vm/vm_seg.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -36,8 +36,6 @@
* contributors.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* VM - segment management.
*/
@@ -212,7 +210,7 @@ extern struct seg_ops segspt_shmops;
#define IS_PFLAGS_WIRED(flags) ((flags) & SEGP_FORCE_WIRED)
#define IS_PCP_WIRED(pcp) IS_PFLAGS_WIRED((pcp)->p_flags)
-#define LBOLT_DELTA(t) ((ulong_t)(lbolt - (t)))
+#define LBOLT_DELTA(t) ((ulong_t)(ddi_get_lbolt() - (t)))
#define PCP_AGE(pcp) LBOLT_DELTA((pcp)->p_lbolt)
@@ -643,7 +641,7 @@ again:
* free it immediately since
* it may be reactivated very soon.
*/
- pcp->p_lbolt = lbolt;
+ pcp->p_lbolt = ddi_get_lbolt();
pcp->p_ref = 1;
}
mutex_exit(&hp->p_hmutex);
@@ -666,7 +664,7 @@ again:
* Mark this entry as referenced just in case
* we'll free our own pcp entry soon.
*/
- pcp->p_lbolt = lbolt;
+ pcp->p_lbolt = ddi_get_lbolt();
pcp->p_ref = 1;
}
if (pmtx != NULL) {
@@ -1544,8 +1542,8 @@ seg_pasync_thread(void)
mutex_enter(&seg_pasync_mtx);
for (;;) {
CALLB_CPR_SAFE_BEGIN(&cpr_info);
- (void) cv_timedwait(&seg_pasync_cv, &seg_pasync_mtx,
- lbolt + segpcache_reap_ticks);
+ (void) cv_reltimedwait(&seg_pasync_cv, &seg_pasync_mtx,
+ segpcache_reap_ticks, TR_CLOCK_TICK);
CALLB_CPR_SAFE_END(&cpr_info, &seg_pasync_mtx);
if (seg_pdisabled == 0) {
seg_ppurge_async(0);
diff --git a/usr/src/uts/common/xen/io/xdf.c b/usr/src/uts/common/xen/io/xdf.c
index 48ad9c7ee5..2e45882e5d 100644
--- a/usr/src/uts/common/xen/io/xdf.c
+++ b/usr/src/uts/common/xen/io/xdf.c
@@ -2015,8 +2015,9 @@ xdf_connect_locked(xdf_t *vdp, boolean_t wait)
rv = cv_wait_sig(&vdp->xdf_dev_cv, &vdp->xdf_dev_lk);
} else {
/* delay for 0.1 sec */
- rv = cv_timedwait_sig(&vdp->xdf_dev_cv,
- &vdp->xdf_dev_lk, lbolt + drv_usectohz(100*1000));
+ rv = cv_reltimedwait_sig(&vdp->xdf_dev_cv,
+ &vdp->xdf_dev_lk, drv_usectohz(100*1000),
+ TR_CLOCK_TICK);
if (rv == -1)
timeouts++;
}
diff --git a/usr/src/uts/common/xen/io/xpvtap.c b/usr/src/uts/common/xen/io/xpvtap.c
index e07489391b..a54b6490f0 100644
--- a/usr/src/uts/common/xen/io/xpvtap.c
+++ b/usr/src/uts/common/xen/io/xpvtap.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1374,10 +1374,8 @@ xpvtap_user_response_get(xpvtap_state_t *state, blkif_response_t *resp,
static void xpvtap_user_app_stop(caddr_t arg)
{
xpvtap_state_t *state;
- clock_t timeout;
clock_t rc;
-
state = (xpvtap_state_t *)arg;
/*
@@ -1386,9 +1384,9 @@ static void xpvtap_user_app_stop(caddr_t arg)
*/
mutex_enter(&state->bt_open.bo_mutex);
if (state->bt_open.bo_opened) {
- timeout = ddi_get_lbolt() + drv_usectohz(10000000);
- rc = cv_timedwait(&state->bt_open.bo_exit_cv,
- &state->bt_open.bo_mutex, timeout);
+ rc = cv_reltimedwait(&state->bt_open.bo_exit_cv,
+ &state->bt_open.bo_mutex, drv_usectohz(10000000),
+ TR_CLOCK_TICK);
if (rc <= 0) {
cmn_err(CE_NOTE, "!user process still has driver open, "
"deferring detach\n");
diff --git a/usr/src/uts/i86pc/io/tzmon/tzmon.c b/usr/src/uts/i86pc/io/tzmon/tzmon.c
index af6ec1ad1f..0f44040994 100644
--- a/usr/src/uts/i86pc/io/tzmon/tzmon.c
+++ b/usr/src/uts/i86pc/io/tzmon/tzmon.c
@@ -590,8 +590,8 @@ tzmon_monitor(void *ctx)
mutex_enter(&tzp->lock);
ticks = drv_usectohz(tzp->polling_period * 1000000);
if (ticks > 0)
- (void) cv_timedwait(&zone_list_condvar, &tzp->lock,
- ddi_get_lbolt() + ticks);
+ (void) cv_reltimedwait(&zone_list_condvar,
+ &tzp->lock, ticks, TR_CLOCK_TICK);
mutex_exit(&tzp->lock);
} while (ticks > 0);
}
diff --git a/usr/src/uts/i86pc/os/graphics.c b/usr/src/uts/i86pc/os/graphics.c
index c60ae37c6f..c8ae9c3e60 100644
--- a/usr/src/uts/i86pc/os/graphics.c
+++ b/usr/src/uts/i86pc/os/graphics.c
@@ -136,13 +136,13 @@ progressbar_step()
static void
progressbar_thread(void *arg)
{
- clock_t end;
+ clock_t end = drv_usectohz(150000);
mutex_enter(&pbar_lock);
while (graphics_mode) {
progressbar_step();
- end = ddi_get_lbolt() + drv_usectohz(150000);
- (void) cv_timedwait(&pbar_cv, &pbar_lock, end);
+ (void) cv_reltimedwait(&pbar_cv, &pbar_lock, end,
+ TR_CLOCK_TICK);
}
mutex_exit(&pbar_lock);
}
diff --git a/usr/src/uts/i86pc/os/machdep.c b/usr/src/uts/i86pc/os/machdep.c
index 0ee328cc6d..24ead5d6bd 100644
--- a/usr/src/uts/i86pc/os/machdep.c
+++ b/usr/src/uts/i86pc/os/machdep.c
@@ -130,6 +130,8 @@
#include <sys/traptrace.h>
#endif /* TRAPTRACE */
+#include <sys/clock_impl.h>
+
extern void audit_enterprom(int);
extern void audit_exitprom(int);
@@ -1365,3 +1367,24 @@ dtrace_linear_pc(struct regs *rp, proc_t *p, caddr_t *linearp)
return (0);
}
+
+/*
+ * We need to post a soft interrupt to reprogram the lbolt cyclic when
+ * switching from event to cyclic driven lbolt. The following code adds
+ * and posts the softint for x86.
+ */
+static ddi_softint_hdl_impl_t lbolt_softint_hdl =
+ {0, NULL, NULL, NULL, 0, NULL, NULL, NULL};
+
+void
+lbolt_softint_add(void)
+{
+ (void) add_avsoftintr((void *)&lbolt_softint_hdl, LOCK_LEVEL,
+ (avfunc)lbolt_ev_to_cyclic, "lbolt_ev_to_cyclic", NULL, NULL);
+}
+
+void
+lbolt_softint_post(void)
+{
+ (*setsoftint)(CBE_LOCK_PIL, lbolt_softint_hdl.ih_pending);
+}
diff --git a/usr/src/uts/i86xpv/os/balloon.c b/usr/src/uts/i86xpv/os/balloon.c
index be071f322f..2370e6eaf9 100644
--- a/usr/src/uts/i86xpv/os/balloon.c
+++ b/usr/src/uts/i86xpv/os/balloon.c
@@ -673,8 +673,8 @@ balloon_worker_thread(void)
* We weren't able to fully complete the request
* last time through, so try again.
*/
- (void) cv_timedwait(&bln_cv, &bln_mutex,
- lbolt + (bln_wait * hz));
+ (void) cv_reltimedwait(&bln_cv, &bln_mutex,
+ (bln_wait * hz), TR_CLOCK_TICK);
} else {
cv_wait(&bln_cv, &bln_mutex);
}
diff --git a/usr/src/uts/intel/io/heci/heci_init.c b/usr/src/uts/intel/io/heci/heci_init.c
index 7248ff6d21..0f5e4da22f 100644
--- a/usr/src/uts/intel/io/heci/heci_init.c
+++ b/usr/src/uts/intel/io/heci/heci_init.c
@@ -302,11 +302,8 @@ heci_hw_init(struct iamt_heci_device *dev)
/* wait for ME to turn on ME_RDY */
err = 0;
while (!dev->recvd_msg && err != -1) {
- clock_t tm;
- tm = ddi_get_lbolt();
- err = cv_timedwait(&dev->wait_recvd_msg,
- &dev->device_lock,
- tm + HECI_INTEROP_TIMEOUT);
+ err = cv_reltimedwait(&dev->wait_recvd_msg,
+ &dev->device_lock, HECI_INTEROP_TIMEOUT, TR_CLOCK_TICK);
}
if (err == -1 && !dev->recvd_msg) {
@@ -572,6 +569,7 @@ host_start_message(struct iamt_heci_device *dev)
struct hbm_host_version_request *host_start_req;
struct hbm_host_stop_request *host_stop_req;
int err = 0;
+ clock_t delta = (clock_t)(timeout * HZ);
/* host start message */
mutex_enter(&dev->device_lock);
@@ -600,10 +598,8 @@ host_start_message(struct iamt_heci_device *dev)
DBG("call wait_event_interruptible_timeout for response message.\n");
err = 0;
while (err != -1 && !dev->recvd_msg) {
- clock_t tm;
- tm = ddi_get_lbolt();
- err = cv_timedwait(&dev->wait_recvd_msg,
- &dev->device_lock, tm + timeout * HZ);
+ err = cv_reltimedwait(&dev->wait_recvd_msg, &dev->device_lock,
+ delta, TR_CLOCK_TICK);
}
if (err == -1 && !dev->recvd_msg) {
DBG("wait_timeout failed on host start response message.\n");
@@ -655,6 +651,7 @@ host_enum_clients_message(struct iamt_heci_device *dev)
struct hbm_host_enum_request *host_enum_req;
int err = 0;
uint8_t i, j;
+ clock_t delta = (clock_t)(timeout * HZ);
mutex_enter(&dev->device_lock);
@@ -680,11 +677,8 @@ host_enum_clients_message(struct iamt_heci_device *dev)
dev->recvd_msg = 0;
err = 0;
while (!dev->recvd_msg && err != -1) {
- clock_t tm;
- tm = ddi_get_lbolt();
- err = cv_timedwait(&dev->wait_recvd_msg,
- &dev->device_lock,
- tm + timeout * HZ);
+ err = cv_reltimedwait(&dev->wait_recvd_msg, &dev->device_lock,
+ delta, TR_CLOCK_TICK);
}
if (err == -1 && !dev->recvd_msg) {
DBG("wait_event_interruptible_timeout failed "
@@ -723,6 +717,7 @@ host_client_properties(struct iamt_heci_device *dev,
struct heci_msg_hdr *heci_hdr;
struct hbm_props_request *host_cli_req;
int err;
+ clock_t delta = 10 * HZ;
mutex_enter(&dev->device_lock);
heci_hdr = (struct heci_msg_hdr *)&dev->wr_msg_buf[0];
@@ -747,11 +742,8 @@ host_client_properties(struct iamt_heci_device *dev,
err = 0;
while (!dev->recvd_msg && err != -1) {
- clock_t tm;
- tm = ddi_get_lbolt();
- err = cv_timedwait(&dev->wait_recvd_msg,
- &dev->device_lock,
- tm + 10 * HZ);
+ err = cv_reltimedwait(&dev->wait_recvd_msg, &dev->device_lock,
+ delta, TR_CLOCK_TICK);
}
if (err == -1 && !dev->recvd_msg) {
DBG("wait failed on props resp msg.\n");
@@ -928,6 +920,7 @@ heci_connect_me_client(struct iamt_heci_device *dev,
long timeout)
{
int err = 0;
+ clock_t delta = (clock_t)(timeout * HZ);
if ((dev == NULL) || (priv == NULL))
return (0);
@@ -943,11 +936,8 @@ heci_connect_me_client(struct iamt_heci_device *dev,
while (!(HECI_FILE_CONNECTED == priv->state ||
HECI_FILE_DISCONNECTED == priv->state) &&
err != -1) {
- clock_t tm;
- tm = ddi_get_lbolt();
- err = cv_timedwait(&dev->wait_recvd_msg,
- &dev->device_lock,
- tm + timeout*HZ);
+ err = cv_reltimedwait(&dev->wait_recvd_msg, &dev->device_lock,
+ delta, TR_CLOCK_TICK);
}
if (HECI_FILE_CONNECTED != priv->state) {
heci_remove_client_from_file_list(dev, priv->host_client_id);
@@ -1100,6 +1090,7 @@ heci_disconnect_host_client(struct iamt_heci_device *dev,
int rets, err;
long timeout = 15; /* 15 seconds */
struct heci_cb_private *priv_cb;
+ clock_t delta = (clock_t)(timeout * HZ);
if ((!dev) || (!file_ext))
return (-ENODEV);
@@ -1136,11 +1127,8 @@ heci_disconnect_host_client(struct iamt_heci_device *dev,
while (err != -1 &&
(HECI_FILE_DISCONNECTED != file_ext->state)) {
- clock_t tm;
- tm = ddi_get_lbolt();
- err = cv_timedwait(&dev->wait_recvd_msg,
- &dev->device_lock,
- tm + timeout * HZ);
+ err = cv_reltimedwait(&dev->wait_recvd_msg, &dev->device_lock,
+ delta, TR_CLOCK_TICK);
}
mutex_exit(&dev->device_lock);
diff --git a/usr/src/uts/intel/io/heci/heci_main.c b/usr/src/uts/intel/io/heci/heci_main.c
index 73dc79a7ca..c09cda21f5 100644
--- a/usr/src/uts/intel/io/heci/heci_main.c
+++ b/usr/src/uts/intel/io/heci/heci_main.c
@@ -411,11 +411,8 @@ heci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
err = 0;
while (!dev->wd_stoped && err != -1) {
- clock_t tm;
- tm = ddi_get_lbolt();
- err = cv_timedwait(&dev->wait_stop_wd,
- &dev->device_lock,
- tm + 10*HZ);
+ err = cv_reltimedwait(&dev->wait_stop_wd,
+ &dev->device_lock, 10*HZ, TR_CLOCK_TICK);
}
if (!dev->wd_stoped) {
@@ -1450,11 +1447,8 @@ heci_quiesce(dev_info_t *dip)
err = 0;
while (!dev->wd_stoped && err != -1) {
- clock_t tm;
- tm = ddi_get_lbolt();
- err = cv_timedwait(&dev->wait_stop_wd,
- &dev->device_lock,
- tm + 10*HZ);
+ err = cv_reltimedwait(&dev->wait_stop_wd,
+ &dev->device_lock, 10*HZ, TR_CLOCK_TICK);
}
if (!dev->wd_stoped) {
@@ -1527,11 +1521,8 @@ heci_suspend(dev_info_t *dip)
err = 0;
while (!device->wd_stoped && err != -1) {
- clock_t tm;
- tm = ddi_get_lbolt();
- err = cv_timedwait(&device->wait_stop_wd,
- &device->device_lock,
- tm + 10*HZ);
+ err = cv_reltimedwait(&device->wait_stop_wd,
+ &device->device_lock, 10*HZ, TR_CLOCK_TICK);
}
if (!device->wd_stoped) {
diff --git a/usr/src/uts/intel/io/heci/io_heci.c b/usr/src/uts/intel/io/heci/io_heci.c
index 31fc742544..994cde3194 100644
--- a/usr/src/uts/intel/io/heci/io_heci.c
+++ b/usr/src/uts/intel/io/heci/io_heci.c
@@ -336,10 +336,8 @@ fail:
err = 0;
while (err != -1 && HECI_FILE_CONNECTED != file_ext->state &&
HECI_FILE_DISCONNECTED != file_ext->state) {
- clock_t tm;
- tm = ddi_get_lbolt();
- err = cv_timedwait(&dev->wait_recvd_msg,
- &dev->device_lock, tm + timeout * HZ);
+ err = cv_reltimedwait(&dev->wait_recvd_msg,
+ &dev->device_lock, timeout * HZ, TR_CLOCK_TICK);
}
mutex_exit(&dev->device_lock);
diff --git a/usr/src/uts/intel/os/arch_kdi.c b/usr/src/uts/intel/os/arch_kdi.c
index abc2dfa599..4aa852ae2c 100644
--- a/usr/src/uts/intel/os/arch_kdi.c
+++ b/usr/src/uts/intel/os/arch_kdi.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Kernel/Debugger Interface (KDI) routines. Called during debugger under
* various system states (boot, while running, while the debugger has control).
@@ -44,10 +42,13 @@
#include <sys/trap.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
+#include <sys/clock_impl.h>
static void
kdi_system_claim(void)
{
+ lbolt_debug_entry();
+
psm_notifyf(PSM_DEBUG_ENTER);
}
@@ -55,6 +56,8 @@ static void
kdi_system_release(void)
{
psm_notifyf(PSM_DEBUG_EXIT);
+
+ lbolt_debug_return();
}
static cpu_t *
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.h b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
index 327b2fcf36..f075093c2f 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.h
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
@@ -796,7 +796,7 @@ struct ctx_trace {
(ptr)->sc_sfmmu_stealing = (stealing_sfmmu); \
(ptr)->sc_cnum = (cnum); \
(ptr)->sc_type = (type); \
- (ptr)->sc_time = lbolt; \
+ (ptr)->sc_time = ddi_get_lbolt(); \
(ptr) = NEXT_CTXTR(ptr); \
num_ctx_stolen += (type); \
mutex_exit(mutex);
diff --git a/usr/src/uts/sun/io/scsi/adapters/sf.c b/usr/src/uts/sun/io/scsi/adapters/sf.c
index bea7656096..70f758a01f 100644
--- a/usr/src/uts/sun/io/scsi/adapters/sf.c
+++ b/usr/src/uts/sun/io/scsi/adapters/sf.c
@@ -268,7 +268,7 @@ static int sf_reset_flag = 1; /* bool: to allow reset after LIP */
static int sf_abort_flag = 0; /* bool: to do just one abort */
#endif
-extern volatile int64_t lbolt64;
+extern int64_t ddi_get_lbolt64(void);
/*
* for converting between target number (switch) and hard address/AL_PA
@@ -834,7 +834,7 @@ sf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
*/
(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
- sf->sf_reset_time = lbolt64;
+ sf->sf_reset_time = ddi_get_lbolt64();
return (DDI_SUCCESS);
default:
@@ -1225,7 +1225,7 @@ sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
ASSERT(sf);
reset_delay = (int64_t)(USEC_TO_TICK(SF_INIT_WAIT_TIMEOUT)) -
- (lbolt64 - sf->sf_reset_time);
+ (ddi_get_lbolt64() - sf->sf_reset_time);
if (reset_delay < 0)
reset_delay = 0;
@@ -6490,7 +6490,6 @@ sf_check_reset_delay(void *arg)
uint_t lip_cnt, reset_timeout_flag = FALSE;
clock_t lb;
-
lb = ddi_get_lbolt();
mutex_enter(&sf_global_mutex);
diff --git a/usr/src/uts/sun/io/zs_async.c b/usr/src/uts/sun/io/zs_async.c
index bc5a1961b4..61832b854b 100644
--- a/usr/src/uts/sun/io/zs_async.c
+++ b/usr/src/uts/sun/io/zs_async.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Asynchronous protocol handler for Z8530 chips
* Handles normal UNIX support for terminals & modems
@@ -805,7 +802,7 @@ again:
if (speed > CBAUD) {
za->za_ttycommon.t_cflag |= CBAUDEXT;
za->za_ttycommon.t_cflag |=
- ((speed - CBAUD - 1) & CBAUD);
+ ((speed - CBAUD - 1) & CBAUD);
} else {
za->za_ttycommon.t_cflag &= ~CBAUDEXT;
za->za_ttycommon.t_cflag |= (speed & CBAUD);
@@ -823,7 +820,7 @@ again:
zsa_program(za, za->za_ttycommon.t_cflag & (CIBAUDEXT|CIBAUD));
zsa_set_za_rcv_flags_mask(za);
} else if ((za->za_ttycommon.t_flags & TS_XCLUDE) &&
- secpolicy_excl_open(cr) != 0) {
+ secpolicy_excl_open(cr) != 0) {
mutex_exit(zs->zs_excl_hi);
if (set_zsoptinit && !(za->za_flags & ISOPEN))
zsopinit(zs, &zsops_null);
@@ -867,7 +864,7 @@ again:
mutex_exit(zs->zs_excl);
mutex_exit(zs->zs_ocexcl);
(void) ddi_dev_is_needed(zs->zs_dip,
- 0, 1);
+ 0, 1);
mutex_enter(zs->zs_ocexcl);
mutex_enter(zs->zs_excl);
}
@@ -1094,8 +1091,8 @@ nodrain:
* bounce us back to the top; just continue
* closing as if nothing had happened.
*/
- tmp = cv_timedwait_sig(&zs->zs_flags_cv, zs->zs_excl,
- ddi_get_lbolt() + drv_usectohz(10000));
+ tmp = cv_reltimedwait_sig(&zs->zs_flags_cv, zs->zs_excl,
+ drv_usectohz(10000), TR_CLOCK_TICK);
if (zs->zs_suspended) {
mutex_exit(zs->zs_excl);
(void) ddi_dev_is_needed(zs->zs_dip, 0, 1);
@@ -1473,27 +1470,28 @@ zsa_wput(queue_t *q, mblk_t *mp)
(SCC_READ0() & ZSRR0_CD)) {
ZSA_KICK_RCV;
} else {
- ZSA_KICK_RCV;
- if (!(SCC_READ0() & ZSRR0_RX_READY)) {
- /*
- * settle time for 1 character shift
- */
- mutex_exit(zs->zs_excl_hi);
- mutex_exit(zs->zs_excl);
- delay(ztdelay(
- SPEED(za->za_ttycommon.t_cflag))/3 + 1);
- mutex_enter(zs->zs_excl);
- mutex_enter(zs->zs_excl_hi);
- if (!(SCC_READ0() & ZSRR0_CD))
- ZSA_KICK_RCV;
- }
- while ((SCC_READ0() &
- (ZSRR0_CD|ZSRR0_RX_READY)) == ZSRR0_RX_READY) {
- /*
- * Empty Receiver
- */
- (void) SCC_READDATA();
- }
+ ZSA_KICK_RCV;
+ if (!(SCC_READ0() & ZSRR0_RX_READY)) {
+ /*
+ * settle time for 1 character shift
+ */
+ mutex_exit(zs->zs_excl_hi);
+ mutex_exit(zs->zs_excl);
+ delay(ztdelay(SPEED(
+ za->za_ttycommon.t_cflag))/3 + 1);
+ mutex_enter(zs->zs_excl);
+ mutex_enter(zs->zs_excl_hi);
+ if (!(SCC_READ0() & ZSRR0_CD))
+ ZSA_KICK_RCV;
+ }
+ while ((SCC_READ0() &
+ (ZSRR0_CD | ZSRR0_RX_READY)) ==
+ ZSRR0_RX_READY) {
+ /*
+ * Empty Receiver
+ */
+ (void) SCC_READDATA();
+ }
}
mutex_exit(zs->zs_excl_hi);
flushq(RD(q), FLUSHDATA);
@@ -2142,38 +2140,39 @@ zsa_softint(struct zscom *zs)
za_kick_active = za->za_kick_active;
while (!za_kick_active) {
- ZSA_SEEQ(bp);
- if (!bp)
- break;
+ ZSA_SEEQ(bp);
+ if (!bp)
+ break;
- allocbcount++;
-
- if (bp->b_datap->db_type <= QPCTL) {
- if (!(canputnext(q))) {
- if (za->za_grace_flow_control >=
- zsa_grace_flow_control) {
- if (za->za_ttycommon.t_cflag & CRTSXOFF) {
- allocbcount--;
- break;
- }
- ZSA_GETQ(bp);
- freemsg(bp);
- do_ttycommon_qfull = 1;
- continue;
+ allocbcount++;
+
+ if (bp->b_datap->db_type <= QPCTL) {
+ if (!(canputnext(q))) {
+ if (za->za_grace_flow_control >=
+ zsa_grace_flow_control) {
+ if (za->za_ttycommon.t_cflag &
+ CRTSXOFF) {
+ allocbcount--;
+ break;
+ }
+ ZSA_GETQ(bp);
+ freemsg(bp);
+ do_ttycommon_qfull = 1;
+ continue;
+ } else
+ za->za_grace_flow_control++;
} else
- za->za_grace_flow_control++;
- } else
- za->za_grace_flow_control = 0;
- }
- ZSA_GETQ(bp);
- if (!head) {
- head = bp;
- } else {
- if (!tail)
- tail = head;
- tail->b_next = bp;
- tail = bp;
- }
+ za->za_grace_flow_control = 0;
+ }
+ ZSA_GETQ(bp);
+ if (!head) {
+ head = bp;
+ } else {
+ if (!tail)
+ tail = head;
+ tail->b_next = bp;
+ tail = bp;
+ }
}
if (allocbcount)
@@ -2187,7 +2186,7 @@ zsa_softint(struct zscom *zs)
* carrier up?
*/
if ((r0 & ZSRR0_CD) ||
- (za->za_ttycommon.t_flags & TS_SOFTCAR)) {
+ (za->za_ttycommon.t_flags & TS_SOFTCAR)) {
/*
* carrier present
*/
@@ -2275,12 +2274,13 @@ zsa_softint(struct zscom *zs)
* any cluster of overrun errrors.
*/
if ((!za->za_kick_rcv_id) && (zs->zs_rd_cur || za_kick_active)) {
- if (g_zsticks)
- za->za_kick_rcv_id = timeout(zsa_kick_rcv, zs, g_zsticks);
- else
- za->za_kick_rcv_id = timeout(zsa_kick_rcv, zs,
- zsticks[SPEED(za->za_ttycommon.t_cflag)]);
- za->za_kick_rcv_count = ZA_KICK_RCV_COUNT;
+ if (g_zsticks)
+ za->za_kick_rcv_id = timeout(zsa_kick_rcv, zs,
+ g_zsticks);
+ else
+ za->za_kick_rcv_id = timeout(zsa_kick_rcv, zs,
+ zsticks[SPEED(za->za_ttycommon.t_cflag)]);
+ za->za_kick_rcv_count = ZA_KICK_RCV_COUNT;
}
za->za_soft_active = 1;
mutex_exit(zs->zs_excl);
@@ -2518,17 +2518,19 @@ zsa_start_retransmit:
za->za_rcv_flags_mask &= ~DO_TRANSMIT;
if (za->za_ttycommon.t_cflag & CRTSCTS) {
if ((SCC_READ0() & (ZSRR0_CTS|ZSRR0_TX_READY)) !=
- (ZSRR0_CTS|ZSRR0_TX_READY)) {
+ (ZSRR0_CTS|ZSRR0_TX_READY)) {
za->za_rcv_flags_mask |= DO_RETRANSMIT;
za->za_flags |= ZAS_BUSY;
mutex_exit(zs->zs_excl_hi);
return;
}
za->za_rcv_flags_mask &= ~DO_RETRANSMIT;
- } else if (!(SCC_READ0() & ZSRR0_TX_READY)) {
+ } else {
+ if (!(SCC_READ0() & ZSRR0_TX_READY)) {
za->za_flags |= ZAS_BUSY;
mutex_exit(zs->zs_excl_hi);
return;
+ }
}
/*
* If the transmitter is ready, shove the first
@@ -2628,38 +2630,39 @@ zsa_kick_rcv(void *arg)
for (;;) {
- ZSA_SEEQ(mp);
- if (!mp)
- break;
+ ZSA_SEEQ(mp);
+ if (!mp)
+ break;
- allocbcount++;
-
- if (mp->b_datap->db_type <= QPCTL) {
- if (!(canputnext(q))) {
- if (za->za_grace_flow_control >=
- zsa_grace_flow_control) {
- if (za->za_ttycommon.t_cflag & CRTSXOFF) {
- allocbcount--;
- break;
- }
- ZSA_GETQ(mp);
- freemsg(mp);
- do_ttycommon_qfull = 1;
- continue;
+ allocbcount++;
+
+ if (mp->b_datap->db_type <= QPCTL) {
+ if (!(canputnext(q))) {
+ if (za->za_grace_flow_control >=
+ zsa_grace_flow_control) {
+ if (za->za_ttycommon.t_cflag &
+ CRTSXOFF) {
+ allocbcount--;
+ break;
+ }
+ ZSA_GETQ(mp);
+ freemsg(mp);
+ do_ttycommon_qfull = 1;
+ continue;
+ } else
+ za->za_grace_flow_control++;
} else
- za->za_grace_flow_control++;
- } else
- za->za_grace_flow_control = 0;
- }
- ZSA_GETQ(mp);
- if (!head) {
- head = mp;
- } else {
- if (!tail)
- tail = head;
- tail->b_next = mp;
- tail = mp;
- }
+ za->za_grace_flow_control = 0;
+ }
+ ZSA_GETQ(mp);
+ if (!head) {
+ head = mp;
+ } else {
+ if (!tail)
+ tail = head;
+ tail->b_next = mp;
+ tail = mp;
+ }
}
if (allocbcount)
@@ -2987,11 +2990,11 @@ zsa_program(struct asyncline *za, int setibaud)
if (baudrate > CBAUD) {
za->za_ttycommon.t_cflag |= CIBAUDEXT;
za->za_ttycommon.t_cflag |=
- (((baudrate - CBAUD - 1) << IBSHIFT) & CIBAUD);
+ (((baudrate - CBAUD - 1) << IBSHIFT) & CIBAUD);
} else {
za->za_ttycommon.t_cflag &= ~CIBAUDEXT;
za->za_ttycommon.t_cflag |=
- ((baudrate << IBSHIFT) & CIBAUD);
+ ((baudrate << IBSHIFT) & CIBAUD);
}
}
diff --git a/usr/src/uts/sun4/os/machdep.c b/usr/src/uts/sun4/os/machdep.c
index d4f2669abd..07a1f185da 100644
--- a/usr/src/uts/sun4/os/machdep.c
+++ b/usr/src/uts/sun4/os/machdep.c
@@ -59,6 +59,8 @@
#include <vm/seg_kmem.h>
#include <sys/hold_page.h>
#include <sys/cpu.h>
+#include <sys/ivintr.h>
+#include <sys/clock_impl.h>
int maxphys = MMU_PAGESIZE * 16; /* 128k */
int klustsize = MMU_PAGESIZE * 16; /* 128k */
@@ -679,11 +681,31 @@ kdi_plat_call(void (*platfn)(void))
}
}
+/*
+ * kdi_system_claim and release are defined here for all sun4 platforms and
+ * pointed to by mach_kdi_init() to provide default callbacks for such systems.
+ * Specific sun4u or sun4v platforms may implement their own claim and release
+ * routines, at which point their respective callbacks will be updated.
+ */
+static void
+kdi_system_claim(void)
+{
+ lbolt_debug_entry();
+}
+
+static void
+kdi_system_release(void)
+{
+ lbolt_debug_return();
+}
+
void
mach_kdi_init(kdi_t *kdi)
{
kdi->kdi_plat_call = kdi_plat_call;
kdi->kdi_kmdb_enter = kmdb_enter;
+ kdi->pkdi_system_claim = kdi_system_claim;
+ kdi->pkdi_system_release = kdi_system_release;
kdi->mkdi_cpu_index = kdi_cpu_index;
kdi->mkdi_trap_vatotte = kdi_trap_vatotte;
kdi->mkdi_kernpanic = kdi_kernpanic;
@@ -850,3 +872,23 @@ void
progressbar_key_abort(ldi_ident_t li)
{
}
+
+/*
+ * We need to post a soft interrupt to reprogram the lbolt cyclic when
+ * switching from event to cyclic driven lbolt. The following code adds
+ * and posts the softint for sun4 platforms.
+ */
+static uint64_t lbolt_softint_inum;
+
+void
+lbolt_softint_add(void)
+{
+ lbolt_softint_inum = add_softintr(LOCK_LEVEL,
+ (softintrfunc)lbolt_ev_to_cyclic, NULL, SOFTINT_MT);
+}
+
+void
+lbolt_softint_post(void)
+{
+ setsoftint(lbolt_softint_inum);
+}
diff --git a/usr/src/uts/sun4u/cpu/us3_common.c b/usr/src/uts/sun4u/cpu/us3_common.c
index 4c67aed2c6..7b95703ac1 100644
--- a/usr/src/uts/sun4u/cpu/us3_common.c
+++ b/usr/src/uts/sun4u/cpu/us3_common.c
@@ -74,6 +74,7 @@
#include <sys/errorq.h>
#include <sys/errclassify.h>
#include <sys/pghw.h>
+#include <sys/clock_impl.h>
#ifdef CHEETAHPLUS_ERRATUM_25
#include <sys/xc_impl.h>
@@ -906,8 +907,8 @@ mondo_recover(uint16_t cpuid, int bn)
return (retval);
}
- CHEETAH_LIVELOCK_ENTRY_NEXT(histp)
- CHEETAH_LIVELOCK_ENTRY_SET(histp, lbolt, lbolt);
+ CHEETAH_LIVELOCK_ENTRY_NEXT(histp);
+ CHEETAH_LIVELOCK_ENTRY_SET(histp, lbolt, LBOLT_WAITFREE);
CHEETAH_LIVELOCK_ENTRY_SET(histp, cpuid, cpuid);
CHEETAH_LIVELOCK_ENTRY_SET(histp, buddy, CPU->cpu_id);
diff --git a/usr/src/uts/sun4u/daktari/io/hpc3130_dak.c b/usr/src/uts/sun4u/daktari/io/hpc3130_dak.c
index f83fecf533..596c4c8bc0 100644
--- a/usr/src/uts/sun4u/daktari/io/hpc3130_dak.c
+++ b/usr/src/uts/sun4u/daktari/io/hpc3130_dak.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1805,7 +1805,8 @@ hpc3130_verify_slot_power(hpc3130_unit_t *hpc3130_p, i2c_client_hdl_t handle,
uint8_t tries = 0;
uint8_t status;
int result = HPC_SUCCESS;
- clock_t tm, timeleft;
+ clock_t timeleft;
+ clock_t tm = drv_usectohz(300000);
boolean_t slot_actual_state;
boolean_t failure = B_FALSE;
hpc3130_slot_table_entry_t *ste;
@@ -1823,10 +1824,8 @@ hpc3130_verify_slot_power(hpc3130_unit_t *hpc3130_p, i2c_client_hdl_t handle,
while ((slot_actual_state != slot_target_state) &&
(failure != B_TRUE)) {
- tm = ddi_get_lbolt();
- tm += drv_usectohz(300000);
- timeleft = cv_timedwait(&hpc3130_p->hpc3130_cond,
- &hpc3130_p->hpc3130_mutex, tm);
+ timeleft = cv_reltimedwait(&hpc3130_p->hpc3130_cond,
+ &hpc3130_p->hpc3130_mutex, tm, TR_CLOCK_TICK);
if (timeleft == -1) {
if (tries++ < HPC3130_POWER_TRIES) {
/*
diff --git a/usr/src/uts/sun4u/io/rmc_comm_drvintf.c b/usr/src/uts/sun4u/io/rmc_comm_drvintf.c
index 43820dbd4f..6c173379e0 100644
--- a/usr/src/uts/sun4u/io/rmc_comm_drvintf.c
+++ b/usr/src/uts/sun4u/io/rmc_comm_drvintf.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -30,8 +30,6 @@
*
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Header files
*/
@@ -79,7 +77,7 @@ rmc_comm_request_response(rmc_comm_msg_t *request,
* get the soft state struct (instance 0)
*/
if ((rcs = rmc_comm_getstate(NULL, 0,
- "rmc_comm_request_response")) == NULL)
+ "rmc_comm_request_response")) == NULL)
return (RCENOSOFTSTATE);
do {
@@ -111,7 +109,7 @@ rmc_comm_request_nowait(rmc_comm_msg_t *request, uint8_t flag)
* get the soft state struct (instance 0)
*/
if ((rcs = rmc_comm_getstate(NULL, 0,
- "rmc_comm_request_response")) == NULL)
+ "rmc_comm_request_response")) == NULL)
return (RCENOSOFTSTATE);
/*
@@ -278,7 +276,7 @@ rmc_comm_send_req_resp(struct rmc_comm_state *rcs, rmc_comm_msg_t *request,
dp_req_resp_t *drr;
dp_message_t *exp_resp;
dp_message_t req;
- clock_t resend_clockt;
+ clock_t resend_clockt, delta;
clock_t stop_clockt;
int err;
@@ -385,16 +383,17 @@ rmc_comm_send_req_resp(struct rmc_comm_state *rcs, rmc_comm_msg_t *request,
*/
DPRINTF(rcs, DAPI, (CE_CONT, "send request=%x\n", request->msg_type));
+ delta = drv_usectohz(TX_RETRY_TIME * 1000);
+
while ((err = rmc_comm_dp_msend(rcs, &req)) == RCNOERR) {
- resend_clockt = ddi_get_lbolt() +
- drv_usectohz(TX_RETRY_TIME * 1000);
+ resend_clockt = ddi_get_lbolt() + delta;
/*
* wait for a reply or an acknowledgement
*/
- (void) cv_timedwait(drr->cv_wait_reply, dps->dp_mutex,
- resend_clockt);
+ (void) cv_reltimedwait(drr->cv_wait_reply, dps->dp_mutex,
+ delta, TR_CLOCK_TICK);
DPRINTF(rcs, DAPI, (CE_CONT,
"reqresp send status: flags=%02x req=%x resp=%x tick=%ld\n",
@@ -494,7 +493,7 @@ rmc_comm_request_response_bp(rmc_comm_msg_t *request_bp,
* get the soft state struct (instance 0)
*/
if ((rcs = rmc_comm_getstate(NULL, 0,
- "rmc_comm_request_response_bp")) == NULL)
+ "rmc_comm_request_response_bp")) == NULL)
return (RCENOSOFTSTATE);
/*
@@ -701,8 +700,8 @@ rmc_comm_unreg_intr(uint8_t msg_type, rmc_comm_intrfunc_t intr_handler)
msgintr = &rcs->dp_state.msg_intr;
if (msgintr->intr_handler != NULL &&
- msgintr->intr_msg_type == msg_type &&
- msgintr->intr_handler == intr_handler) {
+ msgintr->intr_msg_type == msg_type &&
+ msgintr->intr_handler == intr_handler) {
ddi_remove_softintr(msgintr->intr_id);
msgintr->intr_handler = NULL;
@@ -739,7 +738,7 @@ rmc_comm_send_srecord_bp(caddr_t buf, int buflen,
* get the soft state struct (instance 0)
*/
if ((rcs = rmc_comm_getstate(NULL, 0,
- "rmc_comm_request_response_bp")) == NULL)
+ "rmc_comm_request_response_bp")) == NULL)
return (RCENOSOFTSTATE);
/*
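In rmc_comm_send_req_resp() the retry interval is constant, so the conversion also hoists the drv_usectohz() computation out of the resend loop; the absolute resend_clockt is still computed for the surrounding bookkeeping, but the wait itself uses the relative delta. A sketch of the loop shape, where send() and acked() are hypothetical helpers rather than driver functions:

	#include <sys/condvar.h>
	#include <sys/mutex.h>
	#include <sys/sunddi.h>

	#define	RETRY_TIME_USEC	500000		/* illustrative stand-in */

	/*
	 * Convert the interval to ticks once, then sleep for that relative
	 * delta each iteration instead of computing an absolute wake-up
	 * time for cv_timedwait().
	 */
	static void
	resend_until_acked(kcondvar_t *cvp, kmutex_t *mp,
	    int (*send)(void), boolean_t (*acked)(void))
	{
		clock_t delta = drv_usectohz(RETRY_TIME_USEC);

		while (send() == 0 && !acked())
			(void) cv_reltimedwait(cvp, mp, delta, TR_CLOCK_TICK);
	}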
diff --git a/usr/src/uts/sun4u/lw2plus/io/lombus.c b/usr/src/uts/sun4u/lw2plus/io/lombus.c
index dc556e20df..5de129460f 100644
--- a/usr/src/uts/sun4u/lw2plus/io/lombus.c
+++ b/usr/src/uts/sun4u/lw2plus/io/lombus.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* The "lombus" driver provides access to the LOMlite2 virtual registers,
@@ -827,7 +827,6 @@ lombus_cmd(HANDLE_TYPE *hdlp, ptrdiff_t vreg, uint_t val, uint_t cmd)
{
struct lombus_state *ssp;
clock_t start;
- clock_t tick;
uint8_t *p;
/*
@@ -891,10 +890,11 @@ lombus_cmd(HANDLE_TYPE *hdlp, ptrdiff_t vreg, uint_t val, uint_t cmd)
start = ddi_get_lbolt();
ssp->deadline = start + drv_usectohz(LOMBUS_CTS_TIMEOUT/1000);
while (!sio_lom_ready(ssp)) {
- if ((tick = ddi_get_lbolt()) > ssp->deadline)
+ if (ddi_get_lbolt() > ssp->deadline)
break;
- tick += drv_usectohz(LOMBUS_CTS_POLL/1000);
- cv_timedwait(ssp->lo_cv, ssp->lo_mutex, tick);
+
+ cv_reltimedwait(ssp->lo_cv, ssp->lo_mutex,
+ drv_usectohz(LOMBUS_CTS_POLL/1000), TR_CLOCK_TICK);
}
/*
@@ -926,8 +926,8 @@ lombus_cmd(HANDLE_TYPE *hdlp, ptrdiff_t vreg, uint_t val, uint_t cmd)
ssp->result = DUMMY_VALUE;
ssp->cmdstate = LOMBUS_CMDSTATE_WAITING;
while (ssp->cmdstate == LOMBUS_CMDSTATE_WAITING) {
- tick = ddi_get_lbolt() + drv_usectohz(LOMBUS_CMD_POLL/1000);
- if (cv_timedwait(ssp->lo_cv, ssp->lo_mutex, tick) == -1)
+ if (cv_reltimedwait(ssp->lo_cv, ssp->lo_mutex,
+ drv_usectohz(LOMBUS_CMD_POLL/1000), TR_CLOCK_TICK) == -1)
lombus_receive(ssp);
}
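lombus_cmd() keeps its absolute deadline (still derived from ddi_get_lbolt()) for deciding when to give up, but each poll sleep becomes a relative cv_reltimedwait(), which is why the local tick variable can be dropped. A sketch of the idiom with illustrative names:

	#include <sys/condvar.h>
	#include <sys/mutex.h>
	#include <sys/sunddi.h>

	/*
	 * Poll until ready() returns true or the absolute deadline passes,
	 * sleeping poll_ticks between checks.  Illustrative only.
	 */
	static void
	poll_until_ready(kcondvar_t *cvp, kmutex_t *mp, boolean_t (*ready)(void),
	    clock_t deadline, clock_t poll_ticks)
	{
		while (!ready()) {
			if (ddi_get_lbolt() > deadline)
				break;		/* give up at the deadline */
			(void) cv_reltimedwait(cvp, mp, poll_ticks,
			    TR_CLOCK_TICK);
		}
	}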
diff --git a/usr/src/uts/sun4u/lw8/os/lw8_platmod.c b/usr/src/uts/sun4u/lw8/os/lw8_platmod.c
index d7159e9f37..66e3a991eb 100644
--- a/usr/src/uts/sun4u/lw8/os/lw8_platmod.c
+++ b/usr/src/uts/sun4u/lw8/os/lw8_platmod.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/time.h>
#include <sys/cpuvar.h>
#include <sys/dditypes.h>
@@ -72,6 +70,7 @@
#include <sys/plat_ecc_dimm.h>
#include <sys/lgrp.h>
+#include <sys/clock_impl.h>
static int sg_debug = 0;
@@ -1458,6 +1457,8 @@ sg_prom_cpci_dr_check(void)
static void
sg_system_claim(void)
{
+ lbolt_debug_entry();
+
prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
}
@@ -1465,6 +1466,8 @@ static void
sg_system_release(void)
{
prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
+
+ lbolt_debug_return();
}
static void
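The lw8 platform module (and serengeti.c later in this diff) brackets its prom_interpret() calls with lbolt_debug_entry()/lbolt_debug_return() from <sys/clock_impl.h>; presumably these hooks keep the now event-driven lbolt coherent while control is handed to firmware. A minimal sketch of the bracketing, assuming the hooks need no other setup:

	#include <sys/clock_impl.h>	/* lbolt_debug_entry(), lbolt_debug_return() */
	#include <sys/promif.h>		/* prom_interpret() */

	static void
	run_forth_in_firmware(char *cmd)
	{
		lbolt_debug_entry();
		prom_interpret(cmd, 0, 0, 0, 0, 0);
		lbolt_debug_return();
	}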
diff --git a/usr/src/uts/sun4u/ngdr/io/dr_mem.c b/usr/src/uts/sun4u/ngdr/io/dr_mem.c
index 43ec37d952..d0d090ce56 100644
--- a/usr/src/uts/sun4u/ngdr/io/dr_mem.c
+++ b/usr/src/uts/sun4u/ngdr/io/dr_mem.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -62,15 +62,14 @@ extern int kcage_on;
/* for the DR*INTERNAL_ERROR macros. see sys/dr.h. */
static char *dr_ie_fmt = "dr_mem.c %d";
-static int dr_post_detach_mem_unit(dr_mem_unit_t *mp);
-static int dr_reserve_mem_spans(memhandle_t *mhp,
- struct memlist *mlist);
-static int dr_select_mem_target(dr_handle_t *hp,
- dr_mem_unit_t *mp, struct memlist *ml);
-static void dr_init_mem_unit_data(dr_mem_unit_t *mp);
+static int dr_post_detach_mem_unit(dr_mem_unit_t *mp);
+static int dr_reserve_mem_spans(memhandle_t *mhp, struct memlist *mlist);
+static int dr_select_mem_target(dr_handle_t *hp, dr_mem_unit_t *mp,
+ struct memlist *ml);
+static void dr_init_mem_unit_data(dr_mem_unit_t *mp);
-static int memlist_canfit(struct memlist *s_mlist,
- struct memlist *t_mlist);
+static int memlist_canfit(struct memlist *s_mlist,
+ struct memlist *t_mlist);
/*
* dr_mem_unit_t.sbm_flags
@@ -134,9 +133,9 @@ dr_get_memlist(dr_mem_unit_t *mp)
endpa = _ptob64(physmax + 1);
if (endpa > basepa)
mlist = memlist_del_span(
- mlist,
- basepa,
- endpa - basepa);
+ mlist,
+ basepa,
+ endpa - basepa);
}
if (mlist) {
@@ -147,7 +146,7 @@ dr_get_memlist(dr_mem_unit_t *mp)
/* if no mlist yet, try platform layer */
if (!mlist) {
err = drmach_mem_get_memlist(
- mp->sbm_cm.sbdev_id, &mlist);
+ mp->sbm_cm.sbdev_id, &mlist);
if (err) {
DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
mlist = NULL; /* paranoia */
@@ -209,8 +208,8 @@ dr_release_mem(dr_common_unit_t *cp)
cv_init(&rms.cond, NULL, CV_DRIVER, NULL);
mutex_enter(&rms.lock);
- err = kphysm_del_start(mp->sbm_memhandle,
- dr_mem_del_done, (void *) &rms);
+ err = kphysm_del_start(mp->sbm_memhandle, dr_mem_del_done,
+ (void *) &rms);
if (err == KPHYSM_OK) {
/* wait for completion or interrupt */
while (!rms.done) {
@@ -266,9 +265,9 @@ dr_release_mem(dr_common_unit_t *cp)
default:
cmn_err(CE_WARN,
- "%s: unexpected kphysm error code %d,"
- " id 0x%p",
- f, err, mp->sbm_cm.sbdev_id);
+ "%s: unexpected kphysm error code %d,"
+ " id 0x%p",
+ f, err, mp->sbm_cm.sbdev_id);
e_code = ESBD_IO;
break;
@@ -306,8 +305,8 @@ dr_attach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
sbd_error_t *err;
rv = kphysm_add_memory_dynamic(
- (pfn_t)(mc->address >> PAGESHIFT),
- (pgcnt_t)(mc->size >> PAGESHIFT));
+ (pfn_t)(mc->address >> PAGESHIFT),
+ (pgcnt_t)(mc->size >> PAGESHIFT));
if (rv != KPHYSM_OK) {
/*
* translate kphysm error and
@@ -335,7 +334,7 @@ dr_attach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
}
err = drmach_mem_add_span(
- mp->sbm_cm.sbdev_id, mc->address, mc->size);
+ mp->sbm_cm.sbdev_id, mc->address, mc->size);
if (err) {
DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
break;
@@ -348,7 +347,7 @@ dr_attach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
if (mp->sbm_cm.sbdev_error != NULL) {
dr_lock_status(hp->h_bd);
err = drmach_unconfigure(cp->sbdev_id,
- DEVI_BRANCH_DESTROY);
+ DEVI_BRANCH_DESTROY);
if (err)
sbd_err_clear(&err);
dr_unlock_status(hp->h_bd);
@@ -361,7 +360,7 @@ static void
dr_mem_ecache_scrub(dr_mem_unit_t *mp, struct memlist *mlist)
{
#ifdef DEBUG
- clock_t stime = lbolt;
+ clock_t stime = ddi_get_lbolt();
#endif /* DEBUG */
struct memlist *ml;
@@ -384,14 +383,14 @@ dr_mem_ecache_scrub(dr_mem_unit_t *mp, struct memlist *mlist)
dst_pa = ml->address;
if (ml->address & PAGEOFFSET)
cmn_err(CE_WARN,
- "%s: address (0x%lx) not on "
- "page boundary", f, ml->address);
+ "%s: address (0x%lx) not on "
+ "page boundary", f, ml->address);
nbytes = ml->size;
if (ml->size & PAGEOFFSET)
cmn_err(CE_WARN,
- "%s: size (0x%lx) not on "
- "page boundary", f, ml->size);
+ "%s: size (0x%lx) not on "
+ "page boundary", f, ml->size);
/*LINTED*/
while (nbytes > 0) {
@@ -413,7 +412,7 @@ dr_mem_ecache_scrub(dr_mem_unit_t *mp, struct memlist *mlist)
affinity_clear();
#ifdef DEBUG
- stime = lbolt - stime;
+ stime = ddi_get_lbolt() - stime;
PR_MEM("%s: scrub ticks = %ld (%ld secs)\n", f, stime, stime / hz);
#endif /* DEBUG */
}
@@ -429,9 +428,9 @@ dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
static fn_t f = "dr_move_memory";
PR_MEM("%s: (INLINE) moving memory from %s to %s\n",
- f,
- s_mp->sbm_cm.sbdev_path,
- t_mp->sbm_cm.sbdev_path);
+ f,
+ s_mp->sbm_cm.sbdev_path,
+ t_mp->sbm_cm.sbdev_path);
ASSERT(s_mp->sbm_flags & DR_MFLAG_SOURCE);
ASSERT(s_mp->sbm_peer == t_mp);
@@ -457,8 +456,8 @@ dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
affinity_set(drmach_mem_cpu_affinity(t_mp->sbm_cm.sbdev_id));
err = drmach_copy_rename_init(
- t_mp->sbm_cm.sbdev_id, _ptob64(t_mp->sbm_slice_offset),
- s_mp->sbm_cm.sbdev_id, c_ml, &cr_id);
+ t_mp->sbm_cm.sbdev_id, _ptob64(t_mp->sbm_slice_offset),
+ s_mp->sbm_cm.sbdev_id, c_ml, &cr_id);
if (err) {
DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
affinity_clear();
@@ -468,12 +467,12 @@ dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
srhp = dr_get_sr_handle(hp);
ASSERT(srhp);
- copytime = lbolt;
+ copytime = ddi_get_lbolt();
/* Quiesce the OS. */
if (dr_suspend(srhp)) {
cmn_err(CE_WARN, "%s: failed to quiesce OS"
- " for copy-rename", f);
+ " for copy-rename", f);
dr_release_sr_handle(srhp);
err = drmach_copy_rename_fini(cr_id);
@@ -508,7 +507,7 @@ dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
t_bp = t_mp->sbm_cm.sbdev_bp;
lgrp_plat_config(LGRP_CONFIG_MEM_RENAME,
- (uintptr_t)(s_bp->b_num | (t_bp->b_num << 16)));
+ (uintptr_t)(s_bp->b_num | (t_bp->b_num << 16)));
}
drmach_copy_rename(cr_id);
@@ -516,7 +515,7 @@ dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
/* Resume the OS. */
dr_resume(srhp);
- copytime = lbolt - copytime;
+ copytime = ddi_get_lbolt() - copytime;
dr_release_sr_handle(srhp);
err = drmach_copy_rename_fini(cr_id);
@@ -526,7 +525,7 @@ dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
affinity_clear();
PR_MEM("%s: copy-rename elapsed time = %ld ticks (%ld secs)\n",
- f, copytime, copytime / hz);
+ f, copytime, copytime / hz);
/* return -1 if dr_suspend or copy/rename recorded an error */
return (err == NULL ? 0 : -1);
@@ -607,10 +606,10 @@ dr_detach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
} else {
rv = dr_move_memory(hp, s_mp, t_mp);
PR_MEM("%s: %s memory COPY-RENAME (board %d -> %d)\n",
- f,
- rv ? "FAILED" : "COMPLETED",
- s_mp->sbm_cm.sbdev_bp->b_num,
- t_mp->sbm_cm.sbdev_bp->b_num);
+ f,
+ rv ? "FAILED" : "COMPLETED",
+ s_mp->sbm_cm.sbdev_bp->b_num,
+ t_mp->sbm_cm.sbdev_bp->b_num);
if (rv != 0)
(void) dr_cancel_mem(s_mp);
@@ -656,8 +655,7 @@ dr_del_span_query(pfn_t base, pgcnt_t npages, memquery_t *mp)
again:
for (ml = mlist; ml; ml = ml->next) {
if ((ml->address & sm) != sa) {
- mlist = memlist_del_span(mlist,
- ml->address, ml->size);
+ mlist = memlist_del_span(mlist, ml->address, ml->size);
goto again;
}
}
@@ -672,7 +670,7 @@ again:
memquery_t mq;
rv = kphysm_del_span_query(
- _b64top(ml->address), _b64top(ml->size), &mq);
+ _b64top(ml->address), _b64top(ml->size), &mq);
if (rv)
break;
@@ -683,10 +681,10 @@ again:
if (mq.nonrelocatable != 0) {
if (mq.first_nonrelocatable < mp->first_nonrelocatable)
mp->first_nonrelocatable =
- mq.first_nonrelocatable;
+ mq.first_nonrelocatable;
if (mq.last_nonrelocatable > mp->last_nonrelocatable)
mp->last_nonrelocatable =
- mq.last_nonrelocatable;
+ mq.last_nonrelocatable;
}
}
@@ -749,7 +747,7 @@ dr_mem_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
bzero((caddr_t)msp, sizeof (*msp));
strncpy(msp->ms_cm.c_id.c_name, pstat.type,
- sizeof (msp->ms_cm.c_id.c_name));
+ sizeof (msp->ms_cm.c_id.c_name));
msp->ms_cm.c_id.c_type = mp->sbm_cm.sbdev_type;
msp->ms_cm.c_id.c_unit = SBD_NULL_UNIT;
msp->ms_cm.c_cond = mp->sbm_cm.sbdev_cond;
@@ -892,13 +890,13 @@ dr_pre_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
switch (state) {
case DR_STATE_UNCONFIGURED:
PR_MEM("%s: recovering from UNCONFIG for %s\n",
- f,
- mp->sbm_cm.sbdev_path);
+ f,
+ mp->sbm_cm.sbdev_path);
/* use memlist cached by dr_post_detach_mem_unit */
ASSERT(mp->sbm_mlist != NULL);
PR_MEM("%s: re-configuring cached memlist for %s:\n",
- f, mp->sbm_cm.sbdev_path);
+ f, mp->sbm_cm.sbdev_path);
PR_MEMLIST_DUMP(mp->sbm_mlist);
/* kphysm del handle should have been freed */
@@ -908,10 +906,10 @@ dr_pre_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
case DR_STATE_CONNECTED:
PR_MEM("%s: reprogramming mem hardware on %s\n",
- f, mp->sbm_cm.sbdev_bp->b_path);
+ f, mp->sbm_cm.sbdev_bp->b_path);
PR_MEM("%s: enabling %s\n",
- f, mp->sbm_cm.sbdev_path);
+ f, mp->sbm_cm.sbdev_path);
err = drmach_mem_enable(mp->sbm_cm.sbdev_id);
if (err) {
@@ -965,7 +963,7 @@ dr_post_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);
PR_MEM("%s: %s memlist not in phys_install",
- f, mp->sbm_cm.sbdev_path);
+ f, mp->sbm_cm.sbdev_path);
memlist_delete(mlist);
continue;
@@ -976,9 +974,9 @@ dr_post_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
sbd_error_t *err;
err = drmach_mem_add_span(
- mp->sbm_cm.sbdev_id,
- ml->address,
- ml->size);
+ mp->sbm_cm.sbdev_id,
+ ml->address,
+ ml->size);
if (err)
DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
}
@@ -1082,19 +1080,19 @@ dr_add_memory_spans(dr_mem_unit_t *mp, struct memlist *ml)
rv = kphysm_add_memory_dynamic(base, npgs);
err = drmach_mem_add_span(
- mp->sbm_cm.sbdev_id,
- ml->address,
- ml->size);
+ mp->sbm_cm.sbdev_id,
+ ml->address,
+ ml->size);
if (err)
DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
if (rv != KPHYSM_OK) {
cmn_err(CE_WARN, "%s:"
- " unexpected kphysm_add_memory_dynamic"
- " return value %d;"
- " basepfn=0x%lx, npages=%ld\n",
- f, rv, base, npgs);
+ " unexpected kphysm_add_memory_dynamic"
+ " return value %d;"
+ " basepfn=0x%lx, npages=%ld\n",
+ f, rv, base, npgs);
continue;
}
@@ -1120,12 +1118,12 @@ dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
/* s_mp->sbm_del_mlist could be NULL, meaning no deleted spans */
PR_MEM("%s: %s: deleted memlist (EMPTY maybe okay):\n",
- f, s_mp->sbm_cm.sbdev_path);
+ f, s_mp->sbm_cm.sbdev_path);
PR_MEMLIST_DUMP(s_mp->sbm_del_mlist);
/* sanity check */
ASSERT(s_mp->sbm_del_mlist == NULL ||
- (s_mp->sbm_flags & DR_MFLAG_RELDONE) != 0);
+ (s_mp->sbm_flags & DR_MFLAG_RELDONE) != 0);
if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
t_mp = s_mp->sbm_peer;
@@ -1137,7 +1135,7 @@ dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
ASSERT(t_mp->sbm_del_mlist);
PR_MEM("%s: target %s: deleted memlist:\n",
- f, t_mp->sbm_cm.sbdev_path);
+ f, t_mp->sbm_cm.sbdev_path);
PR_MEMLIST_DUMP(t_mp->sbm_del_mlist);
} else {
/* this is no target unit */
@@ -1171,7 +1169,7 @@ dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
rv = 0;
if (s_mp->sbm_cm.sbdev_error) {
PR_MEM("%s: %s flags=%x", f,
- s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
+ s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
DR_DEV_CLR_UNREFERENCED(&s_mp->sbm_cm);
DR_DEV_CLR_RELEASED(&s_mp->sbm_cm);
dr_device_transition(&s_mp->sbm_cm, DR_STATE_CONFIGURED);
@@ -1179,7 +1177,7 @@ dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
}
if (t_mp != NULL && t_mp->sbm_cm.sbdev_error != NULL) {
PR_MEM("%s: %s flags=%x", f,
- s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
+ s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm);
DR_DEV_CLR_RELEASED(&t_mp->sbm_cm);
dr_device_transition(&t_mp->sbm_cm, DR_STATE_CONFIGURED);
@@ -1308,8 +1306,8 @@ dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
PR_MEMLIST_DUMP(t_mp->sbm_dyn_segs);
PR_MEM("%s: adding back remaining portion"
- " of %s, memlist:\n",
- f, t_mp->sbm_cm.sbdev_path);
+ " of %s, memlist:\n",
+ f, t_mp->sbm_cm.sbdev_path);
PR_MEMLIST_DUMP(t_excess_mlist);
dr_add_memory_spans(s_mp, t_excess_mlist);
@@ -1336,8 +1334,8 @@ dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
if (t_mp != NULL) {
/* delete target's entire address space */
- err = drmach_mem_del_span(
- t_mp->sbm_cm.sbdev_id, t_old_basepa & ~ sm, sz);
+ err = drmach_mem_del_span(t_mp->sbm_cm.sbdev_id,
+ t_old_basepa & ~ sm, sz);
if (err)
DRERR_SET_C(&t_mp->sbm_cm.sbdev_error, &err);
ASSERT(err == NULL);
@@ -1350,7 +1348,7 @@ dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
* info and keeping the slice offset from t_new_basepa.
*/
err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id,
- s_old_basepa & ~ sm, t_new_basepa & sm);
+ s_old_basepa & ~ sm, t_new_basepa & sm);
if (err)
DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
ASSERT(err == NULL);
@@ -1358,7 +1356,7 @@ dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
} else {
/* delete board's entire address space */
err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id,
- s_old_basepa & ~ sm, sz);
+ s_old_basepa & ~ sm, sz);
if (err)
DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
ASSERT(err == NULL);
@@ -1381,7 +1379,7 @@ cleanup:
if (t_new_smallsize > 0) {
t_mp->sbm_npages = _b64top(t_new_smallsize);
PR_MEM("%s: target new size 0x%lx bytes\n",
- f, t_new_smallsize);
+ f, t_new_smallsize);
}
}
if (t_mp != NULL && t_mp->sbm_cm.sbdev_error == NULL) {
@@ -1478,8 +1476,8 @@ dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
* copy-rename.
*/
ASSERT(mp->sbm_npages != 0);
- rv = kphysm_del_span_query(
- mp->sbm_basepfn, mp->sbm_npages, &mq);
+ rv = kphysm_del_span_query(mp->sbm_basepfn, mp->sbm_npages,
+ &mq);
if (rv != KPHYSM_OK) {
DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);
err_flag = 1;
@@ -1488,10 +1486,10 @@ dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
if (mq.nonrelocatable != 0) {
if (!(dr_cmd_flags(hp) &
- (SBD_FLAG_FORCE | SBD_FLAG_QUIESCE_OKAY))) {
+ (SBD_FLAG_FORCE | SBD_FLAG_QUIESCE_OKAY))) {
/* caller wasn't prompted for a suspend */
dr_dev_err(CE_WARN, &mp->sbm_cm,
- ESBD_QUIESCE_REQD);
+ ESBD_QUIESCE_REQD);
err_flag = 1;
break;
}
@@ -1511,7 +1509,7 @@ dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
if (ml == NULL) {
err_flag = 1;
PR_MEM("%s: no memlist found for %s\n",
- f, mp->sbm_cm.sbdev_path);
+ f, mp->sbm_cm.sbdev_path);
continue;
}
@@ -1527,7 +1525,7 @@ dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
mp->sbm_flags |= DR_MFLAG_RELOWNER;
if ((mq.nonrelocatable != 0) ||
- dr_reserve_mem_spans(&mp->sbm_memhandle, ml)) {
+ dr_reserve_mem_spans(&mp->sbm_memhandle, ml)) {
/*
* Either the detaching memory node contains
* non-reloc memory or we failed to reserve the
@@ -1554,9 +1552,9 @@ dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
* and hope helpful for debug
*/
cmn_err(CE_WARN, "%s: unexpected"
- " kphysm_del_release return"
- " value %d",
- f, rv);
+ " kphysm_del_release return"
+ " value %d",
+ f, rv);
}
mp->sbm_flags &= ~DR_MFLAG_RELOWNER;
@@ -1565,8 +1563,8 @@ dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
/* make sure sbm_flags is clean */
ASSERT(mp->sbm_flags == 0);
- dr_dev_err(CE_WARN,
- &mp->sbm_cm, ESBD_NO_TARGET);
+ dr_dev_err(CE_WARN, &mp->sbm_cm,
+ ESBD_NO_TARGET);
err_flag = 1;
break;
@@ -1590,13 +1588,13 @@ dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
if (mp->sbm_flags & DR_MFLAG_SOURCE) {
PR_MEM("%s: release of %s requires copy/rename;"
- " selected target board %s\n",
- f,
- mp->sbm_cm.sbdev_path,
- mp->sbm_peer->sbm_cm.sbdev_path);
+ " selected target board %s\n",
+ f,
+ mp->sbm_cm.sbdev_path,
+ mp->sbm_peer->sbm_cm.sbdev_path);
} else {
PR_MEM("%s: copy/rename not required to release %s\n",
- f, mp->sbm_cm.sbdev_path);
+ f, mp->sbm_cm.sbdev_path);
}
ASSERT(mp->sbm_flags & DR_MFLAG_RELOWNER);
@@ -1640,7 +1638,7 @@ dr_release_mem_done(dr_common_unit_t *cp)
* and hope helpful for debug
*/
cmn_err(CE_WARN, "%s: unexpected kphysm_del_release"
- " return value %d", f, rv);
+ " return value %d", f, rv);
}
s_mp->sbm_flags &= ~DR_MFLAG_RELOWNER;
@@ -1651,9 +1649,9 @@ dr_release_mem_done(dr_common_unit_t *cp)
/* XXX Can we know that sbdev_error was encountered during release? */
if (s_mp->sbm_cm.sbdev_error != NULL) {
PR_MEM("%s: %s: error %d noted\n",
- f,
- s_mp->sbm_cm.sbdev_path,
- s_mp->sbm_cm.sbdev_error->e_code);
+ f,
+ s_mp->sbm_cm.sbdev_path,
+ s_mp->sbm_cm.sbdev_error->e_code);
if (t_mp != NULL) {
ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist);
@@ -1727,11 +1725,11 @@ dr_release_mem_done(dr_common_unit_t *cp)
memlist_read_unlock();
if (rv) {
cmn_err(CE_WARN, "%s: %smem-unit (%d.%d): "
- "deleted memory still found in phys_install",
- f,
- (mp == t_mp ? "target " : ""),
- mp->sbm_cm.sbdev_bp->b_num,
- mp->sbm_cm.sbdev_unum);
+ "deleted memory still found in phys_install",
+ f,
+ (mp == t_mp ? "target " : ""),
+ mp->sbm_cm.sbdev_bp->b_num,
+ mp->sbm_cm.sbdev_unum);
DR_DEV_INTERNAL_ERROR(&s_mp->sbm_cm);
return;
@@ -1749,7 +1747,7 @@ dr_release_mem_done(dr_common_unit_t *cp)
}
PR_MEM("%s: marking %s release DONE\n",
- f, s_mp->sbm_cm.sbdev_path);
+ f, s_mp->sbm_cm.sbdev_path);
s_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED;
@@ -1763,7 +1761,7 @@ dr_release_mem_done(dr_common_unit_t *cp)
}
PR_MEM("%s: marking %s release DONE\n",
- f, t_mp->sbm_cm.sbdev_path);
+ f, t_mp->sbm_cm.sbdev_path);
t_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED;
}
@@ -1778,8 +1776,7 @@ dr_disconnect_mem(dr_mem_unit_t *mp)
#ifdef DEBUG
int state = mp->sbm_cm.sbdev_state;
- ASSERT(state == DR_STATE_CONNECTED ||
- state == DR_STATE_UNCONFIGURED);
+ ASSERT(state == DR_STATE_CONNECTED || state == DR_STATE_UNCONFIGURED);
#endif
PR_MEM("%s...\n", f);
@@ -1838,7 +1835,7 @@ dr_cancel_mem(dr_mem_unit_t *s_mp)
if (t_mp != NULL && t_mp->sbm_del_mlist != NULL) {
PR_MEM("%s: undoing target %s memory delete\n",
- f, t_mp->sbm_cm.sbdev_path);
+ f, t_mp->sbm_cm.sbdev_path);
dr_add_memory_spans(t_mp, t_mp->sbm_del_mlist);
DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm);
@@ -1846,7 +1843,7 @@ dr_cancel_mem(dr_mem_unit_t *s_mp)
if (s_mp->sbm_del_mlist != NULL) {
PR_MEM("%s: undoing %s memory delete\n",
- f, s_mp->sbm_cm.sbdev_path);
+ f, s_mp->sbm_cm.sbdev_path);
dr_add_memory_spans(s_mp, s_mp->sbm_del_mlist);
}
@@ -1874,8 +1871,8 @@ dr_cancel_mem(dr_mem_unit_t *s_mp)
DR_DEV_CLR_RELEASED(&t_mp->sbm_cm);
- dr_device_transition(
- &t_mp->sbm_cm, DR_STATE_CONFIGURED);
+ dr_device_transition(&t_mp->sbm_cm,
+ DR_STATE_CONFIGURED);
}
if (s_mp->sbm_del_mlist != s_mp->sbm_mlist)
@@ -1896,7 +1893,7 @@ dr_cancel_mem(dr_mem_unit_t *s_mp)
default:
PR_MEM("%s: WARNING unexpected state (%d) for %s\n",
- f, (int)state, s_mp->sbm_cm.sbdev_path);
+ f, (int)state, s_mp->sbm_cm.sbdev_path);
return (-1);
}
@@ -1971,8 +1968,8 @@ dr_init_mem_unit_data(dr_mem_unit_t *mp)
*/
/* TODO: curious comment. it suggests pda query should happen if this fails */
PR_MEM("%s: PDA query failed for npages."
- " Checking memlist for %s\n",
- f, mp->sbm_cm.sbdev_path);
+ " Checking memlist for %s\n",
+ f, mp->sbm_cm.sbdev_path);
mlist = dr_get_memlist(mp);
for (ml = mlist; ml; ml = ml->next)
@@ -2004,7 +2001,7 @@ dr_init_mem_unit_data(dr_mem_unit_t *mp)
lgrp_plat_config(LGRP_CONFIG_MEM_ADD, (uintptr_t)&umb);
PR_MEM("%s: %s (basepfn = 0x%lx, npgs = %ld)\n",
- f, mp->sbm_cm.sbdev_path, mp->sbm_basepfn, mp->sbm_npages);
+ f, mp->sbm_cm.sbdev_path, mp->sbm_basepfn, mp->sbm_npages);
}
static int
@@ -2030,9 +2027,9 @@ dr_reserve_mem_spans(memhandle_t *mhp, struct memlist *ml)
err = kphysm_del_span(*mhp, base, npgs);
if (err != KPHYSM_OK) {
cmn_err(CE_WARN, "%s memory reserve failed."
- " unexpected kphysm_del_span return value %d;"
- " basepfn=0x%lx npages=%ld",
- f, err, base, npgs);
+ " unexpected kphysm_del_span return value %d;"
+ " basepfn=0x%lx npages=%ld",
+ f, err, base, npgs);
return (-1);
}
@@ -2204,18 +2201,19 @@ dr_select_mem_target(dr_handle_t *hp,
* source board.
*/
rv = kphysm_del_span_query(
- s_mp->sbm_basepfn,
- s_mp->sbm_npages, &s_mq);
+ s_mp->sbm_basepfn,
+ s_mp->sbm_npages, &s_mq);
if (rv != KPHYSM_OK) {
PR_MEM("%s: %s: unexpected"
- " kphysm_del_span_query"
- " return value %d;"
- " basepfn 0x%lx, npages %ld\n",
- f,
- s_mp->sbm_cm.sbdev_path,
- rv,
- s_mp->sbm_basepfn,
- s_mp->sbm_npages);
+ " kphysm_del_span_query"
+ " return value %d;"
+ " basepfn 0x%lx,"
+ " npages %ld\n",
+ f,
+ s_mp->sbm_cm.sbdev_path,
+ rv,
+ s_mp->sbm_basepfn,
+ s_mp->sbm_npages);
/* paranoia */
s_mq.phys_pages = 0;
@@ -2237,11 +2235,11 @@ dr_select_mem_target(dr_handle_t *hp,
continue;
PR_MEM("%s: %s: nonrelocatable"
- " span (0x%lx..0x%lx)\n",
- f,
- s_mp->sbm_cm.sbdev_path,
- s_mq.first_nonrelocatable,
- s_mq.last_nonrelocatable);
+ " span (0x%lx..0x%lx)\n",
+ f,
+ s_mp->sbm_cm.sbdev_path,
+ s_mq.first_nonrelocatable,
+ s_mq.last_nonrelocatable);
}
/*
@@ -2251,11 +2249,11 @@ dr_select_mem_target(dr_handle_t *hp,
* with this target candidate.
*/
pfn = s_mq.first_nonrelocatable &
- ~t_mp->sbm_alignment_mask;
+ ~t_mp->sbm_alignment_mask;
/* skip candidate if memory is too small */
if (pfn + t_mp->sbm_npages <
- s_mq.last_nonrelocatable)
+ s_mq.last_nonrelocatable)
continue;
/*
@@ -2293,8 +2291,8 @@ dr_select_mem_target(dr_handle_t *hp,
p = p & ~t_mp->sbm_alignment_mask;
if ((p > s_mq.first_nonrelocatable) ||
- (p + t_mp->sbm_npages <
- s_mq.last_nonrelocatable)) {
+ (p + t_mp->sbm_npages <
+ s_mq.last_nonrelocatable)) {
/*
* alternative starting addr
@@ -2319,10 +2317,10 @@ dr_select_mem_target(dr_handle_t *hp,
*/
t_mp->sbm_slice_offset = pfn & sm;
PR_MEM("%s: %s:"
- " proposed mc offset 0x%lx\n",
- f,
- t_mp->sbm_cm.sbdev_path,
- t_mp->sbm_slice_offset);
+ " proposed mc offset 0x%lx\n",
+ f,
+ t_mp->sbm_cm.sbdev_path,
+ t_mp->sbm_slice_offset);
}
dr_smt_preference[preference]++;
@@ -2365,10 +2363,10 @@ dr_select_mem_target(dr_handle_t *hp,
t_ml = dr_get_memlist(t_mp);
if (t_ml == NULL) {
cmn_err(CE_WARN, "%s: no memlist for"
- " mem-unit %d, board %d",
- f,
- t_mp->sbm_cm.sbdev_bp->b_num,
- t_mp->sbm_cm.sbdev_unum);
+ " mem-unit %d, board %d",
+ f,
+ t_mp->sbm_cm.sbdev_bp->b_num,
+ t_mp->sbm_cm.sbdev_unum);
continue;
}
@@ -2400,9 +2398,9 @@ dr_select_mem_target(dr_handle_t *hp,
if (excess > 0) {
x_ml = memlist_del_span(
- x_ml,
- _ptob64(s_mp->sbm_basepfn),
- _ptob64(excess));
+ x_ml,
+ _ptob64(s_mp->sbm_basepfn),
+ _ptob64(excess));
}
ASSERT(x_ml);
@@ -2417,19 +2415,19 @@ dr_select_mem_target(dr_handle_t *hp,
/* trim off upper portion */
excess = (s_mp->sbm_basepfn + s_mp->sbm_npages)
- - (s_mq.last_nonrelocatable + 1);
+ - (s_mq.last_nonrelocatable + 1);
if (excess > 0) {
pfn_t p;
p = s_mq.last_nonrelocatable + 1;
x_ml = memlist_del_span(
- x_ml,
- _ptob64(p),
- _ptob64(excess));
+ x_ml,
+ _ptob64(p),
+ _ptob64(excess));
}
PR_MEM("%s: %s: edited source memlist:\n",
- f, s_mp->sbm_cm.sbdev_path);
+ f, s_mp->sbm_cm.sbdev_path);
PR_MEMLIST_DUMP(x_ml);
#ifdef DEBUG
@@ -2439,7 +2437,7 @@ dr_select_mem_target(dr_handle_t *hp,
d_ml = d_ml->next;
ASSERT(d_ml->address + d_ml->size ==
- _ptob64(s_mq.last_nonrelocatable + 1));
+ _ptob64(s_mq.last_nonrelocatable + 1));
#endif
/*
@@ -2456,7 +2454,7 @@ dr_select_mem_target(dr_handle_t *hp,
/* verify target can support source memory spans. */
if (memlist_canfit(d_ml, t_ml) == 0) {
PR_MEM("%s: source memlist won't"
- " fit in target memlist\n", f);
+ " fit in target memlist\n", f);
PR_MEM("%s: source memlist:\n", f);
PR_MEMLIST_DUMP(d_ml);
PR_MEM("%s: target memlist:\n", f);
@@ -2468,28 +2466,28 @@ dr_select_mem_target(dr_handle_t *hp,
/* NOTE: the value of d_ml is not used beyond this point */
PR_MEM("%s: checking for no-reloc in %s, "
- " basepfn=0x%lx, npages=%ld\n",
- f,
- t_mp->sbm_cm.sbdev_path,
- t_mp->sbm_basepfn,
- t_mp->sbm_npages);
+ " basepfn=0x%lx, npages=%ld\n",
+ f,
+ t_mp->sbm_cm.sbdev_path,
+ t_mp->sbm_basepfn,
+ t_mp->sbm_npages);
rv = kphysm_del_span_query(
- t_mp->sbm_basepfn, t_mp->sbm_npages, &mq);
+ t_mp->sbm_basepfn, t_mp->sbm_npages, &mq);
if (rv != KPHYSM_OK) {
PR_MEM("%s: kphysm_del_span_query:"
- " unexpected return value %d\n", f, rv);
+ " unexpected return value %d\n", f, rv);
continue;
}
if (mq.nonrelocatable != 0) {
PR_MEM("%s: candidate %s has"
- " nonrelocatable span [0x%lx..0x%lx]\n",
- f,
- t_mp->sbm_cm.sbdev_path,
- mq.first_nonrelocatable,
- mq.last_nonrelocatable);
+ " nonrelocatable span [0x%lx..0x%lx]\n",
+ f,
+ t_mp->sbm_cm.sbdev_path,
+ mq.first_nonrelocatable,
+ mq.last_nonrelocatable);
continue;
}
@@ -2503,10 +2501,10 @@ dr_select_mem_target(dr_handle_t *hp,
* favorite debugger.
*/
if (dr_ignore_board &
- (1 << (t_mp->sbm_cm.sbdev_bp->b_num - 1))) {
+ (1 << (t_mp->sbm_cm.sbdev_bp->b_num - 1))) {
PR_MEM("%s: dr_ignore_board flag set,"
- " ignoring %s as candidate\n",
- f, t_mp->sbm_cm.sbdev_path);
+ " ignoring %s as candidate\n",
+ f, t_mp->sbm_cm.sbdev_path);
continue;
}
#endif
@@ -2558,12 +2556,12 @@ dr_select_mem_target(dr_handle_t *hp,
s_del_pa - _ptob64(pfn));
PR_MEM("%s: %s: reserving src brd memlist:\n",
- f, s_mp->sbm_cm.sbdev_path);
+ f, s_mp->sbm_cm.sbdev_path);
PR_MEMLIST_DUMP(d_ml);
/* reserve excess spans */
- if (dr_reserve_mem_spans(
- &s_mp->sbm_memhandle, d_ml) != 0) {
+ if (dr_reserve_mem_spans(&s_mp->sbm_memhandle, d_ml)
+ != 0) {
/* likely more non-reloc pages appeared */
/* TODO: restart from top? */
@@ -2586,7 +2584,7 @@ dr_select_mem_target(dr_handle_t *hp,
*/
if (dr_reserve_mem_spans(&s_mp->sbm_memhandle, t_ml) == 0) {
PR_MEM("%s: %s: target board memory reserved\n",
- f, t_mp->sbm_cm.sbdev_path);
+ f, t_mp->sbm_cm.sbdev_path);
/* a candidate target board is now reserved */
t_mp->sbm_flags |= DR_MFLAG_RESERVED;
@@ -2598,7 +2596,7 @@ dr_select_mem_target(dr_handle_t *hp,
/* did not successfully reserve the target board. */
PR_MEM("%s: could not reserve target %s\n",
- f, t_mp->sbm_cm.sbdev_path);
+ f, t_mp->sbm_cm.sbdev_path);
/*
* NOTE: an undo of the dr_reserve_mem_span work
@@ -2621,7 +2619,7 @@ dr_select_mem_target(dr_handle_t *hp,
*/
if (c_mp == NULL) {
PR_MEM("%s: %s: target selection failed.\n",
- f, s_mp->sbm_cm.sbdev_path);
+ f, s_mp->sbm_cm.sbdev_path);
if (t_ml != NULL)
memlist_delete(t_ml);
@@ -2630,9 +2628,9 @@ dr_select_mem_target(dr_handle_t *hp,
}
PR_MEM("%s: found target %s for source %s\n",
- f,
- c_mp->sbm_cm.sbdev_path,
- s_mp->sbm_cm.sbdev_path);
+ f,
+ c_mp->sbm_cm.sbdev_path,
+ s_mp->sbm_cm.sbdev_path);
s_mp->sbm_peer = c_mp;
s_mp->sbm_flags |= DR_MFLAG_SOURCE;
@@ -2650,11 +2648,11 @@ dr_select_mem_target(dr_handle_t *hp,
if (c_mp->sbm_npages > s_mp->sbm_npages) {
s_mp->sbm_flags |= DR_MFLAG_MEMUPSIZE;
PR_MEM("%s: upsize detected (source=%ld < target=%ld)\n",
- f, s_mp->sbm_npages, c_mp->sbm_npages);
+ f, s_mp->sbm_npages, c_mp->sbm_npages);
} else if (c_mp->sbm_npages < s_mp->sbm_npages) {
s_mp->sbm_flags |= DR_MFLAG_MEMDOWNSIZE;
PR_MEM("%s: downsize detected (source=%ld > target=%ld)\n",
- f, s_mp->sbm_npages, c_mp->sbm_npages);
+ f, s_mp->sbm_npages, c_mp->sbm_npages);
}
return (0);
diff --git a/usr/src/uts/sun4u/opl/io/dm2s.c b/usr/src/uts/sun4u/opl/io/dm2s.c
index 5d9e99cc5a..d2f6c12798 100644
--- a/usr/src/uts/sun4u/opl/io/dm2s.c
+++ b/usr/src/uts/sun4u/opl/io/dm2s.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -675,7 +675,7 @@ static int
dm2s_mbox_init(dm2s_t *dm2sp)
{
int ret;
- clock_t tout;
+ clock_t tout = drv_usectohz(DM2S_MB_TOUT);
ASSERT(MUTEX_HELD(&dm2sp->ms_lock));
dm2sp->ms_target = DM2S_TARGET_ID;
@@ -735,9 +735,8 @@ dm2s_mbox_init(dm2s_t *dm2sp)
*/
DPRINTF(DBG_MBOX, ("dm2s_mbox_init: waiting...\n"));
- tout = ddi_get_lbolt() + drv_usectohz(DM2S_MB_TOUT);
- ret = cv_timedwait_sig(&dm2sp->ms_wait,
- &dm2sp->ms_lock, tout);
+ ret = cv_reltimedwait_sig(&dm2sp->ms_wait,
+ &dm2sp->ms_lock, tout, TR_CLOCK_TICK);
if (ret == 0) {
/* if interrupted, return immediately. */
DPRINTF(DBG_MBOX,
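dm2s_mbox_init() (and okm_mbox_init() further down) use the signal-aware variant: cv_reltimedwait_sig() keeps cv_timedwait_sig()'s return convention but takes a relative tick count. A sketch of interpreting the result, with a hypothetical error mapping:

	#include <sys/condvar.h>
	#include <sys/mutex.h>
	#include <sys/sunddi.h>
	#include <sys/errno.h>

	static int
	wait_for_mbox(kcondvar_t *cvp, kmutex_t *mp, clock_t usec_timeout)
	{
		clock_t ret;

		ret = cv_reltimedwait_sig(cvp, mp, drv_usectohz(usec_timeout),
		    TR_CLOCK_TICK);
		if (ret == 0)
			return (EINTR);		/* interrupted by a signal */
		if (ret == -1)
			return (ETIMEDOUT);	/* timed out */
		return (0);			/* condition was signalled */
	}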
diff --git a/usr/src/uts/sun4u/opl/io/dr_mem.c b/usr/src/uts/sun4u/opl/io/dr_mem.c
index 32ef341cea..8c7ac2aa9b 100644
--- a/usr/src/uts/sun4u/opl/io/dr_mem.c
+++ b/usr/src/uts/sun4u/opl/io/dr_mem.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* DR memory support routines.
*/
@@ -496,7 +494,7 @@ dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
srhp = dr_get_sr_handle(hp);
ASSERT(srhp);
- copytime = lbolt;
+ copytime = ddi_get_lbolt();
/* Quiesce the OS. */
if (dr_suspend(srhp)) {
@@ -527,7 +525,7 @@ dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
/* Resume the OS. */
dr_resume(srhp);
- copytime = lbolt - copytime;
+ copytime = ddi_get_lbolt() - copytime;
if (err = drmach_copy_rename_fini(cr_id))
goto done;
diff --git a/usr/src/uts/sun4u/opl/io/mc-opl.c b/usr/src/uts/sun4u/opl/io/mc-opl.c
index e09fd4b80c..f49e93f09e 100644
--- a/usr/src/uts/sun4u/opl/io/mc-opl.c
+++ b/usr/src/uts/sun4u/opl/io/mc-opl.c
@@ -446,8 +446,8 @@ mc_polling_thread()
mc_pollthr_running = 1;
while (!(mc_poll_cmd & MC_POLL_EXIT)) {
mc_polling();
- cv_timedwait(&mc_polling_cv, &mc_polling_lock,
- ddi_get_lbolt() + mc_timeout_period);
+ cv_reltimedwait(&mc_polling_cv, &mc_polling_lock,
+ mc_timeout_period, TR_CLOCK_TICK);
}
mc_pollthr_running = 0;
diff --git a/usr/src/uts/sun4u/opl/io/oplkmdrv.c b/usr/src/uts/sun4u/opl/io/oplkmdrv.c
index 0d23106e9e..6f9c9efb5b 100644
--- a/usr/src/uts/sun4u/opl/io/oplkmdrv.c
+++ b/usr/src/uts/sun4u/opl/io/oplkmdrv.c
@@ -884,9 +884,9 @@ okm_mbox_init(okms_t *okmsp)
*/
DPRINTF(DBG_MBOX, ("okm_mbox_init: waiting...\n"));
- tout = ddi_get_lbolt() + drv_usectohz(OKM_MB_TOUT);
- ret = cv_timedwait_sig(&okmsp->km_wait,
- &okmsp->km_lock, tout);
+ tout = drv_usectohz(OKM_MB_TOUT);
+ ret = cv_reltimedwait_sig(&okmsp->km_wait,
+ &okmsp->km_lock, tout, TR_CLOCK_TICK);
if (ret == 0) {
/* if interrupted, return immediately. */
DPRINTF(DBG_MBOX,
diff --git a/usr/src/uts/sun4u/serengeti/io/sbdp_error.c b/usr/src/uts/sun4u/serengeti/io/sbdp_error.c
index 99acfc8426..729e1e5f03 100644
--- a/usr/src/uts/sun4u/serengeti/io/sbdp_error.c
+++ b/usr/src/uts/sun4u/serengeti/io/sbdp_error.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2002 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sunddi.h>
@@ -254,7 +252,7 @@ sbdp_passthru_reset_error(sbdp_handle_t *hp, void *arg)
int
sbdp_inject_error(const char *func_name, uint_t entry)
{
- extern volatile clock_t lbolt;
+ extern clock_t ddi_get_lbolt(void);
int index;
int value;
static char *f = "sbdp_inject_error";
@@ -270,7 +268,7 @@ sbdp_inject_error(const char *func_name, uint_t entry)
 * value, use lbolt to generate the pseudo random
* response.
*/
- value = (-(int)(lbolt % 2));
+ value = (-(int)(ddi_get_lbolt() % 2));
break;
case SBDP_IE_FAILURE:
diff --git a/usr/src/uts/sun4u/serengeti/io/sbdp_mem.c b/usr/src/uts/sun4u/serengeti/io/sbdp_mem.c
index f78bf49e74..96d7d5d797 100644
--- a/usr/src/uts/sun4u/serengeti/io/sbdp_mem.c
+++ b/usr/src/uts/sun4u/serengeti/io/sbdp_mem.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* memory management for serengeti dr memory
*/
@@ -312,7 +310,7 @@ sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
}
funclen = (int)((ulong_t)_sbdp_copy_rename_end -
- (ulong_t)sbdp_copy_rename__relocatable);
+ (ulong_t)sbdp_copy_rename__relocatable);
if (funclen > PAGESIZE) {
cmn_err(CE_WARN,
@@ -363,8 +361,7 @@ sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
affinity_set(CPU_CURRENT);
scriptlen = sbdp_prep_rename_script(cph);
if (scriptlen <= 0) {
- cmn_err(CE_WARN,
- "sbdp failed to prep for copy-rename");
+ cmn_err(CE_WARN, "sbdp failed to prep for copy-rename");
sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
err = 1;
goto cleanup;
@@ -374,10 +371,9 @@ sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
indexlen = sizeof (*indexp) << 1;
if ((funclen + scriptlen + indexlen) > PAGESIZE) {
- cmn_err(CE_WARN,
- "sbdp: func len (%d) + script len (%d) "
- "+ index len (%d) > PAGESIZE (%d)",
- funclen, scriptlen, indexlen, PAGESIZE);
+ cmn_err(CE_WARN, "sbdp: func len (%d) + script len (%d) "
+ "+ index len (%d) > PAGESIZE (%d)", funclen, scriptlen,
+ indexlen, PAGESIZE);
sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
err = 1;
goto cleanup;
@@ -396,22 +392,20 @@ sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
availlen -= (int)(data_area - (ulong_t)mempage);
if (availlen < scriptlen) {
- cmn_err(CE_WARN,
- "sbdp: available len (%d) < script len (%d)",
- availlen, scriptlen);
+ cmn_err(CE_WARN, "sbdp: available len (%d) < script len (%d)",
+ availlen, scriptlen);
sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
err = 1;
goto cleanup;
}
SBDP_DBG_MEM("copy-rename script data area = 0x%lx\n",
- data_area);
+ data_area);
bcopy((caddr_t)rsbuffer, (caddr_t)data_area, scriptlen);
rsp = (sbdp_rename_script_t *)data_area;
- index_area = data_area + (ulong_t)scriptlen +
- (ulong_t)(linesize - 1);
+ index_area = data_area + (ulong_t)scriptlen + (ulong_t)(linesize - 1);
index_area &= ~((ulong_t)(linesize - 1));
indexp = (int *)index_area;
indexp[0] = 0;
@@ -421,8 +415,8 @@ sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
e_page = (ulong_t)mempage + PAGESIZE;
if (e_area > e_page) {
cmn_err(CE_WARN,
- "sbdp: index area size (%d) > available (%d)\n",
- indexlen, (int)(e_page - index_area));
+ "sbdp: index area size (%d) > available (%d)\n",
+ indexlen, (int)(e_page - index_area));
sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
err = 1;
goto cleanup;
@@ -437,7 +431,7 @@ sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
srhp->sr_flags = hp->h_flags;
- copytime = lbolt;
+ copytime = ddi_get_lbolt();
mutex_enter(&s_bdp->bd_mutex);
mlist = sbdp_memlist_dup(s_bdp->ml);
@@ -458,8 +452,7 @@ sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
*/
if (sbdp_suspend(srhp)) {
sbd_error_t *sep;
- cmn_err(CE_WARN,
- "sbdp: failed to quiesce OS for copy-rename");
+ cmn_err(CE_WARN, "sbdp: failed to quiesce OS for copy-rename");
sep = &srhp->sep;
sbdp_set_err(hp->h_err, sep->e_code, sep->e_rsc);
sbdp_release_sr_handle(srhp);
@@ -517,13 +510,13 @@ sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
err = 1;
}
- copytime = lbolt - copytime;
+ copytime = ddi_get_lbolt() - copytime;
sbdp_release_sr_handle(srhp);
sbdp_del_memlist(hp, mlist);
SBDP_DBG_MEM("copy-rename elapsed time = %ld ticks (%ld secs)\n",
- copytime, copytime / hz);
+ copytime, copytime / hz);
switch (cr_err) {
case SBDP_CR_OK:
@@ -561,7 +554,7 @@ sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
* Source and target board numbers are packaged in arg.
*/
lgrp_plat_config(LGRP_CONFIG_MEM_RENAME,
- (uintptr_t)(s_bdp->bd | (t_bdp->bd << 16)));
+ (uintptr_t)(s_bdp->bd | (t_bdp->bd << 16)));
/*
* swap list of banks
@@ -1046,7 +1039,7 @@ sbdp_prep_rename_script(sbdp_cr_handle_t *cph)
SBDP_DBG_MEM("dumping copy-rename script:\n");
for (i = 0; i < m; i++) {
SBDP_DBG_MEM("0x%lx = 0x%lx, asi 0x%x\n",
- rsp[i].masr_addr, rsp[i].masr, rsp[i].asi);
+ rsp[i].masr_addr, rsp[i].masr, rsp[i].asi);
}
DELAY(1000000);
}
diff --git a/usr/src/uts/sun4u/serengeti/os/serengeti.c b/usr/src/uts/sun4u/serengeti/os/serengeti.c
index bc2e6e2a6d..d4229431f3 100644
--- a/usr/src/uts/sun4u/serengeti/os/serengeti.c
+++ b/usr/src/uts/sun4u/serengeti/os/serengeti.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/time.h>
#include <sys/cpuvar.h>
#include <sys/dditypes.h>
@@ -72,6 +70,7 @@
#include <sys/plat_ecc_dimm.h>
#include <sys/lgrp.h>
+#include <sys/clock_impl.h>
static int sg_debug = 0;
@@ -1453,6 +1452,8 @@ sg_prom_cpci_dr_check(void)
static void
sg_system_claim(void)
{
+ lbolt_debug_entry();
+
prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
}
@@ -1460,6 +1461,8 @@ static void
sg_system_release(void)
{
prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
+
+ lbolt_debug_return();
}
static void
diff --git a/usr/src/uts/sun4u/starcat/io/dman.c b/usr/src/uts/sun4u/starcat/io/dman.c
index 779c015495..4de77a8196 100644
--- a/usr/src/uts/sun4u/starcat/io/dman.c
+++ b/usr/src/uts/sun4u/starcat/io/dman.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -7537,8 +7537,8 @@ man_kstat_update(kstat_t *ksp, int rw)
wp->mw_flags = MAN_WFLAGS_CVWAITER;
man_work_add(man_iwork_q, wp);
- wait_status = cv_timedwait_sig(&wp->mw_cv, &man_lock,
- ddi_get_lbolt() + drv_usectohz(manp->man_kstat_waittime));
+ wait_status = cv_reltimedwait_sig(&wp->mw_cv, &man_lock,
+ drv_usectohz(manp->man_kstat_waittime), TR_CLOCK_TICK);
if (wp->mw_flags & MAN_WFLAGS_DONE) {
status = wp->mw_status;
diff --git a/usr/src/uts/sun4u/starcat/io/drmach.c b/usr/src/uts/sun4u/starcat/io/drmach.c
index 1849b6a119..9afef50347 100644
--- a/usr/src/uts/sun4u/starcat/io/drmach.c
+++ b/usr/src/uts/sun4u/starcat/io/drmach.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -648,8 +648,7 @@ drmach_node_ddi_walk(drmach_node_t *np, void *data,
/*
* Root node doesn't have to be held in any way.
*/
- ddi_walk_devs(ddi_root_node(), drmach_node_ddi_walk_cb,
- (void *)&nargs);
+ ddi_walk_devs(ddi_root_node(), drmach_node_ddi_walk_cb, (void *)&nargs);
return (nargs.err);
}
@@ -875,7 +874,7 @@ drmach_node_ddi_get_proplen(drmach_node_t *np, char *name, int *len)
if (ndip == NULL) {
rv = -1;
} else if (ddi_getproplen(DDI_DEV_T_ANY, ndip, DDI_PROP_DONTPASS,
- name, len) != DDI_PROP_SUCCESS) {
+ name, len) != DDI_PROP_SUCCESS) {
rv = -1;
}
@@ -1121,8 +1120,8 @@ drmach_device_new(drmach_node_t *node,
/* every node is expected to have a name */
err = drerr_new(1, ESTC_GETPROP,
- "dip: 0x%p: property %s",
- node->n_getdip(node), OBP_NAME);
+ "dip: 0x%p: property %s",
+ node->n_getdip(node), OBP_NAME);
return (err);
}
@@ -1241,8 +1240,8 @@ drmach_board_status(drmachid_t id, drmach_status_t *stat)
if (!bp->connected) {
obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP);
err = drmach_mbox_trans(DRMSG_SHOWBOARD, bp->bnum, obufp,
- sizeof (dr_proto_hdr_t), (caddr_t)&shb,
- sizeof (dr_showboard_t));
+ sizeof (dr_proto_hdr_t), (caddr_t)&shb,
+ sizeof (dr_showboard_t));
kmem_free(obufp, sizeof (dr_proto_hdr_t));
if (err)
@@ -1269,14 +1268,14 @@ drmach_board_status(drmachid_t id, drmach_status_t *stat)
default:
stat->cond = bp->cond = SBD_COND_UNKNOWN;
DRMACH_PR("Unknown test status=0x%x from SC\n",
- shb.test_status);
+ shb.test_status);
break;
}
strncpy(stat->type, shb.board_type, sizeof (stat->type));
snprintf(stat->info, sizeof (stat->info), "Test Level=%d",
- shb.test_level);
+ shb.test_level);
} else {
stat->assigned = bp->assigned;
stat->powered = bp->powered;
@@ -1406,14 +1405,14 @@ drmach_mbox_prmsg(dr_mbox_msg_t *mbp, int dir)
} else {
DRMACH_PR("BOARDEVENT received:\n");
DRMACH_PR("init=%d ins=%d rem=%d asgn=%d\n",
- mp->dm_be.initialized,
- mp->dm_be.board_insertion,
- mp->dm_be.board_removal,
- mp->dm_be.slot_assign);
+ mp->dm_be.initialized,
+ mp->dm_be.board_insertion,
+ mp->dm_be.board_removal,
+ mp->dm_be.slot_assign);
DRMACH_PR("unasgn=%d avail=%d unavail=%d\n",
- mp->dm_be.slot_unassign,
- mp->dm_be.slot_avail,
- mp->dm_be.slot_unavail);
+ mp->dm_be.slot_unassign,
+ mp->dm_be.slot_avail,
+ mp->dm_be.slot_unavail);
}
break;
case DRMSG_MBOX_INIT:
@@ -1446,24 +1445,24 @@ drmach_mbox_prmsg(dr_mbox_msg_t *mbp, int dir)
DRMACH_PR("CLAIM Request:\n");
for (i = 0; i < 18; ++i) {
DRMACH_PR("exp%d: val=%d slice=0x%x\n", i,
- mp->dm_cr.mem_slice[i].valid,
- mp->dm_cr.mem_slice[i].slice);
+ mp->dm_cr.mem_slice[i].valid,
+ mp->dm_cr.mem_slice[i].slice);
memregs = &(mp->dm_cr.mem_regs[i]);
for (j = 0; j < S0_LPORT_COUNT; j++) {
DRMACH_PR(" MC %2d: "
- "MADR[%d] = 0x%lx, "
- "MADR[%d] = 0x%lx\n", j,
- 0, DRMACH_MCREG_TO_U64(
- memregs->madr[j][0]),
- 1, DRMACH_MCREG_TO_U64(
- memregs->madr[j][1]));
+ "MADR[%d] = 0x%lx, "
+ "MADR[%d] = 0x%lx\n", j,
+ 0, DRMACH_MCREG_TO_U64(
+ memregs->madr[j][0]),
+ 1, DRMACH_MCREG_TO_U64(
+ memregs->madr[j][1]));
DRMACH_PR(" : "
- "MADR[%d] = 0x%lx, "
- "MADR[%d] = 0x%lx\n",
- 2, DRMACH_MCREG_TO_U64(
- memregs->madr[j][2]),
- 3, DRMACH_MCREG_TO_U64(
- memregs->madr[j][3]));
+ "MADR[%d] = 0x%lx, "
+ "MADR[%d] = 0x%lx\n",
+ 2, DRMACH_MCREG_TO_U64(
+ memregs->madr[j][2]),
+ 3, DRMACH_MCREG_TO_U64(
+ memregs->madr[j][3]));
}
}
break;
@@ -1476,24 +1475,24 @@ drmach_mbox_prmsg(dr_mbox_msg_t *mbp, int dir)
DRMACH_PR("UNCLAIM Request:\n");
for (i = 0; i < 18; ++i) {
DRMACH_PR("exp%d: val=%d slice=0x%x\n", i,
- mp->dm_ur.mem_slice[i].valid,
- mp->dm_ur.mem_slice[i].slice);
+ mp->dm_ur.mem_slice[i].valid,
+ mp->dm_ur.mem_slice[i].slice);
memregs = &(mp->dm_ur.mem_regs[i]);
for (j = 0; j < S0_LPORT_COUNT; j++) {
DRMACH_PR(" MC %2d: "
- "MADR[%d] = 0x%lx, "
- "MADR[%d] = 0x%lx\n", j,
- 0, DRMACH_MCREG_TO_U64(
- memregs->madr[j][0]),
- 1, DRMACH_MCREG_TO_U64(
- memregs->madr[j][1]));
+ "MADR[%d] = 0x%lx, "
+ "MADR[%d] = 0x%lx\n", j,
+ 0, DRMACH_MCREG_TO_U64(
+ memregs->madr[j][0]),
+ 1, DRMACH_MCREG_TO_U64(
+ memregs->madr[j][1]));
DRMACH_PR(" : "
- "MADR[%d] = 0x%lx, "
- "MADR[%d] = 0x%lx\n",
- 2, DRMACH_MCREG_TO_U64(
- memregs->madr[j][2]),
- 3, DRMACH_MCREG_TO_U64(
- memregs->madr[j][3]));
+ "MADR[%d] = 0x%lx, "
+ "MADR[%d] = 0x%lx\n",
+ 2, DRMACH_MCREG_TO_U64(
+ memregs->madr[j][2]),
+ 3, DRMACH_MCREG_TO_U64(
+ memregs->madr[j][3]));
}
}
DRMACH_PR(" mem_clear=%d\n", mp->dm_ur.mem_clear);
@@ -1507,24 +1506,24 @@ drmach_mbox_prmsg(dr_mbox_msg_t *mbp, int dir)
DRMACH_PR("UNCONFIG Request:\n");
for (i = 0; i < 18; ++i) {
DRMACH_PR("exp%d: val=%d slice=0x%x\n", i,
- mp->dm_uc.mem_slice[i].valid,
- mp->dm_uc.mem_slice[i].slice);
+ mp->dm_uc.mem_slice[i].valid,
+ mp->dm_uc.mem_slice[i].slice);
memregs = &(mp->dm_uc.mem_regs[i]);
for (j = 0; j < S0_LPORT_COUNT; j++) {
DRMACH_PR(" MC %2d: "
- "MADR[%d] = 0x%lx, "
- "MADR[%d] = 0x%lx\n", j,
- 0, DRMACH_MCREG_TO_U64(
- memregs->madr[j][0]),
- 1, DRMACH_MCREG_TO_U64(
- memregs->madr[j][1]));
+ "MADR[%d] = 0x%lx, "
+ "MADR[%d] = 0x%lx\n", j,
+ 0, DRMACH_MCREG_TO_U64(
+ memregs->madr[j][0]),
+ 1, DRMACH_MCREG_TO_U64(
+ memregs->madr[j][1]));
DRMACH_PR(" : "
- "MADR[%d] = 0x%lx, "
- "MADR[%d] = 0x%lx\n",
- 2, DRMACH_MCREG_TO_U64(
- memregs->madr[j][2]),
- 3, DRMACH_MCREG_TO_U64(
- memregs->madr[j][3]));
+ "MADR[%d] = 0x%lx, "
+ "MADR[%d] = 0x%lx\n",
+ 2, DRMACH_MCREG_TO_U64(
+ memregs->madr[j][2]),
+ 3, DRMACH_MCREG_TO_U64(
+ memregs->madr[j][3]));
}
}
break;
@@ -1546,22 +1545,22 @@ drmach_mbox_prmsg(dr_mbox_msg_t *mbp, int dir)
if (dir) {
DRMACH_PR("TESTBOARD Request:\n");
DRMACH_PR("\tmemaddrhi=0x%x memaddrlo=0x%x ",
- mp->dm_tb.memaddrhi,
- mp->dm_tb.memaddrlo);
+ mp->dm_tb.memaddrhi,
+ mp->dm_tb.memaddrlo);
DRMACH_PR("memlen=0x%x cpu_portid=0x%x\n",
- mp->dm_tb.memlen, mp->dm_tb.cpu_portid);
+ mp->dm_tb.memlen, mp->dm_tb.cpu_portid);
DRMACH_PR("\tforce=0x%x imm=0x%x\n",
- mp->dm_tb.force, mp->dm_tb.immediate);
+ mp->dm_tb.force, mp->dm_tb.immediate);
} else {
DRMACH_PR("TESTBOARD Reply:\n");
DRMACH_PR("\tmemaddrhi=0x%x memaddrlo=0x%x ",
- mp->dm_tr.memaddrhi,
- mp->dm_tr.memaddrlo);
+ mp->dm_tr.memaddrhi,
+ mp->dm_tr.memaddrlo);
DRMACH_PR("memlen=0x%x cpu_portid=0x%x\n",
- mp->dm_tr.memlen, mp->dm_tr.cpu_portid);
+ mp->dm_tr.memlen, mp->dm_tr.cpu_portid);
DRMACH_PR("\trecovered=0x%x test status=0x%x\n",
- mp->dm_tr.cpu_recovered,
- mp->dm_tr.test_status);
+ mp->dm_tr.cpu_recovered,
+ mp->dm_tr.test_status);
}
break;
@@ -1573,10 +1572,10 @@ drmach_mbox_prmsg(dr_mbox_msg_t *mbp, int dir)
}
DRMACH_PR("\tmemaddrhi=0x%x memaddrlo=0x%x ",
- mp->dm_ta.memaddrhi,
- mp->dm_ta.memaddrlo);
+ mp->dm_ta.memaddrhi,
+ mp->dm_ta.memaddrlo);
DRMACH_PR("memlen=0x%x cpu_portid=0x%x\n",
- mp->dm_ta.memlen, mp->dm_ta.cpu_portid);
+ mp->dm_ta.memlen, mp->dm_ta.cpu_portid);
break;
case DRMSG_SHOWBOARD:
if (dir) {
@@ -1585,13 +1584,13 @@ drmach_mbox_prmsg(dr_mbox_msg_t *mbp, int dir)
DRMACH_PR("SHOWBOARD Reply:\n");
DRMACH_PR(": empty=%d power=%d assigned=%d",
- mp->dm_sb.slot_empty,
- mp->dm_sb.power_on,
- mp->dm_sb.bd_assigned);
+ mp->dm_sb.slot_empty,
+ mp->dm_sb.power_on,
+ mp->dm_sb.bd_assigned);
DRMACH_PR(": active=%d t_status=%d t_level=%d ",
- mp->dm_sb.bd_active,
- mp->dm_sb.test_status,
- mp->dm_sb.test_level);
+ mp->dm_sb.bd_active,
+ mp->dm_sb.test_status,
+ mp->dm_sb.test_level);
DRMACH_PR(": type=%s ", mp->dm_sb.board_type);
}
break;
@@ -1601,12 +1600,11 @@ drmach_mbox_prmsg(dr_mbox_msg_t *mbp, int dir)
}
DRMACH_PR("dr hdr:\n\tid=0x%x vers=0x%x cmd=0x%x exp=0x%x slot=0x%x\n",
- php->message_id, php->drproto_version, php->command,
- php->expbrd, php->slot);
+ php->message_id, php->drproto_version, php->command,
+ php->expbrd, php->slot);
#endif
DRMACH_PR("\treply_status=0x%x error_code=0x%x\n", php->reply_status,
- php->error_code);
-
+ php->error_code);
}
/*
@@ -1636,13 +1634,13 @@ drmach_mbox_reinit(void *unused)
cmn_err(CE_NOTE, "!reinitializing DR mailbox");
obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP);
serr = drmach_mbox_trans(DRMSG_MBOX_INIT, 0, obufp,
- sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0);
+ sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0);
kmem_free(obufp, sizeof (dr_proto_hdr_t));
if (serr) {
cmn_err(CE_WARN,
- "mbox_init: MBOX_INIT failed ecode=0x%x",
- serr->e_code);
+ "mbox_init: MBOX_INIT failed ecode=0x%x",
+ serr->e_code);
sbd_err_clear(&serr);
}
mutex_enter(&drmach_g_mbox_mutex);
@@ -1678,24 +1676,24 @@ drmach_mbox_event(void)
int logsys = 0;
do {
- err = mboxsc_getmsg(KEY_SCDR, &type, &command,
- &transid, &length, (void *)msg, 0);
+ err = mboxsc_getmsg(KEY_SCDR, &type, &command, &transid,
+ &length, (void *)msg, 0);
} while (err == EAGAIN);
/* don't try to interpret anything with the wrong version number */
if ((err == 0) && (msg->p_hdr.drproto_version != DRMBX_VERSION)) {
cmn_err(CE_WARN, "mailbox version mismatch 0x%x vs 0x%x",
- msg->p_hdr.drproto_version, DRMBX_VERSION);
+ msg->p_hdr.drproto_version, DRMBX_VERSION);
mutex_enter(&drmach_g_mbox_mutex);
drmach_mbox_iflag = 0;
/* schedule a reinit handshake if one isn't pending */
if (!drmach_mbox_ipending) {
if (taskq_dispatch(system_taskq, drmach_mbox_reinit,
- NULL, TQ_NOSLEEP) != NULL) {
+ NULL, TQ_NOSLEEP) != NULL) {
drmach_mbox_ipending = 1;
} else {
cmn_err(CE_WARN,
- "failed to schedule mailbox reinit");
+ "failed to schedule mailbox reinit");
}
}
mutex_exit(&drmach_g_mbox_mutex);
@@ -1704,8 +1702,8 @@ drmach_mbox_event(void)
if ((err != 0) || (msg->p_hdr.reply_status != DRMSG_REPLY_OK)) {
cmn_err(CE_WARN,
- "Unsolicited mboxsc_getmsg failed: err=0x%x code=0x%x",
- err, msg->p_hdr.error_code);
+ "Unsolicited mboxsc_getmsg failed: err=0x%x code=0x%x",
+ err, msg->p_hdr.error_code);
} else {
dr_boardevent_t *be;
be = (dr_boardevent_t *)&msg->msgdata;
@@ -1717,12 +1715,12 @@ drmach_mbox_event(void)
/* schedule a reinit handshake if one isn't pending */
if (!drmach_mbox_ipending) {
if (taskq_dispatch(system_taskq,
- drmach_mbox_reinit, NULL, TQ_NOSLEEP)
- != NULL) {
+ drmach_mbox_reinit, NULL, TQ_NOSLEEP)
+ != NULL) {
drmach_mbox_ipending = 1;
} else {
- cmn_err(CE_WARN,
- "failed to schedule mailbox reinit");
+ cmn_err(CE_WARN, "failed to schedule "
+ "mailbox reinit");
}
}
mutex_exit(&drmach_g_mbox_mutex);
@@ -1768,9 +1766,8 @@ drmach_mbox_event(void)
if (logsys)
drmach_log_sysevent(
- DRMACH_EXPSLOT2BNUM(msg->p_hdr.expbrd,
- msg->p_hdr.slot),
- hint, SE_NOSLEEP, 1);
+ DRMACH_EXPSLOT2BNUM(msg->p_hdr.expbrd,
+ msg->p_hdr.slot), hint, SE_NOSLEEP, 1);
}
}
@@ -1844,8 +1841,8 @@ drmach_mbox_getmsg()
command = 0;
transid = 0;
length = DRMACH_MAX_MBOX_MSG_SIZE;
- err = mboxsc_getmsg(KEY_SCDR, &type, &command,
- &transid, &length, (void *)msg, drmach_to_getmsg);
+ err = mboxsc_getmsg(KEY_SCDR, &type, &command, &transid,
+ &length, (void *)msg, drmach_to_getmsg);
if (err) {
/*
@@ -1862,7 +1859,7 @@ drmach_mbox_getmsg()
*/
if ((err != ETIMEDOUT) && (err != EAGAIN)) {
cmn_err(CE_WARN,
- "mboxsc_getmsg failed, err=0x%x", err);
+ "mboxsc_getmsg failed, err=0x%x", err);
delay(drmach_mbxerr_delay * hz);
}
continue;
@@ -1872,20 +1869,20 @@ drmach_mbox_getmsg()
if (php->drproto_version != DRMBX_VERSION) {
cmn_err(CE_WARN,
- "mailbox version mismatch 0x%x vs 0x%x",
- php->drproto_version, DRMBX_VERSION);
+ "mailbox version mismatch 0x%x vs 0x%x",
+ php->drproto_version, DRMBX_VERSION);
mutex_enter(&drmach_g_mbox_mutex);
drmach_mbox_iflag = 0;
/* schedule a reinit handshake if one isn't pending */
if (!drmach_mbox_ipending) {
if (taskq_dispatch(system_taskq,
- drmach_mbox_reinit, NULL, TQ_NOSLEEP)
- != NULL) {
+ drmach_mbox_reinit, NULL, TQ_NOSLEEP)
+ != NULL) {
drmach_mbox_ipending = 1;
} else {
- cmn_err(CE_WARN,
- "failed to schedule mailbox reinit");
+ cmn_err(CE_WARN, "failed to schedule "
+ "mailbox reinit");
}
}
mutex_exit(&drmach_g_mbox_mutex);
@@ -1912,7 +1909,7 @@ drmach_mbox_getmsg()
found->e_code = php->error_code;
if (found->i_buflen > 0)
bcopy((caddr_t)&msg->msgdata, found->i_buf,
- found->i_buflen);
+ found->i_buflen);
found->m_reply = 1;
cv_signal(&found->g_cv);
@@ -1976,8 +1973,8 @@ drmach_mbox_sendmsg()
drmach_mbox_prmsg(mp, 1);
err = mboxsc_putmsg(KEY_DRSC, MBOXSC_MSG_REQUEST,
- php->command, NULL, entry->o_buflen, (void *)mp,
- drmach_to_putmsg);
+ php->command, NULL, entry->o_buflen, (void *)mp,
+ drmach_to_putmsg);
if (err) {
switch (err) {
@@ -1991,17 +1988,17 @@ drmach_mbox_sendmsg()
case ETIMEDOUT:
if (--entry->o_nretry <= 0) {
mutex_enter(
- &drmach_msglist_mutex);
+ &drmach_msglist_mutex);
drmach_msglist_unlink(entry);
mutex_exit(
- &drmach_msglist_mutex);
+ &drmach_msglist_mutex);
entry->f_error = err;
entry->p_flag = 1;
cv_signal(&entry->s_cv);
} else {
++retry;
mutex_enter(
- &drmach_msglist_mutex);
+ &drmach_msglist_mutex);
continue;
}
break;
@@ -2027,8 +2024,8 @@ drmach_mbox_sendmsg()
mutex_exit(&drmach_msglist_mutex);
mutex_enter(&drmach_sendmsg_mutex);
- (void) cv_timedwait(&drmach_sendmsg_cv,
- &drmach_sendmsg_mutex, ddi_get_lbolt() + (5 * hz));
+ (void) cv_reltimedwait(&drmach_sendmsg_cv,
+ &drmach_sendmsg_mutex, (5 * hz), TR_CLOCK_TICK);
mutex_exit(&drmach_sendmsg_mutex);
}
cmn_err(CE_WARN, "mbox_sendmsg: exiting");
@@ -2050,7 +2047,6 @@ drmach_mbox_sendmsg()
drmach_sendmsg_thread_run = -1;
thread_exit();
-
}
void
@@ -2128,12 +2124,11 @@ drmach_mbox_req_rply(dr_proto_hdr_t *hdrp, uint32_t olen, caddr_t ibufp,
cv_wait(&listp->s_cv, &listp->s_lock);
}
- to_val = ddi_get_lbolt() + (timeout * hz);
+ to_val = ddi_get_lbolt() + (timeout * hz);
if (listp->f_error) {
listp->p_flag = 0;
- cmn_err(CE_WARN, "!mboxsc_putmsg failed: 0x%x",
- listp->f_error);
+ cmn_err(CE_WARN, "!mboxsc_putmsg failed: 0x%x", listp->f_error);
php = (dr_proto_hdr_t *)listp->o_buf;
cmn_err(CE_WARN, "! cmd = 0x%x, exb = %d, slot = %d",
php->command, php->expbrd, php->slot);
@@ -2141,10 +2136,10 @@ drmach_mbox_req_rply(dr_proto_hdr_t *hdrp, uint32_t olen, caddr_t ibufp,
while (listp->m_reply == 0 && listp->f_error == 0) {
if (nosig)
crv = cv_timedwait(&listp->g_cv, &listp->g_lock,
- to_val);
+ to_val);
else
crv = cv_timedwait_sig(&listp->g_cv,
- &listp->g_lock, to_val);
+ &listp->g_lock, to_val);
switch (crv) {
case -1: /* timed out */
cmn_err(CE_WARN,
@@ -2188,7 +2183,7 @@ drmach_mbox_req_rply(dr_proto_hdr_t *hdrp, uint32_t olen, caddr_t ibufp,
to_val = ddi_get_lbolt() + (timeout * hz);
while (link->m_reply == 0 && link->f_error == 0) {
crv = cv_timedwait(&link->g_cv, &link->g_lock,
- to_val);
+ to_val);
switch (crv) {
case -1: /* timed out */
cmn_err(CE_NOTE,
@@ -2250,8 +2245,8 @@ drmach_mbx2sbderr(drmach_msglist_t *mlp)
case DRERR_POWER_OFF:
return (drerr_new(0, ESTC_POWER_OFF, "%s", a_pnt));
case DRERR_TEST_IN_PROGRESS:
- return (drerr_new(0, ESTC_TEST_IN_PROGRESS,
- "%s", a_pnt));
+ return (drerr_new(0, ESTC_TEST_IN_PROGRESS, "%s",
+ a_pnt));
case DRERR_TESTING_BUSY:
return (drerr_new(0, ESTC_TESTING_BUSY, "%s", a_pnt));
case DRERR_TEST_REQUIRED:
@@ -2259,11 +2254,11 @@ drmach_mbx2sbderr(drmach_msglist_t *mlp)
case DRERR_UNAVAILABLE:
return (drerr_new(0, ESTC_UNAVAILABLE, "%s", a_pnt));
case DRERR_RECOVERABLE:
- return (drerr_new(0, ESTC_SMS_ERR_RECOVERABLE,
- "%s", a_pnt));
+ return (drerr_new(0, ESTC_SMS_ERR_RECOVERABLE, "%s",
+ a_pnt));
case DRERR_UNRECOVERABLE:
- return (drerr_new(1, ESTC_SMS_ERR_UNRECOVERABLE,
- "%s", a_pnt));
+ return (drerr_new(1, ESTC_SMS_ERR_UNRECOVERABLE, "%s",
+ a_pnt));
default:
return (drerr_new(1, ESTC_MBOX_UNKNOWN, NULL));
}
@@ -2296,10 +2291,9 @@ drmach_mbox_trans(uint8_t msgtype, int bnum, caddr_t obufp, int olen,
imsg.expbrd = 0;
imsg.slot = 0;
- cmn_err(CE_WARN,
- "!reinitializing DR mailbox");
+ cmn_err(CE_WARN, "!reinitializing DR mailbox");
mlp = drmach_mbox_req_rply(&imsg, sizeof (imsg), 0, 0,
- 10, 5, 0, NULL);
+ 10, 5, 0, NULL);
err = drmach_mbx2sbderr(mlp);
/*
* If framework failure incoming is encountered on
@@ -2400,15 +2394,15 @@ drmach_mbox_trans(uint8_t msgtype, int bnum, caddr_t obufp, int olen,
break;
default:
- cmn_err(CE_WARN,
- "Unknown outgoing message type 0x%x", msgtype);
+ cmn_err(CE_WARN, "Unknown outgoing message type 0x%x",
+ msgtype);
err = DRMACH_INTERNAL_ERROR();
break;
}
if (err == NULL) {
- mlp = drmach_mbox_req_rply(hdrp, olen, ibufp, ilen,
- timeout, ntries, nosignals, NULL);
+ mlp = drmach_mbox_req_rply(hdrp, olen, ibufp, ilen, timeout,
+ ntries, nosignals, NULL);
err = drmach_mbx2sbderr(mlp);
/*
@@ -2449,7 +2443,7 @@ drmach_mbox_init()
drmach_mbox_istate = 0;
/* register the outgoing mailbox */
if ((err = mboxsc_init(KEY_DRSC, MBOXSC_MBOX_OUT,
- NULL)) != 0) {
+ NULL)) != 0) {
cmn_err(CE_WARN, "DR - SC mboxsc_init failed: 0x%x", err);
return (-1);
}
@@ -2474,7 +2468,7 @@ drmach_mbox_init()
/* register the incoming mailbox */
if ((err = mboxsc_init(KEY_SCDR, MBOXSC_MBOX_IN,
- drmach_mbox_event)) != 0) {
+ drmach_mbox_event)) != 0) {
cmn_err(CE_WARN, "SC - DR mboxsc_init failed: 0x%x", err);
return (-1);
}
@@ -2511,11 +2505,11 @@ drmach_mbox_init()
obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP);
serr = drmach_mbox_trans(DRMSG_MBOX_INIT, 0, obufp,
- sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0);
+ sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0);
kmem_free(obufp, sizeof (dr_proto_hdr_t));
if (serr) {
cmn_err(CE_WARN, "mbox_init: MBOX_INIT failed ecode=0x%x",
- serr->e_code);
+ serr->e_code);
sbd_err_clear(&serr);
return (-1);
}
@@ -2536,13 +2530,12 @@ drmach_mbox_fini()
drmach_getmsg_thread_run = 0;
drmach_sendmsg_thread_run = 0;
cmn_err(CE_WARN,
- "drmach_mbox_fini: waiting for mbox threads...");
+ "drmach_mbox_fini: waiting for mbox threads...");
while ((drmach_getmsg_thread_run == 0) ||
- (drmach_sendmsg_thread_run == 0)) {
+ (drmach_sendmsg_thread_run == 0)) {
continue;
}
- cmn_err(CE_WARN,
- "drmach_mbox_fini: mbox threads done.");
+ cmn_err(CE_WARN, "drmach_mbox_fini: mbox threads done.");
mutex_destroy(&drmach_msglist_mutex);
}
@@ -2550,7 +2543,7 @@ drmach_mbox_fini()
/* de-register the outgoing mailbox */
if ((err = mboxsc_fini(KEY_DRSC)) != 0) {
cmn_err(CE_WARN, "DR - SC mboxsc_fini failed: 0x%x",
- err);
+ err);
rv = -1;
}
}
@@ -2558,7 +2551,7 @@ drmach_mbox_fini()
/* de-register the incoming mailbox */
if ((err = mboxsc_fini(KEY_SCDR)) != 0) {
cmn_err(CE_WARN, "SC - DR mboxsc_fini failed: 0x%x",
- err);
+ err);
rv = -1;
}
}
@@ -2673,8 +2666,8 @@ drmach_init(void)
if (drmach_array_get(drmach_boards, bnum, &id) == -1) {
/* portid translated to an invalid board number */
cmn_err(CE_WARN, "OBP node 0x%x has"
- " invalid property value, %s=%u",
- nodeid, "portid", portid);
+ " invalid property value, %s=%u",
+ nodeid, "portid", portid);
/* clean up */
drmach_array_dispose(drmach_boards,
@@ -2711,26 +2704,26 @@ drmach_init(void)
if (gdcd->dcd_testcage_log2_mbytes_size != DCD_DR_TESTCAGE_DISABLED) {
ASSERT(gdcd->dcd_testcage_log2_mbytes_size ==
- gdcd->dcd_testcage_log2_mbytes_align);
+ gdcd->dcd_testcage_log2_mbytes_align);
drmach_iocage_paddr =
- (uint64_t)gdcd->dcd_testcage_mbyte_PA << 20;
+ (uint64_t)gdcd->dcd_testcage_mbyte_PA << 20;
drmach_iocage_size =
- 1 << (gdcd->dcd_testcage_log2_mbytes_size + 20);
+ 1 << (gdcd->dcd_testcage_log2_mbytes_size + 20);
drmach_iocage_vaddr = (caddr_t)vmem_alloc(heap_arena,
- drmach_iocage_size, VM_SLEEP);
+ drmach_iocage_size, VM_SLEEP);
hat_devload(kas.a_hat, drmach_iocage_vaddr, drmach_iocage_size,
- mmu_btop(drmach_iocage_paddr),
- PROT_READ | PROT_WRITE,
- HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
+ mmu_btop(drmach_iocage_paddr),
+ PROT_READ | PROT_WRITE,
+ HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
DRMACH_PR("gdcd size=0x%x align=0x%x PA=0x%x\n",
- gdcd->dcd_testcage_log2_mbytes_size,
- gdcd->dcd_testcage_log2_mbytes_align,
- gdcd->dcd_testcage_mbyte_PA);
+ gdcd->dcd_testcage_log2_mbytes_size,
+ gdcd->dcd_testcage_log2_mbytes_align,
+ gdcd->dcd_testcage_mbyte_PA);
DRMACH_PR("drmach size=0x%x PA=0x%lx VA=0x%p\n",
- drmach_iocage_size, drmach_iocage_paddr,
- drmach_iocage_vaddr);
+ drmach_iocage_size, drmach_iocage_paddr,
+ drmach_iocage_vaddr);
}
if (drmach_iocage_size == 0) {
@@ -2930,10 +2923,10 @@ drmach_prep_schizo_script(uint64_t *p, drmach_mem_t *mp, uint64_t new_basepa)
scsr = lddphysio(io->scsr_pa);
scsr &= ~(DRMACH_LPA_BASE_MASK |
- DRMACH_LPA_BND_MASK);
+ DRMACH_LPA_BND_MASK);
scsr |= DRMACH_PA_TO_LPA_BASE(new_basepa);
scsr |= DRMACH_PA_TO_LPA_BND(
- new_basepa + DRMACH_MEM_SLICE_SIZE);
+ new_basepa + DRMACH_MEM_SLICE_SIZE);
*p++ = io->scsr_pa;
*p++ = scsr;
@@ -2991,34 +2984,34 @@ drmach_prep_rename_script(drmach_mem_t *s_mp, drmach_mem_t *t_mp,
/* verify supplied buffer space is adequate */
ASSERT(buflen >=
- /* addr for all possible MC banks */
- (sizeof (uint64_t) * 4 * 4 * 18) +
- /* list section terminator */
- (sizeof (uint64_t) * 1) +
- /* addr/id tuple for local Panther MC idle reg */
- (sizeof (uint64_t) * 2) +
- /* list section terminator */
- (sizeof (uint64_t) * 1) +
- /* addr/id tuple for 2 boards with 4 Panther MC idle regs */
- (sizeof (uint64_t) * 2 * 2 * 4) +
- /* list section terminator */
- (sizeof (uint64_t) * 1) +
- /* addr/val tuple for 1 proc with 4 MC banks */
- (sizeof (uint64_t) * 2 * 4) +
- /* list section terminator */
- (sizeof (uint64_t) * 1) +
- /* addr/val tuple for 2 boards w/ 2 schizos each */
- (sizeof (uint64_t) * 2 * 2 * 2) +
- /* addr/val tuple for 2 boards w/ 16 MC banks each */
- (sizeof (uint64_t) * 2 * 2 * 16) +
- /* list section terminator */
- (sizeof (uint64_t) * 1) +
- /* addr/val tuple for 18 AXQs w/ two slots each */
- (sizeof (uint64_t) * 2 * 2 * 18) +
- /* list section terminator */
- (sizeof (uint64_t) * 1) +
- /* list terminator */
- (sizeof (uint64_t) * 1));
+ /* addr for all possible MC banks */
+ (sizeof (uint64_t) * 4 * 4 * 18) +
+ /* list section terminator */
+ (sizeof (uint64_t) * 1) +
+ /* addr/id tuple for local Panther MC idle reg */
+ (sizeof (uint64_t) * 2) +
+ /* list section terminator */
+ (sizeof (uint64_t) * 1) +
+ /* addr/id tuple for 2 boards with 4 Panther MC idle regs */
+ (sizeof (uint64_t) * 2 * 2 * 4) +
+ /* list section terminator */
+ (sizeof (uint64_t) * 1) +
+ /* addr/val tuple for 1 proc with 4 MC banks */
+ (sizeof (uint64_t) * 2 * 4) +
+ /* list section terminator */
+ (sizeof (uint64_t) * 1) +
+ /* addr/val tuple for 2 boards w/ 2 schizos each */
+ (sizeof (uint64_t) * 2 * 2 * 2) +
+ /* addr/val tuple for 2 boards w/ 16 MC banks each */
+ (sizeof (uint64_t) * 2 * 2 * 16) +
+ /* list section terminator */
+ (sizeof (uint64_t) * 1) +
+ /* addr/val tuple for 18 AXQs w/ two slots each */
+ (sizeof (uint64_t) * 2 * 2 * 18) +
+ /* list section terminator */
+ (sizeof (uint64_t) * 1) +
+ /* list terminator */
+ (sizeof (uint64_t) * 1));
/* copy bank list to rename script */
mutex_enter(&drmach_bus_sync_lock);
@@ -3069,13 +3062,13 @@ drmach_prep_rename_script(drmach_mem_t *s_mp, drmach_mem_t *t_mp,
/* exchange base pa. include slice offset in new target base pa */
s_new_basepa = t_basepa & ~ (DRMACH_MEM_SLICE_SIZE - 1);
t_new_basepa = (s_basepa & ~ (DRMACH_MEM_SLICE_SIZE - 1)) +
- t_slice_offset;
+ t_slice_offset;
DRMACH_PR("s_new_basepa 0x%lx\n", s_new_basepa);
DRMACH_PR("t_new_basepa 0x%lx\n", t_new_basepa);
DRMACH_PR("preparing MC MADR rename script (master is CPU%d):\n",
- CPU->cpu_id);
+ CPU->cpu_id);
/*
* Write rename script for MC on this processor. A script will
@@ -3134,12 +3127,12 @@ drmach_prep_rename_script(drmach_mem_t *s_mp, drmach_mem_t *t_mp,
*p++ = 0;
DRMACH_PR("preparing AXQ CASM rename script (EXP%d <> EXP%d):\n",
- DRMACH_BNUM2EXP(s_mp->dev.bp->bnum),
- DRMACH_BNUM2EXP(t_mp->dev.bp->bnum));
+ DRMACH_BNUM2EXP(s_mp->dev.bp->bnum),
+ DRMACH_BNUM2EXP(t_mp->dev.bp->bnum));
rv = axq_do_casm_rename_script(&p,
- DRMACH_PA_TO_SLICE(s_new_basepa),
- DRMACH_PA_TO_SLICE(t_new_basepa));
+ DRMACH_PA_TO_SLICE(s_new_basepa),
+ DRMACH_PA_TO_SLICE(t_new_basepa));
if (rv == DDI_FAILURE)
return (DRMACH_INTERNAL_ERROR());
@@ -3188,9 +3181,7 @@ drmach_prep_rename_script(drmach_mem_t *s_mp, drmach_mem_t *t_mp,
uint64_t v = *q++; /* new register value */
DRMACH_PR("0x%lx = 0x%lx, basepa 0x%lx\n",
- r,
- v,
- DRMACH_MC_UM_TO_PA(v)|DRMACH_MC_LM_TO_PA(v));
+ r, v, DRMACH_MC_UM_TO_PA(v)|DRMACH_MC_LM_TO_PA(v));
}
/* skip terminator */
@@ -3214,8 +3205,8 @@ drmach_prep_rename_script(drmach_mem_t *s_mp, drmach_mem_t *t_mp,
/* verify final terminator is present */
ASSERT(*(q + 1) == 0);
- DRMACH_PR("copy-rename script 0x%p, len %d\n",
- buf, (int)((intptr_t)p - (intptr_t)buf));
+ DRMACH_PR("copy-rename script 0x%p, len %d\n", buf,
+ (int)((intptr_t)p - (intptr_t)buf));
if (drmach_debug)
DELAY(10000000);
@@ -3318,12 +3309,12 @@ drmach_copy_rename_init(drmachid_t t_id, uint64_t t_slice_offset,
x_ml = x_ml->next;
DRMACH_PR("source copy span: base pa 0x%lx, end pa 0x%lx\n",
- s_copybasepa,
- s_copybasepa + x_ml->address + x_ml->size);
+ s_copybasepa,
+ s_copybasepa + x_ml->address + x_ml->size);
DRMACH_PR("target copy span: base pa 0x%lx, end pa 0x%lx\n",
- t_copybasepa,
- t_copybasepa + x_ml->address + x_ml->size);
+ t_copybasepa,
+ t_copybasepa + x_ml->address + x_ml->size);
DRMACH_PR("copy memlist (relative to copy base pa):\n");
DRMACH_MEMLIST_DUMP(c_ml);
@@ -3338,9 +3329,9 @@ drmach_copy_rename_init(drmachid_t t_id, uint64_t t_slice_offset,
ASSERT(err == NULL);
DRMACH_PR("current source base pa 0x%lx, size 0x%lx\n",
- s_basepa, s_size);
+ s_basepa, s_size);
DRMACH_PR("current target base pa 0x%lx, size 0x%lx\n",
- t_basepa, t_size);
+ t_basepa, t_size);
}
#endif /* DEBUG */
@@ -3370,12 +3361,12 @@ drmach_copy_rename_init(drmachid_t t_id, uint64_t t_slice_offset,
DRMACH_PR("drmach_rename function 0x%p, len %d\n", wp, len);
wp += (len + 15) & ~15;
- err = drmach_prep_rename_script(s_mp, t_mp, t_slice_offset,
- wp, PAGESIZE - (wp - bp));
+ err = drmach_prep_rename_script(s_mp, t_mp, t_slice_offset, wp,
+ PAGESIZE - (wp - bp));
if (err) {
cleanup:
xt_one(CPU->cpu_id, vtag_flushpage_tl1,
- (uint64_t)drmach_cpu_sram_va, (uint64_t)ksfmmup);
+ (uint64_t)drmach_cpu_sram_va, (uint64_t)ksfmmup);
return (err);
}
@@ -3408,11 +3399,11 @@ cleanup:
if (DRMACH_L1_SET_LPA(s_mp->dev.bp) && drmach_reprogram_lpa) {
drmach_prep_xt_mb_for_slice_update(s_mp->dev.bp,
- DRMACH_PA_TO_SLICE(t_copybasepa));
+ DRMACH_PA_TO_SLICE(t_copybasepa));
}
if (DRMACH_L1_SET_LPA(t_mp->dev.bp) && drmach_reprogram_lpa) {
drmach_prep_xt_mb_for_slice_update(t_mp->dev.bp,
- DRMACH_PA_TO_SLICE(s_copybasepa));
+ DRMACH_PA_TO_SLICE(s_copybasepa));
}
*cr_id = cr;
@@ -3434,7 +3425,7 @@ drmach_copy_rename_fini(drmachid_t id)
axq_cdc_enable_all();
xt_one(CPU->cpu_id, vtag_flushpage_tl1,
- (uint64_t)drmach_cpu_sram_va, (uint64_t)ksfmmup);
+ (uint64_t)drmach_cpu_sram_va, (uint64_t)ksfmmup);
switch (cr->ecode) {
case DRMACH_CR_OK:
@@ -3521,14 +3512,14 @@ drmach_copy_rename_fini(drmachid_t id)
drmach_msg_memregs_init(obufp->msgdata.dm_uc.mem_regs);
mutex_exit(&drmach_slice_table_lock);
(void) drmach_mbox_trans(DRMSG_UNCONFIG, cr->s_mp->dev.bp->bnum,
- (caddr_t)obufp, sizeof (dr_mbox_msg_t), (caddr_t)NULL, 0);
+ (caddr_t)obufp, sizeof (dr_mbox_msg_t), (caddr_t)NULL, 0);
kmem_free(obufp, sizeof (dr_mbox_msg_t));
done:
vmem_free(static_alloc_arena, cr, sizeof (drmach_copy_rename_t));
DRMACH_PR("waited %d out of %d tries for drmach_rename_wait on %d cpus",
- drmach_rename_ntries, drmach_cpu_ntries, drmach_rename_count);
+ drmach_rename_ntries, drmach_cpu_ntries, drmach_rename_count);
return (err);
}
@@ -3790,8 +3781,8 @@ drmach_pci_new(drmach_device_t *proto, drmachid_t *idp)
/* pci nodes are expected to have regs */
err = drerr_new(1, ESTC_GETPROP,
- "Device Node 0x%x: property %s",
- (uint_t)node->get_dnode(node), "reg");
+ "Device Node 0x%x: property %s",
+ (uint_t)node->get_dnode(node), "reg");
return (err);
}
@@ -3800,8 +3791,8 @@ drmach_pci_new(drmach_device_t *proto, drmachid_t *idp)
sbd_error_t *err;
err = drerr_new(1, ESTC_GETPROP,
- "Device Node 0x%x: property %s",
- (uint_t)node->get_dnode(node), "reg");
+ "Device Node 0x%x: property %s",
+ (uint_t)node->get_dnode(node), "reg");
return (err);
}
@@ -3848,7 +3839,7 @@ drmach_io_new(drmach_device_t *proto, drmachid_t *idp)
ip->dev.cm.status = drmach_io_status;
snprintf(ip->dev.cm.name, sizeof (ip->dev.cm.name), "%s%d",
- ip->dev.type, ip->dev.unum);
+ ip->dev.type, ip->dev.unum);
*idp = (drmachid_t)ip;
return (NULL);
@@ -3887,7 +3878,7 @@ drmach_pre_op(int cmd, drmachid_t id, drmach_opts_t *opts)
if (bp->cond == SBD_COND_UNUSABLE)
err = drerr_new(0,
- ESBD_FATAL_STATE, NULL);
+ ESBD_FATAL_STATE, NULL);
break;
case SBD_CMD_DISCONNECT:
if (!bp->connected)
@@ -3895,12 +3886,12 @@ drmach_pre_op(int cmd, drmachid_t id, drmach_opts_t *opts)
if (bp->cond == SBD_COND_UNUSABLE)
err = drerr_new(0,
- ESBD_FATAL_STATE, NULL);
+ ESBD_FATAL_STATE, NULL);
break;
default:
if (bp->cond == SBD_COND_UNUSABLE)
err = drerr_new(0,
- ESBD_FATAL_STATE, NULL);
+ ESBD_FATAL_STATE, NULL);
break;
}
@@ -3939,7 +3930,7 @@ drmach_board_assign(int bnum, drmachid_t *id)
obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP);
err = drmach_mbox_trans(DRMSG_ASSIGN, bnum, obufp,
- sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0);
+ sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0);
kmem_free(obufp, sizeof (dr_proto_hdr_t));
if (!err) {
@@ -4041,7 +4032,7 @@ drmach_board_connect(drmachid_t id, drmach_opts_t *opts)
drmach_msg_memregs_init(obufp->msgdata.dm_cr.mem_regs);
mutex_exit(&drmach_slice_table_lock);
err = drmach_mbox_trans(DRMSG_CLAIM, bp->bnum, (caddr_t)obufp,
- sizeof (dr_mbox_msg_t), (caddr_t)NULL, 0);
+ sizeof (dr_mbox_msg_t), (caddr_t)NULL, 0);
kmem_free(obufp, sizeof (dr_mbox_msg_t));
if (err) {
@@ -4051,8 +4042,8 @@ drmach_board_connect(drmachid_t id, drmach_opts_t *opts)
* unusable.
*/
if ((err->e_code == ESTC_SMS_ERR_UNRECOVERABLE) ||
- (err->e_code == ESTC_MBXRPLY))
- bp->cond = SBD_COND_UNUSABLE;
+ (err->e_code == ESTC_MBXRPLY))
+ bp->cond = SBD_COND_UNUSABLE;
return (err);
}
@@ -4119,8 +4110,8 @@ drmach_board_connect(drmachid_t id, drmach_opts_t *opts)
drmach_msg_memregs_init(obufp->msgdata.dm_ur.mem_regs);
mutex_exit(&drmach_slice_table_lock);
(void) drmach_mbox_trans(DRMSG_UNCLAIM, bp->bnum,
- (caddr_t)obufp, sizeof (dr_mbox_msg_t),
- (caddr_t)NULL, 0);
+ (caddr_t)obufp, sizeof (dr_mbox_msg_t),
+ (caddr_t)NULL, 0);
kmem_free(obufp, sizeof (dr_mbox_msg_t));
@@ -4221,8 +4212,8 @@ drmach_slice_table_update(drmach_board_t *bp, int invalidate)
return;
s = DRMACH_BNUM2SLOT(bp->bnum);
- DRMACH_PR("using AXQ casm %d.%d for slot%d.%d\n",
- axq_exp, axq_slot, e, s);
+ DRMACH_PR("using AXQ casm %d.%d for slot%d.%d\n", axq_exp, axq_slot,
+ e, s);
/* invalidate entry */
drmach_slice_table[e] &= ~0x20;
@@ -4269,13 +4260,11 @@ drmach_lpa_bb_get(drmach_board_t *s1bp, uint64_t *basep, uint64_t *boundp)
*basep = *boundp = 0;
if (drmach_array_get(drmach_boards, s1bp->bnum - 1, &s0id) == 0 &&
- s0id != 0) {
+ s0id != 0) {
uint32_t slice;
- if ((slice =
- drmach_slice_table[DRMACH_BNUM2EXP(s1bp->bnum)])
- & 0x20) {
-
+ if ((slice = drmach_slice_table[DRMACH_BNUM2EXP(s1bp->bnum)])
+ & 0x20) {
*basep = DRMACH_SLICE_TO_PA(slice & DRMACH_SLICE_MASK);
*boundp = *basep + DRMACH_MEM_SLICE_SIZE;
}
@@ -4308,7 +4297,7 @@ drmach_slot1_lpa_set(drmach_board_t *bp)
s1bp = bp;
if (s1bp->devices == NULL) {
DRMACH_PR("drmach...lpa_set: slot1=%d not present",
- bp->bnum);
+ bp->bnum);
return;
}
} else {
@@ -4317,7 +4306,7 @@ drmach_slot1_lpa_set(drmach_board_t *bp)
s1bp = id;
if (rv == -1 || s1bp == NULL || s1bp->devices == NULL) {
DRMACH_PR("drmach...lpa_set: slot1=%d not present",
- bp->bnum + 1);
+ bp->bnum + 1);
return;
}
ASSERT(DRMACH_IS_BOARD_ID(id));
@@ -4325,7 +4314,7 @@ drmach_slot1_lpa_set(drmach_board_t *bp)
mutex_enter(&drmach_slice_table_lock);
drmach_lpa_bb_get(s1bp, &new_basepa, &new_boundpa);
DRMACH_PR("drmach_...lpa_set: bnum=%d base=0x%lx bound=0x%lx\n",
- s1bp->bnum, new_basepa, new_boundpa);
+ s1bp->bnum, new_basepa, new_boundpa);
rv = drmach_array_first(s1bp->devices, &idx, &id);
while (rv == 0) {
@@ -4345,15 +4334,15 @@ drmach_slot1_lpa_set(drmach_board_t *bp)
scsr = lddphysio(io->scsr_pa);
DRMACH_PR("drmach...lpa_set: old scsr=0x%lx\n",
- scsr);
+ scsr);
scsr &= ~(DRMACH_LPA_BASE_MASK |
- DRMACH_LPA_BND_MASK);
+ DRMACH_LPA_BND_MASK);
scsr |= DRMACH_PA_TO_LPA_BASE(new_basepa);
scsr |= DRMACH_PA_TO_LPA_BND(new_boundpa);
stdphysio(io->scsr_pa, scsr);
DRMACH_PR("drmach...lpa_set: new scsr=0x%lx\n",
- scsr);
+ scsr);
last_scsr_pa = io->scsr_pa;
}
@@ -4381,11 +4370,11 @@ drmach_slot1_lpa_set(drmach_board_t *bp)
* MCPU cannot be xcalled.
*/
if ((cpu[cpuid] == NULL) ||
- (cpu[cpuid]->cpu_flags &
- CPU_READY) == 0) {
+ (cpu[cpuid]->cpu_flags &
+ CPU_READY) == 0) {
rv = drmach_array_next(s1bp->devices,
- &idx, &id);
+ &idx, &id);
continue;
}
@@ -4399,10 +4388,10 @@ drmach_slot1_lpa_set(drmach_board_t *bp)
mutex_enter(&drmach_iocage_lock);
while (drmach_iocage_is_busy)
cv_wait(&drmach_iocage_cv,
- &drmach_iocage_lock);
+ &drmach_iocage_lock);
drmach_iocage_is_busy = 1;
- drmach_iocage_mem_scrub(
- ecache_size * 2);
+ drmach_iocage_mem_scrub(ecache_size *
+ 2);
mutex_exit(&drmach_iocage_lock);
}
@@ -4433,7 +4422,7 @@ drmach_slot1_lpa_set(drmach_board_t *bp)
drmach_xt_mb[cpuid] = 0x80 | 0x40;
else
drmach_xt_mb[cpuid] = 0x80 |
- DRMACH_PA_TO_SLICE(new_basepa);
+ DRMACH_PA_TO_SLICE(new_basepa);
drmach_xt_ready = 0;
@@ -4455,8 +4444,8 @@ drmach_slot1_lpa_set(drmach_board_t *bp)
*/
if (drmach_is_cheetah) {
mutex_enter(&drmach_iocage_lock);
- drmach_iocage_mem_scrub(
- ecache_size * 2);
+ drmach_iocage_mem_scrub(ecache_size *
+ 2);
drmach_iocage_is_busy = 0;
cv_signal(&drmach_iocage_cv);
mutex_exit(&drmach_iocage_lock);
@@ -4524,7 +4513,7 @@ drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts)
* info now, for use by drmach_slot1_lpa_set()
*/
if (DRMACH_BNUM2SLOT(bp->bnum) == 0)
- drmach_slice_table_update(bp, 1);
+ drmach_slice_table_update(bp, 1);
drmach_msg_memregs_init(obufp->msgdata.dm_ur.mem_regs);
mutex_exit(&drmach_slice_table_lock);
@@ -4567,11 +4556,11 @@ drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts)
* timeout value in the Safari/Fireplane Config Reg.
*/
if (drmach_panther_boards() > 0 || drmach_unclaim_delay_all) {
- clock_t stime = lbolt;
+ clock_t stime = ddi_get_lbolt();
delay(drv_usectohz(drmach_unclaim_usec_delay));
- stime = lbolt - stime;
+ stime = ddi_get_lbolt() - stime;
DRMACH_PR("delayed %ld ticks (%ld secs) before disconnecting "
"board %s from domain\n", stime, stime / hz, bp->cm.name);
}
@@ -4580,7 +4569,7 @@ drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts)
obufp->msgdata.dm_ur.mem_clear = 0;
err = drmach_mbox_trans(DRMSG_UNCLAIM, bp->bnum, (caddr_t)obufp,
- sizeof (dr_mbox_msg_t), (caddr_t)NULL, 0);
+ sizeof (dr_mbox_msg_t), (caddr_t)NULL, 0);
if (err) {
/*
@@ -4589,18 +4578,18 @@ drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts)
* unusable.
*/
if ((err->e_code == ESTC_SMS_ERR_UNRECOVERABLE) ||
- (err->e_code == ESTC_MBXRPLY))
- bp->cond = SBD_COND_UNUSABLE;
+ (err->e_code == ESTC_MBXRPLY))
+ bp->cond = SBD_COND_UNUSABLE;
else {
DRMACH_PR("UNCLAIM failed for bnum=%d\n",
- bp->bnum);
+ bp->bnum);
DRMACH_PR("calling sc_probe_board: bnum=%d\n",
- bp->bnum);
+ bp->bnum);
scc = sc_probe_board(bp->bnum);
if (scc == NULL) {
cmn_err(CE_WARN,
"sc_probe_board failed for bnum=%d",
- bp->bnum);
+ bp->bnum);
} else {
if (DRMACH_BNUM2SLOT(bp->bnum) == 0) {
mutex_enter(
@@ -4856,8 +4845,8 @@ drmach_board_lookup(int bnum, drmachid_t *id)
obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP);
err = drmach_mbox_trans(DRMSG_SHOWBOARD, bnum, obufp,
- sizeof (dr_proto_hdr_t), (caddr_t)&shb,
- sizeof (dr_showboard_t));
+ sizeof (dr_proto_hdr_t), (caddr_t)&shb,
+ sizeof (dr_showboard_t));
kmem_free(obufp, sizeof (dr_proto_hdr_t));
if (err) {
@@ -4887,7 +4876,7 @@ drmach_board_lookup(int bnum, drmachid_t *id)
default:
bp->cond = SBD_COND_UNKNOWN;
DRMACH_PR("Unknown test status=0x%x from SC\n",
- shb.test_status);
+ shb.test_status);
break;
}
strncpy(bp->type, shb.board_type, sizeof (bp->type));
@@ -4928,7 +4917,7 @@ drmach_board_poweroff(drmachid_t id)
obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP);
err = drmach_mbox_trans(DRMSG_POWEROFF, bp->bnum, obufp,
- sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0);
+ sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0);
kmem_free(obufp, sizeof (dr_proto_hdr_t));
if (!err)
bp->powered = 0;
@@ -4950,7 +4939,7 @@ drmach_board_poweron(drmachid_t id)
obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP);
err = drmach_mbox_trans(DRMSG_POWERON, bp->bnum, obufp,
- sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0);
+ sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0);
if (!err)
bp->powered = 1;
@@ -5013,7 +5002,7 @@ drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
}
err = drmach_mbox_trans(DRMSG_TESTBOARD, bp->bnum, (caddr_t)obufp,
- sizeof (dr_mbox_msg_t), (caddr_t)&tbr, sizeof (tbr));
+ sizeof (dr_mbox_msg_t), (caddr_t)&tbr, sizeof (tbr));
if (!err)
bp->cond = SBD_COND_OK;
@@ -5025,29 +5014,25 @@ drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
switch (tbr.test_status) {
case DR_TEST_STATUS_IPOST:
bp->cond = SBD_COND_UNKNOWN;
- err = drerr_new(0, ESTC_TEST_IN_PROGRESS,
- NULL);
+ err = drerr_new(0, ESTC_TEST_IN_PROGRESS, NULL);
break;
case DR_TEST_STATUS_UNKNOWN:
bp->cond = SBD_COND_UNKNOWN;
err = drerr_new(1,
- ESTC_TEST_STATUS_UNKNOWN, NULL);
+ ESTC_TEST_STATUS_UNKNOWN, NULL);
break;
case DR_TEST_STATUS_FAILED:
bp->cond = SBD_COND_FAILED;
- err = drerr_new(1, ESTC_TEST_FAILED,
- NULL);
+ err = drerr_new(1, ESTC_TEST_FAILED, NULL);
break;
case DR_TEST_STATUS_ABORTED:
bp->cond = SBD_COND_UNKNOWN;
- err = drerr_new(1, ESTC_TEST_ABORTED,
- NULL);
+ err = drerr_new(1, ESTC_TEST_ABORTED, NULL);
break;
default:
bp->cond = SBD_COND_UNKNOWN;
- err = drerr_new(1,
- ESTC_TEST_RESULT_UNKNOWN,
- NULL);
+ err = drerr_new(1, ESTC_TEST_RESULT_UNKNOWN,
+ NULL);
break;
}
}
@@ -5059,9 +5044,9 @@ drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
*/
if (is_io) {
DRMACH_PR("drmach_board_test: tbr.cpu_recovered: %d",
- tbr.cpu_recovered);
+ tbr.cpu_recovered);
DRMACH_PR("drmach_board_test: port id: %d",
- tbr.cpu_portid);
+ tbr.cpu_portid);
/*
* Check the cpu_recovered flag in the testboard reply, or
@@ -5125,7 +5110,7 @@ drmach_board_unassign(drmachid_t id)
obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP);
err = drmach_mbox_trans(DRMSG_UNASSIGN, bp->bnum, obufp,
- sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0);
+ sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0);
kmem_free(obufp, sizeof (dr_proto_hdr_t));
if (!err) {
if (drmach_array_set(drmach_boards, bp->bnum, 0) != 0)
@@ -5313,9 +5298,9 @@ drmach_cpu_new(drmach_device_t *proto, drmachid_t *idp)
ASSERT(drmach_cpu_sram_tte[cp->cpuid].tte_inthi == 0 &&
drmach_cpu_sram_tte[cp->cpuid].tte_intlo == 0);
drmach_cpu_sram_tte[cp->cpuid].tte_inthi = TTE_PFN_INTHI(pfn) |
- TTE_VALID_INT | TTE_SZ_INT(TTE8K);
+ TTE_VALID_INT | TTE_SZ_INT(TTE8K);
drmach_cpu_sram_tte[cp->cpuid].tte_intlo = TTE_PFN_INTLO(pfn) |
- TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT;
+ TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT;
DRMACH_PR("drmach_cpu_new: cpuid=%d, coreid=%d, stardrb_offset=0x%lx, "
"cpu_sram_offset=0x%lx, idx=%d\n", cp->cpuid, cp->coreid,
@@ -5384,7 +5369,7 @@ drmach_cpu_start(struct cpu *cp)
if (prom_hotaddcpu(cpuid) != 0) {
cmn_err(CE_PANIC, "prom_hotaddcpu() for cpuid=%d failed.",
- cpuid);
+ cpuid);
}
restart_other_cpu(cpuid);
@@ -5421,7 +5406,7 @@ drmach_cpu_start(struct cpu *cp)
exp = (cpuid >> 5) & 0x1f;
if (drmach_slice_table[exp] & 0x20) {
drmach_xt_mb[cpuid] = 0x80 |
- (drmach_slice_table[exp] & 0x1f);
+ (drmach_slice_table[exp] & 0x1f);
} else {
drmach_xt_mb[cpuid] = 0x80 | 0x40;
}
@@ -5440,13 +5425,13 @@ drmach_cpu_start(struct cpu *cp)
mutex_exit(&drmach_xt_mb_lock);
DRMACH_PR(
- "waited %d out of %d tries for drmach_set_lpa on cpu%d",
- drmach_cpu_ntries - ntries, drmach_cpu_ntries,
- cp->cpu_id);
+ "waited %d out of %d tries for drmach_set_lpa on cpu%d",
+ drmach_cpu_ntries - ntries, drmach_cpu_ntries,
+ cp->cpu_id);
}
- xt_one(cpuid, vtag_flushpage_tl1,
- (uint64_t)drmach_cpu_sram_va, (uint64_t)ksfmmup);
+ xt_one(cpuid, vtag_flushpage_tl1, (uint64_t)drmach_cpu_sram_va,
+ (uint64_t)ksfmmup);
return (0);
}
@@ -5484,20 +5469,19 @@ drmach_cpu_start(struct cpu *cp)
static void
drmach_cpu_stop_self(void)
{
- extern void drmach_shutdown_asm(
- uint64_t, uint64_t, int, int, uint64_t);
- extern void drmach_shutdown_asm_end(void);
+ extern void drmach_shutdown_asm(uint64_t, uint64_t, int, int, uint64_t);
+ extern void drmach_shutdown_asm_end(void);
tte_t *tte;
uint_t *p, *q;
uint64_t stack_pointer;
ASSERT(((ptrdiff_t)drmach_shutdown_asm_end -
- (ptrdiff_t)drmach_shutdown_asm) < PAGESIZE);
+ (ptrdiff_t)drmach_shutdown_asm) < PAGESIZE);
tte = &drmach_cpu_sram_tte[CPU->cpu_id];
- ASSERT(TTE_IS_VALID(tte) && TTE_IS_8K(tte) &&
- TTE_IS_PRIVILEGED(tte) && TTE_IS_LOCKED(tte));
+ ASSERT(TTE_IS_VALID(tte) && TTE_IS_8K(tte) && TTE_IS_PRIVILEGED(tte) &&
+ TTE_IS_LOCKED(tte));
sfmmu_dtlb_ld_kva(drmach_cpu_sram_va, tte);
sfmmu_itlb_ld_kva(drmach_cpu_sram_va, tte);
@@ -5517,11 +5501,11 @@ drmach_cpu_stop_self(void)
/* call copy of drmach_shutdown_asm */
(*(void (*)())drmach_cpu_sram_va)(
- stack_pointer,
- drmach_iocage_paddr,
- cpunodes[CPU->cpu_id].ecache_size,
- cpunodes[CPU->cpu_id].ecache_linesize,
- va_to_pa((void *)&drmach_xt_mb[CPU->cpu_id]));
+ stack_pointer,
+ drmach_iocage_paddr,
+ cpunodes[CPU->cpu_id].ecache_size,
+ cpunodes[CPU->cpu_id].ecache_linesize,
+ va_to_pa((void *)&drmach_xt_mb[CPU->cpu_id]));
}
static void
@@ -5593,12 +5577,10 @@ drmach_cpu_status(drmachid_t id, drmach_status_t *stat)
sbd_error_t *
drmach_cpu_disconnect(drmachid_t id)
{
-
if (!DRMACH_IS_CPU_ID(id))
return (drerr_new(0, ESTC_INAPPROP, NULL));
return (NULL);
-
}
sbd_error_t *
@@ -5706,7 +5688,7 @@ drmach_dip_is_schizo_xmits_0_pci_b(dev_info_t *dip)
return (0);
if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "device_type",
- (caddr_t)dtype, &len) == DDI_PROP_SUCCESS) {
+ (caddr_t)dtype, &len) == DDI_PROP_SUCCESS) {
if (strncmp(dtype, "pci", 3) == 0) {
@@ -5714,15 +5696,15 @@ drmach_dip_is_schizo_xmits_0_pci_b(dev_info_t *dip)
* Get safari portid. All schizo/xmits 0
* safari IDs end in 0x1C.
*/
- rv = ddi_getproplen(DDI_DEV_T_ANY, dip, 0,
- "portid", &len);
+ rv = ddi_getproplen(DDI_DEV_T_ANY, dip, 0, "portid",
+ &len);
if ((rv != DDI_PROP_SUCCESS) ||
- (len > sizeof (portid)))
- return (0);
+ (len > sizeof (portid)))
+ return (0);
rv = ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0,
- "portid", (caddr_t)&portid, &len);
+ "portid", (caddr_t)&portid, &len);
if (rv != DDI_PROP_SUCCESS)
return (0);
@@ -5731,11 +5713,11 @@ drmach_dip_is_schizo_xmits_0_pci_b(dev_info_t *dip)
return (0);
if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, "reg", (caddr_t)&regbuf,
- &len) == DDI_PROP_SUCCESS) {
+ DDI_PROP_DONTPASS, "reg", (caddr_t)&regbuf,
+ &len) == DDI_PROP_SUCCESS) {
pci_csr_base = regbuf[0].pci_phys_mid &
- PCI_CONF_ADDR_MASK;
+ PCI_CONF_ADDR_MASK;
kmem_free(regbuf, len);
/*
* All PCI B-Leafs are at configspace 0x70.0000.
@@ -5770,7 +5752,7 @@ drmach_dip_is_man_eri(dev_info_t *dip)
* Verify if the parent is schizo(xmits)0 and pci B leaf.
*/
if (((parent_dip = ddi_get_parent(dip)) == NULL) ||
- ((name = ddi_binding_name(parent_dip)) == NULL))
+ ((name = ddi_binding_name(parent_dip)) == NULL))
return (0);
if (strcmp(name, SCHIZO_BINDING_NAME) != 0) {
/*
@@ -5780,7 +5762,7 @@ drmach_dip_is_man_eri(dev_info_t *dip)
if ((parent_dip = ddi_get_parent(parent_dip)) == NULL)
return (0);
if (((name = ddi_binding_name(parent_dip)) == NULL) ||
- (strcmp(name, XMITS_BINDING_NAME) != 0)) {
+ (strcmp(name, XMITS_BINDING_NAME) != 0)) {
return (0);
}
}
@@ -5790,7 +5772,7 @@ drmach_dip_is_man_eri(dev_info_t *dip)
* Finally make sure it is the MAN eri.
*/
if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
- "reg", (caddr_t)&regbuf, &len) == DDI_PROP_SUCCESS) {
+ "reg", (caddr_t)&regbuf, &len) == DDI_PROP_SUCCESS) {
pci_device = PCI_REG_DEV_G(regbuf->pci_phys_hi);
pci_function = PCI_REG_FUNC_G(regbuf->pci_phys_hi);
@@ -5829,7 +5811,7 @@ drmach_board_find_io_insts(dev_info_t *dip, void *args)
}
rv = ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0,
- "portid", (caddr_t)&portid, &len);
+ "portid", (caddr_t)&portid, &len);
if (rv != DDI_PROP_SUCCESS)
return (DDI_WALK_CONTINUE);
@@ -5838,13 +5820,12 @@ drmach_board_find_io_insts(dev_info_t *dip, void *args)
return (DDI_WALK_CONTINUE);
if ((ios->iosram_inst < 0) || (ios->eri_dip == NULL)) {
- rv = ddi_getproplen(DDI_DEV_T_ANY, dip, 0,
- "name", &len);
+ rv = ddi_getproplen(DDI_DEV_T_ANY, dip, 0, "name", &len);
if (rv == DDI_PROP_SUCCESS) {
rv = ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
- 0, "name",
- (caddr_t)name, &len);
+ 0, "name",
+ (caddr_t)name, &len);
if (rv != DDI_PROP_SUCCESS)
return (DDI_WALK_CONTINUE);
@@ -5895,10 +5876,10 @@ drmach_io_pre_release(drmachid_t id)
ndi_devi_enter(rdip, &circ);
ddi_walk_devs(ddi_get_child(rdip), drmach_board_find_io_insts,
- (void *)&ios);
+ (void *)&ios);
DRMACH_PR("drmach_io_pre_release: bnum=%d iosram=%d eri=0x%p\n",
- ios.bnum, ios.iosram_inst, ios.eri_dip);
+ ios.bnum, ios.iosram_inst, ios.eri_dip);
ndi_devi_exit(rdip, circ);
if (ios.eri_dip) {
@@ -5911,11 +5892,11 @@ drmach_io_pre_release(drmachid_t id)
/* call for tunnel switch */
do {
DRMACH_PR("calling iosram_switchfrom(%d)\n",
- ios.iosram_inst);
+ ios.iosram_inst);
rv = iosram_switchfrom(ios.iosram_inst);
if (rv)
DRMACH_PR("iosram_switchfrom returned %d\n",
- rv);
+ rv);
} while (rv == EAGAIN);
if (rv)
@@ -5943,7 +5924,7 @@ drmach_io_unrelease(drmachid_t id)
int (*func)(dev_info_t *dip);
func = (int (*)(dev_info_t *))kobj_getsymvalue("man_dr_attach",
- 0);
+ 0);
if (func) {
drmach_io_inst_t ios;
@@ -5967,8 +5948,8 @@ drmach_io_unrelease(drmachid_t id)
/*
* Root node doesn't have to be held in any way.
*/
- ddi_walk_devs(dip,
- drmach_board_find_io_insts, (void *)&ios);
+ ddi_walk_devs(dip, drmach_board_find_io_insts,
+ (void *)&ios);
if (pdip) {
ndi_devi_exit(pdip, circ);
@@ -5976,13 +5957,12 @@ drmach_io_unrelease(drmachid_t id)
}
DRMACH_PR("drmach_io_unrelease: bnum=%d eri=0x%p\n",
- ios.bnum, ios.eri_dip);
+ ios.bnum, ios.eri_dip);
if (ios.eri_dip) {
DRMACH_PR("calling man_dr_attach\n");
if ((*func)(ios.eri_dip))
- err = drerr_new(0,
- ESTC_NWSWITCH, NULL);
+ err = drerr_new(0, ESTC_NWSWITCH, NULL);
/*
* Release hold acquired in
* drmach_board_find_io_insts()
@@ -6014,7 +5994,7 @@ drmach_io_release(drmachid_t id)
int (*func)(dev_info_t *dip);
func = (int (*)(dev_info_t *))kobj_getsymvalue("man_dr_detach",
- 0);
+ 0);
if (func) {
drmach_io_inst_t ios;
@@ -6038,8 +6018,8 @@ drmach_io_release(drmachid_t id)
/*
* Root node doesn't have to be held in any way.
*/
- ddi_walk_devs(dip,
- drmach_board_find_io_insts, (void *)&ios);
+ ddi_walk_devs(dip, drmach_board_find_io_insts,
+ (void *)&ios);
if (pdip) {
ndi_devi_exit(pdip, circ);
@@ -6047,13 +6027,12 @@ drmach_io_release(drmachid_t id)
}
DRMACH_PR("drmach_io_release: bnum=%d eri=0x%p\n",
- ios.bnum, ios.eri_dip);
+ ios.bnum, ios.eri_dip);
if (ios.eri_dip) {
DRMACH_PR("calling man_dr_detach\n");
if ((*func)(ios.eri_dip))
- err = drerr_new(0,
- ESTC_NWSWITCH, NULL);
+ err = drerr_new(0, ESTC_NWSWITCH, NULL);
/*
* Release hold acquired in
* drmach_board_find_io_insts()
@@ -6152,21 +6131,20 @@ drmach_io_post_attach(drmachid_t id)
/*
* Root node doesn't have to be held in any way.
*/
- ddi_walk_devs(dip, drmach_board_find_io_insts,
- (void *)&ios);
+ ddi_walk_devs(dip, drmach_board_find_io_insts, (void *)&ios);
if (pdip) {
ndi_devi_exit(pdip, circ);
ndi_rele_devi(pdip);
}
- DRMACH_PR("drmach_io_post_attach: bnum=%d eri=0x%p\n",
- ios.bnum, ios.eri_dip);
+ DRMACH_PR("drmach_io_post_attach: bnum=%d eri=0x%p\n", ios.bnum,
+ ios.eri_dip);
if (ios.eri_dip) {
int (*func)(dev_info_t *dip);
func =
- (int (*)(dev_info_t *))kobj_getsymvalue("man_dr_attach", 0);
+ (int (*)(dev_info_t *))kobj_getsymvalue("man_dr_attach", 0);
if (func) {
DRMACH_PR("calling man_dr_attach\n");
@@ -6297,8 +6275,7 @@ drmach_mem_new(drmach_device_t *proto, drmachid_t *idp)
mp->dev.cm.status = drmach_mem_status;
mp->madr_pa = madr_pa;
- snprintf(mp->dev.cm.name,
- sizeof (mp->dev.cm.name), "%s", mp->dev.type);
+ snprintf(mp->dev.cm.name, sizeof (mp->dev.cm.name), "%s", mp->dev.type);
for (count = bank = 0; bank < DRMACH_MC_NBANKS; bank++) {
uint64_t madr;
@@ -6419,13 +6396,13 @@ drmach_mem_add_span(drmachid_t id, uint64_t basepa, uint64_t size)
rv = kcage_range_add(basepfn, npages, KCAGE_DOWN);
if (rv == ENOMEM) {
cmn_err(CE_WARN, "%lu megabytes not available"
- " to kernel cage", size >> 20);
+ " to kernel cage", size >> 20);
} else if (rv != 0) {
/* catch this in debug kernels */
ASSERT(0);
cmn_err(CE_WARN, "unexpected kcage_range_add"
- " return value %d", rv);
+ " return value %d", rv);
}
return (NULL);
@@ -6570,7 +6547,7 @@ drmach_mem_get_base_physaddr(drmachid_t id, uint64_t *base_addr)
drmach_mem_read_madr(mp, bank, &madr);
if (madr & DRMACH_MC_VALID_MASK) {
addr = DRMACH_MC_UM_TO_PA(madr) |
- DRMACH_MC_LM_TO_PA(madr);
+ DRMACH_MC_LM_TO_PA(madr);
if (addr < *base_addr)
*base_addr = addr;
@@ -6657,8 +6634,8 @@ drmach_mem_get_memlist(drmachid_t id, struct memlist **ml)
chunks = gdcd->dcd_chunk_list.dcl_chunks;
while (chunks-- != 0) {
if ((chunk->mc_base_pa & mask) == pa) {
- mlist = memlist_add_span(mlist,
- chunk->mc_base_pa, chunk->mc_mbytes * 1048576);
+ mlist = memlist_add_span(mlist, chunk->mc_base_pa,
+ chunk->mc_mbytes * 1048576);
}
++chunk;
@@ -6863,11 +6840,11 @@ drmach_pt_showlpa(drmachid_t id, drmach_opts_t *opts)
dp = id;
uprintf("showlpa %s::%s portid %d, base pa %lx, bound pa %lx\n",
- dp->bp->cm.name,
- dp->cm.name,
- dp->portid,
- DRMACH_LPA_BASE_TO_PA(val),
- DRMACH_LPA_BND_TO_PA(val));
+ dp->bp->cm.name,
+ dp->cm.name,
+ dp->portid,
+ DRMACH_LPA_BASE_TO_PA(val),
+ DRMACH_LPA_BND_TO_PA(val));
return (NULL);
}
@@ -6876,9 +6853,7 @@ drmach_pt_showlpa(drmachid_t id, drmach_opts_t *opts)
static sbd_error_t *
drmach_pt_ikprobe(drmachid_t id, drmach_opts_t *opts)
{
-
drmach_board_t *bp = (drmach_board_t *)id;
-
sbd_error_t *err;
sc_gptwocfg_cookie_t scc;
@@ -6900,7 +6875,6 @@ drmach_pt_ikprobe(drmachid_t id, drmach_opts_t *opts)
static sbd_error_t *
drmach_pt_ikdeprobe(drmachid_t id, drmach_opts_t *opts)
{
-
drmach_board_t *bp;
sbd_error_t *err = NULL;
sc_gptwocfg_cookie_t scc;
@@ -6919,7 +6893,6 @@ drmach_pt_ikdeprobe(drmachid_t id, drmach_opts_t *opts)
err = drmach_board_deprobe(id);
return (err);
-
}
static sbd_error_t *
@@ -7122,9 +7095,8 @@ drmach_unconfigure(drmachid_t id, int flags)
*/
ASSERT(e_ddi_branch_held(rdip));
if (e_ddi_branch_unconfigure(rdip, &fdip, 0) != 0) {
- sbd_error_t *err = NULL;
- char *path = kmem_alloc(MAXPATHLEN,
- KM_SLEEP);
+ sbd_error_t *err = NULL;
+ char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/*
* If non-NULL, fdip is returned held and must be
@@ -7150,7 +7122,7 @@ drmach_unconfigure(drmachid_t id, int flags)
int (*func)(dev_info_t *dip);
func = (int (*)(dev_info_t *))kobj_getsymvalue\
- ("man_dr_attach", 0);
+ ("man_dr_attach", 0);
if (func) {
drmach_io_inst_t ios;
@@ -7179,12 +7151,12 @@ drmach_unconfigure(drmachid_t id, int flags)
*/
ASSERT(e_ddi_branch_held(rdip));
ddi_walk_devs(rdip,
- drmach_board_find_io_insts,
- (void *)&ios);
+ drmach_board_find_io_insts,
+ (void *)&ios);
DRMACH_PR("drmach_unconfigure: bnum=%d"
- " eri=0x%p\n",
- ios.bnum, ios.eri_dip);
+ " eri=0x%p\n",
+ ios.bnum, ios.eri_dip);
if (pdip) {
ndi_devi_exit(pdip, circ);
@@ -7193,7 +7165,7 @@ drmach_unconfigure(drmachid_t id, int flags)
if (ios.eri_dip) {
DRMACH_PR("calling"
- " man_dr_attach\n");
+ " man_dr_attach\n");
(void) (*func)(ios.eri_dip);
/*
* Release hold acquired in
@@ -7304,7 +7276,7 @@ drmach_cpu_poweroff(struct cpu *cp)
drmach_xt_mb[cpuid] = 0x80;
xt_one_unchecked(cp->cpu_id, (xcfunc_t *)idle_stop_xcall,
- (uint64_t)drmach_cpu_shutdown_self, NULL);
+ (uint64_t)drmach_cpu_shutdown_self, NULL);
ntries = drmach_cpu_ntries;
while (drmach_xt_mb[cpuid] && ntries) {
@@ -7330,8 +7302,8 @@ drmach_cpu_poweroff(struct cpu *cp)
}
DRMACH_PR("waited %d out of %d tries for "
- "drmach_cpu_shutdown_self on cpu%d",
- drmach_cpu_ntries - ntries, drmach_cpu_ntries, cp->cpu_id);
+ "drmach_cpu_shutdown_self on cpu%d",
+ drmach_cpu_ntries - ntries, drmach_cpu_ntries, cp->cpu_id);
/*
* Do this here instead of drmach_cpu_shutdown_self() to
@@ -7356,8 +7328,7 @@ drmach_iocage_mem_scrub(uint64_t nbytes)
if (rv != 0) {
DRMACH_PR(
"iocage scrub failed, drmach_bc_bzero returned %d\n", rv);
- rv = drmach_bc_bzero(drmach_iocage_vaddr,
- drmach_iocage_size);
+ rv = drmach_bc_bzero(drmach_iocage_vaddr, drmach_iocage_size);
if (rv != 0)
cmn_err(CE_PANIC,
"iocage scrub failed, drmach_bc_bzero rv=%d\n",
@@ -8482,7 +8453,7 @@ drmach_sr_insert(struct drmach_sr_list **lp, dev_info_t *dip)
DRMACH_PR("drmach_sr_insert: adding dip %p\n", dip);
np = (struct drmach_sr_list *)kmem_alloc(
- sizeof (struct drmach_sr_list), KM_SLEEP);
+ sizeof (struct drmach_sr_list), KM_SLEEP);
ndi_hold_devi(dip);
np->dip = dip;
@@ -8523,7 +8494,7 @@ drmach_sr_delete(struct drmach_sr_list **lp, dev_info_t *dip)
kmem_free(xp, sizeof (*xp));
DRMACH_PR("drmach_sr_delete:"
- " disposed sr node for dip %p", dip);
+ " disposed sr node for dip %p", dip);
return;
}
@@ -8555,14 +8526,14 @@ drmach_verify_sr(dev_info_t *dip, int sflag)
}
rv = ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
- "name", &len);
+ "name", &len);
if (rv == DDI_PROP_SUCCESS) {
int portid;
uint64_t reg;
struct drmach_sr_ordered *op;
rv = ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, "name", (caddr_t)name, &len);
+ DDI_PROP_DONTPASS, "name", (caddr_t)name, &len);
if (rv != DDI_PROP_SUCCESS)
return (0);
@@ -8605,8 +8576,8 @@ drmach_sr_dip(dev_info_t *dip, int suspend)
name_addr = "<null>";
prom_printf("\t%s %s@%s (aka %s)\n",
- suspend ? "suspending" : "resuming",
- name, name_addr, aka);
+ suspend ? "suspending" : "resuming",
+ name, name_addr, aka);
if (suspend) {
rv = devi_detach(dip, DDI_SUSPEND);
@@ -8616,8 +8587,8 @@ drmach_sr_dip(dev_info_t *dip, int suspend)
if (rv != DDI_SUCCESS) {
prom_printf("\tFAILED to %s %s@%s\n",
- suspend ? "suspend" : "resume",
- name, name_addr);
+ suspend ? "suspend" : "resume",
+ name, name_addr);
}
}
@@ -8661,7 +8632,7 @@ void
drmach_resume_first()
{
struct drmach_sr_ordered *op = drmach_sr_ordered +
- (sizeof (drmach_sr_ordered) / sizeof (drmach_sr_ordered[0]));
+ (sizeof (drmach_sr_ordered) / sizeof (drmach_sr_ordered[0]));
if (drmach_slot1_pause_debug) {
drmach_slot1_pause_update(drmach_slot1_paused,
@@ -8711,23 +8682,23 @@ drmach_log_sysevent(int board, char *hint, int flag, int verbose)
}
if (verbose)
DRMACH_PR("drmach_log_sysevent: %s %s, flag: %d, verbose: %d\n",
- attach_pnt, hint, flag, verbose);
+ attach_pnt, hint, flag, verbose);
if ((ev = sysevent_alloc(EC_DR, ESC_DR_AP_STATE_CHANGE,
- SUNW_KERN_PUB"dr", km_flag)) == NULL) {
+ SUNW_KERN_PUB"dr", km_flag)) == NULL) {
rv = -2;
goto logexit;
}
evnt_val.value_type = SE_DATA_TYPE_STRING;
evnt_val.value.sv_string = attach_pnt;
if ((rv = sysevent_add_attr(&evnt_attr_list, DR_AP_ID,
- &evnt_val, km_flag)) != 0)
+ &evnt_val, km_flag)) != 0)
goto logexit;
evnt_val.value_type = SE_DATA_TYPE_STRING;
evnt_val.value.sv_string = hint;
if ((rv = sysevent_add_attr(&evnt_attr_list, DR_HINT,
- &evnt_val, km_flag)) != 0) {
+ &evnt_val, km_flag)) != 0) {
sysevent_free_attr(evnt_attr_list);
goto logexit;
}
@@ -8745,8 +8716,8 @@ logexit:
sysevent_free(ev);
if ((rv != 0) && verbose)
cmn_err(CE_WARN,
- "drmach_log_sysevent failed (rv %d) for %s %s\n",
- rv, attach_pnt, hint);
+ "drmach_log_sysevent failed (rv %d) for %s %s\n",
+ rv, attach_pnt, hint);
return (rv);
}
@@ -8806,7 +8777,7 @@ drmach_msg_memregs_init(dr_memregs_t regs_arr[]) {
drmach_mem_read_madr(mp, bank, &madr);
if (madr & DRMACH_MC_VALID_MASK) {
DRMACH_PR("%d.%d.%d.madr = 0x%lx\n",
- exp, mcnum, bank, madr);
+ exp, mcnum, bank, madr);
memregs->madr[mcnum][bank].hi =
DRMACH_U64_TO_MCREGHI(madr);
memregs->madr[mcnum][bank].lo =
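The functional change buried in the drmach.c re-indentation above is the move from absolute lbolt deadlines to the relative-timeout condition-variable interfaces. A minimal sketch of that pattern follows, assuming hypothetical demo_cv/demo_lock objects in place of the driver's own drmach_sendmsg_cv/drmach_sendmsg_mutex:

#include <sys/param.h>		/* hz */
#include <sys/time.h>		/* TR_CLOCK_TICK */
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/sunddi.h>		/* ddi_get_lbolt() */

static kcondvar_t demo_cv;	/* hypothetical stand-ins for the */
static kmutex_t demo_lock;	/* driver's own cv and mutex */

static void
demo_wait_five_seconds(void)
{
	mutex_enter(&demo_lock);
	/*
	 * Old pattern: hand cv_timedwait() an absolute deadline built
	 * from the current tick count:
	 *	(void) cv_timedwait(&demo_cv, &demo_lock,
	 *	    ddi_get_lbolt() + (5 * hz));
	 * New pattern: pass the timeout relative to now, so no lbolt
	 * read is needed at the call site.
	 */
	(void) cv_reltimedwait(&demo_cv, &demo_lock, 5 * hz, TR_CLOCK_TICK);
	mutex_exit(&demo_lock);
}

The same substitution appears again in sckmdrv.c further down, where the drv_usectohz(SCKM_DAEMON_TIMEOUT) delta is passed directly instead of being added to ddi_get_lbolt().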
diff --git a/usr/src/uts/sun4u/starcat/io/iosram.c b/usr/src/uts/sun4u/starcat/io/iosram.c
index e0b0f4a10b..ba3af1d2c6 100644
--- a/usr/src/uts/sun4u/starcat/io/iosram.c
+++ b/usr/src/uts/sun4u/starcat/io/iosram.c
@@ -20,11 +20,10 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-
/*
* IOSRAM leaf driver to SBBC nexus driver. This driver is used
* by Starcat Domain SW to read/write from/to the IO sram.
@@ -3261,7 +3260,7 @@ iosram_log(caddr_t fmt, intptr_t a1, intptr_t a2, intptr_t a3, intptr_t a4)
seq = iosram_logseq++;
logp = &iosram_logbuf[seq % IOSRAM_MAXLOG];
logp->seq = seq;
- logp->tstamp = lbolt;
+ logp->tstamp = ddi_get_lbolt();
logp->fmt = fmt;
logp->arg1 = a1;
logp->arg2 = a2;
@@ -3501,7 +3500,7 @@ iosram_print_log(int cnt)
cmn_err(CE_CONT,
"\niosram_logseq: 0x%x lbolt: %lx iosram_log_level:%x\n",
- iosram_logseq, lbolt, iosram_log_level);
+ iosram_logseq, ddi_get_lbolt(), iosram_log_level);
cmn_err(CE_CONT, "iosram_logbuf: %p max entries:0x%x\n",
iosram_logbuf, IOSRAM_MAXLOG);
for (i = iosram_logseq; --i >= 0 && --cnt >= 0; ) {
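The iosram.c hunks are narrower: direct reads of the deprecated lbolt global become calls to the DDI accessor. A hedged one-line equivalent, with the variable name invented for illustration:

	/* old: logp->tstamp = lbolt; */
	clock_t now = ddi_get_lbolt();	/* current tick count as clock_t */

For timestamps that must not wrap on 32-bit kernels, ddi_get_lbolt64() returns the same counter as an int64_t.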
diff --git a/usr/src/uts/sun4u/starcat/io/sckmdrv.c b/usr/src/uts/sun4u/starcat/io/sckmdrv.c
index 64e58b3207..2102bd61e8 100644
--- a/usr/src/uts/sun4u/starcat/io/sckmdrv.c
+++ b/usr/src/uts/sun4u/starcat/io/sckmdrv.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -835,8 +835,8 @@ sckm_process_msg(uint32_t cmd, uint64_t transid,
cv_signal(&sckm_udata_cv);
/* wait for daemon to process request */
- if (cv_timedwait(&sckm_cons_cv, &sckm_umutex,
- ddi_get_lbolt()+drv_usectohz(SCKM_DAEMON_TIMEOUT)) == -1) {
+ if (cv_reltimedwait(&sckm_cons_cv, &sckm_umutex,
+ drv_usectohz(SCKM_DAEMON_TIMEOUT), TR_CLOCK_TICK) == -1) {
/*
* Daemon did not process the data, report this
* error to the SC.
diff --git a/usr/src/uts/sun4u/starcat/os/starcat.c b/usr/src/uts/sun4u/starcat/os/starcat.c
index 1a90d6adb5..3a3fe9abc3 100644
--- a/usr/src/uts/sun4u/starcat/os/starcat.c
+++ b/usr/src/uts/sun4u/starcat/os/starcat.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
@@ -60,6 +58,7 @@
#include <sys/iosramreg.h>
#include <sys/iosramvar.h>
#include <sys/mc-us3.h>
+#include <sys/clock_impl.h>
/* Preallocation of spare tsb's for DR */
int starcat_tsb_spares = STARCAT_SPARE_TSB_MAX;
@@ -1286,6 +1285,8 @@ startup_platform(void)
static void
starcat_system_claim(void)
{
+ lbolt_debug_entry();
+
prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
}
@@ -1293,6 +1294,8 @@ static void
starcat_system_release(void)
{
prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
+
+ lbolt_debug_return();
}
void
diff --git a/usr/src/uts/sun4u/starfire/io/idn.c b/usr/src/uts/sun4u/starfire/io/idn.c
index a7c206d8a8..e321b39bfc 100644
--- a/usr/src/uts/sun4u/starfire/io/idn.c
+++ b/usr/src/uts/sun4u/starfire/io/idn.c
@@ -19,11 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/open.h>
@@ -3082,9 +3081,8 @@ idn_wait_op(void *cookie, domainset_t *domsetp, int wait_timeout)
mutex_enter(&idn.dopers->dop_mutex);
while (((dwl->dw_domset | dwl->dw_errset) != dwl->dw_reqset) && !err) {
- rv = cv_timedwait_sig(&idn.dopers->dop_cv,
- &idn.dopers->dop_mutex,
- lbolt + (wait_timeout * hz));
+ rv = cv_reltimedwait_sig(&idn.dopers->dop_cv,
+ &idn.dopers->dop_mutex, (wait_timeout * hz), TR_CLOCK_TICK);
if ((dwl->dw_domset | dwl->dw_errset) == dwl->dw_reqset)
break;
@@ -5141,7 +5139,7 @@ idn_gkstat_update(kstat_t *ksp, int rw)
sg_kstat.gk_reap_count = sgkp->sk_reap_count.value.ul;
sg_kstat.gk_dropped_intrs = sgkp->sk_dropped_intrs.value.ul;
} else {
- sgkp->sk_curtime.value.ul = lbolt;
+ sgkp->sk_curtime.value.ul = ddi_get_lbolt();
sgkp->sk_reconfigs.value.ul = sg_kstat.gk_reconfigs;
sgkp->sk_reconfig_last.value.ul = sg_kstat.gk_reconfig_last;
sgkp->sk_reaps.value.ul = sg_kstat.gk_reaps;
@@ -5183,7 +5181,7 @@ idn_rw_mem(idnop_t *idnop)
static int orig_gstate = IDNGS_IGNORE;
extern struct seg ktextseg;
-#define RANDOM_INIT() (randx = lbolt)
+#define RANDOM_INIT() (randx = ddi_get_lbolt())
#define RANDOM(a, b) \
(((a) >= (b)) ? \
(a) : (((randx = randx * 1103515245L + 12345) % ((b)-(a))) + (a)))
@@ -5293,7 +5291,8 @@ idn_rw_mem(idnop_t *idnop)
int rv;
mutex_enter(&slock);
- rv = cv_timedwait_sig(&scv, &slock, lbolt+hz);
+ rv = cv_reltimedwait_sig(&scv, &slock, hz,
+ TR_CLOCK_TICK);
mutex_exit(&slock);
if (rv == 0)
break;
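idn.c's interruptible waits move to cv_reltimedwait_sig(), whose return value keeps the cv_timedwait_sig() convention: 0 when a signal interrupts the wait, -1 on timeout, and a positive value when the condition variable is signalled. That convention is what lets the hunk above keep its existing "if (rv == 0) break;" test unchanged. A sketch under those assumptions, again with hypothetical demo_cv/demo_lock names:

	mutex_enter(&demo_lock);
	for (;;) {
		clock_t rv;

		/* one-second, signal-interruptible relative wait */
		rv = cv_reltimedwait_sig(&demo_cv, &demo_lock, hz,
		    TR_CLOCK_TICK);
		if (rv == 0)
			break;		/* interrupted by a signal */
		if (rv == -1)
			continue;	/* timed out; re-check and retry */
		break;			/* woken by cv_signal()/cv_broadcast() */
	}
	mutex_exit(&demo_lock);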
diff --git a/usr/src/uts/sun4u/starfire/io/idn_proto.c b/usr/src/uts/sun4u/starfire/io/idn_proto.c
index 3e66db2699..46b87d63bb 100644
--- a/usr/src/uts/sun4u/starfire/io/idn_proto.c
+++ b/usr/src/uts/sun4u/starfire/io/idn_proto.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Inter-Domain Network
*
@@ -449,11 +446,11 @@ idn_update_priority(int domid, int pri)
dp->dvote.v.priority = pri & IDNVOTE_PRI_MASK;
PR_PROTO("%s:%d: SETTING PRIORITY to req(%d) "
- "(localpri = 0x%x)\n",
- proc, domid, pri, IDNVOTE_PRIVALUE(dp->dvote));
+ "(localpri = 0x%x)\n",
+ proc, domid, pri, IDNVOTE_PRIVALUE(dp->dvote));
} else {
PR_PROTO("%s:%d: PRIORITIES UNCHANGED (pri = 0x%x)\n",
- proc, domid, IDNVOTE_PRIVALUE(dp->dvote));
+ proc, domid, IDNVOTE_PRIVALUE(dp->dvote));
}
}
@@ -471,19 +468,19 @@ idn_link(int domid, int cpuid, int pri, int waittime, idnsb_error_t *sep)
if ((cpuid < 0) || (cpuid >= NCPU)) {
cmn_err(CE_WARN,
- "IDN: 201: (LINK) invalid CPU ID (%d)", cpuid);
+ "IDN: 201: (LINK) invalid CPU ID (%d)", cpuid);
return (EINVAL);
}
if (waittime < 0) {
cmn_err(CE_WARN,
- "IDN: 202: (LINK) invalid time-out value (%d)",
- waittime);
+ "IDN: 202: (LINK) invalid time-out value (%d)",
+ waittime);
return (EINVAL);
}
if (!VALID_DOMAINID(domid)) {
cmn_err(CE_WARN,
- "IDN: 203: (LINK) invalid domain ID (%d)",
- domid);
+ "IDN: 203: (LINK) invalid domain ID (%d)",
+ domid);
return (EINVAL);
}
if (domid == idn.localid)
@@ -501,8 +498,8 @@ idn_link(int domid, int cpuid, int pri, int waittime, idnsb_error_t *sep)
case IDNDS_CONNECTED:
#ifdef DEBUG
cmn_err(CE_NOTE,
- "!IDN: domain %d (CPU ID %d) already connected",
- domid, cpuid);
+ "!IDN: domain %d (CPU ID %d) already connected",
+ domid, cpuid);
#endif /* DEBUG */
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
@@ -510,8 +507,8 @@ idn_link(int domid, int cpuid, int pri, int waittime, idnsb_error_t *sep)
default:
cmn_err(CE_WARN,
- "IDN: 204: domain %d state (%s) inappropriate",
- domid, idnds_str[dp->dstate]);
+ "IDN: 204: domain %d state (%s) inappropriate",
+ domid, idnds_str[dp->dstate]);
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
return (EINVAL);
@@ -520,8 +517,8 @@ idn_link(int domid, int cpuid, int pri, int waittime, idnsb_error_t *sep)
rv = idn_open_domain(domid, cpuid, 0);
if (rv != 0) {
cmn_err(CE_WARN,
- "IDN: 205: (%s) failed to open-domain(%d,%d)",
- proc, domid, cpuid);
+ "IDN: 205: (%s) failed to open-domain(%d,%d)",
+ proc, domid, cpuid);
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
return (EIO);
@@ -550,8 +547,8 @@ idn_link(int domid, int cpuid, int pri, int waittime, idnsb_error_t *sep)
* Need to wait since it happens asynchronously.
*/
PR_PROTO("%s:%d: WAITING for op(%s) for (domset 0%x)...\n",
- proc, domid, idnop_str[IDNOP_CONNECTED],
- DOMAINSET(domid));
+ proc, domid, idnop_str[IDNOP_CONNECTED],
+ DOMAINSET(domid));
rv = idn_wait_op(opcookie, &domset, waittime);
}
@@ -560,14 +557,14 @@ idn_link(int domid, int cpuid, int pri, int waittime, idnsb_error_t *sep)
if (rv == 0) {
if (waittime > 0) {
PR_PROTO("%s:%d: connect SUCCEEDED (cpu %d)\n",
- proc, domid, cpuid);
+ proc, domid, cpuid);
} else {
PR_PROTO("%s:%d: connect KICKED OFF (cpu %d)\n",
- proc, domid, cpuid);
+ proc, domid, cpuid);
}
} else {
PR_PROTO("%s:%d: connect FAILED (cpu %d)\n",
- proc, domid, cpuid);
+ proc, domid, cpuid);
}
#endif /* DEBUG */
@@ -596,16 +593,16 @@ idn_unlink(int domid, boardset_t idnset, idn_fin_t fintype,
if (waittime < 0) {
cmn_err(CE_WARN,
- "IDN: 202: (UNLINK) invalid time-out value (%d)",
- waittime);
+ "IDN: 202: (UNLINK) invalid time-out value (%d)",
+ waittime);
SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_WTIME);
SET_IDNKERR_PARAM0(sep, waittime);
return (EINVAL);
}
if (!VALID_DOMAINID(domid)) {
cmn_err(CE_WARN,
- "IDN: 203: (UNLINK) invalid domain ID (%d)",
- domid);
+ "IDN: 203: (UNLINK) invalid domain ID (%d)",
+ domid);
SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_DOMAIN);
SET_IDNKERR_PARAM0(sep, domid);
SET_IDNKERR_PARAM1(sep, -1);
@@ -614,8 +611,8 @@ idn_unlink(int domid, boardset_t idnset, idn_fin_t fintype,
if (idn.localid == IDN_NIL_DOMID) {
#ifdef DEBUG
cmn_err(CE_NOTE,
- "!IDN: %s: local domain not connected to an IDNnet",
- proc);
+ "!IDN: %s: local domain not connected to an IDNnet",
+ proc);
#endif /* DEBUG */
return (0);
}
@@ -632,8 +629,8 @@ idn_unlink(int domid, boardset_t idnset, idn_fin_t fintype,
if ((idn.state == IDNGS_OFFLINE) && !domset) {
#ifdef DEBUG
cmn_err(CE_WARN,
- "!IDN: %s: local domain not connected to an IDNnet",
- proc);
+ "!IDN: %s: local domain not connected to an IDNnet",
+ proc);
#endif /* DEBUG */
IDN_GUNLOCK();
IDN_SYNC_UNLOCK();
@@ -665,8 +662,8 @@ idn_unlink(int domid, boardset_t idnset, idn_fin_t fintype,
* for it to complete.
*/
PR_PROTO("%s:%d: WAITING for op(%s) for (domset 0%x)...\n",
- proc, domid, idnop_str[IDNOP_DISCONNECTED],
- domset);
+ proc, domid, idnop_str[IDNOP_DISCONNECTED],
+ domset);
rv = idn_wait_op(opcookie, &domset, waittime);
}
@@ -674,10 +671,10 @@ idn_unlink(int domid, boardset_t idnset, idn_fin_t fintype,
if (rv == 0) {
if (waittime > 0) {
PR_PROTO("%s:%d: disconnect SUCCEEDED\n",
- proc, domid);
+ proc, domid);
} else {
PR_PROTO("%s:%d: disconnect KICKED OFF\n",
- proc, domid);
+ proc, domid);
}
} else {
PR_PROTO("%s:%d: disconnect FAILED\n", proc, domid);
@@ -702,9 +699,9 @@ idn_unlink_domainset(domainset_t domset, idn_fin_t fintype,
* no active connections.
*/
offset = domset & ~(idn.domset.ds_trans_on |
- idn.domset.ds_connected |
- idn.domset.ds_trans_off |
- idn.domset.ds_relink);
+ idn.domset.ds_connected |
+ idn.domset.ds_trans_off |
+ idn.domset.ds_relink);
/*
* Determine subset that are really candidates.
* Note that we include those already down the path
@@ -728,11 +725,11 @@ idn_unlink_domainset(domainset_t domset, idn_fin_t fintype,
#ifdef DEBUG
if (idn.domset.ds_hitlist & domset) {
PR_HITLIST("%s: domset=%x, hitlist=%x, trans_off=%x "
- "-> relink = %x -> %x\n",
- proc, domset, idn.domset.ds_hitlist,
- idn.domset.ds_relink, idn.domset.ds_trans_off,
- idn.domset.ds_relink |
- (domset & ~idn.domset.ds_trans_off));
+ "-> relink = %x -> %x\n",
+ proc, domset, idn.domset.ds_hitlist,
+ idn.domset.ds_relink, idn.domset.ds_trans_off,
+ idn.domset.ds_relink |
+ (domset & ~idn.domset.ds_trans_off));
}
#endif /* DEBUG */
@@ -750,11 +747,11 @@ idn_unlink_domainset(domainset_t domset, idn_fin_t fintype,
if (domset == 0) {
if ((idn.domset.ds_trans_on |
- idn.domset.ds_connected |
- idn.domset.ds_trans_off |
- idn.domset.ds_relink) == 0) {
+ idn.domset.ds_connected |
+ idn.domset.ds_trans_off |
+ idn.domset.ds_relink) == 0) {
PR_HITLIST("%s:%x: HITLIST %x -> 0\n",
- proc, domset, idn.domset.ds_hitlist);
+ proc, domset, idn.domset.ds_hitlist);
idn.domset.ds_hitlist = 0;
IDN_GSTATE_TRANSITION(IDNGS_OFFLINE);
}
@@ -773,7 +770,7 @@ idn_unlink_domainset(domainset_t domset, idn_fin_t fintype,
dp = &idn_domain[d];
IDN_DLOCK_EXCL(d);
IDN_HISTORY_LOG(IDNH_RELINK, d, dp->dstate,
- idn.domset.ds_relink);
+ idn.domset.ds_relink);
ftype = fintype;
if ((dp->dcpu != IDN_NIL_DCPU) && dp->dhw.dh_boardset) {
/*
@@ -788,24 +785,24 @@ idn_unlink_domainset(domainset_t domset, idn_fin_t fintype,
*/
if ((idnset & dp->dhw.dh_boardset) == 0) {
PR_PROTO("%s:%d: boardset 0x%x "
- "NOT in IDNSET 0x%x\n",
- proc, d, dp->dhw.dh_boardset,
- idnset);
+ "NOT in IDNSET 0x%x\n",
+ proc, d, dp->dhw.dh_boardset,
+ idnset);
if (ftype != IDNFIN_FORCE_HARD)
cmn_err(CE_NOTE,
- "!IDN: 222: no IDN linkage "
- "found (b=0x%x, i=0x%x) "
- "upgrading unlink %s to %s",
- dp->dhw.dh_boardset,
- idnset, idnfin_str[ftype],
- idnfin_str[IDNFIN_FORCE_HARD]);
+ "!IDN: 222: no IDN linkage "
+ "found (b=0x%x, i=0x%x) "
+ "upgrading unlink %s to %s",
+ dp->dhw.dh_boardset,
+ idnset, idnfin_str[ftype],
+ idnfin_str[IDNFIN_FORCE_HARD]);
ftype = IDNFIN_FORCE_HARD;
} else {
PR_PROTO("%s:%d: boardset 0x%x "
- "FOUND in IDNSET 0x%x\n",
- proc, d, dp->dhw.dh_boardset,
- idnset);
+ "FOUND in IDNSET 0x%x\n",
+ proc, d, dp->dhw.dh_boardset,
+ idnset);
}
}
idn_disconnect(d, ftype, finarg, IDNDS_SYNC_TYPE(dp));
@@ -830,13 +827,13 @@ idn_connect(int domid)
if (dp->dstate != IDNDS_CLOSED) {
if (DOMAIN_IN_SET(idn.domset.ds_trans_on |
- idn.domset.ds_connected, domid)) {
+ idn.domset.ds_connected, domid)) {
PR_PROTO("%s:%d: already connected or "
- "in-progress\n", proc, domid);
+ "in-progress\n", proc, domid);
} else {
PR_PROTO("%s:%d: current state (%s) != "
- "CLOSED\n", proc, domid,
- idnds_str[dp->dstate]);
+ "CLOSED\n", proc, domid,
+ idnds_str[dp->dstate]);
}
return (-1);
}
@@ -857,7 +854,7 @@ idn_connect(int domid)
*/
static int
idn_disconnect(int domid, idn_fin_t fintype, idn_finarg_t finarg,
- idn_finsync_t finsync)
+ idn_finsync_t finsync)
{
int new_masterid, new_cpuid = IDN_NIL_DCPU;
uint_t token;
@@ -910,18 +907,18 @@ idn_disconnect(int domid, idn_fin_t fintype, idn_finarg_t finarg,
dp->dfin_sync = finsync;
PR_PROTO("%s:%d: disconnect synchronously = %s\n",
- proc, domid, (finsync == IDNFIN_SYNC_OFF) ? "OFF" :
- (finsync == IDNFIN_SYNC_NO) ? "NO" : "YES");
+ proc, domid, (finsync == IDNFIN_SYNC_OFF) ? "OFF" :
+ (finsync == IDNFIN_SYNC_NO) ? "NO" : "YES");
IDN_GLOCK_SHARED();
if (DOMAIN_IN_SET(idn.domset.ds_relink, domid) &&
- (idn.state != IDNGS_DISCONNECT)) {
+ (idn.state != IDNGS_DISCONNECT)) {
finopt = IDNFIN_OPT_RELINK;
} else {
finopt = IDNFIN_OPT_UNLINK;
PR_HITLIST("%s:%d: HITLIST %x -> %x\n",
- proc, domid, idn.domset.ds_hitlist,
- idn.domset.ds_hitlist | DOMAINSET(domid));
+ proc, domid, idn.domset.ds_hitlist,
+ idn.domset.ds_hitlist | DOMAINSET(domid));
DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
}
@@ -956,7 +953,7 @@ idn_next_xstate(idn_xstate_t o_xstate, int err, uint_t msg)
index = (msg & IDNP_ACK) ? 3 : (msg & IDNP_NACK) ? 4 : -1;
else
index = (msg & IDNP_ACK) ? 2 :
- !(msg & IDNP_ACKNACK_MASK) ? 1 : -1;
+ !(msg & IDNP_ACKNACK_MASK) ? 1 : -1;
if (index == -1) {
STRING(str);
@@ -1011,8 +1008,8 @@ idn_select_candidate(domainset_t master_set)
dp = &idn_domain[d];
if ((dp->domid == IDN_NIL_DOMID) ||
- (dp->dcpu == IDN_NIL_DCPU) ||
- ((v.ticket = dp->dvote.ticket) == 0))
+ (dp->dcpu == IDN_NIL_DCPU) ||
+ ((v.ticket = dp->dvote.ticket) == 0))
continue;
vote = IDNVOTE_ELECT(v);
@@ -1049,7 +1046,7 @@ idn_select_master(int domid, int rmasterid, int rcpuid)
ASSERT(IDN_DLOCK_IS_EXCL(domid));
PR_PROTO("%s:%d: lmasterid = %d, rmasterid = %d, rcpuid = %d\n",
- proc, domid, IDN_GET_MASTERID(), rmasterid, rcpuid);
+ proc, domid, IDN_GET_MASTERID(), rmasterid, rcpuid);
IDN_DLOCK_EXCL(idn.localid);
@@ -1067,14 +1064,14 @@ idn_select_master(int domid, int rmasterid, int rcpuid)
lmasterid = IDN_GET_MASTERID();
lindex = (lmasterid == IDN_NIL_DOMID) ? MASTER_IS_NONE :
- (lmasterid == idn.localid) ? MASTER_IS_LOCAL :
- (lmasterid == domid) ? MASTER_IS_REMOTE :
- MASTER_IS_OTHER;
+ (lmasterid == idn.localid) ? MASTER_IS_LOCAL :
+ (lmasterid == domid) ? MASTER_IS_REMOTE :
+ MASTER_IS_OTHER;
rindex = (rmasterid == IDN_NIL_DOMID) ? MASTER_IS_NONE :
- (rmasterid == domid) ? MASTER_IS_REMOTE :
- (rmasterid == idn.localid) ? MASTER_IS_LOCAL :
- MASTER_IS_OTHER;
+ (rmasterid == domid) ? MASTER_IS_REMOTE :
+ (rmasterid == idn.localid) ? MASTER_IS_LOCAL :
+ MASTER_IS_OTHER;
select = master_select_table[lindex][rindex];
@@ -1105,9 +1102,9 @@ idn_select_master(int domid, int rmasterid, int rcpuid)
*/
} else {
cmn_err(CE_WARN,
- "IDN: 206: cannot link domains "
- "with equal votes (L(%d),R(%d),0x%x)",
- idn.localid, domid, rvote);
+ "IDN: 206: cannot link domains "
+ "with equal votes (L(%d),R(%d),0x%x)",
+ idn.localid, domid, rvote);
IDN_GUNLOCK();
}
IDN_DUNLOCK(idn.localid);
@@ -1125,9 +1122,9 @@ idn_select_master(int domid, int rmasterid, int rcpuid)
rdp->dvote.v.master = 1;
} else {
cmn_err(CE_WARN,
- "IDN: 206: cannot link domains "
- "with equal votes (L(%d),R(%d),0x%x)",
- idn.localid, domid, rvote);
+ "IDN: 206: cannot link domains "
+ "with equal votes (L(%d),R(%d),0x%x)",
+ idn.localid, domid, rvote);
}
ASSERT(IDN_GET_MASTERID() == IDN_NIL_DOMID);
if (masterid != IDN_NIL_DOMID) {
@@ -1182,25 +1179,25 @@ idn_select_master(int domid, int rmasterid, int rcpuid)
IDN_GUNLOCK();
IDN_DLOCK_EXCL(rmasterid);
PR_PROTO("%s:%d: attempting connect w/remote "
- "master %d\n",
- proc, domid, rmasterid);
+ "master %d\n",
+ proc, domid, rmasterid);
rv = idn_open_domain(rmasterid, rcpuid, 0);
if (rv == 0) {
idn_connect(rmasterid);
} else if (rv < 0) {
cmn_err(CE_WARN,
- "IDN: 205: (%s) failed to "
- "open-domain(%d,%d)",
- proc, rmasterid, rcpuid);
+ "IDN: 205: (%s) failed to "
+ "open-domain(%d,%d)",
+ proc, rmasterid, rcpuid);
} else {
/*
* Must already have a connection going.
*/
PR_PROTO("%s:%d: failed "
- "idn_open_domain(%d,%d,0) "
- "(rv = %d)\n",
- proc, domid, rmasterid,
- rcpuid, rv);
+ "idn_open_domain(%d,%d,0) "
+ "(rv = %d)\n",
+ proc, domid, rmasterid,
+ rcpuid, rv);
}
IDN_DUNLOCK(rmasterid);
}
@@ -1226,17 +1223,17 @@ idn_select_master(int domid, int rmasterid, int rcpuid)
* Hit impossible condition.
*/
cmn_err(CE_WARN,
- "IDN: 207: local/remote master-id conflict "
- "(%d.lmasterid = %d, %d.rmasterid = %d)",
- idn.localid, lmasterid, domid, rmasterid);
+ "IDN: 207: local/remote master-id conflict "
+ "(%d.lmasterid = %d, %d.rmasterid = %d)",
+ idn.localid, lmasterid, domid, rmasterid);
IDN_GUNLOCK();
IDN_DUNLOCK(idn.localid);
break;
default:
cmn_err(CE_WARN,
- "IDN: 208: %s: unknown case (%d)",
- proc, (int)select);
+ "IDN: 208: %s: unknown case (%d)",
+ proc, (int)select);
IDN_GUNLOCK();
IDN_DUNLOCK(idn.localid);
ASSERT(0);
@@ -1245,12 +1242,12 @@ idn_select_master(int domid, int rmasterid, int rcpuid)
if (masterid == IDN_NIL_DOMID) {
PR_PROTO("%s:%d: NO MASTER SELECTED (rmstr=%d) sel=%s\n",
- proc, domid, rmasterid, sel);
+ proc, domid, rmasterid, sel);
} else {
PR_PROTO("%s:%d: MASTER SELECTED = %d (%s)\n",
- proc, domid, masterid,
- (masterid == idn.localid) ? "LOCAL" :
- (masterid == domid) ? "REMOTE" : "OTHER");
+ proc, domid, masterid,
+ (masterid == idn.localid) ? "LOCAL" :
+ (masterid == domid) ? "REMOTE" : "OTHER");
}
if (do_reconfig) {
@@ -1264,7 +1261,7 @@ idn_select_master(int domid, int rmasterid, int rcpuid)
IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs, gk_reconfig_last);
PR_PROTO("%s:%d: RECONFIG new masterid = %d\n",
- proc, domid, domid);
+ proc, domid, domid);
IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
IDN_SET_NEW_MASTERID(domid);
@@ -1273,9 +1270,8 @@ idn_select_master(int domid, int rmasterid, int rcpuid)
dis_set = idn.domset.ds_trans_on | idn.domset.ds_connected;
DOMAINSET_DEL(dis_set, domid);
- idn_unlink_domainset(dis_set, IDNFIN_NORMAL,
- IDNFIN_ARG_NONE, IDNFIN_OPT_RELINK,
- BOARDSET_ALL);
+ idn_unlink_domainset(dis_set, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
+ IDNFIN_OPT_RELINK, BOARDSET_ALL);
}
return ((masterid == IDN_NIL_DOMID) ? -1 : 0);
@@ -1298,8 +1294,7 @@ idn_retry_query(uint_t token, void *arg)
switch (rtype) {
case IDNRETRY_CONQ:
sync_cmd = IDNSYNC_CONNECT;
- my_ready_set = idn.domset.ds_ready_on |
- idn.domset.ds_connected;
+ my_ready_set = idn.domset.ds_ready_on | idn.domset.ds_connected;
my_ready_set &= ~idn.domset.ds_trans_off;
DOMAINSET_ADD(my_ready_set, idn.localid);
break;
@@ -1307,7 +1302,7 @@ idn_retry_query(uint_t token, void *arg)
case IDNRETRY_FINQ:
sync_cmd = IDNSYNC_DISCONNECT;
my_ready_set = idn.domset.ds_ready_off |
- ~idn.domset.ds_connected;
+ ~idn.domset.ds_connected;
break;
default:
@@ -1338,8 +1333,8 @@ idn_retry_query(uint_t token, void *arg)
IDN_DLOCK_EXCL(d);
if ((dp->dsync.s_cmd == sync_cmd) ||
- (!dp->dcookie_send &&
- (rtype == IDNRETRY_CONQ))) {
+ (!dp->dcookie_send &&
+ (rtype == IDNRETRY_CONQ))) {
if (d != domid)
IDN_DUNLOCK(d);
continue;
@@ -1351,8 +1346,7 @@ idn_retry_query(uint_t token, void *arg)
idn_send_con(d, NULL, IDNCON_QUERY, my_ready_set);
else
idn_send_fin(d, NULL, IDNFIN_QUERY, IDNFIN_ARG_NONE,
- IDNFIN_OPT_NONE, my_ready_set,
- NIL_FIN_MASTER);
+ IDNFIN_OPT_NONE, my_ready_set, NIL_FIN_MASTER);
if (d != domid)
IDN_DUNLOCK(d);
}
@@ -1394,7 +1388,7 @@ idn_send_nego(int domid, idn_msgtype_t *mtp, domainset_t conset)
ldp = &idn_domain[idn.localid];
if ((idn.state == IDNGS_RECONFIG) ||
- ((masterid = IDN_GET_MASTERID()) == IDN_NIL_DOMID)) {
+ ((masterid = IDN_GET_MASTERID()) == IDN_NIL_DOMID)) {
masterid = IDN_GET_NEW_MASTERID();
if ((masterid == idn.localid) || (masterid == domid)) {
/*
@@ -1420,9 +1414,9 @@ idn_send_nego(int domid, idn_msgtype_t *mtp, domainset_t conset)
*/
conset &= ~idn.domset.ds_hitlist;
if ((masterid != IDN_NIL_DOMID) &&
- DOMAIN_IN_SET(idn.domset.ds_hitlist, masterid)) {
+ DOMAIN_IN_SET(idn.domset.ds_hitlist, masterid)) {
PR_PROTO("%s:%d: masterid(%d) on hitlist(0x%x) -> -1\n",
- proc, domid, masterid, idn.domset.ds_hitlist);
+ proc, domid, masterid, idn.domset.ds_hitlist);
/*
* Yikes, our chosen master is on the hitlist!
*/
@@ -1446,7 +1440,7 @@ idn_send_nego(int domid, idn_msgtype_t *mtp, domainset_t conset)
}
IDNNEG_DSET_SET_MASTER(dset, domid, masterid);
ASSERT((masterid != IDN_NIL_DOMID) ?
- (idn_domain[masterid].dcpu != IDN_NIL_DCPU) : 1);
+ (idn_domain[masterid].dcpu != IDN_NIL_DCPU) : 1);
IDN_GUNLOCK();
IDN_DLOCK_SHARED(idn.localid);
@@ -1462,14 +1456,14 @@ idn_send_nego(int domid, idn_msgtype_t *mtp, domainset_t conset)
*/
PR_PROTO("%s:%d: sending nego%sto (cpu %d) "
- "[v=0x%x, cs=0x%x, mstr=%d]\n",
- proc, domid,
- (acknack & IDNP_ACK) ? "+ack " :
- (acknack & IDNP_NACK) ? "+nack " : " ",
- dp->dcpu, ticket, conset, masterid);
+ "[v=0x%x, cs=0x%x, mstr=%d]\n",
+ proc, domid,
+ (acknack & IDNP_ACK) ? "+ack " :
+ (acknack & IDNP_NACK) ? "+nack " : " ",
+ dp->dcpu, ticket, conset, masterid);
IDN_MSGTIMER_START(domid, IDNP_NEGO, 0,
- idn_msg_waittime[IDNP_NEGO], &mt.mt_cookie);
+ idn_msg_waittime[IDNP_NEGO], &mt.mt_cookie);
IDNXDC(domid, &mt, ticket, dset[0], dset[1], dset[2]);
@@ -1480,7 +1474,7 @@ idn_send_nego(int domid, idn_msgtype_t *mtp, domainset_t conset)
static int
idn_recv_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs,
- ushort_t dcookie)
+ ushort_t dcookie)
{
uint_t msg = mtp->mt_mtype;
idn_msgtype_t mt;
@@ -1496,9 +1490,9 @@ idn_recv_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs,
#ifdef DEBUG
if (DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
PR_HITLIST("%s:%d: dcpu=%d, dstate=%s, msg=%x, "
- "hitlist=%x\n",
- proc, domid, dp->dcpu, idnds_str[dp->dstate],
- msg, idn.domset.ds_hitlist);
+ "hitlist=%x\n",
+ proc, domid, dp->dcpu, idnds_str[dp->dstate],
+ msg, idn.domset.ds_hitlist);
}
#endif /* DEBUG */
@@ -1514,15 +1508,15 @@ idn_recv_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs,
if (idn_open_domain(domid, cpuid, ticket) != 0) {
PR_PROTO("%s:%d: FAILED to open doamin "
- "(ticket = 0x%x)\n",
- proc, domid, ticket);
+ "(ticket = 0x%x)\n",
+ proc, domid, ticket);
return (-1);
}
}
if ((msg & IDNP_MSGTYPE_MASK) == IDNP_NEGO) {
PR_PROTO("%s:%d: assigned SEND cookie 0x%x\n",
- proc, domid, dcookie);
+ proc, domid, dcookie);
dp->dcookie_send = dcookie;
}
@@ -1541,11 +1535,9 @@ idn_recv_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs,
if (dp->dstate == IDNDS_CONNECTED) {
DOMAINSET_ADD(idn.domset.ds_relink, domid);
IDN_HISTORY_LOG(IDNH_RELINK, domid,
- dp->dstate,
- idn.domset.ds_relink);
+ dp->dstate, idn.domset.ds_relink);
idn_disconnect(domid, IDNFIN_NORMAL,
- IDNFIN_ARG_NONE,
- IDNFIN_SYNC_YES);
+ IDNFIN_ARG_NONE, IDNFIN_SYNC_YES);
} else {
mt.mt_mtype = IDNP_NACK;
mt.mt_atype = msg;
@@ -1553,20 +1545,20 @@ idn_recv_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs,
CLR_XARGS(nargs);
if (DOMAIN_IN_SET(idn.domset.ds_hitlist,
- domid)) {
+ domid)) {
SET_XARGS_NACK_TYPE(nargs,
- IDNNACK_EXIT);
+ IDNNACK_EXIT);
} else {
int new_masterid;
int new_cpuid = IDN_NIL_DCPU;
SET_XARGS_NACK_TYPE(nargs,
- IDNNACK_RETRY);
+ IDNNACK_RETRY);
IDN_GLOCK_SHARED();
new_masterid = IDN_GET_NEW_MASTERID();
if (new_masterid == IDN_NIL_DOMID)
new_masterid =
- IDN_GET_MASTERID();
+ IDN_GET_MASTERID();
if (new_masterid != IDN_NIL_DOMID) {
idn_domain_t *mdp;
@@ -1574,9 +1566,8 @@ idn_recv_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs,
new_cpuid = mdp->dcpu;
}
SET_XARGS_NACK_ARG1(nargs,
- new_masterid);
- SET_XARGS_NACK_ARG2(nargs,
- new_cpuid);
+ new_masterid);
+ SET_XARGS_NACK_ARG2(nargs, new_cpuid);
IDN_GUNLOCK();
}
idn_send_acknack(domid, &mt, nargs);
@@ -1615,7 +1606,7 @@ idn_retry_nego(uint_t token, void *arg)
#endif /* DEBUG */
PR_PROTO("%s:%d: dxp(%s) != NEGO...bailing...\n",
- proc, domid, dp->dxp ? str : "NULL");
+ proc, domid, dp->dxp ? str : "NULL");
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
return;
@@ -1623,8 +1614,8 @@ idn_retry_nego(uint_t token, void *arg)
if (dp->dxstate != IDNXS_PEND) {
PR_PROTO("%s:%d: xstate(%s) != %s...bailing\n",
- proc, domid, idnxs_str[dp->dxstate],
- idnxs_str[IDNXS_PEND]);
+ proc, domid, idnxs_str[dp->dxstate],
+ idnxs_str[IDNXS_PEND]);
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
return;
@@ -1637,9 +1628,9 @@ idn_retry_nego(uint_t token, void *arg)
* reconfig has completed.
*/
PR_PROTO("%s:%d: reconfig in-progress...try later\n",
- proc, domid);
+ proc, domid);
idn_retry_submit(idn_retry_nego, NULL, token,
- idn_msg_retrytime[IDNP_NEGO]);
+ idn_msg_retrytime[IDNP_NEGO]);
IDN_GUNLOCK();
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
@@ -1647,9 +1638,9 @@ idn_retry_nego(uint_t token, void *arg)
}
new_masterid = IDN_GET_NEW_MASTERID();
if ((idn.state == IDNGS_CONNECT) &&
- (new_masterid != IDN_NIL_DOMID) &&
- (domid != new_masterid) &&
- (idn.localid != new_masterid)) {
+ (new_masterid != IDN_NIL_DOMID) &&
+ (domid != new_masterid) &&
+ (idn.localid != new_masterid)) {
/*
* We have a new master pending and this
* guy isn't it. Wait until the local domain
@@ -1658,9 +1649,9 @@ idn_retry_nego(uint_t token, void *arg)
* guy.
*/
PR_PROTO("%s:%d: waiting for connect to new master %d\n",
- proc, domid, IDN_GET_NEW_MASTERID());
+ proc, domid, IDN_GET_NEW_MASTERID());
idn_retry_submit(idn_retry_nego, NULL, token,
- idn_msg_retrytime[IDNP_NEGO]);
+ idn_msg_retrytime[IDNP_NEGO]);
IDN_GUNLOCK();
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
@@ -1695,10 +1686,10 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (msg & IDNP_NACK) {
if (GET_XARGS_NACK_TYPE(xargs) == IDNNACK_EXIT) {
PR_HITLIST("%s:%d(%s): (msg=%x) EXIT received, "
- "adding to hitlist %x -> %x\n",
- proc, domid, idnds_str[dp->dstate], msg,
- idn.domset.ds_hitlist,
- idn.domset.ds_hitlist | DOMAINSET(domid));
+ "adding to hitlist %x -> %x\n",
+ proc, domid, idnds_str[dp->dstate], msg,
+ idn.domset.ds_hitlist,
+ idn.domset.ds_hitlist | DOMAINSET(domid));
DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
return (-1);
@@ -1709,15 +1700,14 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
PR_HITLIST("%s:%d(%s): (msg=%x) domain in hitlist (%x) - "
- "exiting phase\n",
- proc, domid, idnds_str[dp->dstate], msg,
- idn.domset.ds_hitlist);
+ "exiting phase\n",
+ proc, domid, idnds_str[dp->dstate], msg,
+ idn.domset.ds_hitlist);
return (-1);
}
- if ((dp->dstate == IDNDS_NEGO_PEND) &&
- (msg & IDNP_MSGTYPE_MASK) &&
- (msg & IDNP_ACK)) /* nego+ack */
+ if ((dp->dstate == IDNDS_NEGO_PEND) && (msg & IDNP_MSGTYPE_MASK) &&
+ (msg & IDNP_ACK)) /* nego+ack */
return (1);
dmask = (uint_t)-1;
@@ -1725,7 +1715,7 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
IDN_GLOCK_EXCL();
if (idn.state == IDNGS_DISCONNECT) {
PR_PROTO("%s:%d: DISCONNECT in-progress >>> EXIT\n",
- proc, domid);
+ proc, domid);
IDN_GUNLOCK();
return (-1);
} else if (idn.state == IDNGS_OFFLINE) {
@@ -1739,25 +1729,25 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (!DOMAIN_IN_SET(idn.domset.ds_trans_on, domid)) {
DOMAINSET_ADD(idn.domset.ds_trans_on, domid);
IDN_HISTORY_LOG(IDNH_NEGO, domid,
- idn.domset.ds_trans_on,
- idn.domset.ds_connected);
+ idn.domset.ds_trans_on,
+ idn.domset.ds_connected);
}
switch (idn.state) {
case IDNGS_RECONFIG:
PR_PROTO("%s:%d: RECONFIG in-progress >>> RETRY\n",
- proc, domid);
+ proc, domid);
IDN_GUNLOCK();
return (1);
case IDNGS_CONNECT:
new_masterid = IDN_GET_NEW_MASTERID();
if ((new_masterid != IDN_NIL_DOMID) &&
- (domid != new_masterid) &&
- (idn.localid != new_masterid)) {
+ (domid != new_masterid) &&
+ (idn.localid != new_masterid)) {
PR_PROTO("%s:%d: waiting for connect to "
- "new master %d\n",
- proc, domid, IDN_GET_NEW_MASTERID());
+ "new master %d\n",
+ proc, domid, IDN_GET_NEW_MASTERID());
IDN_GUNLOCK();
return (1);
}
@@ -1799,7 +1789,7 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
m_cpuid = dp->dcpu;
} else {
IDNNEG_DSET_GET(dset, new_masterid, m_cpuid,
- dmask);
+ dmask);
if (m_cpuid == -1) {
/*
* Something is bogus if remote domain
@@ -1807,12 +1797,12 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
* doesn't have the cpuid for it.
*/
cmn_err(CE_WARN,
- "IDN: 209: remote domain (ID "
- "%d, CPU %d) reporting master "
- "(ID %d) without CPU ID",
- domid, dp->dcpu, new_masterid);
+ "IDN: 209: remote domain (ID "
+ "%d, CPU %d) reporting master "
+ "(ID %d) without CPU ID",
+ domid, dp->dcpu, new_masterid);
DOMAINSET_ADD(idn.domset.ds_hitlist,
- domid);
+ domid);
IDN_GUNLOCK();
return (-1);
}
@@ -1831,9 +1821,9 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
#ifdef DEBUG
if (idn.domset.ds_hitlist) {
PR_HITLIST("%s:%d: con_set %x -> %x (hitlist = %x)\n",
- proc, domid, con_set,
- con_set & ~idn.domset.ds_hitlist,
- idn.domset.ds_hitlist);
+ proc, domid, con_set,
+ con_set & ~idn.domset.ds_hitlist,
+ idn.domset.ds_hitlist);
}
#endif /* DEBUG */
@@ -1843,11 +1833,10 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
ASSERT(!DOMAIN_IN_SET(con_set, domid));
if ((new_masterid != IDN_NIL_DOMID) &&
- DOMAIN_IN_SET(idn.domset.ds_hitlist,
- new_masterid)) {
+ DOMAIN_IN_SET(idn.domset.ds_hitlist, new_masterid)) {
PR_HITLIST("%s:%d: new_mstr %d -> -1 (hitlist = %x)\n",
- proc, domid, new_masterid,
- idn.domset.ds_hitlist);
+ proc, domid, new_masterid,
+ idn.domset.ds_hitlist);
IDN_GUNLOCK();
return (1);
}
@@ -1872,8 +1861,8 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (masterid == idn.localid) {
if (idn_master_init() < 0) {
cmn_err(CE_WARN,
- "IDN: 210: failed to init "
- "MASTER context");
+ "IDN: 210: failed to init "
+ "MASTER context");
ldp->dvote.v.master = 0;
IDN_DUNLOCK(idn.localid);
IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
@@ -1921,8 +1910,7 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
* other domains.
*/
PR_PROTO("%s:%d: still exchanging CFG "
- "w/master(%d)\n",
- proc, domid, masterid);
+ "w/master(%d)\n", proc, domid, masterid);
IDN_GUNLOCK();
return (1);
}
@@ -1948,9 +1936,8 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (dp->dsync.s_cmd != IDNSYNC_CONNECT) {
idn_sync_exit(domid, IDNSYNC_DISCONNECT);
idn_sync_enter(domid, IDNSYNC_CONNECT,
- con_set, DOMAINSET(idn.localid),
- idn_xstate_transfunc,
- (void *)IDNP_CON);
+ con_set, DOMAINSET(idn.localid), idn_xstate_transfunc,
+ (void *)IDNP_CON);
}
/*
@@ -1982,8 +1969,8 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
IDNNEG_DSET_GET(dset, d, cpuid, dmask);
if (cpuid == -1) {
PR_PROTO("%s:%d: failed to get cpuid from dset "
- "for domain %d (pset = 0x%x)\n",
- proc, domid, d, pending_set);
+ "for domain %d (pset = 0x%x)\n",
+ proc, domid, d, pending_set);
DOMAINSET_DEL(idn.domset.ds_trans_on, d);
continue;
}
@@ -1991,13 +1978,13 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
IDN_DLOCK_EXCL(d);
if ((rv = idn_open_domain(d, cpuid, 0)) != 0) {
PR_PROTO("%s:%d: failed "
- "idn_open_domain(%d,%d,0) (rv = %d)\n",
- proc, domid, d, cpuid, rv);
+ "idn_open_domain(%d,%d,0) (rv = %d)\n",
+ proc, domid, d, cpuid, rv);
if (rv < 0) {
cmn_err(CE_WARN,
- "IDN: 205: (%s) failed to "
- "open-domain(%d,%d)",
- proc, d, cpuid);
+ "IDN: 205: (%s) failed to "
+ "open-domain(%d,%d)",
+ proc, d, cpuid);
DOMAINSET_DEL(idn.domset.ds_trans_on, d);
} else if (DOMAIN_IN_SET(idn.domset.ds_trans_off, d)) {
/*
@@ -2007,7 +1994,7 @@ idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
*/
DOMAINSET_ADD(idn.domset.ds_relink, d);
IDN_HISTORY_LOG(IDNH_RELINK, d, dp->dstate,
- idn.domset.ds_relink);
+ idn.domset.ds_relink);
}
IDN_DUNLOCK(d);
continue;
@@ -2105,12 +2092,12 @@ idn_error_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (retry) {
token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
idn_retry_submit(idn_retry_nego, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_NEGO]);
+ idn_msg_retrytime[(int)IDNRETRY_NEGO]);
} else {
DOMAINSET_DEL(idn.domset.ds_relink, domid);
IDN_RESET_COOKIES(domid);
idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
- IDNDS_SYNC_TYPE(&idn_domain[domid]));
+ IDNDS_SYNC_TYPE(&idn_domain[domid]));
}
}
@@ -2164,8 +2151,7 @@ idn_action_nego_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
}
IDNNEG_DSET_SET_MASTER(dset, domid, IDN_GET_MASTERID());
ASSERT((IDN_GET_MASTERID() != IDN_NIL_DOMID) ?
- (idn_domain[IDN_GET_MASTERID()].dcpu != IDN_NIL_DCPU) :
- 1);
+ (idn_domain[IDN_GET_MASTERID()].dcpu != IDN_NIL_DCPU) : 1);
vote.ticket = idn_domain[idn.localid].dvote.ticket;
vote.v.master = 0;
CLR_XARGS(nargs);
@@ -2209,12 +2195,12 @@ idn_action_nego_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (retry) {
token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
idn_retry_submit(idn_retry_nego, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_NEGO]);
+ idn_msg_retrytime[(int)IDNRETRY_NEGO]);
} else {
DOMAINSET_DEL(idn.domset.ds_relink, domid);
IDN_RESET_COOKIES(domid);
idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
- IDNDS_SYNC_TYPE(&idn_domain[domid]));
+ IDNDS_SYNC_TYPE(&idn_domain[domid]));
}
}
}
@@ -2262,12 +2248,12 @@ idn_action_nego_rcvd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (retry) {
token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
idn_retry_submit(idn_retry_nego, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_NEGO]);
+ idn_msg_retrytime[(int)IDNRETRY_NEGO]);
} else {
DOMAINSET_DEL(idn.domset.ds_relink, domid);
IDN_RESET_COOKIES(domid);
idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
- IDNDS_SYNC_TYPE(&idn_domain[domid]));
+ IDNDS_SYNC_TYPE(&idn_domain[domid]));
}
}
}
@@ -2316,10 +2302,10 @@ idn_exit_nego(int domid, uint_t msgtype)
IDN_GLOCK_SHARED();
if ((idn.state != IDNGS_DISCONNECT) &&
- !DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
+ !DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
DOMAINSET_ADD(idn.domset.ds_relink, domid);
IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
- idn.domset.ds_relink);
+ idn.domset.ds_relink);
} else {
idn_update_op(IDNOP_ERROR, DOMAINSET(domid), NULL);
DOMAINSET_DEL(idn.domset.ds_relink, domid);
@@ -2331,8 +2317,7 @@ idn_exit_nego(int domid, uint_t msgtype)
* possible we may not have exchanged appropriate cookies.
*/
IDN_RESET_COOKIES(domid);
- idn_disconnect(domid, fintype, IDNFIN_ARG_NONE,
- IDNDS_SYNC_TYPE(dp));
+ idn_disconnect(domid, fintype, IDNFIN_ARG_NONE, IDNDS_SYNC_TYPE(dp));
}
static void
@@ -2350,7 +2335,7 @@ idn_nego_cleanup_check(int domid, int new_masterid, int new_cpuid)
IDN_GLOCK_EXCL();
if (((idn.state == IDNGS_ONLINE) && !idn.domset.ds_connected) ||
- (idn.state == IDNGS_CONNECT)) {
+ (idn.state == IDNGS_CONNECT)) {
domainset_t trans_on;
int masterid;
int retry_domid = IDN_NIL_DOMID;
@@ -2358,7 +2343,7 @@ idn_nego_cleanup_check(int domid, int new_masterid, int new_cpuid)
IDN_DLOCK_EXCL(idn.localid);
masterid = (idn.state == IDNGS_ONLINE) ?
- IDN_GET_MASTERID() : IDN_GET_NEW_MASTERID();
+ IDN_GET_MASTERID() : IDN_GET_NEW_MASTERID();
trans_on = idn.domset.ds_trans_on;
DOMAINSET_DEL(trans_on, domid);
if (trans_on == 0) {
@@ -2369,8 +2354,8 @@ idn_nego_cleanup_check(int domid, int new_masterid, int new_cpuid)
* to connect with.
*/
ASSERT((idn.state == IDNGS_ONLINE) ?
- ((idn.localid == masterid) ||
- (domid == masterid)) : 1);
+ ((idn.localid == masterid) ||
+ (domid == masterid)) : 1);
if (idn.localid == masterid)
idn_master_deinit();
ldp->dvote.v.connected = 0;
@@ -2388,16 +2373,16 @@ idn_nego_cleanup_check(int domid, int new_masterid, int new_cpuid)
* it's ourself.
*/
if ((new_masterid != IDN_NIL_DOMID) &&
- (new_masterid != idn.localid) &&
- (new_masterid != domid)) {
+ (new_masterid != idn.localid) &&
+ (new_masterid != domid)) {
IDN_DLOCK_EXCL(new_masterid);
rv = idn_open_domain(new_masterid,
- new_cpuid, 0);
+ new_cpuid, 0);
if (rv < 0) {
cmn_err(CE_WARN,
- "IDN: 205: (%s) failed to "
- "open-domain(%d,%d)",
- proc, new_masterid, new_cpuid);
+ "IDN: 205: (%s) failed to "
+ "open-domain(%d,%d)",
+ proc, new_masterid, new_cpuid);
IDN_GLOCK_EXCL();
IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
IDN_GUNLOCK();
@@ -2430,30 +2415,29 @@ idn_nego_cleanup_check(int domid, int new_masterid, int new_cpuid)
if (idn.state == IDNGS_ONLINE) {
IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs,
- gk_reconfig_last);
+ gk_reconfig_last);
IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
IDN_GUNLOCK();
idn_unlink_domainset(trans_on, IDNFIN_NORMAL,
- IDNFIN_ARG_NONE,
- IDNFIN_OPT_RELINK,
- BOARDSET_ALL);
+ IDNFIN_ARG_NONE,
+ IDNFIN_OPT_RELINK,
+ BOARDSET_ALL);
} else if ((new_masterid != IDN_NIL_DOMID) &&
- (new_masterid != idn.localid) &&
- (new_masterid != domid) &&
- !DOMAIN_IN_SET(trans_on,
- new_masterid)) {
+ (new_masterid != idn.localid) &&
+ (new_masterid != domid) &&
+ !DOMAIN_IN_SET(trans_on, new_masterid)) {
IDN_GUNLOCK();
IDN_DLOCK_EXCL(new_masterid);
rv = idn_open_domain(new_masterid,
- new_cpuid, 0);
+ new_cpuid, 0);
IDN_GLOCK_EXCL();
IDN_DUNLOCK(new_masterid);
if (rv < 0) {
cmn_err(CE_WARN,
- "IDN: 205: (%s) failed to "
- "open-domain(%d,%d)",
- proc, new_masterid,
- new_cpuid);
+ "IDN: 205: (%s) failed to "
+ "open-domain(%d,%d)",
+ proc, new_masterid,
+ new_cpuid);
IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
new_masterid = IDN_NIL_DOMID;
} else {
@@ -2477,7 +2461,7 @@ idn_nego_cleanup_check(int domid, int new_masterid, int new_cpuid)
IDN_DUNLOCK(retry_domid);
token = IDN_RETRY_TOKEN(retry_domid, IDNRETRY_NEGO);
idn_retry_submit(idn_retry_nego, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_NEGO]);
+ idn_msg_retrytime[(int)IDNRETRY_NEGO]);
}
} else {
IDN_GUNLOCK();
@@ -2485,8 +2469,8 @@ idn_nego_cleanup_check(int domid, int new_masterid, int new_cpuid)
}
static int
-idn_send_con(int domid, idn_msgtype_t *mtp,
- idn_con_t contype, domainset_t conset)
+idn_send_con(int domid, idn_msgtype_t *mtp, idn_con_t contype, domainset_t
+ conset)
{
idn_msgtype_t mt;
uint_t acknack;
@@ -2518,14 +2502,14 @@ idn_send_con(int domid, idn_msgtype_t *mtp,
ASSERT((contype == IDNCON_QUERY) ? idn_domain[domid].dcookie_send : 1);
PR_PROTO("%s:%d: sending con%sto (cpu %d) [ct=%s, cs=0x%x]\n",
- proc, domid,
- (acknack & IDNP_ACK) ? "+ack " :
- (acknack & IDNP_NACK) ? "+nack " : " ",
- idn_domain[domid].dcpu,
- idncon_str[contype], conset);
+ proc, domid,
+ (acknack & IDNP_ACK) ? "+ack " :
+ (acknack & IDNP_NACK) ? "+nack " : " ",
+ idn_domain[domid].dcpu,
+ idncon_str[contype], conset);
IDN_MSGTIMER_START(domid, IDNP_CON, (ushort_t)contype,
- idn_msg_waittime[IDNP_CON], &mt.mt_cookie);
+ idn_msg_waittime[IDNP_CON], &mt.mt_cookie);
IDNXDC(domid, &mt, (uint_t)contype, (uint_t)conset, 0, 0);
@@ -2561,10 +2545,9 @@ idn_recv_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
domainset_t query_set;
query_set = idn_sync_register(domid, IDNSYNC_CONNECT,
- ready_set, IDNSYNC_REG_REG);
+ ready_set, IDNSYNC_REG_REG);
- my_ready_set = idn.domset.ds_connected |
- idn.domset.ds_ready_on;
+ my_ready_set = idn.domset.ds_connected | idn.domset.ds_ready_on;
my_ready_set &= ~idn.domset.ds_trans_off;
DOMAINSET_ADD(my_ready_set, idn.localid);
@@ -2581,7 +2564,7 @@ idn_recv_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
token = IDN_RETRY_TOKEN(domid, IDNRETRY_CONQ);
idn_retry_submit(idn_retry_query, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_CONQ]);
+ idn_msg_retrytime[(int)IDNRETRY_CONQ]);
}
return (0);
@@ -2599,7 +2582,7 @@ idn_recv_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
INUM2STR(msgarg, lstr);
PR_PROTO("%s:%d: ERROR: NOT YET REGISTERED (%s/%s)\n",
- proc, domid, mstr, lstr);
+ proc, domid, mstr, lstr);
if (msg & IDNP_MSGTYPE_MASK) {
mt.mt_mtype = IDNP_NACK;
@@ -2640,19 +2623,19 @@ idn_retry_con(uint_t token, void *arg)
#endif /* DEBUG */
PR_PROTO("%s:%d: dxp(%s) != CON...bailing...\n",
- proc, domid, dp->dxp ? str : "NULL");
+ proc, domid, dp->dxp ? str : "NULL");
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
return;
}
if ((dp->dsync.s_cmd != IDNSYNC_CONNECT) ||
- (dp->dxstate != IDNXS_PEND)) {
+ (dp->dxstate != IDNXS_PEND)) {
PR_PROTO("%s:%d: cmd (%s) and/or xstate (%s) not "
- "expected (%s/%s)\n",
- proc, domid, idnsync_str[dp->dsync.s_cmd],
- idnxs_str[dp->dxstate], idnsync_str[IDNSYNC_CONNECT],
- idnxs_str[IDNXS_PEND]);
+ "expected (%s/%s)\n",
+ proc, domid, idnsync_str[dp->dsync.s_cmd],
+ idnxs_str[dp->dxstate], idnsync_str[IDNSYNC_CONNECT],
+ idnxs_str[IDNXS_PEND]);
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
return;
@@ -2679,13 +2662,12 @@ idn_check_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
return (0);
if ((dp->dstate == IDNDS_CON_PEND) &&
- (msg & IDNP_MSGTYPE_MASK) &&
- (msg & IDNP_ACK)) /* con+ack */
+ (msg & IDNP_MSGTYPE_MASK) && (msg & IDNP_ACK)) /* con+ack */
return (1);
if (msg == 0) {
ready_set = idn.domset.ds_connected &
- ~idn.domset.ds_trans_off;
+ ~idn.domset.ds_trans_off;
} else {
ready_set = GET_XARGS_CON_DOMSET(xargs);
DOMAINSET_ADD(idn.domset.ds_ready_on, domid);
@@ -2694,7 +2676,7 @@ idn_check_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
DOMAINSET_ADD(ready_set, idn.localid);
query_set = idn_sync_register(domid, IDNSYNC_CONNECT,
- ready_set, IDNSYNC_REG_REG);
+ ready_set, IDNSYNC_REG_REG);
/*
* No need to query this domain as he's already
* in the CON sequence.
@@ -2711,7 +2693,7 @@ idn_check_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
int d;
my_ready_set = idn.domset.ds_ready_on |
- idn.domset.ds_connected;
+ idn.domset.ds_connected;
my_ready_set &= ~idn.domset.ds_trans_off;
DOMAINSET_ADD(my_ready_set, idn.localid);
@@ -2723,7 +2705,7 @@ idn_check_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
IDN_DLOCK_EXCL(d);
if ((dp->dsync.s_cmd == IDNSYNC_CONNECT) ||
- !dp->dcookie_send) {
+ !dp->dcookie_send) {
IDN_DUNLOCK(d);
continue;
}
@@ -2762,7 +2744,7 @@ idn_error_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
idn_retry_submit(idn_retry_con, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_CON]);
+ idn_msg_retrytime[(int)IDNRETRY_CON]);
}
/*ARGSUSED*/
@@ -2777,8 +2759,8 @@ idn_action_con_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
ASSERT(IDN_SYNC_IS_LOCKED());
ASSERT(IDN_DLOCK_IS_EXCL(domid));
- my_ready_set = dp->dsync.s_set_rdy |
- idn.domset.ds_ready_on | idn.domset.ds_connected;
+ my_ready_set = dp->dsync.s_set_rdy | idn.domset.ds_ready_on |
+ idn.domset.ds_connected;
my_ready_set &= ~idn.domset.ds_trans_off;
DOMAINSET_ADD(my_ready_set, idn.localid);
@@ -2806,9 +2788,8 @@ idn_action_con_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
- my_ready_set = dp->dsync.s_set_rdy |
- idn.domset.ds_ready_on |
- idn.domset.ds_connected;
+ my_ready_set = dp->dsync.s_set_rdy | idn.domset.ds_ready_on |
+ idn.domset.ds_connected;
my_ready_set &= ~idn.domset.ds_trans_off;
DOMAINSET_ADD(my_ready_set, idn.localid);
@@ -2840,7 +2821,7 @@ idn_action_con_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
*/
token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
idn_retry_submit(idn_retry_con, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_CON]);
+ idn_msg_retrytime[(int)IDNRETRY_CON]);
}
}
@@ -2860,7 +2841,7 @@ idn_action_con_rcvd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
*/
token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
idn_retry_submit(idn_retry_con, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_CON]);
+ idn_msg_retrytime[(int)IDNRETRY_CON]);
}
}
@@ -2893,17 +2874,17 @@ idn_final_con(int domid)
if (idn.domset.ds_trans_on == 0) {
if ((idn.domset.ds_trans_off | idn.domset.ds_relink) == 0) {
PR_HITLIST("%s:%d: HITLIST %x -> 0\n",
- proc, domid, idn.domset.ds_hitlist);
+ proc, domid, idn.domset.ds_hitlist);
idn.domset.ds_hitlist = 0;
}
PR_PROTO("%s:%d: ALL CONNECTED ************ "
- "(0x%x + 0x%x) = 0x%x\n", proc, domid,
- DOMAINSET(idn.localid), idn.domset.ds_connected,
- DOMAINSET(idn.localid) | idn.domset.ds_connected);
+ "(0x%x + 0x%x) = 0x%x\n", proc, domid,
+ DOMAINSET(idn.localid), idn.domset.ds_connected,
+ DOMAINSET(idn.localid) | idn.domset.ds_connected);
} else {
PR_PROTO("%s:%d: >>> ds_trans_on = 0x%x, ds_ready_on = 0x%x\n",
- proc, domid,
- idn.domset.ds_trans_on, idn.domset.ds_ready_on);
+ proc, domid,
+ idn.domset.ds_trans_on, idn.domset.ds_ready_on);
}
if (idn_verify_config_mbox(domid)) {
@@ -2927,7 +2908,7 @@ idn_final_con(int domid)
* which is the effect we really want anyway.
*/
idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_SMRBAD,
- IDNFIN_SYNC_YES);
+ IDNFIN_SYNC_YES);
return;
}
@@ -2968,8 +2949,8 @@ idn_final_con(int domid)
(void) timeout(idn_link_established, (void *)(uintptr_t)targ, 50);
cmn_err(CE_NOTE,
- "!IDN: 200: link (domain %d, CPU %d) connected",
- dp->domid, dp->dcpu);
+ "!IDN: 200: link (domain %d, CPU %d) connected",
+ dp->domid, dp->dcpu);
}
static void
@@ -2992,20 +2973,18 @@ idn_exit_con(int domid, uint_t msgtype)
if (idn.state != IDNGS_DISCONNECT) {
DOMAINSET_ADD(idn.domset.ds_relink, domid);
IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
- idn.domset.ds_relink);
+ idn.domset.ds_relink);
} else {
DOMAINSET_DEL(idn.domset.ds_relink, domid);
}
IDN_GUNLOCK();
- idn_disconnect(domid, fintype, IDNFIN_ARG_NONE,
- IDNDS_SYNC_TYPE(dp));
+ idn_disconnect(domid, fintype, IDNFIN_ARG_NONE, IDNDS_SYNC_TYPE(dp));
}
static int
-idn_send_fin(int domid, idn_msgtype_t *mtp, idn_fin_t fintype,
- idn_finarg_t finarg, idn_finopt_t finopt,
- domainset_t finset, uint_t finmaster)
+idn_send_fin(int domid, idn_msgtype_t *mtp, idn_fin_t fintype, idn_finarg_t
+ finarg, idn_finopt_t finopt, domainset_t finset, uint_t finmaster)
{
int need_timer = 1;
uint_t acknack;
@@ -3040,24 +3019,24 @@ idn_send_fin(int domid, idn_msgtype_t *mtp, idn_fin_t fintype,
}
PR_PROTO("%s:%d: sending fin%sto (cpu %d) "
- "[ft=%s, fa=%s, fs=0x%x, fo=%s, fm=(%d,%d)]\n",
- proc, domid,
- (acknack & IDNP_ACK) ? "+ack " :
- (acknack & IDNP_NACK) ? "+nack " : " ",
- dp->dcpu, idnfin_str[fintype], idnfinarg_str[finarg],
- (int)finset, idnfinopt_str[finopt],
- FIN_MASTER_DOMID(finmaster), FIN_MASTER_CPUID(finmaster));
+ "[ft=%s, fa=%s, fs=0x%x, fo=%s, fm=(%d,%d)]\n",
+ proc, domid,
+ (acknack & IDNP_ACK) ? "+ack " :
+ (acknack & IDNP_NACK) ? "+nack " : " ",
+ dp->dcpu, idnfin_str[fintype], idnfinarg_str[finarg],
+ (int)finset, idnfinopt_str[finopt],
+ FIN_MASTER_DOMID(finmaster), FIN_MASTER_CPUID(finmaster));
if (need_timer) {
IDN_MSGTIMER_START(domid, IDNP_FIN, (ushort_t)fintype,
- idn_msg_waittime[IDNP_FIN], &mt.mt_cookie);
+ idn_msg_waittime[IDNP_FIN], &mt.mt_cookie);
}
SET_FIN_TYPE(fintypearg, fintype);
SET_FIN_ARG(fintypearg, finarg);
- IDNXDC(domid, &mt, fintypearg, (uint_t)finset,
- (uint_t)finopt, finmaster);
+ IDNXDC(domid, &mt, fintypearg, (uint_t)finset, (uint_t)finopt,
+ finmaster);
return (0);
}
@@ -3092,23 +3071,23 @@ idn_recv_fin(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (msg & IDNP_NACK) {
PR_PROTO("%s:%d: received NACK (type = %s)\n",
- proc, domid, idnnack_str[xargs[0]]);
+ proc, domid, idnnack_str[xargs[0]]);
} else {
PR_PROTO("%s:%d: fintype = %s, finopt = %s, "
- "finarg = %s, ready_set = 0x%x\n",
- proc, domid, idnfin_str[fintype],
- idnfinopt_str[finopt],
- idnfinarg_str[finarg], ready_set);
+ "finarg = %s, ready_set = 0x%x\n",
+ proc, domid, idnfin_str[fintype],
+ idnfinopt_str[finopt],
+ idnfinarg_str[finarg], ready_set);
}
if (!(msg & IDNP_NACK) && (fintype == IDNFIN_QUERY)) {
domainset_t query_set;
query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT,
- ready_set, IDNSYNC_REG_REG);
+ ready_set, IDNSYNC_REG_REG);
my_ready_set = ~idn.domset.ds_connected |
- idn.domset.ds_ready_off;
+ idn.domset.ds_ready_off;
if (msg & IDNP_MSGTYPE_MASK) {
mt.mt_mtype = IDNP_ACK;
@@ -3126,7 +3105,7 @@ idn_recv_fin(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
token = IDN_RETRY_TOKEN(domid, IDNRETRY_FINQ);
idn_retry_submit(idn_retry_query, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_FINQ]);
+ idn_msg_retrytime[(int)IDNRETRY_FINQ]);
}
return (0);
@@ -3137,7 +3116,7 @@ idn_recv_fin(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (IDNDS_IS_CLOSED(dp)) {
PR_PROTO("%s:%d: domain already closed (%s)\n",
- proc, domid, idnds_str[dp->dstate]);
+ proc, domid, idnds_str[dp->dstate]);
if (msg & IDNP_MSGTYPE_MASK) {
/*
* fin or fin+ack.
@@ -3188,7 +3167,7 @@ idn_recv_fin(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
*/
DOMAINSET_ADD(idn.domset.ds_ready_off, domid);
(void) idn_sync_register(domid, IDNSYNC_DISCONNECT,
- DOMAINSET_ALL, IDNSYNC_REG_REG);
+ DOMAINSET_ALL, IDNSYNC_REG_REG);
ready_set = (uint_t)DOMAINSET_ALL;
/*
* Need to transform message to allow us to
@@ -3216,16 +3195,16 @@ idn_recv_fin(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
default:
#ifdef DEBUG
cmn_err(CE_PANIC,
- "%s:%d: UNEXPECTED state = %s",
- proc, domid,
- idnds_str[dp->dstate]);
+ "%s:%d: UNEXPECTED state = %s",
+ proc, domid,
+ idnds_str[dp->dstate]);
#endif /* DEBUG */
break;
}
}
fintype = (uint_t)dp->dfin;
finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
- IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
+ IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
CLR_XARGS(xargs);
SET_XARGS_FIN_TYPE(xargs, fintype);
@@ -3259,7 +3238,7 @@ idn_retry_fin(uint_t token, void *arg)
if (dp->dxp != &xphase_fin) {
PR_PROTO("%s:%d: dxp(0x%p) != xstate_fin(0x%p)...bailing\n",
- proc, domid, dp->dxp, &xphase_fin);
+ proc, domid, dp->dxp, &xphase_fin);
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
return;
@@ -3267,15 +3246,15 @@ idn_retry_fin(uint_t token, void *arg)
if (dp->dxstate != IDNXS_PEND) {
PR_PROTO("%s:%d: xstate(%s) != %s...bailing\n",
- proc, domid, idnxs_str[dp->dxstate],
- idnxs_str[IDNXS_PEND]);
+ proc, domid, idnxs_str[dp->dxstate],
+ idnxs_str[IDNXS_PEND]);
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
return;
}
finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
- IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
+ IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
CLR_XARGS(xargs);
SET_XARGS_FIN_TYPE(xargs, dp->dfin);
@@ -3319,9 +3298,8 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (msg & IDNP_NACK)
return (0);
- if ((dp->dstate == IDNDS_FIN_PEND) &&
- (msg & IDNP_MSGTYPE_MASK) &&
- (msg & IDNP_ACK)) /* fin+ack */
+ if ((dp->dstate == IDNDS_FIN_PEND) && (msg & IDNP_MSGTYPE_MASK) &&
+ (msg & IDNP_ACK)) /* fin+ack */
return (1);
query_set = 0;
@@ -3338,10 +3316,10 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
IDN_GLOCK_SHARED();
conn_set = (idn.domset.ds_connected | idn.domset.ds_trans_on) &
- ~idn.domset.ds_trans_off;
+ ~idn.domset.ds_trans_off;
if ((idn.state == IDNGS_DISCONNECT) ||
- (idn.state == IDNGS_RECONFIG) ||
- (domid == IDN_GET_MASTERID()) || !conn_set) {
+ (idn.state == IDNGS_RECONFIG) ||
+ (domid == IDN_GET_MASTERID()) || !conn_set) {
/*
* If we're disconnecting, reconfiguring,
* unlinking from the master, or unlinking
@@ -3354,8 +3332,7 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
}
IDN_GUNLOCK();
- idn_shutdown_datapath(shutdown_set,
- (dp->dfin == IDNFIN_FORCE_HARD));
+ idn_shutdown_datapath(shutdown_set, (dp->dfin == IDNFIN_FORCE_HARD));
IDN_GLOCK_EXCL();
/*
@@ -3367,7 +3344,7 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
*/
if ((domid == IDN_GET_MASTERID()) && (idn.smr.rempfn != PFN_INVALID)) {
PR_PROTO("%s:%d: deconfiging CURRENT MASTER - SMR remap\n",
- proc, domid);
+ proc, domid);
IDN_DLOCK_EXCL(idn.localid);
/*
* We're going to remap the SMR,
@@ -3403,16 +3380,16 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
* override it to a NORMAL fin.
*/
PR_PROTO("%s:%d: WARNING invalid fintype (%d) -> %s(%d)\n",
- proc, domid, (int)fintype,
- idnfin_str[IDNFIN_NORMAL], (int)IDNFIN_NORMAL);
+ proc, domid, (int)fintype,
+ idnfin_str[IDNFIN_NORMAL], (int)IDNFIN_NORMAL);
fintype = IDNFIN_NORMAL;
}
if (!VALID_FINOPT(finopt)) {
PR_PROTO("%s:%d: WARNING invalid finopt (%d) -> %s(%d)\n",
- proc, domid, (int)finopt,
- idnfinopt_str[IDNFIN_OPT_UNLINK],
- (int)IDNFIN_OPT_UNLINK);
+ proc, domid, (int)finopt,
+ idnfinopt_str[IDNFIN_OPT_UNLINK],
+ (int)IDNFIN_OPT_UNLINK);
finopt = IDNFIN_OPT_UNLINK;
}
@@ -3421,7 +3398,7 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
fincpuid = FIN_MASTER_CPUID(finmaster);
if ((finarg != IDNFIN_ARG_NONE) &&
- !DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
+ !DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
idnsb_error_t idnerr;
INIT_IDNKERR(&idnerr);
@@ -3459,36 +3436,34 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
DOMAINSET_DEL(domset, domid);
idn_update_op(IDNOP_ERROR, DOMAINSET_ALL,
- &idnerr);
+ &idnerr);
PR_HITLIST("%s:%d: unlink_domainset(%x) "
- "due to CFG error (relink=%x, "
- "hitlist=%x)\n", proc, domid, domset,
- idn.domset.ds_relink,
- idn.domset.ds_hitlist);
+ "due to CFG error (relink=%x, "
+ "hitlist=%x)\n", proc, domid, domset,
+ idn.domset.ds_relink,
+ idn.domset.ds_hitlist);
idn_unlink_domainset(domset, IDNFIN_NORMAL,
- finarg, IDNFIN_OPT_UNLINK,
- BOARDSET_ALL);
+ finarg, IDNFIN_OPT_UNLINK, BOARDSET_ALL);
IDN_DLOCK_EXCL(domid);
}
PR_HITLIST("%s:%d: CFG error, (conn=%x, relink=%x, "
- "hitlist=%x)\n",
- proc, domid, idn.domset.ds_connected,
- idn.domset.ds_relink, idn.domset.ds_hitlist);
+ "hitlist=%x)\n",
+ proc, domid, idn.domset.ds_connected,
+ idn.domset.ds_relink, idn.domset.ds_hitlist);
}
idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
}
- if ((finmasterid != IDN_NIL_DOMID) &&
- (!VALID_DOMAINID(finmasterid) ||
- DOMAIN_IN_SET(idn.domset.ds_hitlist, domid))) {
+ if ((finmasterid != IDN_NIL_DOMID) && (!VALID_DOMAINID(finmasterid) ||
+ DOMAIN_IN_SET(idn.domset.ds_hitlist, domid))) {
PR_HITLIST("%s:%d: finmasterid = %d -> -1, relink=%x, "
- "hitlist=%x\n",
- proc, domid, finmasterid, idn.domset.ds_relink,
- idn.domset.ds_hitlist);
+ "hitlist=%x\n",
+ proc, domid, finmasterid, idn.domset.ds_relink,
+ idn.domset.ds_hitlist);
PR_PROTO("%s:%d: WARNING invalid finmasterid (%d) -> -1\n",
- proc, domid, finmasterid);
+ proc, domid, finmasterid);
finmasterid = IDN_NIL_DOMID;
}
@@ -3497,20 +3472,19 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if ((finopt == IDNFIN_OPT_RELINK) && (idn.state != IDNGS_DISCONNECT)) {
DOMAINSET_ADD(idn.domset.ds_relink, domid);
IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
- idn.domset.ds_relink);
+ idn.domset.ds_relink);
} else {
DOMAINSET_DEL(idn.domset.ds_relink, domid);
DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
}
if ((domid == IDN_GET_NEW_MASTERID()) &&
- !DOMAIN_IN_SET(idn.domset.ds_relink, domid)) {
+ !DOMAIN_IN_SET(idn.domset.ds_relink, domid)) {
IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
}
- if ((idn.state != IDNGS_DISCONNECT) &&
- (idn.state != IDNGS_RECONFIG) &&
- (domid == IDN_GET_MASTERID())) {
+ if ((idn.state != IDNGS_DISCONNECT) && (idn.state != IDNGS_RECONFIG) &&
+ (domid == IDN_GET_MASTERID())) {
domainset_t dis_set, master_candidates;
IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs, gk_reconfig_last);
@@ -3519,14 +3493,14 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
IDN_GUNLOCK();
if ((finmasterid != IDN_NIL_DOMID) &&
- (finmasterid != idn.localid)) {
+ (finmasterid != idn.localid)) {
if (finmasterid != domid)
IDN_DLOCK_EXCL(finmasterid);
if (idn_open_domain(finmasterid, fincpuid, 0) < 0) {
cmn_err(CE_WARN,
- "IDN: 205: (%s) failed to "
- "open-domain(%d,%d)",
- proc, finmasterid, fincpuid);
+ "IDN: 205: (%s) failed to "
+ "open-domain(%d,%d)",
+ proc, finmasterid, fincpuid);
if (finmasterid != domid)
IDN_DUNLOCK(finmasterid);
finmasterid = IDN_NIL_DOMID;
@@ -3540,10 +3514,10 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
int m;
master_candidates = idn.domset.ds_trans_on |
- idn.domset.ds_connected |
- idn.domset.ds_relink;
+ idn.domset.ds_connected |
+ idn.domset.ds_relink;
master_candidates &= ~(idn.domset.ds_trans_off &
- ~idn.domset.ds_relink);
+ ~idn.domset.ds_relink);
DOMAINSET_DEL(master_candidates, domid);
/*
* Local domain gets to participate also.
@@ -3561,7 +3535,7 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
DOMAINSET_DEL(dis_set, domid);
idn_unlink_domainset(dis_set, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
- IDNFIN_OPT_RELINK, BOARDSET_ALL);
+ IDNFIN_OPT_RELINK, BOARDSET_ALL);
} else {
IDN_GUNLOCK();
}
@@ -3649,14 +3623,13 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (dp->dsync.s_cmd != IDNSYNC_DISCONNECT) {
idn_sync_exit(domid, IDNSYNC_CONNECT);
- idn_sync_enter(domid, IDNSYNC_DISCONNECT,
- DOMAINSET_ALL, my_ready_set,
- idn_xstate_transfunc,
- (void *)IDNP_FIN);
+ idn_sync_enter(domid, IDNSYNC_DISCONNECT, DOMAINSET_ALL,
+ my_ready_set, idn_xstate_transfunc, (void *)IDNP_FIN);
}
- query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT,
- ready_set, IDNSYNC_REG_REG);
+ query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT, ready_set,
+ IDNSYNC_REG_REG);
+
/*
* No need to query this domain as he's already
* in the FIN sequence.
@@ -3673,7 +3646,7 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
int d;
my_ready_set = idn.domset.ds_ready_off |
- ~idn.domset.ds_connected;
+ ~idn.domset.ds_connected;
for (d = 0; d < MAX_DOMAINS; d++) {
if (!DOMAIN_IN_SET(query_set, d))
@@ -3691,8 +3664,7 @@ idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
IDN_SYNC_QUERY_UPDATE(domid, d);
idn_send_fin(d, NULL, IDNFIN_QUERY, IDNFIN_ARG_NONE,
- IDNFIN_OPT_NONE, my_ready_set,
- NIL_FIN_MASTER);
+ IDNFIN_OPT_NONE, my_ready_set, NIL_FIN_MASTER);
IDN_DUNLOCK(d);
}
}
@@ -3715,7 +3687,7 @@ idn_error_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
* we're forcing a hard disconnect.
*/
if ((idn_domain[domid].dfin != IDNFIN_FORCE_HARD) &&
- (msg & IDNP_MSGTYPE_MASK)) {
+ (msg & IDNP_MSGTYPE_MASK)) {
idn_msgtype_t mt;
idn_xdcargs_t nargs;
@@ -3729,7 +3701,7 @@ idn_error_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
idn_retry_submit(idn_retry_fin, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_FIN]);
+ idn_msg_retrytime[(int)IDNRETRY_FIN]);
}
static void
@@ -3747,15 +3719,14 @@ idn_action_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
ASSERT(IDN_SYNC_IS_LOCKED());
ASSERT(IDN_DLOCK_IS_HELD(domid));
- my_ready_set = dp->dsync.s_set_rdy |
- idn.domset.ds_ready_off |
- ~idn.domset.ds_connected;
+ my_ready_set = dp->dsync.s_set_rdy | idn.domset.ds_ready_off |
+ ~idn.domset.ds_connected;
ASSERT(xargs[0] != (uint_t)IDNFIN_QUERY);
finarg = GET_XARGS_FIN_ARG(xargs);
finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
- IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
+ IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
@@ -3778,16 +3749,16 @@ idn_action_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
}
idn_xphase_transition(domid, &mt, xargs);
} else if (!msg) {
- idn_send_fin(domid, NULL, dp->dfin, finarg,
- finopt, my_ready_set, finmaster);
+ idn_send_fin(domid, NULL, dp->dfin, finarg, finopt,
+ my_ready_set, finmaster);
} else if ((msg & IDNP_ACKNACK_MASK) == 0) {
/*
* fin
*/
mt.mt_mtype = IDNP_FIN | IDNP_ACK;
mt.mt_atype = 0;
- idn_send_fin(domid, &mt, dp->dfin, finarg,
- finopt, my_ready_set, finmaster);
+ idn_send_fin(domid, &mt, dp->dfin, finarg, finopt,
+ my_ready_set, finmaster);
} else {
uint_t token;
/*
@@ -3795,7 +3766,7 @@ idn_action_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
*/
token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
idn_retry_submit(idn_retry_fin, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_FIN]);
+ idn_msg_retrytime[(int)IDNRETRY_FIN]);
}
}
@@ -3836,7 +3807,7 @@ idn_check_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if ((finopt == IDNFIN_OPT_RELINK) && (idn.state != IDNGS_DISCONNECT)) {
DOMAINSET_ADD(idn.domset.ds_relink, domid);
IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
- idn.domset.ds_relink);
+ idn.domset.ds_relink);
} else {
DOMAINSET_DEL(idn.domset.ds_relink, domid);
}
@@ -3883,7 +3854,7 @@ idn_check_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
DOMAINSET_ADD(idn.domset.ds_ready_off, domid);
query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT,
- ready_set, IDNSYNC_REG_REG);
+ ready_set, IDNSYNC_REG_REG);
/*
* No need to query this domain as he's already
* in the FIN sequence.
@@ -3901,7 +3872,7 @@ idn_check_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
domainset_t my_ready_set;
my_ready_set = idn.domset.ds_ready_off |
- ~idn.domset.ds_connected;
+ ~idn.domset.ds_connected;
for (d = 0; d < MAX_DOMAINS; d++) {
if (!DOMAIN_IN_SET(query_set, d))
@@ -3919,8 +3890,7 @@ idn_check_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
IDN_SYNC_QUERY_UPDATE(domid, d);
idn_send_fin(d, NULL, IDNFIN_QUERY, IDNFIN_ARG_NONE,
- IDNFIN_OPT_NONE, my_ready_set,
- NIL_FIN_MASTER);
+ IDNFIN_OPT_NONE, my_ready_set, NIL_FIN_MASTER);
IDN_DUNLOCK(d);
}
}
@@ -3943,7 +3913,7 @@ idn_error_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
* we're forcing a hard disconnect.
*/
if ((idn_domain[domid].dfin != IDNFIN_FORCE_HARD) &&
- (msg & IDNP_MSGTYPE_MASK)) {
+ (msg & IDNP_MSGTYPE_MASK)) {
idn_msgtype_t mt;
idn_xdcargs_t nargs;
@@ -3957,7 +3927,7 @@ idn_error_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
idn_retry_submit(idn_retry_fin, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_FIN]);
+ idn_msg_retrytime[(int)IDNRETRY_FIN]);
}
static void
@@ -3978,13 +3948,12 @@ idn_action_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
- IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
+ IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
finarg = GET_XARGS_FIN_ARG(xargs);
- my_ready_set = dp->dsync.s_set_rdy |
- idn.domset.ds_ready_off |
- ~idn.domset.ds_connected;
+ my_ready_set = dp->dsync.s_set_rdy | idn.domset.ds_ready_off |
+ ~idn.domset.ds_connected;
IDN_GLOCK_SHARED();
new_masterid = IDN_GET_NEW_MASTERID();
@@ -4004,8 +3973,8 @@ idn_action_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
} else {
mt.mt_mtype = IDNP_FIN | IDNP_ACK;
mt.mt_atype = 0;
- idn_send_fin(domid, &mt, dp->dfin, finarg,
- finopt, my_ready_set, finmaster);
+ idn_send_fin(domid, &mt, dp->dfin, finarg, finopt,
+ my_ready_set, finmaster);
}
} else if (msg & IDNP_MSGTYPE_MASK) {
/*
@@ -4031,7 +4000,7 @@ idn_action_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
*/
token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
idn_retry_submit(idn_retry_fin, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_FIN]);
+ idn_msg_retrytime[(int)IDNRETRY_FIN]);
}
}
@@ -4051,7 +4020,7 @@ idn_action_fin_rcvd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
*/
token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
idn_retry_submit(idn_retry_fin, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_FIN]);
+ idn_msg_retrytime[(int)IDNRETRY_FIN]);
}
}
@@ -4120,18 +4089,17 @@ idn_final_fin(int domid)
if (idn.state == IDNGS_RECONFIG)
new_masterid = IDN_GET_NEW_MASTERID();
- if ((idn.domset.ds_trans_on |
- idn.domset.ds_trans_off |
- idn.domset.ds_relink) == 0) {
+ if ((idn.domset.ds_trans_on | idn.domset.ds_trans_off |
+ idn.domset.ds_relink) == 0) {
PR_HITLIST("%s:%d: HITLIST %x -> 0\n",
- proc, domid, idn.domset.ds_hitlist);
+ proc, domid, idn.domset.ds_hitlist);
idn.domset.ds_hitlist = 0;
}
if (idn.domset.ds_connected || idn.domset.ds_trans_off) {
PR_PROTO("%s:%d: ds_connected = 0x%x, ds_trans_off = 0x%x\n",
- proc, domid, idn.domset.ds_connected,
- idn.domset.ds_trans_off);
+ proc, domid, idn.domset.ds_connected,
+ idn.domset.ds_trans_off);
IDN_GUNLOCK();
goto fin_done;
}
@@ -4141,7 +4109,7 @@ idn_final_fin(int domid)
if (idn.domset.ds_trans_on != 0) {
ASSERT((idn.state != IDNGS_DISCONNECT) &&
- (idn.state != IDNGS_OFFLINE));
+ (idn.state != IDNGS_OFFLINE));
switch (idn.state) {
case IDNGS_CONNECT:
@@ -4177,9 +4145,9 @@ idn_final_fin(int domid)
case IDNGS_DISCONNECT:
case IDNGS_OFFLINE:
cmn_err(CE_WARN,
- "IDN: 211: disconnect domain %d, "
- "unexpected Gstate (%s)",
- domid, idngs_str[idn.state]);
+ "IDN: 211: disconnect domain %d, "
+ "unexpected Gstate (%s)",
+ domid, idngs_str[idn.state]);
IDN_DUNLOCK(idn.localid);
IDN_GUNLOCK();
goto fin_done;
@@ -4190,9 +4158,9 @@ idn_final_fin(int domid)
* Go into FATAL state?
*/
cmn_err(CE_PANIC,
- "IDN: 212: disconnect domain %d, "
- "bad Gstate (%d)",
- domid, idn.state);
+ "IDN: 212: disconnect domain %d, "
+ "bad Gstate (%d)",
+ domid, idn.state);
/* not reached */
break;
}
@@ -4218,7 +4186,7 @@ idn_final_fin(int domid)
IDN_GSTATE_TRANSITION(next_gstate);
ASSERT((idn.state == IDNGS_OFFLINE) ?
- (IDN_GET_MASTERID() == IDN_NIL_DOMID) : 1);
+ (IDN_GET_MASTERID() == IDN_NIL_DOMID) : 1);
IDN_GUNLOCK();
@@ -4239,7 +4207,7 @@ idn_final_fin(int domid)
* with the master only.
*/
relinkset = (new_masterid == idn.localid) ?
- idn.domset.ds_relink : DOMAINSET(new_masterid);
+ idn.domset.ds_relink : DOMAINSET(new_masterid);
DOMAINSET_DEL(relinkset, idn.localid);
@@ -4274,18 +4242,18 @@ idn_final_fin(int domid)
if (lock_held)
IDN_DUNLOCK(d);
cmn_err(CE_WARN,
- "IDN: 205: (%s.1) failed to "
- "open-domain(%d,%d)",
- proc, domid, -1);
+ "IDN: 205: (%s.1) failed to "
+ "open-domain(%d,%d)",
+ proc, domid, -1);
DOMAINSET_DEL(idn.domset.ds_relink, d);
} else {
if (lock_held)
IDN_DUNLOCK(d);
PR_PROTO("%s:%d: failed to "
- "re-open domain %d "
- "(cpu %d) [rv = %d]\n",
- proc, domid, d, idn_domain[d].dcpu,
- rv);
+ "re-open domain %d "
+ "(cpu %d) [rv = %d]\n",
+ proc, domid, d, idn_domain[d].dcpu,
+ rv);
}
}
}
@@ -4299,9 +4267,9 @@ fin_done:
(void) idn_connect(domid);
} else if (rv < 0) {
cmn_err(CE_WARN,
- "IDN: 205: (%s.2) failed to "
- "open-domain(%d,%d)",
- proc, domid, -1);
+ "IDN: 205: (%s.2) failed to "
+ "open-domain(%d,%d)",
+ proc, domid, -1);
DOMAINSET_DEL(idn.domset.ds_relink, domid);
}
}
@@ -4330,7 +4298,7 @@ idn_exit_fin(int domid, uint_t msgtype)
IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
idn_retry_submit(idn_retry_fin, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_FIN]);
+ idn_msg_retrytime[(int)IDNRETRY_FIN]);
}
/*
@@ -4364,8 +4332,8 @@ idn_xphase_transition(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
dp = &idn_domain[domid];
if ((xp = dp->dxp) == NULL) {
PR_PROTO("%s:%d: WARNING: domain xsp is NULL (msg = %s, "
- "msgarg = %s) <<<<<<<<<<<<\n",
- proc, domid, mstr, astr);
+ "msgarg = %s) <<<<<<<<<<<<\n",
+ proc, domid, mstr, astr);
return (-1);
}
o_xstate = dp->dxstate;
@@ -4376,10 +4344,10 @@ idn_xphase_transition(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
msgtype = msgarg & IDNP_MSGTYPE_MASK;
if ((o_xstate == IDNXS_PEND) && msg &&
- ((msg & IDNP_ACKNACK_MASK) == msg)) {
+ ((msg & IDNP_ACKNACK_MASK) == msg)) {
PR_PROTO("%s:%d: unwanted acknack received (o_xstate = %s, "
- "msg = %s/%s - dropping message\n",
- proc, domid, idnxs_str[(int)o_xstate], mstr, astr);
+ "msg = %s/%s - dropping message\n",
+ proc, domid, idnxs_str[(int)o_xstate], mstr, astr);
return (0);
}
@@ -4389,8 +4357,8 @@ idn_xphase_transition(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
*/
if (idn_next_xstate(o_xstate, -1, msg) == IDNXS_NIL) {
PR_PROTO("%s:%d: WARNING: o_xstate = %s, msg = %s -> NIL "
- "<<<<<<<<<\n",
- proc, domid, idnxs_str[(int)o_xstate], mstr);
+ "<<<<<<<<<\n",
+ proc, domid, idnxs_str[(int)o_xstate], mstr);
if (xfunc)
(*xfunc)(domid, msgtype);
return (-1);
@@ -4408,10 +4376,10 @@ idn_xphase_transition(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
INUM2STR(xp->xt_msgtype, xstr);
INUM2STR(msgtype, tstr);
PR_PROTO("%s:%d: WARNING: msg expected %s(0x%x), "
- "actual %s(0x%x) [msg=%s(0x%x), "
- "msgarg=%s(0x%x)]\n",
- proc, domid, xstr, xp->xt_msgtype,
- tstr, msgtype, mstr, msg, astr, msgarg);
+ "actual %s(0x%x) [msg=%s(0x%x), "
+ "msgarg=%s(0x%x)]\n",
+ proc, domid, xstr, xp->xt_msgtype,
+ tstr, msgtype, mstr, msg, astr, msgarg);
if (xfunc)
(*xfunc)(domid, msgtype);
return (-1);
@@ -4433,8 +4401,8 @@ idn_xphase_transition(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (n_xstate == IDNXS_NIL) {
PR_PROTO("%s:%d: WARNING: n_xstate = %s, msg = %s -> NIL "
- "<<<<<<<<<\n",
- proc, domid, idnxs_str[(int)n_xstate], mstr);
+ "<<<<<<<<<\n",
+ proc, domid, idnxs_str[(int)n_xstate], mstr);
if (xfunc)
(*xfunc)(domid, msgtype);
return (-1);
@@ -4480,18 +4448,18 @@ idn_xstate_transfunc(int domid, void *transarg)
default:
PR_PROTO("%s:%d: ERROR: unknown msg (0x%x) <<<<<<<<\n",
- proc, domid, msg);
+ proc, domid, msg);
return (0);
}
token = IDN_RETRY_TOKEN(domid, (msg == IDNP_CON) ?
- IDNRETRY_CON : IDNRETRY_FIN);
+ IDNRETRY_CON : IDNRETRY_FIN);
if (msg == IDNP_CON)
idn_retry_submit(idn_retry_con, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_CON]);
+ idn_msg_retrytime[(int)IDNRETRY_CON]);
else
idn_retry_submit(idn_retry_fin, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_FIN]);
+ idn_msg_retrytime[(int)IDNRETRY_FIN]);
return (1);
}
@@ -4500,9 +4468,8 @@ idn_xstate_transfunc(int domid, void *transarg)
* Entered and returns w/DLOCK & SYNC_LOCK held.
*/
static void
-idn_sync_enter(int domid, idn_synccmd_t cmd,
- domainset_t xset, domainset_t rset,
- int (*transfunc)(), void *transarg)
+idn_sync_enter(int domid, idn_synccmd_t cmd, domainset_t xset,
+ domainset_t rset, int (*transfunc)(), void *transarg)
{
int z;
idn_syncop_t *sp;
@@ -4517,7 +4484,7 @@ idn_sync_enter(int domid, idn_synccmd_t cmd,
zp = &idn.sync.sz_zone[z];
PR_SYNC("%s:%d: cmd=%s(%d), z=%d, xs=0x%x, rx=0x%x, cnt=%d\n",
- proc, domid, idnsync_str[cmd], cmd, z, xset, rset, zp->sc_cnt);
+ proc, domid, idnsync_str[cmd], cmd, z, xset, rset, zp->sc_cnt);
sp = &idn_domain[domid].dsync;
@@ -4556,7 +4523,7 @@ idn_sync_exit(int domid, idn_synccmd_t cmd)
zone = IDN_SYNC_GETZONE(cmd);
PR_SYNC("%s:%d: cmd=%s(%d) (z=%d, zone=%d)\n",
- proc, domid, idnsync_str[cmd], cmd, z, zone);
+ proc, domid, idnsync_str[cmd], cmd, z, zone);
#ifdef DEBUG
if (z != -1) {
@@ -4569,11 +4536,11 @@ idn_sync_exit(int domid, idn_synccmd_t cmd)
tot_queries += qv;
tot_domains++;
PR_SYNC("%s:%d: query_count = %d\n",
- proc, domid, qv);
+ proc, domid, qv);
}
}
PR_SYNC("%s:%d: tot_queries = %d, tot_domaines = %d\n",
- proc, domid, tot_queries, tot_domains);
+ proc, domid, tot_queries, tot_domains);
}
#endif /* DEBUG */
@@ -4613,16 +4580,16 @@ idn_sync_exit(int domid, idn_synccmd_t cmd)
DOMAINSET_DEL(sp->s_set_rdy, domid);
if ((sp->s_set_exp == sp->s_set_rdy) &&
- sp->s_transfunc) {
+ sp->s_transfunc) {
int delok;
ASSERT(sp->s_domid != domid);
PR_SYNC("%s:%d invoking transfunc "
- "for domain %d\n",
- proc, domid, sp->s_domid);
+ "for domain %d\n",
+ proc, domid, sp->s_domid);
delok = (*sp->s_transfunc)(sp->s_domid,
- sp->s_transarg);
+ sp->s_transarg);
if (delok) {
*spp = sp->s_next;
sp->s_next = NULL;
@@ -4638,8 +4605,8 @@ idn_sync_exit(int domid, idn_synccmd_t cmd)
* Entered and returns w/DLOCK & SYNC_LOCK held.
*/
static domainset_t
-idn_sync_register(int domid, idn_synccmd_t cmd,
- domainset_t ready_set, idn_syncreg_t regtype)
+idn_sync_register(int domid, idn_synccmd_t cmd, domainset_t ready_set,
+ idn_syncreg_t regtype)
{
int z;
idn_synczone_t *zp;
@@ -4652,7 +4619,7 @@ idn_sync_register(int domid, idn_synccmd_t cmd,
if ((z = IDN_SYNC_GETZONE(cmd)) == -1) {
PR_SYNC("%s:%d: ERROR: unexpected sync cmd(%d)\n",
- proc, domid, cmd);
+ proc, domid, cmd);
return (0);
}
@@ -4666,10 +4633,10 @@ idn_sync_register(int domid, idn_synccmd_t cmd,
zp = &idn.sync.sz_zone[z];
PR_SYNC("%s:%d: cmd=%s(%d), z=%d, rset=0x%x, "
- "regtype=%s(%d), sc_op=%s\n",
- proc, domid, idnsync_str[cmd], cmd, z, ready_set,
- idnreg_str[regtype], regtype,
- zp->sc_op ? idnsync_str[zp->sc_op->s_cmd] : "NULL");
+ "regtype=%s(%d), sc_op=%s\n",
+ proc, domid, idnsync_str[cmd], cmd, z, ready_set,
+ idnreg_str[regtype], regtype,
+ zp->sc_op ? idnsync_str[zp->sc_op->s_cmd] : "NULL");
for (spp = &zp->sc_op; *spp; spp = nspp) {
sp = *spp;
@@ -4678,7 +4645,7 @@ idn_sync_register(int domid, idn_synccmd_t cmd,
if (regtype == IDNSYNC_REG_NEW) {
DOMAINSET_ADD(sp->s_set_exp, domid);
PR_SYNC("%s:%d: adding new to %d (exp=0x%x)\n",
- proc, domid, sp->s_domid, sp->s_set_exp);
+ proc, domid, sp->s_domid, sp->s_set_exp);
} else if (regtype == IDNSYNC_REG_QUERY) {
query_set |= ~sp->s_set_rdy & sp->s_set_exp;
continue;
@@ -4705,9 +4672,9 @@ idn_sync_register(int domid, idn_synccmd_t cmd,
DOMAINSET_ADD(sp->s_set_rdy, domid);
PR_SYNC("%s:%d: mark READY for domain %d "
- "(r=0x%x, x=0x%x)\n",
- proc, domid, sp->s_domid,
- sp->s_set_rdy, sp->s_set_exp);
+ "(r=0x%x, x=0x%x)\n",
+ proc, domid, sp->s_domid,
+ sp->s_set_rdy, sp->s_set_exp);
query_set |= ~sp->s_set_rdy & sp->s_set_exp;
@@ -4716,9 +4683,9 @@ idn_sync_register(int domid, idn_synccmd_t cmd,
if (sp->s_msg == 0) {
sp->s_msg = 1;
PR_SYNC("%s:%d: >>>>>>>>>>> DOMAIN %d "
- "ALL CHECKED IN (0x%x)\n",
- proc, domid, sp->s_domid,
- sp->s_set_exp);
+ "ALL CHECKED IN (0x%x)\n",
+ proc, domid, sp->s_domid,
+ sp->s_set_exp);
}
#endif /* DEBUG */
@@ -4726,10 +4693,10 @@ idn_sync_register(int domid, idn_synccmd_t cmd,
int delok;
PR_SYNC("%s:%d invoking transfunc "
- "for domain %d\n",
- proc, domid, sp->s_domid);
+ "for domain %d\n",
+ proc, domid, sp->s_domid);
delok = (*sp->s_transfunc)(sp->s_domid,
- sp->s_transarg);
+ sp->s_transarg);
if (delok) {
*spp = sp->s_next;
sp->s_next = NULL;
@@ -4741,7 +4708,7 @@ idn_sync_register(int domid, idn_synccmd_t cmd,
}
PR_SYNC("%s:%d: trans_set = 0x%x, query_set = 0x%x -> 0x%x\n",
- proc, domid, trans_set, query_set, query_set & ~trans_set);
+ proc, domid, trans_set, query_set, query_set & ~trans_set);
query_set &= ~trans_set;
@@ -4761,14 +4728,14 @@ idn_sync_register_awol(int domid)
if ((z = IDN_SYNC_GETZONE(cmd)) == -1) {
PR_SYNC("%s:%d: ERROR: unexpected sync cmd(%d)\n",
- proc, domid, cmd);
+ proc, domid, cmd);
return;
}
zp = &idn.sync.sz_zone[z];
PR_SYNC("%s:%d: cmd=%s(%d), z=%d (domain %d = AWOL)\n",
- proc, domid, idnsync_str[cmd], cmd, z, domid);
+ proc, domid, idnsync_str[cmd], cmd, z, domid);
for (sp = zp->sc_op; sp; sp = sp->s_next) {
idn_domain_t *dp;
@@ -4777,7 +4744,7 @@ idn_sync_register_awol(int domid)
if (dp->dfin == IDNFIN_FORCE_HARD) {
DOMAINSET_ADD(sp->s_set_rdy, domid);
PR_SYNC("%s:%d: adding new to %d (rdy=0x%x)\n",
- proc, domid, sp->s_domid, sp->s_set_rdy);
+ proc, domid, sp->s_domid, sp->s_set_rdy);
}
}
}
@@ -4795,13 +4762,13 @@ idn_link_established(void *arg)
IDN_GLOCK_SHARED();
masterid = IDN_GET_MASTERID();
if ((masterid == IDN_NIL_DOMID) ||
- (idn_domain[masterid].dstate != IDNDS_CONNECTED)) {
+ (idn_domain[masterid].dstate != IDNDS_CONNECTED)) {
/*
* No point in doing this unless we're connected
* to the master.
*/
if ((masterid != IDN_NIL_DOMID) &&
- (idn.state == IDNGS_ONLINE)) {
+ (idn.state == IDNGS_ONLINE)) {
/*
* As long as we're still online keep
* trying.
@@ -4852,8 +4819,7 @@ idn_link_established(void *arg)
* immediately retried.
*/
int
-idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
- queue_t *wq, mblk_t *mp)
+idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr, queue_t *wq, mblk_t *mp)
{
int pktcnt = 0;
int msglen;
@@ -4888,7 +4854,7 @@ idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
msglen = msgdsize(mp);
PR_DATA("%s:%d: (netaddr 0x%x) msgsize=%ld, msgdsize=%d\n",
- proc, dst_domid, dst_netaddr.netaddr, msgsize(mp), msglen);
+ proc, dst_domid, dst_netaddr.netaddr, msgsize(mp), msglen);
ASSERT(wq->q_ptr);
@@ -4901,7 +4867,7 @@ idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
* No data to send. That was easy!
*/
PR_DATA("%s:%d: BAD msg length (%d) (netaddr 0x%x)\n",
- proc, dst_domid, msglen, dst_netaddr.netaddr);
+ proc, dst_domid, msglen, dst_netaddr.netaddr);
return (IDNXMIT_DROP);
}
@@ -4909,10 +4875,10 @@ idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
if (dst_domid == IDN_NIL_DOMID) {
cmn_err(CE_WARN,
- "IDN: 213: no destination specified "
- "(d=%d, c=%d, n=0x%x)",
- dst_domid, dst_netaddr.net.chan,
- dst_netaddr.net.netid);
+ "IDN: 213: no destination specified "
+ "(d=%d, c=%d, n=0x%x)",
+ dst_domid, dst_netaddr.net.chan,
+ dst_netaddr.net.netid);
IDN_KSTAT_INC(sip, si_nolink);
IDN_KSTAT_INC(sip, si_macxmt_errors);
rv = IDNXMIT_DROP;
@@ -4927,7 +4893,7 @@ idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
uchar_t echn;
echn = (uchar_t)
- ehp->ether_shost.ether_addr_octet[IDNETHER_CHANNEL];
+ ehp->ether_shost.ether_addr_octet[IDNETHER_CHANNEL];
ASSERT((uchar_t)channel == echn);
}
#endif /* DEBUG */
@@ -4979,7 +4945,7 @@ idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
dst_netaddr.net.netid = dp->dnetid;
(void) idndl_domain_etheraddr(dst_domid, channel,
- &ehp->ether_dhost);
+ &ehp->ether_dhost);
if (dst_domid == idn.localid) {
mblk_t *nmp;
@@ -4991,7 +4957,7 @@ idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
* transmitting to other domains.
*/
PR_DATA("%s:%d: dup broadcast msg for local domain\n",
- proc, dst_domid);
+ proc, dst_domid);
if ((nmp = copymsg(mp)) == NULL) {
/*
* Couldn't get a duplicate copy.
@@ -5009,8 +4975,8 @@ idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
if (dp->dnetid != dst_netaddr.net.netid) {
PR_DATA("%s:%d: dest netid (0x%x) != expected (0x%x)\n",
- proc, dst_domid, (uint_t)dst_netaddr.net.netid,
- (uint_t)dp->dnetid);
+ proc, dst_domid, (uint_t)dst_netaddr.net.netid,
+ (uint_t)dp->dnetid);
IDN_CHAN_UNLOCK_SEND(csp);
csp = NULL;
IDN_KSTAT_INC(sip, si_nolink);
@@ -5068,7 +5034,7 @@ idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
*/
IDN_CHAN_UNLOCK_SEND(csp);
not_active = idn_activate_channel(CHANSET(channel),
- IDNCHAN_OPEN);
+ IDNCHAN_OPEN);
if (!not_active) {
/*
* Only grab the lock for a recheck if we were
@@ -5100,7 +5066,7 @@ idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
* Channel is not active, should not be used.
*/
PR_DATA("%s:%d: dest channel %d NOT ACTIVE\n",
- proc, dst_domid, channel);
+ proc, dst_domid, channel);
IDN_KSTAT_INC(sip, si_linkdown);
rv = IDNXMIT_REQUEUE;
goto nocando;
@@ -5119,7 +5085,7 @@ idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
* whether it's active or not.
*/
PR_DATA("%s:%d: domain not registered with channel %d\n",
- proc, dst_domid, channel);
+ proc, dst_domid, channel);
/*
* Set csp to NULL to prevent in-progress update below.
*/
@@ -5184,7 +5150,7 @@ idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
int hdr_length;
mblk_t *nmp = mp;
uchar_t *rptr = mp->b_rptr +
- sizeof (struct ether_header);
+ sizeof (struct ether_header);
if (nmp->b_wptr <= rptr) {
/*
* Only the ethernet header was contained
@@ -5229,7 +5195,7 @@ idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
IDN_ASSIGN_DCPU(dp, dstport);
PR_DATA("%s:%d: (dstport %d) assigned %d\n",
- proc, dst_domid, (int)dstport, dp->dcpu);
+ proc, dst_domid, (int)dstport, dp->dcpu);
}
#endif /* XXX_DLPI_UNFRIENDLY */
@@ -5248,18 +5214,17 @@ retry:
* there are up to IDN_RECLAIM_MAX if it's set.
*/
reclaim_req = dp->diowanted ? -1 : IDN_RECLAIM_MAX ?
- MIN(dp->dio, IDN_RECLAIM_MAX) :
- dp->dio;
+ MIN(dp->dio, IDN_RECLAIM_MAX) : dp->dio;
(void) idn_reclaim_mboxdata(dst_domid, channel,
- reclaim_req);
+ reclaim_req);
}
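/*
 * A minimal restatement of the reclaim_req computation above, for
 * clarity (illustrative sketch only; it assumes IDN_RECLAIM_MAX == 0
 * means "no cap" and that -1 asks for every outstanding buffer):
 *
 *	if (dp->diowanted)
 *		reclaim_req = -1;
 *	else if (IDN_RECLAIM_MAX != 0)
 *		reclaim_req = MIN(dp->dio, IDN_RECLAIM_MAX);
 *	else
 *		reclaim_req = dp->dio;
 */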
if (dp->dio >= IDN_WINDOW_EMAX) {
if (lock_try(&dp->diocheck)) {
IDN_MSGTIMER_START(dst_domid, IDNP_DATA, 0,
- idn_msg_waittime[IDNP_DATA],
- &mt.mt_cookie);
+ idn_msg_waittime[IDNP_DATA],
+ &mt.mt_cookie);
/*
* We have exceeded the minimum window for
* outstanding I/O buffers to this domain.
@@ -5271,7 +5236,7 @@ retry:
* is backed up (dio is global).
*/
IDNXDC(dst_domid, &mt,
- (uint_t)dst_netaddr.net.chan, 0, 0, 0);
+ (uint_t)dst_netaddr.net.chan, 0, 0, 0);
}
/*
@@ -5284,20 +5249,20 @@ retry:
rv = IDNXMIT_DROP;
goto nocando;
}
+
/*
* Allocate a SMR I/O buffer and send it.
*/
-
if (msglen == 0) {
/*
* A zero length message is effectively a signal
* to just send an interrupt to the remote domain.
*/
IDN_MSGTIMER_START(dst_domid, IDNP_DATA, 0,
- idn_msg_waittime[IDNP_DATA],
- &mt.mt_cookie);
+ idn_msg_waittime[IDNP_DATA],
+ &mt.mt_cookie);
IDNXDC(dst_domid, &mt,
- (uint_t)dst_netaddr.net.chan, 0, 0, 0);
+ (uint_t)dst_netaddr.net.chan, 0, 0, 0);
}
for (; (msglen > 0) && mp; msglen -= xfersize) {
int xrv;
@@ -5312,8 +5277,8 @@ retry:
serrno = smr_buf_alloc(dst_domid, xfersize, &iobufp);
if (serrno) {
PR_DATA("%s:%d: failed to alloc SMR I/O buffer "
- "(serrno = %d)\n",
- proc, dst_domid, serrno);
+ "(serrno = %d)\n",
+ proc, dst_domid, serrno);
/*
* Failure is either due to a timeout waiting
* for the master to give us a slab, OR the
@@ -5374,7 +5339,7 @@ retry:
hdrp = IDN_BUF2HDR(iobufp);
bufoffset = (smr_offset_t)IDN_ALIGNPTR(sizeof (smr_pkthdr_t),
- data_rptr);
+ data_rptr);
/*
* If the alignment of bufoffset took us past the
* length of a smr_pkthdr_t then we need to possibly
@@ -5387,19 +5352,19 @@ retry:
#ifdef DEBUG
if (bufoffset != sizeof (smr_pkthdr_t))
PR_DATA("%s:%d: offset ALIGNMENT (%lu -> %u) "
- "(data_rptr = %p)\n",
- proc, dst_domid, sizeof (smr_pkthdr_t),
- bufoffset, data_rptr);
+ "(data_rptr = %p)\n",
+ proc, dst_domid, sizeof (smr_pkthdr_t),
+ bufoffset, data_rptr);
n_xfersize = MIN(xfersize, (IDN_SMR_BUFSIZE - bufoffset));
if (xfersize != n_xfersize) {
PR_DATA("%s:%d: xfersize ADJUST (%d -> %d)\n",
- proc, dst_domid, xfersize, n_xfersize);
+ proc, dst_domid, xfersize, n_xfersize);
cmn_err(CE_WARN, "%s: ERROR (xfersize = %d, > "
- "bufsize(%d)-bufoffset(%d) = %d)",
- proc, xfersize, IDN_SMR_BUFSIZE,
- bufoffset,
- IDN_SMR_BUFSIZE - bufoffset);
+ "bufsize(%d)-bufoffset(%d) = %d)",
+ proc, xfersize, IDN_SMR_BUFSIZE,
+ bufoffset,
+ IDN_SMR_BUFSIZE - bufoffset);
}
#endif /* DEBUG */
xfersize = MIN(xfersize, (int)(IDN_SMR_BUFSIZE - bufoffset));
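/*
 * Worked example of the clamp above (numbers purely illustrative;
 * assumes IDN_ALIGNPTR() only rounds sizeof (smr_pkthdr_t) up to match
 * the alignment of data_rptr): with IDN_SMR_BUFSIZE = 2048 and a header
 * padded from 32 to 40 bytes,
 *
 *	xfersize = MIN(xfersize, 2048 - 40) = MIN(xfersize, 2008)
 *
 * so any alignment padding is charged against the payload and a packet
 * can never overrun its SMR I/O buffer.
 */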
@@ -5426,12 +5391,12 @@ retry:
* mblk packet.
*/
PR_DATA("%s:%d: DATA XFER to chan %d FAILED "
- "(ret=%d)\n",
- proc, dst_domid, channel, xrv);
+ "(ret=%d)\n",
+ proc, dst_domid, channel, xrv);
smr_buf_free(dst_domid, iobufp, xfersize);
PR_DATA("%s:%d: (line %d) dec(dio) -> %d\n",
- proc, dst_domid, __LINE__, dp->dio);
+ proc, dst_domid, __LINE__, dp->dio);
rv = IDNXMIT_DROP;
IDN_KSTAT_INC(sip, si_macxmt_errors);
@@ -5449,13 +5414,13 @@ retry:
#ifdef DEBUG
if (pktcnt > 1)
cmn_err(CE_WARN,
- "%s: ERROR: sent multi-pkts (%d), len = %ld",
- proc, pktcnt, orig_msglen);
+ "%s: ERROR: sent multi-pkts (%d), len = %ld",
+ proc, pktcnt, orig_msglen);
#endif /* DEBUG */
PR_DATA("%s:%d: SENT %d packets (%d @ 0x%x)\n",
- proc, dst_domid, pktcnt, dst_netaddr.net.chan,
- dst_netaddr.net.netid);
+ proc, dst_domid, pktcnt, dst_netaddr.net.chan,
+ dst_netaddr.net.netid);
IDN_CHAN_LOCK_SEND(csp);
IDN_CHAN_SEND_DONE(csp);
@@ -5513,8 +5478,8 @@ idn_send_data_loopback(idn_netaddr_t dst_netaddr, queue_t *wq, mblk_t *mp)
if (dst_netaddr.net.netid != idn_domain[idn.localid].dnetid) {
PR_DATA("%s: dst_netaddr.net.netid 0x%x != local 0x%x\n",
- proc, dst_netaddr.net.netid,
- idn_domain[idn.localid].dnetid);
+ proc, dst_netaddr.net.netid,
+ idn_domain[idn.localid].dnetid);
rv = EADDRNOTAVAIL;
goto done;
}
@@ -5628,12 +5593,12 @@ idn_recv_proto(idn_protomsg_t *hp)
if (!VALID_MSGTYPE(mtype)) {
PR_PROTO("%s:%d: ERROR: invalid message type (0x%x)\n",
- proc, domid, mtype);
+ proc, domid, mtype);
return;
}
if (!VALID_CPUID(cpuid)) {
PR_PROTO("%s:%d: ERROR: invalid cpuid (%d)\n",
- proc, domid, cpuid);
+ proc, domid, cpuid);
return;
}
@@ -5655,9 +5620,9 @@ idn_recv_proto(idn_protomsg_t *hp)
inum2str(hp->m_msgtype, str);
cmn_err(CE_WARN,
- "IDN: 214: received message (%s[0x%x]) from self "
- "(domid %d)",
- str, hp->m_msgtype, domid);
+ "IDN: 214: received message (%s[0x%x]) from self "
+ "(domid %d)",
+ str, hp->m_msgtype, domid);
return;
}
@@ -5691,8 +5656,7 @@ idn_recv_proto(idn_protomsg_t *hp)
* nack/fin - if received cookie is 0.
*/
if (((msgtype & IDNP_MSGTYPE_MASK) != IDNP_NEGO) &&
- ((mtype != IDNP_FIN) ||
- (dcookie && dp->dcookie_recv))) {
+ ((mtype != IDNP_FIN) || (dcookie && dp->dcookie_recv))) {
if (dp->dcookie_recv != dcookie) {
dp->dcookie_errcnt++;
if (dp->dcookie_err == 0) {
@@ -5703,14 +5667,14 @@ idn_recv_proto(idn_protomsg_t *hp)
*/
dp->dcookie_err = 1;
cmn_err(CE_WARN,
- "IDN: 215: invalid cookie (0x%x) "
- "for message (0x%x) from domain %d",
- dcookie, hp->m_msgtype, domid);
+ "IDN: 215: invalid cookie (0x%x) "
+ "for message (0x%x) from domain %d",
+ dcookie, hp->m_msgtype, domid);
PR_PROTO("%s:%d: received cookie (0x%x), "
- "expected (0x%x) [errcnt = %d]\n",
- proc, domid, dcookie,
- dp->dcookie_recv, dp->dcookie_errcnt);
+ "expected (0x%x) [errcnt = %d]\n",
+ proc, domid, dcookie,
+ dp->dcookie_recv, dp->dcookie_errcnt);
}
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
@@ -5788,9 +5752,11 @@ idn_recv_proto(idn_protomsg_t *hp)
#ifdef DEBUG
cmn_err(CE_PANIC,
#else /* DEBUG */
- cmn_err(CE_WARN,
+ cmn_err(CE_WARN,
#endif /* DEBUG */
+ /* CSTYLED */
"IDN: 216: (0x%x)msgtype/(0x%x)acktype rcvd from "
+ /* CSTYLED */
"domain %d", msgtype, acktype, domid);
break;
}
@@ -5839,7 +5805,7 @@ idn_send_config(int domid, int phase)
if (dp->dcfgsnddone) {
if (!dp->dcfgrcvdone) {
IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
- cfg_waittime, NULL);
+ cfg_waittime, NULL);
}
return;
}
@@ -5847,9 +5813,9 @@ idn_send_config(int domid, int phase)
IDN_DLOCK_SHARED(idn.localid);
PR_PROTO("%s:%d: sending %s config (phase %d)\n",
- proc, domid,
- idn_domain[idn.localid].dvote.v.master ? "MASTER" : "SLAVE",
- phase);
+ proc, domid,
+ idn_domain[idn.localid].dvote.v.master ? "MASTER" : "SLAVE",
+ phase);
if (idn_domain[idn.localid].dvote.v.master)
rv = idn_send_master_config(domid, phase);
@@ -5865,11 +5831,11 @@ idn_send_config(int domid, int phase)
PR_PROTO("%s:%d: SEND config DONE\n", proc, domid);
if (!dp->dcfgrcvdone) {
IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
- cfg_waittime, NULL);
+ cfg_waittime, NULL);
}
} else {
IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
- cfg_waittime, NULL);
+ cfg_waittime, NULL);
}
}
}
@@ -5894,8 +5860,8 @@ idn_reset_mboxtbl(idn_mboxtbl_t *mtp)
}
static int
-idn_get_mbox_config(int domid, int *mindex,
- smr_offset_t *mtable, smr_offset_t *mdomain)
+idn_get_mbox_config(int domid, int *mindex, smr_offset_t *mtable,
+ smr_offset_t *mdomain)
{
idn_domain_t *dp, *ldp;
@@ -5995,19 +5961,18 @@ idn_send_master_config(int domid, int phase)
case 1:
mbox_table = mbox_domain = IDN_NIL_SMROFFSET;
- idn_get_mbox_config(domid, NULL, &mbox_table,
- &mbox_domain);
+ idn_get_mbox_config(domid, NULL, &mbox_table, &mbox_domain);
/*
* ----------------------------------------------------
* Send: SLABSIZE, DATAMBOX.DOMAIN, DATAMBOX.TABLE
* ----------------------------------------------------
*/
cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_SIZE,
- IDNCFGARG_SIZE_SLAB);
+ IDNCFGARG_SIZE_SLAB);
cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
- IDNCFGARG_DATAMBOX_DOMAIN);
+ IDNCFGARG_DATAMBOX_DOMAIN);
cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
- IDNCFGARG_DATAMBOX_TABLE);
+ IDNCFGARG_DATAMBOX_TABLE);
cfg_subtype.info.num = 3;
cfg_subtype.info.phase = phase;
dp->dcfgphase = phase;
@@ -6016,12 +5981,12 @@ idn_send_master_config(int domid, int phase)
ASSERT(mbox_table != IDN_NIL_SMROFFSET);
PR_PROTO("%s:%d:%d: sending SLABSIZE (%d), "
- "DATAMBOX.DOMAIN (0x%x), DATAMBOX.TABLE (0x%x)\n",
- proc, domid, phase, IDN_SLAB_BUFCOUNT, mbox_domain,
- mbox_table);
+ "DATAMBOX.DOMAIN (0x%x), DATAMBOX.TABLE (0x%x)\n",
+ proc, domid, phase, IDN_SLAB_BUFCOUNT, mbox_domain,
+ mbox_table);
IDNXDC(domid, &mt, cfg_subtype.val, IDN_SLAB_BUFCOUNT,
- mbox_domain, mbox_table);
+ mbox_domain, mbox_table);
break;
case 2:
@@ -6034,19 +5999,19 @@ idn_send_master_config(int domid, int phase)
*/
cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_NETID, 0);
cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_BARLAR,
- IDNCFGARG_BARLAR_BAR);
+ IDNCFGARG_BARLAR_BAR);
cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_BARLAR,
- IDNCFGARG_BARLAR_LAR);
+ IDNCFGARG_BARLAR_LAR);
cfg_subtype.info.num = 3;
cfg_subtype.info.phase = phase;
dp->dcfgphase = phase;
PR_PROTO("%s:%d:%d: sending NETID (%d), "
- "BARPFN/LARPFN (0x%x/0x%x)\n",
- proc, domid, phase, ldp->dnetid, barpfn, larpfn);
+ "BARPFN/LARPFN (0x%x/0x%x)\n",
+ proc, domid, phase, ldp->dnetid, barpfn, larpfn);
IDNXDC(domid, &mt, cfg_subtype.val,
- (uint_t)ldp->dnetid, barpfn, larpfn);
+ (uint_t)ldp->dnetid, barpfn, larpfn);
break;
case 3:
@@ -6059,19 +6024,19 @@ idn_send_master_config(int domid, int phase)
* ----------------------------------------------------
*/
cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_CPUSET,
- IDNCFGARG_CPUSET_UPPER);
+ IDNCFGARG_CPUSET_UPPER);
cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_CPUSET,
- IDNCFGARG_CPUSET_LOWER);
+ IDNCFGARG_CPUSET_LOWER);
cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_NMCADR, 0);
cfg_subtype.info.num = 3;
cfg_subtype.info.phase = phase;
dp->dcfgphase = phase;
PR_PROTO("%s:%d:%d: sending CPUSET (0x%x.%x), NMCADR (%d)\n",
- proc, domid, phase, cpus_u32, cpus_l32, nmcadr);
+ proc, domid, phase, cpus_u32, cpus_l32, nmcadr);
IDNXDC(domid, &mt, cfg_subtype.val,
- cpus_u32, cpus_l32, nmcadr);
+ cpus_u32, cpus_l32, nmcadr);
break;
case 4:
@@ -6082,19 +6047,19 @@ idn_send_master_config(int domid, int phase)
*/
cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_BOARDSET, 0);
cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_SIZE,
- IDNCFGARG_SIZE_MTU);
+ IDNCFGARG_SIZE_MTU);
cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_SIZE,
- IDNCFGARG_SIZE_BUF);
+ IDNCFGARG_SIZE_BUF);
cfg_subtype.info.num = 3;
cfg_subtype.info.phase = phase;
dp->dcfgphase = phase;
PR_PROTO("%s:%d:%d: sending BOARDSET (0x%x), MTU (0x%lx), "
- "BUFSIZE (0x%x)\n", proc, domid, phase,
- ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
+ "BUFSIZE (0x%x)\n", proc, domid, phase,
+ ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
IDNXDC(domid, &mt, cfg_subtype.val,
- ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
+ ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
break;
case 5:
@@ -6104,23 +6069,23 @@ idn_send_master_config(int domid, int phase)
* ----------------------------------------------------
*/
cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_DATASVR,
- IDNCFGARG_DATASVR_MAXNETS);
+ IDNCFGARG_DATASVR_MAXNETS);
cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_DATASVR,
- IDNCFGARG_DATASVR_MBXPERNET);
+ IDNCFGARG_DATASVR_MBXPERNET);
cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_OPTIONS,
- IDNCFGARG_CHECKSUM);
+ IDNCFGARG_CHECKSUM);
cfg_subtype.info.num = 3;
cfg_subtype.info.phase = phase;
dp->dcfgphase = phase;
PR_PROTO("%s:%d:%d: sending MAXNETS (%d), "
- "MBOXPERNET (%d), CKSUM (%d)\n",
- proc, domid, phase,
- IDN_MAX_NETS, IDN_MBOX_PER_NET,
- IDN_CHECKSUM);
+ "MBOXPERNET (%d), CKSUM (%d)\n",
+ proc, domid, phase,
+ IDN_MAX_NETS, IDN_MBOX_PER_NET,
+ IDN_CHECKSUM);
IDNXDC(domid, &mt, cfg_subtype.val,
- IDN_MAX_NETS, IDN_MBOX_PER_NET, IDN_CHECKSUM);
+ IDN_MAX_NETS, IDN_MBOX_PER_NET, IDN_CHECKSUM);
break;
case 6:
@@ -6130,7 +6095,7 @@ idn_send_master_config(int domid, int phase)
* ----------------------------------------------------
*/
cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_SIZE,
- IDNCFGARG_SIZE_NWR);
+ IDNCFGARG_SIZE_NWR);
mcadr[0] = IDN_NWR_SIZE;
m = 1;
@@ -6164,21 +6129,21 @@ idn_send_master_config(int domid, int phase)
if (m > 0) {
if (phase == 6) {
PR_PROTO("%s:%d:%d: sending NWRSIZE (%d), "
- "MCADRs (0x%x, 0x%x)\n",
- proc, domid, phase,
- mcadr[0], mcadr[1], mcadr[2]);
+ "MCADRs (0x%x, 0x%x)\n",
+ proc, domid, phase,
+ mcadr[0], mcadr[1], mcadr[2]);
} else {
PR_PROTO("%s:%d:%d: sending MCADRs "
- "(0x%x, 0x%x, 0x%x)\n",
- proc, domid, phase,
- mcadr[0], mcadr[1], mcadr[2]);
+ "(0x%x, 0x%x, 0x%x)\n",
+ proc, domid, phase,
+ mcadr[0], mcadr[1], mcadr[2]);
}
cfg_subtype.info.num = m;
cfg_subtype.info.phase = phase;
dp->dcfgphase = phase;
IDNXDC(domid, &mt, cfg_subtype.val,
- mcadr[0], mcadr[1], mcadr[2]);
+ mcadr[0], mcadr[1], mcadr[2]);
} else {
rv = 1;
}
@@ -6232,7 +6197,7 @@ idn_send_slave_config(int domid, int phase)
if (mbox_index == IDN_NIL_DOMID) {
ASSERT(mbox_domain != IDN_NIL_SMROFFSET);
cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
- IDNCFGARG_DATAMBOX_DOMAIN);
+ IDNCFGARG_DATAMBOX_DOMAIN);
} else {
/*
* Should only be sending Index to
@@ -6241,30 +6206,27 @@ idn_send_slave_config(int domid, int phase)
ASSERT(dp->dvote.v.master);
ASSERT(mbox_domain == IDN_NIL_SMROFFSET);
cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
- IDNCFGARG_DATAMBOX_INDEX);
+ IDNCFGARG_DATAMBOX_INDEX);
}
cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_DATASVR,
- IDNCFGARG_DATASVR_MAXNETS);
+ IDNCFGARG_DATASVR_MAXNETS);
cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_DATASVR,
- IDNCFGARG_DATASVR_MBXPERNET);
+ IDNCFGARG_DATASVR_MBXPERNET);
cfg_subtype.info.num = 3;
cfg_subtype.info.phase = phase;
dp->dcfgphase = phase;
PR_PROTO("%s:%d:%d: sending DATAMBOX.%s (0x%x), "
- "MAXNETS (%d), MBXPERNET (%d)\n",
- proc, domid, phase,
- (IDN_CFGPARAM_ARG(cfg_subtype.param.p[0])
- == IDNCFGARG_DATAMBOX_INDEX)
- ? "INDEX" : "DOMAIN",
- (mbox_index == IDN_NIL_DOMID)
- ? mbox_domain : mbox_index,
- IDN_MAX_NETS, IDN_MBOX_PER_NET);
+ "MAXNETS (%d), MBXPERNET (%d)\n",
+ proc, domid, phase,
+ (IDN_CFGPARAM_ARG(cfg_subtype.param.p[0])
+ == IDNCFGARG_DATAMBOX_INDEX) ? "INDEX" : "DOMAIN",
+ (mbox_index == IDN_NIL_DOMID) ? mbox_domain : mbox_index,
+ IDN_MAX_NETS, IDN_MBOX_PER_NET);
IDNXDC(domid, &mt, cfg_subtype.val,
- ((mbox_index == IDN_NIL_DOMID)
- ? mbox_domain : mbox_index),
- IDN_MAX_NETS, IDN_MBOX_PER_NET);
+ ((mbox_index == IDN_NIL_DOMID) ? mbox_domain : mbox_index),
+ IDN_MAX_NETS, IDN_MBOX_PER_NET);
break;
case 2:
@@ -6278,19 +6240,19 @@ idn_send_slave_config(int domid, int phase)
cfg_subtype.val = 0;
cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_NETID, 0);
cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_CPUSET,
- IDNCFGARG_CPUSET_UPPER);
+ IDNCFGARG_CPUSET_UPPER);
cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_CPUSET,
- IDNCFGARG_CPUSET_LOWER);
+ IDNCFGARG_CPUSET_LOWER);
cfg_subtype.info.num = 3;
cfg_subtype.info.phase = phase;
dp->dcfgphase = phase;
PR_PROTO("%s:%d:%d: sending NETID (%d), "
- "CPUSET (0x%x.%x)\n", proc, domid, phase,
- ldp->dnetid, cpus_u32, cpus_l32);
+ "CPUSET (0x%x.%x)\n", proc, domid, phase,
+ ldp->dnetid, cpus_u32, cpus_l32);
IDNXDC(domid, &mt, cfg_subtype.val,
- (uint_t)ldp->dnetid, cpus_u32, cpus_l32);
+ (uint_t)ldp->dnetid, cpus_u32, cpus_l32);
break;
case 3:
@@ -6302,20 +6264,20 @@ idn_send_slave_config(int domid, int phase)
cfg_subtype.val = 0;
cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_BOARDSET, 0);
cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_SIZE,
- IDNCFGARG_SIZE_MTU);
+ IDNCFGARG_SIZE_MTU);
cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_SIZE,
- IDNCFGARG_SIZE_BUF);
+ IDNCFGARG_SIZE_BUF);
cfg_subtype.info.num = 3;
cfg_subtype.info.phase = phase;
dp->dcfgphase = phase;
PR_PROTO("%s:%d:%d: sending BOARDSET (0x%x), MTU (0x%lx), "
- "BUFSIZE (0x%x)\n",
- proc, domid, phase, ldp->dhw.dh_boardset, IDN_MTU,
- IDN_SMR_BUFSIZE);
+ "BUFSIZE (0x%x)\n",
+ proc, domid, phase, ldp->dhw.dh_boardset, IDN_MTU,
+ IDN_SMR_BUFSIZE);
IDNXDC(domid, &mt, cfg_subtype.val,
- ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
+ ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
break;
case 4:
@@ -6326,22 +6288,22 @@ idn_send_slave_config(int domid, int phase)
*/
cfg_subtype.val = 0;
cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_SIZE,
- IDNCFGARG_SIZE_SLAB);
+ IDNCFGARG_SIZE_SLAB);
cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_OPTIONS,
- IDNCFGARG_CHECKSUM);
+ IDNCFGARG_CHECKSUM);
cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_SIZE,
- IDNCFGARG_SIZE_NWR);
+ IDNCFGARG_SIZE_NWR);
cfg_subtype.info.num = 3;
cfg_subtype.info.phase = phase;
dp->dcfgphase = phase;
PR_PROTO("%s:%d:%d: sending SLABSIZE (%d), CKSUM (%d), "
- "NWRSIZE (%d)\n",
- proc, domid, phase, IDN_SLAB_BUFCOUNT,
- IDN_CHECKSUM, IDN_NWR_SIZE);
+ "NWRSIZE (%d)\n",
+ proc, domid, phase, IDN_SLAB_BUFCOUNT,
+ IDN_CHECKSUM, IDN_NWR_SIZE);
IDNXDC(domid, &mt, cfg_subtype.val,
- IDN_SLAB_BUFCOUNT, IDN_CHECKSUM, IDN_NWR_SIZE);
+ IDN_SLAB_BUFCOUNT, IDN_CHECKSUM, IDN_NWR_SIZE);
break;
default:
@@ -6421,7 +6383,7 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
ASSERT(domid != idn.localid);
GET_XARGS(xargs, &cfg_subtype.val, &cfg_arg[0], &cfg_arg[1],
- &cfg_arg[2]);
+ &cfg_arg[2]);
cfg_arg[3] = 0;
dp = &idn_domain[domid];
@@ -6435,7 +6397,7 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
* timer continue and timeout if needed.
*/
PR_PROTO("%s:%d: WARNING state(%s) != CONFIG\n",
- proc, domid, idnds_str[dp->dstate]);
+ proc, domid, idnds_str[dp->dstate]);
return;
}
@@ -6450,7 +6412,7 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
phase = GET_XARGS_CFG_PHASE(xargs);
PR_PROTO("%s:%d: received ACK for CFG phase %d\n",
- proc, domid, phase);
+ proc, domid, phase);
if (phase != (int)dp->dcfgphase) {
/*
* Phase is not what we were
@@ -6460,7 +6422,7 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
* and reestablish the connection.
*/
IDN_MSGTIMER_START(domid, IDNP_CFG, dp->dcfgphase,
- idn_msg_waittime[IDNP_CFG], NULL);
+ idn_msg_waittime[IDNP_CFG], NULL);
} else {
idn_send_config(domid, phase + 1);
@@ -6474,7 +6436,7 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
bzero(xargs, sizeof (xargs));
idn_xphase_transition(domid, NULL,
- xargs);
+ xargs);
}
IDN_SYNC_UNLOCK();
}
@@ -6525,9 +6487,9 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
default:
cmn_err(CE_WARN,
- "IDN 217: unknown CFGARG type (%d) "
- "from domain %d",
- subtype_arg, domid);
+ "IDN 217: unknown CFGARG type (%d) "
+ "from domain %d",
+ subtype_arg, domid);
break;
}
IDN_GUNLOCK();
@@ -6590,7 +6552,7 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
case IDNCFG_BOARDSET:
if ((dp->dhw.dh_boardset & cfg_arg[p])
- == dp->dhw.dh_boardset) {
+ == dp->dhw.dh_boardset) {
/*
* Boardset better include what we
* already know about.
@@ -6657,7 +6619,7 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
}
IDN_DLOCK_EXCL(idn.localid);
ldp->dmbox.m_tbl = (idn_mboxtbl_t *)
- IDN_OFFSET2ADDR(cfg_arg[p]);
+ IDN_OFFSET2ADDR(cfg_arg[p]);
IDN_DUNLOCK(idn.localid);
dp->dncfgitems++;
RCVCFG("DATAMBOX.TABLE", cfg_arg[p]);
@@ -6668,7 +6630,7 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
!VALID_NWROFFSET(cfg_arg[p], 4))
break;
mbtp = (idn_mboxtbl_t *)
- IDN_OFFSET2ADDR(cfg_arg[p]);
+ IDN_OFFSET2ADDR(cfg_arg[p]);
mmp = dp->dmbox.m_send;
for (c = 0; c < IDN_MAX_NETS; c++) {
@@ -6781,7 +6743,7 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
}
#ifdef DEBUG
PR_PROTO("%s:%d: received %s (0x%x)\n",
- proc, domid, str ? str : "<empty>", val);
+ proc, domid, str ? str : "<empty>", val);
#endif /* DEBUG */
}
@@ -6836,7 +6798,7 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
* restart CFG timer if we've sent everything..
*/
IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
- idn_msg_waittime[IDNP_CFG], NULL);
+ idn_msg_waittime[IDNP_CFG], NULL);
}
break;
@@ -6867,10 +6829,10 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
*/
DOMAINSET_ADD(idn.domset.ds_relink, domid);
IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
- idn.domset.ds_relink);
+ idn.domset.ds_relink);
idn_disconnect(domid, IDNFIN_NORMAL,
- IDNFIN_ARG_CFGERR_FATAL,
- IDNFIN_SYNC_NO);
+ IDNFIN_ARG_CFGERR_FATAL,
+ IDNFIN_SYNC_NO);
}
IDN_SYNC_UNLOCK();
break;
@@ -6928,21 +6890,21 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
idn_update_op(IDNOP_ERROR, DOMAINSET_ALL, &idnerr);
PR_HITLIST("%s:%d: unlink_domainset(%x) due to "
- "CFG error (relink=%x, hitlist=%x)\n",
- proc, domid, domset, idn.domset.ds_relink,
- idn.domset.ds_hitlist);
+ "CFG error (relink=%x, hitlist=%x)\n",
+ proc, domid, domset, idn.domset.ds_relink,
+ idn.domset.ds_hitlist);
idn_unlink_domainset(domset, IDNFIN_NORMAL,
- CFGERR2FINARG(rv),
- IDNFIN_OPT_UNLINK,
- BOARDSET_ALL);
+ CFGERR2FINARG(rv),
+ IDNFIN_OPT_UNLINK,
+ BOARDSET_ALL);
IDN_SYNC_UNLOCK();
IDN_DLOCK_EXCL(domid);
} else {
PR_HITLIST("%s:%d: idn_disconnect(%d) due to CFG "
- "error (conn=%x, relink=%x, hitlist=%x)\n",
- proc, domid, domid, idn.domset.ds_connected,
- idn.domset.ds_relink, idn.domset.ds_hitlist);
+ "error (conn=%x, relink=%x, hitlist=%x)\n",
+ proc, domid, domid, idn.domset.ds_connected,
+ idn.domset.ds_relink, idn.domset.ds_hitlist);
/*
* If we have other connections then
* we're only going to blow away this
@@ -6952,7 +6914,7 @@ idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
DOMAINSET_DEL(idn.domset.ds_relink, domid);
idn_disconnect(domid, IDNFIN_NORMAL,
- CFGERR2FINARG(rv), IDNFIN_SYNC_NO);
+ CFGERR2FINARG(rv), IDNFIN_SYNC_NO);
IDN_SYNC_UNLOCK();
}
break;
@@ -6985,7 +6947,7 @@ idn_check_slave_config(int domid, uint_t *exp, uint_t *act)
ASSERT(dp->dstate == IDNDS_CONFIG);
PR_PROTO("%s:%d: number received %d, number expected %d\n",
- proc, domid, (int)dp->dncfgitems, IDN_SLAVE_NCFGITEMS);
+ proc, domid, (int)dp->dncfgitems, IDN_SLAVE_NCFGITEMS);
if ((int)dp->dncfgitems < IDN_SLAVE_NCFGITEMS)
return (CFG_CONTINUE);
@@ -7007,8 +6969,8 @@ idn_check_slave_config(int domid, uint_t *exp, uint_t *act)
* close connection.
*/
cmn_err(CE_WARN,
- "IDN: 218: missing some required config items from "
- "domain %d", domid);
+ "IDN: 218: missing some required config items from "
+ "domain %d", domid);
rv = CFG_FATAL;
goto done;
@@ -7016,8 +6978,8 @@ idn_check_slave_config(int domid, uint_t *exp, uint_t *act)
if (!valid_mtu(dp->dmtu)) {
cmn_err(CE_WARN,
- "IDN: 219: remote domain %d MTU (%d) invalid "
- "(local.mtu = %d)", dp->domid, dp->dmtu, ldp->dmtu);
+ "IDN: 219: remote domain %d MTU (%d) invalid "
+ "(local.mtu = %d)", dp->domid, dp->dmtu, ldp->dmtu);
*exp = (uint_t)ldp->dmtu;
*act = (uint_t)dp->dmtu;
@@ -7025,9 +6987,9 @@ idn_check_slave_config(int domid, uint_t *exp, uint_t *act)
}
if (!valid_bufsize(dp->dbufsize)) {
cmn_err(CE_WARN,
- "IDN: 220: remote domain %d BUFSIZE (%d) invalid "
- "(local.bufsize = %d)", dp->domid, dp->dbufsize,
- ldp->dbufsize);
+ "IDN: 220: remote domain %d BUFSIZE (%d) invalid "
+ "(local.bufsize = %d)", dp->domid, dp->dbufsize,
+ ldp->dbufsize);
*exp = (uint_t)ldp->dbufsize;
*act = (uint_t)dp->dbufsize;
@@ -7035,9 +6997,9 @@ idn_check_slave_config(int domid, uint_t *exp, uint_t *act)
}
if (!valid_slabsize((int)dp->dslabsize)) {
cmn_err(CE_WARN,
- "IDN: 221: remote domain %d SLABSIZE (%d) invalid "
- "(local.slabsize = %d)",
- dp->domid, dp->dslabsize, ldp->dslabsize);
+ "IDN: 221: remote domain %d SLABSIZE (%d) invalid "
+ "(local.slabsize = %d)",
+ dp->domid, dp->dslabsize, ldp->dslabsize);
*exp = (uint_t)ldp->dslabsize;
*act = (uint_t)dp->dslabsize;
@@ -7045,9 +7007,9 @@ idn_check_slave_config(int domid, uint_t *exp, uint_t *act)
}
if (!valid_nwrsize((int)dp->dnwrsize)) {
cmn_err(CE_WARN,
- "IDN: 223: remote domain %d NWRSIZE (%d) invalid "
- "(local.nwrsize = %d)",
- dp->domid, dp->dnwrsize, ldp->dnwrsize);
+ "IDN: 223: remote domain %d NWRSIZE (%d) invalid "
+ "(local.nwrsize = %d)",
+ dp->domid, dp->dnwrsize, ldp->dnwrsize);
*exp = (uint_t)ldp->dnwrsize;
*act = (uint_t)dp->dnwrsize;
@@ -7055,9 +7017,9 @@ idn_check_slave_config(int domid, uint_t *exp, uint_t *act)
}
if ((int)dp->dmaxnets != IDN_MAX_NETS) {
cmn_err(CE_WARN,
- "IDN: 224: remote domain %d MAX_NETS (%d) invalid "
- "(local.maxnets = %d)",
- dp->domid, (int)dp->dmaxnets, IDN_MAX_NETS);
+ "IDN: 224: remote domain %d MAX_NETS (%d) invalid "
+ "(local.maxnets = %d)",
+ dp->domid, (int)dp->dmaxnets, IDN_MAX_NETS);
*exp = (uint_t)IDN_MAX_NETS;
*act = (uint_t)dp->dmaxnets;
@@ -7065,9 +7027,9 @@ idn_check_slave_config(int domid, uint_t *exp, uint_t *act)
}
if ((int)dp->dmboxpernet != IDN_MBOX_PER_NET) {
cmn_err(CE_WARN,
- "IDN: 225: remote domain %d MBOX_PER_NET (%d) "
- "invalid (local.mboxpernet = %d)",
- dp->domid, (int)dp->dmboxpernet, IDN_MBOX_PER_NET);
+ "IDN: 225: remote domain %d MBOX_PER_NET (%d) "
+ "invalid (local.mboxpernet = %d)",
+ dp->domid, (int)dp->dmboxpernet, IDN_MBOX_PER_NET);
*exp = (uint_t)IDN_MBOX_PER_NET;
*act = (uint_t)dp->dmboxpernet;
@@ -7075,9 +7037,9 @@ idn_check_slave_config(int domid, uint_t *exp, uint_t *act)
}
if ((dp->dcksum - 1) != (uchar_t)IDN_CHECKSUM) {
cmn_err(CE_WARN,
- "IDN: 226: remote domain %d CHECKSUM flag (%d) "
- "mismatches local domain's (%d)",
- dp->domid, (int)dp->dcksum - 1, IDN_CHECKSUM);
+ "IDN: 226: remote domain %d CHECKSUM flag (%d) "
+ "mismatches local domain's (%d)",
+ dp->domid, (int)dp->dcksum - 1, IDN_CHECKSUM);
*exp = (uint_t)IDN_CHECKSUM;
*act = (uint_t)(dp->dcksum - 1);
@@ -7122,7 +7084,7 @@ idn_check_master_config(int domid, uint_t *exp, uint_t *act)
ASSERT(dp->dstate == IDNDS_CONFIG);
PR_PROTO("%s:%d: number received %d, minimum number expected %d\n",
- proc, domid, (int)dp->dncfgitems, IDN_MASTER_NCFGITEMS);
+ proc, domid, (int)dp->dncfgitems, IDN_MASTER_NCFGITEMS);
if ((int)dp->dncfgitems < IDN_MASTER_NCFGITEMS)
return (CFG_CONTINUE);
@@ -7140,7 +7102,7 @@ idn_check_master_config(int domid, uint_t *exp, uint_t *act)
* we're expecting.
*/
PR_PROTO("%s:%d: haven't received all MCADRs yet.\n",
- proc, domid);
+ proc, domid);
return (CFG_CONTINUE);
}
@@ -7172,8 +7134,8 @@ idn_check_master_config(int domid, uint_t *exp, uint_t *act)
* close connection.
*/
cmn_err(CE_WARN,
- "IDN: 227: missing some required config items from "
- "domain %d", domid);
+ "IDN: 227: missing some required config items from "
+ "domain %d", domid);
rv = CFG_FATAL;
goto done;
@@ -7198,10 +7160,10 @@ idn_check_master_config(int domid, uint_t *exp, uint_t *act)
* - Could reconfigure to use smaller SMR.
*/
cmn_err(CE_WARN,
- "IDN: 228: master's SMR (%ld) larger than "
- "local's SMR (%ld)",
- idn.smr.rempfnlim - idn.smr.rempfn,
- btop(MB2B(IDN_SMR_SIZE)));
+ "IDN: 228: master's SMR (%ld) larger than "
+ "local's SMR (%ld)",
+ idn.smr.rempfnlim - idn.smr.rempfn,
+ btop(MB2B(IDN_SMR_SIZE)));
*exp = (uint_t)IDN_SMR_SIZE;
*act = (uint_t)B2MB(ptob(idn.smr.rempfnlim - idn.smr.rempfn));
@@ -7211,8 +7173,8 @@ idn_check_master_config(int domid, uint_t *exp, uint_t *act)
if (!valid_mtu(dp->dmtu)) {
cmn_err(CE_WARN,
- "IDN: 219: remote domain %d MTU (%d) invalid "
- "(local.mtu = %d)", dp->domid, dp->dmtu, ldp->dmtu);
+ "IDN: 219: remote domain %d MTU (%d) invalid "
+ "(local.mtu = %d)", dp->domid, dp->dmtu, ldp->dmtu);
*exp = (uint_t)ldp->dmtu;
*act = (uint_t)dp->dmtu;
@@ -7220,9 +7182,9 @@ idn_check_master_config(int domid, uint_t *exp, uint_t *act)
}
if (!valid_bufsize(dp->dbufsize)) {
cmn_err(CE_WARN,
- "IDN: 220: remote domain %d BUFSIZE (%d) invalid "
- "(local.bufsize = %d)", dp->domid, dp->dbufsize,
- ldp->dbufsize);
+ "IDN: 220: remote domain %d BUFSIZE (%d) invalid "
+ "(local.bufsize = %d)", dp->domid, dp->dbufsize,
+ ldp->dbufsize);
*exp = (uint_t)ldp->dbufsize;
*act = (uint_t)dp->dbufsize;
@@ -7230,9 +7192,9 @@ idn_check_master_config(int domid, uint_t *exp, uint_t *act)
}
if (!valid_nwrsize((int)dp->dnwrsize)) {
cmn_err(CE_WARN,
- "IDN: 223: remote domain %d NWRSIZE (%d) invalid "
- "(local.nwrsize = %d)",
- dp->domid, dp->dnwrsize, ldp->dnwrsize);
+ "IDN: 223: remote domain %d NWRSIZE (%d) invalid "
+ "(local.nwrsize = %d)",
+ dp->domid, dp->dnwrsize, ldp->dnwrsize);
*exp = (uint_t)ldp->dnwrsize;
*act = (uint_t)dp->dnwrsize;
@@ -7240,9 +7202,9 @@ idn_check_master_config(int domid, uint_t *exp, uint_t *act)
}
if ((int)dp->dmaxnets != IDN_MAX_NETS) {
cmn_err(CE_WARN,
- "IDN: 224: remote domain %d MAX_NETS (%d) invalid "
- "(local.maxnets = %d)",
- dp->domid, (int)dp->dmaxnets, IDN_MAX_NETS);
+ "IDN: 224: remote domain %d MAX_NETS (%d) invalid "
+ "(local.maxnets = %d)",
+ dp->domid, (int)dp->dmaxnets, IDN_MAX_NETS);
*exp = (uint_t)IDN_MAX_NETS;
*act = (uint_t)dp->dmaxnets;
@@ -7250,9 +7212,9 @@ idn_check_master_config(int domid, uint_t *exp, uint_t *act)
}
if ((int)dp->dmboxpernet != IDN_MBOX_PER_NET) {
cmn_err(CE_WARN,
- "IDN: 225: remote domain %d MBOX_PER_NET (%d) "
- "invalid (local.mboxpernet = %d)",
- dp->domid, (int)dp->dmboxpernet, IDN_MBOX_PER_NET);
+ "IDN: 225: remote domain %d MBOX_PER_NET (%d) "
+ "invalid (local.mboxpernet = %d)",
+ dp->domid, (int)dp->dmboxpernet, IDN_MBOX_PER_NET);
*exp = (uint_t)IDN_MBOX_PER_NET;
*act = (uint_t)dp->dmboxpernet;
@@ -7260,9 +7222,9 @@ idn_check_master_config(int domid, uint_t *exp, uint_t *act)
}
if ((dp->dcksum - 1) != (uchar_t)IDN_CHECKSUM) {
cmn_err(CE_WARN,
- "IDN: 226: remote domain %d CHECKSUM flag (%d) "
- "mismatches local domain's (%d)",
- dp->domid, (int)dp->dcksum - 1, IDN_CHECKSUM);
+ "IDN: 226: remote domain %d CHECKSUM flag (%d) "
+ "mismatches local domain's (%d)",
+ dp->domid, (int)dp->dcksum - 1, IDN_CHECKSUM);
*exp = (uint_t)IDN_CHECKSUM;
*act = (uint_t)(dp->dcksum - 1);
@@ -7272,12 +7234,12 @@ idn_check_master_config(int domid, uint_t *exp, uint_t *act)
err = 0;
for (m = 0; m < MAX_BOARDS; m++) {
if (!BOARD_IN_SET(dp->dhw.dh_boardset, m) &&
- dp->dhw.dh_mcadr[m]) {
+ dp->dhw.dh_mcadr[m]) {
cmn_err(CE_WARN,
- "IDN: 229: remote domain %d boardset (0x%x) "
- "conflicts with MCADR(board %d) [0x%x]",
- dp->domid, (uint_t)dp->dhw.dh_boardset, m,
- dp->dhw.dh_mcadr[m]);
+ "IDN: 229: remote domain %d boardset (0x%x) "
+ "conflicts with MCADR(board %d) [0x%x]",
+ dp->domid, (uint_t)dp->dhw.dh_boardset, m,
+ dp->dhw.dh_mcadr[m]);
err++;
}
if (dp->dhw.dh_mcadr[m])
@@ -7289,9 +7251,9 @@ idn_check_master_config(int domid, uint_t *exp, uint_t *act)
rv |= CFG_ERR_MCADR;
} else if (nmcadr != dp->dhw.dh_nmcadr) {
cmn_err(CE_WARN,
- "IDN: 230: remote domain %d reported number of "
- "MCADRs (%d) mismatches received (%d)",
- dp->domid, dp->dhw.dh_nmcadr, nmcadr);
+ "IDN: 230: remote domain %d reported number of "
+ "MCADRs (%d) mismatches received (%d)",
+ dp->domid, dp->dhw.dh_nmcadr, nmcadr);
*exp = (uint_t)dp->dhw.dh_nmcadr;
*act = (uint_t)nmcadr;
rv |= CFG_ERR_NMCADR;
@@ -7344,21 +7306,21 @@ idn_recv_config_done(int domid)
if (b_conflicts || !CPUSET_ISNULL(p_conflicts)) {
if (b_conflicts) {
cmn_err(CE_WARN,
- "IDN: 231: domain %d boardset "
- "(0x%x) conflicts with existing "
- "IDN boardset (0x%x)",
- domid, dp->dhw.dh_boardset,
- b_conflicts);
+ "IDN: 231: domain %d boardset "
+ "(0x%x) conflicts with existing "
+ "IDN boardset (0x%x)",
+ domid, dp->dhw.dh_boardset,
+ b_conflicts);
}
if (!CPUSET_ISNULL(p_conflicts)) {
cmn_err(CE_WARN,
- "IDN: 232: domain %d cpuset "
- "(0x%x.%0x) conflicts with existing "
- "IDN cpuset (0x%x.%0x)", domid,
- UPPER32_CPUMASK(dp->dcpuset),
- LOWER32_CPUMASK(dp->dcpuset),
- UPPER32_CPUMASK(p_conflicts),
- LOWER32_CPUMASK(p_conflicts));
+ "IDN: 232: domain %d cpuset "
+ "(0x%x.%0x) conflicts with existing "
+ "IDN cpuset (0x%x.%0x)", domid,
+ UPPER32_CPUMASK(dp->dcpuset),
+ LOWER32_CPUMASK(dp->dcpuset),
+ UPPER32_CPUMASK(p_conflicts),
+ LOWER32_CPUMASK(p_conflicts));
}
IDN_GUNLOCK();
/*
@@ -7376,7 +7338,7 @@ idn_recv_config_done(int domid)
idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
idn_disconnect(domid, IDNFIN_FORCE_HARD,
- IDNFIN_ARG_CFGERR_FATAL, IDNFIN_SYNC_NO);
+ IDNFIN_ARG_CFGERR_FATAL, IDNFIN_SYNC_NO);
IDN_SYNC_UNLOCK();
return (-1);
@@ -7398,10 +7360,10 @@ idn_recv_config_done(int domid)
*/
if (!idn_cpu_per_board((void *)NULL, dp->dcpuset, &dp->dhw)) {
cmn_err(CE_WARN,
- "IDN: 233: domain %d missing CPU per "
- "memory boardset (0x%x), CPU boardset (0x%x)",
- domid, dp->dhw.dh_boardset,
- cpuset2boardset(dp->dcpuset));
+ "IDN: 233: domain %d missing CPU per "
+ "memory boardset (0x%x), CPU boardset (0x%x)",
+ domid, dp->dhw.dh_boardset,
+ cpuset2boardset(dp->dcpuset));
IDN_GUNLOCK();
/*
@@ -7419,7 +7381,7 @@ idn_recv_config_done(int domid)
idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
idn_disconnect(domid, IDNFIN_FORCE_HARD,
- IDNFIN_ARG_CPUCFG, IDNFIN_SYNC_NO);
+ IDNFIN_ARG_CPUCFG, IDNFIN_SYNC_NO);
IDN_SYNC_UNLOCK();
return (-1);
@@ -7451,9 +7413,9 @@ idn_recv_config_done(int domid)
* Gotta bail.
*/
cmn_err(CE_WARN,
- "IDN: 234: failed to program hardware for domain %d "
- "(boardset = 0x%x)",
- domid, dp->dhw.dh_boardset);
+ "IDN: 234: failed to program hardware for domain %d "
+ "(boardset = 0x%x)",
+ domid, dp->dhw.dh_boardset);
IDN_DUNLOCK(domid);
/*
@@ -7476,7 +7438,7 @@ idn_recv_config_done(int domid)
idn_update_op(IDNOP_ERROR, DOMAINSET_ALL, &idnerr);
idn_unlink_domainset(domset, IDNFIN_NORMAL, IDNFIN_ARG_HWERR,
- IDNFIN_OPT_UNLINK, BOARDSET_ALL);
+ IDNFIN_OPT_UNLINK, BOARDSET_ALL);
IDN_SYNC_UNLOCK();
IDN_DLOCK_EXCL(domid);
@@ -7582,17 +7544,17 @@ idn_verify_config_mbox(int domid)
mbox_csum = IDN_CKSUM_MBOX(&mtp->mt_header);
if (!VALID_MBOXHDR(&mtp->mt_header, c, mbox_csum)) {
cmn_err(CE_WARN,
- "IDN: 235: [recv] mailbox (domain %d, "
- "channel %d) SMR CORRUPTED - RELINK",
- domid, c);
+ "IDN: 235: [recv] mailbox (domain %d, "
+ "channel %d) SMR CORRUPTED - RELINK",
+ domid, c);
cmn_err(CE_CONT,
- "IDN: 235: [recv] expected (cookie 0x%x, "
- "cksum 0x%x) actual (cookie 0x%x, "
- "cksum 0x%x)\n",
- IDN_GET_MBOXHDR_COOKIE(&mtp->mt_header),
- (int)mtp->mt_header.mh_cksum,
- IDN_MAKE_MBOXHDR_COOKIE(0, 0, c),
- (int)mbox_csum);
+ "IDN: 235: [recv] expected (cookie 0x%x, "
+ "cksum 0x%x) actual (cookie 0x%x, "
+ "cksum 0x%x)\n",
+ IDN_GET_MBOXHDR_COOKIE(&mtp->mt_header),
+ (int)mtp->mt_header.mh_cksum,
+ IDN_MAKE_MBOXHDR_COOKIE(0, 0, c),
+ (int)mbox_csum);
mutex_exit(&mmp[c].mm_mutex);
rv = -1;
break;
@@ -7603,23 +7565,21 @@ idn_verify_config_mbox(int domid)
* Verify pointers are valid.
*/
if (!activeptr || !VALID_NWROFFSET(activeptr, 2) ||
- !readyptr || !VALID_NWROFFSET(readyptr, 2)) {
+ !readyptr || !VALID_NWROFFSET(readyptr, 2)) {
cmn_err(CE_WARN,
- "IDN: 235: [recv] mailbox (domain %d, "
- "channel %d) SMR CORRUPTED - RELINK",
- domid, c);
+ "IDN: 235: [recv] mailbox (domain %d, "
+ "channel %d) SMR CORRUPTED - RELINK",
+ domid, c);
cmn_err(CE_CONT,
- "IDN: 235: [recv] activeptr (0x%x), "
- "readyptr (0x%x)\n",
- activeptr, readyptr);
+ "IDN: 235: [recv] activeptr (0x%x), "
+ "readyptr (0x%x)\n",
+ activeptr, readyptr);
mutex_exit(&mmp[c].mm_mutex);
rv = -1;
break;
}
- mmp[c].mm_smr_activep =
- (ushort_t *)IDN_OFFSET2ADDR(activeptr);
- mmp[c].mm_smr_readyp =
- (ushort_t *)IDN_OFFSET2ADDR(readyptr);
+ mmp[c].mm_smr_activep = (ushort_t *)IDN_OFFSET2ADDR(activeptr);
+ mmp[c].mm_smr_readyp = (ushort_t *)IDN_OFFSET2ADDR(readyptr);
mutex_exit(&mmp[c].mm_mutex);
IDN_MBOXTBL_PTR_INC(mtp);
}
@@ -7646,17 +7606,17 @@ idn_verify_config_mbox(int domid)
if (!VALID_MBOXHDR(&mtp->mt_header, c, mbox_csum)) {
cmn_err(CE_WARN,
- "IDN: 235: [send] mailbox (domain %d, "
- "channel %d) SMR CORRUPTED - RELINK",
- domid, c);
+ "IDN: 235: [send] mailbox (domain %d, "
+ "channel %d) SMR CORRUPTED - RELINK",
+ domid, c);
cmn_err(CE_CONT,
- "IDN: 235: [send] expected (cookie 0x%x, "
- "cksum 0x%x) actual (cookie 0x%x, "
- "cksum 0x%x)\n",
- IDN_GET_MBOXHDR_COOKIE(&mtp->mt_header),
- (int)mtp->mt_header.mh_cksum,
- IDN_MAKE_MBOXHDR_COOKIE(0, 0, c),
- (int)mbox_csum);
+ "IDN: 235: [send] expected (cookie 0x%x, "
+ "cksum 0x%x) actual (cookie 0x%x, "
+ "cksum 0x%x)\n",
+ IDN_GET_MBOXHDR_COOKIE(&mtp->mt_header),
+ (int)mtp->mt_header.mh_cksum,
+ IDN_MAKE_MBOXHDR_COOKIE(0, 0, c),
+ (int)mbox_csum);
mutex_exit(&mmp->mm_mutex);
rv = -1;
break;
@@ -7667,15 +7627,15 @@ idn_verify_config_mbox(int domid)
* Paranoid check.
*/
if (!activeptr || !VALID_NWROFFSET(activeptr, 2) ||
- !readyptr || !VALID_NWROFFSET(readyptr, 2)) {
+ !readyptr || !VALID_NWROFFSET(readyptr, 2)) {
cmn_err(CE_WARN,
- "IDN: 235: [send] mailbox (domain %d, "
- "channel %d) SMR CORRUPTED - RELINK",
- domid, c);
+ "IDN: 235: [send] mailbox (domain %d, "
+ "channel %d) SMR CORRUPTED - RELINK",
+ domid, c);
cmn_err(CE_CONT,
- "IDN: 235: [send] activeptr (0x%x), "
- "readyptr (0x%x)\n",
- activeptr, readyptr);
+ "IDN: 235: [send] activeptr (0x%x), "
+ "readyptr (0x%x)\n",
+ activeptr, readyptr);
mutex_exit(&mmp->mm_mutex);
rv = -1;
break;
@@ -7732,7 +7692,7 @@ idn_program_hardware(int domid)
procname_t proc = "idn_program_hardware";
PR_PROTO("%s:%d: program hw in domain %d w.r.t remote domain %d\n",
- proc, domid, idn.localid, domid);
+ proc, domid, idn.localid, domid);
dp = &idn_domain[domid];
@@ -7757,7 +7717,7 @@ idn_program_hardware(int domid)
*/
is_master = 0;
if ((idn.localid == IDN_GET_MASTERID()) &&
- lock_try(&idn.first_hwlink)) {
+ lock_try(&idn.first_hwlink)) {
/*
* This is our first HW link and I'm the
* master, which means we need to program
@@ -7767,7 +7727,7 @@ idn_program_hardware(int domid)
idn.first_hwmasterid = (short)idn.localid;
rem_pfn = idn.smr.locpfn;
rem_pfnlimit = idn.smr.locpfn +
- btop(MB2B(IDN_SMR_SIZE));
+ btop(MB2B(IDN_SMR_SIZE));
} else {
/*
* Otherwise, just a slave linking to
@@ -7791,11 +7751,10 @@ idn_program_hardware(int domid)
idn.first_hwmasterid = (short)domid;
}
- PR_PROTO("%s:%d: ADD bset (0x%x)\n",
- proc, domid, dp->dhw.dh_boardset);
+ PR_PROTO("%s:%d: ADD bset (0x%x)\n", proc, domid, dp->dhw.dh_boardset);
rv = idnxf_shmem_add(is_master, dp->dhw.dh_boardset,
- rem_pfn, rem_pfnlimit, mcadrp);
+ rem_pfn, rem_pfnlimit, mcadrp);
if (rv == 0) {
DOMAINSET_ADD(idn.domset.ds_hwlinked, domid);
@@ -7845,7 +7804,7 @@ idn_deprogram_hardware(int domid)
}
PR_PROTO("%s:%d: DEprogram hw in domain %d w.r.t remote domain %d\n",
- proc, domid, idn.localid, domid);
+ proc, domid, idn.localid, domid);
/*
* It's possible to come through this flow for domains that
@@ -7859,8 +7818,7 @@ idn_deprogram_hardware(int domid)
* CONFIG state, we need to go through the DMAP handshake.
*/
- PR_PROTO("%s:%d: SUB bset (0x%x)\n",
- proc, domid, dp->dhw.dh_boardset);
+ PR_PROTO("%s:%d: SUB bset (0x%x)\n", proc, domid, dp->dhw.dh_boardset);
if (idn.first_hwmasterid == (short)domid) {
is_master = 1;
@@ -7901,7 +7859,7 @@ idn_deconfig(int domid)
ASSERT(dp->dstate == IDNDS_DMAP);
PR_PROTO("%s:%d: (dio=%d, dioerr=%d, dnslabs=%d)\n",
- proc, domid, dp->dio, dp->dioerr, dp->dnslabs);
+ proc, domid, dp->dio, dp->dioerr, dp->dnslabs);
IDN_GLOCK_EXCL();
masterid = IDN_GET_MASTERID();
@@ -7926,7 +7884,7 @@ idn_deconfig(int domid)
DSLAB_LOCK_EXCL(domid);
if ((sp = dp->dslab) != NULL) {
PR_PROTO("%s:%d: freeing up %d dead slabs\n",
- proc, domid, dp->dnslabs);
+ proc, domid, dp->dnslabs);
smr_slab_free(domid, sp);
dp->dslab = NULL;
dp->dnslabs = 0;
@@ -7950,24 +7908,25 @@ idn_deconfig(int domid)
int nbusy = 0;
uint_t dommask = 0;
for (sp = ldp->dslab; sp; sp = sp->sl_next) {
- smr_slabbuf_t *bp;
-
- if (!smr_slab_busy(sp))
- continue;
- nbusy++;
- for (bp = sp->sl_inuse; bp; bp = bp->sb_next)
- if (bp->sb_domid != IDN_NIL_DOMID)
- DOMAINSET_ADD(dommask, bp->sb_domid);
+ smr_slabbuf_t *bp;
+
+ if (!smr_slab_busy(sp))
+ continue;
+ nbusy++;
+ for (bp = sp->sl_inuse; bp; bp = bp->sb_next)
+ if (bp->sb_domid != IDN_NIL_DOMID)
+ DOMAINSET_ADD(dommask,
+ bp->sb_domid);
}
if (nbusy)
PR_PROTO("%s:%d: found %d busy slabs "
- "(dommask = 0x%x)\n",
- proc, domid, nbusy, dommask);
+ "(dommask = 0x%x)\n",
+ proc, domid, nbusy, dommask);
}
#endif /* DEBUG */
if ((sp = ldp->dslab) != NULL) {
PR_PROTO("%s:%d: freeing up %d local slab "
- "structs\n", proc, domid, ldp->dnslabs);
+ "structs\n", proc, domid, ldp->dnslabs);
smr_slab_garbage_collection(sp);
ldp->dslab = NULL;
ldp->dnslabs = 0;
@@ -7976,14 +7935,13 @@ idn_deconfig(int domid)
DSLAB_UNLOCK(idn.localid);
}
if (dp->dio) {
- PR_PROTO("%s:%d: reset dio (%d) to 0\n",
- proc, domid, dp->dio);
+ PR_PROTO("%s:%d: reset dio (%d) to 0\n", proc, domid, dp->dio);
dp->dio = 0;
}
dp->dioerr = 0;
PR_PROTO("%s:%d: reset diocheck (%x) to 0\n",
- proc, domid, dp->diocheck);
+ proc, domid, dp->diocheck);
lock_clear(&dp->diocheck);
CHECKPOINT_CLOSED(IDNSB_CHKPT_LINK, dp->dhw.dh_boardset, 2);
@@ -8083,8 +8041,8 @@ idn_shutdown_datapath(domainset_t domset, int force)
}
void
-idn_send_cmd(int domid, idn_cmd_t cmdtype,
- uint_t arg1, uint_t arg2, uint_t arg3)
+idn_send_cmd(int domid, idn_cmd_t cmdtype, uint_t arg1, uint_t arg2, uint_t
+ arg3)
{
idn_msgtype_t mt;
procname_t proc = "idn_send_cmd";
@@ -8095,19 +8053,18 @@ idn_send_cmd(int domid, idn_cmd_t cmdtype,
ASSERT(IDN_DLOCK_IS_HELD(domid));
- PR_PROTO("%s:%d: sending command %s\n",
- proc, domid,
- VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown");
+ PR_PROTO("%s:%d: sending command %s\n", proc, domid,
+ VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown");
IDN_MSGTIMER_START(domid, IDNP_CMD, (ushort_t)cmdtype,
- idn_msg_waittime[IDNP_CMD], &mt.mt_cookie);
+ idn_msg_waittime[IDNP_CMD], &mt.mt_cookie);
IDNXDC(domid, &mt, (uint_t)cmdtype, arg1, arg2, arg3);
}
void
-idn_send_cmdresp(int domid, idn_msgtype_t *mtp, idn_cmd_t cmdtype,
- uint_t arg1, uint_t arg2, uint_t cerrno)
+idn_send_cmdresp(int domid, idn_msgtype_t *mtp, idn_cmd_t cmdtype, uint_t arg1,
+ uint_t arg2, uint_t cerrno)
{
idn_msgtype_t mt;
@@ -8131,7 +8088,7 @@ idn_send_cmdresp(int domid, idn_msgtype_t *mtp, idn_cmd_t cmdtype,
static void
idn_send_cmd_nackresp(int domid, idn_msgtype_t *mtp, idn_cmd_t cmdtype,
- idn_nack_t nacktype)
+ idn_nack_t nacktype)
{
idn_msgtype_t mt;
@@ -8158,8 +8115,8 @@ idn_broadcast_cmd(idn_cmd_t cmdtype, uint_t arg1, uint_t arg2, uint_t arg3)
DOMAINSET_DEL(domset, idn.localid);
PR_PROTO("%s: broadcasting command (%s) to domainset 0x%x\n",
- proc, VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
- domset);
+ proc, VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
+ domset);
mt.mt_mtype = IDNP_CMD;
mt.mt_atype = 0;
@@ -8205,26 +8162,26 @@ idn_recv_cmd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
ASSERT(!acknack || (acknack & IDNP_ACKNACK_MASK));
PR_PROTO("%s:%d: (local=%d) acknack=0x%x, cmdtype=%s(%d), "
- "a1=0x%x, a2=0x%x, a3=0x%x\n",
- proc, domid, islocal, acknack,
- VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
- cmdtype, cmdarg1, cmdarg2, cmdarg3);
+ "a1=0x%x, a2=0x%x, a3=0x%x\n",
+ proc, domid, islocal, acknack,
+ VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
+ cmdtype, cmdarg1, cmdarg2, cmdarg3);
unsup_cmd_sent = unsup_cmd_recvd = 0;
if ((IDN_GET_MASTERID() == IDN_NIL_DOMID) ||
- (dp->dstate != IDNDS_CONNECTED)) {
+ (dp->dstate != IDNDS_CONNECTED)) {
/*
* Commands cannot be handled without a valid
* master. If this is a request then nack him.
*/
PR_PROTO("%s:%d: cannot process CMD w/o master (%d, %s)\n",
- proc, domid, IDN_GET_MASTERID(),
- idnds_str[dp->dstate]);
+ proc, domid, IDN_GET_MASTERID(),
+ idnds_str[dp->dstate]);
if (!islocal && !(acknack & IDNP_ACKNACK_MASK))
idn_send_cmd_nackresp(domid, mtp, cmdtype,
- IDNNACK_NOCONN);
+ IDNNACK_NOCONN);
IDN_GUNLOCK();
return;
}
@@ -8260,12 +8217,12 @@ idn_recv_cmd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
switch (cmdtype) {
case IDNCMD_SLABALLOC:
idn_recv_slaballoc_resp(domid, cmdarg1, cmdarg2,
- cmdarg3);
+ cmdarg3);
break;
case IDNCMD_SLABFREE:
idn_recv_slabfree_resp(domid, cmdarg1, cmdarg2,
- cmdarg3);
+ cmdarg3);
break;
case IDNCMD_SLABREAP:
@@ -8273,14 +8230,12 @@ idn_recv_cmd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
* We only care if successful.
*/
if (acknack & IDNP_ACK)
- idn_recv_slabreap_resp(domid, cmdarg1,
- cmdarg3);
+ idn_recv_slabreap_resp(domid, cmdarg1, cmdarg3);
break;
case IDNCMD_NODENAME:
if ((acknack & IDNP_NACK) == 0) {
- idn_recv_nodename_resp(domid, cmdarg1,
- cmdarg3);
+ idn_recv_nodename_resp(domid, cmdarg1, cmdarg3);
break;
}
switch (nack) {
@@ -8291,11 +8246,11 @@ idn_recv_cmd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
* ready, try again.
*/
PR_PROTO("%s:%d: remote not ready "
- "for %s - retrying "
- "[dstate=%s]\n",
- proc, domid,
- idncmd_str[IDNCMD_NODENAME],
- idnds_str[dp->dstate]);
+ "for %s - retrying "
+ "[dstate=%s]\n",
+ proc, domid,
+ idncmd_str[IDNCMD_NODENAME],
+ idnds_str[dp->dstate]);
if (dp->dstate == IDNDS_CONNECTED)
(void) timeout(idn_retry_nodename_req,
@@ -8314,13 +8269,13 @@ idn_recv_cmd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
}
if (unsup_cmd_sent) {
PR_PROTO("%s:%d: unsupported command "
- "requested (0x%x)\n",
- proc, domid, cmdtype);
+ "requested (0x%x)\n",
+ proc, domid, cmdtype);
}
if (unsup_cmd_recvd) {
PR_PROTO("%s:%d: unsupported command "
- "response (0x%x)\n",
- proc, domid, cmdtype);
+ "response (0x%x)\n",
+ proc, domid, cmdtype);
}
} else {
/*
@@ -8355,7 +8310,7 @@ idn_recv_cmd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
* Received an unsupported IDN command.
*/
idn_send_cmd_nackresp(domid, mtp, cmdtype,
- IDNNACK_BADCMD);
+ IDNNACK_BADCMD);
}
}
}
@@ -8375,8 +8330,8 @@ idn_local_cmd(idn_cmd_t cmdtype, uint_t arg1, uint_t arg2, uint_t arg3)
procname_t proc = "idn_local_cmd";
PR_PROTO("%s: submitting local command %s on domain %d\n",
- proc, VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
- idn.localid);
+ proc, VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
+ idn.localid);
jp = idn_protojob_alloc(KM_SLEEP);
@@ -8414,7 +8369,7 @@ idn_terminate_cmd(int domid, int serrno)
*/
if (tplist == NULL) {
PR_PROTO("%s:%d: no outstanding cmds found\n",
- proc, domid);
+ proc, domid);
/*
* There is a window where we may have caught a
* request just prior to issuing the actual
@@ -8431,7 +8386,7 @@ idn_terminate_cmd(int domid, int serrno)
ASSERT(tp->t_type == IDNP_CMD);
PR_PROTO("%s:%d: found outstanding cmd: %s\n",
- proc, domid, idncmd_str[tp->t_subtype]);
+ proc, domid, idncmd_str[tp->t_subtype]);
switch (tp->t_subtype) {
case IDNCMD_SLABALLOC:
@@ -8493,7 +8448,7 @@ idn_terminate_cmd(int domid, int serrno)
} else if (dp->dvote.v.master) {
PR_PROTO("%s:%d: abort (local domain) slaballoc waiters\n",
- proc, domid);
+ proc, domid);
(void) smr_slabwaiter_abort(idn.localid, serrno);
}
}
@@ -8517,19 +8472,19 @@ idn_send_acknack(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
if (mtp->mt_mtype & IDNP_ACK) {
PR_PROTO("%s:%d: dstate=%s, msg=(%s/%s), "
- "a1=0x%x, a2=0x%x, a3=0x%x, a4 = 0x%x\n",
- proc, domid, idnds_str[dp->dstate],
- astr, mstr, xargs[0], xargs[1],
- xargs[2], xargs[3]);
+ "a1=0x%x, a2=0x%x, a3=0x%x, a4 = 0x%x\n",
+ proc, domid, idnds_str[dp->dstate],
+ astr, mstr, xargs[0], xargs[1],
+ xargs[2], xargs[3]);
} else {
idn_nack_t nack;
nack = GET_XARGS_NACK_TYPE(xargs);
PR_PROTO("%s:%d: dstate=%s, msg=(%s/%s), "
- "nack=%s(0x%x)\n",
- proc, domid, idnds_str[dp->dstate],
- astr, mstr, idnnack_str[nack],
- (uint_t)nack);
+ "nack=%s(0x%x)\n",
+ proc, domid, idnds_str[dp->dstate],
+ astr, mstr, idnnack_str[nack],
+ (uint_t)nack);
}
}
#endif /* DEBUG */
@@ -8567,7 +8522,7 @@ idn_prealloc_slab(int nslabs)
serrno = smr_slab_alloc(idn.localid, &sp);
if (serrno != 0) {
PR_PROTO("%s: FAILED to pre-alloc'd "
- "slab (serrno = %d)\n", proc, serrno);
+ "slab (serrno = %d)\n", proc, serrno);
break;
}
/*
@@ -8598,7 +8553,7 @@ idn_recv_slaballoc_req(int domid, idn_msgtype_t *mtp, uint_t slab_size)
procname_t proc = "idn_recv_slaballoc_req";
PR_PROTO("%s: slaballoc req from domain %d (size=0x%x)\n",
- proc, domid, slab_size);
+ proc, domid, slab_size);
dp = &idn_domain[domid];
@@ -8669,23 +8624,23 @@ idn_recv_slaballoc_req(int domid, idn_msgtype_t *mtp, uint_t slab_size)
* slab into the domain's respective idn_domain entry
* to be associated with that domain.
*/
- idn_send_slaballoc_resp(domid, mtp,
- slab_offset, slab_size, serrno);
+ idn_send_slaballoc_resp(domid, mtp, slab_offset, slab_size,
+ serrno);
}
}
static void
-idn_send_slaballoc_resp(int domid, idn_msgtype_t *mtp,
- smr_offset_t slab_offset, uint_t slab_size, int serrno)
+idn_send_slaballoc_resp(int domid, idn_msgtype_t *mtp, smr_offset_t slab_offset,
+ uint_t slab_size, int serrno)
{
procname_t proc = "idn_send_slaballoc_resp";
PR_PROTO("%s: slaballoc resp to domain %d (off=0x%x, size=0x%x) "
- "[serrno = %d]\n",
- proc, domid, slab_offset, slab_size, serrno);
+ "[serrno = %d]\n",
+ proc, domid, slab_offset, slab_size, serrno);
- idn_send_cmdresp(domid, mtp, IDNCMD_SLABALLOC,
- slab_offset, slab_size, serrno);
+ idn_send_cmdresp(domid, mtp, IDNCMD_SLABALLOC, slab_offset, slab_size,
+ serrno);
}
/*
@@ -8695,8 +8650,8 @@ idn_send_slaballoc_resp(int domid, idn_msgtype_t *mtp,
* waiters.
*/
static void
-idn_recv_slaballoc_resp(int domid, smr_offset_t slab_offset,
- uint_t slab_size, int serrno)
+idn_recv_slaballoc_resp(int domid, smr_offset_t slab_offset, uint_t slab_size,
+ int serrno)
{
smr_slab_t *sp = NULL;
int rv;
@@ -8706,8 +8661,8 @@ idn_recv_slaballoc_resp(int domid, smr_offset_t slab_offset,
ASSERT(IDN_DLOCK_IS_EXCL(domid));
PR_PROTO("%s: slaballoc resp from domain %d (off=0x%x, size=0x%x) "
- "[serrno = %d]\n",
- proc, domid, slab_offset, slab_size, serrno);
+ "[serrno = %d]\n",
+ proc, domid, slab_offset, slab_size, serrno);
if (!serrno) {
IDN_GLOCK_SHARED();
@@ -8718,18 +8673,17 @@ idn_recv_slaballoc_resp(int domid, smr_offset_t slab_offset,
* or an old response. In either case dump it.
*/
PR_PROTO("%s: BOGUS slaballoc resp from domid %d "
- "(master = %d)\n",
- proc, domid, IDN_GET_MASTERID());
+ "(master = %d)\n",
+ proc, domid, IDN_GET_MASTERID());
serrno = EPROTO;
}
IDN_GUNLOCK();
if (!serrno &&
- !VALID_NWROFFSET(slab_offset, IDN_SMR_BUFSIZE)) {
-
+ !VALID_NWROFFSET(slab_offset, IDN_SMR_BUFSIZE)) {
PR_PROTO("%s: slab offset (0x%x) out of range "
- "(0-0x%lx)\n",
- proc, slab_offset, MB2B(IDN_NWR_SIZE));
+ "(0-0x%lx)\n",
+ proc, slab_offset, MB2B(IDN_NWR_SIZE));
serrno = EPROTO;
} else if (!serrno) {
sp = GETSTRUCT(smr_slab_t, 1);
@@ -8759,13 +8713,13 @@ idn_recv_slaballoc_resp(int domid, smr_offset_t slab_offset,
* just have to send it back.
*/
PR_PROTO("%s: failed to install response in waiting area\n",
- proc);
+ proc);
if (slab_size != 0) {
PR_PROTO("%s: sending slab back to domain %d "
- "(master = %d)\n",
- proc, domid, IDN_GET_MASTERID());
- idn_send_cmd(domid, IDNCMD_SLABFREE,
- slab_offset, slab_size, 0);
+ "(master = %d)\n",
+ proc, domid, IDN_GET_MASTERID());
+ idn_send_cmd(domid, IDNCMD_SLABFREE, slab_offset,
+ slab_size, 0);
}
if (sp) {
smr_free_buflist(sp);
@@ -8825,12 +8779,12 @@ idn_recv_slabreap_resp(int domid, int nslabs, int serrno)
if ((idn.localid != IDN_GET_MASTERID()) || (idn.localid == domid)) {
PR_PROTO("%s: unexpected slabreap resp received "
- "(domid = %d)\n", proc, domid);
+ "(domid = %d)\n", proc, domid);
ASSERT(0);
return;
}
PR_PROTO("%s: recvd reap response from domain %d for %d slabs "
- "[serrno = %d]\n", proc, domid, nslabs, serrno);
+ "[serrno = %d]\n", proc, domid, nslabs, serrno);
}
/*
@@ -8849,8 +8803,8 @@ idn_send_slabreap_resp(int domid, idn_msgtype_t *mtp, int nslabs, int serrno)
* Master never sends slabfree request to itself.
*/
static void
-idn_recv_slabfree_req(int domid, idn_msgtype_t *mtp,
- smr_offset_t slab_offset, uint_t slab_size)
+idn_recv_slabfree_req(int domid, idn_msgtype_t *mtp, smr_offset_t slab_offset,
+ uint_t slab_size)
{
smr_slab_t *sp;
int serrno;
@@ -8861,16 +8815,16 @@ idn_recv_slabfree_req(int domid, idn_msgtype_t *mtp,
if (domid == IDN_GET_MASTERID()) {
PR_PROTO("%s: unexpected slabfree req received (domid = %d)\n",
- proc, domid);
- idn_send_slabfree_resp(domid, mtp,
- slab_offset, slab_size, EACCES);
+ proc, domid);
+ idn_send_slabfree_resp(domid, mtp, slab_offset, slab_size,
+ EACCES);
return;
}
if (slab_size > IDN_SLAB_SIZE) {
PR_PROTO("%s: unexpected slab size. exp %d, recvd %d\n",
- proc, IDN_SLAB_SIZE, slab_size);
- idn_send_slabfree_resp(domid, mtp,
- slab_offset, slab_size, EINVAL);
+ proc, IDN_SLAB_SIZE, slab_size);
+ idn_send_slabfree_resp(domid, mtp, slab_offset, slab_size,
+ EINVAL);
return;
}
s_start = IDN_OFFSET2ADDR(slab_offset);
@@ -8896,8 +8850,8 @@ idn_recv_slabfree_req(int domid, idn_msgtype_t *mtp,
* Master -> Slave ONLY
*/
static void
-idn_recv_slabfree_resp(int domid, uint_t slab_offset,
- uint_t slab_size, int serrno)
+idn_recv_slabfree_resp(int domid, uint_t slab_offset, uint_t slab_size, int
+ serrno)
{
procname_t proc = "idn_recv_slabfree_resp";
@@ -8905,27 +8859,27 @@ idn_recv_slabfree_resp(int domid, uint_t slab_offset,
if (domid != IDN_GET_MASTERID()) {
PR_PROTO("%s: unexpected slabfree resp received (domid = %d)\n",
- proc, domid);
+ proc, domid);
ASSERT(0);
return;
}
if (slab_size > IDN_SLAB_SIZE) {
PR_PROTO("%s: unexpected slab size. exp %d, recvd %d\n",
- proc, IDN_SLAB_SIZE, slab_size);
+ proc, IDN_SLAB_SIZE, slab_size);
ASSERT(0);
return;
}
PR_PROTO("%s: recvd free resp from dom %d "
- "- slab (off/size) 0x%x/0x%x [serrno = %d]\n",
- proc, domid, slab_offset, slab_size, serrno);
+ "- slab (off/size) 0x%x/0x%x [serrno = %d]\n",
+ proc, domid, slab_offset, slab_size, serrno);
}
static void
-idn_send_slabfree_resp(int domid, idn_msgtype_t *mtp,
- uint_t slab_offset, uint_t slab_size, int serrno)
+idn_send_slabfree_resp(int domid, idn_msgtype_t *mtp, uint_t slab_offset,
+ uint_t slab_size, int serrno)
{
- idn_send_cmdresp(domid, mtp, IDNCMD_SLABFREE,
- slab_offset, slab_size, serrno);
+ idn_send_cmdresp(domid, mtp, IDNCMD_SLABFREE, slab_offset, slab_size,
+ serrno);
}
static void
@@ -8957,7 +8911,7 @@ idn_send_nodename_req(int domid)
* Lost connection.
*/
PR_PROTO("%s:%d: connection lost [dstate = %s]\n",
- proc, domid, idnds_str[dp->dstate]);
+ proc, domid, idnds_str[dp->dstate]);
IDN_DUNLOCK(domid);
if (!serrno)
(void) smr_buf_free(domid, b_bufp, MAXDNAME+1);
@@ -8970,7 +8924,7 @@ idn_send_nodename_req(int domid)
* the master a little too earlier.
*/
PR_PROTO("%s:%d: buffer alloc failed [dstate = %s]\n",
- proc, domid, idnds_str[dp->dstate]);
+ proc, domid, idnds_str[dp->dstate]);
(void) timeout(idn_retry_nodename_req, (void *)(uintptr_t)domid,
hz);
IDN_DUNLOCK(domid);
@@ -8985,11 +8939,11 @@ idn_send_nodename_req(int domid)
}
static void
-idn_send_nodename_resp(int domid, idn_msgtype_t *mtp,
- smr_offset_t bufoffset, int serrno)
+idn_send_nodename_resp(int domid, idn_msgtype_t *mtp, smr_offset_t bufoffset,
+ int serrno)
{
- idn_send_cmdresp(domid, mtp, IDNCMD_NODENAME,
- (uint_t)bufoffset, 0, serrno);
+ idn_send_cmdresp(domid, mtp, IDNCMD_NODENAME, (uint_t)bufoffset, 0,
+ serrno);
}
static void
@@ -9009,7 +8963,7 @@ idn_recv_nodename_req(int domid, idn_msgtype_t *mtp, smr_offset_t bufoffset)
*/
IDN_DUNLOCK(idn.localid);
idn_send_cmd_nackresp(domid, mtp, IDNCMD_NODENAME,
- IDNNACK_RETRY);
+ IDNNACK_RETRY);
return;
}
strncpy(ldp->dname, utsname.nodename, MAXDNAME - 1);
@@ -9018,7 +8972,7 @@ idn_recv_nodename_req(int domid, idn_msgtype_t *mtp, smr_offset_t bufoffset)
if (!VALID_NWROFFSET(bufoffset, IDN_SMR_BUFSIZE)) {
PR_PROTO("%s:%d: invalid SMR offset received (0x%x)\n",
- proc, domid, bufoffset);
+ proc, domid, bufoffset);
IDN_DUNLOCK(idn.localid);
idn_send_nodename_resp(domid, mtp, bufoffset, EINVAL);
return;
@@ -9029,7 +8983,7 @@ idn_recv_nodename_req(int domid, idn_msgtype_t *mtp, smr_offset_t bufoffset)
if (length < strlen(ldp->dname)) {
PR_PROTO("%s:%d: buffer not big enough (req %lu, got %d)\n",
- proc, domid, strlen(ldp->dname), length);
+ proc, domid, strlen(ldp->dname), length);
IDN_DUNLOCK(idn.localid);
idn_send_nodename_resp(domid, mtp, bufoffset, EINVAL);
return;
@@ -9053,7 +9007,7 @@ idn_recv_nodename_resp(int domid, smr_offset_t bufoffset, int serrno)
if (!VALID_NWROFFSET(bufoffset, IDN_SMR_BUFSIZE)) {
PR_PROTO("%s:%d: invalid SMR offset received (0x%x)\n",
- proc, domid, bufoffset);
+ proc, domid, bufoffset);
return;
}
@@ -9064,7 +9018,7 @@ idn_recv_nodename_resp(int domid, smr_offset_t bufoffset, int serrno)
if (strlen(b_bufp) > 0) {
strncpy(dp->dname, b_bufp, MAXDNAME);
PR_PROTO("%s:%d: received nodename(%s)\n",
- proc, domid, dp->dname);
+ proc, domid, dp->dname);
}
}
@@ -9091,7 +9045,7 @@ idn_master_init()
}
PR_PROTO("%s: initializing master data (domid = %d)\n",
- proc, idn.localid);
+ proc, idn.localid);
/*
* Reserve an area of the SMR for mailbox usage.
@@ -9101,12 +9055,12 @@ idn_master_init()
reserved_size = IDNROUNDUP(IDN_MBOXAREA_SIZE, IDN_SMR_BUFSIZE);
PR_PROTO("%s: reserving %lu bytes for mailbox area\n",
- proc, reserved_size);
+ proc, reserved_size);
#ifdef DEBUG
if (reserved_size > (size_t)IDN_SLAB_SIZE) {
PR_PROTO("%s: WARNING mbox area (%ld) > slab size (%d)\n",
- proc, reserved_size, IDN_SLAB_SIZE);
+ proc, reserved_size, IDN_SLAB_SIZE);
}
#endif /* DEBUG */
/*
@@ -9151,7 +9105,7 @@ idn_master_deinit()
ldp = &idn_domain[idn.localid];
PR_PROTO("%s: deinitializing master data (domid = %d)\n",
- proc, idn.localid);
+ proc, idn.localid);
ldp->dmbox.m_tbl = NULL;
idn.mboxarea = NULL;
@@ -9183,7 +9137,7 @@ idn_mark_awol(int domid, clock_t *atime)
DOMAINSET_ADD(idn.domset.ds_awol, domid);
idn.nawols++;
}
- awol = lbolt;
+ awol = ddi_get_lbolt();
if (dp->dawol.a_count++ == 0)
dp->dawol.a_time = awol;
dp->dawol.a_last = awol;
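The only functional change in the hunk above is the switch from the legacy lbolt global to the ddi_get_lbolt() DDI accessor; the remaining lines are continuation-line reindentation. A minimal sketch of the conversion pattern follows, assuming an ordinary kernel-module context; the function name example_timestamp is illustrative and does not appear in idn_proto.c.

	#include <sys/types.h>
	#include <sys/ddi.h>
	#include <sys/sunddi.h>

	/*
	 * Old style read the global tick counter directly:
	 *	clock_t now = lbolt;
	 * New style calls the DDI accessor, which returns the same
	 * count of clock ticks since boot without relying on the
	 * deprecated global variable.
	 */
	static clock_t
	example_timestamp(void)
	{
		return (ddi_get_lbolt());
	}
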
@@ -9247,7 +9201,7 @@ idn_timer_expired(void *arg)
if (tp->t_onq == 0) {
PR_TIMER("%s: timer CAUGHT TERMINATION (type = %s)\n",
- proc, str);
+ proc, str);
/*
* Timer was dequeued. Somebody is trying
* to shut it down.
@@ -9273,8 +9227,8 @@ idn_timer_expired(void *arg)
#ifdef DEBUG
PR_TIMER("%s:%d: [%s] timer EXPIRED (C=0x%x, P=0x%llx, X=0x%llx)\n",
- proc, tp->t_domid, str, tp->t_cookie,
- tp->t_posttime, tp->t_exectime);
+ proc, tp->t_domid, str, tp->t_cookie,
+ tp->t_posttime, tp->t_exectime);
#endif /* DEBUG */
/*
@@ -9303,8 +9257,8 @@ idn_timer_expired(void *arg)
* go around.
*/
IDN_MSGTIMER_START(domid, IDNP_DATA, 0,
- idn_msg_waittime[IDNP_DATA],
- &mt.mt_cookie);
+ idn_msg_waittime[IDNP_DATA],
+ &mt.mt_cookie);
} else {
lock_clear(&dp->diocheck);
}
@@ -9326,12 +9280,12 @@ idn_timer_expired(void *arg)
IDN_GUNLOCK();
idn_nego_cleanup_check(domid, IDN_NIL_DOMID,
- IDN_NIL_DCPU);
+ IDN_NIL_DCPU);
IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
idn_retry_submit(idn_retry_nego, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_NEGO]);
+ idn_msg_retrytime[(int)IDNRETRY_NEGO]);
}
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
@@ -9354,7 +9308,7 @@ idn_timer_expired(void *arg)
*/
if (tp->t_subtype == (ushort_t)IDNCMD_NODENAME) {
PR_PROTO("%s:%d: timedout waiting for nodename\n",
- proc, domid);
+ proc, domid);
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
break;
@@ -9366,10 +9320,10 @@ idn_timer_expired(void *arg)
int masterid = IDN_GET_MASTERID();
IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs,
- gk_reconfig_last);
+ gk_reconfig_last);
PR_PROTO("%s:%d: RECONFIG trying old masterid = %d\n",
- proc, domid, masterid);
+ proc, domid, masterid);
IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
IDN_SET_NEW_MASTERID(masterid);
@@ -9377,12 +9331,10 @@ idn_timer_expired(void *arg)
IDN_DUNLOCK(domid);
domset = idn.domset.ds_trans_on |
- idn.domset.ds_connected;
+ idn.domset.ds_connected;
idn_unlink_domainset(domset, IDNFIN_NORMAL,
- IDNFIN_ARG_NONE,
- IDNFIN_OPT_RELINK,
- BOARDSET_ALL);
+ IDNFIN_ARG_NONE, IDNFIN_OPT_RELINK, BOARDSET_ALL);
} else {
IDN_GUNLOCK();
IDN_DUNLOCK(domid);
@@ -9402,7 +9354,7 @@ idn_timer_expired(void *arg)
IDN_GUNLOCK();
token = IDN_RETRY_TOKEN(domid, IDNRETRY_CONQ);
idn_retry_submit(idn_retry_query, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_CONQ]);
+ idn_msg_retrytime[(int)IDNRETRY_CONQ]);
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
break;
@@ -9431,9 +9383,9 @@ idn_timer_expired(void *arg)
IDN_GUNLOCK();
DOMAINSET_ADD(idn.domset.ds_relink, domid);
IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
- idn.domset.ds_relink);
+ idn.domset.ds_relink);
idn_disconnect(domid, IDNFIN_FORCE_SOFT,
- IDNFIN_ARG_NONE, IDNFIN_SYNC_NO);
+ IDNFIN_ARG_NONE, IDNFIN_SYNC_NO);
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
break;
@@ -9466,12 +9418,12 @@ idn_timer_expired(void *arg)
}
if (rdyset)
(void) idn_sync_register(domid,
- IDNSYNC_DISCONNECT,
- rdyset, IDNSYNC_REG_REG);
+ IDNSYNC_DISCONNECT,
+ rdyset, IDNSYNC_REG_REG);
token = IDN_RETRY_TOKEN(domid, IDNRETRY_FINQ);
idn_retry_submit(idn_retry_query, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_FINQ]);
+ idn_msg_retrytime[(int)IDNRETRY_FINQ]);
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
break;
@@ -9491,7 +9443,7 @@ idn_timer_expired(void *arg)
IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
idn_retry_submit(idn_retry_fin, NULL, token,
- idn_msg_retrytime[(int)IDNRETRY_FIN]);
+ idn_msg_retrytime[(int)IDNRETRY_FIN]);
IDN_DUNLOCK(domid);
IDN_SYNC_UNLOCK();
break;
@@ -9509,14 +9461,14 @@ idn_timer_expired(void *arg)
if (awol) {
if (strlen(dname) > 0) {
cmn_err(CE_WARN,
- "IDN: 236: domain (%s) [ID %d] not "
- "responding to %s [#%d]",
- dname, domid, op, awolcount);
+ "IDN: 236: domain (%s) [ID %d] not "
+ "responding to %s [#%d]",
+ dname, domid, op, awolcount);
} else {
cmn_err(CE_WARN,
- "IDN: 236: domain [ID %d, CPU %d] not "
- "responding to %s [#%d]",
- domid, dcpu, op, awolcount);
+ "IDN: 236: domain [ID %d, CPU %d] not "
+ "responding to %s [#%d]",
+ domid, dcpu, op, awolcount);
}
}
}
@@ -9537,8 +9489,7 @@ idn_retry_check(uint_t token)
for (i = 0, rp = qp->rq_jobs; i < qp->rq_count; i++, rp = rp->rj_next)
if ((domid == IDN_RETRY_TOKEN2DOMID(rp->rj_token)) &&
- ((key == IDN_RETRY_TYPEALL) ||
- (rp->rj_token == token)))
+ ((key == IDN_RETRY_TYPEALL) || (rp->rj_token == token)))
count++;
mutex_exit(&qp->rq_mutex);
@@ -9582,8 +9533,8 @@ idn_retry_execute(void *arg)
*
*/
static void
-idn_retry_submit(void (*func)(uint_t token, void *arg),
- void *arg, uint_t token, clock_t ticks)
+idn_retry_submit(void (*func)(uint_t token, void *arg), void *arg, uint_t token,
+ clock_t ticks)
{
idn_retry_job_t *rp, *cp;
idn_retry_queue_t *qp;
@@ -9592,7 +9543,7 @@ idn_retry_submit(void (*func)(uint_t token, void *arg),
if (ticks < 0) {
PR_PROTO("%s: (token = 0x%x) WARNING ticks = %ld\n",
- proc, token, ticks);
+ proc, token, ticks);
return;
}
if (ticks == 0) /* At least one tick to get into background */
@@ -9603,13 +9554,11 @@ idn_retry_submit(void (*func)(uint_t token, void *arg),
qp = &idn.retryqueue;
mutex_enter(&qp->rq_mutex);
- for (c = 0, cp = qp->rq_jobs;
- c < qp->rq_count;
- cp = cp->rj_next, c++) {
+ for (c = 0, cp = qp->rq_jobs; c < qp->rq_count; cp = cp->rj_next, c++) {
if (cp->rj_token == token) {
PR_PROTO("%s: token = (%d,0x%x) already present\n",
- proc, IDN_RETRY_TOKEN2DOMID(token),
- IDN_RETRY_TOKEN2TYPE(token));
+ proc, IDN_RETRY_TOKEN2DOMID(token),
+ IDN_RETRY_TOKEN2TYPE(token));
break;
}
}
@@ -9657,8 +9606,8 @@ idn_retry_terminate(uint_t token)
for (i = count = 0, rp = qp->rq_jobs; i < qp->rq_count; i++) {
nrp = rp->rj_next;
if ((domid == IDN_RETRY_TOKEN2DOMID(rp->rj_token)) &&
- ((key == IDN_RETRY_TYPEALL) ||
- (rp->rj_token == token))) {
+ ((key == IDN_RETRY_TYPEALL) ||
+ (rp->rj_token == token))) {
/*
* Turn off onq field as a signal to
* the execution routine that this
@@ -9687,7 +9636,7 @@ idn_retry_terminate(uint_t token)
mutex_exit(&qp->rq_mutex);
PR_PROTO("%s: token = (%d,0x%x), dequeued = %d\n",
- proc, domid, key, count);
+ proc, domid, key, count);
for (; fp; fp = nrp) {
(void) untimeout(fp->rj_id);
@@ -9719,18 +9668,16 @@ idn_protocol_init(int nservers)
if (nservers <= 0) {
cmn_err(CE_WARN,
- "IDN: 237: invalid number (%d) of protocol servers",
- nservers);
+ "IDN: 237: invalid number (%d) of protocol servers",
+ nservers);
return (-1);
}
idn.protocol.p_jobpool = kmem_cache_create("idn_protocol_jobcache",
- sizeof (idn_protojob_t),
- 0, NULL, NULL, NULL,
- NULL, NULL, 0);
+ sizeof (idn_protojob_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
if (idn.protocol.p_jobpool == NULL) {
cmn_err(CE_WARN,
- "IDN: 238: kmem_cache_create(jobcache) failed");
+ "IDN: 238: kmem_cache_create(jobcache) failed");
return (-1);
}
@@ -9839,8 +9786,7 @@ idn_protocol_server(int *id)
procname_t proc = "idn_protocol_server";
if (id == NULL) {
- PR_PROTO("%s: id == NULL, thread exiting\n",
- proc);
+ PR_PROTO("%s: id == NULL, thread exiting\n", proc);
return;
}
ASSERT((*id >= 0) && (*id < idn_protocol_nservers));
@@ -9849,8 +9795,7 @@ idn_protocol_server(int *id)
ASSERT(pq->q_id == *id);
- PR_PROTO("%s: id %d starting up (pq = 0x%p)\n",
- proc, pq->q_id, pq);
+ PR_PROTO("%s: id %d starting up (pq = 0x%p)\n", proc, pq->q_id, pq);
/*CONSTCOND*/
while (1) {
@@ -9869,7 +9814,7 @@ idn_protocol_server(int *id)
pq->q_threadp = NULL;
mutex_exit(&pq->q_mutex);
PR_PROTO("%s: thread (%d) killed...bye bye\n",
- proc, pq->q_id);
+ proc, pq->q_id);
for (jp = jl; jp; jp = jl) {
jl = jp->j_next;
idn_protojob_free(jp);
@@ -9903,7 +9848,7 @@ idn_protocol_server_killall()
procname_t proc = "idn_protocol_server_killall";
PR_PROTO("%s: killing off %d protocol servers\n",
- proc, idn.nservers);
+ proc, idn.nservers);
pq = idn.protocol.p_serverq;
for (i = 0; i < idn.nservers; pq++, i++) {
@@ -9969,8 +9914,8 @@ idn_protojob_submit(int cookie, idn_protojob_t *jp)
INUM2STR(jp->j_msg.m_msgtype, str);
PR_PROTO("%s: job (d=%d, m=0x%x, %s) submitted to "
- "protocol server %d\n", proc, jp->j_msg.m_domid,
- jp->j_msg.m_msgtype, str, serverid);
+ "protocol server %d\n", proc, jp->j_msg.m_domid,
+ jp->j_msg.m_msgtype, str, serverid);
mutex_enter(&pq->q_mutex);
/*
@@ -9987,7 +9932,7 @@ idn_protojob_submit(int cookie, idn_protojob_t *jp)
cv_signal(&pq->q_cv);
} else {
PR_PROTO("%s: protocol server dead. freeing protojob\n",
- proc);
+ proc);
idn_protojob_free(jp);
}
mutex_exit(&pq->q_mutex);
@@ -10003,8 +9948,7 @@ idn_mboxarea_init(idn_mboxtbl_t *mtp, register int ntbls)
ASSERT(mtp && (ntbls > 0));
- PR_PROTO("%s: init mboxtbl (0x%p) ntbls = %d\n",
- proc, mtp, ntbls);
+ PR_PROTO("%s: init mboxtbl (0x%p) ntbls = %d\n", proc, mtp, ntbls);
for (d = 0; d < ntbls; d++) {
register int pd, sd;
@@ -10070,7 +10014,7 @@ idn_mainmbox_init(int domid, int mbx)
ASSERT(IDN_DLOCK_IS_HELD(domid));
PR_PROTO("%s: initializing main %s mailbox for domain %d\n",
- proc, IDNMBOX_IS_RECV(mbx) ? "RECV" : "SEND", domid);
+ proc, IDNMBOX_IS_RECV(mbx) ? "RECV" : "SEND", domid);
cmp = GETSTRUCT(idn_mainmbox_t, IDN_MAX_NETS);
for (c = 0; c < IDN_MAX_NETS; c++) {
@@ -10099,7 +10043,7 @@ idn_mainmbox_reset(int domid, idn_mainmbox_t *cmp)
ASSERT(IDN_DLOCK_IS_EXCL(domid));
PR_PROTO("%s: reseting main %s mailbox for domain %d\n",
- proc, IDNMBOX_IS_RECV(cmp->mm_type) ? "RECV" : "SEND", domid);
+ proc, IDNMBOX_IS_RECV(cmp->mm_type) ? "RECV" : "SEND", domid);
for (c = 0; c < IDN_MAX_NETS; c++) {
mmp = &cmp[c];
@@ -10122,7 +10066,7 @@ idn_mainmbox_deinit(int domid, idn_mainmbox_t *mmp)
ASSERT(IDN_DLOCK_IS_HELD(domid));
PR_PROTO("%s: deinitializing main %s mailbox for domain %d\n",
- proc, IDNMBOX_IS_RECV(mmp->mm_type) ? "RECV" : "SEND", domid);
+ proc, IDNMBOX_IS_RECV(mmp->mm_type) ? "RECV" : "SEND", domid);
ASSERT(idn_domain_is_registered(domid, -1, NULL) == 0);
@@ -10142,7 +10086,7 @@ idn_mainmbox_activate(int domid)
for (c = 0; c < IDN_MAX_NETS; c++)
idn_mainmbox_chan_register(domid, &dp->dmbox.m_send[c],
- &dp->dmbox.m_recv[c], c);
+ &dp->dmbox.m_recv[c], c);
}
/*
@@ -10162,17 +10106,17 @@ idn_mainmbox_deactivate(ushort_t domset)
return;
PR_PROTO("%s: %s deactivating main mailboxes for domset 0x%x\n",
- proc, (domset == (ushort_t)-1) ? "STOP-ALL" : "NORMAL", domset);
+ proc, (domset == (ushort_t)-1) ? "STOP-ALL" : "NORMAL", domset);
svr_count = idn_mainmbox_chan_unregister(domset, -1);
PR_PROTO("%s: deactivated %d chansvrs (domset 0x%x)\n",
- proc, svr_count, domset);
+ proc, svr_count, domset);
}
static void
idn_mainmbox_chan_register(int domid, idn_mainmbox_t *send_mmp,
- idn_mainmbox_t *recv_mmp, int channel)
+ idn_mainmbox_t *recv_mmp, int channel)
{
ASSERT(IDN_DLOCK_IS_HELD(domid));
@@ -10230,9 +10174,8 @@ idn_mainmbox_chan_unregister(ushort_t domset, int channel)
int min_chan, max_chan;
procname_t proc = "idn_mainmbox_chan_unregister";
-
PR_CHAN("%s: deactivating main mailboxes (channel %d) "
- "for domset 0x%x\n", proc, channel, domset);
+ "for domset 0x%x\n", proc, channel, domset);
if (channel == -1) {
min_chan = 0;
@@ -10263,7 +10206,7 @@ idn_mainmbox_chan_unregister(ushort_t domset, int channel)
dd_count++;
}
PR_CHAN("%s: deactivated %d channel mboxes for domset 0x%x, chan %d\n",
- proc, dd_count, domset, channel);
+ proc, dd_count, domset, channel);
return (dd_count);
}
@@ -10278,7 +10221,6 @@ idn_domain_is_registered(int domid, int channel, idn_chanset_t *chansetp)
idn_chanset_t chanset;
procname_t proc = "idn_domain_is_registered";
-
CHANSET_ZERO(chanset);
if (idn.chan_servers == NULL) {
@@ -10316,7 +10258,7 @@ idn_domain_is_registered(int domid, int channel, idn_chanset_t *chansetp)
}
PR_CHAN("%s: domid %d mbox reg'd with %d channels [0x%x] (req=%d)\n",
- proc, domid, regcount, chanset, channel);
+ proc, domid, regcount, chanset, channel);
if (chansetp)
*chansetp = chanset;
@@ -10345,7 +10287,7 @@ idn_mainmbox_flush(int domid, idn_mainmbox_t *mmp)
mbox_type = mmp->mm_type;
ASSERT((mbox_type == IDNMMBOX_TYPE_SEND) ||
- (mbox_type == IDNMMBOX_TYPE_RECV));
+ (mbox_type == IDNMMBOX_TYPE_RECV));
mbox_str = (mbox_type == IDNMMBOX_TYPE_SEND) ? "SEND" : "RECV";
@@ -10370,20 +10312,20 @@ idn_mainmbox_flush(int domid, idn_mainmbox_t *mmp)
*/
if (mmp[c].mm_smr_mboxp) {
PR_CHAN("%s:%d:%s: domain unregistered "
- "w/chan %d - DUMPING SMR reference\n",
- proc, domid, mbox_str, c);
+ "w/chan %d - DUMPING SMR reference\n",
+ proc, domid, mbox_str, c);
lost_io = IDN_MMBOXINDEX_DIFF(mmp[c].mm_qiput,
- mmp[c].mm_qiget);
+ mmp[c].mm_qiget);
#ifdef DEBUG
if (mbox_type == IDNMMBOX_TYPE_RECV) {
PR_CHAN("%s:%d:%s: blowing away %d "
- "incoming pkts\n",
- proc, domid, mbox_str, lost_io);
+ "incoming pkts\n",
+ proc, domid, mbox_str, lost_io);
} else {
PR_CHAN("%s:%d:%s: blowing away %d/%d "
- "outstanding pkts\n",
- proc, domid, mbox_str, lost_io,
- idn_domain[domid].dio);
+ "outstanding pkts\n",
+ proc, domid, mbox_str, lost_io,
+ idn_domain[domid].dio);
}
#endif /* DEBUG */
}
@@ -10393,22 +10335,22 @@ idn_mainmbox_flush(int domid, idn_mainmbox_t *mmp)
}
if (mmp[c].mm_smr_mboxp) {
mbox_csum =
- IDN_CKSUM_MBOX(&mmp[c].mm_smr_mboxp->mt_header);
+ IDN_CKSUM_MBOX(&mmp[c].mm_smr_mboxp->mt_header);
if (!VALID_NWRADDR(mmp[c].mm_smr_mboxp, 4) ||
!VALID_MBOXHDR(&mmp[c].mm_smr_mboxp->mt_header,
- c, mbox_csum)) {
+ c, mbox_csum)) {
lost_io = IDN_MMBOXINDEX_DIFF(mmp[c].mm_qiput,
- mmp[c].mm_qiget);
+ mmp[c].mm_qiget);
#ifdef DEBUG
if (mbox_type == IDNMMBOX_TYPE_RECV) {
PR_CHAN("%s:%d:%s: bad mbox. blowing "
- "away %d incoming pkts\n",
- proc, domid, mbox_str, lost_io);
+ "away %d incoming pkts\n",
+ proc, domid, mbox_str, lost_io);
} else {
PR_CHAN("%s:%d:%s: bad mbox. blowing "
- "away %d/%d outstanding pkts\n",
- proc, domid, mbox_str, lost_io,
- idn_domain[domid].dio);
+ "away %d/%d outstanding pkts\n",
+ proc, domid, mbox_str, lost_io,
+ idn_domain[domid].dio);
}
#endif /* DEBUG */
mmp[c].mm_smr_mboxp = NULL;
@@ -10447,7 +10389,7 @@ idn_mainmbox_flush(int domid, idn_mainmbox_t *mmp)
total_count += count;
PR_CHAN("%s:%d:%s: flushed out %d mbox entries for chan %d\n",
- proc, domid, mbox_str, count, c);
+ proc, domid, mbox_str, count, c);
}
if (total_lost_io && (mbox_type == IDNMMBOX_TYPE_SEND)) {
@@ -10461,11 +10403,11 @@ idn_mainmbox_flush(int domid, idn_mainmbox_t *mmp)
lost_bufs = smr_buf_free_all(domid);
PR_CHAN("%s:%d:%s: flushed %d/%d buffers from slabs\n",
- proc, domid, mbox_str, lost_bufs, total_lost_io);
+ proc, domid, mbox_str, lost_bufs, total_lost_io);
}
PR_CHAN("%s:%d:%s: flushed total of %d mailbox entries (lost %d)\n",
- proc, domid, mbox_str, total_count, total_lost_io);
+ proc, domid, mbox_str, total_count, total_lost_io);
return (total_count);
}
@@ -10488,9 +10430,9 @@ idn_chanserver_bind(int net, int cpuid)
if ((cpuid != -1) && ((cp == NULL) || !cpu_is_online(cp))) {
mutex_exit(&cpu_lock);
cmn_err(CE_WARN,
- "IDN: 239: invalid CPU ID (%d) specified for "
- "IDN net %d",
- cpuid, net);
+ "IDN: 239: invalid CPU ID (%d) specified for "
+ "IDN net %d",
+ cpuid, net);
IDN_CHAN_UNLOCK_GLOBAL(csp);
return;
}
@@ -10516,7 +10458,7 @@ idn_chanserver_bind(int net, int cpuid)
mutex_exit(&cpu_lock);
PR_CHAN("%s: bound net/channel (%d) from cpuid %d to%scpuid %d\n",
- proc, net, ocpuid, tp ? " " : " (pending) ", cpuid);
+ proc, net, ocpuid, tp ? " " : " (pending) ", cpuid);
IDN_CHAN_UNLOCK_GLOBAL(csp);
}
@@ -10534,7 +10476,7 @@ idn_chan_server_syncheader(int channel)
idn_domain_t *ldp = &idn_domain[idn.localid];
idn_mboxtbl_t *mtp;
idn_mboxhdr_t *mhp;
- ushort_t mbox_csum;
+ ushort_t mbox_csum;
procname_t proc = "idn_chan_server_syncheader";
ASSERT(IDN_CHAN_RECV_IS_LOCKED(&idn.chan_servers[channel]));
@@ -10555,24 +10497,23 @@ idn_chan_server_syncheader(int channel)
if (mhp != prev_mhp[channel]) {
prev_mhp[channel] = mhp;
PR_CHAN("%s: chan_server (%d) cookie = 0x%x (exp 0x%x)\n",
- proc, channel, IDN_GET_MBOXHDR_COOKIE(mhp),
- IDN_MAKE_MBOXHDR_COOKIE(0, 0, channel));
+ proc, channel, IDN_GET_MBOXHDR_COOKIE(mhp),
+ IDN_MAKE_MBOXHDR_COOKIE(0, 0, channel));
PR_CHAN("%s: chan_server (%d) actv_ptr = 0x%x (exp 0x%x)\n",
- proc, channel, mhp->mh_svr_active_ptr,
- IDN_ADDR2OFFSET(&mhp->mh_svr_active));
+ proc, channel, mhp->mh_svr_active_ptr,
+ IDN_ADDR2OFFSET(&mhp->mh_svr_active));
PR_CHAN("%s: chan_server (%d) ready_ptr = 0x%x (exp 0x%x)\n",
- proc, channel, mhp->mh_svr_ready_ptr,
- IDN_ADDR2OFFSET(&mhp->mh_svr_ready));
+ proc, channel, mhp->mh_svr_ready_ptr,
+ IDN_ADDR2OFFSET(&mhp->mh_svr_ready));
PR_CHAN("%s: chan_server (%d) mbox_cksum = 0x%x (exp 0x%x)\n",
- proc, channel, (int)mhp->mh_cksum, (int)mbox_csum);
+ proc, channel, (int)mhp->mh_cksum, (int)mbox_csum);
}
#endif /* DEBUG */
if ((IDN_ADDR2OFFSET(&mhp->mh_svr_active) !=
- mhp->mh_svr_active_ptr) ||
- (IDN_ADDR2OFFSET(&mhp->mh_svr_ready) !=
- mhp->mh_svr_ready_ptr) ||
- !VALID_MBOXHDR(mhp, channel, mbox_csum)) {
+ mhp->mh_svr_active_ptr) ||
+ (IDN_ADDR2OFFSET(&mhp->mh_svr_ready) != mhp->mh_svr_ready_ptr) ||
+ !VALID_MBOXHDR(mhp, channel, mbox_csum)) {
idn_chansvr_t *csp;
csp = &idn.chan_servers[channel];
@@ -10580,24 +10521,24 @@ idn_chan_server_syncheader(int channel)
IDN_CHANSVC_MARK_RECV_CORRUPTED(csp);
cmn_err(CE_WARN,
- "IDN: 240: (channel %d) SMR CORRUPTED "
- "- RELINK", channel);
+ "IDN: 240: (channel %d) SMR CORRUPTED "
+ "- RELINK", channel);
cmn_err(CE_CONT,
- "IDN: 240: (channel %d) cookie "
- "(expected 0x%x, actual 0x%x)\n",
- channel,
- IDN_MAKE_MBOXHDR_COOKIE(0, 0, channel),
- mhp->mh_cookie);
+ "IDN: 240: (channel %d) cookie "
+ "(expected 0x%x, actual 0x%x)\n",
+ channel,
+ IDN_MAKE_MBOXHDR_COOKIE(0, 0, channel),
+ mhp->mh_cookie);
cmn_err(CE_CONT,
- "IDN: 240: (channel %d) actv_flg "
- "(expected 0x%x, actual 0x%x)\n",
- channel, mhp->mh_svr_active_ptr,
- IDN_ADDR2OFFSET(&mhp->mh_svr_active));
+ "IDN: 240: (channel %d) actv_flg "
+ "(expected 0x%x, actual 0x%x)\n",
+ channel, mhp->mh_svr_active_ptr,
+ IDN_ADDR2OFFSET(&mhp->mh_svr_active));
cmn_err(CE_CONT,
- "IDN: 240: (channel %d) ready_flg "
- "(expected 0x%x, actual 0x%x)\n",
- channel, mhp->mh_svr_ready_ptr,
- IDN_ADDR2OFFSET(&mhp->mh_svr_ready));
+ "IDN: 240: (channel %d) ready_flg "
+ "(expected 0x%x, actual 0x%x)\n",
+ channel, mhp->mh_svr_ready_ptr,
+ IDN_ADDR2OFFSET(&mhp->mh_svr_ready));
}
mhp = NULL;
@@ -10619,7 +10560,7 @@ idn_chan_server_syncheader(int channel)
for (_d = 0; _d < MAX_DOMAINS; _d++) { \
if (DOMAIN_IN_SET((csp)->ch_recv_domset, _d)) { \
(mmp)[_d] = \
- &idn_domain[_d].dmbox.m_recv[chan]; \
+ &idn_domain[_d].dmbox.m_recv[chan]; \
} else { \
(mmp)[_d] = NULL; \
} \
@@ -10678,7 +10619,7 @@ idn_chan_server(idn_chansvr_t **cspp)
ASSERT(sip);
PR_CHAN("%s: CHANNEL SERVER (channel %d) GOING ACTIVE...\n",
- proc, channel);
+ proc, channel);
IDN_CHAN_LOCK_RECV(csp);
IDN_CHAN_RECV_INPROGRESS(csp);
@@ -10697,9 +10638,9 @@ idn_chan_server(idn_chansvr_t **cspp)
*/
mutex_exit(&cpu_lock);
cmn_err(CE_WARN,
- "IDN: 239: invalid CPU ID (%d) specified for "
- "IDN net %d",
- cpuid, channel);
+ "IDN: 239: invalid CPU ID (%d) specified for "
+ "IDN net %d",
+ cpuid, channel);
} else {
csp->ch_bound_cpuid = cpuid;
affinity_set(csp->ch_bound_cpuid);
@@ -10711,7 +10652,7 @@ idn_chan_server(idn_chansvr_t **cspp)
}
if (csp->ch_bound_cpuid != -1) {
PR_CHAN("%s: thread bound to cpuid %d\n",
- proc, csp->ch_bound_cpuid);
+ proc, csp->ch_bound_cpuid);
}
/*
* Only the first (main) mbox header is used for
@@ -10722,14 +10663,14 @@ idn_chan_server(idn_chansvr_t **cspp)
CHANSVR_SYNC_CACHE(csp, mmp, channel);
mainhp = ((csp->ch_recv_domcount > 0) &&
- IDN_CHANNEL_IS_RECV_ACTIVE(csp))
- ? idn_chan_server_syncheader(channel) : NULL;
+ IDN_CHANNEL_IS_RECV_ACTIVE(csp))
+ ? idn_chan_server_syncheader(channel) : NULL;
if (mainhp && IDN_CHANNEL_IS_RECV_ACTIVE(csp))
mainhp->mh_svr_active = 1;
ASSERT(csp->ch_recv_domcount ?
- (csp->ch_recv_scanset && csp->ch_recv_domset) : 1);
+ (csp->ch_recv_scanset && csp->ch_recv_domset) : 1);
IDN_CHAN_UNLOCK_RECV(csp);
@@ -10763,17 +10704,17 @@ idn_chan_server(idn_chansvr_t **cspp)
* we wrap around. Done for performance.
*/
if (!IDN_CHANNEL_IS_RECV_ACTIVE(csp) ||
- csp->ch_recv.c_checkin ||
- (idn.state != IDNGS_ONLINE)) {
+ csp->ch_recv.c_checkin ||
+ (idn.state != IDNGS_ONLINE)) {
PR_DATA("%s: (channel %d) %s\n",
- proc, channel,
- IDN_CHANNEL_IS_DETACHED(csp)
- ? "DEAD" :
- IDN_CHANNEL_IS_PENDING(csp)
- ? "IDLED" :
- IDN_CHANNEL_IS_ACTIVE(csp)
- ? "ACTIVE" : "DISABLED");
+ proc, channel,
+ IDN_CHANNEL_IS_DETACHED(csp)
+ ? "DEAD" :
+ IDN_CHANNEL_IS_PENDING(csp)
+ ? "IDLED" :
+ IDN_CHANNEL_IS_ACTIVE(csp)
+ ? "ACTIVE" : "DISABLED");
goto cc_sleep;
}
}
@@ -10824,9 +10765,9 @@ idn_chan_server(idn_chansvr_t **cspp)
IDN_KSTAT_INC(sip, si_ierrors);
if (!(mmp[domid]->mm_flags & IDNMMBOX_FLAG_CORRUPTED)) {
cmn_err(CE_WARN,
- "IDN: 241: [recv] (domain %d, "
- "channel %d) SMR CORRUPTED - RELINK",
- domid, channel);
+ "IDN: 241: [recv] (domain %d, "
+ "channel %d) SMR CORRUPTED - RELINK",
+ domid, channel);
mmp[domid]->mm_flags |= IDNMMBOX_FLAG_CORRUPTED;
}
empty = 0;
@@ -10855,8 +10796,8 @@ idn_chan_server(idn_chansvr_t **cspp)
} else {
PR_DATA("%s: (channel %d) pkt (off 0x%x, "
- "qiget %d) from domain %d\n",
- proc, channel, bufoffset, qi, domid);
+ "qiget %d) from domain %d\n",
+ proc, channel, bufoffset, qi, domid);
#ifdef DEBUG
hdrp = IDN_BUF2HDR(IDN_OFFSET2ADDR(bufoffset));
@@ -10865,17 +10806,17 @@ idn_chan_server(idn_chansvr_t **cspp)
#endif /* DEBUG */
if (idn_recv_mboxdata(channel,
- IDN_OFFSET2ADDR(bufoffset)) < 0) {
+ IDN_OFFSET2ADDR(bufoffset)) < 0) {
mutex_enter(&mmp[domid]->mm_mutex);
if (!(mmp[domid]->mm_flags &
- IDNMMBOX_FLAG_CORRUPTED)) {
+ IDNMMBOX_FLAG_CORRUPTED)) {
cmn_err(CE_WARN,
- "IDN: 241: [recv] (domain "
- "%d, channel %d) SMR "
- "CORRUPTED - RELINK",
- domid, channel);
+ "IDN: 241: [recv] (domain "
+ "%d, channel %d) SMR "
+ "CORRUPTED - RELINK",
+ domid, channel);
mmp[domid]->mm_flags |=
- IDNMMBOX_FLAG_CORRUPTED;
+ IDNMMBOX_FLAG_CORRUPTED;
}
mutex_exit(&mmp[domid]->mm_mutex);
}
@@ -10904,7 +10845,7 @@ cc_next:
idleloops = 0;
PR_DATA("%s: (channel %d) dom=%d, pktcnt=%d\n",
- proc, channel, domid, pktcount);
+ proc, channel, domid, pktcount);
}
continue;
@@ -10914,7 +10855,7 @@ cc_slowdown:
#ifdef DEBUG
if (idleloops == 0) {
PR_DATA("%s: (channel %d) going SOFT IDLE...\n",
- proc, channel);
+ proc, channel);
}
#endif /* DEBUG */
if (idleloops++ < IDN_NETSVR_SPIN_COUNT) {
@@ -10938,28 +10879,27 @@ cc_die:
ASSERT(IDN_CHAN_RECV_IS_LOCKED(csp));
if (!IDN_CHANNEL_IS_RECV_ACTIVE(csp) &&
- IDN_CHANNEL_IS_DETACHED(csp)) {
+ IDN_CHANNEL_IS_DETACHED(csp)) {
/*
* Time to die...
*/
PR_CHAN("%s: (channel %d) serviced %d "
- "packets, drop = %d\n", proc, channel,
- tot_pktcount, tot_dropcount);
+ "packets, drop = %d\n", proc, channel,
+ tot_pktcount, tot_dropcount);
PR_CHAN("%s: (channel %d) TERMINATING\n",
- proc, channel);
+ proc, channel);
PR_CHAN("%s: (channel %d) ch_morguep = %p\n",
- proc, channel, csp->ch_recv_morguep);
+ proc, channel, csp->ch_recv_morguep);
csp->ch_recv_threadp = NULL;
#ifdef DEBUG
for (index = 0; index < csp->ch_recv_domcount;
- index++) {
+ index++) {
if ((int)((csp->ch_recv_scanset >>
- (index*4)) & 0xf)
- == domid) {
+ (index*4)) & 0xf) == domid) {
PR_DATA("%s: WARNING (channel %d) "
- "DROPPING domid %d...\n",
- proc, channel, domid);
+ "DROPPING domid %d...\n",
+ proc, channel, domid);
}
}
#endif /* DEBUG */
@@ -10976,20 +10916,19 @@ cc_die:
do {
if (IDN_CHANNEL_IS_DETACHED(csp)) {
PR_CHAN("%s: (channel %d) going to DIE...\n",
- proc, channel);
+ proc, channel);
goto cc_die;
}
#ifdef DEBUG
if (IDN_CHANNEL_IS_RECV_ACTIVE(csp) &&
- (csp->ch_recv_waittime <=
- IDN_NETSVR_WAIT_MAX)) {
+ (csp->ch_recv_waittime <= IDN_NETSVR_WAIT_MAX)) {
PR_CHAN("%s: (channel %d) going SOFT IDLE "
- "(waittime = %d ticks)...\n",
- proc, channel,
- csp->ch_recv_waittime);
+ "(waittime = %d ticks)...\n",
+ proc, channel,
+ csp->ch_recv_waittime);
} else {
PR_CHAN("%s: (channel %d) going "
- "HARD IDLE...\n", proc, channel);
+ "HARD IDLE...\n", proc, channel);
}
#endif /* DEBUG */
IDN_CHAN_RECV_DONE(csp);
@@ -11001,16 +10940,15 @@ cc_die:
*/
while (csp->ch_recv.c_checkin)
cv_wait(&csp->ch_recv_cv,
- &csp->ch_recv.c_mutex);
+ &csp->ch_recv.c_mutex);
if (csp->ch_recv_waittime > IDN_NETSVR_WAIT_MAX)
cv_wait(&csp->ch_recv_cv,
- &csp->ch_recv.c_mutex);
+ &csp->ch_recv.c_mutex);
else
- (void) cv_timedwait(&csp->ch_recv_cv,
- &csp->ch_recv.c_mutex,
- lbolt +
- csp->ch_recv_waittime);
+ (void) cv_reltimedwait(&csp->ch_recv_cv,
+ &csp->ch_recv.c_mutex,
+ csp->ch_recv_waittime, TR_CLOCK_TICK);
IDN_CHAN_RECV_INPROGRESS(csp);
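In the same vein, the hunk above replaces an absolute-deadline cv_timedwait() call, whose wakeup time was computed as lbolt plus a delta, with cv_reltimedwait(), which takes the relative delay directly along with a time-resolution hint. A minimal sketch of the pattern, assuming a kernel context; example_wait is a hypothetical wrapper and not a function in idn_proto.c.

	#include <sys/types.h>
	#include <sys/ksynch.h>
	#include <sys/time.h>

	/*
	 * Old style built an absolute deadline from the lbolt global:
	 *	(void) cv_timedwait(cvp, mp, lbolt + delta);
	 * New style passes the relative delay; TR_CLOCK_TICK indicates
	 * that the delay is expressed in clock ticks.
	 */
	static void
	example_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t delta)
	{
		/* mp must be held on entry, as with cv_timedwait() */
		(void) cv_reltimedwait(cvp, mp, delta, TR_CLOCK_TICK);
	}
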
@@ -11018,7 +10956,7 @@ cc_die:
if (csp->ch_recv_waittime <= IDN_NETSVR_WAIT_MAX)
csp->ch_recv_waittime <<=
- IDN_NETSVR_WAIT_SHIFT;
+ IDN_NETSVR_WAIT_SHIFT;
} while (!IDN_CHANNEL_IS_RECV_ACTIVE(csp));
@@ -11110,7 +11048,7 @@ idn_chan_flush(idn_chansvr_t *csp)
* enough to respond to us.
*/
PR_CHAN("%s: sending FLUSH (%x) to channel %d\n",
- proc, flush_type, csp->ch_id);
+ proc, flush_type, csp->ch_id);
(void) putnextctl1(rq, M_FLUSH, flush_type);
}
@@ -11154,7 +11092,7 @@ idn_chan_action(int channel, idn_chanaction_t chanaction, int wait)
csp = &idn.chan_servers[channel];
PR_CHAN("%s: requesting %s for channel %d\n",
- proc, chanaction_str[(int)chanaction], channel);
+ proc, chanaction_str[(int)chanaction], channel);
csend = &csp->ch_send;
crecv = &csp->ch_recv;
@@ -11184,7 +11122,7 @@ idn_chan_action(int channel, idn_chanaction_t chanaction, int wait)
is_running = 0;
if ((csend->c_inprogress || crecv->c_inprogress) &&
- wait && (csp->ch_recv_threadp != curthread)) {
+ wait && (csp->ch_recv_threadp != curthread)) {
rw_enter(&idn.struprwlock, RW_READER);
if ((sip = IDN_INST2SIP(channel)) != NULL) {
@@ -11265,7 +11203,7 @@ idn_chan_action(int channel, idn_chanaction_t chanaction, int wait)
while (csend->c_inprogress) {
csend->c_waiters++;
cv_wait(&csend->c_cv,
- &csend->c_mutex);
+ &csend->c_mutex);
csend->c_waiters--;
}
/*
@@ -11282,7 +11220,7 @@ idn_chan_action(int channel, idn_chanaction_t chanaction, int wait)
while (crecv->c_inprogress) {
crecv->c_waiters++;
cv_wait(&crecv->c_cv,
- &crecv->c_mutex);
+ &crecv->c_mutex);
crecv->c_waiters--;
}
mutex_enter(&csend->c_mutex);
@@ -11317,7 +11255,7 @@ idn_chan_action(int channel, idn_chanaction_t chanaction, int wait)
* ALL leave with locks held.
*/
PR_CHAN("%s: action (%s) for channel %d - COMPLETED\n",
- proc, chanaction_str[(int)chanaction], channel);
+ proc, chanaction_str[(int)chanaction], channel);
break;
case IDNCHAN_ACTION_ATTACH:
@@ -11342,12 +11280,11 @@ idn_chan_action(int channel, idn_chanaction_t chanaction, int wait)
* flow, so obviously no point in attempting to wake
* ourself up!.
*/
- if (csp->ch_recv_threadp &&
- (csp->ch_recv_threadp != curthread))
+ if (csp->ch_recv_threadp && (csp->ch_recv_threadp != curthread))
cv_signal(&csp->ch_recv_cv);
PR_CHAN("%s: action (%s) for channel %d - COMPLETED\n",
- proc, chanaction_str[(int)chanaction], channel);
+ proc, chanaction_str[(int)chanaction], channel);
/*
* Leaves with lock released.
@@ -11369,9 +11306,8 @@ idn_chan_addmbox(int channel, ushort_t domset)
register int d;
procname_t proc = "idn_chan_addmbox";
-
PR_CHAN("%s: adding domset 0x%x main mailboxes to channel %d\n",
- proc, domset, channel);
+ proc, domset, channel);
ASSERT(idn.chan_servers);
@@ -11399,13 +11335,13 @@ idn_chan_addmbox(int channel, ushort_t domset)
IDN_CHAN_DOMAIN_REGISTER(csp, d);
PR_CHAN("%s: domain %d (channel %d) RECV (pending) "
- "scanset = 0x%lx\n", proc, d, channel,
- csp->ch_recv_scanset_pending);
+ "scanset = 0x%lx\n", proc, d, channel,
+ csp->ch_recv_scanset_pending);
PR_CHAN("%s: domain %d (channel %d) domset = 0x%x\n",
- proc, d, channel, (uint_t)csp->ch_reg_domset);
+ proc, d, channel, (uint_t)csp->ch_reg_domset);
CHECKPOINT_OPENED(IDNSB_CHKPT_CHAN,
- idn_domain[d].dhw.dh_boardset, 1);
+ idn_domain[d].dhw.dh_boardset, 1);
}
if (domset)
csp->ch_recv_changed = 1;
@@ -11420,9 +11356,8 @@ idn_chan_delmbox(int channel, ushort_t domset)
register int d;
procname_t proc = "idn_chan_delmbox";
-
PR_CHAN("%s: deleting domset 0x%x main mailboxes from channel %d\n",
- proc, domset, channel);
+ proc, domset, channel);
ASSERT(idn.chan_servers);
@@ -11458,13 +11393,13 @@ idn_chan_delmbox(int channel, ushort_t domset)
IDN_CHAN_DOMAIN_UNREGISTER(csp, d);
PR_CHAN("%s: domain %d (channel %d) RECV (pending) "
- "scanset = 0x%lx\n", proc, d, channel,
- csp->ch_recv_scanset_pending);
+ "scanset = 0x%lx\n", proc, d, channel,
+ csp->ch_recv_scanset_pending);
PR_CHAN("%s: domain %d (channel %d) domset = 0x%x\n",
- proc, d, channel, (uint_t)csp->ch_reg_domset);
+ proc, d, channel, (uint_t)csp->ch_reg_domset);
CHECKPOINT_CLOSED(IDNSB_CHKPT_CHAN,
- idn_domain[d].dhw.dh_boardset, 2);
+ idn_domain[d].dhw.dh_boardset, 2);
}
if (domset)
@@ -11484,23 +11419,23 @@ idn_valid_etherheader(struct ether_header *ehp)
return (0);
if ((eap[IDNETHER_COOKIE1] != IDNETHER_COOKIE1_VAL) &&
- (eap[IDNETHER_COOKIE1] != 0xff))
+ (eap[IDNETHER_COOKIE1] != 0xff))
return (0);
if ((eap[IDNETHER_COOKIE2] != IDNETHER_COOKIE2_VAL) &&
- (eap[IDNETHER_COOKIE2] != 0xff))
+ (eap[IDNETHER_COOKIE2] != 0xff))
return (0);
if ((eap[IDNETHER_RESERVED] != IDNETHER_RESERVED_VAL) &&
- (eap[IDNETHER_RESERVED] != 0xff))
+ (eap[IDNETHER_RESERVED] != 0xff))
return (0);
if (!VALID_UCHANNEL(eap[IDNETHER_CHANNEL]) &&
- (eap[IDNETHER_CHANNEL] != 0xff))
+ (eap[IDNETHER_CHANNEL] != 0xff))
return (0);
if (!VALID_UDOMAINID(IDN_NETID2DOMID(eap[IDNETHER_NETID])) &&
- (eap[IDNETHER_NETID] != 0xff))
+ (eap[IDNETHER_NETID] != 0xff))
return (0);
return (1);
@@ -11539,7 +11474,7 @@ idn_send_mboxdata(int domid, struct idn *sip, int channel, caddr_t bufp)
if (mmp->mm_smr_mboxp == NULL) {
PR_DATA("%s: (d %d, chn %d) mm_smr_mboxp == NULL\n",
- proc, domid, channel);
+ proc, domid, channel);
IDN_KSTAT_INC(sip, si_linkdown);
rv = ENOLINK;
goto send_err;
@@ -11547,14 +11482,14 @@ idn_send_mboxdata(int domid, struct idn *sip, int channel, caddr_t bufp)
mbox_csum = IDN_CKSUM_MBOX(&mmp->mm_smr_mboxp->mt_header);
if (mbox_csum != mmp->mm_smr_mboxp->mt_header.mh_cksum) {
PR_DATA("%s: (d %d, chn %d) mbox hdr cksum (%d) "
- "!= actual (%d)\n",
- proc, domid, channel, mbox_csum,
- mmp->mm_smr_mboxp->mt_header.mh_cksum);
+ "!= actual (%d)\n",
+ proc, domid, channel, mbox_csum,
+ mmp->mm_smr_mboxp->mt_header.mh_cksum);
if ((mmp->mm_flags & IDNMMBOX_FLAG_CORRUPTED) == 0) {
cmn_err(CE_WARN,
- "IDN: 241: [send] (domain %d, "
- "channel %d) SMR CORRUPTED - RELINK",
- domid, channel);
+ "IDN: 241: [send] (domain %d, "
+ "channel %d) SMR CORRUPTED - RELINK",
+ domid, channel);
mmp->mm_flags |= IDNMMBOX_FLAG_CORRUPTED;
}
IDN_KSTAT_INC(sip, si_mboxcrc);
@@ -11574,7 +11509,7 @@ idn_send_mboxdata(int domid, struct idn *sip, int channel, caddr_t bufp)
if (mqp[qi].ms_owner) {
PR_DATA("%s: mailbox FULL (qiput=%d, qiget=%d)\n",
- proc, mmp->mm_qiput, mmp->mm_qiget);
+ proc, mmp->mm_qiput, mmp->mm_qiget);
IDN_KSTAT_INC(sip, si_txfull);
rv = ENOSPC;
goto send_err;
@@ -11592,8 +11527,8 @@ idn_send_mboxdata(int domid, struct idn *sip, int channel, caddr_t bufp)
recl_bufoffset = IDN_BFRAME2OFFSET(mqp[qi].ms_bframe);
PR_DATA("%s: attempting reclaim (domain %d) "
- "(qiput=%d, b_off=0x%x)\n",
- proc, domid, qi, recl_bufoffset);
+ "(qiput=%d, b_off=0x%x)\n",
+ proc, domid, qi, recl_bufoffset);
if (VALID_NWROFFSET(recl_bufoffset, IDN_SMR_BUFSIZE)) {
int recl;
@@ -11614,17 +11549,17 @@ idn_send_mboxdata(int domid, struct idn *sip, int channel, caddr_t bufp)
#ifdef DEBUG
if (recl == 0) {
PR_DATA("%s: SUCCESSFULLY reclaimed buf "
- "(domain %d)\n", proc, domid);
+ "(domain %d)\n", proc, domid);
} else {
PR_DATA("%s: WARNING: reclaim failed (FREE) "
- "(domain %d)\n", proc, domid);
+ "(domain %d)\n", proc, domid);
}
#endif /* DEBUG */
} else {
IDN_KSTAT_INC(sip, si_smraddr);
IDN_KSTAT_INC(sip, si_reclaim);
PR_DATA("%s: WARNING: reclaim failed (BAD OFFSET) "
- "(domain %d)\n", proc, domid);
+ "(domain %d)\n", proc, domid);
}
}
@@ -11652,8 +11587,7 @@ idn_send_mboxdata(int domid, struct idn *sip, int channel, caddr_t bufp)
mt.mt_mtype = IDNP_DATA;
mt.mt_atype = 0;
IDN_KSTAT_INC(sip, si_xdcall);
- (void) IDNXDC(domid, &mt, (uint_t)dst.net.chan,
- 0, 0, 0);
+ (void) IDNXDC(domid, &mt, (uint_t)dst.net.chan, 0, 0, 0);
}
mutex_exit(&mmp->mm_mutex);
IDN_KSTAT_INC(sip, si_opackets);
@@ -11698,7 +11632,7 @@ idn_recv_mboxdata(int channel, caddr_t bufp)
if (csum != hdrp->b_cksum) {
PR_DATA("%s: bad checksum(%x) != expected(%x)\n",
- proc, (uint_t)csum, (uint_t)hdrp->b_cksum);
+ proc, (uint_t)csum, (uint_t)hdrp->b_cksum);
IDN_KSTAT_INC(sip, si_crc);
IDN_KSTAT_INC(sip, si_fcs_errors);
rv = -1;
@@ -11713,7 +11647,7 @@ idn_recv_mboxdata(int channel, caddr_t bufp)
if (dst.netaddr != daddr.netaddr) {
PR_DATA("%s: wrong dest netaddr (0x%x), expected (0x%x)\n",
- proc, dst.netaddr, daddr.netaddr);
+ proc, dst.netaddr, daddr.netaddr);
IDN_KSTAT_INC(sip, si_nolink);
IDN_KSTAT_INC(sip, si_macrcv_errors);
goto recv_err;
@@ -11723,7 +11657,7 @@ idn_recv_mboxdata(int channel, caddr_t bufp)
if ((pktlen <= 0) || (pktlen > IDN_DATA_SIZE)) {
PR_DATA("%s: invalid packet length (%d) <= 0 || > %lu\n",
- proc, pktlen, IDN_DATA_SIZE);
+ proc, pktlen, IDN_DATA_SIZE);
IDN_KSTAT_INC(sip, si_buff);
IDN_KSTAT_INC(sip, si_toolong_errors);
goto recv_err;
@@ -11802,7 +11736,7 @@ idn_reclaim_mboxdata(int domid, int channel, int nbufs)
dp = &idn_domain[domid];
PR_DATA("%s: requested %d buffers from domain %d\n",
- proc, nbufs, domid);
+ proc, nbufs, domid);
if (lock_try(&dp->dreclaim_inprogress) == 0) {
/*
@@ -11835,7 +11769,7 @@ idn_reclaim_mboxdata(int domid, int channel, int nbufs)
if (mmp->mm_smr_mboxp == NULL) {
PR_DATA("%s: no smr pointer for domid %d, chan %d\n",
- proc, domid, (int)mmp->mm_channel);
+ proc, domid, (int)mmp->mm_channel);
ASSERT(mmp->mm_qiget == mmp->mm_qiput);
mutex_exit(&mmp->mm_mutex);
IDN_MBOXCHAN_INC(mi);
@@ -11844,9 +11778,9 @@ idn_reclaim_mboxdata(int domid, int channel, int nbufs)
mbox_csum = IDN_CKSUM_MBOX(&mmp->mm_smr_mboxp->mt_header);
if (mbox_csum != mmp->mm_smr_mboxp->mt_header.mh_cksum) {
PR_DATA("%s: (d %d, chn %d) mbox hdr "
- "cksum (%d) != actual (%d)\n",
- proc, domid, (int)mmp->mm_channel, mbox_csum,
- mmp->mm_smr_mboxp->mt_header.mh_cksum);
+ "cksum (%d) != actual (%d)\n",
+ proc, domid, (int)mmp->mm_channel, mbox_csum,
+ mmp->mm_smr_mboxp->mt_header.mh_cksum);
IDN_KSTAT_INC(sip, si_mboxcrc);
IDN_KSTAT_INC(sip, si_oerrors);
mutex_exit(&mmp->mm_mutex);
@@ -11857,8 +11791,8 @@ idn_reclaim_mboxdata(int domid, int channel, int nbufs)
qi = mmp->mm_qiget;
while (!mqp[qi].ms_owner &&
- (mqp[qi].ms_flag & IDN_MBOXMSG_FLAG_RECLAIM) &&
- nbufs) {
+ (mqp[qi].ms_flag & IDN_MBOXMSG_FLAG_RECLAIM) &&
+ nbufs) {
idn_mboxmsg_t *msp;
int badbuf;
@@ -11867,11 +11801,11 @@ idn_reclaim_mboxdata(int domid, int channel, int nbufs)
if (msp->ms_flag & IDN_MBOXMSG_FLAG_ERRMASK) {
PR_DATA("%s: msg.flag ERROR(0x%x) (off=0x%x, "
- "domid=%d, qiget=%d)\n", proc,
- (uint_t)(msp->ms_flag &
- IDN_MBOXMSG_FLAG_ERRMASK),
- IDN_BFRAME2OFFSET(msp->ms_bframe),
- domid, qi);
+ "domid=%d, qiget=%d)\n", proc,
+ (uint_t)(msp->ms_flag &
+ IDN_MBOXMSG_FLAG_ERRMASK),
+ IDN_BFRAME2OFFSET(msp->ms_bframe),
+ domid, qi);
}
prev = curr;
curr = IDN_BFRAME2OFFSET(mqp[qi].ms_bframe);
@@ -11895,15 +11829,15 @@ idn_reclaim_mboxdata(int domid, int channel, int nbufs)
IDN_KSTAT_INC(sip, si_fcs_errors);
IDN_KSTAT_INC(sip, si_reclaim);
if (!(mmp->mm_flags &
- IDNMMBOX_FLAG_CORRUPTED)) {
+ IDNMMBOX_FLAG_CORRUPTED)) {
cmn_err(CE_WARN,
- "IDN: 241: [send] "
- "(domain %d, channel "
- "%d) SMR CORRUPTED - "
- "RELINK",
- domid, channel);
+ "IDN: 241: [send] "
+ "(domain %d, channel "
+ "%d) SMR CORRUPTED - "
+ "RELINK",
+ domid, channel);
mmp->mm_flags |=
- IDNMMBOX_FLAG_CORRUPTED;
+ IDNMMBOX_FLAG_CORRUPTED;
}
} else if (reclaim_list == IDN_NIL_SMROFFSET) {
@@ -11945,7 +11879,7 @@ idn_reclaim_mboxdata(int domid, int channel, int nbufs)
}
PR_DATA("%s: reclaimed %d buffers from domain %d\n",
- proc, reclaim_cnt, domid);
+ proc, reclaim_cnt, domid);
if (reclaim_cnt == 0) {
lock_clear(&dp->dreclaim_inprogress);
@@ -11970,8 +11904,8 @@ idn_reclaim_mboxdata(int domid, int channel, int nbufs)
* These buffers are effectively lost.
*/
cmn_err(CE_WARN,
- "IDN: 241: [send] (domain %d, channel %d) SMR "
- "CORRUPTED - RELINK", domid, channel);
+ "IDN: 241: [send] (domain %d, channel %d) SMR "
+ "CORRUPTED - RELINK", domid, channel);
break;
}
@@ -11989,7 +11923,7 @@ idn_reclaim_mboxdata(int domid, int channel, int nbufs)
#ifdef DEBUG
if (free_cnt != reclaim_cnt) {
PR_DATA("%s: *** WARNING *** freecnt(%d) != reclaim_cnt (%d)\n",
- proc, free_cnt, reclaim_cnt);
+ proc, free_cnt, reclaim_cnt);
}
#endif /* DEBUG */
@@ -12069,12 +12003,12 @@ idn_signal_data_server(int domid, ushort_t channel)
}
if (channel == IDN_BROADCAST_ALLCHAN) {
PR_DATA("%s: requested signal to ALL channels on domain %d\n",
- proc, domid);
+ proc, domid);
min_chan = 0;
max_chan = IDN_MAX_NETS - 1;
} else {
PR_DATA("%s: requested signal to channel %d on domain %d\n",
- proc, channel, domid);
+ proc, channel, domid);
min_chan = max_chan = (int)channel;
}
mmp += min_chan;
@@ -12089,7 +12023,7 @@ idn_signal_data_server(int domid, ushort_t channel)
if (csp->ch_recv.c_checkin) {
PR_DATA("%s: chansvr (%d) for domid %d CHECK-IN\n",
- proc, c, domid);
+ proc, c, domid);
continue;
}
@@ -12098,7 +12032,7 @@ idn_signal_data_server(int domid, ushort_t channel)
* Failed to grab lock, server must be active.
*/
PR_DATA("%s: chansvr (%d) for domid %d already actv\n",
- proc, c, domid);
+ proc, c, domid);
continue;
}
@@ -12118,7 +12052,7 @@ idn_signal_data_server(int domid, ushort_t channel)
if (IDN_CHANNEL_IS_RECV_ACTIVE(csp) == 0) {
IDN_CHAN_UNLOCK_RECV(csp);
PR_DATA("%s: chansvr (%d) for domid %d inactive\n",
- proc, c, domid);
+ proc, c, domid);
continue;
}
@@ -12152,14 +12086,14 @@ idn_signal_data_server(int domid, ushort_t channel)
*/
IDN_CHAN_UNLOCK_RECV(csp);
PR_DATA("%s: chansvr (%d) for domid %d already actv\n",
- proc, c, domid);
+ proc, c, domid);
continue;
}
ASSERT(csp == &idn.chan_servers[c]);
PR_DATA("%s: signaling data dispatcher for chan %d dom %d\n",
- proc, c, domid);
+ proc, c, domid);
ASSERT(csp);
cv_signal(&csp->ch_recv_cv);
IDN_CHAN_UNLOCK_RECV(csp);
@@ -12178,7 +12112,7 @@ idn_signal_data_server(int domid, ushort_t channel)
send_dresp:
PR_DATA("%s: sending NACK (%s) back to domain %d (cpu %d)\n",
- proc, idnnack_str[nacktype], domid, idn_domain[domid].dcpu);
+ proc, idnnack_str[nacktype], domid, idn_domain[domid].dcpu);
idn_send_dataresp(domid, nacktype);
@@ -12195,9 +12129,9 @@ idn_recv_data(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
procname_t proc = "idn_recv_data";
PR_PROTO("%s:%d: DATA message received (msg = 0x%x, msgarg = 0x%x)\n",
- proc, domid, msg, msgarg);
+ proc, domid, msg, msgarg);
PR_PROTO("%s:%d: xargs = (0x%x, 0x%x, 0x%x, 0x%x)\n",
- proc, domid, xargs[0], xargs[1], xargs[2], xargs[3]);
+ proc, domid, xargs[0], xargs[1], xargs[2], xargs[3]);
#endif /* DEBUG */
return (0);
@@ -12254,8 +12188,8 @@ idn_open_channel(int channel)
if (channel >= IDN_MAX_NETS) {
cmn_err(CE_WARN,
- "IDN: 242: maximum channels (%d) already open",
- IDN_MAX_NETS);
+ "IDN: 242: maximum channels (%d) already open",
+ IDN_MAX_NETS);
return (-1);
}
IDN_GLOCK_EXCL();
@@ -12297,10 +12231,10 @@ idn_open_channel(int channel)
*/
ASSERT(idn.nchannels > 0);
IDN_WINDOW_EMAX = IDN_WINDOW_MAX +
- ((idn.nchannels - 1) * IDN_WINDOW_INCR);
+ ((idn.nchannels - 1) * IDN_WINDOW_INCR);
PR_CHAN("%s: channel %d is OPEN (nchannels = %d)\n",
- proc, channel, idn.nchannels);
+ proc, channel, idn.nchannels);
masterid = IDN_GET_MASTERID();
IDN_GUNLOCK();
@@ -12316,7 +12250,7 @@ idn_open_channel(int channel)
IDN_DLOCK_SHARED(masterid);
if (dp->dvote.v.master && (dp->dstate == IDNDS_CONNECTED))
(void) idn_activate_channel(CHANSET(channel),
- IDNCHAN_ONLINE);
+ IDNCHAN_ONLINE);
IDN_DUNLOCK(masterid);
}
@@ -12360,14 +12294,14 @@ idn_close_channel(int channel, idn_chanop_t chanop)
IDN_WINDOW_EMAX = 0;
else
IDN_WINDOW_EMAX = IDN_WINDOW_MAX +
- ((idn.nchannels - 1) * IDN_WINDOW_INCR);
+ ((idn.nchannels - 1) * IDN_WINDOW_INCR);
}
PR_CHAN("%s: channel %d is (%s) CLOSED (nchannels = %d)\n",
- proc, channel,
- (chanop == IDNCHAN_SOFT_CLOSE) ? "SOFT"
- : (chanop == IDNCHAN_HARD_CLOSE) ? "HARD" : "OFFLINE",
- idn.nchannels);
+ proc, channel,
+ (chanop == IDNCHAN_SOFT_CLOSE) ? "SOFT"
+ : (chanop == IDNCHAN_HARD_CLOSE) ? "HARD" : "OFFLINE",
+ idn.nchannels);
IDN_CHAN_UNLOCK_GLOBAL(csp);
IDN_GUNLOCK();
@@ -12380,7 +12314,7 @@ idn_activate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
procname_t proc = "idn_activate_channel";
PR_CHAN("%s: chanset = 0x%x, chanop = %s\n",
- proc, chanset, chanop_str[chanop]);
+ proc, chanset, chanop_str[chanop]);
if (idn.state != IDNGS_ONLINE) {
/*
@@ -12388,7 +12322,7 @@ idn_activate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
* domain is connected and thus has a master.
*/
PR_CHAN("%s: local domain not connected. no data servers\n",
- proc);
+ proc);
return (-1);
}
@@ -12412,8 +12346,8 @@ idn_activate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
*/
if (IDN_CHAN_TRYLOCK_GLOBAL(csp) == 0) {
PR_CHAN("%s: failed to acquire global "
- "lock for channel %d\n",
- proc, c);
+ "lock for channel %d\n",
+ proc, c);
continue;
}
}
@@ -12444,12 +12378,12 @@ idn_activate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
if (idn_activate_channel_services(c) >= 0) {
PR_CHAN("%s: Setting channel %d ACTIVE\n",
- proc, c);
+ proc, c);
IDN_CHANSVC_MARK_ACTIVE(csp);
rv++;
}
} else if (!IDN_CHANNEL_IS_PENDING(csp) &&
- (chanop == IDNCHAN_ONLINE)) {
+ (chanop == IDNCHAN_ONLINE)) {
PR_CHAN("%s: Setting channel %d PENDING\n", proc, c);
IDN_CHANSVC_MARK_PENDING(csp);
@@ -12461,9 +12395,9 @@ idn_activate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
* touch it.
*/
if (IDN_CHANNEL_IS_ENABLED(csp) &&
- ((mainhp = idn_chan_server_syncheader(c)) != NULL)) {
+ ((mainhp = idn_chan_server_syncheader(c)) != NULL)) {
PR_CHAN("%s: marking chansvr (mhp=0x%p) %d READY\n",
- proc, mainhp, c);
+ proc, mainhp, c);
mainhp->mh_svr_ready = 1;
}
@@ -12491,9 +12425,8 @@ idn_deactivate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
int c;
procname_t proc = "idn_deactivate_channel";
-
PR_CHAN("%s: chanset = 0x%x, chanop = %s\n",
- proc, chanset, chanop_str[chanop]);
+ proc, chanset, chanop_str[chanop]);
for (c = 0; c < IDN_MAX_NETS; c++) {
idn_chansvr_t *csp;
@@ -12507,17 +12440,17 @@ idn_deactivate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
IDN_CHAN_LOCK_GLOBAL(csp);
if (((chanop == IDNCHAN_SOFT_CLOSE) &&
- !IDN_CHANNEL_IS_ACTIVE(csp)) ||
- ((chanop == IDNCHAN_HARD_CLOSE) &&
- IDN_CHANNEL_IS_DETACHED(csp)) ||
- ((chanop == IDNCHAN_OFFLINE) &&
- !IDN_CHANNEL_IS_ENABLED(csp))) {
+ !IDN_CHANNEL_IS_ACTIVE(csp)) ||
+ ((chanop == IDNCHAN_HARD_CLOSE) &&
+ IDN_CHANNEL_IS_DETACHED(csp)) ||
+ ((chanop == IDNCHAN_OFFLINE) &&
+ !IDN_CHANNEL_IS_ENABLED(csp))) {
ASSERT(!IDN_CHANNEL_IS_RECV_ACTIVE(csp));
ASSERT(!IDN_CHANNEL_IS_SEND_ACTIVE(csp));
PR_CHAN("%s: channel %d already deactivated\n",
- proc, c);
+ proc, c);
IDN_CHAN_UNLOCK_GLOBAL(csp);
continue;
}
@@ -12551,9 +12484,9 @@ idn_deactivate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
lock_clear(&csp->ch_initlck);
PR_CHAN("%s: DEACTIVATING channel %d (%s)\n", proc, c,
- chanop_str[chanop]);
+ chanop_str[chanop]);
PR_CHAN("%s: removing chanset 0x%x data svrs for "
- "each domain link\n", proc, chanset);
+ "each domain link\n", proc, chanset);
(void) idn_deactivate_channel_services(c, chanop);
}
@@ -12593,14 +12526,14 @@ idn_activate_channel_services(int channel)
*/
ASSERT(csp->ch_id == (uchar_t)channel);
PR_CHAN("%s: existing chansvr FOUND for (c=%d)\n",
- proc, channel);
+ proc, channel);
if (IDN_CHANNEL_IS_PENDING(csp) == 0)
return (-1);
PR_CHAN("%s: chansvr (c=%d) Rstate = 0x%x, Sstate = 0x%x\n",
- proc, channel, csp->ch_recv.c_state,
- csp->ch_send.c_state);
+ proc, channel, csp->ch_recv.c_state,
+ csp->ch_send.c_state);
cv_signal(&csp->ch_recv_cv);
@@ -12687,7 +12620,7 @@ idn_deactivate_channel_services(int channel, idn_chanop_t chanop)
* At mark him idle incase we start him up.
*/
PR_CHAN("%s: no channel server found for chan %d\n",
- proc, c);
+ proc, c);
IDN_CHAN_UNLOCK_LOCAL(csp);
IDN_CHAN_UNLOCK_GLOBAL(csp);
continue;
@@ -12715,8 +12648,8 @@ idn_deactivate_channel_services(int channel, idn_chanop_t chanop)
}
PR_CHAN("%s: pointing chansvr %d to morgue (0x%p)\n",
- proc, c, central_morguep ? central_morguep
- : csp->ch_recv_morguep);
+ proc, c, central_morguep ? central_morguep
+ : csp->ch_recv_morguep);
if (central_morguep == NULL) {
central_morguep = csp->ch_recv_morguep;
@@ -12742,13 +12675,13 @@ idn_deactivate_channel_services(int channel, idn_chanop_t chanop)
cs_count++;
}
PR_CHAN("%s: signaled %d chansvrs for chanset 0x%x\n",
- proc, cs_count, chanset);
+ proc, cs_count, chanset);
if ((chanop == IDNCHAN_SOFT_CLOSE) || (chanop == IDNCHAN_OFFLINE))
return (cs_count);
PR_CHAN("%s: waiting for %d (chnset=0x%x) chan svrs to term\n",
- proc, cs_count, chanset);
+ proc, cs_count, chanset);
PR_CHAN("%s: morguep = 0x%p\n", proc, central_morguep);
ASSERT((cs_count > 0) ? (central_morguep != NULL) : 1);
@@ -12841,7 +12774,7 @@ idn_exec_chanactivate(void *chn)
if (IDN_CHANNEL_IS_PENDING(csp) && lock_try(&csp->ch_actvlck)) {
IDN_CHAN_UNLOCK_GLOBAL(csp);
not_active = idn_activate_channel(CHANSET(channel),
- IDNCHAN_OPEN);
+ IDNCHAN_OPEN);
if (not_active)
lock_clear(&csp->ch_actvlck);
} else {
@@ -12949,20 +12882,18 @@ idn_xmit_monitor(void *unused)
if (!IDN_CHAN_TRYLOCK_GLOBAL(csp))
continue;
- pending_bits = csp->ch_state &
- IDN_CHANSVC_PENDING_BITS;
+ pending_bits = csp->ch_state & IDN_CHANSVC_PENDING_BITS;
sip = IDN_INST2SIP(c);
if (!csp->ch_send.c_checkin &&
- (pending_bits == IDN_CHANSVC_PENDING_BITS) &&
- sip && (sip->si_flags & IDNRUNNING)) {
+ (pending_bits == IDN_CHANSVC_PENDING_BITS) &&
+ sip && (sip->si_flags & IDNRUNNING)) {
IDN_CHAN_UNLOCK_GLOBAL(csp);
CHANSET_ADD(wake_set, c);
- PR_XMON("%s: QENABLE for channel %d\n",
- proc, c);
+ PR_XMON("%s: QENABLE for channel %d\n", proc, c);
rw_enter(&idn.struprwlock, RW_READER);
mutex_enter(&idn.sipwenlock);
@@ -12985,7 +12916,7 @@ retry:
idn.xmit_tid = NULL;
else
idn.xmit_tid = timeout(idn_xmit_monitor, NULL,
- idn_xmit_monitor_freq);
+ idn_xmit_monitor_freq);
mutex_exit(&idn.xmit_lock);
}
@@ -13017,7 +12948,7 @@ idn_xmit_monitor_kickoff(int chan_wanted)
}
PR_XMON("%s: xmit_mon kicked OFF (chanset = 0x%x)\n",
- proc, idn.xmit_chanset_wanted);
+ proc, idn.xmit_chanset_wanted);
idn.xmit_tid = timeout(idn_xmit_monitor, NULL, idn_xmit_monitor_freq);
diff --git a/usr/src/uts/sun4u/starfire/io/idn_smr.c b/usr/src/uts/sun4u/starfire/io/idn_smr.c
index 8a54e3c7be..aaad23616c 100644
--- a/usr/src/uts/sun4u/starfire/io/idn_smr.c
+++ b/usr/src/uts/sun4u/starfire/io/idn_smr.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* Inter-Domain Network
@@ -28,8 +27,6 @@
* Shared Memory Region (SMR) supporting code.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/machparam.h>
@@ -240,7 +237,7 @@ smr_slab_alloc_remote(int domid, smr_slab_t **spp)
* First do a quick (no lock) check for global okayness.
*/
if ((idn.state != IDNGS_ONLINE) ||
- ((masterid = IDN_GET_MASTERID()) == IDN_NIL_DOMID)) {
+ ((masterid = IDN_GET_MASTERID()) == IDN_NIL_DOMID)) {
bailout = 1;
serrno = ECANCELED;
}
@@ -301,10 +298,10 @@ smr_slab_alloc_remote(int domid, smr_slab_t **spp)
* will be picked up when we attempt to wait.
*/
PR_SMR("%s: BAILING OUT on behalf domain %d "
- "(err=%d, gs=%s, ms=%s)\n",
- proc, domid, serrno, idngs_str[idn.state],
- (masterid == IDN_NIL_DOMID)
- ? "unknown" : idnds_str[idn_domain[masterid].dstate]);
+ "(err=%d, gs=%s, ms=%s)\n",
+ proc, domid, serrno, idngs_str[idn.state],
+ (masterid == IDN_NIL_DOMID)
+ ? "unknown" : idnds_str[idn_domain[masterid].dstate]);
(void) smr_slabwaiter_abort(domid, serrno);
} else if (nwait == 1) {
@@ -312,8 +309,7 @@ smr_slab_alloc_remote(int domid, smr_slab_t **spp)
* We are the original requester. Initiate the
* actual request to the master.
*/
- idn_send_cmd(masterid, IDNCMD_SLABALLOC,
- IDN_SLAB_SIZE, 0, 0);
+ idn_send_cmd(masterid, IDNCMD_SLABALLOC, IDN_SLAB_SIZE, 0, 0);
ASSERT(mdp);
IDN_DUNLOCK(masterid);
}
@@ -359,7 +355,7 @@ smr_slab_alloc(int domid, smr_slab_t **spp)
switch (dp->dslab_state) {
case DSLAB_STATE_UNKNOWN:
cmn_err(CE_WARN,
- "IDN: 300: no slab allocations without a master");
+ "IDN: 300: no slab allocations without a master");
serrno = EINVAL;
break;
@@ -371,7 +367,7 @@ smr_slab_alloc(int domid, smr_slab_t **spp)
* not been exceeded.
*/
if (((int)dp->dnslabs < IDN_SLAB_MAXPERDOMAIN) ||
- !IDN_SLAB_MAXPERDOMAIN)
+ !IDN_SLAB_MAXPERDOMAIN)
serrno = smr_slab_alloc_local(domid, spp);
else
serrno = EDQUOT;
@@ -391,7 +387,7 @@ smr_slab_alloc(int domid, smr_slab_t **spp)
* itself however it wishes.
*/
if (((int)dp->dnslabs < IDN_SLAB_MAXPERDOMAIN) ||
- !IDN_SLAB_MAXPERDOMAIN)
+ !IDN_SLAB_MAXPERDOMAIN)
serrno = smr_slab_alloc_remote(domid, spp);
else
serrno = EDQUOT;
@@ -399,16 +395,16 @@ smr_slab_alloc(int domid, smr_slab_t **spp)
default:
cmn_err(CE_WARN,
- "IDN: 301: (ALLOC) unknown slab state (%d) "
- "for domain %d", dp->dslab_state, domid);
+ "IDN: 301: (ALLOC) unknown slab state (%d) "
+ "for domain %d", dp->dslab_state, domid);
serrno = EINVAL;
break;
}
if (*spp == NULL) {
PR_SMR("%s: failed to allocate %s slab [serrno = %d]\n",
- proc, (idn.localid == IDN_GET_MASTERID()) ?
- "local" : "remote", serrno);
+ proc, (idn.localid == IDN_GET_MASTERID()) ?
+ "local" : "remote", serrno);
}
if (serrno) {
@@ -475,8 +471,8 @@ smr_slab_free_remote(int domid, smr_slab_t *sp)
FREESTRUCT(sp, smr_slab_t, 1);
IDN_DLOCK_SHARED(masterid);
- idn_send_cmd(masterid, IDNCMD_SLABFREE,
- slab_offset, slab_size, 0);
+ idn_send_cmd(masterid, IDNCMD_SLABFREE, slab_offset, slab_size,
+ 0);
IDN_DUNLOCK(masterid);
}
}
@@ -498,8 +494,7 @@ smr_slab_free(int domid, smr_slab_t *sp)
switch (idn_domain[domid].dslab_state) {
case DSLAB_STATE_UNKNOWN:
- cmn_err(CE_WARN,
- "IDN: 302: no slab free without a master");
+ cmn_err(CE_WARN, "IDN: 302: no slab free without a master");
break;
case DSLAB_STATE_LOCAL:
@@ -526,9 +521,8 @@ smr_slab_free(int domid, smr_slab_t *sp)
default:
cmn_err(CE_WARN,
- "IDN: 301: (FREE) unknown slab state "
- "(%d) for domain %d",
- idn_domain[domid].dslab_state, domid);
+ "IDN: 301: (FREE) unknown slab state (%d) for domain %d",
+ idn_domain[domid].dslab_state, domid);
break;
}
}
@@ -603,8 +597,8 @@ smr_buf_alloc(int domid, uint_t len, caddr_t *bufpp)
if (len > IDN_DATA_SIZE) {
cmn_err(CE_WARN,
- "IDN: 303: buffer len %d > IDN_DATA_SIZE (%lu)",
- len, IDN_DATA_SIZE);
+ "IDN: 303: buffer len %d > IDN_DATA_SIZE (%lu)",
+ len, IDN_DATA_SIZE);
IDN_GKSTAT_GLOBAL_EVENT(gk_buffail, gk_buffail_last);
return (EINVAL);
}
@@ -628,11 +622,11 @@ smr_buf_alloc(int domid, uint_t len, caddr_t *bufpp)
if (sp == NULL) {
if ((serrno = smr_slab_alloc(idn.localid, &sp)) != 0) {
PR_SMR("%s:%d: failed to allocate "
- "slab [serrno = %d]",
- proc, domid, serrno);
+ "slab [serrno = %d]",
+ proc, domid, serrno);
DSLAB_UNLOCK(idn.localid);
IDN_GKSTAT_GLOBAL_EVENT(gk_buffail,
- gk_buffail_last);
+ gk_buffail_last);
return (serrno);
}
/*
@@ -642,11 +636,11 @@ smr_buf_alloc(int domid, uint_t len, caddr_t *bufpp)
*/
if (dp->dstate != IDNDS_CONNECTED) {
PR_SMR("%s:%d: state changed during slab "
- "alloc (dstate = %s)\n",
- proc, domid, idnds_str[dp->dstate]);
+ "alloc (dstate = %s)\n",
+ proc, domid, idnds_str[dp->dstate]);
DSLAB_UNLOCK(idn.localid);
IDN_GKSTAT_GLOBAL_EVENT(gk_buffail,
- gk_buffail_last);
+ gk_buffail_last);
return (ENOLINK);
}
/*
@@ -740,15 +734,15 @@ smr_buf_free(int domid, caddr_t bufp, uint_t len)
if (((uintptr_t)bufp & (IDN_SMR_BUFSIZE-1)) &&
(IDN_ADDR2OFFSET(bufp) % IDN_SMR_BUFSIZE)) {
cmn_err(CE_WARN,
- "IDN: 304: buffer (0x%p) from domain %d not on a "
- "%d boundary", bufp, domid, IDN_SMR_BUFSIZE);
+ "IDN: 304: buffer (0x%p) from domain %d not on a "
+ "%d boundary", bufp, domid, IDN_SMR_BUFSIZE);
goto bfdone;
}
if (!lockheld && (len > IDN_DATA_SIZE)) {
cmn_err(CE_WARN,
- "IDN: 305: buffer length (%d) from domain %d greater "
- "than IDN_DATA_SIZE (%lu)",
- len, domid, IDN_DATA_SIZE);
+ "IDN: 305: buffer length (%d) from domain %d greater "
+ "than IDN_DATA_SIZE (%lu)",
+ len, domid, IDN_DATA_SIZE);
goto bfdone;
}
@@ -785,8 +779,8 @@ bfdone:
DIOCHECK(domid);
} else {
cmn_err(CE_WARN,
- "IDN: 306: unknown buffer (0x%p) from domain %d",
- bufp, domid);
+ "IDN: 306: unknown buffer (0x%p) from domain %d",
+ bufp, domid);
ATOMIC_INC(idn_domain[domid].dioerr);
}
@@ -828,8 +822,7 @@ smr_buf_free_all(int domid)
ASSERT(domid != idn.localid);
if (!VALID_DOMAINID(domid)) {
- cmn_err(CE_WARN,
- "IDN: 307: domain ID (%d) invalid", domid);
+ cmn_err(CE_WARN, "IDN: 307: domain ID (%d) invalid", domid);
return (-1);
}
@@ -866,8 +859,7 @@ smr_buf_free_all(int domid)
DSLAB_UNLOCK(idn.localid);
- PR_SMR("%s: freed %d buffers for domain %d\n",
- proc, nbufsfreed, domid);
+ PR_SMR("%s: freed %d buffers for domain %d\n", proc, nbufsfreed, domid);
return (nbufsfreed);
}
@@ -893,8 +885,7 @@ smr_buf_reclaim(int domid, int nbufs)
return (0);
}
- PR_SMR("%s: requested %d buffers from domain %d\n",
- proc, nbufs, domid);
+ PR_SMR("%s: requested %d buffers from domain %d\n", proc, nbufs, domid);
if (dp->dio && nbufs) {
register smr_slab_t *sp;
@@ -945,7 +936,7 @@ smr_buf_reclaim(int domid, int nbufs)
}
PR_SMR("%s: reclaimed %d buffers from domain %d\n",
- proc, num_reclaimed, domid);
+ proc, num_reclaimed, domid);
return (num_reclaimed);
}
@@ -1107,8 +1098,7 @@ smr_slabwaiter_register(int domid)
nwait = ++(wp->w_nwaiters);
ASSERT(nwait > 0);
- PR_SMR("%s: domain = %d, (new)nwaiters = %d\n",
- proc, domid, nwait);
+ PR_SMR("%s: domain = %d, (new)nwaiters = %d\n", proc, domid, nwait);
if (nwait > 1) {
/*
@@ -1116,7 +1106,7 @@ smr_slabwaiter_register(int domid)
* with respect to this domain.
*/
PR_SMR("%s: existing waiters for slabs for domain %d\n",
- proc, domid);
+ proc, domid);
mutex_exit(&wp->w_mutex);
return (nwait);
@@ -1161,8 +1151,7 @@ smr_slabwaiter_unregister(int domid, smr_slab_t **spp)
mutex_enter(&wp->w_mutex);
- PR_SMR("%s: domain = %d, nwaiters = %d\n",
- proc, domid, wp->w_nwaiters);
+ PR_SMR("%s: domain = %d, nwaiters = %d\n", proc, domid, wp->w_nwaiters);
if (wp->w_nwaiters <= 0) {
/*
@@ -1185,7 +1174,7 @@ smr_slabwaiter_unregister(int domid, smr_slab_t **spp)
* Thus, late sleepers may still get a chance.
*/
PR_SMR("%s: bummer no slab allocated for domain %d\n",
- proc, domid);
+ proc, domid);
ASSERT(wp->w_sp == NULL);
(*spp) = NULL;
serrno = wp->w_closed ? ECANCELED : EBUSY;
@@ -1200,7 +1189,7 @@ smr_slabwaiter_unregister(int domid, smr_slab_t **spp)
ASSERT(wp->w_sp);
PR_SMR("%s: allocation succeeded (domain %d)\n",
- proc, domid);
+ proc, domid);
DSLAB_LOCK_SHARED(domid);
for (sp = idn_domain[domid].dslab; sp; sp = sp->sl_next)
@@ -1208,12 +1197,12 @@ smr_slabwaiter_unregister(int domid, smr_slab_t **spp)
break;
if (sp == NULL)
cmn_err(CE_WARN,
- "%s:%d: slab ptr = NULL",
- proc, domid);
+ "%s:%d: slab ptr = NULL",
+ proc, domid);
DSLAB_UNLOCK(domid);
} else {
PR_SMR("%s: allocation failed (domain %d) "
- "[serrno = %d]\n", proc, domid, serrno);
+ "[serrno = %d]\n", proc, domid, serrno);
}
#endif /* DEBUG */
}
@@ -1222,7 +1211,7 @@ smr_slabwaiter_unregister(int domid, smr_slab_t **spp)
* Last one turns out the lights.
*/
PR_SMR("%s: domain %d last waiter, turning out lights\n",
- proc, domid);
+ proc, domid);
wp->w_sp = NULL;
wp->w_done = 0;
wp->w_serrno = 0;
@@ -1269,14 +1258,13 @@ smr_slaballoc_wait(int domid, smr_slab_t **spp)
mutex_enter(&wp->w_mutex);
PR_SMR("%s: domain = %d, nwaiters = %d, wsp = 0x%p\n",
- proc, domid, wp->w_nwaiters, wp->w_sp);
+ proc, domid, wp->w_nwaiters, wp->w_sp);
if (wp->w_nwaiters <= 0) {
/*
* Hmmm...no waiters registered.
*/
- PR_SMR("%s: domain %d, no waiters!\n",
- proc, domid);
+ PR_SMR("%s: domain %d, no waiters!\n", proc, domid);
mutex_exit(&wp->w_mutex);
return (EINVAL);
}
@@ -1289,17 +1277,15 @@ smr_slaballoc_wait(int domid, smr_slab_t **spp)
/*
* Only wait if data hasn't arrived yet.
*/
- PR_SMR("%s: domain %d, going to sleep...\n",
- proc, domid);
-
+ PR_SMR("%s: domain %d, going to sleep...\n", proc, domid);
- rv = cv_timedwait_sig(&wp->w_cv, &wp->w_mutex,
- lbolt + IDN_SLABALLOC_WAITTIME);
+ rv = cv_reltimedwait_sig(&wp->w_cv, &wp->w_mutex,
+ IDN_SLABALLOC_WAITTIME, TR_CLOCK_TICK);
if (rv == -1)
serrno = ETIMEDOUT;
PR_SMR("%s: domain %d, awakened (reason = %s)\n",
- proc, domid, (rv == -1) ? "TIMEOUT" : "SIGNALED");
+ proc, domid, (rv == -1) ? "TIMEOUT" : "SIGNALED");
}
/*
* We've awakened or request already filled!
@@ -1362,9 +1348,9 @@ smr_slaballoc_put(int domid, smr_slab_t *sp, int forceflag, int serrno)
mutex_enter(&wp->w_mutex);
PR_SMR("%s: domain = %d, bufp = 0x%p, ebufp = 0x%p, "
- "(f = %d, se = %d)\n", proc, domid,
- (sp ? sp->sl_start : 0),
- (sp ? sp->sl_end : 0), forceflag, serrno);
+ "(f = %d, se = %d)\n", proc, domid,
+ (sp ? sp->sl_start : 0),
+ (sp ? sp->sl_end : 0), forceflag, serrno);
if (wp->w_nwaiters <= 0) {
/*
@@ -1372,7 +1358,7 @@ smr_slaballoc_put(int domid, smr_slab_t *sp, int forceflag, int serrno)
* and left. Oh well...
*/
PR_SMR("%s: no slaballoc waiters found for domain %d\n",
- proc, domid);
+ proc, domid);
if (!forceflag || serrno || !sp) {
/*
* No waiters and caller doesn't want to force it.
@@ -1400,7 +1386,7 @@ smr_slaballoc_put(int domid, smr_slab_t *sp, int forceflag, int serrno)
* trying to let everybody know.
*/
ASSERT(wp->w_serrno ?
- (wp->w_sp == NULL) : (wp->w_sp != NULL));
+ (wp->w_sp == NULL) : (wp->w_sp != NULL));
cv_broadcast(&wp->w_cv);
mutex_exit(&wp->w_mutex);
@@ -1412,8 +1398,7 @@ smr_slaballoc_put(int domid, smr_slab_t *sp, int forceflag, int serrno)
* Bummer...allocation failed. This call is simply
* to wake up the sleepers and let them know.
*/
- PR_SMR("%s: slaballoc failed for domain %d\n",
- proc, domid);
+ PR_SMR("%s: slaballoc failed for domain %d\n", proc, domid);
wp->w_serrno = serrno;
wp->w_done = 1;
cv_broadcast(&wp->w_cv);
@@ -1422,7 +1407,7 @@ smr_slaballoc_put(int domid, smr_slab_t *sp, int forceflag, int serrno)
return (0);
}
PR_SMR("%s: putting slab into struct (domid=%d, localid=%d)\n",
- proc, domid, idn.localid);
+ proc, domid, idn.localid);
/*
* Prep the slab structure.
*/
@@ -1506,15 +1491,14 @@ smr_slaballoc_get(int domid, caddr_t bufp, caddr_t ebufp)
procname_t proc = "smr_slaballoc_get";
PR_SMR("%s: getting slab for domain %d [bufp=0x%p, ebufp=0x%p]\n",
- proc, domid, bufp, ebufp);
+ proc, domid, bufp, ebufp);
dp = &idn_domain[domid];
ASSERT(DSLAB_WRITE_HELD(domid));
if ((sp = dp->dslab) == NULL) {
- PR_SMR("%s: oops, no slabs for domain %d\n",
- proc, domid);
+ PR_SMR("%s: oops, no slabs for domain %d\n", proc, domid);
return (NULL);
}
/*
@@ -1532,7 +1516,7 @@ smr_slaballoc_get(int domid, caddr_t bufp, caddr_t ebufp)
nslabs = *(int *)ebufp;
if (nslabs == 0) {
PR_SMR("%s: requested nslabs (%d) <= 0\n",
- proc, nslabs);
+ proc, nslabs);
return (NULL);
} else if (nslabs < 0) {
/*
@@ -1554,8 +1538,8 @@ smr_slaballoc_get(int domid, caddr_t bufp, caddr_t ebufp)
if (bufp && (ebufp > sp->sl_end)) {
PR_SMR("%s: bufp/ebufp (0x%p/0x%p) "
- "expected (0x%p/0x%p)\n", proc, bufp, ebufp,
- sp->sl_start, sp->sl_end);
+ "expected (0x%p/0x%p)\n", proc, bufp, ebufp,
+ sp->sl_start, sp->sl_end);
ASSERT(0);
}
/*
@@ -1611,11 +1595,10 @@ smr_slaballoc_get(int domid, caddr_t bufp, caddr_t ebufp)
dp->dnslabs -= (short)foundit;
if (foundit) {
- PR_SMR("%s: found %d free slabs (domid = %d)\n",
- proc, foundit, domid);
+ PR_SMR("%s: found %d free slabs (domid = %d)\n", proc, foundit,
+ domid);
} else {
- PR_SMR("%s: no free slabs found (domid = %d)\n",
- proc, domid);
+ PR_SMR("%s: no free slabs found (domid = %d)\n", proc, domid);
}
/*
@@ -1678,7 +1661,7 @@ smr_slabpool_init(size_t reserved_size, caddr_t *reserved_area)
idn.slabpool->ntotslabs = ntotslabs = nwr_available / IDN_SLAB_SIZE;
ASSERT(ntotslabs > 0);
minperpool = (ntotslabs < IDN_SLAB_MINPERPOOL) ?
- 1 : IDN_SLAB_MINPERPOOL;
+ 1 : IDN_SLAB_MINPERPOOL;
idn.slabpool->npools = (ntotslabs + (minperpool - 1)) / minperpool;
if ((idn.slabpool->npools & 1) == 0) {
@@ -1689,7 +1672,7 @@ smr_slabpool_init(size_t reserved_size, caddr_t *reserved_area)
}
ASSERT(idn.slabpool->npools > 0);
minperpool = (ntotslabs < idn.slabpool->npools) ?
- 1 : (ntotslabs / idn.slabpool->npools);
+ 1 : (ntotslabs / idn.slabpool->npools);
/*
* Calculate the number of extra slabs that will need to
@@ -1704,7 +1687,7 @@ smr_slabpool_init(size_t reserved_size, caddr_t *reserved_area)
ASSERT((nxslabs >= 0) && (nxslabs < idn.slabpool->npools));
idn.slabpool->pool = GETSTRUCT(struct smr_slabtbl,
- idn.slabpool->npools);
+ idn.slabpool->npools);
sp = GETSTRUCT(smr_slab_t, idn.slabpool->ntotslabs);
idn.slabpool->savep = sp;
@@ -1737,9 +1720,8 @@ smr_slabpool_init(size_t reserved_size, caddr_t *reserved_area)
/*
* We should be at the end of the SMR at this point.
*/
- ASSERT(bufp == (idn.smr.vaddr
- + reserved_size
- + (idn.slabpool->ntotslabs * IDN_SLAB_SIZE)));
+ ASSERT(bufp == (idn.smr.vaddr + reserved_size
+ + (idn.slabpool->ntotslabs * IDN_SLAB_SIZE)));
if (reserved_size != 0)
*reserved_area = idn.smr.vaddr;
@@ -1755,7 +1737,7 @@ smr_slabpool_deinit()
FREESTRUCT(idn.slabpool->savep, smr_slab_t, idn.slabpool->ntotslabs);
FREESTRUCT(idn.slabpool->pool, struct smr_slabtbl,
- idn.slabpool->npools);
+ idn.slabpool->npools);
FREESTRUCT(idn.slabpool, struct slabpool, 1);
idn.slabpool = NULL;
@@ -1849,8 +1831,8 @@ smr_slab_reserve(int domid)
if (foundone) {
ASSERT((&spa[s] >= idn.slabpool->savep) &&
- (&spa[s] < (idn.slabpool->savep +
- idn.slabpool->ntotslabs)));
+ (&spa[s] < (idn.slabpool->savep +
+ idn.slabpool->ntotslabs)));
spa[s].sl_domid = (short)domid;
@@ -1876,7 +1858,7 @@ smr_slab_reserve(int domid)
smr_alloc_buflist(nsp);
spa = nsp;
PR_SMR("%s: allocated full slab struct for domain %d\n",
- proc, domid);
+ proc, domid);
} else {
/*
* Slab structure gets returned locked.
@@ -1885,11 +1867,11 @@ smr_slab_reserve(int domid)
}
PR_SMR("%s: allocated slab 0x%p (start=0x%p, size=%lu) for "
- "domain %d\n", proc, spa, spa->sl_start,
- spa->sl_end - spa->sl_start, domid);
+ "domain %d\n", proc, spa, spa->sl_start,
+ spa->sl_end - spa->sl_start, domid);
} else {
PR_SMR("%s: FAILED to allocate for domain %d\n",
- proc, domid);
+ proc, domid);
spa = NULL;
}
@@ -1931,8 +1913,8 @@ smr_slab_unreserve(int domid, smr_slab_t *sp)
}
if (foundit) {
ASSERT((&spa[s] >= idn.slabpool->savep) &&
- (&spa[s] < (idn.slabpool->savep +
- idn.slabpool->ntotslabs)));
+ (&spa[s] < (idn.slabpool->savep +
+ idn.slabpool->ntotslabs)));
ASSERT(!lock_try(&spa[s].sl_lock));
ASSERT(spa[s].sl_domid == (short)domid);
@@ -1943,7 +1925,7 @@ smr_slab_unreserve(int domid, smr_slab_t *sp)
ATOMIC_INC(idn.slabpool->pool[p].nfree);
PR_SMR("%s: freed (bufp=0x%p) for domain %d\n",
- proc, bufp, domid);
+ proc, bufp, domid);
if (domid == idn.localid) {
/*
@@ -1967,7 +1949,7 @@ smr_slab_unreserve(int domid, smr_slab_t *sp)
* Couldn't find slab entry for given buf!
*/
PR_SMR("%s: FAILED to free (bufp=0x%p) for domain %d\n",
- proc, bufp, domid);
+ proc, bufp, domid);
}
}
@@ -2009,23 +1991,25 @@ smr_slab_reap_global()
register struct smr_slabtbl *tblp;
static clock_t reap_last = 0;
procname_t proc = "smr_slab_reap_global";
+ clock_t now;
ASSERT(IDN_GET_MASTERID() != IDN_NIL_DOMID);
DSLAB_LOCK_SHARED(idn.localid);
if (idn_domain[idn.localid].dslab_state != DSLAB_STATE_LOCAL) {
PR_SMR("%s: only allowed by master (%d)\n",
- proc, IDN_GET_MASTERID());
+ proc, IDN_GET_MASTERID());
DSLAB_UNLOCK(idn.localid);
return;
}
DSLAB_UNLOCK(idn.localid);
- if ((lbolt > 0) && (lbolt > reap_last) &&
- ((lbolt - reap_last) < IDN_REAP_INTERVAL))
+ now = ddi_get_lbolt();
+ if ((now > 0) && (now > reap_last) &&
+ ((now - reap_last) < IDN_REAP_INTERVAL))
return;
- reap_last = lbolt;
+ reap_last = now;
ASSERT(idn.slabpool);
@@ -2039,12 +2023,12 @@ smr_slab_reap_global()
int diff, reap_per_domain;
PR_SMR("%s: kicking off reaping "
- "(total_free = %d, min = %d)\n",
- proc, total_free, IDN_SLAB_THRESHOLD);
+ "(total_free = %d, min = %d)\n",
+ proc, total_free, IDN_SLAB_THRESHOLD);
diff = IDN_SLAB_THRESHOLD - total_free;
- reap_per_domain = (diff < idn.ndomains)
- ? 1 : (diff / idn.ndomains);
+ reap_per_domain = (diff < idn.ndomains) ?
+ 1 : (diff / idn.ndomains);
idn_broadcast_cmd(IDNCMD_SLABREAP, reap_per_domain, 0, 0);
}
@@ -2065,7 +2049,7 @@ smr_slab_reap(int domid, int *nslabs)
*/
if (domid != idn.localid) {
PR_SMR("%s: called by domain %d, should only be local (%d)\n",
- proc, domid, idn.localid);
+ proc, domid, idn.localid);
ASSERT(0);
return;
}
@@ -2119,7 +2103,7 @@ smr_slab_reap(int domid, int *nslabs)
IDN_DUNLOCK(idn.localid);
}
if ((dp->dstate == IDNDS_CONNECTED) &&
- ((nr = idn_reclaim_mboxdata(d, 0, -1)) > 0))
+ ((nr = idn_reclaim_mboxdata(d, 0, -1)) > 0))
nreclaimed += nr;
IDN_DUNLOCK(d);
@@ -2151,7 +2135,7 @@ smr_remap(struct as *as, register caddr_t vaddr,
if (va_to_pfn(vaddr) == new_pfn) {
PR_REMAP("%s: vaddr (0x%p) already mapped to pfn (0x%lx)\n",
- proc, vaddr, new_pfn);
+ proc, vaddr, new_pfn);
return;
}
@@ -2160,7 +2144,7 @@ smr_remap(struct as *as, register caddr_t vaddr,
ASSERT(npgs != 0);
PR_REMAP("%s: va = 0x%p, pfn = 0x%lx, npgs = %ld, mb = %d MB (%ld)\n",
- proc, vaddr, new_pfn, npgs, mblen, blen);
+ proc, vaddr, new_pfn, npgs, mblen, blen);
/*
* Unmap the SMR virtual address from it's current
@@ -2178,9 +2162,8 @@ smr_remap(struct as *as, register caddr_t vaddr,
* i.e. space since it may beyond his physmax.
*/
for (p = 0; p < npgs; p++) {
- sfmmu_memtte(&tte, new_pfn,
- PROT_READ | PROT_WRITE | HAT_NOSYNC,
- TTE8K);
+ sfmmu_memtte(&tte, new_pfn, PROT_READ | PROT_WRITE | HAT_NOSYNC,
+ TTE8K);
sfmmu_tteload(as->a_hat, &tte, vaddr, NULL, HAT_LOAD_LOCK);
vaddr += MMU_PAGESIZE;
@@ -2188,5 +2171,5 @@ smr_remap(struct as *as, register caddr_t vaddr,
}
PR_REMAP("%s: remapped %ld pages (expected %ld)\n",
- proc, npgs, btopr(MB2B(mblen)));
+ proc, npgs, btopr(MB2B(mblen)));
}
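The smr_slaballoc_wait() hunk above is the core pattern of this patch: waits that built an absolute deadline from the raw lbolt variable become relative waits through cv_reltimedwait_sig(). A minimal sketch of the new form, using hypothetical names (my_cv, my_lock, MY_WAIT_TICKS) rather than the IDN symbols:

#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#define	MY_WAIT_TICKS	(5 * hz)	/* hypothetical timeout */

static kcondvar_t	my_cv;
static kmutex_t		my_lock;

static int
my_wait_for_event(void)
{
	clock_t	rv;
	int	err = 0;

	mutex_enter(&my_lock);
	/*
	 * Was: cv_timedwait_sig(&my_cv, &my_lock, lbolt + MY_WAIT_TICKS);
	 * Now: a relative timeout at clock-tick resolution.
	 */
	rv = cv_reltimedwait_sig(&my_cv, &my_lock,
	    MY_WAIT_TICKS, TR_CLOCK_TICK);
	if (rv == -1)
		err = ETIMEDOUT;	/* timer expired */
	else if (rv == 0)
		err = EINTR;		/* interrupted by a signal */
	mutex_exit(&my_lock);

	return (err);
}

TR_CLOCK_TICK requests ordinary clock-tick resolution; the return convention is unchanged (-1 on time-out, 0 when interrupted by a signal).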
diff --git a/usr/src/uts/sun4u/starfire/io/idn_xf.c b/usr/src/uts/sun4u/starfire/io/idn_xf.c
index bad3981bfe..c92f32ac23 100644
--- a/usr/src/uts/sun4u/starfire/io/idn_xf.c
+++ b/usr/src/uts/sun4u/starfire/io/idn_xf.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,11 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright (c) 1996 by Sun Microsystems, Inc.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/open.h>
@@ -192,12 +190,10 @@ cic_read_domain_mask(int board, int bus)
ASSERT(CPUID_TO_BOARDID(CPU->cpu_id) == board);
- csr_addr = MAKE_CIC_CSR_PA(board,
- CSR_TYPE_CIC,
- CIC_DOMAIN_MASK_ADDR,
- bus);
+ csr_addr = MAKE_CIC_CSR_PA(board, CSR_TYPE_CIC, CIC_DOMAIN_MASK_ADDR,
+ bus);
PR_XF("%s: (bd=%d, bs=%d) csr_addr = 0x%llx\n",
- proc, board, bus, csr_addr);
+ proc, board, bus, csr_addr);
domain_mask = (boardset_t)PHYSIO_LDH(csr_addr);
@@ -213,12 +209,10 @@ cic_read_sm_mask(int board, int bus)
ASSERT(CPUID_TO_BOARDID(CPU->cpu_id) == board);
- csr_addr = MAKE_CIC_CSR_PA(board,
- CSR_TYPE_CIC,
- CIC_SM_MASK_ADDR,
- bus);
+ csr_addr = MAKE_CIC_CSR_PA(board, CSR_TYPE_CIC, CIC_SM_MASK_ADDR,
+ bus);
PR_XF("%s: (bd=%d, bs=%d) csr_addr = 0x%llx\n",
- proc, board, bus, csr_addr);
+ proc, board, bus, csr_addr);
sm_mask = (boardset_t)PHYSIO_LDH(csr_addr);
@@ -245,14 +239,12 @@ cic_write_sm_mask(int board, int bus, boardset_t sm_mask)
/*
* Now we can write to the CIC.
*/
- csr_addr = MAKE_CIC_CSR_PA(board,
- CSR_TYPE_CIC,
- CIC_SM_MASK_ADDR,
- bus);
+ csr_addr = MAKE_CIC_CSR_PA(board, CSR_TYPE_CIC, CIC_SM_MASK_ADDR,
+ bus);
PR_XF("%s: (bd=%d, bs=%d) csr_addr = 0x%llx\n",
- proc, board, bus, csr_addr);
+ proc, board, bus, csr_addr);
PR_XF("%s: writing sm_mask = 0x%x\n",
- proc, (ushort_t)sm_mask);
+ proc, (ushort_t)sm_mask);
UPDATE_CIC_HISTORY(CICREG_SMMASK, board, bus, sm_mask);
@@ -275,22 +267,18 @@ cic_read_sm_bar(int board, int bus)
ASSERT(CPUID_TO_BOARDID(CPU->cpu_id) == board);
- csr_addr = MAKE_CIC_CSR_PA(board,
- CSR_TYPE_CIC,
- CIC_SM_BAR_MSB_ADDR,
- bus);
+ csr_addr = MAKE_CIC_CSR_PA(board, CSR_TYPE_CIC, CIC_SM_BAR_MSB_ADDR,
+ bus);
PR_XF("%s:MSB: (bd=%d, bs=%d) csr_addr = 0x%llx\n",
- proc, board, bus, csr_addr);
+ proc, board, bus, csr_addr);
sm_bar = (uint_t)PHYSIO_LDH(csr_addr);
sm_bar <<= 16;
- csr_addr = MAKE_CIC_CSR_PA(board,
- CSR_TYPE_CIC,
- CIC_SM_BAR_LSB_ADDR,
- bus);
+ csr_addr = MAKE_CIC_CSR_PA(board, CSR_TYPE_CIC, CIC_SM_BAR_LSB_ADDR,
+ bus);
PR_XF("%s:LSB: (bd=%d, bs=%d) csr_addr = 0x%llx\n",
- proc, board, bus, csr_addr);
+ proc, board, bus, csr_addr);
sm_bar |= (uint_t)PHYSIO_LDH(csr_addr);
@@ -317,14 +305,12 @@ cic_write_sm_bar(int board, int bus, uint_t sm_bar)
if (pc_prep_cic_buffer(CPU->cpu_id, sm_bar_msb) < 0)
return (-1);
- csr_addr = MAKE_CIC_CSR_PA(board,
- CSR_TYPE_CIC,
- CIC_SM_BAR_MSB_ADDR,
- bus);
+ csr_addr = MAKE_CIC_CSR_PA(board, CSR_TYPE_CIC, CIC_SM_BAR_MSB_ADDR,
+ bus);
PR_XF("%s:MSB: (bd=%d, bs=%d) csr_addr = 0x%llx\n",
- proc, board, bus, csr_addr);
+ proc, board, bus, csr_addr);
PR_XF("%s:MSB: sm_bar[31:16] = 0x%x\n",
- proc, (ushort_t)sm_bar_msb);
+ proc, (ushort_t)sm_bar_msb);
UPDATE_CIC_HISTORY(CICREG_SMBAR, board, bus, sm_bar);
@@ -335,8 +321,8 @@ cic_write_sm_bar(int board, int bus, uint_t sm_bar)
;
if (cnt == 10) {
cmn_err(CE_WARN,
- "IDN: 500: failed to write sm_bar (msb) (0x%x)",
- (uint_t)sm_bar_msb);
+ "IDN: 500: failed to write sm_bar (msb) (0x%x)",
+ (uint_t)sm_bar_msb);
return (-1);
}
@@ -346,14 +332,12 @@ cic_write_sm_bar(int board, int bus, uint_t sm_bar)
if (pc_prep_cic_buffer(CPU->cpu_id, sm_bar_lsb) < 0)
return (-1);
- csr_addr = MAKE_CIC_CSR_PA(board,
- CSR_TYPE_CIC,
- CIC_SM_BAR_LSB_ADDR,
- bus);
+ csr_addr = MAKE_CIC_CSR_PA(board, CSR_TYPE_CIC, CIC_SM_BAR_LSB_ADDR,
+ bus);
PR_XF("%s:LSB: (bd=%d, bs=%d) csr_addr = 0x%llx\n",
- proc, board, bus, csr_addr);
+ proc, board, bus, csr_addr);
PR_XF("%s:LSB: sm_bar[15:0] = 0x%x\n",
- proc, (ushort_t)sm_bar_lsb);
+ proc, (ushort_t)sm_bar_lsb);
PHYSIO_STH(csr_addr, (ushort_t)sm_bar_lsb);
for (cnt = 0;
@@ -362,8 +346,8 @@ cic_write_sm_bar(int board, int bus, uint_t sm_bar)
;
if (cnt == 10) {
cmn_err(CE_WARN,
- "IDN: 500: failed to write sm_bar (lsb) (0x%x)",
- (uint_t)sm_bar_lsb);
+ "IDN: 500: failed to write sm_bar (lsb) (0x%x)",
+ (uint_t)sm_bar_lsb);
return (-1);
}
@@ -379,22 +363,18 @@ cic_read_sm_lar(int board, int bus)
ASSERT(CPUID_TO_BOARDID(CPU->cpu_id) == board);
- csr_addr = MAKE_CIC_CSR_PA(board,
- CSR_TYPE_CIC,
- CIC_SM_LAR_MSB_ADDR,
- bus);
+ csr_addr = MAKE_CIC_CSR_PA(board, CSR_TYPE_CIC, CIC_SM_LAR_MSB_ADDR,
+ bus);
PR_XF("%s:MSB: (bd=%d, bs=%d) csr_addr = 0x%llx\n",
- proc, board, bus, csr_addr);
+ proc, board, bus, csr_addr);
sm_lar = (uint_t)PHYSIO_LDH(csr_addr);
sm_lar <<= 16;
- csr_addr = MAKE_CIC_CSR_PA(board,
- CSR_TYPE_CIC,
- CIC_SM_LAR_LSB_ADDR,
- bus);
+ csr_addr = MAKE_CIC_CSR_PA(board, CSR_TYPE_CIC, CIC_SM_LAR_LSB_ADDR,
+ bus);
PR_XF("%s:LSB: (bd=%d, bs=%d) csr_addr = 0x%llx\n",
- proc, board, bus, csr_addr);
+ proc, board, bus, csr_addr);
sm_lar |= (uint_t)PHYSIO_LDH(csr_addr);
@@ -421,14 +401,12 @@ cic_write_sm_lar(int board, int bus, uint_t sm_lar)
if (pc_prep_cic_buffer(CPU->cpu_id, sm_lar_msb) < 0)
return (-1);
- csr_addr = MAKE_CIC_CSR_PA(board,
- CSR_TYPE_CIC,
- CIC_SM_LAR_MSB_ADDR,
- bus);
+ csr_addr = MAKE_CIC_CSR_PA(board, CSR_TYPE_CIC, CIC_SM_LAR_MSB_ADDR,
+ bus);
PR_XF("%s:MSB: (bd=%d, bs=%d) csr_addr = 0x%llx\n",
- proc, board, bus, csr_addr);
+ proc, board, bus, csr_addr);
PR_XF("%s:MSB: sm_lar[31:16] = 0x%x\n",
- proc, (ushort_t)sm_lar_msb);
+ proc, (ushort_t)sm_lar_msb);
UPDATE_CIC_HISTORY(CICREG_SMLAR, board, bus, sm_lar);
@@ -439,8 +417,8 @@ cic_write_sm_lar(int board, int bus, uint_t sm_lar)
;
if (cnt == 10) {
cmn_err(CE_WARN,
- "IDN: 501: failed to write sm_lar (msb) (0x%x)",
- (uint_t)sm_lar_msb);
+ "IDN: 501: failed to write sm_lar (msb) (0x%x)",
+ (uint_t)sm_lar_msb);
return (-1);
}
@@ -450,14 +428,12 @@ cic_write_sm_lar(int board, int bus, uint_t sm_lar)
if (pc_prep_cic_buffer(CPU->cpu_id, sm_lar_lsb) < 0)
return (-1);
- csr_addr = MAKE_CIC_CSR_PA(board,
- CSR_TYPE_CIC,
- CIC_SM_LAR_LSB_ADDR,
- bus);
+ csr_addr = MAKE_CIC_CSR_PA(board, CSR_TYPE_CIC, CIC_SM_LAR_LSB_ADDR,
+ bus);
PR_XF("%s:LSB: (bd=%d, bs=%d) csr_addr = 0x%llx\n",
- proc, board, bus, csr_addr);
+ proc, board, bus, csr_addr);
PR_XF("%s:LSB: sm_lar[15:0] = 0x%x\n",
- proc, (ushort_t)sm_lar_lsb);
+ proc, (ushort_t)sm_lar_lsb);
PHYSIO_STH(csr_addr, (ushort_t)sm_lar_lsb);
for (cnt = 0;
@@ -466,8 +442,8 @@ cic_write_sm_lar(int board, int bus, uint_t sm_lar)
;
if (cnt == 10) {
cmn_err(CE_WARN,
- "IDN: 501: failed to write sm_lar (lsb) (0x%x)",
- (uint_t)sm_lar_lsb);
+ "IDN: 501: failed to write sm_lar (lsb) (0x%x)",
+ (uint_t)sm_lar_lsb);
return (-1);
}
@@ -489,12 +465,9 @@ cic_get_smmask_bit(void)
* Now that I'm stuck on this cpu I can go look at this
* board's CIC registers.
*/
- csr_addr = MAKE_CIC_CSR_PA(board,
- CSR_TYPE_CIC,
- CIC_CONFIG1_ADDR,
- 0);
+ csr_addr = MAKE_CIC_CSR_PA(board, CSR_TYPE_CIC, CIC_CONFIG1_ADDR, 0);
PR_XF("%s: (bd=%d) csr_addr = 0x%llx (via cpu %d)\n",
- proc, board, csr_addr, (int)CPU->cpu_id);
+ proc, board, csr_addr, (int)CPU->cpu_id);
config1 = (uint_t)PHYSIO_LDH(csr_addr);
@@ -541,13 +514,13 @@ pc_prep_cic_buffer(int cpuid, uint_t cicdata)
rv = 0;
if (cnt == 10) {
cmn_err(CE_WARN,
- "IDN: 502: unable to store data (0x%x) to "
- "CIC buffer (0x%llx)",
- cicdata, csr_addr);
+ "IDN: 502: unable to store data (0x%x) to "
+ "CIC buffer (0x%llx)",
+ cicdata, csr_addr);
rv = -1;
} else if (cnt >= 1) {
PR_XF("%s: MULTIPLE READS (cpu=%d) cnt = %d\n",
- proc, cpuid, cnt);
+ proc, cpuid, cnt);
}
return (rv);
@@ -589,7 +562,7 @@ pc_write_madr(pda_handle_t ph, int lboard, int rboard, uint_t madr)
continue;
pc_madr_addr = (u_longlong_t)STARFIRE_PC_MADR_ADDR(lboard,
- rboard, p);
+ rboard, p);
/*
* On this first iteration of updating the PC
@@ -605,8 +578,8 @@ pc_write_madr(pda_handle_t ph, int lboard, int rboard, uint_t madr)
madr &= ~STARFIRE_PC_MADR_VALIDBIT;
}
PR_XF("%s: write madr(0x%x) to pc_addr(0x%llx) "
- "[lb=%d, rb=%d, cpu=%d]\n",
- proc, madr, pc_madr_addr, lboard, rboard, p);
+ "[lb=%d, rb=%d, cpu=%d]\n",
+ proc, madr, pc_madr_addr, lboard, rboard, p);
DEBUG_DELAY();
for (i = 0; i < 20; i++) {
@@ -620,14 +593,14 @@ pc_write_madr(pda_handle_t ph, int lboard, int rboard, uint_t madr)
}
if (i > 0) {
PR_XF("%s: WARNING: (1) lb=%d, rb=%d, "
- "madr=0x%x (i=%d)\n",
- proc, lboard, rboard, madr, i);
+ "madr=0x%x (i=%d)\n",
+ proc, lboard, rboard, madr, i);
}
if (rd_madr != madr) {
cmn_err(CE_WARN,
- "IDN: 503: (invalidate) failed to update "
- "PC madr (expected 0x%x, actual 0x%x)",
- madr, rd_madr);
+ "IDN: 503: (invalidate) failed to update "
+ "PC madr (expected 0x%x, actual 0x%x)",
+ madr, rd_madr);
rv++;
continue;
}
@@ -640,8 +613,8 @@ pc_write_madr(pda_handle_t ph, int lboard, int rboard, uint_t madr)
madr |= STARFIRE_PC_MADR_VALIDBIT;
}
PR_XF("%s: write madr(0x%x) to pc_addr(0x%llx) "
- "[lb=%d, rb=%d, cpu=%d]\n",
- proc, madr, pc_madr_addr, lboard, rboard, p);
+ "[lb=%d, rb=%d, cpu=%d]\n",
+ proc, madr, pc_madr_addr, lboard, rboard, p);
DEBUG_DELAY();
for (i = 0; i < 20; i++) {
@@ -655,14 +628,14 @@ pc_write_madr(pda_handle_t ph, int lboard, int rboard, uint_t madr)
}
if (i > 0) {
PR_XF("%s: WARNING: (2) lb=%d, rb=%d, "
- "madr=0x%x (i=%d)\n",
- proc, lboard, rboard, madr, i);
+ "madr=0x%x (i=%d)\n",
+ proc, lboard, rboard, madr, i);
}
if (rd_madr != madr) {
cmn_err(CE_WARN,
- "IDN: 503: (validate) failed to update "
- "PC madr (expected 0x%x, actual 0x%x)",
- madr, rd_madr);
+ "IDN: 503: (validate) failed to update "
+ "PC madr (expected 0x%x, actual 0x%x)",
+ madr, rd_madr);
rv++;
}
}
@@ -676,7 +649,7 @@ pc_write_madr(pda_handle_t ph, int lboard, int rboard, uint_t madr)
continue;
pc_madr_addr = (u_longlong_t)STARFIRE_PC_MADR_ADDR(lboard,
- rboard, ioc + 4);
+ rboard, ioc + 4);
if (madr != 0) {
/*
@@ -686,8 +659,8 @@ pc_write_madr(pda_handle_t ph, int lboard, int rboard, uint_t madr)
madr &= ~STARFIRE_PC_MADR_VALIDBIT;
}
PR_XF("%s: write madr(0x%x) to iopc_madr_addr(0x%llx) "
- "[lb=%d, rb=%d, ioc=%d]\n",
- proc, madr, pc_madr_addr, lboard, rboard, ioc);
+ "[lb=%d, rb=%d, ioc=%d]\n",
+ proc, madr, pc_madr_addr, lboard, rboard, ioc);
DEBUG_DELAY();
for (i = 0; i < 20; i++) {
@@ -701,14 +674,14 @@ pc_write_madr(pda_handle_t ph, int lboard, int rboard, uint_t madr)
}
if (i > 0) {
PR_XF("%s: WARNING: (3) lb=%d, rb=%d, "
- "madr=0x%x (i=%d)\n",
- proc, lboard, rboard, madr, i);
+ "madr=0x%x (i=%d)\n",
+ proc, lboard, rboard, madr, i);
}
if (rd_madr != madr) {
cmn_err(CE_WARN,
- "IDN: 504: (invalidate) failed to update "
- "IOPC madr (expected 0x%x, actual 0x%x)",
- madr, rd_madr);
+ "IDN: 504: (invalidate) failed to update "
+ "IOPC madr (expected 0x%x, actual 0x%x)",
+ madr, rd_madr);
rv++;
continue;
}
@@ -723,8 +696,8 @@ pc_write_madr(pda_handle_t ph, int lboard, int rboard, uint_t madr)
}
PR_XF("%s: write madr(0x%x) to iopc_madr_addr(0x%llx) "
- "[lb=%d, rb=%d, ioc=%d]\n",
- proc, madr, pc_madr_addr, lboard, rboard, ioc);
+ "[lb=%d, rb=%d, ioc=%d]\n",
+ proc, madr, pc_madr_addr, lboard, rboard, ioc);
DEBUG_DELAY();
for (i = 0; i < 20; i++) {
@@ -738,14 +711,14 @@ pc_write_madr(pda_handle_t ph, int lboard, int rboard, uint_t madr)
}
if (i > 0) {
PR_XF("%s: WARNING: (4) lb=%d, rb=%d, "
- "madr=0x%x (i=%d)\n",
- proc, lboard, rboard, madr, i);
+ "madr=0x%x (i=%d)\n",
+ proc, lboard, rboard, madr, i);
}
if (rd_madr != madr) {
cmn_err(CE_WARN,
- "IDN: 504: (validate) failed to update "
- "IOPC madr (expected 0x%x, actual 0x%x)",
- madr, rd_madr);
+ "IDN: 504: (validate) failed to update "
+ "IOPC madr (expected 0x%x, actual 0x%x)",
+ madr, rd_madr);
rv++;
}
}
@@ -790,8 +763,8 @@ pc_read_madr(pda_handle_t ph, int lboard, uint_t mc_adr[], int local_only)
break;
if (ioc == MAX_IOCS) {
cmn_err(CE_WARN,
- "IDN: 505: board %d missing any valid PCs",
- lboard);
+ "IDN: 505: board %d missing any valid PCs",
+ lboard);
return;
}
p = ioc + 4;
@@ -808,14 +781,13 @@ pc_read_madr(pda_handle_t ph, int lboard, uint_t mc_adr[], int local_only)
* entries corresponding to our boards.
*/
lbp = pda_get_board_info(ph, brd);
- if (!local_only ||
- ((lbp->bda_board & BDAN_MASK) == BDAN_GOOD))
+ if (!local_only || ((lbp->bda_board & BDAN_MASK) == BDAN_GOOD))
mc_adr[brd] = PHYSIO_LD(pc_madr_addr);
else
mc_adr[brd] = 0;
pc_madr_addr += ((u_longlong_t)1 <<
- STARFIRE_PC_MADR_BOARD_SHIFT);
+ STARFIRE_PC_MADR_BOARD_SHIFT);
}
}
@@ -883,13 +855,13 @@ update_local_hw_config(idn_domain_t *ldp, struct hwconfig *loc_hw)
int c;
PR_PROTO("%s: NEW HW CONFIG (old_bset = 0x%x, "
- "new_bset = 0x%x)\n",
- proc, ldp->dhw.dh_boardset, loc_hw->dh_boardset);
+ "new_bset = 0x%x)\n",
+ proc, ldp->dhw.dh_boardset, loc_hw->dh_boardset);
PR_PROTO("%s: clearing boardset 0x%x\n", proc,
- ldp->dhw.dh_boardset & ~loc_hw->dh_boardset);
+ ldp->dhw.dh_boardset & ~loc_hw->dh_boardset);
PR_PROTO("%s: setting boardset 0x%x\n", proc,
- loc_hw->dh_boardset & ~ldp->dhw.dh_boardset);
+ loc_hw->dh_boardset & ~ldp->dhw.dh_boardset);
idn.dc_boardset &= ~ldp->dhw.dh_boardset;
idn.dc_boardset |= loc_hw->dh_boardset;
@@ -939,7 +911,7 @@ get_hw_config(struct hwconfig *loc_hw)
* IDN operations.
*/
cmn_err(CE_WARN,
- "IDN: 506: cic sm_mask is not writeable");
+ "IDN: 506: cic sm_mask is not writeable");
return (-1);
}
/*
@@ -949,7 +921,7 @@ get_hw_config(struct hwconfig *loc_hw)
ph = pda_open();
if (ph == (pda_handle_t)NULL) {
cmn_err(CE_WARN,
- "IDN: 507: failed to map-in post2obp structure");
+ "IDN: 507: failed to map-in post2obp structure");
return (-1);
} else if (!pda_is_valid(ph)) {
cmn_err(CE_WARN, "IDN: 508: post2obp checksum invalid");
@@ -990,7 +962,7 @@ get_hw_config(struct hwconfig *loc_hw)
for (brd = 0; brd < MAX_BOARDS; brd++)
if (loc_hw->dh_mcadr[brd] != 0) {
PR_XF("%s: brd %d, mc = 0x%x\n",
- proc, brd, loc_hw->dh_mcadr[brd]);
+ proc, brd, loc_hw->dh_mcadr[brd]);
}
}
#endif /* DEBUG */
@@ -1015,7 +987,8 @@ idnxf_shmem_wakeup(void *arg)
int expired;
procname_t proc = "idnxf_shmem_wakeup";
- expired = ((lbolt - idnxfp->xf_start_time) >= IDNCIC_TIMEOUT) ? 1 : 0;
+ expired = ((ddi_get_lbolt() - idnxfp->xf_start_time) >=
+ IDNCIC_TIMEOUT) ? 1 : 0;
if ((count = idnxfp->xf_count) == 0) {
/*
@@ -1039,7 +1012,7 @@ idnxf_shmem_wakeup(void *arg)
*/
if ((idn_debug | o_idn_debug) & IDNDBG_REGS)
printf("%s: TIMEOUT...bailing on %d lost CIC "
- "updates...\n", proc, count);
+ "updates...\n", proc, count);
#endif /* DEBUG */
ATOMIC_SUB(idnxfp->xf_count, count);
@@ -1050,7 +1023,7 @@ idnxf_shmem_wakeup(void *arg)
} else {
(void) timeout(idnxf_shmem_wakeup, (caddr_t)idnxfp,
- (clock_t)IDNCIC_TIMECHK);
+ (clock_t)IDNCIC_TIMECHK);
}
}
@@ -1100,15 +1073,15 @@ idnxf_shmem_update_one(uint64_t arg1, uint64_t arg2)
if (idnxfp->xf_smbase != (uint_t)-1) {
(void) cic_write_sm_bar(brd, bus,
- idnxfp->xf_smbase);
+ idnxfp->xf_smbase);
(void) cic_write_sm_lar(brd, bus,
- idnxfp->xf_smlimit);
+ idnxfp->xf_smlimit);
}
/*
* Verify data got there!
*/
rv = verify_smregs(brd, bus, smmask, idnxfp->xf_smbase,
- idnxfp->xf_smlimit);
+ idnxfp->xf_smlimit);
} else {
smmask &= ~idnxfp->xf_boardset;
(void) cic_write_sm_mask(brd, bus, smmask);
@@ -1128,7 +1101,7 @@ idnxf_shmem_update_one(uint64_t arg1, uint64_t arg2)
rv = verify_smregs(brd, bus, smmask, 1, 0);
} else {
rv = verify_smregs(brd, bus, smmask,
- (uint_t)-1, (uint_t)-1);
+ (uint_t)-1, (uint_t)-1);
}
}
if (rv) {
@@ -1197,7 +1170,7 @@ idnxf_shmem_update_all(pda_handle_t ph, boardset_t boardset,
* Let's set up the global structure all the xcall
* recepients will read.
*/
- start_time = lbolt;
+ start_time = ddi_get_lbolt();
/*
* Set the start time. Make sure it's different
* then the previous run.
@@ -1226,9 +1199,9 @@ idnxf_shmem_update_all(pda_handle_t ph, boardset_t boardset,
tu32 = UPPER32_CPUMASK(target_cpuset);
tl32 = LOWER32_CPUMASK(target_cpuset);
PR_REGS("%s: (start %ld) broadcasting CIC - "
- "%s to cpus 0x%x.%0x\n",
- proc, start_time, doadd ? "LINK" : "UNLINK",
- tu32, tl32);
+ "%s to cpus 0x%x.%0x\n",
+ proc, start_time, doadd ? "LINK" : "UNLINK",
+ tu32, tl32);
}
/*
@@ -1241,7 +1214,7 @@ idnxf_shmem_update_all(pda_handle_t ph, boardset_t boardset,
xc_attention(target_cpuset);
xc_some(target_cpuset, idnxf_shmem_update_one,
- (uint64_t)&idnxf_cic_info, (uint64_t)start_time);
+ (uint64_t)&idnxf_cic_info, (uint64_t)start_time);
xc_dismissed(target_cpuset);
@@ -1253,9 +1226,9 @@ idnxf_shmem_update_all(pda_handle_t ph, boardset_t boardset,
#endif /* DEBUG */
PR_REGS("%s: waiting for completion of %d CIC - %s...\n",
- proc, idnxf_cic_info.xf_count, doadd ? "LINKS" : "UNLINKS");
+ proc, idnxf_cic_info.xf_count, doadd ? "LINKS" : "UNLINKS");
PR_REGS("%s: CIC - %s have checked IN.\n",
- proc, doadd ? "LINKS" : "UNLINKS");
+ proc, doadd ? "LINKS" : "UNLINKS");
/*
* Modifying xf_start_time effectively disables any
@@ -1266,7 +1239,7 @@ idnxf_shmem_update_all(pda_handle_t ph, boardset_t boardset,
idnxf_cic_info.xf_start_time++;
PR_REGS("%s: xf_errcnt = %d, xf_errtimer = %d\n",
- proc, idnxf_cic_info.xf_errcnt, idnxf_cic_info.xf_errtimer);
+ proc, idnxf_cic_info.xf_errcnt, idnxf_cic_info.xf_errtimer);
DUMP_CIC_HISTORY();
/*
* Should errors be fatal? (panic).
@@ -1291,23 +1264,23 @@ idnxf_shmem_update_all(pda_handle_t ph, boardset_t boardset,
if (!idnxf_cic_info.xf_errtimer)
break;
cmn_err(CE_WARN,
- "IDN: 509: CPU %d never responded "
- "to CIC update", c);
+ "IDN: 509: CPU %d never responded "
+ "to CIC update", c);
/*FALLTHROUGH*/
case IDNCIC_ERR:
cmn_err(CE_WARN,
- "IDN: 510: failed write-smregs "
- "(bd=%d, bs=%d, sm(bar=0x%x, "
- "lar=0x%x))",
- brd, bus, smbase, smlimit);
+ "IDN: 510: failed write-smregs "
+ "(bd=%d, bs=%d, sm(bar=0x%x, "
+ "lar=0x%x))",
+ brd, bus, smbase, smlimit);
rv++;
break;
case IDNCIC_BUSY:
cmn_err(CE_WARN, "IDN: 511: update-one "
- "(cpu=%d, bd=%d) time conflict",
- c, brd);
+ "(cpu=%d, bd=%d) time conflict",
+ c, brd);
/*
* Should never occur. Not fatal,
* just continue.
@@ -1316,8 +1289,8 @@ idnxf_shmem_update_all(pda_handle_t ph, boardset_t boardset,
default:
PR_REGS("%s: board %d, bus %d "
- "(bar=0x%x,lar=0x%x) - update OK\n",
- proc, brd, bus, smbase, smlimit);
+ "(bar=0x%x,lar=0x%x) - update OK\n",
+ proc, brd, bus, smbase, smlimit);
break;
}
}
@@ -1333,9 +1306,8 @@ idnxf_shmem_update_all(pda_handle_t ph, boardset_t boardset,
* is_master Indicates remote domain is a master.
*/
int
-idnxf_shmem_add(int is_master, boardset_t boardset,
- pfn_t pfnbase, pfn_t pfnlimit,
- uint_t *mcadr)
+idnxf_shmem_add(int is_master, boardset_t boardset, pfn_t pfnbase,
+ pfn_t pfnlimit, uint_t *mcadr)
{
int rv = 0;
register int brd, rbrd;
@@ -1358,7 +1330,7 @@ idnxf_shmem_add(int is_master, boardset_t boardset,
smbase = smlimit = (uint_t)-1;
}
PR_REGS("%s: is_master=%d, boardset=0x%x, smbase=0x%x, smlimit=%x\n",
- proc, is_master, boardset, smbase, smlimit);
+ proc, is_master, boardset, smbase, smlimit);
/*
* Need to serialize hardware access so we don't have multiple
@@ -1376,7 +1348,7 @@ idnxf_shmem_add(int is_master, boardset_t boardset,
ph = pda_open();
if (ph == (pda_handle_t)NULL) {
cmn_err(CE_WARN,
- "IDN: 507: failed to map-in post2obp structure");
+ "IDN: 507: failed to map-in post2obp structure");
rv = -1;
goto done;
@@ -1400,7 +1372,7 @@ idnxf_shmem_add(int is_master, boardset_t boardset,
* then we need to deprogram our PCs.
*/
PR_REGS("%s: updating PC regs (lboardset=0x%x, rboardset=0x%x)\n",
- proc, localboardset, boardset);
+ proc, localboardset, boardset);
for (brd = 0; brd < MAX_BOARDS; brd++) {
@@ -1423,9 +1395,9 @@ idnxf_shmem_add(int is_master, boardset_t boardset,
*/
if (pc_write_madr(ph, brd, rbrd, madr) < 0) {
cmn_err(CE_WARN,
- "IDN: 512: failed [add] write-madr "
- "(bd=%d, rbd=%d, madr=0x%x)",
- brd, rbrd, madr);
+ "IDN: 512: failed [add] write-madr "
+ "(bd=%d, rbd=%d, madr=0x%x)",
+ brd, rbrd, madr);
rv = -1;
goto done;
}
@@ -1467,7 +1439,7 @@ idnxf_shmem_sub(int is_master, boardset_t boardset)
ASSERT(localboardset && boardset && ((localboardset & boardset) == 0));
PR_REGS("%s: is_master=%d, boardset=0x%x\n",
- proc, is_master, boardset);
+ proc, is_master, boardset);
/*
* Need to serialize hardware access so we don't have multiple
@@ -1485,7 +1457,7 @@ idnxf_shmem_sub(int is_master, boardset_t boardset)
ph = pda_open();
if (ph == (pda_handle_t)NULL) {
cmn_err(CE_WARN,
- "IDN: 507: failed to map-in post2obp structure");
+ "IDN: 507: failed to map-in post2obp structure");
rv = -1;
goto done;
@@ -1509,7 +1481,7 @@ idnxf_shmem_sub(int is_master, boardset_t boardset)
* then we need to deprogram our PCs.
*/
PR_REGS("%s: reseting PC regs (lboardset=0x%x, rboardset=0x%x)\n",
- proc, localboardset, boardset);
+ proc, localboardset, boardset);
for (brd = 0; brd < MAX_BOARDS; brd++) {
@@ -1527,9 +1499,9 @@ idnxf_shmem_sub(int is_master, boardset_t boardset)
*/
if (pc_write_madr(ph, brd, rbrd, 0) < 0) {
cmn_err(CE_WARN,
- "IDN: 512: failed [del] write-madr "
- "(bd=%d, rbd=%d, madr=0x%x)",
- brd, rbrd, 0);
+ "IDN: 512: failed [del] write-madr "
+ "(bd=%d, rbd=%d, madr=0x%x)",
+ brd, rbrd, 0);
rv = -1;
goto done;
}
@@ -1581,9 +1553,8 @@ idnxf_flushall_ecache()
procname_t proc = "idnxf_flushall_ecache";
- PR_XF("%s: flushing ecache (cpu_ready_set = 0x%x.%x)\n",
- proc, UPPER32_CPUMASK(cpu_ready_set),
- LOWER32_CPUMASK(cpu_ready_set));
+ PR_XF("%s: flushing ecache (cpu_ready_set = 0x%x.%x)\n", proc,
+ UPPER32_CPUMASK(cpu_ready_set), LOWER32_CPUMASK(cpu_ready_set));
CHECKPOINT_CACHE_CLEAR_DEBUG(1);
CHECKPOINT_CACHE_STEP_DEBUG(0x1, 2);
@@ -1615,8 +1586,8 @@ idnxf_flushall_ecache()
* --------------------------------------------------
*/
static int
-verify_smregs(int brd, int bus, boardset_t smmask,
- uint_t smbase, uint_t smlimit)
+verify_smregs(int brd, int bus, boardset_t smmask, uint_t smbase, uint_t
+ smlimit)
{
int rv = 0;
uint_t smreg;
@@ -1625,9 +1596,9 @@ verify_smregs(int brd, int bus, boardset_t smmask,
smreg = (uint_t)cic_read_sm_mask(brd, bus);
if (smreg != (uint_t)smmask) {
cmn_err(CE_WARN,
- "IDN: 513: sm-mask error "
- "(expected = 0x%x, actual = 0x%x)",
- (uint_t)smmask, smreg);
+ "IDN: 513: sm-mask error "
+ "(expected = 0x%x, actual = 0x%x)",
+ (uint_t)smmask, smreg);
rv++;
}
}
@@ -1636,9 +1607,9 @@ verify_smregs(int brd, int bus, boardset_t smmask,
smreg = cic_read_sm_bar(brd, bus);
if (smreg != smbase) {
cmn_err(CE_WARN,
- "IDN: 514: sm-base error "
- "(expected = 0x%x, actual = 0x%x)",
- smbase, smreg);
+ "IDN: 514: sm-base error "
+ "(expected = 0x%x, actual = 0x%x)",
+ smbase, smreg);
rv++;
}
}
@@ -1647,9 +1618,9 @@ verify_smregs(int brd, int bus, boardset_t smmask,
smreg = cic_read_sm_lar(brd, bus);
if (smreg != smlimit) {
cmn_err(CE_WARN,
- "IDN: 515: sm-limit error "
- "(expected = 0x%x, actual = 0x%x)",
- smlimit, smreg);
+ "IDN: 515: sm-limit error "
+ "(expected = 0x%x, actual = 0x%x)",
+ smlimit, smreg);
rv++;
}
}
@@ -1708,13 +1679,13 @@ idn_cpu_per_board(pda_handle_t ph, cpuset_t cset, struct hwconfig *hwp)
lbp = ph ? pda_get_board_info(ph, b) : NULL;
if ((lbp &&
- (BDA_NBL(lbp->bda_board, BDA_MC_NBL) == BDAN_GOOD)) ||
- (!lbp && hwp->dh_mcadr[b])) {
+ (BDA_NBL(lbp->bda_board, BDA_MC_NBL) == BDAN_GOOD)) ||
+ (!lbp && hwp->dh_mcadr[b])) {
err++;
cmn_err(CE_WARN,
- "IDN: 516: (%s) board %d has memory, "
- "but no CPUs - CPU per memory board REQUIRED",
- ph ? "local" : "remote", b);
+ "IDN: 516: (%s) board %d has memory, "
+ "but no CPUs - CPU per memory board REQUIRED",
+ ph ? "local" : "remote", b);
}
}
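The idnxf_shmem_wakeup() and smr_slab_reap_global() hunks show the other recurring substitution: direct reads of lbolt in elapsed-time checks become calls to ddi_get_lbolt(), snapshotted into a local so the comparison uses one consistent value. A sketch of the same check with hypothetical names (my_last_run, MY_INTERVAL_TICKS):

#include <sys/types.h>
#include <sys/param.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

#define	MY_INTERVAL_TICKS	(10 * hz)	/* hypothetical interval */

static clock_t	my_last_run;

static void
my_rate_limited_work(void)
{
	clock_t	now = ddi_get_lbolt();	/* one snapshot, reused below */

	if ((now > 0) && (now > my_last_run) &&
	    ((now - my_last_run) < MY_INTERVAL_TICKS))
		return;			/* ran too recently, skip */
	my_last_run = now;

	/* ...the periodic work itself would go here... */
}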
diff --git a/usr/src/uts/sun4u/starfire/os/starfire.c b/usr/src/uts/sun4u/starfire/os/starfire.c
index 4bd49fc9f0..bd8c66eef2 100644
--- a/usr/src/uts/sun4u/starfire/os/starfire.c
+++ b/usr/src/uts/sun4u/starfire/os/starfire.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
@@ -45,6 +43,7 @@
#include <sys/cpu_sgnblk_defs.h>
#include <sys/cpu_sgn.h>
#include <sys/kdi_impl.h>
+#include <sys/clock_impl.h>
extern cpu_sgnblk_t *cpu_sgnblkp[];
@@ -151,7 +150,7 @@ set_platform_cage_params(void)
pgcnt_t preferred_cage_size;
preferred_cage_size =
- MAX(starfire_startup_cage_size, total_pages / 256);
+ MAX(starfire_startup_cage_size, total_pages / 256);
#ifdef DEBUG
if (starfire_cage_size_limit)
@@ -289,7 +288,7 @@ plat_freelist_process(int mnode)
bd_cnt = 0;
bd_flags = 0;
for (idx = 0; idx < STARFIRE_MAX_BOARDS;
- idx++) {
+ idx++) {
bdlist[idx] = NULL;
sortlist[idx] = NULL;
}
@@ -348,7 +347,7 @@ plat_freelist_process(int mnode)
*/
bds = 0;
for (idx = 0; idx < STARFIRE_MAX_BOARDS;
- idx++) {
+ idx++) {
if (bdlist[idx])
sortlist[bds++] = &bdlist[idx];
}
@@ -418,6 +417,7 @@ plat_freelist_process(int mnode)
idy++) {
if (bdlist[idy]) {
sortlist[bds++]
+ /* CSTYLED */
= &bdlist[idy];
}
}
@@ -550,14 +550,14 @@ sgn_update_all_cpus(ushort_t sgn, uchar_t state, uchar_t sub_state)
cpu_sub_state = sub_state;
if ((sblkp != NULL) && (cpu[i] != NULL && (cpu[i]->cpu_flags &
- (CPU_EXISTS|CPU_QUIESCED)))) {
+ (CPU_EXISTS|CPU_QUIESCED)))) {
if (sub_state == EXIT_REBOOT) {
cpu_sub_state =
- sblkp->sigb_signature.state_t.sub_state;
+ sblkp->sigb_signature.state_t.sub_state;
if ((cpu_sub_state == EXIT_PANIC1) ||
- (cpu_sub_state == EXIT_PANIC2))
+ (cpu_sub_state == EXIT_PANIC2))
cpu_sub_state = EXIT_PANIC_REBOOT;
else
cpu_sub_state = EXIT_REBOOT;
@@ -573,10 +573,10 @@ sgn_update_all_cpus(ushort_t sgn, uchar_t state, uchar_t sub_state)
cpu_state = sblkp->sigb_signature.state_t.state;
if (cpu_state == SIGBST_WATCHDOG_SYNC)
cpu_sgn_update(sgn, SIGBST_WATCHDOG_SYNC,
- cpu_sub_state, i);
+ cpu_sub_state, i);
else if (cpu_state == SIGBST_REDMODE_SYNC)
cpu_sgn_update(sgn, SIGBST_REDMODE_SYNC,
- cpu_sub_state, i);
+ cpu_sub_state, i);
else
cpu_sgn_update(sgn, state, cpu_sub_state, i);
}
@@ -617,6 +617,8 @@ get_cpu_sgn_state(int cpuid)
static void
starfire_system_claim(void)
{
+ lbolt_debug_entry();
+
prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
}
@@ -624,6 +626,8 @@ static void
starfire_system_release(void)
{
prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
+
+ lbolt_debug_return();
}
void
diff --git a/usr/src/uts/sun4u/starfire/sys/idn.h b/usr/src/uts/sun4u/starfire/sys/idn.h
index ce75beab46..a193e4ceb5 100644
--- a/usr/src/uts/sun4u/starfire/sys/idn.h
+++ b/usr/src/uts/sun4u/starfire/sys/idn.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* Inter-Domain Network
@@ -28,8 +28,6 @@
#ifndef _SYS_IDN_H
#define _SYS_IDN_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifndef _ASM
#ifdef _KERNEL
@@ -3317,7 +3315,7 @@ extern struct idn_gkstat sg_kstat;
#define IDN_GKSTAT_INC(vvv) ((sg_kstat.vvv)++)
#define IDN_GKSTAT_ADD(vvv, iii) ((sg_kstat.vvv) += (iii))
#define IDN_GKSTAT_GLOBAL_EVENT(vvv, ttt) \
- ((sg_kstat.vvv)++, ((sg_kstat.ttt) = lbolt))
+ ((sg_kstat.vvv)++, ((sg_kstat.ttt) = ddi_get_lbolt()))
#endif /* IDN_NO_KSTAT */
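The idn.h hunk converts the kstat time-stamp macro used by the SMR code earlier in this diff. A sketch of the same idea against a hypothetical stats structure (my_stats is not the sg_kstat layout):

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/* Hypothetical stats block; not the IDN sg_kstat layout. */
static struct my_stats {
	uint64_t	fail_count;
	clock_t		fail_last;	/* tick stamp of the last failure */
} my_stats;

#define	MY_STAT_EVENT(cnt, stamp) \
	((my_stats.cnt)++, ((my_stats.stamp) = ddi_get_lbolt()))

/* Usage mirrors IDN_GKSTAT_GLOBAL_EVENT(gk_buffail, gk_buffail_last). */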
diff --git a/usr/src/uts/sun4v/io/glvc/glvc.c b/usr/src/uts/sun4v/io/glvc/glvc.c
index b8a1a8ffc6..8c151416aa 100644
--- a/usr/src/uts/sun4v/io/glvc/glvc.c
+++ b/usr/src/uts/sun4v/io/glvc/glvc.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -628,7 +628,6 @@ glvc_read(dev_t dev, struct uio *uiop, cred_t *credp)
int rv, error = DDI_SUCCESS;
uint64_t hverr, recv_count = 0;
uint64_t status_reg;
- clock_t tick;
instance = getminor(dev);
@@ -648,9 +647,9 @@ glvc_read(dev_t dev, struct uio *uiop, cred_t *credp)
*/
while (hverr == H_EOK && (status_reg & GLVC_REG_RECV) !=
GLVC_REG_RECV) {
- tick = ddi_get_lbolt() + softsp->polling_interval;
- rv = cv_timedwait_sig(&softsp->recv_cv,
- &softsp->recv_mutex, tick);
+ rv = cv_reltimedwait_sig(&softsp->recv_cv,
+ &softsp->recv_mutex, softsp->polling_interval,
+ TR_CLOCK_TICK);
if (rv == 0) {
/*
* We got interrupted.
@@ -718,7 +717,6 @@ glvc_write(dev_t dev, struct uio *uiop, cred_t *credp)
int instance;
int rv, error = DDI_SUCCESS;
uint64_t hverr, send_count = 0;
- clock_t tick;
instance = getminor(dev);
@@ -733,9 +731,9 @@ glvc_write(dev_t dev, struct uio *uiop, cred_t *credp)
mutex_enter(&softsp->send_complete_mutex);
while (softsp->send_complete_flag == 0) {
- tick = ddi_get_lbolt() + softsp->polling_interval;
- rv = cv_timedwait_sig(&softsp->send_complete_cv,
- &softsp->send_complete_mutex, tick);
+ rv = cv_reltimedwait_sig(&softsp->send_complete_cv,
+ &softsp->send_complete_mutex, softsp->polling_interval,
+ TR_CLOCK_TICK);
if (rv == 0) {
/*
* We got interrupted.
@@ -837,7 +835,6 @@ glvc_peek(glvc_soft_state_t *softsp, glvc_xport_msg_peek_t *msg_peek)
uint64_t hverr = H_EOK;
uint64_t recv_count = 0;
uint64_t status_reg;
- clock_t tick;
mutex_enter(&softsp->recv_mutex);
@@ -852,9 +849,9 @@ glvc_peek(glvc_soft_state_t *softsp, glvc_xport_msg_peek_t *msg_peek)
*/
while (hverr == H_EOK && (status_reg & GLVC_REG_RECV) !=
GLVC_REG_RECV) {
- tick = ddi_get_lbolt() + softsp->polling_interval;
- rv = cv_timedwait_sig(&softsp->recv_cv,
- &softsp->recv_mutex, tick);
+ rv = cv_reltimedwait_sig(&softsp->recv_cv,
+ &softsp->recv_mutex, softsp->polling_interval,
+ TR_CLOCK_TICK);
if (rv == 0) {
/*
* We got interrupted.
diff --git a/usr/src/uts/sun4v/io/ntwdt.c b/usr/src/uts/sun4v/io/ntwdt.c
index 49f01eae17..5dcace595a 100644
--- a/usr/src/uts/sun4v/io/ntwdt.c
+++ b/usr/src/uts/sun4v/io/ntwdt.c
@@ -19,11 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-
/*
* sun4v application watchdog driver
*/
@@ -669,8 +668,8 @@ ntwdt_cyclic_softint(caddr_t arg)
}
NTWDT_DBG(NTWDT_DBG_IOCTL, ("cyclic_softint: %d"
- "lbolt64: %d\n", ntwdt_state->ntwdt_watchdog_timeout,
- (int)TICK_TO_MSEC(lbolt64)));
+ "ddi_get_lbolt64(): %d\n", ntwdt_state->ntwdt_watchdog_timeout,
+ (int)TICK_TO_MSEC(ddi_get_lbolt64())));
/*
* Decrement the virtual watchdog timer and check if it has expired.
diff --git a/usr/src/uts/sun4v/io/vdc.c b/usr/src/uts/sun4v/io/vdc.c
index ea30b337b2..6a9886cf0d 100644
--- a/usr/src/uts/sun4v/io/vdc.c
+++ b/usr/src/uts/sun4v/io/vdc.c
@@ -6636,7 +6636,7 @@ vdc_eio_thread(void *arg)
{
int status;
vdc_t *vdc = (vdc_t *)arg;
- clock_t timeout, starttime;
+ clock_t starttime, timeout = drv_usectohz(vdc->failfast_interval);
mutex_enter(&vdc->lock);
@@ -6832,8 +6832,7 @@ vdc_ownership_thread(void *arg)
if (vdc->ownership & VDC_OWNERSHIP_GRANTED)
timeout = 0;
else
- timeout = ddi_get_lbolt() +
- drv_usectohz(vdc_ownership_delay);
+ timeout = drv_usectohz(vdc_ownership_delay);
/* Release the ownership_lock and wait on the vdc lock */
mutex_exit(&vdc->ownership_lock);
@@ -6841,8 +6840,8 @@ vdc_ownership_thread(void *arg)
if (timeout == 0)
(void) cv_wait(&vdc->ownership_cv, &vdc->lock);
else
- (void) cv_timedwait(&vdc->ownership_cv,
- &vdc->lock, timeout);
+ (void) cv_reltimedwait(&vdc->ownership_cv, &vdc->lock,
+ timeout, TR_CLOCK_TICK);
mutex_exit(&vdc->lock);
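In vdc_ownership_thread() the wait is not signal-interruptible, so the change is cv_timedwait() to cv_reltimedwait() and the timeout variable now holds a relative tick count (or 0 for an untimed wait) instead of an lbolt deadline. A hedged sketch of that two-way wait, with work_done and delay_usec standing in for the VDC_OWNERSHIP_GRANTED test and vdc_ownership_delay:

#include <sys/types.h>
#include <sys/time.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

static kcondvar_t	my_cv;
static kmutex_t		my_lock;

static void
my_wait_for_change(boolean_t work_done, clock_t delay_usec)
{
	clock_t	timeout;

	/* 0 means "no deadline"; otherwise a relative tick count. */
	timeout = work_done ? 0 : drv_usectohz(delay_usec);

	mutex_enter(&my_lock);
	if (timeout == 0)
		(void) cv_wait(&my_cv, &my_lock);
	else
		(void) cv_reltimedwait(&my_cv, &my_lock,
		    timeout, TR_CLOCK_TICK);
	mutex_exit(&my_lock);
}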
diff --git a/usr/src/uts/sun4v/io/vldc.c b/usr/src/uts/sun4v/io/vldc.c
index 558db60bfc..4ad4e723e5 100644
--- a/usr/src/uts/sun4v/io/vldc.c
+++ b/usr/src/uts/sun4v/io/vldc.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -830,8 +830,8 @@ i_vldc_close_port(vldc_t *vldcp, uint_t portno)
* after waking up, check if the port has been closed
* by another thread in the meantime.
*/
- (void) cv_timedwait(&vminor->cv, &vminor->lock,
- ddi_get_lbolt() + drv_usectohz(vldc_close_delay));
+ (void) cv_reltimedwait(&vminor->cv, &vminor->lock,
+ drv_usectohz(vldc_close_delay), TR_CLOCK_TICK);
rv = 0;
} while (vport->status != VLDC_PORT_CLOSED);
diff --git a/usr/src/uts/sun4v/io/vsw_ldc.c b/usr/src/uts/sun4v/io/vsw_ldc.c
index ffdae8ae36..bf22468284 100644
--- a/usr/src/uts/sun4v/io/vsw_ldc.c
+++ b/usr/src/uts/sun4v/io/vsw_ldc.c
@@ -1186,8 +1186,8 @@ vsw_drain_ldcs(vsw_port_t *port)
*/
while (ldc_unreg_callback(ldcp->ldc_handle)
== EWOULDBLOCK)
- (void) cv_timedwait(&ldcp->drain_cv,
- &ldcp->drain_cv_lock, ddi_get_lbolt() + hz);
+ (void) cv_reltimedwait(&ldcp->drain_cv,
+ &ldcp->drain_cv_lock, hz, TR_CLOCK_TICK);
mutex_exit(&ldcp->drain_cv_lock);
D2(vswp, "%s: unreg callback for chan %ld after "
diff --git a/usr/src/uts/sun4v/os/mach_cpu_states.c b/usr/src/uts/sun4v/os/mach_cpu_states.c
index d582873776..3b41ebcbf4 100644
--- a/usr/src/uts/sun4v/os/mach_cpu_states.c
+++ b/usr/src/uts/sun4v/os/mach_cpu_states.c
@@ -54,6 +54,7 @@
#include <sys/hsvc.h>
#include <sys/ldoms.h>
#include <sys/kldc.h>
+#include <sys/clock_impl.h>
#include <sys/dumphdr.h>
/*
@@ -1110,6 +1111,8 @@ static hsvc_info_t soft_state_hsvc = {
static void
sun4v_system_claim(void)
{
+ lbolt_debug_entry();
+
watchdog_suspend();
kldc_debug_enter();
/*
@@ -1141,6 +1144,8 @@ sun4v_system_release(void)
&SOLARIS_SOFT_STATE_SAVED_MSG);
soft_state_saved_state = -1;
}
+
+ lbolt_debug_return();
}
void
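Both the Starfire and sun4v system_claim/system_release callbacks now bracket debugger entry with lbolt_debug_entry() and lbolt_debug_return() from <sys/clock_impl.h>, so the lbolt code knows when the machine is sitting in the debugger. A sketch of how a platform pair might use them; the function names here are hypothetical:

#include <sys/clock_impl.h>

/* Hypothetical platform callbacks; not an actual platform module. */
static void
my_system_claim(void)
{
	lbolt_debug_entry();	/* debugger is taking over the machine */

	/* ...platform-specific quiesce work... */
}

static void
my_system_release(void)
{
	/* ...platform-specific resume work... */

	lbolt_debug_return();	/* resume normal lbolt accounting */
}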
diff --git a/usr/src/uts/sun4v/promif/promif_prop.c b/usr/src/uts/sun4v/promif/promif_prop.c
index cb76dc37c5..67628d9d6c 100644
--- a/usr/src/uts/sun4v/promif/promif_prop.c
+++ b/usr/src/uts/sun4v/promif/promif_prop.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
@@ -214,8 +212,8 @@ promif_ldom_setprop(char *name, void *value, int valuelen)
kmem_free(req, sizeof (var_config_hdr_t) + paylen);
mutex_enter(&promif_prop_lock);
- if (cv_timedwait(&promif_prop_cv,
- &promif_prop_lock, lbolt + PROMIF_DS_TIMEOUT_SEC * hz) == -1) {
+ if (cv_reltimedwait(&promif_prop_cv, &promif_prop_lock,
+ PROMIF_DS_TIMEOUT_SEC * hz, TR_CLOCK_TICK) == -1) {
cmn_err(CE_WARN, "%s: ds response timeout", me);
rv = -1;
goto out;