Diffstat (limited to 'usr/src/uts/common')
-rw-r--r--  usr/src/uts/common/inet/tcp/tcp.c      4
-rw-r--r--  usr/src/uts/common/os/clock.c          7
-rw-r--r--  usr/src/uts/common/os/clock_tick.c     6
-rw-r--r--  usr/src/uts/common/os/panic.c          4
-rw-r--r--  usr/src/uts/common/sys/clock_impl.h   30
5 files changed, 28 insertions(+), 23 deletions(-)
diff --git a/usr/src/uts/common/inet/tcp/tcp.c b/usr/src/uts/common/inet/tcp/tcp.c
index 3a22e6362d..6d5e2d35c1 100644
--- a/usr/src/uts/common/inet/tcp/tcp.c
+++ b/usr/src/uts/common/inet/tcp/tcp.c
@@ -12422,7 +12422,7 @@ tcp_paws_check(tcp_t *tcp, tcpha_t *tcpha, tcp_opt_t *tcpoptp)
if ((flags & TH_RST) == 0 &&
TSTMP_LT(tcpoptp->tcp_opt_ts_val,
tcp->tcp_ts_recent)) {
- if (TSTMP_LT(LBOLT_FASTPATH,
+ if (TSTMP_LT(LBOLT_FASTPATH64,
tcp->tcp_last_rcv_lbolt + PAWS_TIMEOUT)) {
/* This segment is not acceptable. */
return (B_FALSE);
@@ -15570,7 +15570,7 @@ data_null:
}
if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
- (TICK_TO_MSEC((clock_t)LBOLT_FASTPATH - tcp->tcp_last_recv_time) >=
+ (TICK_TO_MSEC(LBOLT_FASTPATH - tcp->tcp_last_recv_time) >=
tcp->tcp_rto)) {
SET_TCP_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
}
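
The two tcp.c hunks switch the PAWS check and the idle-detection test from
the clock_t-sized LBOLT_FASTPATH to the 64-bit LBOLT_FASTPATH64 where the
tick value feeds comparisons that should not wrap. A minimal sketch of the
hazard on an ILP32 kernel, where clock_t is a 32-bit long; the names below
are hypothetical stand-ins, not kernel code:

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		int64_t now64 = ((int64_t)1 << 32) + 100; /* ticks since boot */
		int64_t last64 = 100;		/* last receive, 2^32 ticks ago */
		int32_t now32 = (int32_t)now64;	/* what a 32-bit snapshot keeps */

		/* The wide compare sees the real gap... */
		printf("64-bit delta: %lld\n", (long long)(now64 - last64));
		/* ...the truncated snapshot sees no gap at all. */
		printf("32-bit delta: %lld\n", (long long)(now32 - last64));
		return (0);
	}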
diff --git a/usr/src/uts/common/os/clock.c b/usr/src/uts/common/os/clock.c
index c0c581a215..16ee76807a 100644
--- a/usr/src/uts/common/os/clock.c
+++ b/usr/src/uts/common/os/clock.c
@@ -985,9 +985,8 @@ clock_init(void)
/*
* Allocate cache line aligned space for the per CPU lbolt data and
- * lb_info structure. We also initialize these structures with their
- * default values and install the softint to change from event to
- * cyclic driven mode.
+ * lbolt info structures, and initialize them with their default
+ * values. Note that these structures are also cache line sized.
*/
sz = sizeof (lbolt_info_t) + CPU_CACHE_COHERENCE_SIZE;
buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
@@ -1001,7 +1000,7 @@ clock_init(void)
lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS;
- sz = (sizeof (lbolt_info_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
+ sz = (sizeof (lbolt_cpu_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
lb_cpu = (lbolt_cpu_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);
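
Both allocations in clock_init() get their cache-line alignment by
over-allocating and rounding the returned address up, since kmem_zalloc()
promises nothing beyond natural alignment. The same idiom in miniature,
with calloc standing in for kmem_zalloc, a hypothetical 64-byte line size
in place of CPU_CACHE_COHERENCE_SIZE, and the stock power-of-two round-up
macro:

	#include <stdlib.h>
	#include <stdint.h>

	#define	CACHE_LINE	64	/* hypothetical line size */
	#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

	void *
	alloc_line_aligned(size_t size)
	{
		/*
		 * Pad by one full line so that rounding the start address
		 * up still leaves 'size' usable bytes.  The raw pointer is
		 * dropped here, as in clock_init(), where the buffers live
		 * for the life of the system and are never freed; code that
		 * must free would have to keep it.
		 */
		uintptr_t buf = (uintptr_t)calloc(1, size + CACHE_LINE);

		return (buf == 0 ? NULL : (void *)P2ROUNDUP(buf, CACHE_LINE));
	}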
diff --git a/usr/src/uts/common/os/clock_tick.c b/usr/src/uts/common/os/clock_tick.c
index 9d2005660d..3d7e8ddaaf 100644
--- a/usr/src/uts/common/os/clock_tick.c
+++ b/usr/src/uts/common/os/clock_tick.c
@@ -275,7 +275,7 @@ clock_tick_schedule_one(clock_tick_set_t *csp, int pending, processorid_t cid)
*/
ctp = clock_tick_cpu[cid];
mutex_enter(&ctp->ct_lock);
- ctp->ct_lbolt = (clock_t)LBOLT_NO_ACCOUNT;
+ ctp->ct_lbolt = LBOLT_NO_ACCOUNT;
ctp->ct_pending += pending;
ctp->ct_start = csp->ct_start;
ctp->ct_end = csp->ct_end;
@@ -443,7 +443,7 @@ clock_tick_schedule(int one_sec)
clock_tick_scan = 0;
clock_tick_execute_common(0, clock_tick_scan, end,
- (clock_t)LBOLT_NO_ACCOUNT, 1);
+ LBOLT_NO_ACCOUNT, 1);
return;
}
@@ -476,7 +476,7 @@ clock_tick_schedule(int one_sec)
* we want to handle this before we block on anything and allow
* the pinned thread below the current thread to escape.
*/
- clock_tick_process(CPU, (clock_t)LBOLT_NO_ACCOUNT, clock_tick_pending);
+ clock_tick_process(CPU, LBOLT_NO_ACCOUNT, clock_tick_pending);
mutex_enter(&clock_tick_lock);
diff --git a/usr/src/uts/common/os/panic.c b/usr/src/uts/common/os/panic.c
index ec4bec5c37..64486d4251 100644
--- a/usr/src/uts/common/os/panic.c
+++ b/usr/src/uts/common/os/panic.c
@@ -281,8 +281,8 @@ panicsys(const char *format, va_list alist, struct regs *rp, int on_panic_stack)
panicstr = (char *)format;
va_copy(panicargs, alist);
- panic_lbolt = (clock_t)LBOLT_NO_ACCOUNT;
- panic_lbolt64 = LBOLT_NO_ACCOUNT;
+ panic_lbolt = LBOLT_NO_ACCOUNT;
+ panic_lbolt64 = LBOLT_NO_ACCOUNT64;
panic_hrestime = hrestime;
panic_hrtime = gethrtime_waitfree();
panic_thread = t;
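
With the casts folded into the macros, panicsys() now records the tick
count in both widths through the matching flavors. The two macro
expansions are separate reads that could in principle differ by a tick,
which is harmless at panic time; for contrast, a sketch of the stricter
single-snapshot alternative, with every name below hypothetical:

	#include <stdio.h>
	#include <stdint.h>

	typedef long my_clock_t;	/* stands in for clock_t */

	static my_clock_t panic_ticks;
	static int64_t panic_ticks64;

	static int64_t
	read_ticks64(void)
	{
		return (0x123456789LL);	/* stand-in for LBOLT_NO_ACCOUNT64 */
	}

	static void
	capture_panic_time(void)
	{
		int64_t snap = read_ticks64();	/* one read feeds both records */

		panic_ticks64 = snap;
		panic_ticks = (my_clock_t)snap;
	}

	int
	main(void)
	{
		capture_panic_time();
		printf("%lld %ld\n", (long long)panic_ticks64,
		    (long)panic_ticks);
		return (0);
	}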
diff --git a/usr/src/uts/common/sys/clock_impl.h b/usr/src/uts/common/sys/clock_impl.h
index 85f26a16c6..49ba0009ad 100644
--- a/usr/src/uts/common/sys/clock_impl.h
+++ b/usr/src/uts/common/sys/clock_impl.h
@@ -87,35 +87,41 @@ extern void lbolt_debug_return(void);
extern lbolt_info_t *lb_info;
/*
- * LBOLT_WAITFREE provides a non-waiting version of lbolt.
+ * LBOLT_WAITFREE{,64} provide a non-waiting version of lbolt.
*/
-#define LBOLT_WAITFREE \
+#define LBOLT_WAITFREE64 \
(lbolt_hybrid == lbolt_event_driven ? \
((gethrtime_waitfree()/nsec_per_tick) - \
lb_info->lbi_debug_time) : \
(lb_info->lbi_internal - lb_info->lbi_debug_time))
+#define LBOLT_WAITFREE (clock_t)LBOLT_WAITFREE64
+
/*
- * LBOLT_FASTPATH should *only* be used where the cost of calling
- * ddi_get_lbolt() affects performance. This is currently only used by the
- * TCP/IP code and will be removed once it's no longer required.
+ * LBOLT_FASTPATH{,64} should *only* be used where the cost of calling the
+ * DDI lbolt routines affects performance. This is currently only used by
+ * the TCP/IP code and will be removed once it's no longer required.
*/
-#define LBOLT_FASTPATH \
- (lbolt_hybrid == lbolt_event_driven ? ddi_get_lbolt() : \
- (clock_t)(lb_info->lbi_internal - lb_info->lbi_debug_time))
+#define LBOLT_FASTPATH64 \
+ (lbolt_hybrid == lbolt_event_driven ? ddi_get_lbolt64() : \
+ (lb_info->lbi_internal - lb_info->lbi_debug_time))
+
+#define LBOLT_FASTPATH (clock_t)LBOLT_FASTPATH64
/*
- * LBOLT_NO_ACCOUNT is used by lbolt consumers who fire at a periodic rate,
- * such as clock(), for which the lbolt usage statistics are not updated.
- * This is specially important for consumers whose rate may be modified by
+ * LBOLT_NO_ACCOUNT{,64} is used by lbolt consumers who fire at a periodic
+ * rate, such as clock(), for which the lbolt usage statistics are not updated.
+ * This is especially important for consumers whose rate may be modified by
* the user, resulting in an unaccounted for increase in activity around the
* lbolt routines that could cause a mode switch.
*/
-#define LBOLT_NO_ACCOUNT \
+#define LBOLT_NO_ACCOUNT64 \
(lbolt_hybrid == lbolt_event_driven ? \
((gethrtime()/nsec_per_tick) - lb_info->lbi_debug_time) : \
(lb_info->lbi_internal - lb_info->lbi_debug_time))
+#define LBOLT_NO_ACCOUNT (clock_t)LBOLT_NO_ACCOUNT64
+
#endif /* _KERNEL || _KMEMUSER */
#ifdef __cplusplus
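
The pattern running through clock_impl.h: the 64-bit expression is the
primary definition and each clock_t flavor is just a cast of it, so both
widths always derive from the same computation. The shape in miniature,
with hypothetical stand-ins for the lbolt machinery:

	#include <stdio.h>
	#include <stdint.h>

	typedef long my_clock_t;		/* stands in for clock_t */

	static int64_t internal_ticks = 0x123456789LL;	/* like lbi_internal */

	#define	TICKS64()	(internal_ticks)		/* primary, wide */
	#define	TICKS()		((my_clock_t)TICKS64())		/* derived, narrow */

	int
	main(void)
	{
		printf("wide:   %lld\n", (long long)TICKS64());
		printf("native: %ld\n", (long)TICKS());
		return (0);
	}

Consumers doing tick arithmetic across long spans, like the PAWS check in
tcp.c above, take the wide flavor; everything else keeps the existing
clock_t interface unchanged.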