Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/uts/common/inet/tcp.h             36
-rw-r--r--  usr/src/uts/common/inet/tcp/tcp.c         69
-rw-r--r--  usr/src/uts/common/inet/tcp/tcp_fusion.c  85
-rw-r--r--  usr/src/uts/common/inet/tcp/tcp_kssl.c    16
-rw-r--r--  usr/src/uts/common/inet/tcp_impl.h        14
5 files changed, 148 insertions(+), 72 deletions(-)
diff --git a/usr/src/uts/common/inet/tcp.h b/usr/src/uts/common/inet/tcp.h
index e9c2600c6b..a586064d3b 100644
--- a/usr/src/uts/common/inet/tcp.h
+++ b/usr/src/uts/common/inet/tcp.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1990 Mentat Inc. */
@@ -235,40 +235,39 @@ typedef struct tcp_s {
tcp_fin_rcvd : 1, /* Have we seen a FIN? */
tcp_fin_sent : 1, /* Have we sent our FIN yet? */
tcp_ordrel_done : 1, /* Have we sent the ord_rel upstream? */
- tcp_flow_stopped : 1, /* Have we flow controlled xmitter? */
-
tcp_detached : 1, /* If we're detached from a stream */
+
tcp_bind_pending : 1, /* Client is waiting for bind ack */
tcp_unbind_pending : 1, /* Client sent T_UNBIND_REQ */
tcp_deferred_clean_death : 1,
/* defer tcp endpoint cleanup etc. */
-
tcp_conn_def_q0: 1, /* move from q0 to q deferred */
+
tcp_ka_enabled: 1, /* Connection KeepAlive Timer needed */
tcp_zero_win_probe: 1, /* Zero win probing is in progress */
tcp_loopback: 1, /* src and dst are the same machine */
-
tcp_localnet: 1, /* src and dst are on the same subnet */
+
tcp_syn_defense: 1, /* For defense against SYN attack */
#define tcp_dontdrop tcp_syn_defense
tcp_set_timer : 1,
tcp_active_open: 1, /* This is a active open */
-
tcp_timeout : 1, /* qbufcall failed, qtimeout pending */
+
tcp_rexmit : 1, /* TCP is retransmitting */
tcp_snd_sack_ok : 1, /* Can use SACK for this connection */
tcp_empty_flag : 1, /* Empty flag for future use */
-
tcp_recvdstaddr : 1, /* return T_EXTCONN_IND with dst addr */
+
tcp_hwcksum : 1, /* The NIC is capable of hwcksum */
tcp_ip_forward_progress : 1,
tcp_anon_priv_bind : 1,
-
tcp_ecn_ok : 1, /* Can use ECN for this connection */
+
tcp_ecn_echo_on : 1, /* Need to do ECN echo */
tcp_ecn_cwr_sent : 1, /* ECN_CWR has been sent */
- tcp_cwr : 1; /* Cwnd has reduced recently */
-
+ tcp_cwr : 1, /* Cwnd has reduced recently */
+ tcp_pad_to_bit31 : 1;
/* Following manipulated by TCP under squeue protection */
uint32_t
tcp_mdt : 1, /* Lower layer is capable of MDT */
@@ -528,9 +527,11 @@ typedef struct tcp_s {
uint_t tcp_fuse_rcv_unread_hiwater; /* max # of outstanding pkts */
/*
* The following fusion-related fields and bit fields are to be
- * manipulated with squeue protection or with tcp_fuse_lock held.
+ * manipulated with squeue protection or with tcp_non_sq_lock held.
+ * tcp_non_sq_lock is used to protect fields that may be modified or
+ * accessed outside the squeue.
*/
- kmutex_t tcp_fuse_lock;
+ kmutex_t tcp_non_sq_lock;
kcondvar_t tcp_fuse_plugcv;
uint_t tcp_fuse_rcv_unread_cnt; /* # of outstanding pkts */
uint32_t
@@ -550,6 +551,7 @@ typedef struct tcp_s {
*/
boolean_t tcp_issocket; /* this is a socket tcp */
+ /* protected by tcp_non_sq_lock */
uint32_t tcp_squeue_bytes;
/*
* Kernel SSL session information
@@ -579,6 +581,16 @@ typedef struct tcp_s {
*/
struct tcp_s *tcp_eager_prev_drop_q0;
struct tcp_s *tcp_eager_next_drop_q0;
+
+ /*
+ * Have we flow controlled xmitter?
+ * This variable can be modified outside the squeue and hence must
+ * not be declared as a bit field along with the rest that are
+ * modified only within the squeue.
+ * Protected by tcp_non_sq_lock.
+ */
+ boolean_t tcp_flow_stopped;
+
#ifdef DEBUG
pc_t tcmp_stk[15];
#endif
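
The bit-field change above is the heart of the fix: adjacent C bit fields share a machine word, so every store to one of them is a read-modify-write of the whole word, and a thread updating tcp_flow_stopped outside the squeue could clobber a concurrent squeue-side update to a neighboring bit. A minimal userland sketch of the hazard (hypothetical names, not the illumos code):

    /* Two flags packed into one word. */
    struct conn_flags {
            unsigned sq_only_flag : 1;      /* written only under the squeue */
            unsigned async_flag : 1;        /* written from arbitrary threads */
    };

    /* Squeue thread: compiles to a read-modify-write of the word. */
    void
    set_sq_only(struct conn_flags *cf)
    {
            cf->sq_only_flag = 1;
    }

    /*
     * Arbitrary thread: another read-modify-write of the same word.
     * If it races with set_sq_only(), one store can silently undo the
     * other's bit, which is why the patch gives tcp_flow_stopped its
     * own boolean_t guarded by tcp_non_sq_lock.
     */
    void
    set_async(struct conn_flags *cf)
    {
            cf->async_flag = 1;
    }
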
diff --git a/usr/src/uts/common/inet/tcp/tcp.c b/usr/src/uts/common/inet/tcp/tcp.c
index 3d7e027c1e..f55afe25f6 100644
--- a/usr/src/uts/common/inet/tcp/tcp.c
+++ b/usr/src/uts/common/inet/tcp/tcp.c
@@ -3983,9 +3983,11 @@ tcp_stop_lingering(tcp_t *tcp)
tcp->tcp_linger_tid = 0;
if (tcp->tcp_state > TCPS_LISTEN) {
tcp_acceptor_hash_remove(tcp);
+ mutex_enter(&tcp->tcp_non_sq_lock);
if (tcp->tcp_flow_stopped) {
tcp_clrqfull(tcp);
}
+ mutex_exit(&tcp->tcp_non_sq_lock);
if (tcp->tcp_timer_tid != 0) {
delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
@@ -4347,9 +4349,11 @@ tcp_close_output(void *arg, mblk_t *mp, void *arg2)
*/
tcp_acceptor_hash_remove(tcp);
+ mutex_enter(&tcp->tcp_non_sq_lock);
if (tcp->tcp_flow_stopped) {
tcp_clrqfull(tcp);
}
+ mutex_exit(&tcp->tcp_non_sq_lock);
if (tcp->tcp_timer_tid != 0) {
delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
@@ -4558,8 +4562,10 @@ tcp_closei_local(tcp_t *tcp)
tcp->tcp_ip_addr_cache = NULL;
}
}
+ mutex_enter(&tcp->tcp_non_sq_lock);
if (tcp->tcp_flow_stopped)
tcp_clrqfull(tcp);
+ mutex_exit(&tcp->tcp_non_sq_lock);
tcp_bind_hash_remove(tcp);
/*
@@ -7714,10 +7720,12 @@ tcp_reinit(tcp_t *tcp)
tcp_zcopy_notify(tcp);
tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL;
tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0;
+ mutex_enter(&tcp->tcp_non_sq_lock);
if (tcp->tcp_flow_stopped &&
TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) {
tcp_clrqfull(tcp);
}
+ mutex_exit(&tcp->tcp_non_sq_lock);
tcp_close_mpp(&tcp->tcp_reass_head);
tcp->tcp_reass_tail = NULL;
if (tcp->tcp_rcv_list != NULL) {
@@ -10272,8 +10280,6 @@ tcp_opt_set(queue_t *q, uint_t optset_context, int level, int name,
tcp->tcp_dgram_errind = onoff;
break;
case SO_SNDBUF: {
- tcp_t *peer_tcp;
-
if (*i1 > tcp_max_buf) {
*outlenp = 0;
return (ENOBUFS);
@@ -10292,22 +10298,13 @@ tcp_opt_set(queue_t *q, uint_t optset_context, int level, int name,
* There are apps that increase SO_SNDBUF size when
* flow-controlled (EWOULDBLOCK), and expect the flow
* control condition to be lifted right away.
- *
- * For the fused tcp loopback case, in order to avoid
- * a race with the peer's tcp_fuse_rrw() we need to
- * hold its fuse_lock while accessing tcp_flow_stopped.
*/
- peer_tcp = tcp->tcp_loopback_peer;
- ASSERT(!tcp->tcp_fused || peer_tcp != NULL);
- if (tcp->tcp_fused)
- mutex_enter(&peer_tcp->tcp_fuse_lock);
-
+ mutex_enter(&tcp->tcp_non_sq_lock);
if (tcp->tcp_flow_stopped &&
TCP_UNSENT_BYTES(tcp) < tcp->tcp_xmit_hiwater) {
tcp_clrqfull(tcp);
}
- if (tcp->tcp_fused)
- mutex_exit(&peer_tcp->tcp_fuse_lock);
+ mutex_exit(&tcp->tcp_non_sq_lock);
break;
}
case SO_RCVBUF:
@@ -17389,9 +17386,9 @@ tcp_output(void *arg, mblk_t *mp, void *arg2)
ASSERT(DB_TYPE(mp) == M_DATA);
msize = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp);
- mutex_enter(&connp->conn_lock);
+ mutex_enter(&tcp->tcp_non_sq_lock);
tcp->tcp_squeue_bytes -= msize;
- mutex_exit(&connp->conn_lock);
+ mutex_exit(&tcp->tcp_non_sq_lock);
/* Bypass tcp protocol for fused tcp loopback */
if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize))
@@ -17475,10 +17472,12 @@ tcp_output(void *arg, mblk_t *mp, void *arg2)
goto slow;
}
+ mutex_enter(&tcp->tcp_non_sq_lock);
if (tcp->tcp_flow_stopped &&
TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) {
tcp_clrqfull(tcp);
}
+ mutex_exit(&tcp->tcp_non_sq_lock);
/*
* determine if anything to send (Nagle).
@@ -17862,15 +17861,29 @@ tcp_accept_finish(void *arg, mblk_t *mp, void *arg2)
* For fused tcp loopback, back-enable peer endpoint
* if it's currently flow-controlled.
*/
- if (tcp->tcp_fused &&
- tcp->tcp_loopback_peer->tcp_flow_stopped) {
+ if (tcp->tcp_fused) {
tcp_t *peer_tcp = tcp->tcp_loopback_peer;
ASSERT(peer_tcp != NULL);
ASSERT(peer_tcp->tcp_fused);
-
- tcp_clrqfull(peer_tcp);
- TCP_STAT(tcp_fusion_backenabled);
+ /*
+ * In order to change the peer's tcp_flow_stopped,
+ * we need to take the locks of both endpoints. The
+ * lock of the higher-addressed endpoint is taken first.
+ */
+ if (peer_tcp > tcp) {
+ mutex_enter(&peer_tcp->tcp_non_sq_lock);
+ mutex_enter(&tcp->tcp_non_sq_lock);
+ } else {
+ mutex_enter(&tcp->tcp_non_sq_lock);
+ mutex_enter(&peer_tcp->tcp_non_sq_lock);
+ }
+ if (peer_tcp->tcp_flow_stopped) {
+ tcp_clrqfull(peer_tcp);
+ TCP_STAT(tcp_fusion_backenabled);
+ }
+ mutex_exit(&peer_tcp->tcp_non_sq_lock);
+ mutex_exit(&tcp->tcp_non_sq_lock);
}
}
ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
@@ -18228,16 +18241,14 @@ tcp_wput(queue_t *q, mblk_t *mp)
msize = msgdsize(mp);
- mutex_enter(&connp->conn_lock);
- CONN_INC_REF_LOCKED(connp);
-
+ mutex_enter(&tcp->tcp_non_sq_lock);
tcp->tcp_squeue_bytes += msize;
if (TCP_UNSENT_BYTES(tcp) > tcp->tcp_xmit_hiwater) {
- mutex_exit(&connp->conn_lock);
tcp_setqfull(tcp);
- } else
- mutex_exit(&connp->conn_lock);
+ }
+ mutex_exit(&tcp->tcp_non_sq_lock);
+ CONN_INC_REF(connp);
(*tcp_squeue_wput_proc)(connp->conn_sqp, mp,
tcp_output, connp, SQTAG_TCP_OUTPUT);
return;
@@ -18929,10 +18940,12 @@ tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent)
(mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) != 0)
tcp_zcopy_notify(tcp);
freemsg(mp);
+ mutex_enter(&tcp->tcp_non_sq_lock);
if (tcp->tcp_flow_stopped &&
TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) {
tcp_clrqfull(tcp);
}
+ mutex_exit(&tcp->tcp_non_sq_lock);
return;
}
@@ -19268,6 +19281,7 @@ done:;
}
/* Note that len is the amount we just sent but with a negative sign */
tcp->tcp_unsent += len;
+ mutex_enter(&tcp->tcp_non_sq_lock);
if (tcp->tcp_flow_stopped) {
if (TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) {
tcp_clrqfull(tcp);
@@ -19275,6 +19289,7 @@ done:;
} else if (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_xmit_hiwater) {
tcp_setqfull(tcp);
}
+ mutex_exit(&tcp->tcp_non_sq_lock);
}
/*
@@ -21537,9 +21552,11 @@ tcp_wput_flush(tcp_t *tcp, mblk_t *mp)
* We have no unsent data, so unsent must be less than
* tcp_xmit_lowater, so re-enable flow.
*/
+ mutex_enter(&tcp->tcp_non_sq_lock);
if (tcp->tcp_flow_stopped) {
tcp_clrqfull(tcp);
}
+ mutex_exit(&tcp->tcp_non_sq_lock);
}
/*
* TODO: you can't just flush these, you have to increase rwnd for one
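
Every tcp_flow_stopped touch point in tcp.c now follows the same shape: enter tcp_non_sq_lock, compare the unsent byte count against the transmit watermarks, call tcp_setqfull()/tcp_clrqfull() while still holding the lock, and exit. A condensed sketch of that hysteresis (illumos-style names, illustrative rather than verbatim):

    static void
    tcp_flow_update(tcp_t *tcp)
    {
            mutex_enter(&tcp->tcp_non_sq_lock);
            if (tcp->tcp_flow_stopped) {
                    /* Release flow control only at the low watermark... */
                    if (TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater)
                            tcp_clrqfull(tcp);
            } else if (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_xmit_hiwater) {
                    /* ...and assert it at the high one, avoiding oscillation. */
                    tcp_setqfull(tcp);
            }
            mutex_exit(&tcp->tcp_non_sq_lock);
    }
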
diff --git a/usr/src/uts/common/inet/tcp/tcp_fusion.c b/usr/src/uts/common/inet/tcp/tcp_fusion.c
index 277b479a41..01626dbd0c 100644
--- a/usr/src/uts/common/inet/tcp/tcp_fusion.c
+++ b/usr/src/uts/common/inet/tcp/tcp_fusion.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -62,18 +62,18 @@
* because the tcp_fuse_rrw() path bypasses the M_PROTO processing done
* by strsock_proto() hook.
*
- * Sychronization is handled by squeue and the mutex tcp_fuse_lock.
+ * Synchronization is handled by squeue and the mutex tcp_non_sq_lock.
* One of the requirements for fusion to succeed is that both endpoints
* need to be using the same squeue. This ensures that neither side
* can disappear while the other side is still sending data. By itself,
* squeue is not sufficient for guaranteeing safety when synchronous
* streams is enabled. The reason is that tcp_fuse_rrw() doesn't enter
* the squeue and its access to tcp_rcv_list and other fusion-related
- * fields needs to be sychronized with the sender. tcp_fuse_lock is
+ * fields need to be synchronized with the sender. tcp_non_sq_lock is
* used for this purpose. When there is urgent data, the sender needs
* to push the data up the receiver's streams read queue. In order to
- * avoid holding the tcp_fuse_lock across putnext(), the sender sets
- * the peer tcp's tcp_fuse_syncstr_plugged bit and releases tcp_fuse_lock
+ * avoid holding the tcp_non_sq_lock across putnext(), the sender sets
+ * the peer tcp's tcp_fuse_syncstr_plugged bit and releases tcp_non_sq_lock
* (see macro TCP_FUSE_SYNCSTR_PLUG_DRAIN()). If tcp_fuse_rrw() enters
* after this point, it will see that synchronous streams is plugged and
* will wait on tcp_fuse_plugcv. After the sender has finished pushing up
@@ -456,6 +456,8 @@ tcp_fuse_output_urg(tcp_t *tcp, mblk_t *mp)
/*
* Fusion output routine, called by tcp_output() and tcp_wput_proto().
+ * If we are modifying any member that can be changed outside the squeue,
+ * like tcp_flow_stopped, we need to take tcp_non_sq_lock.
*/
boolean_t
tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
@@ -597,7 +599,7 @@ tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
freemsg(mp1);
}
- mutex_enter(&peer_tcp->tcp_fuse_lock);
+ mutex_enter(&peer_tcp->tcp_non_sq_lock);
/*
* Wake up and signal the peer; it is okay to do this before
* enqueueing because we are holding the lock. One of the
@@ -644,6 +646,20 @@ tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
if (TCP_IS_DETACHED(peer_tcp) || max_unread == 0)
max_unread = UINT_MAX;
+ /*
+ * Since we are accessing our tcp_flow_stopped and might modify it,
+ * we need to take tcp->tcp_non_sq_lock. The lock for the highest
+ * address is acquired first. Dropping peer_tcp->tcp_non_sq_lock should
+ * not be an issue here since we are within the squeue and the peer
+ * won't disappear.
+ */
+ if (tcp > peer_tcp) {
+ mutex_exit(&peer_tcp->tcp_non_sq_lock);
+ mutex_enter(&tcp->tcp_non_sq_lock);
+ mutex_enter(&peer_tcp->tcp_non_sq_lock);
+ } else {
+ mutex_enter(&tcp->tcp_non_sq_lock);
+ }
flow_stopped = tcp->tcp_flow_stopped;
if (!flow_stopped &&
(((peer_tcp->tcp_direct_sockfs || TCP_IS_DETACHED(peer_tcp)) &&
@@ -662,7 +678,7 @@ tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
tcp_clrqfull(tcp);
flow_stopped = B_FALSE;
}
-
+ mutex_exit(&tcp->tcp_non_sq_lock);
loopback_packets++;
tcp->tcp_last_sent_len = send_size;
@@ -682,7 +698,7 @@ tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
BUMP_LOCAL(tcp->tcp_obsegs);
BUMP_LOCAL(peer_tcp->tcp_ibsegs);
- mutex_exit(&peer_tcp->tcp_fuse_lock);
+ mutex_exit(&peer_tcp->tcp_non_sq_lock);
DTRACE_PROBE2(tcp__fuse__output, tcp_t *, tcp, uint_t, send_size);
@@ -826,14 +842,26 @@ tcp_fuse_rcv_drain(queue_t *q, tcp_t *tcp, mblk_t **sigurg_mpp)
/*
* Synchronous stream entry point for sockfs to retrieve
* data directly from tcp_rcv_list.
+ * tcp_fuse_rrw() might end up modifying the peer's tcp_flow_stopped,
+ * for which it must take the peer's tcp_non_sq_lock as well before
+ * making any change. The order of taking the locks is based on
+ * the TCP pointer itself. Before we get the peer we need to take
+ * our tcp_non_sq_lock so that the peer doesn't disappear. However,
+ * we cannot simply drop our lock to grab the peer's lock (to honor
+ * the lock order), since the peer might disappear in the interim. So,
+ * we take our tcp_non_sq_lock, get the peer, increment the ref on the
+ * peer's conn, drop all the locks, and then take both tcp_non_sq_locks
+ * in the desired order. Incrementing the conn ref on the peer means
+ * that the peer won't disappear when we drop our tcp_non_sq_lock.
*/
int
tcp_fuse_rrw(queue_t *q, struiod_t *dp)
{
tcp_t *tcp = Q_TO_CONN(q)->conn_tcp;
mblk_t *mp;
+ tcp_t *peer_tcp;
- mutex_enter(&tcp->tcp_fuse_lock);
+ mutex_enter(&tcp->tcp_non_sq_lock);
/*
* If tcp_fuse_syncstr_plugged is set, then another thread is moving
@@ -841,30 +869,53 @@ tcp_fuse_rrw(queue_t *q, struiod_t *dp)
* done, then return EBUSY so that strget() will dequeue data from the
* stream head to ensure data is drained in-order.
*/
+plugged:
if (tcp->tcp_fuse_syncstr_plugged) {
do {
- cv_wait(&tcp->tcp_fuse_plugcv, &tcp->tcp_fuse_lock);
+ cv_wait(&tcp->tcp_fuse_plugcv, &tcp->tcp_non_sq_lock);
} while (tcp->tcp_fuse_syncstr_plugged);
- mutex_exit(&tcp->tcp_fuse_lock);
+ mutex_exit(&tcp->tcp_non_sq_lock);
TCP_STAT(tcp_fusion_rrw_plugged);
TCP_STAT(tcp_fusion_rrw_busy);
return (EBUSY);
}
+ peer_tcp = tcp->tcp_loopback_peer;
+
/*
* If someone had turned off tcp_direct_sockfs or if synchronous
* streams is stopped, we return EBUSY. This causes strget() to
* dequeue data from the stream head instead.
*/
if (!tcp->tcp_direct_sockfs || tcp->tcp_fuse_syncstr_stopped) {
- mutex_exit(&tcp->tcp_fuse_lock);
+ mutex_exit(&tcp->tcp_non_sq_lock);
TCP_STAT(tcp_fusion_rrw_busy);
return (EBUSY);
}
+ /*
+ * Grab the locks in order: the highest-addressed tcp is locked first.
+ * We don't do this inside the tcp_rcv_list check because, if we
+ * have to drop our lock to honor the ordering, the tcp_rcv_list
+ * could change underneath us.
+ */
+ if (peer_tcp > tcp) {
+ CONN_INC_REF(peer_tcp->tcp_connp);
+ mutex_exit(&tcp->tcp_non_sq_lock);
+ mutex_enter(&peer_tcp->tcp_non_sq_lock);
+ mutex_enter(&tcp->tcp_non_sq_lock);
+ CONN_DEC_REF(peer_tcp->tcp_connp);
+ /* This might have changed in the interim */
+ if (tcp->tcp_fuse_syncstr_plugged) {
+ mutex_exit(&peer_tcp->tcp_non_sq_lock);
+ goto plugged;
+ }
+ } else {
+ mutex_enter(&peer_tcp->tcp_non_sq_lock);
+ }
+
if ((mp = tcp->tcp_rcv_list) != NULL) {
- tcp_t *peer_tcp = tcp->tcp_loopback_peer;
DTRACE_PROBE3(tcp__fuse__rrw, tcp_t *, tcp,
uint32_t, tcp->tcp_rcv_cnt, ssize_t, dp->d_uio.uio_resid);
@@ -892,7 +943,7 @@ tcp_fuse_rrw(queue_t *q, struiod_t *dp)
TCP_STAT(tcp_fusion_backenabled);
}
}
-
+ mutex_exit(&peer_tcp->tcp_non_sq_lock);
/*
* Either we just dequeued everything or we get here from sockfs
* and have nothing to return; in this case clear RSLEEP.
@@ -903,7 +954,7 @@ tcp_fuse_rrw(queue_t *q, struiod_t *dp)
ASSERT(tcp->tcp_fuse_rcv_unread_cnt == 0);
STR_WAKEUP_CLEAR(STREAM(q));
- mutex_exit(&tcp->tcp_fuse_lock);
+ mutex_exit(&tcp->tcp_non_sq_lock);
dp->d_mp = mp;
return (0);
}
@@ -922,7 +973,7 @@ tcp_fuse_rinfop(queue_t *q, infod_t *dp)
int error = 0;
struct stdata *stp = STREAM(q);
- mutex_enter(&tcp->tcp_fuse_lock);
+ mutex_enter(&tcp->tcp_non_sq_lock);
/* If shutdown on read has happened, return nothing */
mutex_enter(&stp->sd_lock);
if (stp->sd_flag & STREOF) {
@@ -989,7 +1040,7 @@ tcp_fuse_rinfop(queue_t *q, infod_t *dp)
dp->d_cmd &= ~INFOD_COPYOUT;
}
done:
- mutex_exit(&tcp->tcp_fuse_lock);
+ mutex_exit(&tcp->tcp_non_sq_lock);
dp->d_res |= res;
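
Both tcp_fuse_output() and tcp_fuse_rrw() may need the tcp_non_sq_lock of the two fused endpoints at once, so they agree on a single acquisition order, higher-addressed tcp_t first, which rules out an AB-BA deadlock between the peers. A self-contained userland sketch of that discipline (pthreads standing in for kmutex; lock_both()/unlock_both() are hypothetical helpers, not illumos functions):

    #include <pthread.h>

    typedef struct endpoint {
            pthread_mutex_t non_sq_lock;
    } endpoint_t;

    static void
    lock_both(endpoint_t *a, endpoint_t *b)
    {
            if (a > b) {            /* higher address is locked first */
                    pthread_mutex_lock(&a->non_sq_lock);
                    pthread_mutex_lock(&b->non_sq_lock);
            } else {
                    pthread_mutex_lock(&b->non_sq_lock);
                    pthread_mutex_lock(&a->non_sq_lock);
            }
    }

    static void
    unlock_both(endpoint_t *a, endpoint_t *b)
    {
            pthread_mutex_unlock(&a->non_sq_lock);
            pthread_mutex_unlock(&b->non_sq_lock);
    }
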
diff --git a/usr/src/uts/common/inet/tcp/tcp_kssl.c b/usr/src/uts/common/inet/tcp/tcp_kssl.c
index 3e1982453a..dac3e0df3f 100644
--- a/usr/src/uts/common/inet/tcp/tcp_kssl.c
+++ b/usr/src/uts/common/inet/tcp/tcp_kssl.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -137,10 +137,9 @@ tcp_kssl_input(tcp_t *tcp, mblk_t *mp)
* outgoing flow. tcp_output() will decrement it
* as they are sent out.
*/
- ASSERT(!MUTEX_HELD(&connp->conn_lock));
- mutex_enter(&connp->conn_lock);
+ mutex_enter(&tcp->tcp_non_sq_lock);
tcp->tcp_squeue_bytes += msgdsize(outmp);
- mutex_exit(&connp->conn_lock);
+ mutex_exit(&tcp->tcp_non_sq_lock);
tcp_output(connp, outmp, NULL);
/* FALLTHROUGH */
@@ -330,14 +329,11 @@ tcp_kssl_input_callback(void *arg, mblk_t *mp, kssl_cmd_t kssl_cmd)
/*
* See comment in tcp_kssl_input() call to tcp_output()
*/
- ASSERT(!MUTEX_HELD(&connp->conn_lock));
- mutex_enter(&connp->conn_lock);
- CONN_INC_REF_LOCKED(connp);
+ mutex_enter(&tcp->tcp_non_sq_lock);
tcp->tcp_squeue_bytes += msgdsize(mp);
- mutex_exit(&connp->conn_lock);
- } else {
- CONN_INC_REF(connp);
+ mutex_exit(&tcp->tcp_non_sq_lock);
}
+ CONN_INC_REF(connp);
(*tcp_squeue_wput_proc)(connp->conn_sqp, mp,
tcp_output, connp, SQTAG_TCP_OUTPUT);
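
The kssl changes mirror tcp_wput(): every producer that queues an mblk for the squeue charges its size to tcp_squeue_bytes under tcp_non_sq_lock before the handoff, and tcp_output() credits it back under the same lock, so TCP_UNSENT_BYTES() can count bytes still in flight on the squeue. A sketch of the two sides (tcp_t and the helpers are the kernel's; the wrapper functions are hypothetical):

    static void
    squeue_bytes_produce(tcp_t *tcp, mblk_t *mp)
    {
            /* Runs outside the squeue, e.g. tcp_wput()/tcp_kssl_input(). */
            mutex_enter(&tcp->tcp_non_sq_lock);
            tcp->tcp_squeue_bytes += msgdsize(mp);
            if (TCP_UNSENT_BYTES(tcp) > tcp->tcp_xmit_hiwater)
                    tcp_setqfull(tcp);      /* flow-control the writer now */
            mutex_exit(&tcp->tcp_non_sq_lock);
    }

    static void
    squeue_bytes_consume(tcp_t *tcp, ssize_t msize)
    {
            /* Runs in tcp_output(), inside the squeue. */
            mutex_enter(&tcp->tcp_non_sq_lock);
            tcp->tcp_squeue_bytes -= msize;
            mutex_exit(&tcp->tcp_non_sq_lock);
    }
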
diff --git a/usr/src/uts/common/inet/tcp_impl.h b/usr/src/uts/common/inet/tcp_impl.h
index dc69b9c8b6..c724f53980 100644
--- a/usr/src/uts/common/inet/tcp_impl.h
+++ b/usr/src/uts/common/inet/tcp_impl.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -76,9 +76,9 @@ extern "C" {
*/
#define TCP_FUSE_SYNCSTR_STOP(tcp) { \
if ((tcp)->tcp_direct_sockfs) { \
- mutex_enter(&(tcp)->tcp_fuse_lock); \
+ mutex_enter(&(tcp)->tcp_non_sq_lock); \
(tcp)->tcp_fuse_syncstr_stopped = B_TRUE; \
- mutex_exit(&(tcp)->tcp_fuse_lock); \
+ mutex_exit(&(tcp)->tcp_non_sq_lock); \
} \
}
@@ -88,10 +88,10 @@ extern "C" {
*/
#define TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp) { \
if ((tcp)->tcp_direct_sockfs) { \
- mutex_enter(&(tcp)->tcp_fuse_lock); \
+ mutex_enter(&(tcp)->tcp_non_sq_lock); \
ASSERT(!(tcp)->tcp_fuse_syncstr_plugged); \
(tcp)->tcp_fuse_syncstr_plugged = B_TRUE; \
- mutex_exit(&(tcp)->tcp_fuse_lock); \
+ mutex_exit(&(tcp)->tcp_non_sq_lock); \
} \
}
@@ -101,10 +101,10 @@ extern "C" {
*/
#define TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp) { \
if ((tcp)->tcp_direct_sockfs) { \
- mutex_enter(&(tcp)->tcp_fuse_lock); \
+ mutex_enter(&(tcp)->tcp_non_sq_lock); \
(tcp)->tcp_fuse_syncstr_plugged = B_FALSE; \
(void) cv_broadcast(&(tcp)->tcp_fuse_plugcv); \
- mutex_exit(&(tcp)->tcp_fuse_lock); \
+ mutex_exit(&(tcp)->tcp_non_sq_lock); \
} \
}
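
The plug/drain macros above implement a small handshake: the sender marks the synchronous stream plugged under tcp_non_sq_lock, drops the lock to call putnext(), and later unplugs and broadcasts, while tcp_fuse_rrw() waits on tcp_fuse_plugcv until the plug clears. A userland sketch of the same handshake (hypothetical names, pthreads in place of kmutex/kcondvar):

    #include <pthread.h>

    typedef struct syncstr {
            pthread_mutex_t lock;           /* plays tcp_non_sq_lock */
            pthread_cond_t plugcv;          /* plays tcp_fuse_plugcv */
            int plugged;                    /* plays tcp_fuse_syncstr_plugged */
    } syncstr_t;

    /* Sender: plug, do the lock-free push, then unplug and wake readers. */
    void
    plug_drain(syncstr_t *ss, void (*push)(void))
    {
            pthread_mutex_lock(&ss->lock);
            ss->plugged = 1;
            pthread_mutex_unlock(&ss->lock);

            push();                 /* e.g. putnext(); lock must not be held */

            pthread_mutex_lock(&ss->lock);
            ss->plugged = 0;
            pthread_cond_broadcast(&ss->plugcv);
            pthread_mutex_unlock(&ss->lock);
    }

    /* Reader: wait out any plug before touching the receive list. */
    void
    reader_sync(syncstr_t *ss)
    {
            pthread_mutex_lock(&ss->lock);
            while (ss->plugged)
                    pthread_cond_wait(&ss->plugcv, &ss->lock);
            /* ... safe to look at the receive list here ... */
            pthread_mutex_unlock(&ss->lock);
    }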