diff options
author:    meem <none@none>  2006-08-05 07:49:30 -0700
committer: meem <none@none>  2006-08-05 07:49:30 -0700
commit:    81d28f7bcc433d05b5cd70e64152da57dabe39eb (patch)
tree:      10c098e470457d515b9fe88ec68618ac3da57d98 /usr/src
parent:    5749802bc1ab53eee0631759471dabfc4b455cd4 (diff)
download:  illumos-joyent-81d28f7bcc433d05b5cd70e64152da57dabe39eb.tar.gz
6389163 race in TCP Fusion TLI accept logic induces spinning in RPC
Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/uts/common/inet/tcp/tcp.c        | 10
-rw-r--r--  usr/src/uts/common/inet/tcp/tcp_fusion.c | 14
2 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/usr/src/uts/common/inet/tcp/tcp.c b/usr/src/uts/common/inet/tcp/tcp.c
index 5c34200c3f..e4aff1d002 100644
--- a/usr/src/uts/common/inet/tcp/tcp.c
+++ b/usr/src/uts/common/inet/tcp/tcp.c
@@ -2405,6 +2405,16 @@ tcp_accept_swap(tcp_t *listener, tcp_t *acceptor, tcp_t *eager)
 	eager->tcp_rq->q_ptr = econnp;
 	eager->tcp_wq->q_ptr = econnp;
+
+	/*
+	 * In the TLI/XTI loopback case, we are inside the listener's squeue,
+	 * which might be a different squeue from our peer TCP instance.
+	 * For TCP Fusion, the peer expects that whenever tcp_detached is
+	 * clear, our TCP queues point to the acceptor's queues.  Thus, use
+	 * membar_producer() to ensure that the assignments of tcp_rq/tcp_wq
+	 * above reach global visibility prior to the clearing of tcp_detached.
+	 */
+	membar_producer();
 	eager->tcp_detached = B_FALSE;
 
 	ASSERT(eager->tcp_ack_tid == 0);
diff --git a/usr/src/uts/common/inet/tcp/tcp_fusion.c b/usr/src/uts/common/inet/tcp/tcp_fusion.c
index 696a6c24cd..fda7bff9d2 100644
--- a/usr/src/uts/common/inet/tcp/tcp_fusion.c
+++ b/usr/src/uts/common/inet/tcp/tcp_fusion.c
@@ -458,7 +458,6 @@ boolean_t
 tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
 {
 	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
-	queue_t *peer_rq;
 	uint_t max_unread;
 	boolean_t flow_stopped;
 	boolean_t urgent = (DB_TYPE(mp) != M_DATA);
@@ -469,7 +468,6 @@ tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
 	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_PROTO ||
 	    DB_TYPE(mp) == M_PCPROTO);
 
-	peer_rq = peer_tcp->tcp_rq;
 	max_unread = peer_tcp->tcp_fuse_rcv_unread_hiwater;
 
 	/* If this connection requires IP, unfuse and use regular path */
@@ -599,7 +597,17 @@ tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
 	 */
 	if (urgent || (!flow_stopped && !peer_tcp->tcp_direct_sockfs)) {
 		ASSERT(peer_tcp->tcp_rcv_list != NULL);
-		(void) tcp_fuse_rcv_drain(peer_rq, peer_tcp, NULL);
+		/*
+		 * For TLI-based streams, a thread in tcp_accept_swap()
+		 * can race with us.  That thread will ensure that the
+		 * correct peer_tcp->tcp_rq is globally visible before
+		 * peer_tcp->tcp_detached is visible as clear, but we
+		 * must also ensure that the load of tcp_rq cannot be
+		 * reordered to be before the tcp_detached check.
+		 */
+		membar_consumer();
+		(void) tcp_fuse_rcv_drain(peer_tcp->tcp_rq, peer_tcp,
+		    NULL);
 		/*
 		 * If synchronous streams was stopped above due
 		 * to the presence of urgent data, re-enable it.