summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJerry Jelinek <jerry.jelinek@joyent.com>2019-05-16 12:09:55 +0000
committerJerry Jelinek <jerry.jelinek@joyent.com>2019-05-16 12:09:55 +0000
commit478ff45c989d01e21a206c186c3466cea487dd37 (patch)
tree6fb084855add19f4c6c8d9a82c2fe8d903d18927
parent90e5050456387317c70dd8a3668940e0d9ab61f2 (diff)
parent466483367c0a97495499f43113a8f44c9eddff03 (diff)
downloadillumos-joyent-478ff45c989d01e21a206c186c3466cea487dd37.tar.gz
[illumos-gate merge]
commit a3380248e34d78eb55b8f65ccf1f0d8a6f7e7bbf 10055 recursive mutex enter in ahci commit 8f22c1dff63d6147c87d6bff65bcd3970ad4d368 10879 i86pc/i86xpv: assignment makes integer from pointer without a cast commit fb46ffcae2cbe5ba3f2fe0c3104beb0a222897fd 10878 i86pc/i86xpv: comparison between pointer and integer commit ee3e45c0196a43379207b0298fcd21987a2ab05f 10877 i86pc: comparison between pointer and integer commit fac9618d9408ad005fe8fcc1d189e7baedaf0a62 10870 dtrace: comparison between pointer and integer commit 7b4e981c32b1b233ce13a79cac81c8e75937d3f5 10866 dls: comparison between pointer and integer commit 525f82272fdf81a1292b8b1d1832d2012cdfc096 10865 ibmf: comparison between pointer and integer commit 2fcabb599ef45ae10a2847045a10486dd69b9b84 10863 idm: comparison between pointer and integer commit 6895f6f4d5c99fd997130635c4a4bff68554bb8e 10862 mac: comparison between pointer and integer commit 0e2db3e7fce28a3f9e1dfd13abce14eac44e98c6 10836 zcons: cast between incompatible function types commit cb492d90e7b22eb1b91a8577423f86ba447d2732 10835 tl: cast between incompatible function types commit 455e370ca67aeea268bdfbcf581b6a05547f6636 10924 Need mitigation of L1TF (CVE-2018-3646) commit 2849e8402eca436e7a7eca178c7ab6644a093a3e 10909 Incorrect parameters passed to DC lead to STATUS_INVALID_INFO_CLASS commit c26bf377e2245534feb1f92b43f6d4ed32513c41 10954 expose processor model name on chip topo nodes Conflicts: usr/src/uts/intel/sys/x86_archext.h usr/src/uts/intel/ia32/ml/swtch.s usr/src/uts/i86pc/sys/machcpuvar.h usr/src/uts/i86pc/sys/Makefile usr/src/uts/i86pc/os/intr.c usr/src/uts/i86pc/os/cpuid.c usr/src/uts/i86pc/io/pcplusmp/apic.c usr/src/uts/i86pc/io/apix/apix_utils.c usr/src/uts/i86pc/io/apix/apix_intr.c usr/src/uts/common/sys/thread.h usr/src/uts/common/os/lgrp.c usr/src/uts/common/io/dls/dls_mgmt.c usr/src/uts/common/fs/zfs/zvol.c usr/src/uts/common/disp/thread.c usr/src/uts/common/disp/disp.c usr/src/pkg/manifests/system-header.mf 
usr/src/lib/fm/topo/modules/i86pc/chip/Makefile
-rw-r--r--usr/src/pkg/manifests/system-header.mf2
-rw-r--r--usr/src/uts/common/dtrace/dtrace.c14
-rw-r--r--usr/src/uts/common/io/dls/dls_mgmt.c2
-rw-r--r--usr/src/uts/common/io/ib/mgt/ibmf/ibmf_recv.c10
-rw-r--r--usr/src/uts/common/io/ib/mgt/ibmf/ibmf_send.c4
-rw-r--r--usr/src/uts/common/io/ib/mgt/ibmf/ibmf_wqe.c2
-rw-r--r--usr/src/uts/common/io/idm/idm_text.c4
-rw-r--r--usr/src/uts/common/io/mac/mac.c4
-rw-r--r--usr/src/uts/common/io/mac/mac_hio.c2
-rw-r--r--usr/src/uts/common/io/sata/adapters/ahci/ahci.c4
-rw-r--r--usr/src/uts/common/io/tl.c436
-rw-r--r--usr/src/uts/common/io/zcons.c55
-rw-r--r--usr/src/uts/common/krtld/kobj.c2
-rw-r--r--usr/src/uts/common/os/brand.c4
-rw-r--r--usr/src/uts/common/smbsrv/ndl/netlogon.ndl3
-rw-r--r--usr/src/uts/i86pc/Makefile.files1
-rw-r--r--usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c2
-rw-r--r--usr/src/uts/i86pc/os/ht.c613
-rw-r--r--usr/src/uts/i86pc/os/mp_startup.c2
-rw-r--r--usr/src/uts/i86pc/os/trap.c4
-rw-r--r--usr/src/uts/i86pc/sys/ht.h47
-rw-r--r--usr/src/uts/i86xpv/Makefile.files3
-rw-r--r--usr/src/uts/intel/ia32/ml/swtch.s3
-rw-r--r--usr/src/uts/intel/zfs/Makefile3
-rw-r--r--usr/src/uts/sun4/sys/ht.h38
-rw-r--r--usr/src/uts/sun4v/sys/Makefile1
26 files changed, 991 insertions, 274 deletions
diff --git a/usr/src/pkg/manifests/system-header.mf b/usr/src/pkg/manifests/system-header.mf
index 146662d218..70ef6cd9e6 100644
--- a/usr/src/pkg/manifests/system-header.mf
+++ b/usr/src/pkg/manifests/system-header.mf
@@ -1737,6 +1737,7 @@ $(sparc_ONLY)file path=usr/platform/sun4u/include/sys/errclassify.h
$(sparc_ONLY)file path=usr/platform/sun4u/include/sys/fhc.h
$(sparc_ONLY)file path=usr/platform/sun4u/include/sys/gpio_87317.h
$(sparc_ONLY)file path=usr/platform/sun4u/include/sys/hpc3130_events.h
+$(sparc_ONLY)file path=usr/platform/sun4u/include/sys/ht.h
$(sparc_ONLY)file path=usr/platform/sun4u/include/sys/i2c/clients/hpc3130.h
$(sparc_ONLY)file path=usr/platform/sun4u/include/sys/i2c/clients/i2c_client.h
$(sparc_ONLY)file path=usr/platform/sun4u/include/sys/i2c/clients/lm75.h
@@ -1800,6 +1801,7 @@ $(sparc_ONLY)file path=usr/platform/sun4v/include/sys/dvma.h
$(sparc_ONLY)file path=usr/platform/sun4v/include/sys/eeprom.h
$(sparc_ONLY)file path=usr/platform/sun4v/include/sys/fcode.h
$(sparc_ONLY)file path=usr/platform/sun4v/include/sys/hsvc.h
+$(sparc_ONLY)file path=usr/platform/sun4v/include/sys/ht.h
$(sparc_ONLY)file path=usr/platform/sun4v/include/sys/hypervisor_api.h
$(sparc_ONLY)file path=usr/platform/sun4v/include/sys/idprom.h
$(sparc_ONLY)file path=usr/platform/sun4v/include/sys/intr.h
diff --git a/usr/src/uts/common/dtrace/dtrace.c b/usr/src/uts/common/dtrace/dtrace.c
index 769337294b..23fb7cece3 100644
--- a/usr/src/uts/common/dtrace/dtrace.c
+++ b/usr/src/uts/common/dtrace/dtrace.c
@@ -6091,7 +6091,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
uintptr_t a = (uintptr_t)svar->dtsv_data;
size_t lim;
- ASSERT(a != NULL);
+ ASSERT(a != (uintptr_t)NULL);
ASSERT(svar->dtsv_size != 0);
if (regs[rd] == 0) {
@@ -10319,7 +10319,7 @@ dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
continue;
if (svar->dtsv_size != 0) {
- ASSERT(svar->dtsv_data != NULL);
+ ASSERT(svar->dtsv_data != 0);
kmem_free((void *)(uintptr_t)svar->dtsv_data,
svar->dtsv_size);
}
@@ -10515,8 +10515,8 @@ dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
{
dtrace_actdesc_t *act;
- ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
- arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
+ ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != 0 &&
+ arg >= KERNELBASE) || (arg == 0 && kind == DTRACEACT_PRINTA));
act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
act->dtad_kind = kind;
@@ -11031,7 +11031,7 @@ dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
desc->dtad_kind == DTRACEACT_DIFEXPR);
format = 0;
} else {
- ASSERT(arg != NULL);
+ ASSERT(arg != 0);
ASSERT(arg > KERNELBASE);
format = dtrace_format_add(state,
(char *)(uintptr_t)arg);
@@ -15983,8 +15983,8 @@ dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
dtrace_toxrange = range;
}
- ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL);
- ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL);
+ ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == (uintptr_t)NULL);
+ ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == (uintptr_t)NULL);
dtrace_toxrange[dtrace_toxranges].dtt_base = base;
dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
diff --git a/usr/src/uts/common/io/dls/dls_mgmt.c b/usr/src/uts/common/io/dls/dls_mgmt.c
index f813acaac6..84860b5abb 100644
--- a/usr/src/uts/common/io/dls/dls_mgmt.c
+++ b/usr/src/uts/common/io/dls/dls_mgmt.c
@@ -1128,7 +1128,7 @@ dls_devnet_unset(mac_handle_t mh, datalink_id_t *id, boolean_t wait)
cv_wait(&ddp->dd_cv, &ddp->dd_mutex);
} else {
VERIFY(ddp->dd_tref == 0);
- VERIFY(ddp->dd_prop_taskid == NULL);
+ VERIFY(ddp->dd_prop_taskid == (taskqid_t)NULL);
}
if (ddp->dd_linkid != DATALINK_INVALID_LINKID) {
diff --git a/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_recv.c b/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_recv.c
index b8c4297c3e..02ae640ebf 100644
--- a/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_recv.c
+++ b/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_recv.c
@@ -85,7 +85,7 @@ static void ibmf_send_busy(ibmf_mod_load_args_t *modlargsp);
#define IS_MANDATORY_CLASS(class) \
((class == PERF_AGENT) || (class == BM_AGENT))
-char ibmf_client_modname[16];
+char ibmf_client_modname[16];
/*
* ibmf_i_handle_recv_completion():
@@ -355,7 +355,7 @@ ibmf_i_handle_recv_completion(ibmf_ci_t *cip, ibt_wc_t *wcp)
* boot and unregisters during detach and during
* HCA unconfigure operation. We come here
* 1. Before HCA registers with IBMF
- * Drop the MAD. Since this is a UD MAD,
+ * Drop the MAD. Since this is a UD MAD,
* sender will resend the request
* 2. After HCA unregistered with IBMF during DR operation.
* Since HCA is going away, we can safely drop the PMA
@@ -476,7 +476,7 @@ ibmf_i_do_recv_cb(void *taskq_arg)
grhpresent = B_TRUE;
ib_grh = (ib_grh_t *)recv_wqep->recv_mem;
gid.gid_prefix = b2h64(ib_grh->SGID.gid_prefix);
- gid.gid_guid = b2h64(ib_grh->SGID.gid_guid);
+ gid.gid_guid = b2h64(ib_grh->SGID.gid_guid);
} else {
grhpresent = B_FALSE;
lid = wcp->wc_slid;
@@ -1456,7 +1456,7 @@ ibmf_send_busy(ibmf_mod_load_args_t *modlargsp)
ibt_wr_ds_t sgl[1];
ibmf_send_wqe_t *send_wqep;
ibt_send_wr_t *swrp;
- ibmf_msg_impl_t *msgimplp;
+ ibmf_msg_impl_t *msgimplp;
ibmf_ud_dest_t *ibmf_ud_dest;
ibt_ud_dest_t *ud_dest;
ib_mad_hdr_t *smadhdrp, *rmadhdrp;
@@ -1586,7 +1586,7 @@ ibmf_send_busy(ibmf_mod_load_args_t *modlargsp)
/* use send wqe pointer as the WR ID */
swrp->wr_id = (ibt_wrid_t)(uintptr_t)send_wqep;
- ASSERT(swrp->wr_id != NULL);
+ ASSERT(swrp->wr_id != 0);
swrp->wr_flags = IBT_WR_NO_FLAGS;
swrp->wr_opcode = IBT_WRC_SEND;
swrp->wr_trans = IBT_UD_SRV;
diff --git a/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_send.c b/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_send.c
index 7265dd018e..0d682a8f5b 100644
--- a/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_send.c
+++ b/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_send.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* This file implements the MAD send logic in IBMF.
*/
@@ -268,7 +266,7 @@ ibmf_i_handle_send_completion(ibmf_ci_t *cip, ibt_wc_t *wcp)
_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqep))
- ASSERT(wcp->wc_id != NULL);
+ ASSERT(wcp->wc_id != 0);
ASSERT(IBMF_IS_SEND_WR_ID(wcp->wc_id));
diff --git a/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_wqe.c b/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_wqe.c
index 67a3092f24..3324cb9d1d 100644
--- a/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_wqe.c
+++ b/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_wqe.c
@@ -870,7 +870,7 @@ ibmf_i_init_send_wqe(ibmf_client_t *clientp, ibmf_msg_impl_t *msgimplp,
swrp = &wqep->send_wr;
/* use send wqe pointer as the WR ID */
IBMF_ADDR_TO_SEND_WR_ID(wqep, swrp->wr_id);
- ASSERT(swrp->wr_id != NULL);
+ ASSERT(swrp->wr_id != 0);
swrp->wr_flags = IBT_WR_NO_FLAGS;
swrp->wr_opcode = IBT_WRC_SEND;
swrp->wr_trans = IBT_UD_SRV;
diff --git a/usr/src/uts/common/io/idm/idm_text.c b/usr/src/uts/common/io/idm/idm_text.c
index 7b812260df..871579e878 100644
--- a/usr/src/uts/common/io/idm/idm_text.c
+++ b/usr/src/uts/common/io/idm/idm_text.c
@@ -735,7 +735,7 @@ idm_nvlist_add_list_of_values(nvlist_t *nvl,
int
idm_nvlist_to_textbuf(nvlist_t *nvl, char **textbuf, int *textbuflen,
- int *validlen)
+ int *validlen)
{
int rc = 0;
nvpair_t *nvp = NULL;
@@ -1546,7 +1546,7 @@ idm_pdu_list_to_nvlist(list_t *pdu_list, nvlist_t **nvlist,
goto cleanup;
}
- ASSERT(split_kvbuflen != NULL);
+ ASSERT(split_kvbuflen != 0);
kmem_free(split_kvbuf, split_kvbuflen);
/* Now handle the remainder of the PDU as normal */
diff --git a/usr/src/uts/common/io/mac/mac.c b/usr/src/uts/common/io/mac/mac.c
index f258aad701..12d1e9809c 100644
--- a/usr/src/uts/common/io/mac/mac.c
+++ b/usr/src/uts/common/io/mac/mac.c
@@ -6492,7 +6492,7 @@ mac_reclaim_ring_from_grp(mac_impl_t *mip, mac_ring_type_t ring_type,
if (mcip == NULL)
mcip = mac_get_grp_primary(group);
ASSERT(mcip != NULL);
- ASSERT(mcip->mci_share == NULL);
+ ASSERT(mcip->mci_share == 0);
mrp = MCIP_RESOURCE_PROPS(mcip);
if (ring_type == MAC_RING_TYPE_RX) {
@@ -8069,7 +8069,7 @@ mac_group_ring_modify(mac_client_impl_t *mcip, mac_group_t *group,
}
/* don't allow modifying rings for a share for now. */
- ASSERT(mcip->mci_share == NULL);
+ ASSERT(mcip->mci_share == 0);
if (ringcnt == group->mrg_cur_count)
return (0);
diff --git a/usr/src/uts/common/io/mac/mac_hio.c b/usr/src/uts/common/io/mac/mac_hio.c
index 100f071220..3fa270c936 100644
--- a/usr/src/uts/common/io/mac/mac_hio.c
+++ b/usr/src/uts/common/io/mac/mac_hio.c
@@ -60,7 +60,7 @@ i_mac_share_alloc(mac_client_impl_t *mcip)
i_mac_perim_enter(mip);
- ASSERT(mcip->mci_share == NULL);
+ ASSERT(mcip->mci_share == 0);
if (mac_share_capable((mac_handle_t)mcip->mci_mip) == 0) {
DTRACE_PROBE1(i__mac__share__alloc__not__sup,
diff --git a/usr/src/uts/common/io/sata/adapters/ahci/ahci.c b/usr/src/uts/common/io/sata/adapters/ahci/ahci.c
index 0e4fb433cf..3073d5de02 100644
--- a/usr/src/uts/common/io/sata/adapters/ahci/ahci.c
+++ b/usr/src/uts/common/io/sata/adapters/ahci/ahci.c
@@ -1204,8 +1204,6 @@ ahci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
ahci_ctlp->ahcictl_flags |= AHCI_SUSPEND;
- ahci_em_suspend(ahci_ctlp);
-
/* stop the watchdog handler */
if (ahci_ctlp->ahcictl_timeout_id) {
(void) untimeout(ahci_ctlp->ahcictl_timeout_id);
@@ -1214,6 +1212,8 @@ ahci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
mutex_exit(&ahci_ctlp->ahcictl_mutex);
+ ahci_em_suspend(ahci_ctlp);
+
/*
* drain the taskq
*/
diff --git a/usr/src/uts/common/io/tl.c b/usr/src/uts/common/io/tl.c
index e77f33d31f..e6b3d41625 100644
--- a/usr/src/uts/common/io/tl.c
+++ b/usr/src/uts/common/io/tl.c
@@ -404,9 +404,9 @@
#define TL_TICLTS 2 /* connectionless transport */
#define TL_UNUSED 3
#define TL_SOCKET 4 /* Socket */
-#define TL_SOCK_COTS (TL_SOCKET|TL_TICOTS)
-#define TL_SOCK_COTSORD (TL_SOCKET|TL_TICOTSORD)
-#define TL_SOCK_CLTS (TL_SOCKET|TL_TICLTS)
+#define TL_SOCK_COTS (TL_SOCKET | TL_TICOTS)
+#define TL_SOCK_COTSORD (TL_SOCKET | TL_TICOTSORD)
+#define TL_SOCK_CLTS (TL_SOCKET | TL_TICLTS)
#define TL_MINOR_MASK 0x7
#define TL_MINOR_START (TL_TICLTS + 1)
@@ -430,9 +430,9 @@ extern char ti_statetbl[TE_NOEVENTS][TS_NOSTATES];
*/
static int tl_open(queue_t *, dev_t *, int, int, cred_t *);
static int tl_close(queue_t *, int, cred_t *);
-static void tl_wput(queue_t *, mblk_t *);
-static void tl_wsrv(queue_t *);
-static void tl_rsrv(queue_t *);
+static int tl_wput(queue_t *, mblk_t *);
+static int tl_wsrv(queue_t *);
+static int tl_rsrv(queue_t *);
static int tl_attach(dev_info_t *, ddi_attach_cmd_t);
static int tl_detach(dev_info_t *, ddi_detach_cmd_t);
@@ -709,7 +709,7 @@ static struct module_info tl_minfo = {
static struct qinit tl_rinit = {
NULL, /* qi_putp */
- (int (*)())tl_rsrv, /* qi_srvp */
+ tl_rsrv, /* qi_srvp */
tl_open, /* qi_qopen */
tl_close, /* qi_qclose */
NULL, /* qi_qadmin */
@@ -718,8 +718,8 @@ static struct qinit tl_rinit = {
};
static struct qinit tl_winit = {
- (int (*)())tl_wput, /* qi_putp */
- (int (*)())tl_wsrv, /* qi_srvp */
+ tl_wput, /* qi_putp */
+ tl_wsrv, /* qi_srvp */
NULL, /* qi_qopen */
NULL, /* qi_qclose */
NULL, /* qi_qadmin */
@@ -757,8 +757,8 @@ static struct modlinkage modlinkage = {
* Check sanity of unlimited connect data etc.
*/
-#define TL_CLTS_PROVIDER_FLAG (XPG4_1|SENDZERO)
-#define TL_COTS_PROVIDER_FLAG (XPG4_1|SENDZERO)
+#define TL_CLTS_PROVIDER_FLAG (XPG4_1 | SENDZERO)
+#define TL_COTS_PROVIDER_FLAG (XPG4_1 | SENDZERO)
static struct T_info_ack tl_cots_info_ack =
{
@@ -832,7 +832,7 @@ static void tl_do_proto(mblk_t *, tl_endpt_t *);
static void tl_do_ioctl(mblk_t *, tl_endpt_t *);
static void tl_do_ioctl_ser(mblk_t *, tl_endpt_t *);
static void tl_error_ack(queue_t *, mblk_t *, t_scalar_t, t_scalar_t,
- t_scalar_t);
+ t_scalar_t);
static void tl_bind(mblk_t *, tl_endpt_t *);
static void tl_bind_ser(mblk_t *, tl_endpt_t *);
static void tl_ok_ack(queue_t *, mblk_t *mp, t_scalar_t);
@@ -1123,7 +1123,7 @@ tl_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
break;
case DDI_INFO_DEVT2INSTANCE:
- *result = (void *)0;
+ *result = NULL;
retcode = DDI_SUCCESS;
break;
@@ -1189,7 +1189,7 @@ tl_free(tl_endpt_t *tep)
ASSERT(tep->te_wq == NULL);
ASSERT(tep->te_ser != NULL);
ASSERT(tep->te_ser_count == 0);
- ASSERT(! (tep->te_flag & TL_ADDRHASHED));
+ ASSERT(!(tep->te_flag & TL_ADDRHASHED));
if (IS_SOCKET(tep)) {
ASSERT(tep->te_alen == TL_SOUX_ADDRLEN);
@@ -1376,7 +1376,7 @@ tl_hash_cmp_addr(mod_hash_key_t key1, mod_hash_key_t key2)
ASSERT(ap2->ta_alen > 0);
#endif
- return (! tl_eqaddr((tl_addr_t *)key1, (tl_addr_t *)key2));
+ return (!tl_eqaddr((tl_addr_t *)key1, (tl_addr_t *)key2));
}
/*
@@ -1389,7 +1389,7 @@ tl_noclose(tl_endpt_t *tep)
boolean_t rc = B_FALSE;
mutex_enter(&tep->te_closelock);
- if (! tep->te_closing) {
+ if (!tep->te_closing) {
ASSERT(tep->te_closewait == 0);
tep->te_closewait++;
rc = B_TRUE;
@@ -1550,7 +1550,7 @@ tl_close(queue_t *rq, int flag, cred_t *credp)
ASSERT(rc == 0 && tep == elp);
if ((rc != 0) || (tep != elp)) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_close:inconsistency in AI hash"));
}
@@ -1726,7 +1726,7 @@ tl_close_finish_ser(mblk_t *mp, tl_endpt_t *tep)
*
* The T_CONN_REQ is processed outside of serializer.
*/
-static void
+static int
tl_wput(queue_t *wq, mblk_t *mp)
{
tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
@@ -1739,10 +1739,10 @@ tl_wput(queue_t *wq, mblk_t *mp)
/* Only valid for connection-oriented transports */
if (IS_CLTS(tep)) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:M_DATA invalid for ticlts driver"));
tl_merror(wq, mp, EPROTO);
- return;
+ return (0);
}
tl_proc = tl_wput_data_ser;
break;
@@ -1760,7 +1760,7 @@ tl_wput(queue_t *wq, mblk_t *mp)
default:
miocnak(wq, mp, 0, EINVAL);
- return;
+ return (0);
}
break;
@@ -1778,15 +1778,15 @@ tl_wput(queue_t *wq, mblk_t *mp)
} else {
freemsg(mp);
}
- return;
+ return (0);
case M_PROTO:
if (msz < sizeof (prim->type)) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:M_PROTO data too short"));
tl_merror(wq, mp, EPROTO);
- return;
+ return (0);
}
switch (prim->type) {
case T_OPTMGMT_REQ:
@@ -1803,7 +1803,7 @@ tl_wput(queue_t *wq, mblk_t *mp)
* and is consistent with BSD socket behavior).
*/
tl_optmgmt(wq, mp);
- return;
+ return (0);
case O_T_BIND_REQ:
case T_BIND_REQ:
tl_proc = tl_bind_ser;
@@ -1811,10 +1811,10 @@ tl_wput(queue_t *wq, mblk_t *mp)
case T_CONN_REQ:
if (IS_CLTS(tep)) {
tl_merror(wq, mp, EPROTO);
- return;
+ return (0);
}
tl_conn_req(wq, mp);
- return;
+ return (0);
case T_DATA_REQ:
case T_OPTDATA_REQ:
case T_EXDATA_REQ:
@@ -1825,7 +1825,7 @@ tl_wput(queue_t *wq, mblk_t *mp)
if (IS_COTS(tep) ||
(msz < sizeof (struct T_unitdata_req))) {
tl_merror(wq, mp, EPROTO);
- return;
+ return (0);
}
if ((tep->te_state == TS_IDLE) && !wq->q_first) {
tl_proc = tl_unitdata_ser;
@@ -1854,15 +1854,15 @@ tl_wput(queue_t *wq, mblk_t *mp)
*/
if (msz < sizeof (prim->type)) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:M_PCROTO data too short"));
tl_merror(wq, mp, EPROTO);
- return;
+ return (0);
}
switch (prim->type) {
case T_CAPABILITY_REQ:
tl_capability_req(mp, tep);
- return;
+ return (0);
case T_INFO_REQ:
tl_proc = tl_info_req_ser;
break;
@@ -1872,17 +1872,17 @@ tl_wput(queue_t *wq, mblk_t *mp)
default:
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:unknown TPI msg primitive"));
tl_merror(wq, mp, EPROTO);
- return;
+ return (0);
}
break;
default:
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_wput:default:unexpected Streams message"));
freemsg(mp);
- return;
+ return (0);
}
/*
@@ -1891,6 +1891,7 @@ tl_wput(queue_t *wq, mblk_t *mp)
ASSERT(tl_proc != NULL);
tl_refhold(tep);
tl_serializer_enter(tep, tl_proc, mp);
+ return (0);
}
/*
@@ -1996,7 +1997,7 @@ tl_wput_data_ser(mblk_t *mp, tl_endpt_t *tep)
* messages that need processing may have arrived, so tl_wsrv repeats until
* queue is empty or te_nowsrv is set.
*/
-static void
+static int
tl_wsrv(queue_t *wq)
{
tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
@@ -2019,6 +2020,7 @@ tl_wsrv(queue_t *wq)
cv_signal(&tep->te_srv_cv);
mutex_exit(&tep->te_srv_lock);
}
+ return (0);
}
/*
@@ -2067,7 +2069,7 @@ tl_wsrv_ser(mblk_t *ser_mp, tl_endpt_t *tep)
* is possible that two instances of tl_rsrv will be running reusing the same
* rsrv mblk.
*/
-static void
+static int
tl_rsrv(queue_t *rq)
{
tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr;
@@ -2086,6 +2088,7 @@ tl_rsrv(queue_t *rq)
}
cv_signal(&tep->te_srv_cv);
mutex_exit(&tep->te_srv_lock);
+ return (0);
}
/* ARGSUSED */
@@ -2180,7 +2183,7 @@ tl_do_proto(mblk_t *mp, tl_endpt_t *tep)
break;
case T_ORDREL_REQ:
- if (! IS_COTSORD(tep)) {
+ if (!IS_COTSORD(tep)) {
tl_merror(tep->te_wq, mp, EPROTO);
break;
}
@@ -2208,7 +2211,7 @@ tl_do_proto(mblk_t *mp, tl_endpt_t *tep)
static void
tl_do_ioctl_ser(mblk_t *mp, tl_endpt_t *tep)
{
- if (! tep->te_closing)
+ if (!tep->te_closing)
tl_do_ioctl(mp, tep);
else
freemsg(mp);
@@ -2293,7 +2296,7 @@ tl_error_ack(queue_t *wq, mblk_t *mp, t_scalar_t tli_err,
M_PCPROTO, T_ERROR_ACK);
if (ackmp == NULL) {
- (void) (STRLOG(TL_ID, 0, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, 0, 1, SL_TRACE | SL_ERROR,
"tl_error_ack:out of mblk memory"));
tl_merror(wq, NULL, ENOSR);
return;
@@ -2341,7 +2344,7 @@ tl_ok_ack(queue_t *wq, mblk_t *mp, t_scalar_t type)
static void
tl_bind_ser(mblk_t *mp, tl_endpt_t *tep)
{
- if (! tep->te_closing)
+ if (!tep->te_closing)
tl_bind(mp, tep);
else
freemsg(mp);
@@ -2373,7 +2376,7 @@ tl_bind(mblk_t *mp, tl_endpt_t *tep)
if (tep->te_state != TS_UNBND) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:bind_request:out of state, state=%d",
tep->te_state));
tli_err = TOUTSTATE;
@@ -2381,7 +2384,8 @@ tl_bind(mblk_t *mp, tl_endpt_t *tep)
}
if (msz < sizeof (struct T_bind_req)) {
- tli_err = TSYSERR; unix_err = EINVAL;
+ tli_err = TSYSERR;
+ unix_err = EINVAL;
goto error;
}
@@ -2408,7 +2412,8 @@ tl_bind(mblk_t *mp, tl_endpt_t *tep)
((tep->te_flag & TL_ADDRHASHED) == 0) &&
mod_hash_reserve_nosleep(tep->te_addrhash,
&tep->te_hash_hndl) != 0) {
- tli_err = TSYSERR; unix_err = ENOSR;
+ tli_err = TSYSERR;
+ unix_err = ENOSR;
goto error;
}
@@ -2422,10 +2427,11 @@ tl_bind(mblk_t *mp, tl_endpt_t *tep)
(aoff < 0) ||
(aoff + alen > msz)) {
(void) (STRLOG(TL_ID, tep->te_minor,
- 1, SL_TRACE|SL_ERROR,
+ 1, SL_TRACE | SL_ERROR,
"tl_bind: invalid socket addr"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
- tli_err = TSYSERR; unix_err = EINVAL;
+ tli_err = TSYSERR;
+ unix_err = EINVAL;
goto error;
}
/* Copy address from message to local buffer. */
@@ -2436,28 +2442,31 @@ tl_bind(mblk_t *mp, tl_endpt_t *tep)
if ((ux_addr.soua_magic != SOU_MAGIC_EXPLICIT) &&
(ux_addr.soua_magic != SOU_MAGIC_IMPLICIT)) {
(void) (STRLOG(TL_ID, tep->te_minor,
- 1, SL_TRACE|SL_ERROR,
+ 1, SL_TRACE | SL_ERROR,
"tl_bind: invalid socket magic"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
- tli_err = TSYSERR; unix_err = EINVAL;
+ tli_err = TSYSERR;
+ unix_err = EINVAL;
goto error;
}
if ((ux_addr.soua_magic == SOU_MAGIC_IMPLICIT) &&
(ux_addr.soua_vp != NULL)) {
(void) (STRLOG(TL_ID, tep->te_minor,
- 1, SL_TRACE|SL_ERROR,
+ 1, SL_TRACE | SL_ERROR,
"tl_bind: implicit addr non-empty"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
- tli_err = TSYSERR; unix_err = EINVAL;
+ tli_err = TSYSERR;
+ unix_err = EINVAL;
goto error;
}
if ((ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) &&
(ux_addr.soua_vp == NULL)) {
(void) (STRLOG(TL_ID, tep->te_minor,
- 1, SL_TRACE|SL_ERROR,
+ 1, SL_TRACE | SL_ERROR,
"tl_bind: explicit addr empty"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
- tli_err = TSYSERR; unix_err = EINVAL;
+ tli_err = TSYSERR;
+ unix_err = EINVAL;
goto error;
}
} else {
@@ -2465,15 +2474,16 @@ tl_bind(mblk_t *mp, tl_endpt_t *tep)
((ssize_t)(aoff + alen) > msz) ||
((aoff + alen) < 0))) {
(void) (STRLOG(TL_ID, tep->te_minor,
- 1, SL_TRACE|SL_ERROR,
+ 1, SL_TRACE | SL_ERROR,
"tl_bind: invalid message"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
- tli_err = TSYSERR; unix_err = EINVAL;
+ tli_err = TSYSERR;
+ unix_err = EINVAL;
goto error;
}
if ((alen < 0) || (alen > (msz - sizeof (struct T_bind_req)))) {
(void) (STRLOG(TL_ID, tep->te_minor,
- 1, SL_TRACE|SL_ERROR,
+ 1, SL_TRACE | SL_ERROR,
"tl_bind: bad addr in message"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
tli_err = TBADADDR;
@@ -2482,13 +2492,13 @@ tl_bind(mblk_t *mp, tl_endpt_t *tep)
#ifdef DEBUG
/*
* Mild form of ASSERT()ion to detect broken TPI apps.
- * if (! assertion)
+ * if (!assertion)
* log warning;
*/
- if (! ((alen == 0 && aoff == 0) ||
+ if (!((alen == 0 && aoff == 0) ||
(aoff >= (t_scalar_t)(sizeof (struct T_bind_req))))) {
(void) (STRLOG(TL_ID, tep->te_minor,
- 3, SL_TRACE|SL_ERROR,
+ 3, SL_TRACE | SL_ERROR,
"tl_bind: addr overlaps TPI message"));
}
#endif
@@ -2532,10 +2542,11 @@ tl_bind(mblk_t *mp, tl_endpt_t *tep)
* other than supplied one for explicit binds.
*/
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_bind:requested addr %p is busy",
ux_addr.soua_vp));
- tli_err = TADDRBUSY; unix_err = 0;
+ tli_err = TADDRBUSY;
+ unix_err = 0;
goto error;
}
tep->te_uxaddr = ux_addr;
@@ -2546,12 +2557,13 @@ tl_bind(mblk_t *mp, tl_endpt_t *tep)
/*
* assign any free address
*/
- if (! tl_get_any_addr(tep, NULL)) {
+ if (!tl_get_any_addr(tep, NULL)) {
(void) (STRLOG(TL_ID, tep->te_minor,
- 1, SL_TRACE|SL_ERROR,
+ 1, SL_TRACE | SL_ERROR,
"tl_bind:failed to get buffer for any "
"address"));
- tli_err = TSYSERR; unix_err = ENOSR;
+ tli_err = TSYSERR;
+ unix_err = ENOSR;
goto error;
}
} else {
@@ -2561,7 +2573,8 @@ tl_bind(mblk_t *mp, tl_endpt_t *tep)
tep->te_abuf = kmem_zalloc((size_t)alen, KM_NOSLEEP);
if (tep->te_abuf == NULL) {
- tli_err = TSYSERR; unix_err = ENOSR;
+ tli_err = TSYSERR;
+ unix_err = ENOSR;
goto error;
}
bcopy(addr_req.ta_abuf, tep->te_abuf, addr_req.ta_alen);
@@ -2577,9 +2590,10 @@ tl_bind(mblk_t *mp, tl_endpt_t *tep)
* requested is busy
*/
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_bind:requested addr is busy"));
- tli_err = TADDRBUSY; unix_err = 0;
+ tli_err = TADDRBUSY;
+ unix_err = 0;
goto error;
}
@@ -2587,11 +2601,12 @@ tl_bind(mblk_t *mp, tl_endpt_t *tep)
* O_T_BIND_REQ semantics say if address if requested
* address is busy, bind to any available free address
*/
- if (! tl_get_any_addr(tep, &addr_req)) {
+ if (!tl_get_any_addr(tep, &addr_req)) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_bind:unable to get any addr buf"));
- tli_err = TSYSERR; unix_err = ENOMEM;
+ tli_err = TSYSERR;
+ unix_err = ENOMEM;
goto error;
}
} else {
@@ -2609,7 +2624,7 @@ skip_addr_bind:
basize = sizeof (struct T_bind_ack) + tep->te_alen;
bamp = reallocb(mp, basize, 0);
if (bamp == NULL) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_wput:tl_bind: allocb failed"));
/*
* roll back state changes
@@ -2693,7 +2708,7 @@ tl_unbind(mblk_t *mp, tl_endpt_t *tep)
*/
if (tep->te_state != TS_IDLE) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:T_UNBIND_REQ:out of state, state=%d",
tep->te_state));
tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_UNBIND_REQ);
@@ -2708,7 +2723,7 @@ tl_unbind(mblk_t *mp, tl_endpt_t *tep)
*/
(void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
- if (! IS_SOCKET(tep) || !IS_CLTS(tep) || tep->te_qlen != 0 ||
+ if (!IS_SOCKET(tep) || !IS_CLTS(tep) || tep->te_qlen != 0 ||
tep->te_magic != SOU_MAGIC_EXPLICIT) {
/*
@@ -2772,14 +2787,14 @@ tl_optmgmt(queue_t *wq, mblk_t *mp)
* tests this TLI (mis)feature using this device driver.
*/
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:T_SVR4_OPTMGMT_REQ:out of state, state=%d",
tep->te_state));
/*
* preallocate memory for T_ERROR_ACK
*/
ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
- if (! ackmp) {
+ if (ackmp == NULL) {
tl_memrecover(wq, mp, sizeof (struct T_error_ack));
return;
}
@@ -2835,7 +2850,7 @@ tl_conn_req(queue_t *wq, mblk_t *mp)
* 2. max of T_DISCON_IND and T_CONN_IND
*/
ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
- if (! ackmp) {
+ if (ackmp == NULL) {
tl_memrecover(wq, mp, sizeof (struct T_error_ack));
return;
}
@@ -2846,7 +2861,7 @@ tl_conn_req(queue_t *wq, mblk_t *mp)
if (tep->te_state != TS_IDLE) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:T_CONN_REQ:out of state, state=%d",
tep->te_state));
tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ);
@@ -2860,7 +2875,7 @@ tl_conn_req(queue_t *wq, mblk_t *mp)
* after validating the message length.
*/
if (msz < sizeof (struct T_conn_req)) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_conn_req:invalid message length"));
tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
freemsg(mp);
@@ -2879,7 +2894,7 @@ tl_conn_req(queue_t *wq, mblk_t *mp)
(aoff + alen > msz) ||
(alen > msz - sizeof (struct T_conn_req))) {
(void) (STRLOG(TL_ID, tep->te_minor,
- 1, SL_TRACE|SL_ERROR,
+ 1, SL_TRACE | SL_ERROR,
"tl_conn_req: invalid socket addr"));
tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
freemsg(mp);
@@ -2889,7 +2904,7 @@ tl_conn_req(queue_t *wq, mblk_t *mp)
if ((ux_addr.soua_magic != SOU_MAGIC_IMPLICIT) &&
(ux_addr.soua_magic != SOU_MAGIC_EXPLICIT)) {
(void) (STRLOG(TL_ID, tep->te_minor,
- 1, SL_TRACE|SL_ERROR,
+ 1, SL_TRACE | SL_ERROR,
"tl_conn_req: invalid socket magic"));
tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
freemsg(mp);
@@ -2901,7 +2916,7 @@ tl_conn_req(queue_t *wq, mblk_t *mp)
ooff + olen < 0)) ||
olen < 0 || ooff < 0) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_conn_req:invalid message"));
tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
freemsg(mp);
@@ -2911,7 +2926,7 @@ tl_conn_req(queue_t *wq, mblk_t *mp)
if (alen <= 0 || aoff < 0 ||
(ssize_t)alen > msz - sizeof (struct T_conn_req)) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_conn_req:bad addr in message, "
"alen=%d, msz=%ld",
alen, msz));
@@ -2922,12 +2937,12 @@ tl_conn_req(queue_t *wq, mblk_t *mp)
#ifdef DEBUG
/*
* Mild form of ASSERT()ion to detect broken TPI apps.
- * if (! assertion)
+ * if (!assertion)
* log warning;
*/
- if (! (aoff >= (t_scalar_t)sizeof (struct T_conn_req))) {
+ if (!(aoff >= (t_scalar_t)sizeof (struct T_conn_req))) {
(void) (STRLOG(TL_ID, tep->te_minor, 3,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_conn_req: addr overlaps TPI message"));
}
#endif
@@ -2937,7 +2952,7 @@ tl_conn_req(queue_t *wq, mblk_t *mp)
* supported in this provider except for sockets.
*/
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_conn_req:options not supported "
"in message"));
tl_error_ack(wq, ackmp, TBADOPT, 0, T_CONN_REQ);
@@ -2949,8 +2964,8 @@ tl_conn_req(queue_t *wq, mblk_t *mp)
/*
* Prevent tep from closing on us.
*/
- if (! tl_noclose(tep)) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ if (!tl_noclose(tep)) {
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_conn_req:endpoint is closing"));
tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ);
freemsg(mp);
@@ -2975,7 +2990,7 @@ tl_conn_req(queue_t *wq, mblk_t *mp)
tl_find_peer(tep, &dst));
if (peer_tep == NULL) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_conn_req:no one at connect address"));
err = ECONNREFUSED;
} else if (peer_tep->te_nicon >= peer_tep->te_qlen) {
@@ -3107,7 +3122,7 @@ tl_conn_req_ser(mblk_t *mp, tl_endpt_t *tep)
/*
* calculate length of T_CONN_IND message
*/
- if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED)) {
+ if (peer_tep->te_flag & (TL_SETCRED | TL_SETUCRED)) {
cr = msg_getcred(mp, &cpid);
ASSERT(cr != NULL);
if (peer_tep->te_flag & TL_SETCRED) {
@@ -3155,7 +3170,7 @@ tl_conn_req_ser(mblk_t *mp, tl_endpt_t *tep)
* are isomorphic.
*/
confmp = copyb(mp);
- if (! confmp) {
+ if (confmp == NULL) {
/*
* roll back state changes
*/
@@ -3233,8 +3248,8 @@ tl_conn_req_ser(mblk_t *mp, tl_endpt_t *tep)
* in the returned mblk
*/
cimp = tl_resizemp(indmp, size);
- if (! cimp) {
- (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
+ if (cimp == NULL) {
+ (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
"tl_conn_req:con_ind:allocb failure"));
tl_merror(wq, indmp, ENOMEM);
TL_UNCONNECT(tep->te_oconp);
@@ -3363,7 +3378,7 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
* 2. max of T_DISCON_IND and T_CONN_CON
*/
ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
- if (! ackmp) {
+ if (ackmp == NULL) {
tl_memrecover(wq, mp, sizeof (struct T_error_ack));
return;
}
@@ -3380,7 +3395,7 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
*/
if (tep->te_state != TS_WRES_CIND) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:T_CONN_RES:out of state, state=%d",
tep->te_state));
tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim);
@@ -3394,7 +3409,7 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
* after validating the message length.
*/
if (msz < sizeof (struct T_conn_res)) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_conn_res:invalid message length"));
tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim);
freemsg(mp);
@@ -3403,7 +3418,7 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
olen = cres->OPT_length;
ooff = cres->OPT_offset;
if (((olen > 0) && ((ooff + olen) > msz))) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_conn_res:invalid message"));
tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim);
freemsg(mp);
@@ -3414,7 +3429,7 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
* no opts in connect res
* supported in this provider
*/
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_conn_res:options not supported in message"));
tl_error_ack(wq, ackmp, TBADOPT, 0, prim);
freemsg(mp);
@@ -3426,7 +3441,7 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
if (cres->SEQ_number < TL_MINOR_START &&
cres->SEQ_number >= BADSEQNUM) {
- (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
"tl_conn_res:remote endpoint sequence number bad"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
tl_error_ack(wq, ackmp, TBADSEQ, 0, prim);
@@ -3440,7 +3455,7 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
if (mod_hash_find_cb(tep->te_transport->tr_ai_hash,
(mod_hash_key_t)(uintptr_t)cres->ACCEPTOR_id,
(mod_hash_val_t *)&acc_ep, tl_find_callback) != 0) {
- (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
"tl_conn_res:bad accepting endpoint"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
tl_error_ack(wq, ackmp, TBADF, 0, prim);
@@ -3451,8 +3466,8 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
/*
* Prevent acceptor from closing.
*/
- if (! tl_noclose(acc_ep)) {
- (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
+ if (!tl_noclose(acc_ep)) {
+ (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
"tl_conn_res:bad accepting endpoint"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
tl_error_ack(wq, ackmp, TBADF, 0, prim);
@@ -3469,7 +3484,7 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
* TROUBLE in XPG4 !!?
*/
if ((tep != acc_ep) && (acc_ep->te_state != TS_IDLE)) {
- (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
"tl_conn_res:accepting endpoint has no address bound,"
"state=%d", acc_ep->te_state));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
@@ -3486,7 +3501,7 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
*/
if ((tep == acc_ep) && (tep->te_nicon > 1)) {
- (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
"tl_conn_res: > 1 conn_ind on listener-acceptor"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
tl_error_ack(wq, ackmp, TBADF, 0, prim);
@@ -3504,7 +3519,7 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
*/
tip = tl_icon_find(tep, cres->SEQ_number);
if (tip == NULL) {
- (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE | SL_ERROR,
"tl_conn_res:no client in listener list"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
tl_error_ack(wq, ackmp, TBADSEQ, 0, prim);
@@ -3586,9 +3601,9 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
* ack validity of request (T_OK_ACK) after memory committed
*/
- if (err)
+ if (err) {
size = sizeof (struct T_discon_ind);
- else {
+ } else {
/*
* calculate length of T_CONN_CON message
*/
@@ -3627,8 +3642,9 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
tep->te_state = NEXTSTATE(TE_OK_ACK2, tep->te_state);
else
tep->te_state = NEXTSTATE(TE_OK_ACK3, tep->te_state);
- } else
+ } else {
tep->te_state = NEXTSTATE(TE_OK_ACK4, tep->te_state);
+ }
/*
* send T_DISCON_IND now if client state validation failed earlier
@@ -3641,9 +3657,9 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
(void) putnextctl1(acc_ep->te_rq, M_FLUSH, FLUSHR);
dimp = tl_resizemp(respmp, size);
- if (! dimp) {
+ if (dimp == NULL) {
(void) (STRLOG(TL_ID, tep->te_minor, 3,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_conn_res:con_ind:allocb failure"));
tl_merror(wq, respmp, ENOMEM);
tl_closeok(acc_ep);
@@ -3718,12 +3734,12 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
* allocate the message - original data blocks
* retained in the returned mblk
*/
- if (! IS_SOCKET(cl_ep) || tl_disable_early_connect) {
+ if (!IS_SOCKET(cl_ep) || tl_disable_early_connect) {
ccmp = tl_resizemp(respmp, size);
if (ccmp == NULL) {
tl_ok_ack(wq, ackmp, prim);
(void) (STRLOG(TL_ID, tep->te_minor, 3,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_conn_res:conn_con:allocb failure"));
tl_merror(wq, respmp, ENOMEM);
tl_closeok(acc_ep);
@@ -3740,7 +3756,7 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
cc->RES_length = acc_ep->te_alen;
addr_startp = ccmp->b_rptr + cc->RES_offset;
bcopy(acc_ep->te_abuf, addr_startp, acc_ep->te_alen);
- if (cl_ep->te_flag & (TL_SETCRED|TL_SETUCRED)) {
+ if (cl_ep->te_flag & (TL_SETCRED | TL_SETUCRED)) {
cc->OPT_offset = (t_scalar_t)T_ALIGN(cc->RES_offset +
cc->RES_length);
cc->OPT_length = olen;
@@ -3822,7 +3838,7 @@ tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
/*
* link queues so that I_SENDFD will work
*/
- if (! IS_SOCKET(tep)) {
+ if (!IS_SOCKET(tep)) {
acc_ep->te_wq->q_next = cl_ep->te_rq;
cl_ep->te_wq->q_next = acc_ep->te_rq;
}
@@ -3897,7 +3913,7 @@ tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
* 2. for T_DISCON_IND
*/
ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
- if (! ackmp) {
+ if (ackmp == NULL) {
tl_memrecover(wq, mp, sizeof (struct T_error_ack));
return;
}
@@ -3913,10 +3929,10 @@ tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
* validate the state
*/
save_state = new_state = tep->te_state;
- if (! (save_state >= TS_WCON_CREQ && save_state <= TS_WRES_CIND) &&
- ! (save_state >= TS_DATA_XFER && save_state <= TS_WREQ_ORDREL)) {
+ if (!(save_state >= TS_WCON_CREQ && save_state <= TS_WRES_CIND) &&
+ !(save_state >= TS_DATA_XFER && save_state <= TS_WREQ_ORDREL)) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:T_DISCON_REQ:out of state, state=%d",
tep->te_state));
tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_DISCON_REQ);
@@ -3931,7 +3947,7 @@ tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
/* validate the message */
if (msz < sizeof (struct T_discon_req)) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_discon_req:invalid message"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state);
tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_DISCON_REQ);
@@ -3951,7 +3967,7 @@ tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
tip = tl_icon_find(tep, dr->SEQ_number);
if (tip == NULL) {
(void) (STRLOG(TL_ID, tep->te_minor, 2,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_discon_req:no disconnect endpoint"));
tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state);
tl_error_ack(wq, ackmp, TBADSEQ, 0, T_DISCON_REQ);
@@ -3982,13 +3998,14 @@ tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
/*
* prepare message to ack validity of request
*/
- if (tep->te_nicon == 0)
+ if (tep->te_nicon == 0) {
new_state = NEXTSTATE(TE_OK_ACK1, new_state);
- else
+ } else {
if (tep->te_nicon == 1)
new_state = NEXTSTATE(TE_OK_ACK2, new_state);
else
new_state = NEXTSTATE(TE_OK_ACK4, new_state);
+ }
/*
* Flushing queues according to TPI. Using the old state.
@@ -4012,7 +4029,7 @@ tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
*/
if ((dimp = tl_resizemp(respmp, size)) == NULL) {
(void) (STRLOG(TL_ID, tep->te_minor, 2,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_discon_req: reallocb failed"));
tep->te_state = new_state;
tl_merror(wq, respmp, ENOMEM);
@@ -4043,7 +4060,7 @@ tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
if ((dimp = tl_resizemp(respmp, size)) == NULL) {
(void) (STRLOG(TL_ID, tep->te_minor, 2,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_discon_req: reallocb failed"));
tep->te_state = new_state;
tl_merror(wq, respmp, ENOMEM);
@@ -4101,7 +4118,7 @@ tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
} else if ((peer_tep = tep->te_conp) != NULL) { /* connected! */
if ((dimp = tl_resizemp(respmp, size)) == NULL) {
(void) (STRLOG(TL_ID, tep->te_minor, 2,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_discon_req: reallocb failed"));
tep->te_state = new_state;
tl_merror(wq, respmp, ENOMEM);
@@ -4150,7 +4167,7 @@ tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
putnext(peer_tep->te_rq, dimp);
done:
if (tep->te_conp) { /* disconnect pointers if connected */
- ASSERT(! peer_tep->te_closing);
+ ASSERT(!peer_tep->te_closing);
/*
* Messages may be queued on peer's write queue
@@ -4165,7 +4182,7 @@ done:
TL_QENABLE(peer_tep);
ASSERT(peer_tep != NULL && peer_tep->te_conp != NULL);
TL_UNCONNECT(peer_tep->te_conp);
- if (! IS_SOCKET(tep)) {
+ if (!IS_SOCKET(tep)) {
/*
* unlink the streams
*/
@@ -4222,7 +4239,7 @@ tl_addr_req(mblk_t *mp, tl_endpt_t *tep)
ackmp = reallocb(mp, ack_sz, 0);
if (ackmp == NULL) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_addr_req: reallocb failed"));
tl_memrecover(wq, mp, ack_sz);
return;
@@ -4285,7 +4302,7 @@ tl_connected_cots_addr_req(mblk_t *mp, tl_endpt_t *tep)
ackmp = tpi_ack_alloc(mp, ack_sz, M_PCPROTO, T_ADDR_ACK);
if (ackmp == NULL) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_connected_cots_addr_req: reallocb failed"));
tl_memrecover(tep->te_wq, mp, ack_sz);
return;
@@ -4351,7 +4368,7 @@ tl_capability_req(mblk_t *mp, tl_endpt_t *tep)
ackmp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
M_PCPROTO, T_CAPABILITY_ACK);
if (ackmp == NULL) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_capability_req: reallocb failed"));
tl_memrecover(tep->te_wq, mp,
sizeof (struct T_capability_ack));
@@ -4377,7 +4394,7 @@ tl_capability_req(mblk_t *mp, tl_endpt_t *tep)
static void
tl_info_req_ser(mblk_t *mp, tl_endpt_t *tep)
{
- if (! tep->te_closing)
+ if (!tep->te_closing)
tl_info_req(mp, tep);
else
freemsg(mp);
@@ -4394,7 +4411,7 @@ tl_info_req(mblk_t *mp, tl_endpt_t *tep)
ackmp = tpi_ack_alloc(mp, sizeof (struct T_info_ack),
M_PCPROTO, T_INFO_ACK);
if (ackmp == NULL) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_info_req: reallocb failed"));
tl_memrecover(tep->te_wq, mp, sizeof (struct T_info_ack));
return;
@@ -4427,7 +4444,7 @@ tl_data(mblk_t *mp, tl_endpt_t *tep)
if (IS_CLTS(tep)) {
(void) (STRLOG(TL_ID, tep->te_minor, 2,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:clts:unattached M_DATA"));
if (!closing) {
tl_merror(wq, mp, EPROTO);
@@ -4453,7 +4470,7 @@ tl_data(mblk_t *mp, tl_endpt_t *tep)
if (prim->type == T_DATA_REQ &&
msz < sizeof (struct T_data_req)) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_data:T_DATA_REQ:invalid message"));
if (!closing) {
tl_merror(wq, mp, EPROTO);
@@ -4464,7 +4481,7 @@ tl_data(mblk_t *mp, tl_endpt_t *tep)
} else if (prim->type == T_OPTDATA_REQ &&
(msz < sizeof (struct T_optdata_req) || !IS_SOCKET(tep))) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_data:T_OPTDATA_REQ:invalid message"));
if (!closing) {
tl_merror(wq, mp, EPROTO);
@@ -4484,7 +4501,7 @@ tl_data(mblk_t *mp, tl_endpt_t *tep)
* Other end not here - do nothing.
*/
freemsg(mp);
- (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
"tl_data:cots with endpoint idle"));
return;
@@ -4512,7 +4529,7 @@ tl_data(mblk_t *mp, tl_endpt_t *tep)
*/
if (!closing) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_data: ocon"));
TL_PUTBQ(tep, mp);
return;
@@ -4540,7 +4557,7 @@ tl_data(mblk_t *mp, tl_endpt_t *tep)
*/
freemsg(mp);
(void) (STRLOG(TL_ID, tep->te_minor, 3,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_data: WREQ_ORDREL and no peer"));
tl_discon_ind(tep, 0);
return;
@@ -4549,7 +4566,7 @@ tl_data(mblk_t *mp, tl_endpt_t *tep)
default:
/* invalid state for event TE_DATA_REQ */
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_data:cots:out of state"));
tl_merror(wq, mp, EPROTO);
return;
@@ -4592,7 +4609,7 @@ tl_data(mblk_t *mp, tl_endpt_t *tep)
/* valid states */
break;
default:
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_data:rx side:invalid state"));
tl_merror(peer_tep->te_wq, mp, EPROTO);
return;
@@ -4627,7 +4644,7 @@ tl_exdata(mblk_t *mp, tl_endpt_t *tep)
boolean_t closing = tep->te_closing;
if (msz < sizeof (struct T_exdata_req)) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_exdata:invalid message"));
if (!closing) {
tl_merror(wq, mp, EPROTO);
@@ -4658,7 +4675,7 @@ tl_exdata(mblk_t *mp, tl_endpt_t *tep)
* Other end not here - do nothing.
*/
freemsg(mp);
- (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
"tl_exdata:cots with endpoint idle"));
return;
@@ -4686,12 +4703,12 @@ tl_exdata(mblk_t *mp, tl_endpt_t *tep)
*/
if (!closing) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_exdata: ocon"));
TL_PUTBQ(tep, mp);
return;
}
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_exdata: closing socket ocon"));
prim->type = T_EXDATA_IND;
tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
@@ -4706,7 +4723,7 @@ tl_exdata(mblk_t *mp, tl_endpt_t *tep)
*/
freemsg(mp);
(void) (STRLOG(TL_ID, tep->te_minor, 3,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_exdata: WREQ_ORDREL and no peer"));
tl_discon_ind(tep, 0);
return;
@@ -4715,7 +4732,7 @@ tl_exdata(mblk_t *mp, tl_endpt_t *tep)
default:
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:T_EXDATA_REQ:out of state, state=%d",
tep->te_state));
tl_merror(wq, mp, EPROTO);
@@ -4758,7 +4775,7 @@ tl_exdata(mblk_t *mp, tl_endpt_t *tep)
/* valid states */
break;
default:
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_exdata:rx side:invalid state"));
tl_merror(peer_tep->te_wq, mp, EPROTO);
return;
@@ -4783,7 +4800,7 @@ tl_exdata(mblk_t *mp, tl_endpt_t *tep)
static void
tl_ordrel(mblk_t *mp, tl_endpt_t *tep)
{
- queue_t *wq = tep->te_wq;
+ queue_t *wq = tep->te_wq;
union T_primitives *prim = (union T_primitives *)mp->b_rptr;
ssize_t msz = MBLKL(mp);
tl_endpt_t *peer_tep;
@@ -4791,7 +4808,7 @@ tl_ordrel(mblk_t *mp, tl_endpt_t *tep)
boolean_t closing = tep->te_closing;
if (msz < sizeof (struct T_ordrel_req)) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_ordrel:invalid message"));
if (!closing) {
tl_merror(wq, mp, EPROTO);
@@ -4825,12 +4842,12 @@ tl_ordrel(mblk_t *mp, tl_endpt_t *tep)
*/
if (!closing) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_ordlrel: ocon"));
TL_PUTBQ(tep, mp);
return;
}
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_ordlrel: closing socket ocon"));
prim->type = T_ORDREL_IND;
(void) tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
@@ -4838,7 +4855,7 @@ tl_ordrel(mblk_t *mp, tl_endpt_t *tep)
default:
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:T_ORDREL_REQ:out of state, state=%d",
tep->te_state));
if (!closing) {
@@ -4868,7 +4885,7 @@ tl_ordrel(mblk_t *mp, tl_endpt_t *tep)
* Note: Messages already on queue when we are closing is bounded
* so we can ignore flow control.
*/
- if (! canputnext(peer_rq) && !closing) {
+ if (!canputnext(peer_rq) && !closing) {
TL_PUTBQ(tep, mp);
return;
}
@@ -4882,7 +4899,7 @@ tl_ordrel(mblk_t *mp, tl_endpt_t *tep)
/* valid states */
break;
default:
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_ordrel:rx side:invalid state"));
tl_merror(peer_tep->te_wq, mp, EPROTO);
return;
@@ -4930,8 +4947,8 @@ tl_uderr(queue_t *wq, mblk_t *mp, t_scalar_t err)
err_sz += olen;
err_mp = allocb(err_sz, BPRI_MED);
- if (! err_mp) {
- (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
+ if (err_mp == NULL) {
+ (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
"tl_uderr:allocb failure"));
/*
* Note: no rollback of state needed as it does
@@ -4953,7 +4970,7 @@ tl_uderr(queue_t *wq, mblk_t *mp, t_scalar_t err)
} else {
uderr->DEST_offset =
(t_scalar_t)sizeof (struct T_uderror_ind);
- addr_startp = mp->b_rptr + udreq->DEST_offset;
+ addr_startp = mp->b_rptr + udreq->DEST_offset;
bcopy(addr_startp, err_mp->b_rptr + uderr->DEST_offset,
(size_t)alen);
}
@@ -4963,7 +4980,7 @@ tl_uderr(queue_t *wq, mblk_t *mp, t_scalar_t err)
uderr->OPT_offset =
(t_scalar_t)T_ALIGN(sizeof (struct T_uderror_ind) +
uderr->DEST_length);
- addr_startp = mp->b_rptr + udreq->OPT_offset;
+ addr_startp = mp->b_rptr + udreq->OPT_offset;
bcopy(addr_startp, err_mp->b_rptr+uderr->OPT_offset,
(size_t)olen);
}
@@ -4984,10 +5001,12 @@ tl_unitdata_ser(mblk_t *mp, tl_endpt_t *tep)
if (!tep->te_closing && (wq->q_first != NULL)) {
TL_PUTQ(tep, mp);
- } else if (tep->te_rq != NULL)
- tl_unitdata(mp, tep);
- else
- freemsg(mp);
+ } else {
+ if (tep->te_rq != NULL)
+ tl_unitdata(mp, tep);
+ else
+ freemsg(mp);
+ }
tl_serializer_exit(tep);
tl_refrele(tep);
@@ -5022,7 +5041,7 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
*/
if (tep->te_state != TS_IDLE) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_wput:T_CONN_REQ:out of state"));
tl_merror(wq, mp, EPROTO);
return;
@@ -5038,7 +5057,7 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
* after validating the message length.
*/
if (msz < sizeof (struct T_unitdata_req)) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_unitdata:invalid message length"));
tl_merror(wq, mp, EINVAL);
return;
@@ -5057,7 +5076,7 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
(olen < 0) || (ooff < 0) ||
((olen > 0) && ((ooff + olen) > msz))) {
(void) (STRLOG(TL_ID, tep->te_minor,
- 1, SL_TRACE|SL_ERROR,
+ 1, SL_TRACE | SL_ERROR,
"tl_unitdata_req: invalid socket addr "
"(msz=%d, al=%d, ao=%d, ol=%d, oo = %d)",
(int)msz, alen, aoff, olen, ooff));
@@ -5069,7 +5088,7 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
if ((ux_addr.soua_magic != SOU_MAGIC_IMPLICIT) &&
(ux_addr.soua_magic != SOU_MAGIC_EXPLICIT)) {
(void) (STRLOG(TL_ID, tep->te_minor,
- 1, SL_TRACE|SL_ERROR,
+ 1, SL_TRACE | SL_ERROR,
"tl_conn_req: invalid socket magic"));
tl_error_ack(wq, mp, TSYSERR, EINVAL, T_UNITDATA_REQ);
return;
@@ -5085,7 +5104,7 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
(ooff < 0) ||
((ssize_t)olen > (msz - sizeof (struct T_unitdata_req)))) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_unitdata:invalid unit data message"));
tl_merror(wq, mp, EINVAL);
return;
@@ -5094,7 +5113,7 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
/* Options not supported unless it's a socket */
if (alen == 0 || (olen != 0 && !IS_SOCKET(tep))) {
- (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
"tl_unitdata:option use(unsupported) or zero len addr"));
tl_uderr(wq, mp, EPROTO);
return;
@@ -5102,11 +5121,11 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
#ifdef DEBUG
/*
* Mild form of ASSERT()ion to detect broken TPI apps.
- * if (! assertion)
+ * if (!assertion)
* log warning;
*/
- if (! (aoff >= (t_scalar_t)sizeof (struct T_unitdata_req))) {
- (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
+ if (!(aoff >= (t_scalar_t)sizeof (struct T_unitdata_req))) {
+ (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
"tl_unitdata:addr overlaps TPI message"));
}
#endif
@@ -5137,7 +5156,7 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
if (peer_tep == NULL) {
(void) (STRLOG(TL_ID, tep->te_minor, 3,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_unitdata:no one at destination address"));
tl_uderr(wq, mp, ECONNRESET);
return;
@@ -5153,7 +5172,7 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
}
if (peer_tep->te_state != TS_IDLE) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_unitdata:provider in invalid state"));
tl_uderr(wq, mp, EPROTO);
return;
@@ -5183,7 +5202,7 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
/*
* calculate length of message
*/
- if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED)) {
+ if (peer_tep->te_flag & (TL_SETCRED | TL_SETUCRED | TL_SOCKUCRED)) {
cr = msg_getcred(mp, &cpid);
ASSERT(cr != NULL);
@@ -5225,11 +5244,11 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
* avoid allocating a new message block.
*/
if (msz >= ui_sz && alen >= tep->te_alen &&
- !(peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED))) {
+ !(peer_tep->te_flag & (TL_SETCRED | TL_SETUCRED | TL_SOCKUCRED))) {
/*
* Reuse the original mblk. Leave options in place.
*/
- udind = (struct T_unitdata_ind *)mp->b_rptr;
+ udind = (struct T_unitdata_ind *)mp->b_rptr;
udind->PRIM_type = T_UNITDATA_IND;
udind->SRC_length = tep->te_alen;
addr_startp = mp->b_rptr + udind->SRC_offset;
@@ -5243,13 +5262,14 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
* option. Reuse the original mblk, leaving existing options in
* place.
*/
- udind = (struct T_unitdata_ind *)mp->b_rptr;
+ udind = (struct T_unitdata_ind *)mp->b_rptr;
udind->PRIM_type = T_UNITDATA_IND;
udind->SRC_length = tep->te_alen;
addr_startp = mp->b_rptr + udind->SRC_offset;
bcopy(tep->te_abuf, addr_startp, tep->te_alen);
- if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED)) {
+ if (peer_tep->te_flag &
+ (TL_SETCRED | TL_SETUCRED | TL_SOCKUCRED)) {
ASSERT(cr != NULL);
/*
* We're appending one new option here after the
@@ -5275,7 +5295,7 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
mblk_t *ui_mp;
ui_mp = allocb(ui_sz, BPRI_MED);
- if (! ui_mp) {
+ if (ui_mp == NULL) {
(void) (STRLOG(TL_ID, tep->te_minor, 4, SL_TRACE,
"tl_unitdata:allocb failure:message queued"));
tl_memrecover(wq, mp, ui_sz);
@@ -5287,7 +5307,7 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
*/
DB_TYPE(ui_mp) = M_PROTO;
ui_mp->b_wptr = ui_mp->b_rptr + ui_sz;
- udind = (struct T_unitdata_ind *)ui_mp->b_rptr;
+ udind = (struct T_unitdata_ind *)ui_mp->b_rptr;
udind->PRIM_type = T_UNITDATA_IND;
udind->SRC_offset = (t_scalar_t)sizeof (struct T_unitdata_ind);
udind->SRC_length = tep->te_alen;
@@ -5296,7 +5316,8 @@ tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
udind->OPT_offset =
(t_scalar_t)T_ALIGN(udind->SRC_offset + udind->SRC_length);
udind->OPT_length = olen;
- if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED)) {
+ if (peer_tep->te_flag &
+ (TL_SETCRED | TL_SETUCRED | TL_SOCKUCRED)) {
if (oldolen != 0) {
bcopy((void *)((uintptr_t)udreq + ooff),
@@ -5344,7 +5365,7 @@ tl_find_peer(tl_endpt_t *tep, tl_addr_t *ap)
int rc = mod_hash_find_cb(tep->te_addrhash, (mod_hash_key_t)ap,
(mod_hash_val_t *)&peer_tep, tl_find_callback);
- ASSERT(! IS_SOCKET(tep));
+ ASSERT(!IS_SOCKET(tep));
ASSERT(ap != NULL && ap->ta_alen > 0);
ASSERT(ap->ta_zoneid == tep->te_zoneid);
@@ -5424,7 +5445,7 @@ tl_get_any_addr(tl_endpt_t *tep, tl_addr_t *req)
uint32_t loopcnt; /* Limit loop to 2^32 */
ASSERT(tep->te_hash_hndl != NULL);
- ASSERT(! IS_SOCKET(tep));
+ ASSERT(!IS_SOCKET(tep));
if (tep->te_hash_hndl == NULL)
return (B_FALSE);
@@ -5520,7 +5541,7 @@ tl_cl_backenable(tl_endpt_t *tep)
for (elp = list_head(l); elp != NULL; elp = list_head(l)) {
ASSERT(tep->te_ser == elp->te_ser);
ASSERT(elp->te_flowq == tep);
- if (! elp->te_closing)
+ if (!elp->te_closing)
TL_QENABLE(elp);
elp->te_flowq = NULL;
list_remove(l, elp);
@@ -5589,7 +5610,7 @@ tl_co_unconnect(tl_endpt_t *tep)
putnext(cl_tep->te_rq, d_mp);
} else {
(void) (STRLOG(TL_ID, tep->te_minor, 3,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_co_unconnect:icmng: "
"allocb failure"));
}
@@ -5617,7 +5638,7 @@ tl_co_unconnect(tl_endpt_t *tep)
}
if (d_mp == NULL) {
(void) (STRLOG(TL_ID, tep->te_minor, 3,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_co_unconnect:outgoing:allocb failure"));
TL_UNCONNECT(tep->te_oconp);
goto discon_peer;
@@ -5682,9 +5703,9 @@ tl_co_unconnect(tl_endpt_t *tep)
peer_tep->te_state,
NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state)));
d_mp = tl_ordrel_ind_alloc();
- if (! d_mp) {
+ if (d_mp == NULL) {
(void) (STRLOG(TL_ID, tep->te_minor, 3,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_co_unconnect:connected:"
"allocb failure"));
/*
@@ -5711,13 +5732,13 @@ tl_co_unconnect(tl_endpt_t *tep)
* with error 0 to inform that the peer is gone.
*/
(void) (STRLOG(TL_ID, tep->te_minor, 3,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_co_unconnect: discon in state %d",
tep->te_state));
tl_discon_ind(peer_tep, 0);
} else {
(void) (STRLOG(TL_ID, tep->te_minor, 3,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_co_unconnect: state %d", tep->te_state));
tl_discon_ind(peer_tep, ECONNRESET);
}
@@ -5757,8 +5778,8 @@ tl_discon_ind(tl_endpt_t *tep, uint32_t reason)
* send discon ind
*/
d_mp = tl_discon_ind_alloc(reason, tep->te_seqno);
- if (! d_mp) {
- (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
+ if (d_mp == NULL) {
+ (void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE | SL_ERROR,
"tl_discon_ind:allocb failure"));
return;
}
@@ -5994,7 +6015,7 @@ tl_merror(queue_t *wq, mblk_t *mp, int error)
}
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_merror: tep=%p, err=%d", (void *)tep, error));
/*
@@ -6014,9 +6035,9 @@ tl_merror(queue_t *wq, mblk_t *mp, int error)
if ((MBLKSIZE(mp) < 1) || (DB_REF(mp) > 1)) {
freemsg(mp);
mp = allocb(1, BPRI_HI);
- if (!mp) {
+ if (mp == NULL) {
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_merror:M_PROTO: out of memory"));
return;
}
@@ -6099,7 +6120,7 @@ tl_get_opt(queue_t *wq, int level, int name, uchar_t *ptr)
switch (level) {
case SOL_SOCKET:
- if (! IS_SOCKET(tep))
+ if (!IS_SOCKET(tep))
break;
switch (name) {
case SO_RECVUCRED:
@@ -6130,17 +6151,9 @@ tl_get_opt(queue_t *wq, int level, int name, uchar_t *ptr)
/* ARGSUSED */
static int
-tl_set_opt(
- queue_t *wq,
- uint_t mgmt_flags,
- int level,
- int name,
- uint_t inlen,
- uchar_t *invalp,
- uint_t *outlenp,
- uchar_t *outvalp,
- void *thisdg_attrs,
- cred_t *cr)
+tl_set_opt(queue_t *wq, uint_t mgmt_flags, int level, int name, uint_t inlen,
+ uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp, void *thisdg_attrs,
+ cred_t *cr)
{
int error;
tl_endpt_t *tep;
@@ -6155,7 +6168,7 @@ tl_set_opt(
switch (level) {
case SOL_SOCKET:
- if (! IS_SOCKET(tep)) {
+ if (!IS_SOCKET(tep)) {
error = EINVAL;
break;
}
@@ -6170,7 +6183,7 @@ tl_set_opt(
* getpeerucred handles the connection oriented
* transports.
*/
- if (! IS_CLTS(tep)) {
+ if (!IS_CLTS(tep)) {
error = EINVAL;
break;
}
@@ -6196,7 +6209,7 @@ tl_set_opt(
* option.
*/
(void) (STRLOG(TL_ID, tep->te_minor, 1,
- SL_TRACE|SL_ERROR,
+ SL_TRACE | SL_ERROR,
"tl_set_opt: option is not supported"));
error = EPROTO;
break;
@@ -6259,12 +6272,13 @@ tl_memrecover(queue_t *wq, mblk_t *mp, size_t size)
(void) insq(wq, wq->q_first, mp);
if (tep->te_bufcid || tep->te_timoutid) {
- (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
+ (void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE | SL_ERROR,
"tl_memrecover:recover %p pending", (void *)wq));
return;
}
- if (!(tep->te_bufcid = qbufcall(wq, size, BPRI_MED, tl_buffer, wq))) {
+ tep->te_bufcid = qbufcall(wq, size, BPRI_MED, tl_buffer, wq);
+ if (tep->te_bufcid == NULL) {
tep->te_timoutid = qtimeout(wq, tl_timer, wq,
drv_usectohz(TL_BUFWAIT));
}
diff --git a/usr/src/uts/common/io/zcons.c b/usr/src/uts/common/io/zcons.c
index d4169a6720..8430e3e8cb 100644
--- a/usr/src/uts/common/io/zcons.c
+++ b/usr/src/uts/common/io/zcons.c
@@ -180,9 +180,9 @@ static int zc_detach(dev_info_t *, ddi_detach_cmd_t);
static int zc_open(queue_t *, dev_t *, int, int, cred_t *);
static int zc_close(queue_t *, int, cred_t *);
-static void zc_wput(queue_t *, mblk_t *);
-static void zc_rsrv(queue_t *);
-static void zc_wsrv(queue_t *);
+static int zc_wput(queue_t *, mblk_t *);
+static int zc_rsrv(queue_t *);
+static int zc_wsrv(queue_t *);
/*
* The instance number is encoded in the dev_t in the minor number; the lowest
@@ -221,7 +221,7 @@ static struct module_info zc_info = {
static struct qinit zc_rinit = {
NULL,
- (int (*)()) zc_rsrv,
+ zc_rsrv,
zc_open,
zc_close,
NULL,
@@ -230,8 +230,8 @@ static struct qinit zc_rinit = {
};
static struct qinit zc_winit = {
- (int (*)()) zc_wput,
- (int (*)()) zc_wsrv,
+ zc_wput,
+ zc_wsrv,
NULL,
NULL,
NULL,
@@ -259,7 +259,7 @@ DDI_DEFINE_STREAM_OPS(zc_ops, nulldev, nulldev, zc_attach, zc_detach, nodev, \
*/
static struct modldrv modldrv = {
- &mod_driverops, /* Type of module (this is a pseudo driver) */
+ &mod_driverops, /* Type of module (this is a pseudo driver) */
"Zone console driver", /* description of module */
&zc_ops /* driver ops */
};
@@ -740,7 +740,7 @@ handle_mflush(queue_t *qp, mblk_t *mp)
* enqueues the messages; in the case that something is enqueued, wsrv(9E)
* will take care of eventually shuttling I/O to the other side.
*/
-static void
+static int
zc_wput(queue_t *qp, mblk_t *mp)
{
unsigned char type = mp->b_datap->db_type;
@@ -771,11 +771,11 @@ zc_wput(queue_t *qp, mblk_t *mp)
*/
if (iocbp->ioc_count != TRANSPARENT) {
miocack(qp, mp, 0, EINVAL);
- return;
+ return (0);
}
if (zcs->zc_slave_vnode != NULL) {
miocack(qp, mp, 0, 0);
- return;
+ return (0);
}
/*
@@ -784,7 +784,7 @@ zc_wput(queue_t *qp, mblk_t *mp)
*/
if (curzone != global_zone) {
miocack(qp, mp, 0, EINVAL);
- return;
+ return (0);
}
/*
@@ -797,13 +797,13 @@ zc_wput(queue_t *qp, mblk_t *mp)
slave_filep = getf(slave_fd);
if (slave_filep == NULL) {
miocack(qp, mp, 0, EINVAL);
- return;
+ return (0);
}
if (ZC_STATE_TO_SLAVEDEV(zcs) !=
slave_filep->f_vnode->v_rdev) {
releasef(slave_fd);
miocack(qp, mp, 0, EINVAL);
- return;
+ return (0);
}
/*
@@ -820,7 +820,7 @@ zc_wput(queue_t *qp, mblk_t *mp)
mutex_exit(&slave_snodep->s_lock);
releasef(slave_fd);
miocack(qp, mp, 0, 0);
- return;
+ return (0);
case ZC_RELEASESLAVE:
/*
* Release the master's handle on the slave's vnode.
@@ -829,11 +829,11 @@ zc_wput(queue_t *qp, mblk_t *mp)
*/
if (iocbp->ioc_count != TRANSPARENT) {
miocack(qp, mp, 0, EINVAL);
- return;
+ return (0);
}
if (zcs->zc_slave_vnode == NULL) {
miocack(qp, mp, 0, 0);
- return;
+ return (0);
}
/*
@@ -842,7 +842,7 @@ zc_wput(queue_t *qp, mblk_t *mp)
*/
if (curzone != global_zone) {
miocack(qp, mp, 0, EINVAL);
- return;
+ return (0);
}
/*
@@ -856,13 +856,13 @@ zc_wput(queue_t *qp, mblk_t *mp)
slave_filep = getf(slave_fd);
if (slave_filep == NULL) {
miocack(qp, mp, 0, EINVAL);
- return;
+ return (0);
}
if (zcs->zc_slave_vnode->v_rdev !=
slave_filep->f_vnode->v_rdev) {
releasef(slave_fd);
miocack(qp, mp, 0, EINVAL);
- return;
+ return (0);
}
/*
@@ -879,7 +879,7 @@ zc_wput(queue_t *qp, mblk_t *mp)
zcs->zc_slave_vnode = NULL;
releasef(slave_fd);
miocack(qp, mp, 0, 0);
- return;
+ return (0);
default:
break;
}
@@ -898,7 +898,7 @@ zc_wput(queue_t *qp, mblk_t *mp)
freemsg(mp);
break;
}
- return;
+ return (0);
}
if (type >= QPCTL) {
@@ -920,7 +920,7 @@ zc_wput(queue_t *qp, mblk_t *mp)
break;
}
DBG1("done (hipri) wput, %s side", zc_side(qp));
- return;
+ return (0);
}
/*
@@ -935,6 +935,7 @@ zc_wput(queue_t *qp, mblk_t *mp)
(void) putq(qp, mp);
}
DBG1("done wput, %s side", zc_side(qp));
+ return (0);
}
/*
@@ -944,7 +945,7 @@ zc_wput(queue_t *qp, mblk_t *mp)
* Enable the write side of the partner. This triggers the partner to send
* messages queued on its write side to this queue's read side.
*/
-static void
+static int
zc_rsrv(queue_t *qp)
{
zc_state_t *zcs;
@@ -957,9 +958,10 @@ zc_rsrv(queue_t *qp)
ASSERT(qp == zcs->zc_master_rdq || qp == zcs->zc_slave_rdq);
if (zc_switch(qp) == NULL) {
DBG("zc_rsrv: other side isn't listening\n");
- return;
+ return (0);
}
qenable(WR(zc_switch(qp)));
+ return (0);
}
/*
@@ -970,7 +972,7 @@ zc_rsrv(queue_t *qp)
* them via putnext(). Else, if queued messages cannot be sent, leave them
* on this queue.
*/
-static void
+static int
zc_wsrv(queue_t *qp)
{
mblk_t *mp;
@@ -989,7 +991,7 @@ zc_wsrv(queue_t *qp)
freemsg(mp);
}
flushq(qp, FLUSHALL);
- return;
+ return (0);
}
/*
@@ -1011,4 +1013,5 @@ zc_wsrv(queue_t *qp)
break;
}
}
+ return (0);
}
diff --git a/usr/src/uts/common/krtld/kobj.c b/usr/src/uts/common/krtld/kobj.c
index 1038875bbc..3e9b2a85e2 100644
--- a/usr/src/uts/common/krtld/kobj.c
+++ b/usr/src/uts/common/krtld/kobj.c
@@ -2728,7 +2728,7 @@ crypto_es_hash(struct module *mp, char *hash, char *shstrtab)
" %s data size=%d\n", shstrtab + shp->sh_name,
shp->sh_size);
#endif
- ASSERT(shp->sh_addr != NULL);
+ ASSERT(shp->sh_addr != 0);
SHA1Update(&ctx, (const uint8_t *)shp->sh_addr, shp->sh_size);
}
diff --git a/usr/src/uts/common/os/brand.c b/usr/src/uts/common/os/brand.c
index ecf396f926..fa3555a82a 100644
--- a/usr/src/uts/common/os/brand.c
+++ b/usr/src/uts/common/os/brand.c
@@ -786,8 +786,8 @@ brand_solaris_elfexec(vnode_t *vp, execa_t *uap, uarg_t *args,
}
}
/* Make sure the emulator has an entry point */
- ASSERT(sed.sed_entry != NULL);
- ASSERT(sed.sed_phdr != NULL);
+ ASSERT(sed.sed_entry != 0);
+ ASSERT(sed.sed_phdr != 0);
bzero(&env, sizeof (env));
if (args->to_model == DATAMODEL_NATIVE) {
diff --git a/usr/src/uts/common/smbsrv/ndl/netlogon.ndl b/usr/src/uts/common/smbsrv/ndl/netlogon.ndl
index 907d52ec72..eded8f8415 100644
--- a/usr/src/uts/common/smbsrv/ndl/netlogon.ndl
+++ b/usr/src/uts/common/smbsrv/ndl/netlogon.ndl
@@ -22,7 +22,7 @@
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
- * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
*/
#ifndef _MLSVC_NETR_NDL_
@@ -395,6 +395,7 @@ struct netr_validation_info {
* unmarshalled. NT does not clear out the unused bytes in the
* DWORD so they must be cast to get the correct value.
*/
+ALIGN(2)
OPERATION(NETR_OPNUM_SamLogon)
struct netr_SamLogon {
IN LPTSTR servername;
diff --git a/usr/src/uts/i86pc/Makefile.files b/usr/src/uts/i86pc/Makefile.files
index e33bb30c87..dbd6f73437 100644
--- a/usr/src/uts/i86pc/Makefile.files
+++ b/usr/src/uts/i86pc/Makefile.files
@@ -71,6 +71,7 @@ CORE_OBJS += \
hment.o \
hold_page.o \
hrtimers.o \
+ ht.o \
htable.o \
hypercall.o \
hypersubr.o \
diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c
index 1d3fc4f3e9..7d70b5d4e4 100644
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c
@@ -876,7 +876,7 @@ init_pgtable:
/*
* The address in the cookie must be 4K aligned and >= table size
*/
- ASSERT(pt->pt_cookie.dmac_cookie_addr != NULL);
+ ASSERT(pt->pt_cookie.dmac_cookie_addr != (uintptr_t)NULL);
ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
ASSERT(pt->pt_cookie.dmac_size >= pt->pt_mem_realsz);
ASSERT(pt->pt_cookie.dmac_size >= pt->pt_mem_reqsz);
diff --git a/usr/src/uts/i86pc/os/ht.c b/usr/src/uts/i86pc/os/ht.c
new file mode 100644
index 0000000000..6e13eaedae
--- /dev/null
+++ b/usr/src/uts/i86pc/os/ht.c
@@ -0,0 +1,613 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2018 Joyent, Inc.
+ */
+
+/*
+ * HT exclusion: prevent a sibling in a hyper-threaded core from running in VMX
+ * non-root guest mode, when certain threads are running on the other sibling.
+ * This avoids speculation-based information leaks such as L1TF being available
+ * to the untrusted guest. The stance we take is that threads from the same
+ * zone as the guest VPCU thread are considered safe to run alongside, but all
+ * other threads (except the idle thread), and all interrupts, are unsafe. Note
+ * that due to the implementation here, there are significant sections of e.g.
+ * the dispatcher code that can run concurrently with a guest, until the thread
+ * reaches ht_mark(). This code assumes there are only two HT threads per core.
+ *
+ * The entry points are as follows:
+ *
+ * ht_mark_as_vcpu()
+ *
+ * All threads that enter guest mode (i.e. VCPU threads) need to call this at
+ * least once, which sets TS_VCPU in ->t_schedflag.
+ *
+ * ht_mark()
+ *
+ * A new ->cpu_thread is now curthread (although interrupt threads have their
+ * own separate handling). After preventing any interrupts, we will take our
+ * own CPU's spinlock and update our own state in mcpu_ht.
+ *
+ * If our sibling is poisoned (i.e. in guest mode or the little bit of code
+ * around it), and we're not compatible (that is, same zone ID, or the idle
+ * thread), then we need to ht_kick() that sibling. ht_kick() itself waits for
+ * the sibling to call ht_release(), and it will not re-enter guest mode until
+ * allowed.
+ *
+ * Note that we ignore the fact a process can change its zone ID: poisoning
+ * threads never do so, and we can ignore the other cases.
+ *
+ * ht_acquire()
+ *
+ * We are a VCPU thread about to start guest execution. Interrupts are
+ * disabled. We must have already run ht_mark() to be in this code, so there's
+ * no need to take our *own* spinlock in order to mark ourselves as CM_POISONED.
+ * Instead, we take our sibling's lock to also mark ourselves as poisoned in the
+ * sibling cpu_ht_t. This is so ht_mark() will only ever need to look at its
+ * local mcpu_ht.
+ *
+ * We'll loop here for up to ht_acquire_wait_time microseconds; this is mainly
+ * to wait out any sibling interrupt: many of them will complete quicker than
+ * this.
+ *
+ * Finally, if we succeeded in acquiring the core, we'll flush the L1 cache as
+ * mitigation against L1TF: no incompatible thread will now be able to populate
+ * the L1 cache until *we* ht_release().
+ *
+ * ht_release()
+ *
+ * Simply unpoison ourselves similarly to ht_acquire(); ht_kick() will wait for
+ * this to happen if needed.
+ *
+ * ht_begin_intr()
+ *
+ * In an interrupt prolog. We're either a hilevel interrupt, or a pinning
+ * interrupt. In both cases, we mark our interrupt depth, and potentially
+ * ht_kick(). This enforces exclusion, but doesn't otherwise modify ->ch_state:
+ * we want the dispatcher code to essentially ignore interrupts.
+ *
+ * ht_end_intr()
+ *
+ * In an interrupt epilogue *or* thread_unpin(). In the first case, we never
+ * slept, and we can simply decrement our counter. In the second case, we're an
+ * interrupt thread about to sleep: we'll still just decrement our counter, and
+ * henceforth treat the thread as a normal thread when it next gets scheduled,
+ * until it finally gets to its epilogue.
+ *
+ * ht_mark_unsafe() / ht_mark_safe()
+ *
+ * Mark the current thread as temporarily unsafe (guests should not be executing
+ * while a sibling is marked unsafe). This can be used for a thread that's
+ * otherwise considered safe, if it needs to handle potentially sensitive data.
+ * Right now, this means certain I/O handling operations that reach down into
+ * the networking and ZFS sub-systems.
+ *
+ * ht_should_run(thread, cpu)
+ *
+ * This is used by the dispatcher when making scheduling decisions: if the
+ * sibling is compatible with the given thread, we return B_TRUE. This is
+ * essentially trying to guess if any subsequent ht_acquire() will fail, by
+ * peeking at the sibling CPU's state. The peek is racy, but if we get things
+ * wrong, the "only" consequence is that ht_acquire() may lose.
+ *
+ * ht_adjust_cpu_score()
+ *
+ * Used when scoring other CPUs in disp_lowpri_cpu(). If we shouldn't run here,
+ * we'll add a small penalty to the score. This also makes sure a VCPU thread
+ * migration behaves properly.
+ */
+
+#include <sys/archsystm.h>
+#include <sys/disp.h>
+#include <sys/cmt.h>
+#include <sys/systm.h>
+#include <sys/cpu.h>
+#include <sys/var.h>
+#include <sys/xc_levels.h>
+#include <sys/cmn_err.h>
+#include <sys/sysmacros.h>
+#include <sys/x86_archext.h>
+
+/*
+ * ch_state/ch_sibstate encode a ch_mark_t in the low CS_SHIFT bits and the
+ * zone ID in the bits above it.  CS_MK builds such a state word; all macro
+ * parameters are fully parenthesized so operator precedence in the caller's
+ * argument expression cannot change the result (e.g. CS_MK(m, a | b)).
+ */
+#define	CS_SHIFT	(8)
+#define	CS_MASK		((1 << CS_SHIFT) - 1)
+#define	CS_MARK(s)	((s) & CS_MASK)
+#define	CS_ZONE(s)	((s) >> CS_SHIFT)
+#define	CS_MK(s, z)	((s) | ((z) << CS_SHIFT))
+
+/*
+ * Per-sibling run state, stored in the mark bits of ch_state/ch_sibstate.
+ * NOTE: the relative ordering matters — comparisons such as
+ * "CS_MARK(s) < CM_VCPU" in ht_should_run()/yield_to_vcpu() rely on it,
+ * as the CTASSERTs below verify.
+ */
+typedef enum ch_mark {
+	CM_IDLE = 0,	/* running CPU idle thread */
+	CM_THREAD,	/* running general non-VCPU thread */
+	CM_UNSAFE,	/* running ->t_unsafe thread */
+	CM_VCPU,	/* running VCPU thread */
+	CM_POISONED	/* running in guest */
+} ch_mark_t;
+
+/* Double-check our false-sharing padding. */
+CTASSERT(offsetof(cpu_ht_t, ch_sib) == 64);
+CTASSERT(CM_IDLE == 0);
+CTASSERT(CM_POISONED < (1 << CS_SHIFT));
+CTASSERT(CM_POISONED > CM_VCPU);
+CTASSERT(CM_VCPU > CM_UNSAFE);
+
+/* PIL assumed to host only the cross-call poke handler; see pil_needs_kick(). */
+static uint_t empty_pil = XC_CPUPOKE_PIL;
+
+/*
+ * If disabled, no HT exclusion is performed, and system is potentially
+ * vulnerable to L1TF if hyper-threading is enabled, and we don't have the "not
+ * vulnerable" CPUID bit.
+ */
+int ht_exclusion = 1;
+
+/*
+ * How long ht_acquire() will spin trying to acquire the core, in micro-seconds.
+ * This is enough time to wait out a significant proportion of interrupts.
+ */
+clock_t ht_acquire_wait_time = 64;
+
+/*
+ * Find cp's HT sibling: walk cp's CMT page groups looking for the
+ * PGHW_IPIPE (shared physical core) group, and return the other CPU in it.
+ * Returns NULL when the core has only one thread in that group.  The
+ * VERIFYs enforce the file-wide assumption of at most two HT threads per
+ * core.
+ */
+static cpu_t *
+ht_find_sibling(cpu_t *cp)
+{
+	for (uint_t i = 0; i < GROUP_SIZE(&cp->cpu_pg->cmt_pgs); i++) {
+		pg_cmt_t *pg = GROUP_ACCESS(&cp->cpu_pg->cmt_pgs, i);
+		group_t *cg = &pg->cmt_pg.pghw_pg.pg_cpus;
+
+		if (pg->cmt_pg.pghw_hw != PGHW_IPIPE)
+			continue;
+
+		/* Sole member of the core: no sibling to exclude. */
+		if (GROUP_SIZE(cg) == 1)
+			break;
+
+		VERIFY3U(GROUP_SIZE(cg), ==, 2);
+
+		if (GROUP_ACCESS(cg, 0) != cp)
+			return (GROUP_ACCESS(cg, 0));
+
+		VERIFY3P(GROUP_ACCESS(cg, 1), !=, cp);
+
+		return (GROUP_ACCESS(cg, 1));
+	}
+
+	return (NULL);
+}
+
+/*
+ * Initialize HT links. We have to be careful here not to race with
+ * ht_begin/end_intr(), which also complicates trying to do this initialization
+ * from a cross-call; hence the slightly odd approach below.
+ */
+void
+ht_init(void)
+{
+	cpu_t *scp = CPU;
+	cpu_t *cp = scp;
+	ulong_t flags;
+
+	if (!ht_exclusion)
+		return;
+
+	mutex_enter(&cpu_lock);
+
+	do {
+		/*
+		 * Pin ourselves to each online CPU in turn so the per-CPU
+		 * stores below happen locally with interrupts disabled, and
+		 * therefore cannot race that CPU's ht_begin/end_intr().
+		 */
+		thread_affinity_set(curthread, cp->cpu_id);
+		flags = intr_clear();
+
+		cp->cpu_m.mcpu_ht.ch_intr_depth = 0;
+		cp->cpu_m.mcpu_ht.ch_state = CS_MK(CM_THREAD, GLOBAL_ZONEID);
+		cp->cpu_m.mcpu_ht.ch_sibstate = CS_MK(CM_THREAD, GLOBAL_ZONEID);
+		ASSERT3P(cp->cpu_m.mcpu_ht.ch_sib, ==, NULL);
+		cp->cpu_m.mcpu_ht.ch_sib = ht_find_sibling(cp);
+
+		intr_restore(flags);
+		thread_affinity_clear(curthread);
+	} while ((cp = cp->cpu_next_onln) != scp);
+
+	mutex_exit(&cpu_lock);
+}
+
+/*
+ * We're adding an interrupt handler of some kind at the given PIL. If this
+ * happens to be the same PIL as XC_CPUPOKE_PIL, then we need to disable our
+ * pil_needs_kick() optimization, as there is now potentially an unsafe
+ * interrupt handler at that PIL. This typically won't occur, so we're not that
+ * careful about what's actually getting added, which CPU it's on, or if it gets
+ * removed. This also presumes that softints can't cover our empty_pil.
+ */
+void
+ht_intr_alloc_pil(uint_t pil)
+{
+	ASSERT(pil <= PIL_MAX);
+
+	/* Once disabled, the optimization is never re-enabled. */
+	if (empty_pil == pil)
+		empty_pil = PIL_MAX + 1;
+}
+
+/*
+ * If our sibling is also a VCPU thread from a different zone, we need one of
+ * them to give up, otherwise they will just battle each other for exclusion
+ * until they exhaust their quantum.
+ *
+ * We arbitrate between them by dispatch priority: clearly, a higher-priority
+ * thread deserves to win the acquisition. However, under CPU load, it'll be
+ * very common to see both threads with ->t_pri == 1. If so, we'll break the
+ * tie by cpu_id (which is hopefully arbitrary enough).
+ *
+ * If we lose, the VMM code will take this as a hint to call
+ * thread_affinity_set(CPU_BEST), which will likely migrate the VCPU thread
+ * somewhere else.
+ *
+ * Note that all of this state examination is racy, as we don't own any locks
+ * here.
+ */
+static boolean_t
+yield_to_vcpu(cpu_t *sib, zoneid_t zoneid)
+{
+	cpu_ht_t *sibht = &sib->cpu_m.mcpu_ht;
+	uint64_t sibstate = sibht->ch_state;
+
+	/*
+	 * If we're likely just waiting for an interrupt, don't yield.
+	 */
+	if (sibht->ch_intr_depth != 0)
+		return (B_FALSE);
+
+	/*
+	 * We're only interested in VCPUs from a different zone.
+	 */
+	if (CS_MARK(sibstate) < CM_VCPU || CS_ZONE(sibstate) == zoneid)
+		return (B_FALSE);
+
+	if (curthread->t_pri < sib->cpu_dispatch_pri)
+		return (B_TRUE);
+
+	/*
+	 * Equal priority: break the tie by cpu_id so exactly one of the two
+	 * competing VCPU threads yields.
+	 */
+	if (curthread->t_pri == sib->cpu_dispatch_pri &&
+	    CPU->cpu_id < sib->cpu_id)
+		return (B_TRUE);
+
+	return (B_FALSE);
+}
+
+/*
+ * Racy peek at whether the sibling's current state would allow a guest from
+ * 'zoneid' to run on this core: a sibling in an interrupt or marked unsafe
+ * never is; an idle sibling always is; otherwise the zones must match.
+ * Callers re-check under sibht->ch_lock before relying on the answer.
+ */
+static inline boolean_t
+sibling_compatible(cpu_ht_t *sibht, zoneid_t zoneid)
+{
+	uint64_t sibstate = sibht->ch_state;
+
+	if (sibht->ch_intr_depth != 0)
+		return (B_FALSE);
+
+	if (CS_MARK(sibstate) == CM_UNSAFE)
+		return (B_FALSE);
+
+	if (CS_MARK(sibstate) == CM_IDLE)
+		return (B_TRUE);
+
+	return (CS_ZONE(sibstate) == zoneid);
+}
+
+/*
+ * Attempt to acquire exclusive use of this core for guest execution.
+ * Returns 1 on success (both sides marked CM_POISONED, L1 cache flushed),
+ * 0 if we timed out waiting for an incompatible sibling, and -1 if we
+ * should instead yield this CPU to a competing VCPU on the sibling.
+ * Caller must have interrupts disabled and have called ht_mark_as_vcpu().
+ */
+int
+ht_acquire(void)
+{
+	clock_t wait = ht_acquire_wait_time;
+	cpu_ht_t *ht = &CPU->cpu_m.mcpu_ht;
+	zoneid_t zoneid = getzoneid();
+	cpu_ht_t *sibht;
+	int ret = 0;
+
+	ASSERT(!interrupts_enabled());
+
+	if (ht->ch_sib == NULL) {
+		/* For the "sequential" L1TF case. */
+		spec_l1d_flush();
+		return (1);
+	}
+
+	sibht = &ht->ch_sib->cpu_m.mcpu_ht;
+
+	/* A VCPU thread should never change zone. */
+	ASSERT3U(CS_ZONE(ht->ch_state), ==, zoneid);
+	ASSERT3U(CS_MARK(ht->ch_state), ==, CM_VCPU);
+	ASSERT3U(zoneid, !=, GLOBAL_ZONEID);
+	ASSERT3U(curthread->t_preempt, >=, 1);
+	ASSERT(curthread->t_schedflag & TS_VCPU);
+
+	while (ret == 0 && wait > 0) {
+
+		if (yield_to_vcpu(ht->ch_sib, zoneid)) {
+			ret = -1;
+			break;
+		}
+
+		/*
+		 * Cheap unlocked peek first; only take the sibling's lock and
+		 * re-check when it looks like we can win.
+		 */
+		if (sibling_compatible(sibht, zoneid)) {
+			lock_set(&sibht->ch_lock);
+
+			if (sibling_compatible(sibht, zoneid)) {
+				ht->ch_state = CS_MK(CM_POISONED, zoneid);
+				sibht->ch_sibstate = CS_MK(CM_POISONED, zoneid);
+				membar_enter();
+				ret = 1;
+			}
+
+			lock_clear(&sibht->ch_lock);
+		} else {
+			drv_usecwait(10);
+			wait -= 10;
+		}
+	}
+
+	DTRACE_PROBE4(ht__acquire, int, ret, uint64_t, sibht->ch_state,
+	    uint64_t, sibht->ch_intr_depth, clock_t, wait);
+
+	if (ret == 1)
+		spec_l1d_flush();
+
+	return (ret);
+}
+
+/*
+ * Undo ht_acquire(): demote our state from CM_POISONED back to CM_VCPU in
+ * both our own ch_state and the sibling's ch_sibstate, under the sibling's
+ * lock.  ht_kick() spins waiting for this transition.
+ */
+void
+ht_release(void)
+{
+	cpu_ht_t *ht = &CPU->cpu_m.mcpu_ht;
+	zoneid_t zoneid = getzoneid();
+	cpu_ht_t *sibht;
+
+	ASSERT(!interrupts_enabled());
+
+	if (ht->ch_sib == NULL)
+		return;
+
+	ASSERT3U(zoneid, !=, GLOBAL_ZONEID);
+	ASSERT3U(CS_ZONE(ht->ch_state), ==, zoneid);
+	ASSERT3U(CS_MARK(ht->ch_state), ==, CM_POISONED);
+	ASSERT3U(curthread->t_preempt, >=, 1);
+
+	sibht = &ht->ch_sib->cpu_m.mcpu_ht;
+
+	lock_set(&sibht->ch_lock);
+
+	ht->ch_state = CS_MK(CM_VCPU, zoneid);
+	sibht->ch_sibstate = CS_MK(CM_VCPU, zoneid);
+	membar_producer();
+
+	lock_clear(&sibht->ch_lock);
+}
+
+/*
+ * Force an incompatible poisoned sibling out of guest mode: poke it, then
+ * spin until its state shows it has left (via ht_release()) or become
+ * compatible.  Entered and exited with ht->ch_lock held, but the lock is
+ * dropped while spinning so the sibling's ht_release() can take it to
+ * update our ch_sibstate.
+ */
+static void
+ht_kick(cpu_ht_t *ht, zoneid_t zoneid)
+{
+	uint64_t sibstate;
+
+	ASSERT(LOCK_HELD(&ht->ch_lock));
+	ASSERT(!interrupts_enabled());
+
+	poke_cpu(ht->ch_sib->cpu_id);
+
+	membar_consumer();
+	sibstate = ht->ch_sibstate;
+
+	if (CS_MARK(sibstate) != CM_POISONED || CS_ZONE(sibstate) == zoneid)
+		return;
+
+	lock_clear(&ht->ch_lock);
+
+	/*
+	 * Spin until we can see the sibling has been kicked out or is otherwise
+	 * OK.
+	 */
+	for (;;) {
+		membar_consumer();
+		sibstate = ht->ch_sibstate;
+
+		if (CS_MARK(sibstate) != CM_POISONED ||
+		    CS_ZONE(sibstate) == zoneid)
+			break;
+
+		SMT_PAUSE();
+	}
+
+	lock_set(&ht->ch_lock);
+}
+
+/*
+ * Return B_TRUE unless this PIL is known to host only the harmless
+ * cross-call poke handler (see empty_pil / ht_intr_alloc_pil()).
+ */
+static boolean_t
+pil_needs_kick(uint_t pil)
+{
+	return (pil != empty_pil);
+}
+
+void
+ht_begin_intr(uint_t pil)
+{
+	ulong_t flags;
+	cpu_ht_t *ht;
+
+	ASSERT(pil <= PIL_MAX);
+
+	flags = intr_clear();
+	ht = &CPU->cpu_m.mcpu_ht;
+
+	if (ht->ch_sib == NULL) {
+		intr_restore(flags);
+		return;
+	}
+
+	/*
+	 * Only the outermost interrupt at a kick-worthy PIL needs to force a
+	 * poisoned sibling out of guest mode; nested interrupts have already
+	 * done so.
+	 */
+	if (atomic_inc_64_nv(&ht->ch_intr_depth) == 1 && pil_needs_kick(pil)) {
+		lock_set(&ht->ch_lock);
+
+		membar_consumer();
+
+		if (CS_MARK(ht->ch_sibstate) == CM_POISONED)
+			ht_kick(ht, GLOBAL_ZONEID);
+
+		lock_clear(&ht->ch_lock);
+	}
+
+	intr_restore(flags);
+}
+
+/*
+ * Interrupt epilogue, or thread_unpin(): drop one level of interrupt depth.
+ * No state change or kick is needed here; ht_mark() handles whatever thread
+ * runs next.
+ */
+void
+ht_end_intr(void)
+{
+	ulong_t flags;
+	cpu_ht_t *ht;
+
+	flags = intr_clear();
+	ht = &CPU->cpu_m.mcpu_ht;
+
+	if (ht->ch_sib == NULL) {
+		intr_restore(flags);
+		return;
+	}
+
+	ASSERT3U(ht->ch_intr_depth, >, 0);
+	atomic_dec_64(&ht->ch_intr_depth);
+
+	intr_restore(flags);
+}
+
+/*
+ * With our ch_lock held: is the sibling currently in guest mode in a way
+ * that conflicts with the state we just recorded (we are unsafe, or the
+ * guest belongs to a different zone)?
+ */
+static inline boolean_t
+ht_need_kick(cpu_ht_t *ht, zoneid_t zoneid)
+{
+	membar_consumer();
+
+	if (CS_MARK(ht->ch_sibstate) != CM_POISONED)
+		return (B_FALSE);
+
+	if (CS_MARK(ht->ch_state) == CM_UNSAFE)
+		return (B_TRUE);
+
+	return (CS_ZONE(ht->ch_sibstate) != zoneid);
+}
+
+/*
+ * A new thread has become cpu_thread on this CPU: classify it (idle /
+ * thread / unsafe / VCPU) and record that, with its zone, in our local
+ * mcpu_ht state under our own lock; kick the sibling out of guest mode if
+ * it is no longer compatible with what we are about to run.
+ */
+void
+ht_mark(void)
+{
+	zoneid_t zoneid = getzoneid();
+	kthread_t *t = curthread;
+	ulong_t flags;
+	cpu_ht_t *ht;
+	cpu_t *cp;
+
+	flags = intr_clear();
+
+	cp = CPU;
+	ht = &cp->cpu_m.mcpu_ht;
+
+	if (ht->ch_sib == NULL) {
+		intr_restore(flags);
+		return;
+	}
+
+	lock_set(&ht->ch_lock);
+
+	/*
+	 * If we were a nested interrupt and went through the resume_from_intr()
+	 * path, we can now be resuming to a pinning interrupt thread; in which
+	 * case, skip marking, until we later resume to a "real" thread.
+	 */
+	if (ht->ch_intr_depth > 0) {
+		ASSERT3P(t->t_intr, !=, NULL);
+
+		if (ht_need_kick(ht, zoneid))
+			ht_kick(ht, zoneid);
+		goto out;
+	}
+
+	if (t == t->t_cpu->cpu_idle_thread) {
+		/* The idle thread is always safe to run alongside a guest. */
+		ASSERT3U(zoneid, ==, GLOBAL_ZONEID);
+		ht->ch_state = CS_MK(CM_IDLE, zoneid);
+	} else {
+		uint64_t state = CM_THREAD;
+
+		if (t->t_unsafe)
+			state = CM_UNSAFE;
+		else if (t->t_schedflag & TS_VCPU)
+			state = CM_VCPU;
+
+		ht->ch_state = CS_MK(state, zoneid);
+
+		if (ht_need_kick(ht, zoneid))
+			ht_kick(ht, zoneid);
+	}
+
+out:
+	membar_producer();
+	lock_clear(&ht->ch_lock);
+	intr_restore(flags);
+}
+
+/* Mark curthread temporarily unsafe to run alongside a guest; nestable. */
+void
+ht_begin_unsafe(void)
+{
+	curthread->t_unsafe++;
+	ht_mark();
+}
+
+/* Undo one level of ht_begin_unsafe() and republish our state. */
+void
+ht_end_unsafe(void)
+{
+	ASSERT3U(curthread->t_unsafe, >, 0);
+	curthread->t_unsafe--;
+	ht_mark();
+}
+
+/* Permanently flag curthread as a VCPU thread; required before ht_acquire(). */
+void
+ht_mark_as_vcpu(void)
+{
+	thread_lock(curthread);
+	curthread->t_schedflag |= TS_VCPU;
+	ht_mark();
+	thread_unlock(curthread);
+}
+
+/*
+ * Dispatcher hint: would thread t be allowed to run on cp, given the
+ * (racily read) state of cp's sibling?  A wrong guess here only means a
+ * later ht_acquire() may lose.
+ */
+boolean_t
+ht_should_run(kthread_t *t, cpu_t *cp)
+{
+	uint64_t sibstate;
+	cpu_t *sib;
+
+	if (t == t->t_cpu->cpu_idle_thread)
+		return (B_TRUE);
+
+	if ((sib = cp->cpu_m.mcpu_ht.ch_sib) == NULL)
+		return (B_TRUE);
+
+	sibstate = sib->cpu_m.mcpu_ht.ch_state;
+
+	if ((t->t_schedflag & TS_VCPU)) {
+		/* A VCPU thread is stricter: it must also avoid CM_VCPU. */
+		if (CS_MARK(sibstate) == CM_IDLE)
+			return (B_TRUE);
+		if (CS_MARK(sibstate) == CM_UNSAFE)
+			return (B_FALSE);
+		return (CS_ZONE(sibstate) == ttozone(t)->zone_id);
+	}
+
+	if (CS_MARK(sibstate) < CM_VCPU)
+		return (B_TRUE);
+
+	return (CS_ZONE(sibstate) == ttozone(t)->zone_id);
+}
+
+/*
+ * Adjust cp's disp_lowpri_cpu() score for thread t: add a small penalty if
+ * t should not run alongside cp's sibling, and strongly penalize a VCPU
+ * thread's own current CPU so a post-ht_acquire()-loss migration actually
+ * goes elsewhere.
+ */
+pri_t
+ht_adjust_cpu_score(kthread_t *t, struct cpu *cp, pri_t score)
+{
+	if (ht_should_run(t, cp))
+		return (score);
+
+	/*
+	 * If we're a VCPU thread scoring our current CPU, we are most likely
+	 * asking to be rescheduled elsewhere after losing ht_acquire().  In
+	 * this case, the current CPU is not a good choice, most likely, and we
+	 * should go elsewhere.
+	 */
+	if ((t->t_schedflag & TS_VCPU) && cp == t->t_cpu && score < 0)
+		return ((v.v_maxsyspri + 1) * 2);
+
+	return (score + 1);
+}
diff --git a/usr/src/uts/i86pc/os/mp_startup.c b/usr/src/uts/i86pc/os/mp_startup.c
index 844b1279f3..7ecbbd3147 100644
--- a/usr/src/uts/i86pc/os/mp_startup.c
+++ b/usr/src/uts/i86pc/os/mp_startup.c
@@ -551,7 +551,7 @@ mp_cpu_unconfigure_common(struct cpu *cp, int error)
trap_trace_ctl_t *ttc = &trap_trace_ctl[cp->cpu_id];
kmem_free((void *)ttc->ttc_first, trap_trace_bufsize);
- ttc->ttc_first = NULL;
+ ttc->ttc_first = (uintptr_t)NULL;
}
#endif
diff --git a/usr/src/uts/i86pc/os/trap.c b/usr/src/uts/i86pc/os/trap.c
index 33fe34f116..c97255fae7 100644
--- a/usr/src/uts/i86pc/os/trap.c
+++ b/usr/src/uts/i86pc/os/trap.c
@@ -2100,7 +2100,7 @@ dump_ttrace(void)
for (i = 0; i < n; i++) {
ttc = &trap_trace_ctl[i];
- if (ttc->ttc_first == NULL)
+ if (ttc->ttc_first == (uintptr_t)NULL)
continue;
current = ttc->ttc_next - sizeof (trap_trace_rec_t);
@@ -2116,7 +2116,7 @@ dump_ttrace(void)
current =
ttc->ttc_limit - sizeof (trap_trace_rec_t);
- if (current == NULL)
+ if (current == (uintptr_t)NULL)
continue;
rec = (trap_trace_rec_t *)current;
diff --git a/usr/src/uts/i86pc/sys/ht.h b/usr/src/uts/i86pc/sys/ht.h
new file mode 100644
index 0000000000..8bb5a0d6d2
--- /dev/null
+++ b/usr/src/uts/i86pc/sys/ht.h
@@ -0,0 +1,47 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2018 Joyent, Inc.
+ */
+
+#ifndef _SYS_HT_H
+#define _SYS_HT_H
+
+#include <sys/types.h>
+#include <sys/thread.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct cpu;
+
+extern void ht_init(void);
+extern void ht_intr_alloc_pil(uint_t);
+
+extern int ht_acquire(void);
+extern void ht_release(void);
+extern void ht_mark(void);
+extern void ht_begin_unsafe(void);
+extern void ht_end_unsafe(void);
+extern void ht_begin_intr(uint_t);
+extern void ht_end_intr(void);
+extern void ht_mark_as_vcpu(void);
+
+extern boolean_t ht_should_run(kthread_t *, struct cpu *);
+extern pri_t ht_adjust_cpu_score(kthread_t *, struct cpu *, pri_t);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HT_H */
diff --git a/usr/src/uts/i86xpv/Makefile.files b/usr/src/uts/i86xpv/Makefile.files
index dff7d50755..a040ecf0da 100644
--- a/usr/src/uts/i86xpv/Makefile.files
+++ b/usr/src/uts/i86xpv/Makefile.files
@@ -61,6 +61,7 @@ CORE_OBJS += \
hment.o \
hold_page.o \
hrtimers.o \
+ ht.o \
htable.o \
i86_mmu.o \
ibft.o \
@@ -238,7 +239,7 @@ INC_PATH += -I$(UTSBASE)/i86xpv -I$(UTSBASE)/i86pc -I$(SRC)/common \
# since only C headers are included when #defined(__lint) is true.
#
-ASSYM_DEPS += \
+ASSYM_DEPS += \
copy.o \
desctbls_asm.o \
ddi_i86_asm.o \
diff --git a/usr/src/uts/intel/ia32/ml/swtch.s b/usr/src/uts/intel/ia32/ml/swtch.s
index 4f27c58be8..d0696652b7 100644
--- a/usr/src/uts/intel/ia32/ml/swtch.s
+++ b/usr/src/uts/intel/ia32/ml/swtch.s
@@ -39,7 +39,6 @@
#include <sys/segments.h>
#include <sys/psw.h>
-#if !defined(__lint)
#include "assym.h"
/*
@@ -533,5 +532,3 @@ resume_from_intr_return:
movq %rax, TSS_RSP0(%r9)
ret
SET_SIZE(thread_splitstack_cleanup)
-
-#endif /* !__lint */
diff --git a/usr/src/uts/intel/zfs/Makefile b/usr/src/uts/intel/zfs/Makefile
index f1715120dd..18569006d8 100644
--- a/usr/src/uts/intel/zfs/Makefile
+++ b/usr/src/uts/intel/zfs/Makefile
@@ -29,7 +29,7 @@
#
# Copyright (c) 2016 by Delphix. All rights reserved.
#
-# Copyright (c) 2018, Joyent, Inc.
+# Copyright 2019 Joyent, Inc.
#
# Path to the base of the uts directory tree (usually /usr/src/uts).
@@ -72,6 +72,7 @@ INC_PATH += -I$(UTSBASE)/common/fs/zfs
INC_PATH += -I$(UTSBASE)/common/fs/zfs/lua
INC_PATH += -I$(SRC)/common
INC_PATH += -I$(COMMONBASE)/zfs
+INC_PATH += -I$(UTSBASE)/i86pc
CPPFLAGS += -I$(UTSBASE)/i86pc
C99LMODE= -Xc99=%all
diff --git a/usr/src/uts/sun4/sys/ht.h b/usr/src/uts/sun4/sys/ht.h
new file mode 100644
index 0000000000..6d7b3e37cc
--- /dev/null
+++ b/usr/src/uts/sun4/sys/ht.h
@@ -0,0 +1,38 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2018 Joyent, Inc.
+ */
+
+#ifndef _SYS_HT_H
+#define _SYS_HT_H
+
+#include <sys/types.h>
+#include <sys/thread.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * HT exclusion is implemented only for x86 (see i86pc/os/ht.c); these are
+ * no-op stubs for the entry points reached from common code on sun4.
+ * NOTE(review): only a subset of the i86pc interface is stubbed here —
+ * presumably the remaining entry points are never called from sun4 common
+ * paths; confirm against the common-code callers.
+ */
+#define ht_init() {}
+
+#define ht_should_run(t, c) (B_TRUE)
+#define ht_adjust_cpu_score(t, c, p) (p)
+#define ht_begin_unsafe(void) {}
+#define ht_end_unsafe(void) {}
+#define ht_end_intr(void) {}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HT_H */
diff --git a/usr/src/uts/sun4v/sys/Makefile b/usr/src/uts/sun4v/sys/Makefile
index 025ce96e9a..7061019749 100644
--- a/usr/src/uts/sun4v/sys/Makefile
+++ b/usr/src/uts/sun4v/sys/Makefile
@@ -46,6 +46,7 @@ SUN4_HDRS= \
dvma.h \
eeprom.h \
fcode.h \
+ ht.h \
idprom.h \
intr.h \
intreg.h \