author    yl150051 <none@none>  2006-11-13 20:44:19 -0800
committer yl150051 <none@none>  2006-11-13 20:44:19 -0800
commit    8347601bcb0a439f6e50fc36b4039a73d08700e1 (patch)
tree      91c7b38a0f362892987099abe649246d234437b0 /usr
parent    fa52d01bb7e683bd3f348e8daeacb561fc8b0a52 (diff)
PSARC/2006/190 Large Send Offload (LSO)
6390838 XGE need to support adaptive TX by switching COPY/DMA according to MBLKL(mp)
6394197 Solaris need to support LSO (Large Send Offload)
6466394 Minor logic error in tcp_send_data()
6467558 Need to integrate HAL 2.0.6765 to improve rx performance
Diffstat (limited to 'usr')
-rw-r--r--  usr/src/uts/common/inet/ip.h | 20
-rw-r--r--  usr/src/uts/common/inet/ip/ip.c | 139
-rw-r--r--  usr/src/uts/common/inet/ip/ip_if.c | 174
-rw-r--r--  usr/src/uts/common/inet/ip_impl.h | 31
-rw-r--r--  usr/src/uts/common/inet/ipclassifier.h | 4
-rw-r--r--  usr/src/uts/common/inet/tcp.h | 6
-rw-r--r--  usr/src/uts/common/inet/tcp/tcp.c | 664
-rw-r--r--  usr/src/uts/common/inet/tcp/tcp_fusion.c | 2
-rw-r--r--  usr/src/uts/common/inet/tcp_impl.h | 4
-rw-r--r--  usr/src/uts/common/inet/udp/udp.c | 10
-rw-r--r--  usr/src/uts/common/io/dld/dld_proto.c | 63
-rw-r--r--  usr/src/uts/common/io/dld/dld_str.c | 13
-rw-r--r--  usr/src/uts/common/io/xge/drv/xge.c | 153
-rw-r--r--  usr/src/uts/common/io/xge/drv/xge_osdep.h | 26
-rw-r--r--  usr/src/uts/common/io/xge/drv/xgell.c | 358
-rw-r--r--  usr/src/uts/common/io/xge/drv/xgell.h | 153
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/build-version.h | 4
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/version.h | 13
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xge-debug.h | 209
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xge-defs.h | 39
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xge-list.h | 35
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xge-os-pal.h | 16
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xge-queue.h | 26
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-channel.h | 55
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-config.h | 153
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-device.h | 176
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-driver.h | 24
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-event.h | 15
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-fifo.h | 23
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-mgmt.h | 36
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-mgmtaux.h | 16
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-mm.h | 28
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-regs.h | 196
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-ring.h | 19
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-stats.h | 193
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal-types.h | 92
-rw-r--r--  usr/src/uts/common/io/xge/hal/include/xgehal.h | 12
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xge-queue.c | 42
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-channel-fp.c | 208
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-channel.c | 403
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-config.c | 126
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-device-fp.c | 1141
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-device.c | 1179
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-driver.c | 13
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-fifo-fp.c | 90
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-fifo.c | 63
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-mgmt.c | 516
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-mgmtaux.c | 322
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-mm.c | 120
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-ring-fp.c | 630
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-ring.c | 101
-rw-r--r--  usr/src/uts/common/io/xge/hal/xgehal/xgehal-stats.c | 49
-rw-r--r--  usr/src/uts/common/os/strsubr.c | 3
-rw-r--r--  usr/src/uts/common/sys/dld_impl.h | 6
-rw-r--r--  usr/src/uts/common/sys/dlpi.h | 33
-rw-r--r--  usr/src/uts/common/sys/mac.h | 24
-rw-r--r--  usr/src/uts/common/sys/pattr.h | 13
-rw-r--r--  usr/src/uts/common/sys/strsubr.h | 2
-rw-r--r--  usr/src/uts/intel/xge/Makefile | 17
-rw-r--r--  usr/src/uts/sparc/xge/Makefile | 17
60 files changed, 5881 insertions(+), 2437 deletions(-)
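Before the per-file changes, it helps to state the contract this patch establishes end to end: rather than segmenting outbound data into mss-sized packets in software, TCP may hand the driver one oversized packet tagged with an LSO flag and the desired MSS (via DB_LSOFLAGS/DB_LSOMSS, see the tcp.c changes below), and the NIC performs the segmentation. The user-space sketch below models that handoff; the toy_pkt type and the HW_LSO value are illustrative stand-ins, not the real mblk/dblk layout.

#include <stdint.h>
#include <stdio.h>

#define	HW_LSO	0x01			/* illustrative value */

/* Toy stand-in for the per-dblk LSO metadata (DB_LSOFLAGS/DB_LSOMSS). */
typedef struct toy_pkt {
	uint32_t len;			/* payload length, may exceed mss */
	uint32_t lso_flags;
	uint32_t lso_mss;
} toy_pkt_t;

/* TCP side: tag one oversized packet instead of segmenting in software. */
static void
tag_for_lso(toy_pkt_t *pkt, uint32_t mss)
{
	pkt->lso_flags |= HW_LSO;
	pkt->lso_mss = mss;
}

/* NIC side: how many wire segments the hardware will produce. */
static uint32_t
wire_segments(const toy_pkt_t *pkt)
{
	if (!(pkt->lso_flags & HW_LSO))
		return (1);
	return ((pkt->len + pkt->lso_mss - 1) / pkt->lso_mss);
}

int
main(void)
{
	toy_pkt_t pkt = { 58400, 0, 0 };

	tag_for_lso(&pkt, 1460);
	(void) printf("one %u-byte send -> %u wire segments\n",
	    (unsigned)pkt.len, (unsigned)wire_segments(&pkt));	/* 40 */
	return (0);
}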
diff --git a/usr/src/uts/common/inet/ip.h b/usr/src/uts/common/inet/ip.h
index 4e588a67c7..9b3fcf0b00 100644
--- a/usr/src/uts/common/inet/ip.h
+++ b/usr/src/uts/common/inet/ip.h
@@ -1674,6 +1674,7 @@ extern ill_g_head_t ill_g_heads[]; /* ILL List Head */
#define ILL_CAPAB_ZEROCOPY 0x10 /* Zero-copy */
#define ILL_CAPAB_POLL 0x20 /* Polling Toggle */
#define ILL_CAPAB_SOFT_RING 0x40 /* Soft_Ring capability */
+#define ILL_CAPAB_LSO 0x80 /* Large Segment Offload */
/*
* Per-ill Multidata Transmit capabilities.
@@ -1705,6 +1706,11 @@ typedef struct ill_dls_capab_s ill_dls_capab_t;
*/
typedef struct ill_rx_ring ill_rx_ring_t;
+/*
+ * Per-ill Large Segment Offload capabilities.
+ */
+typedef struct ill_lso_capab_s ill_lso_capab_t;
+
/* The following are ill_state_flags */
#define ILL_LL_SUBNET_PENDING 0x01 /* Waiting for DL_INFO_ACK from drv */
#define ILL_CONDEMNED 0x02 /* No more new ref's to the ILL */
@@ -1867,6 +1873,7 @@ typedef struct ill_s {
ill_hcksum_capab_t *ill_hcksum_capab; /* H/W cksumming capabilities */
ill_zerocopy_capab_t *ill_zerocopy_capab; /* Zero-copy capabilities */
ill_dls_capab_t *ill_dls_capab; /* Polling, soft ring capabilities */
+ ill_lso_capab_t *ill_lso_capab; /* Large Segment Offload capabilities */
/*
* New fields for IPv6
@@ -2895,8 +2902,9 @@ extern vmem_t *ip_minor_arena;
#define ip_defend_interval ip_param_arr[53].ip_param_value
#define ip_dup_recovery ip_param_arr[54].ip_param_value
#define ip_restrict_interzone_loopback ip_param_arr[55].ip_param_value
+#define ip_lso_outbound ip_param_arr[56].ip_param_value
#ifdef DEBUG
-#define ipv6_drop_inbound_icmpv6 ip_param_arr[56].ip_param_value
+#define ipv6_drop_inbound_icmpv6 ip_param_arr[57].ip_param_value
#else
#define ipv6_drop_inbound_icmpv6 0
#endif
@@ -3264,6 +3272,9 @@ struct pdesc_s;
extern mblk_t *ip_mdinfo_alloc(ill_mdt_capab_t *);
extern mblk_t *ip_mdinfo_return(ire_t *, conn_t *, char *, ill_mdt_capab_t *);
+extern mblk_t *ip_lsoinfo_alloc(ill_lso_capab_t *);
+extern mblk_t *ip_lsoinfo_return(ire_t *, conn_t *, char *,
+ ill_lso_capab_t *);
extern uint_t ip_md_cksum(struct pdesc_s *, int, uint_t);
extern boolean_t ip_md_addr_attr(struct multidata_s *, struct pdesc_s *,
const mblk_t *);
@@ -3368,6 +3379,13 @@ struct ill_zerocopy_capab_s {
uint_t ill_zerocopy_flags; /* capabilities */
};
+struct ill_lso_capab_s {
+ uint_t ill_lso_version; /* interface version */
+ uint_t ill_lso_on; /* on/off switch for LSO on this ILL */
+ uint_t ill_lso_flags; /* capabilities */
+ uint_t ill_lso_max; /* maximum size of payload */
+};
+
/* Possible ill_states */
#define ILL_RING_INPROC 3 /* Being assigned to squeue */
#define ILL_RING_INUSE 2 /* Already Assigned to Rx Ring */
diff --git a/usr/src/uts/common/inet/ip/ip.c b/usr/src/uts/common/inet/ip/ip.c
index 080c503d27..b3fc7d93ad 100644
--- a/usr/src/uts/common/inet/ip/ip.c
+++ b/usr/src/uts/common/inet/ip/ip.c
@@ -983,6 +983,7 @@ static ipparam_t lcl_param_arr[] = {
{ 0, 999999, 30, "ip_defend_interval" },
{ 0, 3600000, 300000, "ip_dup_recovery" },
{ 0, 1, 1, "ip_restrict_interzone_loopback" },
+ { 0, 1, 1, "ip_lso_outbound" },
#ifdef DEBUG
{ 0, 1, 0, "ip6_drop_inbound_icmpv6" },
#endif
@@ -4763,7 +4764,8 @@ ip_bind_connected(conn_t *connp, mblk_t *mp, ipaddr_t *src_addrp,
mblk_t *policy_mp;
ire_t *sire = NULL;
ire_t *md_dst_ire = NULL;
- ill_t *md_ill = NULL;
+ ire_t *lso_dst_ire = NULL;
+ ill_t *ill = NULL;
zoneid_t zoneid;
ipaddr_t src_addr = *src_addrp;
@@ -4898,8 +4900,8 @@ ip_bind_connected(conn_t *connp, mblk_t *mp, ipaddr_t *src_addrp,
}
/*
- * See if we should notify ULP about MDT; we do this whether or not
- * ire_requested is TRUE, in order to handle active connects; MDT
+ * See if we should notify ULP about LSO/MDT; we do this whether or not
+ * ire_requested is TRUE, in order to handle active connects; LSO/MDT
* eligibility tests for passive connects are handled separately
* through tcp_adapt_ire(). We do this before the source address
* selection, because dst_ire may change after a call to
@@ -4907,14 +4909,19 @@ ip_bind_connected(conn_t *connp, mblk_t *mp, ipaddr_t *src_addrp,
* packet for this connection may not actually go through
* dst_ire->ire_stq, and the exact IRE can only be known after
* calling ip_newroute(). This is why we further check on the
- * IRE during Multidata packet transmission in tcp_multisend().
+ * IRE during LSO/Multidata packet transmission in
+ * tcp_lsosend()/tcp_multisend().
*/
- if (ip_multidata_outbound && !ipsec_policy_set && dst_ire != NULL &&
+ if (!ipsec_policy_set && dst_ire != NULL &&
!(dst_ire->ire_type & (IRE_LOCAL | IRE_LOOPBACK | IRE_BROADCAST)) &&
- (md_ill = ire_to_ill(dst_ire), md_ill != NULL) &&
- ILL_MDT_CAPABLE(md_ill)) {
- md_dst_ire = dst_ire;
- IRE_REFHOLD(md_dst_ire);
+ (ill = ire_to_ill(dst_ire), ill != NULL)) {
+ if (ip_lso_outbound && ILL_LSO_CAPABLE(ill)) {
+ lso_dst_ire = dst_ire;
+ IRE_REFHOLD(lso_dst_ire);
+ } else if (ip_multidata_outbound && ILL_MDT_CAPABLE(ill)) {
+ md_dst_ire = dst_ire;
+ IRE_REFHOLD(md_dst_ire);
+ }
}
if (dst_ire != NULL &&
@@ -5155,20 +5162,26 @@ ip_bind_connected(conn_t *connp, mblk_t *mp, ipaddr_t *src_addrp,
if (error == 0) {
connp->conn_fully_bound = B_TRUE;
/*
- * Our initial checks for MDT have passed; the IRE is not
+ * Our initial checks for LSO/MDT have passed; the IRE is not
* LOCAL/LOOPBACK/BROADCAST, and the link layer seems to
- * be supporting MDT. Pass the IRE, IPC and ILL into
- * ip_mdinfo_return(), which performs further checks
- * against them and upon success, returns the MDT info
+ * be supporting LSO/MDT. Pass the IRE, IPC and ILL into
+ * ip_xxinfo_return(), which performs further checks
+ * against them and upon success, returns the LSO/MDT info
* mblk which we will attach to the bind acknowledgment.
*/
- if (md_dst_ire != NULL) {
+ if (lso_dst_ire != NULL) {
+ mblk_t *lsoinfo_mp;
+
+ ASSERT(ill->ill_lso_capab != NULL);
+ if ((lsoinfo_mp = ip_lsoinfo_return(lso_dst_ire, connp,
+ ill->ill_name, ill->ill_lso_capab)) != NULL)
+ linkb(mp, lsoinfo_mp);
+ } else if (md_dst_ire != NULL) {
mblk_t *mdinfo_mp;
- ASSERT(md_ill != NULL);
- ASSERT(md_ill->ill_mdt_capab != NULL);
+ ASSERT(ill->ill_mdt_capab != NULL);
if ((mdinfo_mp = ip_mdinfo_return(md_dst_ire, connp,
- md_ill->ill_name, md_ill->ill_mdt_capab)) != NULL)
+ ill->ill_name, ill->ill_mdt_capab)) != NULL)
linkb(mp, mdinfo_mp);
}
}
@@ -5191,6 +5204,8 @@ bad_addr:
IRE_REFRELE(sire);
if (md_dst_ire != NULL)
IRE_REFRELE(md_dst_ire);
+ if (lso_dst_ire != NULL)
+ IRE_REFRELE(lso_dst_ire);
return (error);
}
@@ -22874,7 +22889,7 @@ ip_mdinfo_return(ire_t *dst_ire, conn_t *connp, char *ill_name,
}
/* socket option(s) present? */
- if (!CONN_IS_MD_FASTPATH(connp))
+ if (!CONN_IS_LSO_MD_FASTPATH(connp))
break;
rc = B_TRUE;
@@ -22907,6 +22922,94 @@ ip_mdinfo_return(ire_t *dst_ire, conn_t *connp, char *ill_name,
}
/*
+ * Routine to allocate a message that is used to notify the ULP about LSO.
+ * The caller may provide a pointer to the link-layer LSO capabilities,
+ * or NULL if LSO is to be disabled on the stream.
+ */
+mblk_t *
+ip_lsoinfo_alloc(ill_lso_capab_t *isrc)
+{
+ mblk_t *mp;
+ ip_lso_info_t *lsoi;
+ ill_lso_capab_t *idst;
+
+ if ((mp = allocb(sizeof (*lsoi), BPRI_HI)) != NULL) {
+ DB_TYPE(mp) = M_CTL;
+ mp->b_wptr = mp->b_rptr + sizeof (*lsoi);
+ lsoi = (ip_lso_info_t *)mp->b_rptr;
+ lsoi->lso_info_id = LSO_IOC_INFO_UPDATE;
+ idst = &(lsoi->lso_capab);
+
+ /*
+ * If the caller provides us with the capability, copy
+ * it over into our notification message; otherwise
+ * we zero out the capability portion.
+ */
+ if (isrc != NULL)
+ bcopy((caddr_t)isrc, (caddr_t)idst, sizeof (*idst));
+ else
+ bzero((caddr_t)idst, sizeof (*idst));
+ }
+ return (mp);
+}
+
+/*
+ * Routine which determines whether LSO can be enabled on the destination
+ * IRE and IPC combination, and if so, allocates and returns the LSO
+ * notification mblk that may be used by the ULP. We also check whether LSO
+ * needs to be turned back on when restrictions that previously prohibited
+ * its use have been lifted. This gets called during IP
+ * and ULP binding.
+ */
+mblk_t *
+ip_lsoinfo_return(ire_t *dst_ire, conn_t *connp, char *ill_name,
+ ill_lso_capab_t *lso_cap)
+{
+ mblk_t *mp;
+
+ ASSERT(dst_ire != NULL);
+ ASSERT(connp != NULL);
+ ASSERT(lso_cap != NULL);
+
+ connp->conn_lso_ok = B_TRUE;
+
+ if ((connp->conn_ulp != IPPROTO_TCP) ||
+ CONN_IPSEC_OUT_ENCAPSULATED(connp) ||
+ (dst_ire->ire_flags & RTF_MULTIRT) ||
+ !CONN_IS_LSO_MD_FASTPATH(connp) ||
+ (IPP_ENABLED(IPP_LOCAL_OUT))) {
+ connp->conn_lso_ok = B_FALSE;
+ if (IPP_ENABLED(IPP_LOCAL_OUT)) {
+ /*
+ * Disable LSO for this and all future connections going
+ * over the interface.
+ */
+ lso_cap->ill_lso_on = 0;
+ }
+ }
+
+ if (!connp->conn_lso_ok)
+ return (NULL);
+ else if (!lso_cap->ill_lso_on) {
+ /*
+ * If LSO was previously turned off and we can now do LSO
+ * (due to IPQoS policy removal, etc.), then enable it
+ * for this interface.
+ */
+ lso_cap->ill_lso_on = 1;
+ ip1dbg(("ip_mdinfo_return: reenabling LSO for interface %s\n",
+ ill_name));
+ }
+
+ /* Allocate the LSO info mblk */
+ if ((mp = ip_lsoinfo_alloc(lso_cap)) == NULL)
+ ip0dbg(("ip_lsoinfo_return: can't enable LSO for "
+ "conn %p on %s (ENOMEM)\n", (void *)connp, ill_name));
+
+ return (mp);
+}
+
+/*
* Create destination address attribute, and fill it with the physical
* destination address and SAP taken from the template DL_UNITDATA_REQ
* message block.
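The ip_bind_connected() hunk above sets a fixed precedence between the two offloads: when the interface supports both and both tunables are on, LSO is chosen and MDT becomes the fallback. A minimal user-space model of that decision, where the booleans stand in for the ip_lso_outbound/ip_multidata_outbound tunables and the ILL_LSO_CAPABLE()/ILL_MDT_CAPABLE() checks:

#include <stdbool.h>
#include <stdio.h>

typedef enum { PICK_NONE, PICK_LSO, PICK_MDT } pick_t;

/* Mirror of the preference order added to ip_bind_connected(). */
static pick_t
pick_offload(bool lso_tunable, bool ill_lso, bool mdt_tunable, bool ill_mdt)
{
	if (lso_tunable && ill_lso)
		return (PICK_LSO);
	if (mdt_tunable && ill_mdt)
		return (PICK_MDT);
	return (PICK_NONE);
}

int
main(void)
{
	/* Both capable: LSO is preferred. */
	(void) printf("%d\n", pick_offload(true, true, true, true));	/* 1 */
	/* LSO disabled by tunable: fall back to MDT. */
	(void) printf("%d\n", pick_offload(false, true, true, true));	/* 2 */
	return (0);
}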
diff --git a/usr/src/uts/common/inet/ip/ip_if.c b/usr/src/uts/common/inet/ip/ip_if.c
index af650a20cc..8efb3a7c04 100644
--- a/usr/src/uts/common/inet/ip/ip_if.c
+++ b/usr/src/uts/common/inet/ip/ip_if.c
@@ -234,7 +234,8 @@ static void ill_capability_hcksum_reset(ill_t *, mblk_t **);
static void ill_capability_zerocopy_ack(ill_t *, mblk_t *,
dl_capability_sub_t *);
static void ill_capability_zerocopy_reset(ill_t *, mblk_t **);
-
+static void ill_capability_lso_ack(ill_t *, mblk_t *, dl_capability_sub_t *);
+static void ill_capability_lso_reset(ill_t *, mblk_t **);
static void ill_capability_dls_ack(ill_t *, mblk_t *, dl_capability_sub_t *);
static mac_resource_handle_t ill_ring_add(void *, mac_resource_t *);
static void ill_capability_dls_reset(ill_t *, mblk_t **);
@@ -874,6 +875,11 @@ ill_delete_tail(ill_t *ill)
ill->ill_zerocopy_capab = NULL;
}
+ if (ill->ill_lso_capab != NULL) {
+ kmem_free(ill->ill_lso_capab, sizeof (ill_lso_capab_t));
+ ill->ill_lso_capab = NULL;
+ }
+
if (ill->ill_dls_capab != NULL) {
CONN_DEC_REF(ill->ill_dls_capab->ill_unbind_conn);
ill->ill_dls_capab->ill_unbind_conn = NULL;
@@ -1853,6 +1859,7 @@ ill_capability_reset(ill_t *ill)
ill_capability_zerocopy_reset(ill, &sc_mp);
ill_capability_ipsec_reset(ill, &sc_mp);
ill_capability_dls_reset(ill, &sc_mp);
+ ill_capability_lso_reset(ill, &sc_mp);
/* Nothing to send down in order to disable the capabilities? */
if (sc_mp == NULL)
@@ -2685,6 +2692,9 @@ ill_capability_dispatch(ill_t *ill, mblk_t *mp, dl_capability_sub_t *subp,
if (SOFT_RINGS_ENABLED())
ill_capability_dls_ack(ill, mp, subp);
break;
+ case DL_CAPAB_LSO:
+ ill_capability_lso_ack(ill, mp, subp);
+ break;
default:
ip1dbg(("ill_capability_dispatch: unknown capab type %d\n",
subp->dl_cap));
@@ -3437,6 +3447,168 @@ ill_capability_zerocopy_reset(ill_t *ill, mblk_t **sc_mp)
}
/*
+ * Process Large Segment Offload capability negotiation ack received from a
+ * DLS Provider. isub must point to the sub-capability (DL_CAPAB_LSO) of a
+ * DL_CAPABILITY_ACK message.
+ */
+static void
+ill_capability_lso_ack(ill_t *ill, mblk_t *mp, dl_capability_sub_t *isub)
+{
+ mblk_t *nmp = NULL;
+ dl_capability_req_t *oc;
+ dl_capab_lso_t *lso_ic, *lso_oc;
+ ill_lso_capab_t **ill_lso_capab;
+ uint_t sub_dl_cap = isub->dl_cap;
+ uint8_t *capend;
+
+ ASSERT(sub_dl_cap == DL_CAPAB_LSO);
+
+ ill_lso_capab = (ill_lso_capab_t **)&ill->ill_lso_capab;
+
+ /*
+ * Note: range checks here are not absolutely sufficient to
+ * make us robust against malformed messages sent by drivers;
+ * this is in keeping with the rest of IP's dlpi handling.
+ * (Remember, it's coming from something else in the kernel
+ * address space)
+ */
+ capend = (uint8_t *)(isub + 1) + isub->dl_length;
+ if (capend > mp->b_wptr) {
+ cmn_err(CE_WARN, "ill_capability_lso_ack: "
+ "malformed sub-capability too long for mblk");
+ return;
+ }
+
+ lso_ic = (dl_capab_lso_t *)(isub + 1);
+
+ if (lso_ic->lso_version != LSO_VERSION_1) {
+ cmn_err(CE_CONT, "ill_capability_lso_ack: "
+ "unsupported LSO sub-capability (version %d, expected %d)",
+ lso_ic->lso_version, LSO_VERSION_1);
+ return;
+ }
+
+ if (!dlcapabcheckqid(&lso_ic->lso_mid, ill->ill_lmod_rq)) {
+ ip1dbg(("ill_capability_lso_ack: mid token for LSO "
+ "capability isn't as expected; pass-thru module(s) "
+ "detected, discarding capability\n"));
+ return;
+ }
+
+ if ((lso_ic->lso_flags & LSO_TX_ENABLE) &&
+ (lso_ic->lso_flags & LSO_TX_BASIC_TCP_IPV4)) {
+ if (*ill_lso_capab == NULL) {
+ *ill_lso_capab = kmem_zalloc(sizeof (ill_lso_capab_t),
+ KM_NOSLEEP);
+
+ if (*ill_lso_capab == NULL) {
+ cmn_err(CE_WARN, "ill_capability_lso_ack: "
+ "could not enable LSO version %d "
+ "for %s (ENOMEM)\n", LSO_VERSION_1,
+ ill->ill_name);
+ return;
+ }
+ }
+
+ (*ill_lso_capab)->ill_lso_version = lso_ic->lso_version;
+ (*ill_lso_capab)->ill_lso_flags = lso_ic->lso_flags;
+ (*ill_lso_capab)->ill_lso_max = lso_ic->lso_max;
+ ill->ill_capabilities |= ILL_CAPAB_LSO;
+
+ ip1dbg(("ill_capability_lso_ack: interface %s "
+ "has enabled LSO\n ", ill->ill_name));
+ } else if (lso_ic->lso_flags & LSO_TX_BASIC_TCP_IPV4) {
+ uint_t size;
+ uchar_t *rptr;
+
+ size = sizeof (dl_capability_req_t) +
+ sizeof (dl_capability_sub_t) + sizeof (dl_capab_lso_t);
+
+ if ((nmp = ip_dlpi_alloc(size, DL_CAPABILITY_REQ)) == NULL) {
+ cmn_err(CE_WARN, "ill_capability_lso_ack: "
+ "could not enable LSO for %s (ENOMEM)\n",
+ ill->ill_name);
+ return;
+ }
+
+ rptr = nmp->b_rptr;
+ /* initialize dl_capability_req_t */
+ oc = (dl_capability_req_t *)nmp->b_rptr;
+ oc->dl_sub_offset = sizeof (dl_capability_req_t);
+ oc->dl_sub_length = sizeof (dl_capability_sub_t) +
+ sizeof (dl_capab_lso_t);
+ nmp->b_rptr += sizeof (dl_capability_req_t);
+
+ /* initialize dl_capability_sub_t */
+ bcopy(isub, nmp->b_rptr, sizeof (*isub));
+ nmp->b_rptr += sizeof (*isub);
+
+ /* initialize dl_capab_lso_t */
+ lso_oc = (dl_capab_lso_t *)nmp->b_rptr;
+ bcopy(lso_ic, lso_oc, sizeof (*lso_ic));
+
+ nmp->b_rptr = rptr;
+ ASSERT(nmp->b_wptr == (nmp->b_rptr + size));
+
+ /* set ENABLE flag */
+ lso_oc->lso_flags |= LSO_TX_ENABLE;
+
+ /* nmp points to a DL_CAPABILITY_REQ message to enable LSO */
+ ill_dlpi_send(ill, nmp);
+ } else {
+ ip1dbg(("ill_capability_lso_ack: interface %s has "
+ "advertised %x LSO capability flags\n",
+ ill->ill_name, lso_ic->lso_flags));
+ }
+}
+
+
+static void
+ill_capability_lso_reset(ill_t *ill, mblk_t **sc_mp)
+{
+ mblk_t *mp;
+ dl_capab_lso_t *lso_subcap;
+ dl_capability_sub_t *dl_subcap;
+ int size;
+
+ if (!(ill->ill_capabilities & ILL_CAPAB_LSO))
+ return;
+
+ ASSERT(ill->ill_lso_capab != NULL);
+ /*
+ * Clear the capability flag for LSO but retain the
+ * ill_lso_capab structure since it's possible that another
+ * thread is still referring to it. The structure only gets
+ * deallocated when we destroy the ill.
+ */
+ ill->ill_capabilities &= ~ILL_CAPAB_LSO;
+
+ size = sizeof (*dl_subcap) + sizeof (*lso_subcap);
+
+ mp = allocb(size, BPRI_HI);
+ if (mp == NULL) {
+ ip1dbg(("ill_capability_lso_reset: unable to allocate "
+ "request to disable LSO\n"));
+ return;
+ }
+
+ mp->b_wptr = mp->b_rptr + size;
+
+ dl_subcap = (dl_capability_sub_t *)mp->b_rptr;
+ dl_subcap->dl_cap = DL_CAPAB_LSO;
+ dl_subcap->dl_length = sizeof (*lso_subcap);
+
+ lso_subcap = (dl_capab_lso_t *)(dl_subcap + 1);
+ lso_subcap->lso_version = ill->ill_lso_capab->ill_lso_version;
+ lso_subcap->lso_flags = 0;
+
+ if (*sc_mp != NULL)
+ linkb(*sc_mp, mp);
+ else
+ *sc_mp = mp;
+}
+
+/*
* Consume a new-style hardware capabilities negotiation ack.
* Called from ip_rput_dlpi_writer().
*/
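ill_capability_lso_ack() above takes one of three actions depending on the flags a driver advertises: record an already-enabled capability, send a DL_CAPABILITY_REQ back down with LSO_TX_ENABLE set, or just log the flags. Below is a compact model of that branch; the flag names come from the diff itself, but the numeric values here are assumptions for illustration, not the real <sys/dlpi.h> definitions.

#include <stdint.h>
#include <stdio.h>

#define	LSO_TX_ENABLE		0x01	/* illustrative value */
#define	LSO_TX_BASIC_TCP_IPV4	0x02	/* illustrative value */

typedef enum {
	ACK_ENABLED,	/* driver already enabled LSO: record capability */
	ACK_REQUEST,	/* capable but not enabled: send DL_CAPABILITY_REQ */
	ACK_IGNORE	/* no usable TCP/IPv4 LSO support advertised */
} ack_action_t;

/* Mirror of the three-way branch in ill_capability_lso_ack(). */
static ack_action_t
lso_ack_action(uint32_t lso_flags)
{
	if ((lso_flags & LSO_TX_ENABLE) &&
	    (lso_flags & LSO_TX_BASIC_TCP_IPV4))
		return (ACK_ENABLED);
	if (lso_flags & LSO_TX_BASIC_TCP_IPV4)
		return (ACK_REQUEST);
	return (ACK_IGNORE);
}

int
main(void)
{
	(void) printf("%d %d %d\n",
	    lso_ack_action(LSO_TX_ENABLE | LSO_TX_BASIC_TCP_IPV4), /* 0 */
	    lso_ack_action(LSO_TX_BASIC_TCP_IPV4),                 /* 1 */
	    lso_ack_action(0));                                    /* 2 */
	return (0);
}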
diff --git a/usr/src/uts/common/inet/ip_impl.h b/usr/src/uts/common/inet/ip_impl.h
index 77e8ab36e0..a91febb854 100644
--- a/usr/src/uts/common/inet/ip_impl.h
+++ b/usr/src/uts/common/inet/ip_impl.h
@@ -359,11 +359,38 @@ typedef struct ip_mdt_info_s {
ill->ill_mdt_capab->ill_mdt_version == MDT_VERSION_2 && \
ill->ill_mdt_capab->ill_mdt_on != 0)
+#define ILL_LSO_CAPABLE(ill) \
+ (((ill)->ill_capabilities & ILL_CAPAB_LSO) != 0)
+
+/*
+ * ioctl identifier and structure for Large Segment Offload
+ * private M_CTL communication from IP to ULP.
+ */
+#define LSO_IOC_INFO_UPDATE (('L' << 24) + ('S' << 16) + ('O' << 8))
+
+typedef struct ip_lso_info_s {
+ uint_t lso_info_id; /* LSO_IOC_INFO_UPDATE */
+ ill_lso_capab_t lso_capab; /* ILL LSO capabilities */
+} ip_lso_info_t;
+
+/*
+ * Macro that determines whether or not a given ILL is allowed for LSO.
+ */
+#define ILL_LSO_USABLE(ill) \
+ (ILL_LSO_CAPABLE(ill) && \
+ ill->ill_lso_capab != NULL && \
+ ill->ill_lso_capab->ill_lso_version == LSO_VERSION_1 && \
+ ill->ill_lso_capab->ill_lso_on != 0)
+
+#define ILL_LSO_TCP_USABLE(ill) \
+ (ILL_LSO_USABLE(ill) && \
+ ill->ill_lso_capab->ill_lso_flags & LSO_TX_BASIC_TCP_IPV4)
+
/*
* Macro that determines whether or not a given CONN may be considered
- * for fast path prior to proceeding further with Multidata.
+ * for fast path prior to proceeding further with LSO or Multidata.
*/
-#define CONN_IS_MD_FASTPATH(connp) \
+#define CONN_IS_LSO_MD_FASTPATH(connp) \
((connp)->conn_dontroute == 0 && /* SO_DONTROUTE */ \
!((connp)->conn_nexthop_set) && /* IP_NEXTHOP */ \
(connp)->conn_nofailover_ill == NULL && /* IPIF_NOFAILOVER */ \
diff --git a/usr/src/uts/common/inet/ipclassifier.h b/usr/src/uts/common/inet/ipclassifier.h
index 03d510fdbf..48e5fc845f 100644
--- a/usr/src/uts/common/inet/ipclassifier.h
+++ b/usr/src/uts/common/inet/ipclassifier.h
@@ -173,7 +173,9 @@ struct conn_s {
conn_recvslla : 1, /* IP_RECVSLLA option */
conn_mdt_ok : 1, /* MDT is permitted */
conn_nexthop_set : 1,
- conn_allzones : 1; /* SO_ALLZONES */
+ conn_allzones : 1, /* SO_ALLZONES */
+
+ conn_lso_ok : 1; /* LSO is usable */
tcp_t *conn_tcp; /* Pointer to the tcp struct */
struct udp_s *conn_udp; /* Pointer to the udp struct */
diff --git a/usr/src/uts/common/inet/tcp.h b/usr/src/uts/common/inet/tcp.h
index 7552e53600..67bf7aa4f2 100644
--- a/usr/src/uts/common/inet/tcp.h
+++ b/usr/src/uts/common/inet/tcp.h
@@ -112,6 +112,7 @@ typedef struct tcphdr_s {
#define TCP_PORT_LEN sizeof (in_port_t)
#define TCP_MAX_WINSHIFT 14
#define TCP_MAX_LARGEWIN (TCP_MAXWIN << TCP_MAX_WINSHIFT)
+#define TCP_MAX_LSO_LENGTH (IP_MAXPACKET - TCP_MAX_COMBINED_HEADER_LENGTH)
#define TCPIP_HDR_LENGTH(mp, n) \
(n) = IPH_HDR_LENGTH((mp)->b_rptr), \
@@ -288,7 +289,8 @@ typedef struct tcp_s {
tcp_send_discon_ind : 1, /* TLI accept err, send discon ind */
tcp_cork : 1, /* tcp_cork option */
tcp_tconnind_started : 1, /* conn_ind message is being sent */
- tcp_pad_to_bit_31 : 17;
+ tcp_lso : 1, /* Lower layer is capable of LSO */
+ tcp_pad_to_bit_31 : 16;
uint32_t tcp_if_mtu; /* Outgoing interface MTU. */
@@ -492,6 +494,8 @@ typedef struct tcp_s {
uint_t tcp_mdt_hdr_tail; /* trailing header fragment extra space */
int tcp_mdt_max_pld; /* maximum payload buffers per Multidata */
+ uint32_t tcp_lso_max; /* maximum LSO payload */
+
uint32_t tcp_ofo_fin_seq; /* Recv out of order FIN seq num */
uint32_t tcp_cwr_snd_max;
uint_t tcp_drop_opt_ack_cnt; /* # tcp generated optmgmt */
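TCP_MAX_LSO_LENGTH above is the cap applied to tcp_lso_max (see tcp_lso_update() in tcp.c below). Assuming the customary values IP_MAXPACKET = 65535 and TCP_MAX_COMBINED_HEADER_LENGTH = 120 (up to 60 bytes of IP header plus options and 60 bytes of TCP header plus options; these constants are assumptions here, not quoted from the diff), the bound works out as:

#include <stdio.h>

/* Assumed values; see the note above. */
#define	IP_MAXPACKET			65535
#define	TCP_MAX_COMBINED_HEADER_LENGTH	120

#define	TCP_MAX_LSO_LENGTH \
	(IP_MAXPACKET - TCP_MAX_COMBINED_HEADER_LENGTH)

int
main(void)
{
	/* Largest LSO payload a single IP datagram can carry. */
	(void) printf("%d\n", TCP_MAX_LSO_LENGTH);	/* 65415 */
	return (0);
}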
diff --git a/usr/src/uts/common/inet/tcp/tcp.c b/usr/src/uts/common/inet/tcp/tcp.c
index aa80594733..cbe35bd58c 100644
--- a/usr/src/uts/common/inet/tcp/tcp.c
+++ b/usr/src/uts/common/inet/tcp/tcp.c
@@ -418,6 +418,10 @@ tcp_stat_t tcp_statistics = {
{ "tcp_fusion_rrw_plugged", KSTAT_DATA_UINT64 },
{ "tcp_in_ack_unsent_drop", KSTAT_DATA_UINT64 },
{ "tcp_sock_fallback", KSTAT_DATA_UINT64 },
+ { "tcp_lso_enabled", KSTAT_DATA_UINT64 },
+ { "tcp_lso_disabled", KSTAT_DATA_UINT64 },
+ { "tcp_lso_times", KSTAT_DATA_UINT64 },
+ { "tcp_lso_pkt_out", KSTAT_DATA_UINT64 },
};
static kstat_t *tcp_kstat;
@@ -962,6 +966,8 @@ static int tcp_mdt_add_attrs(multidata_t *, const mblk_t *,
const uint32_t, const uint32_t);
static void tcp_multisend_data(tcp_t *, ire_t *, const ill_t *, mblk_t *,
const uint_t, const uint_t, boolean_t *);
+static mblk_t *tcp_lso_info_mp(mblk_t *);
+static void tcp_lso_update(tcp_t *, ill_lso_capab_t *);
static void tcp_send_data(tcp_t *, queue_t *, mblk_t *);
extern mblk_t *tcp_timermp_alloc(int);
extern void tcp_timermp_free(tcp_t *);
@@ -4241,6 +4247,9 @@ tcp_close_output(void *arg, mblk_t *mp, void *arg2)
connp->conn_mdt_ok = B_FALSE;
tcp->tcp_mdt = B_FALSE;
+ connp->conn_lso_ok = B_FALSE;
+ tcp->tcp_lso = B_FALSE;
+
msg = NULL;
switch (tcp->tcp_state) {
case TCPS_CLOSED:
@@ -8122,6 +8131,8 @@ tcp_reinit_values(tcp)
tcp->tcp_fuse_rcv_unread_hiwater = 0;
tcp->tcp_fuse_rcv_unread_cnt = 0;
+ tcp->tcp_lso = B_FALSE;
+
tcp->tcp_in_ack_unsent = 0;
tcp->tcp_cork = B_FALSE;
tcp->tcp_tconnind_started = B_FALSE;
@@ -9280,7 +9291,7 @@ tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk)
if (tcp->tcp_fused) {
maxpsz = tcp_fuse_maxpsz_set(tcp);
mss = INFPSZ;
- } else if (tcp->tcp_mdt || tcp->tcp_maxpsz == 0) {
+ } else if (tcp->tcp_mdt || tcp->tcp_lso || tcp->tcp_maxpsz == 0) {
/*
* Set the sd_qn_maxpsz according to the socket send buffer
* size, and sd_maxblk to INFPSZ (-1). This will essentially
@@ -11743,6 +11754,17 @@ tcp_rput_common(tcp_t *tcp, mblk_t *mp)
}
freemsg(mp);
return;
+ case LSO_IOC_INFO_UPDATE:
+ /*
+ * Handle LSO information update; the message is
+ * freed below once the update has been applied.
+ */
+ if (tcp->tcp_connp->conn_lso_ok) {
+ tcp_lso_update(tcp,
+ &((ip_lso_info_t *)mp->b_rptr)->lso_capab);
+ }
+ freemsg(mp);
+ return;
default:
break;
}
@@ -15436,6 +15458,7 @@ tcp_rput_other(tcp_t *tcp, mblk_t *mp)
uint32_t mss;
mblk_t *syn_mp;
mblk_t *mdti;
+ mblk_t *lsoi;
int retval;
mblk_t *ire_mp;
@@ -15459,6 +15482,16 @@ tcp_rput_other(tcp_t *tcp, mblk_t *mp)
freemsg(mdti);
}
+ /*
+ * If an LSO info mblk is present, update the LSO state
+ * in tcp and then free the message.
+ */
+ if ((lsoi = tcp_lso_info_mp(mp)) != NULL) {
+ tcp_lso_update(tcp, &((ip_lso_info_t *)lsoi->
+ b_rptr)->lso_capab);
+ freemsg(lsoi);
+ }
+
/* Get the IRE, if we had requested for it */
ire_mp = tcp_ire_mp(mp);
@@ -18473,61 +18506,26 @@ tcp_zcopy_notify(tcp_t *tcp)
mutex_exit(&stp->sd_lock);
}
-static void
-tcp_send_data(tcp_t *tcp, queue_t *q, mblk_t *mp)
+static boolean_t
+tcp_send_find_ire(tcp_t *tcp, ipaddr_t *dst, ire_t **irep)
{
- ipha_t *ipha;
- ipaddr_t src;
- ipaddr_t dst;
- uint32_t cksum;
- ire_t *ire;
- uint16_t *up;
- ill_t *ill;
- conn_t *connp = tcp->tcp_connp;
- uint32_t hcksum_txflags = 0;
- mblk_t *ire_fp_mp;
- uint_t ire_fp_mp_len;
-
- ASSERT(DB_TYPE(mp) == M_DATA);
-
- if (DB_CRED(mp) == NULL)
- mblk_setcred(mp, CONN_CRED(connp));
-
- ipha = (ipha_t *)mp->b_rptr;
- src = ipha->ipha_src;
- dst = ipha->ipha_dst;
+ ire_t *ire;
+ conn_t *connp = tcp->tcp_connp;
- /*
- * Drop off fast path for IPv6 and also if options are present or
- * we need to resolve a TS label.
- */
- if (tcp->tcp_ipversion != IPV4_VERSION ||
- !IPCL_IS_CONNECTED(connp) ||
- (connp->conn_flags & IPCL_CHECK_POLICY) != 0 ||
- connp->conn_dontroute ||
- connp->conn_nexthop_set ||
- connp->conn_xmit_if_ill != NULL ||
- connp->conn_nofailover_ill != NULL ||
- !connp->conn_ulp_labeled ||
- ipha->ipha_ident == IP_HDR_INCLUDED ||
- ipha->ipha_version_and_hdr_length != IP_SIMPLE_HDR_VERSION ||
- IPP_ENABLED(IPP_LOCAL_OUT)) {
- if (tcp->tcp_snd_zcopy_aware)
- mp = tcp_zcopy_disable(tcp, mp);
- TCP_STAT(tcp_ip_send);
- CALL_IP_WPUT(connp, q, mp);
- return;
- }
mutex_enter(&connp->conn_lock);
ire = connp->conn_ire_cache;
ASSERT(!(connp->conn_state_flags & CONN_INCIPIENT));
- if (ire != NULL && ire->ire_addr == dst &&
+
+ if ((ire != NULL) &&
+ (((dst != NULL) && (ire->ire_addr == *dst)) || ((dst == NULL) &&
+ IN6_ARE_ADDR_EQUAL(&ire->ire_addr_v6, &tcp->tcp_ip6h->ip6_dst))) &&
!(ire->ire_marks & IRE_MARK_CONDEMNED)) {
IRE_REFHOLD(ire);
mutex_exit(&connp->conn_lock);
} else {
boolean_t cached = B_FALSE;
+ ts_label_t *tsl;
/* force a recheck later on */
tcp->tcp_ire_ill_check_done = B_FALSE;
@@ -18535,17 +18533,20 @@ tcp_send_data(tcp_t *tcp, queue_t *q, mblk_t *mp)
TCP_DBGSTAT(tcp_ire_null1);
connp->conn_ire_cache = NULL;
mutex_exit(&connp->conn_lock);
+
if (ire != NULL)
IRE_REFRELE_NOTR(ire);
- ire = ire_cache_lookup(dst, connp->conn_zoneid,
- MBLK_GETLABEL(mp));
+
+ tsl = crgetlabel(CONN_CRED(connp));
+ ire = (dst ? ire_cache_lookup(*dst, connp->conn_zoneid, tsl) :
+ ire_cache_lookup_v6(&tcp->tcp_ip6h->ip6_dst,
+ connp->conn_zoneid, tsl));
+
if (ire == NULL) {
- if (tcp->tcp_snd_zcopy_aware)
- mp = tcp_zcopy_backoff(tcp, mp, 0);
TCP_STAT(tcp_ire_null);
- CALL_IP_WPUT(connp, q, mp);
- return;
+ return (B_FALSE);
}
+
IRE_REFHOLD_NOTR(ire);
/*
* Since we are inside the squeue, there cannot be another
@@ -18577,22 +18578,45 @@ tcp_send_data(tcp_t *tcp, queue_t *q, mblk_t *mp)
*/
}
- /*
- * The following if case identifies whether or not
- * we are forced to take the slowpath.
- */
- if (ire->ire_flags & RTF_MULTIRT ||
- ire->ire_stq == NULL ||
- ire->ire_max_frag < ntohs(ipha->ipha_length) ||
- (ire->ire_nce != NULL &&
- (ire_fp_mp = ire->ire_nce->nce_fp_mp) == NULL) ||
- (ire_fp_mp_len = MBLKL(ire_fp_mp)) > MBLKHEAD(mp)) {
- if (tcp->tcp_snd_zcopy_aware)
- mp = tcp_zcopy_disable(tcp, mp);
+ *irep = ire;
+
+ return (B_TRUE);
+}
+
+/*
+ * Called from tcp_send() or tcp_send_data() to find a workable IRE
+ * and ILL.
+ *
+ * Returns B_TRUE on success; B_FALSE if no usable IRE/ILL was found.
+ */
+static boolean_t
+tcp_send_find_ire_ill(tcp_t *tcp, mblk_t *mp, ire_t **irep, ill_t **illp)
+{
+ ipha_t *ipha;
+ ipaddr_t dst;
+ ire_t *ire;
+ ill_t *ill;
+ conn_t *connp = tcp->tcp_connp;
+ mblk_t *ire_fp_mp;
+
+ if (mp != NULL)
+ ipha = (ipha_t *)mp->b_rptr;
+ else
+ ipha = tcp->tcp_ipha;
+ dst = ipha->ipha_dst;
+
+ if (!tcp_send_find_ire(tcp, &dst, &ire))
+ return (B_FALSE);
+
+ if ((ire->ire_flags & RTF_MULTIRT) ||
+ (ire->ire_stq == NULL) ||
+ (ire->ire_nce == NULL) ||
+ ((ire_fp_mp = ire->ire_nce->nce_fp_mp) == NULL) ||
+ ((mp != NULL) && (ire->ire_max_frag < ntohs(ipha->ipha_length) ||
+ MBLKL(ire_fp_mp) > MBLKHEAD(mp)))) {
TCP_STAT(tcp_ip_ire_send);
IRE_REFRELE(ire);
- CALL_IP_WPUT(connp, q, mp);
- return;
+ return (B_FALSE);
}
ill = ire_to_ill(ire);
@@ -18611,6 +18635,64 @@ tcp_send_data(tcp_t *tcp, queue_t *q, mblk_t *mp)
tcp->tcp_ire_ill_check_done = B_TRUE;
}
+ *irep = ire;
+ *illp = ill;
+
+ return (B_TRUE);
+}
+
+static void
+tcp_send_data(tcp_t *tcp, queue_t *q, mblk_t *mp)
+{
+ ipha_t *ipha;
+ ipaddr_t src;
+ ipaddr_t dst;
+ uint32_t cksum;
+ ire_t *ire;
+ uint16_t *up;
+ ill_t *ill;
+ conn_t *connp = tcp->tcp_connp;
+ uint32_t hcksum_txflags = 0;
+ mblk_t *ire_fp_mp;
+ uint_t ire_fp_mp_len;
+
+ ASSERT(DB_TYPE(mp) == M_DATA);
+
+ if (DB_CRED(mp) == NULL)
+ mblk_setcred(mp, CONN_CRED(connp));
+
+ ipha = (ipha_t *)mp->b_rptr;
+ src = ipha->ipha_src;
+ dst = ipha->ipha_dst;
+
+ /*
+ * Drop off fast path for IPv6 and also if options are present or
+ * we need to resolve a TS label.
+ */
+ if (tcp->tcp_ipversion != IPV4_VERSION ||
+ !IPCL_IS_CONNECTED(connp) ||
+ !CONN_IS_LSO_MD_FASTPATH(connp) ||
+ (connp->conn_flags & IPCL_CHECK_POLICY) != 0 ||
+ !connp->conn_ulp_labeled ||
+ ipha->ipha_ident == IP_HDR_INCLUDED ||
+ ipha->ipha_version_and_hdr_length != IP_SIMPLE_HDR_VERSION ||
+ IPP_ENABLED(IPP_LOCAL_OUT)) {
+ if (tcp->tcp_snd_zcopy_aware)
+ mp = tcp_zcopy_disable(tcp, mp);
+ TCP_STAT(tcp_ip_send);
+ CALL_IP_WPUT(connp, q, mp);
+ return;
+ }
+
+ if (!tcp_send_find_ire_ill(tcp, mp, &ire, &ill)) {
+ if (tcp->tcp_snd_zcopy_aware)
+ mp = tcp_zcopy_backoff(tcp, mp, 0);
+ CALL_IP_WPUT(connp, q, mp);
+ return;
+ }
+ ire_fp_mp = ire->ire_nce->nce_fp_mp;
+ ire_fp_mp_len = MBLKL(ire_fp_mp);
+
ASSERT(ipha->ipha_ident == 0 || ipha->ipha_ident == IP_HDR_INCLUDED);
ipha->ipha_ident = (uint16_t)atomic_add_32_nv(&ire->ire_ident, 1);
#ifndef _BIG_ENDIAN
@@ -18618,16 +18700,25 @@ tcp_send_data(tcp_t *tcp, queue_t *q, mblk_t *mp)
#endif
/*
- * Check to see if we need to re-enable MDT for this connection
+ * Check to see if we need to re-enable LSO/MDT for this connection
* because it was previously disabled due to changes in the ill;
* note that by doing it here, this re-enabling only applies when
* the packet is not dispatched through CALL_IP_WPUT().
*
- * That means for IPv4, it is worth re-enabling MDT for the fastpath
+ * That means for IPv4, it is worth re-enabling LSO/MDT for the fastpath
* case, since that's how we ended up here. For IPv6, we do the
* re-enabling work in ip_xmit_v6(), albeit indirectly via squeue.
*/
- if (connp->conn_mdt_ok && !tcp->tcp_mdt && ILL_MDT_USABLE(ill)) {
+ if (connp->conn_lso_ok && !tcp->tcp_lso && ILL_LSO_TCP_USABLE(ill)) {
+ /*
+ * Restore LSO for this connection, so that next time around
+ * it is eligible to go through tcp_lsosend() path again.
+ */
+ TCP_STAT(tcp_lso_enabled);
+ tcp->tcp_lso = B_TRUE;
+ ip1dbg(("tcp_send_data: reenabling LSO for connp %p on "
+ "interface %s\n", (void *)connp, ill->ill_name));
+ } else if (connp->conn_mdt_ok && !tcp->tcp_mdt && ILL_MDT_USABLE(ill)) {
/*
* Restore MDT for this connection, so that next time around
* it is eligible to go through tcp_multisend() path again.
@@ -19052,7 +19143,7 @@ data_null:
tcp->tcp_tcph->th_win);
/*
- * Determine if it's worthwhile to attempt MDT, based on:
+ * Determine if it's worthwhile to attempt LSO or MDT, based on:
*
* 1. Simple TCP/IP{v4,v6} (no options).
* 2. IPSEC/IPQoS processing is not needed for the TCP connection.
@@ -19060,24 +19151,32 @@ data_null:
* 4. The TCP is not detached.
*
* If any of the above conditions have changed during the
- * connection, stop using MDT and restore the stream head
+ * connection, stop using LSO/MDT and restore the stream head
* parameters accordingly.
*/
- if (tcp->tcp_mdt &&
+ if ((tcp->tcp_lso || tcp->tcp_mdt) &&
((tcp->tcp_ipversion == IPV4_VERSION &&
tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) ||
(tcp->tcp_ipversion == IPV6_VERSION &&
tcp->tcp_ip_hdr_len != IPV6_HDR_LEN) ||
tcp->tcp_state != TCPS_ESTABLISHED ||
- TCP_IS_DETACHED(tcp) || !CONN_IS_MD_FASTPATH(tcp->tcp_connp) ||
+ TCP_IS_DETACHED(tcp) || !CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp) ||
CONN_IPSEC_OUT_ENCAPSULATED(tcp->tcp_connp) ||
IPP_ENABLED(IPP_LOCAL_OUT))) {
- tcp->tcp_connp->conn_mdt_ok = B_FALSE;
- tcp->tcp_mdt = B_FALSE;
+ if (tcp->tcp_lso) {
+ tcp->tcp_connp->conn_lso_ok = B_FALSE;
+ tcp->tcp_lso = B_FALSE;
+ } else {
+ tcp->tcp_connp->conn_mdt_ok = B_FALSE;
+ tcp->tcp_mdt = B_FALSE;
+ }
/* Anything other than detached is considered pathological */
if (!TCP_IS_DETACHED(tcp)) {
- TCP_STAT(tcp_mdt_conn_halted1);
+ if (tcp->tcp_lso)
+ TCP_STAT(tcp_lso_disabled);
+ else
+ TCP_STAT(tcp_mdt_conn_halted1);
(void) tcp_maxpsz_set(tcp, B_TRUE);
}
}
@@ -19322,7 +19421,7 @@ tcp_multisend(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
boolean_t done = B_FALSE;
uint32_t cksum;
uint32_t hwcksum_flags;
- ire_t *ire;
+ ire_t *ire = NULL;
ill_t *ill;
ipha_t *ipha;
ip6_t *ip6h;
@@ -19372,7 +19471,7 @@ tcp_multisend(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
connp = tcp->tcp_connp;
ASSERT(connp != NULL);
- ASSERT(CONN_IS_MD_FASTPATH(connp));
+ ASSERT(CONN_IS_LSO_MD_FASTPATH(connp));
ASSERT(!CONN_IPSEC_OUT_ENCAPSULATED(connp));
/*
@@ -19404,65 +19503,8 @@ tcp_multisend(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
* in proceeding any further, and we should just hand everything
* off to the legacy path.
*/
- mutex_enter(&connp->conn_lock);
- ire = connp->conn_ire_cache;
- ASSERT(!(connp->conn_state_flags & CONN_INCIPIENT));
- if (ire != NULL && ((af == AF_INET && ire->ire_addr == dst) ||
- (af == AF_INET6 && IN6_ARE_ADDR_EQUAL(&ire->ire_addr_v6,
- &tcp->tcp_ip6h->ip6_dst))) &&
- !(ire->ire_marks & IRE_MARK_CONDEMNED)) {
- IRE_REFHOLD(ire);
- mutex_exit(&connp->conn_lock);
- } else {
- boolean_t cached = B_FALSE;
- ts_label_t *tsl;
-
- /* force a recheck later on */
- tcp->tcp_ire_ill_check_done = B_FALSE;
-
- TCP_DBGSTAT(tcp_ire_null1);
- connp->conn_ire_cache = NULL;
- mutex_exit(&connp->conn_lock);
-
- /* Release the old ire */
- if (ire != NULL)
- IRE_REFRELE_NOTR(ire);
-
- tsl = crgetlabel(CONN_CRED(connp));
- ire = (af == AF_INET) ?
- ire_cache_lookup(dst, connp->conn_zoneid, tsl) :
- ire_cache_lookup_v6(&tcp->tcp_ip6h->ip6_dst,
- connp->conn_zoneid, tsl);
-
- if (ire == NULL) {
- TCP_STAT(tcp_ire_null);
- goto legacy_send_no_md;
- }
-
- IRE_REFHOLD_NOTR(ire);
- /*
- * Since we are inside the squeue, there cannot be another
- * thread in TCP trying to set the conn_ire_cache now. The
- * check for IRE_MARK_CONDEMNED ensures that an interface
- * unplumb thread has not yet started cleaning up the conns.
- * Hence we don't need to grab the conn lock.
- */
- if (!(connp->conn_state_flags & CONN_CLOSING)) {
- rw_enter(&ire->ire_bucket->irb_lock, RW_READER);
- if (!(ire->ire_marks & IRE_MARK_CONDEMNED)) {
- connp->conn_ire_cache = ire;
- cached = B_TRUE;
- }
- rw_exit(&ire->ire_bucket->irb_lock);
- }
-
- /*
- * We can continue to use the ire but since it was not
- * cached, we should drop the extra reference.
- */
- if (!cached)
- IRE_REFRELE_NOTR(ire);
- }
+ if (!tcp_send_find_ire(tcp, (af == AF_INET) ? &dst : NULL, &ire))
+ goto legacy_send_no_md;
ASSERT(ire != NULL);
ASSERT(af != AF_INET || ire->ire_ipversion == IPV4_VERSION);
@@ -20578,6 +20620,103 @@ tcp_multisend_data(tcp_t *tcp, ire_t *ire, const ill_t *ill, mblk_t *md_mp_head,
}
/*
+ * Derived from tcp_send_data().
+ */
+static void
+tcp_lsosend_data(tcp_t *tcp, mblk_t *mp, ire_t *ire, ill_t *ill, const int mss,
+ int num_lso_seg)
+{
+ ipha_t *ipha;
+ mblk_t *ire_fp_mp;
+ uint_t ire_fp_mp_len;
+ uint32_t hcksum_txflags = 0;
+ ipaddr_t src;
+ ipaddr_t dst;
+ uint32_t cksum;
+ uint16_t *up;
+
+ ASSERT(DB_TYPE(mp) == M_DATA);
+ ASSERT(tcp->tcp_state == TCPS_ESTABLISHED);
+ ASSERT(tcp->tcp_ipversion == IPV4_VERSION);
+ ASSERT(tcp->tcp_connp != NULL);
+ ASSERT(CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp));
+
+ ipha = (ipha_t *)mp->b_rptr;
+ src = ipha->ipha_src;
+ dst = ipha->ipha_dst;
+
+ ASSERT(ipha->ipha_ident == 0 || ipha->ipha_ident == IP_HDR_INCLUDED);
+ ipha->ipha_ident = (uint16_t)atomic_add_32_nv(&ire->ire_ident,
+ num_lso_seg);
+#ifndef _BIG_ENDIAN
+ ipha->ipha_ident = (ipha->ipha_ident << 8) | (ipha->ipha_ident >> 8);
+#endif
+ if (tcp->tcp_snd_zcopy_aware) {
+ if ((ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) == 0 ||
+ (ill->ill_zerocopy_capab->ill_zerocopy_flags == 0))
+ mp = tcp_zcopy_disable(tcp, mp);
+ }
+
+ if (ILL_HCKSUM_CAPABLE(ill) && dohwcksum) {
+ ASSERT(ill->ill_hcksum_capab != NULL);
+ hcksum_txflags = ill->ill_hcksum_capab->ill_hcksum_txflags;
+ }
+
+ /*
+ * Since the TCP checksum should be recalculated by h/w, we can just
+ * zero the checksum field for HCK_FULLCKSUM, or calculate partial
+ * pseudo-header checksum for HCK_PARTIALCKSUM.
+ * The partial pseudo-header excludes the TCP length that was
+ * calculated in tcp_send(), so zero *up before further processing.
+ */
+ cksum = (dst >> 16) + (dst & 0xFFFF) + (src >> 16) + (src & 0xFFFF);
+
+ up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
+ *up = 0;
+
+ IP_CKSUM_XMIT_FAST(ire->ire_ipversion, hcksum_txflags, mp, ipha, up,
+ IPPROTO_TCP, IP_SIMPLE_HDR_LENGTH, ntohs(ipha->ipha_length), cksum);
+
+ /*
+ * Append the LSO flag to DB_LSOFLAGS(mp) and store the mss in
+ * DB_LSOMSS(mp).
+ */
+ DB_LSOFLAGS(mp) |= HW_LSO;
+ DB_LSOMSS(mp) = mss;
+
+ ipha->ipha_fragment_offset_and_flags |=
+ (uint32_t)htons(ire->ire_frag_flag);
+
+ ire_fp_mp = ire->ire_nce->nce_fp_mp;
+ ire_fp_mp_len = MBLKL(ire_fp_mp);
+ ASSERT(DB_TYPE(ire_fp_mp) == M_DATA);
+ mp->b_rptr = (uchar_t *)ipha - ire_fp_mp_len;
+ bcopy(ire_fp_mp->b_rptr, mp->b_rptr, ire_fp_mp_len);
+
+ UPDATE_OB_PKT_COUNT(ire);
+ ire->ire_last_used_time = lbolt;
+ BUMP_MIB(&ip_mib, ipOutRequests);
+
+ if (ILL_DLS_CAPABLE(ill)) {
+ /*
+ * Send the packet directly to DLD, where it may be queued
+ * depending on the availability of transmit resources at
+ * the media layer.
+ */
+ IP_DLS_ILL_TX(ill, ipha, mp);
+ } else {
+ ill_t *out_ill = (ill_t *)ire->ire_stq->q_ptr;
+ DTRACE_PROBE4(ip4__physical__out__start,
+ ill_t *, NULL, ill_t *, out_ill,
+ ipha_t *, ipha, mblk_t *, mp);
+ FW_HOOKS(ip4_physical_out_event, ipv4firewall_physical_out,
+ NULL, out_ill, ipha, mp, mp);
+ DTRACE_PROBE1(ip4__physical__out__end, mblk_t *, mp);
+ if (mp != NULL)
+ putnext(ire->ire_stq, mp);
+ }
+}
+
+/*
* tcp_send() is called by tcp_wput_data() for non-Multidata transmission
* scheme, and returns one of the following:
*
@@ -20595,6 +20734,44 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
const int mdt_thres)
{
int num_burst_seg = tcp->tcp_snd_burst;
+ ire_t *ire = NULL;
+ ill_t *ill = NULL;
+ mblk_t *ire_fp_mp = NULL;
+ uint_t ire_fp_mp_len = 0;
+ int num_lso_seg = 1;
+ uint_t lso_usable;
+ boolean_t do_lso_send = B_FALSE;
+
+ /*
+ * Check the LSO capability before any further work; a similar check
+ * is done inside the for(;;) loop.
+ * LSO is used when there is more than one mss of available
+ * data and a burst transmission is allowed.
+ */
+ if (tcp->tcp_lso &&
+ (tcp->tcp_valid_bits == 0 ||
+ tcp->tcp_valid_bits == TCP_FSS_VALID) &&
+ num_burst_seg >= 2 && (*usable - 1) / mss >= 1) {
+ /*
+ * Try to find usable IRE/ILL and do basic check to the ILL.
+ */
+ if (tcp_send_find_ire_ill(tcp, NULL, &ire, &ill)) {
+ /*
+ * Enable LSO with this transmission.
+ * Since the IRE was held in
+ * tcp_send_find_ire_ill(), IRE_REFRELE(ire)
+ * must be called before returning.
+ */
+ do_lso_send = B_TRUE;
+ ire_fp_mp = ire->ire_nce->nce_fp_mp;
+ ire_fp_mp_len = MBLKL(ire_fp_mp);
+ /* Round up to multiple of 4 */
+ ire_fp_mp_len = ((ire_fp_mp_len + 3) / 4) * 4;
+ } else {
+ do_lso_send = B_FALSE;
+ ill = NULL;
+ }
+ }
for (;;) {
struct datab *db;
@@ -20617,11 +20794,48 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
return (1); /* success; do large send */
}
- if (num_burst_seg-- == 0)
+ if (num_burst_seg == 0)
break; /* success; burst count reached */
+ /*
+ * Calculate the maximum payload length we can send in one
+ * transmission.
+ */
+ if (do_lso_send) {
+ /*
+ * Check whether LSO is still applicable.
+ */
+ if (num_burst_seg >= 2 && (*usable - 1) / mss >= 1) {
+ lso_usable = MIN(tcp->tcp_lso_max, *usable);
+ lso_usable = MIN(lso_usable,
+ num_burst_seg * mss);
+
+ num_lso_seg = lso_usable / mss;
+ if (lso_usable % mss) {
+ num_lso_seg++;
+ tcp->tcp_last_sent_len = (ushort_t)
+ (lso_usable % mss);
+ } else {
+ tcp->tcp_last_sent_len = (ushort_t)mss;
+ }
+ } else {
+ do_lso_send = B_FALSE;
+ num_lso_seg = 1;
+ lso_usable = mss;
+ }
+ }
+
+ ASSERT(num_lso_seg <= IP_MAXPACKET / mss + 1);
+
+ /*
+ * Adjust num_burst_seg here.
+ */
+ num_burst_seg -= num_lso_seg;
+
len = mss;
if (len > *usable) {
+ ASSERT(do_lso_send == B_FALSE);
+
len = *usable;
if (len <= 0) {
/* Terminate the loop */
@@ -20665,6 +20879,13 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
tcph = tcp->tcp_tcph;
+ /*
+ * Adjust len here so that the flag setting and checksum
+ * calculation below use the full LSO payload length.
+ */
+ if (do_lso_send)
+ len = lso_usable;
+
*usable -= len; /* Approximate - can be adjusted later */
if (*usable > 0)
tcph->th_flags[0] = TH_ACK;
@@ -20724,11 +20945,15 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
} else
(*xmit_tail)->b_rptr = prev_rptr;
- if (mp == NULL)
+ if (mp == NULL) {
+ if (ire != NULL)
+ IRE_REFRELE(ire);
return (-1);
+ }
mp1 = mp->b_cont;
- tcp->tcp_last_sent_len = (ushort_t)len;
+ if (len <= mss) /* LSO is unusable (!do_lso_send) */
+ tcp->tcp_last_sent_len = (ushort_t)len;
while (mp1->b_cont) {
*xmit_tail = (*xmit_tail)->b_cont;
(*xmit_tail)->b_prev = local_time;
@@ -20755,7 +20980,8 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
rptr = (*xmit_tail)->b_wptr - *tail_unsent;
if (rptr != (*xmit_tail)->b_rptr) {
*tail_unsent -= len;
- tcp->tcp_last_sent_len = (ushort_t)len;
+ if (len <= mss) /* LSO is unusable */
+ tcp->tcp_last_sent_len = (ushort_t)len;
len += tcp_hdr_len;
if (tcp->tcp_ipversion == IPV4_VERSION)
tcp->tcp_ipha->ipha_length = htons(len);
@@ -20765,8 +20991,11 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
((char *)&tcp->tcp_ip6h[1] -
tcp->tcp_iphc));
mp = dupb(*xmit_tail);
- if (!mp)
+ if (mp == NULL) {
+ if (ire != NULL)
+ IRE_REFRELE(ire);
return (-1); /* out_of_mem */
+ }
mp->b_rptr = rptr;
/*
* If the old timestamp is no longer in use,
@@ -20791,7 +21020,8 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
(*xmit_tail)->b_next = (mblk_t *)(uintptr_t)(*snxt - len);
*tail_unsent -= len;
- tcp->tcp_last_sent_len = (ushort_t)len;
+ if (len <= mss) /* LSO is unusable (!do_lso_send) */
+ tcp->tcp_last_sent_len = (ushort_t)len;
len += tcp_hdr_len;
if (tcp->tcp_ipversion == IPV4_VERSION)
@@ -20801,8 +21031,11 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc));
mp = dupb(*xmit_tail);
- if (!mp)
+ if (mp == NULL) {
+ if (ire != NULL)
+ IRE_REFRELE(ire);
return (-1); /* out_of_mem */
+ }
len = tcp_hdr_len;
/*
@@ -20815,21 +21048,23 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
rptr = mp->b_rptr - len;
if (!OK_32PTR(rptr) ||
((db = mp->b_datap), db->db_ref != 2) ||
- rptr < db->db_base) {
+ rptr < db->db_base + ire_fp_mp_len) {
/* NOTE: we assume allocb returns an OK_32PTR */
must_alloc:;
mp1 = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH +
- tcp_wroff_xtra, BPRI_MED);
- if (!mp1) {
+ tcp_wroff_xtra + ire_fp_mp_len, BPRI_MED);
+ if (mp1 == NULL) {
freemsg(mp);
+ if (ire != NULL)
+ IRE_REFRELE(ire);
return (-1); /* out_of_mem */
}
mp1->b_cont = mp;
mp = mp1;
/* Leave room for Link Level header */
len = tcp_hdr_len;
- rptr = &mp->b_rptr[tcp_wroff_xtra];
+ rptr = &mp->b_rptr[tcp_wroff_xtra + ire_fp_mp_len];
mp->b_wptr = &rptr[len];
}
@@ -20845,7 +21080,7 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
int spill = *tail_unsent;
mp1 = mp->b_cont;
- if (!mp1)
+ if (mp1 == NULL)
mp1 = mp;
/*
@@ -20865,7 +21100,8 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
* keep on splitting as this is a transient
* send path.
*/
- if (!tcp->tcp_mdt && (spill + nmpsz > 0)) {
+ if (!do_lso_send && !tcp->tcp_mdt &&
+ (spill + nmpsz > 0)) {
/*
* Don't split if stream head was
* told to break up larger writes
@@ -20899,6 +21135,8 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
if (mp1 == NULL) {
*tail_unsent = spill;
freemsg(mp);
+ if (ire != NULL)
+ IRE_REFRELE(ire);
return (-1); /* out_of_mem */
}
}
@@ -20946,10 +21184,21 @@ tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
}
TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT);
- tcp_send_data(tcp, q, mp);
- BUMP_LOCAL(tcp->tcp_obsegs);
+ if (do_lso_send) {
+ tcp_lsosend_data(tcp, mp, ire, ill, mss,
+ num_lso_seg);
+ tcp->tcp_obsegs += num_lso_seg;
+
+ TCP_STAT(tcp_lso_times);
+ TCP_STAT_UPDATE(tcp_lso_pkt_out, num_lso_seg);
+ } else {
+ tcp_send_data(tcp, q, mp);
+ BUMP_LOCAL(tcp->tcp_obsegs);
+ }
}
+ if (ire != NULL)
+ IRE_REFRELE(ire);
return (0);
}
@@ -21060,8 +21309,70 @@ tcp_mdt_update(tcp_t *tcp, ill_mdt_capab_t *mdt_capab, boolean_t first)
}
}
+/* Unlink and return any mblk that looks like it contains LSO info */
+static mblk_t *
+tcp_lso_info_mp(mblk_t *mp)
+{
+ mblk_t *prev_mp;
+
+ for (;;) {
+ prev_mp = mp;
+ /* no more to process? */
+ if ((mp = mp->b_cont) == NULL)
+ break;
+
+ switch (DB_TYPE(mp)) {
+ case M_CTL:
+ if (*(uint32_t *)mp->b_rptr != LSO_IOC_INFO_UPDATE)
+ continue;
+ ASSERT(prev_mp != NULL);
+ prev_mp->b_cont = mp->b_cont;
+ mp->b_cont = NULL;
+ return (mp);
+ default:
+ break;
+ }
+ }
+
+ return (mp);
+}
+
+/* LSO info update routine, called when IP notifies us about LSO */
static void
-tcp_ire_ill_check(tcp_t *tcp, ire_t *ire, ill_t *ill, boolean_t check_mdt)
+tcp_lso_update(tcp_t *tcp, ill_lso_capab_t *lso_capab)
+{
+ /*
+ * IP is telling us to abort LSO on this connection? We know
+ * this because the capability is only turned off when IP
+ * encounters some pathological cases, e.g. link-layer change
+ * where the new NIC/driver doesn't support LSO, or in situation
+ * where LSO usage on the link-layer has been switched off.
+ * IP would not have sent us the initial LSO_IOC_INFO_UPDATE
+ * if the link-layer doesn't support LSO, and if it does, it
+ * will indicate that the feature is to be turned on.
+ */
+ tcp->tcp_lso = (lso_capab->ill_lso_on != 0);
+ TCP_STAT(tcp_lso_enabled);
+
+ /*
+ * We currently only support LSO on simple TCP/IPv4,
+ * so disable LSO otherwise. The checks are done here
+ * and in tcp_wput_data().
+ */
+ if (tcp->tcp_lso &&
+ (tcp->tcp_ipversion == IPV4_VERSION &&
+ tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) ||
+ (tcp->tcp_ipversion == IPV6_VERSION)) {
+ tcp->tcp_lso = B_FALSE;
+ TCP_STAT(tcp_lso_disabled);
+ } else {
+ tcp->tcp_lso_max = MIN(TCP_MAX_LSO_LENGTH,
+ lso_capab->ill_lso_max);
+ }
+}
+
+static void
+tcp_ire_ill_check(tcp_t *tcp, ire_t *ire, ill_t *ill, boolean_t check_lso_mdt)
{
conn_t *connp = tcp->tcp_connp;
@@ -21069,33 +21380,42 @@ tcp_ire_ill_check(tcp_t *tcp, ire_t *ire, ill_t *ill, boolean_t check_mdt)
/*
* We may be in the fastpath here, and although we essentially do
- * similar checks as in ip_bind_connected{_v6}/ip_mdinfo_return,
+ * similar checks as in ip_bind_connected{_v6}/ip_xxinfo_return,
* we try to keep things as brief as possible. After all, these
* are only best-effort checks, and we do more thorough ones prior
- * to calling tcp_multisend().
+ * to calling tcp_send()/tcp_multisend().
*/
- if (ip_multidata_outbound && check_mdt &&
+ if ((ip_lso_outbound || ip_multidata_outbound) && check_lso_mdt &&
!(ire->ire_type & (IRE_LOCAL | IRE_LOOPBACK)) &&
- ill != NULL && ILL_MDT_CAPABLE(ill) &&
- !CONN_IPSEC_OUT_ENCAPSULATED(connp) &&
+ ill != NULL && !CONN_IPSEC_OUT_ENCAPSULATED(connp) &&
!(ire->ire_flags & RTF_MULTIRT) &&
!IPP_ENABLED(IPP_LOCAL_OUT) &&
- CONN_IS_MD_FASTPATH(connp)) {
- /* Remember the result */
- connp->conn_mdt_ok = B_TRUE;
-
- ASSERT(ill->ill_mdt_capab != NULL);
- if (!ill->ill_mdt_capab->ill_mdt_on) {
- /*
- * If MDT has been previously turned off in the past,
- * and we currently can do MDT (due to IPQoS policy
- * removal, etc.) then enable it for this interface.
- */
- ill->ill_mdt_capab->ill_mdt_on = 1;
- ip1dbg(("tcp_ire_ill_check: connp %p enables MDT for "
- "interface %s\n", (void *)connp, ill->ill_name));
+ CONN_IS_LSO_MD_FASTPATH(connp)) {
+ if (ip_lso_outbound && ILL_LSO_CAPABLE(ill)) {
+ /* Cache the result */
+ connp->conn_lso_ok = B_TRUE;
+
+ ASSERT(ill->ill_lso_capab != NULL);
+ if (!ill->ill_lso_capab->ill_lso_on) {
+ ill->ill_lso_capab->ill_lso_on = 1;
+ ip1dbg(("tcp_ire_ill_check: connp %p enables "
+ "LSO for interface %s\n", (void *)connp,
+ ill->ill_name));
+ }
+ tcp_lso_update(tcp, ill->ill_lso_capab);
+ } else if (ip_multidata_outbound && ILL_MDT_CAPABLE(ill)) {
+ /* Cache the result */
+ connp->conn_mdt_ok = B_TRUE;
+
+ ASSERT(ill->ill_mdt_capab != NULL);
+ if (!ill->ill_mdt_capab->ill_mdt_on) {
+ ill->ill_mdt_capab->ill_mdt_on = 1;
+ ip1dbg(("tcp_ire_ill_check: connp %p enables "
+ "MDT for interface %s\n", (void *)connp,
+ ill->ill_name));
+ }
+ tcp_mdt_update(tcp, ill->ill_mdt_capab, B_TRUE);
}
- tcp_mdt_update(tcp, ill->ill_mdt_capab, B_TRUE);
}
/*
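The per-iteration sizing arithmetic that tcp_send() applies when do_lso_send is set is easy to check in isolation. The sketch below is a user-space model of the lso_usable/num_lso_seg computation from the hunks above; the inputs in main() are arbitrary example values.

#include <stdint.h>
#include <stdio.h>

/*
 * Model of the per-iteration LSO sizing in tcp_send(): lso_usable is
 * capped by the available (usable) bytes, the ill's lso_max, and the
 * remaining burst allowance; the last (possibly short) segment length
 * is remembered as tcp_last_sent_len.
 */
static uint32_t
lso_sizing(uint32_t usable, uint32_t mss, uint32_t lso_max,
    uint32_t num_burst_seg, uint32_t *last_sent_len)
{
	uint32_t lso_usable, num_lso_seg;

	lso_usable = usable < lso_max ? usable : lso_max;
	if (lso_usable > num_burst_seg * mss)
		lso_usable = num_burst_seg * mss;

	num_lso_seg = lso_usable / mss;
	if (lso_usable % mss) {
		num_lso_seg++;
		*last_sent_len = lso_usable % mss;
	} else {
		*last_sent_len = mss;
	}
	return (num_lso_seg);
}

int
main(void)
{
	uint32_t last;
	uint32_t segs = lso_sizing(100000, 1460, 65415, 48, &last);

	/* The 65415-byte lso_max clamp wins: 45 segments, last one 1175 B. */
	(void) printf("segs=%u last=%u\n", (unsigned)segs, (unsigned)last);
	return (0);
}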
diff --git a/usr/src/uts/common/inet/tcp/tcp_fusion.c b/usr/src/uts/common/inet/tcp/tcp_fusion.c
index 0787f3eb6e..277b479a41 100644
--- a/usr/src/uts/common/inet/tcp/tcp_fusion.c
+++ b/usr/src/uts/common/inet/tcp/tcp_fusion.c
@@ -117,7 +117,7 @@
#define TCP_LOOPBACK_IP(tcp) \
(TCP_IPOPT_POLICY_V4(tcp) || TCP_IPOPT_POLICY_V6(tcp) || \
- !CONN_IS_MD_FASTPATH((tcp)->tcp_connp))
+ !CONN_IS_LSO_MD_FASTPATH((tcp)->tcp_connp))
/*
* Setting this to false means we disable fusion altogether and
diff --git a/usr/src/uts/common/inet/tcp_impl.h b/usr/src/uts/common/inet/tcp_impl.h
index 1348d2f6cd..dc69b9c8b6 100644
--- a/usr/src/uts/common/inet/tcp_impl.h
+++ b/usr/src/uts/common/inet/tcp_impl.h
@@ -307,6 +307,10 @@ typedef struct tcp_stat {
kstat_named_t tcp_fusion_rrw_plugged;
kstat_named_t tcp_in_ack_unsent_drop;
kstat_named_t tcp_sock_fallback;
+ kstat_named_t tcp_lso_enabled;
+ kstat_named_t tcp_lso_disabled;
+ kstat_named_t tcp_lso_times;
+ kstat_named_t tcp_lso_pkt_out;
} tcp_stat_t;
extern tcp_stat_t tcp_statistics;
diff --git a/usr/src/uts/common/inet/udp/udp.c b/usr/src/uts/common/inet/udp/udp.c
index 91db2fe866..d89c1d8e13 100644
--- a/usr/src/uts/common/inet/udp/udp.c
+++ b/usr/src/uts/common/inet/udp/udp.c
@@ -6603,12 +6603,12 @@ udp_send_data(udp_t *udp, queue_t *q, mblk_t *mp, ipha_t *ipha)
* available yet) are sent down the legacy (slow) path
*/
if ((ire->ire_type & (IRE_BROADCAST|IRE_LOCAL|IRE_LOOPBACK)) ||
- (ire->ire_flags & RTF_MULTIRT) || ire->ire_stq == NULL ||
- ire->ire_max_frag < ntohs(ipha->ipha_length) ||
- (ire->ire_nce != NULL &&
- (ire_fp_mp = ire->ire_nce->nce_fp_mp) == NULL) ||
+ (ire->ire_flags & RTF_MULTIRT) || (ire->ire_stq == NULL) ||
+ (ire->ire_max_frag < ntohs(ipha->ipha_length)) ||
(connp->conn_nexthop_set) ||
- (ire_fp_mp_len = MBLKL(ire_fp_mp)) > MBLKHEAD(mp)) {
+ (ire->ire_nce == NULL) ||
+ ((ire_fp_mp = ire->ire_nce->nce_fp_mp) == NULL) ||
+ ((ire_fp_mp_len = MBLKL(ire_fp_mp)) > MBLKHEAD(mp))) {
if (ipif != NULL)
ipif_refrele(ipif);
UDP_STAT(udp_ip_ire_send);
diff --git a/usr/src/uts/common/io/dld/dld_proto.c b/usr/src/uts/common/io/dld/dld_proto.c
index 994ad7af38..88d1e46ef6 100644
--- a/usr/src/uts/common/io/dld/dld_proto.c
+++ b/usr/src/uts/common/io/dld/dld_proto.c
@@ -607,6 +607,12 @@ proto_process_unbind_req(void *arg)
proto_poll_disable(dsp);
/*
+ * Clear LSO flags.
+ */
+ dsp->ds_lso = B_FALSE;
+ dsp->ds_lso_max = 0;
+
+ /*
* Clear the receive callback.
*/
dls_rx_set(dsp->ds_dc, NULL, NULL);
@@ -1201,6 +1207,23 @@ proto_capability_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
}
/*
+ * Large segment offload. (LSO)
+ */
+ case DL_CAPAB_LSO: {
+ dl_capab_lso_t *lsop;
+ dl_capab_lso_t lso;
+
+ lsop = (dl_capab_lso_t *)&sp[1];
+ /*
+ * Copy for alignment.
+ */
+ bcopy(lsop, &lso, sizeof (dl_capab_lso_t));
+ dlcapabsetqid(&(lso.lso_mid), dsp->ds_rq);
+ bcopy(&lso, lsop, sizeof (dl_capab_lso_t));
+ break;
+ }
+
+ /*
* IP polling interface.
*/
case DL_CAPAB_POLL: {
@@ -1682,12 +1705,15 @@ proto_capability_advertise(dld_str_t *dsp, mblk_t *mp)
dl_capability_sub_t *dlsp;
size_t subsize;
dl_capab_dls_t poll;
- dl_capab_dls_t soft_ring;
+ dl_capab_dls_t soft_ring;
dl_capab_hcksum_t hcksum;
+ dl_capab_lso_t lso;
dl_capab_zerocopy_t zcopy;
uint8_t *ptr;
boolean_t cksum_cap;
boolean_t poll_cap;
+ boolean_t lso_cap;
+ mac_capab_lso_t mac_lso;
queue_t *q = dsp->ds_wq;
mblk_t *mp1;
@@ -1729,6 +1755,15 @@ proto_capability_advertise(dld_str_t *dsp, mblk_t *mp)
}
/*
+ * If LSO is usable on the MAC, reserve space for the DL_CAPAB_LSO
+ * capability.
+ */
+ if (lso_cap = mac_capab_get(dsp->ds_mh, MAC_CAPAB_LSO, &mac_lso)) {
+ subsize += sizeof (dl_capability_sub_t) +
+ sizeof (dl_capab_lso_t);
+ }
+
+ /*
* If DL_CAPAB_ZEROCOPY has not be explicitly disabled then
* reserve space for it.
*/
@@ -1853,6 +1888,32 @@ proto_capability_advertise(dld_str_t *dsp, mblk_t *mp)
}
/*
+ * Large segment offload. (LSO)
+ */
+ if (lso_cap) {
+ dlsp = (dl_capability_sub_t *)ptr;
+
+ dlsp->dl_cap = DL_CAPAB_LSO;
+ dlsp->dl_length = sizeof (dl_capab_lso_t);
+ ptr += sizeof (dl_capability_sub_t);
+
+ lso.lso_version = LSO_VERSION_1;
+ lso.lso_flags = mac_lso.lso_flags;
+ lso.lso_max = mac_lso.lso_basic_tcp_ipv4.lso_max;
+
+ /* Simply enable LSO with DLD */
+ dsp->ds_lso = B_TRUE;
+ dsp->ds_lso_max = lso.lso_max;
+
+ dlcapabsetqid(&(lso.lso_mid), dsp->ds_rq);
+ bcopy(&lso, ptr, sizeof (dl_capab_lso_t));
+ ptr += sizeof (dl_capab_lso_t);
+ } else {
+ dsp->ds_lso = B_FALSE;
+ dsp->ds_lso_max = 0;
+ }
+
+ /*
* Zero copy
*/
if (!(dld_opt & DLD_OPT_NO_ZEROCOPY)) {
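In proto_capability_advertise() above, the DL_CAPAB_LSO sub-capability is reserved and filled in only when mac_capab_get(..., MAC_CAPAB_LSO, ...) succeeds, and ds_lso/ds_lso_max are cached (or cleared) accordingly. A toy model of that reserve-and-cache step; the struct layouts and sizes are illustrative, not the real DLPI structures:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint16_t dl_cap; uint16_t dl_length; } sub_t;
typedef struct { uint32_t lso_version, lso_flags, lso_max; } capab_lso_t;

typedef struct {
	int	ds_lso;
	uint32_t ds_lso_max;
} toy_str_t;

/* Returns the extra bytes to reserve in the capability advertisement. */
static size_t
reserve_lso(toy_str_t *dsp, int mac_has_lso, uint32_t mac_lso_max)
{
	if (!mac_has_lso) {
		dsp->ds_lso = 0;
		dsp->ds_lso_max = 0;
		return (0);
	}
	dsp->ds_lso = 1;
	dsp->ds_lso_max = mac_lso_max;
	return (sizeof (sub_t) + sizeof (capab_lso_t));
}

int
main(void)
{
	toy_str_t dsp;

	(void) printf("extra bytes: %zu\n", reserve_lso(&dsp, 1, 65535));
	return (0);
}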
diff --git a/usr/src/uts/common/io/dld/dld_str.c b/usr/src/uts/common/io/dld/dld_str.c
index cb4cdbf485..7b311cf7e6 100644
--- a/usr/src/uts/common/io/dld/dld_str.c
+++ b/usr/src/uts/common/io/dld/dld_str.c
@@ -973,7 +973,12 @@ str_mdata_raw_put(dld_str_t *dsp, mblk_t *mp)
if (dls_header_info(dsp->ds_dc, mp, &mhi) != 0)
goto discard;
- if (size > dsp->ds_mip->mi_sdu_max + mhi.mhi_hdrsize)
+ /*
+ * If LSO is enabled, check the size against lso_max. Otherwise,
+ * compare the packet size with sdu_max.
+ */
+ if (size > (dsp->ds_lso ? dsp->ds_lso_max : dsp->ds_mip->mi_sdu_max)
+ + mhi.mhi_hdrsize)
goto discard;
if (is_ethernet) {
@@ -1104,6 +1109,12 @@ dld_str_detach(dld_str_t *dsp)
dsp->ds_promisc = 0;
/*
+ * Clear LSO flags.
+ */
+ dsp->ds_lso = B_FALSE;
+ dsp->ds_lso_max = 0;
+
+ /*
* Close the channel.
*/
dls_close(dsp->ds_dc);
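The two dld_str.c hunks above keep the per-stream LSO state (ds_lso, ds_lso_max) consistent across detach and use it in the raw-put size check. That check reads naturally as a predicate; a condensed sketch (dld_pkt_size_ok is a hypothetical name):

static boolean_t
dld_pkt_size_ok(dld_str_t *dsp, size_t size, size_t hdrsize)
{
	/* With LSO enabled a message may carry up to ds_lso_max payload */
	size_t max_payload = dsp->ds_lso ?
	    dsp->ds_lso_max : dsp->ds_mip->mi_sdu_max;

	return (size <= max_payload + hdrsize);
}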
diff --git a/usr/src/uts/common/io/xge/drv/xge.c b/usr/src/uts/common/io/xge/drv/xge.c
index 379522cec7..648c9134ea 100644
--- a/usr/src/uts/common/io/xge/drv/xge.c
+++ b/usr/src/uts/common/io/xge/drv/xge.c
@@ -79,7 +79,7 @@ xge_event(xge_queue_item_t *item)
switch (item->event_type) {
case XGELL_EVENT_RESCHED_NEEDED:
if (lldev->is_initialized) {
- if (__hal_channel_dtr_count(lldev->fifo_channel)
+ if (xge_hal_channel_dtr_count(lldev->fifo_channel)
>= XGELL_TX_LEVEL_HIGH) {
mac_tx_update(lldev->mh);
xge_debug_osdep(XGE_TRACE,
@@ -245,12 +245,6 @@ xge_configuration_init(dev_info_t *dev_info,
/*
* Initialize common properties
*/
-
- /*
- * We prefer HAL could provide all default values to these tunables,
- * so this level could care little the configurations need by HAL.
- * Leave a const here is definitely not good idea.
- */
device_config->mtu = ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "default_mtu",
XGE_HAL_DEFAULT_INITIAL_MTU);
@@ -275,13 +269,9 @@ xge_configuration_init(dev_info_t *dev_info,
device_config->device_poll_millis = ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "device_poll_millis",
XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS);
- /*
- * Query PCI bus freqency from parent nexus driver.
- * Note this property is only provided on SPARC platforms.
- */
device_config->pci_freq_mherz = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, 0, "clock-frequency",
- XGE_HAL_PCI_FREQ_MHERZ_DEFAULT * 1000000) / 1000000;
+ dev_info, DDI_PROP_DONTPASS, "pci_freq_mherz",
+ XGE_HAL_DEFAULT_USE_HARDCODE);
/*
* Initialize ring properties
@@ -337,7 +327,7 @@ xge_configuration_init(dev_info_t *dev_info,
device_config->ring.queue[XGELL_RING_MAIN_QID].configured =
ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "ring_main_configured",
- 1); /* HAL never provide a good named macro */
+ 1);
device_config->ring.queue[XGELL_RING_MAIN_QID].rti.urange_a =
ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "ring_main_urange_a",
@@ -353,7 +343,9 @@ xge_configuration_init(dev_info_t *dev_info,
device_config->ring.queue[XGELL_RING_MAIN_QID].rti.ufc_b =
ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "ring_main_ufc_b",
- XGE_HAL_DEFAULT_RX_UFC_B);
+ device_config->mtu > XGE_HAL_DEFAULT_MTU ?
+ XGE_HAL_DEFAULT_RX_UFC_B_J :
+ XGE_HAL_DEFAULT_RX_UFC_B_N);
device_config->ring.queue[XGELL_RING_MAIN_QID].rti.urange_c =
ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "ring_main_urange_c",
@@ -361,7 +353,9 @@ xge_configuration_init(dev_info_t *dev_info,
device_config->ring.queue[XGELL_RING_MAIN_QID].rti.ufc_c =
ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "ring_main_ufc_c",
- XGE_HAL_DEFAULT_RX_UFC_C);
+ device_config->mtu > XGE_HAL_DEFAULT_MTU ?
+ XGE_HAL_DEFAULT_RX_UFC_C_J :
+ XGE_HAL_DEFAULT_RX_UFC_C_N);
device_config->ring.queue[XGELL_RING_MAIN_QID].rti.ufc_d =
ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "ring_main_ufc_d",
@@ -377,24 +371,9 @@ xge_configuration_init(dev_info_t *dev_info,
device_config->ring.queue[XGELL_RING_MAIN_QID].indicate_max_pkts =
ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "ring_main_indicate_max_pkts",
- XGE_HAL_DEFAULT_INDICATE_MAX_PKTS);
-
- /* adaptive rx coalesing */
- device_config->sched_timer_us = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "ring_main_ufc_a_timer",
- 0);
- device_config->rxufca_intr_thres = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "rxufca_intr_thres",
- 35);
- device_config->rxufca_lo_lim = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "rxufca_lo_lim",
- 1);
- device_config->rxufca_hi_lim = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "rxufca_hi_lim",
- 16);
- device_config->rxufca_lbolt_period = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "rxufca_lbolt_period",
- 1);
+ (device_config->bimodal_interrupts ?
+ XGE_HAL_DEFAULT_INDICATE_MAX_PKTS_B :
+ XGE_HAL_DEFAULT_INDICATE_MAX_PKTS_N));
/*
* Initialize mac properties
@@ -408,6 +387,12 @@ xge_configuration_init(dev_info_t *dev_info,
device_config->mac.rmac_bcast_en = ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "mac_rmac_bcast_en",
	    1); /* HAL never provides a well-named macro */
+ device_config->mac.rmac_pause_gen_en = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dev_info, DDI_PROP_DONTPASS, "rmac_pause_gen_en",
+ XGE_HAL_DEFAULT_RMAC_PAUSE_GEN_DIS);
+ device_config->mac.rmac_pause_rcv_en = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dev_info, DDI_PROP_DONTPASS, "rmac_pause_rcv_en",
+ XGE_HAL_DEFAULT_RMAC_PAUSE_RCV_DIS);
device_config->mac.rmac_pause_time = ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "mac_rmac_pause_time",
XGE_HAL_DEFAULT_RMAC_HIGH_PTIME);
@@ -435,10 +420,12 @@ xge_configuration_init(dev_info_t *dev_info,
dev_info, DDI_PROP_DONTPASS, "fifo_memblock_size",
XGE_HAL_DEFAULT_FIFO_MEMBLOCK_SIZE);
#ifdef XGE_HAL_ALIGN_XMIT
- device_config->fifo.alignment_size =
- XGE_HAL_DEFAULT_FIFO_ALIGNMENT_SIZE;
- device_config->fifo.max_aligned_frags =
- XGE_HAL_DEFAULT_FIFO_MAX_ALIGNED_FRAGS;
+ device_config->fifo.alignment_size = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dev_info, DDI_PROP_DONTPASS, "fifo_copied_frag_size",
+ XGE_HAL_DEFAULT_FIFO_ALIGNMENT_SIZE);
+ device_config->fifo.max_aligned_frags = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dev_info, DDI_PROP_DONTPASS, "fifo_copied_max_frags",
+ XGE_HAL_DEFAULT_FIFO_MAX_ALIGNED_FRAGS);
#endif
#if defined(__sparc)
device_config->fifo.queue[0].no_snoop_bits = 1;
@@ -469,40 +456,52 @@ xge_configuration_init(dev_info_t *dev_info,
"fifo0_configured", 1);
/*
- * Initialize tti properties
+ * Bimodal Interrupts - TTI 56 configuration
+ */
+ device_config->bimodal_interrupts = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "bimodal_interrupts",
+ XGE_HAL_DEFAULT_BIMODAL_INTERRUPTS);
+ device_config->bimodal_timer_lo_us = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "bimodal_timer_lo_us",
+ XGE_HAL_DEFAULT_BIMODAL_TIMER_LO_US);
+ device_config->bimodal_timer_hi_us = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "bimodal_timer_hi_us",
+ XGE_HAL_DEFAULT_BIMODAL_TIMER_HI_US);
+
+ /*
+ * TTI 0 configuration
*/
- device_config->tti.enabled = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "tti_enabled",
- XGE_HAL_TTI_ENABLE);
- device_config->tti.urange_a = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "tti_urange_a",
+ device_config->fifo.queue[0].tti[0].enabled = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "tti_enable", 1);
+ device_config->fifo.queue[0].tti[0].urange_a = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "tti_urange_a",
XGE_HAL_DEFAULT_TX_URANGE_A);
- device_config->tti.ufc_a = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "tti_ufc_a",
+ device_config->fifo.queue[0].tti[0].ufc_a = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "tti_ufc_a",
XGE_HAL_DEFAULT_TX_UFC_A);
- device_config->tti.urange_b = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "tti_urange_b",
+ device_config->fifo.queue[0].tti[0].urange_b = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "tti_urange_b",
XGE_HAL_DEFAULT_TX_URANGE_B);
- device_config->tti.ufc_b = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "tti_ufc_b",
+ device_config->fifo.queue[0].tti[0].ufc_b = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "tti_ufc_b",
XGE_HAL_DEFAULT_TX_UFC_B);
- device_config->tti.urange_c = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "tti_urange_c",
+ device_config->fifo.queue[0].tti[0].urange_c = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "tti_urange_c",
XGE_HAL_DEFAULT_TX_URANGE_C);
- device_config->tti.ufc_c = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "tti_ufc_c",
+ device_config->fifo.queue[0].tti[0].ufc_c = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "tti_ufc_c",
XGE_HAL_DEFAULT_TX_UFC_C);
- device_config->tti.ufc_d = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "tti_ufc_d",
+ device_config->fifo.queue[0].tti[0].ufc_d = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "tti_ufc_d",
XGE_HAL_DEFAULT_TX_UFC_D);
- device_config->tti.timer_val_us = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "tti_timer_ac_en",
+ device_config->fifo.queue[0].tti[0].timer_ac_en = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "tti_timer_ac_en",
XGE_HAL_DEFAULT_TX_TIMER_AC_EN);
- device_config->tti.timer_val_us = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "tti_timer_val",
+ device_config->fifo.queue[0].tti[0].timer_val_us = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "tti_timer_val",
XGE_HAL_DEFAULT_TX_TIMER_VAL);
- device_config->tti.timer_ci_en = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "tti_timer_ci_en",
+ device_config->fifo.queue[0].tti[0].timer_ci_en = ddi_prop_get_int(
+ DDI_DEV_T_ANY, dev_info, DDI_PROP_DONTPASS, "tti_timer_ci_en",
XGE_HAL_DEFAULT_TX_TIMER_CI_EN);
/*
@@ -519,17 +518,37 @@ xge_configuration_init(dev_info_t *dev_info,
0);
/*
+ * LRO tunables
+ */
+ device_config->lro_sg_size = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dev_info, DDI_PROP_DONTPASS, "lro_sg_size",
+ XGE_HAL_DEFAULT_LRO_SG_SIZE);
+ device_config->lro_frm_len = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dev_info, DDI_PROP_DONTPASS, "lro_frm_len",
+ XGE_HAL_DEFAULT_LRO_FRM_LEN);
+
+ /*
* Initialize link layer configuration
*/
ll_config->rx_buffer_total = ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "rx_buffer_total",
- XGELL_RX_BUFFER_TOTAL);
+ device_config->ring.queue[XGELL_RING_MAIN_QID].initial *
+ XGELL_RX_BUFFER_TOTAL);
ll_config->rx_buffer_post_hiwat = ddi_prop_get_int(DDI_DEV_T_ANY,
dev_info, DDI_PROP_DONTPASS, "rx_buffer_post_hiwat",
- XGELL_RX_BUFFER_POST_HIWAT);
- ll_config->rx_buffer_recycle_hiwat = ddi_prop_get_int(DDI_DEV_T_ANY,
- dev_info, DDI_PROP_DONTPASS, "rx_buffer_recycle_hiwat",
- XGELL_RX_BUFFER_RECYCLE_HIWAT);
+ device_config->ring.queue[XGELL_RING_MAIN_QID].initial *
+ XGELL_RX_BUFFER_POST_HIWAT);
+ ll_config->rx_pkt_burst = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dev_info, DDI_PROP_DONTPASS, "rx_pkt_burst",
+ XGELL_RX_PKT_BURST);
+ ll_config->rx_dma_lowat = ddi_prop_get_int(DDI_DEV_T_ANY, dev_info,
+ DDI_PROP_DONTPASS, "rx_dma_lowat", XGELL_RX_DMA_LOWAT);
+ ll_config->tx_dma_lowat = ddi_prop_get_int(DDI_DEV_T_ANY, dev_info,
+ DDI_PROP_DONTPASS, "tx_dma_lowat", XGELL_TX_DMA_LOWAT);
+ ll_config->msi_enable = ddi_prop_get_int(DDI_DEV_T_ANY, dev_info,
+ DDI_PROP_DONTPASS, "msi_enable", XGELL_CONF_ENABLE_BY_DEFAULT);
+ ll_config->lso_enable = ddi_prop_get_int(DDI_DEV_T_ANY, dev_info,
+ DDI_PROP_DONTPASS, "lso_enable", XGELL_CONF_ENABLE_BY_DEFAULT);
}
/*
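Every tunable above follows the same pattern: ddi_prop_get_int() reads a named property from xge.conf and falls back to a built-in default, which may itself depend on earlier choices such as a jumbo MTU or bimodal interrupts. An administrator can therefore override the new knobs with standard driver.conf syntax; the property names below come from the hunks above, the values are purely illustrative:

	# xge.conf - illustrative overrides only
	bimodal_interrupts=1;
	bimodal_timer_lo_us=24;
	bimodal_timer_hi_us=256;
	tti_ufc_a=15;
	msi_enable=1;
	lso_enable=1;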
diff --git a/usr/src/uts/common/io/xge/drv/xge_osdep.h b/usr/src/uts/common/io/xge/drv/xge_osdep.h
index 699536dde5..e1be445644 100644
--- a/usr/src/uts/common/io/xge/drv/xge_osdep.h
+++ b/usr/src/uts/common/io/xge/drv/xge_osdep.h
@@ -57,21 +57,24 @@
extern "C" {
#endif
+#ifdef DEBUG
+#define XGE_DEBUG_ASSERT
+#endif
+
/* ------------------------- includes and defines ------------------------- */
#define XGE_HAL_TX_MULTI_POST_IRQ 1
#define XGE_HAL_TX_MULTI_RESERVE_IRQ 1
#define XGE_HAL_TX_MULTI_FREE_IRQ 1
-#define XGE_HAL_RX_MULTI_FREE 1
#define XGE_HAL_DMA_DTR_CONSISTENT 1
#define XGE_HAL_DMA_STATS_STREAMING 1
#if defined(__sparc)
#define XGE_OS_DMA_REQUIRES_SYNC 1
-#define XGELL_TX_NOMAP_COPY 1
-#define XGE_HAL_ALIGN_XMIT 1
#endif
+#define XGE_HAL_ALIGN_XMIT 1
+
#ifdef _BIG_ENDIAN
#define XGE_OS_HOST_BIG_ENDIAN 1
#else
@@ -86,6 +89,10 @@ extern "C" {
#define XGE_OS_HAS_SNPRINTF 1
+/* LRO defines */
+#define XGE_HAL_CONFIG_LRO 0
+#define XGE_LL_IP_FAST_CSUM(hdr, len) 0 /* ip_ocsum(hdr, len>>1, 0); */
+
/* ---------------------- fixed size primitive types ----------------------- */
#define u8 uint8_t
@@ -103,6 +110,12 @@ typedef ddi_iblock_cookie_t pci_irq_h;
typedef ddi_dma_handle_t pci_dma_h;
typedef ddi_acc_handle_t pci_dma_acc_h;
+/* LRO types */
+#define OS_NETSTACK_BUF mblk_t *
+#define OS_LL_HEADER uint8_t *
+#define OS_IP_HEADER uint8_t *
+#define OS_TL_HEADER uint8_t *
+
/* -------------------------- "libc" functionality ------------------------- */
#define xge_os_strcpy (void) strcpy
@@ -111,6 +124,9 @@ typedef ddi_acc_handle_t pci_dma_acc_h;
#define xge_os_memzero(addr, size) bzero(addr, size)
#define xge_os_memcpy(dst, src, size) bcopy(src, dst, size)
#define xge_os_memcmp(src1, src2, size) bcmp(src1, src2, size)
+#define xge_os_ntohl ntohl
+#define xge_os_htons htons
+#define xge_os_ntohs ntohs
#ifdef __GNUC__
#define xge_os_printf(fmt...) cmn_err(CE_CONT, fmt)
@@ -250,8 +266,8 @@ static inline void *__xge_os_dma_malloc(pci_dev_h pdev, unsigned long size,
ret = ddi_dma_mem_alloc(*p_dmah, size, p_xge_dev_attr,
(dma_flags & XGE_OS_DMA_CONSISTENT ?
- DDI_DMA_CONSISTENT : DDI_DMA_STREAMING),
- DDI_DMA_DONTWAIT, 0, (caddr_t *)&vaddr, &real_size, p_dma_acch);
+ DDI_DMA_CONSISTENT : DDI_DMA_STREAMING), DDI_DMA_DONTWAIT, 0,
+ (caddr_t *)&vaddr, &real_size, p_dma_acch);
if (ret != DDI_SUCCESS) {
ddi_dma_free_handle(p_dmah);
return (NULL);
diff --git a/usr/src/uts/common/io/xge/drv/xgell.c b/usr/src/uts/common/io/xge/drv/xgell.c
index e9b8885d89..a153880f13 100644
--- a/usr/src/uts/common/io/xge/drv/xgell.c
+++ b/usr/src/uts/common/io/xge/drv/xgell.c
@@ -40,6 +40,7 @@
#include <netinet/ip.h>
#include <netinet/tcp.h>
+#include <netinet/udp.h>
#define XGELL_MAX_FRAME_SIZE(hldev) ((hldev)->config.mtu + \
sizeof (struct ether_vlan_header))
@@ -62,7 +63,7 @@ static struct ddi_dma_attr tx_dma_attr = {
1, /* dma_attr_minxfer */
0xFFFFFFFFULL, /* dma_attr_maxxfer */
0xFFFFFFFFFFFFFFFFULL, /* dma_attr_seg */
- 4, /* dma_attr_sgllen */
+ 18, /* dma_attr_sgllen */
1, /* dma_attr_granular */
0 /* dma_attr_flags */
};
@@ -167,6 +168,11 @@ xge_device_poll(void *data)
xge_hal_device_poll(data);
lldev->timeout_id = timeout(xge_device_poll, data,
XGE_DEV_POLL_TICKS);
+ } else if (lldev->in_reset == 1) {
+ lldev->timeout_id = timeout(xge_device_poll, data,
+ XGE_DEV_POLL_TICKS);
+ } else {
+ lldev->timeout_id = 0;
}
mutex_exit(&lldev->genlock);
}
@@ -182,10 +188,10 @@ xge_device_poll_now(void *data)
xgelldev_t *lldev = xge_hal_device_private(data);
mutex_enter(&lldev->genlock);
- (void) untimeout(lldev->timeout_id);
- lldev->timeout_id = timeout(xge_device_poll, data, 0);
+ if (lldev->is_initialized) {
+ xge_hal_device_poll(data);
+ }
mutex_exit(&lldev->genlock);
-
}
/*
@@ -234,6 +240,8 @@ xgell_rx_buffer_replenish_all(xgelldev_t *lldev)
xgell_rx_buffer_t *rx_buffer;
xgell_rxd_priv_t *rxd_priv;
+ xge_assert(mutex_owned(&lldev->bf_pool.pool_lock));
+
while ((lldev->bf_pool.free > 0) &&
(xge_hal_ring_dtr_reserve(lldev->ring_main.channelh, &dtr) ==
XGE_HAL_OK)) {
@@ -258,21 +266,20 @@ xgell_rx_buffer_replenish_all(xgelldev_t *lldev)
* xgell_rx_buffer_release
*
* The only thing done here is to put the buffer back to the pool.
+ * Callers of this function must hold the bf_pool.pool_lock mutex.
*/
static void
xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
{
xgelldev_t *lldev = rx_buffer->lldev;
- mutex_enter(&lldev->bf_pool.pool_lock);
+ xge_assert(mutex_owned(&lldev->bf_pool.pool_lock));
/* Put the buffer back to pool */
rx_buffer->next = lldev->bf_pool.head;
lldev->bf_pool.head = rx_buffer;
lldev->bf_pool.free++;
-
- mutex_exit(&lldev->bf_pool.pool_lock);
}
/*
@@ -287,9 +294,9 @@ xgell_rx_buffer_recycle(char *arg)
xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
xgelldev_t *lldev = rx_buffer->lldev;
- xgell_rx_buffer_release(rx_buffer);
-
mutex_enter(&lldev->bf_pool.pool_lock);
+
+ xgell_rx_buffer_release(rx_buffer);
lldev->bf_pool.post--;
/*
@@ -323,7 +330,7 @@ xgell_rx_buffer_alloc(xgelldev_t *lldev)
extern ddi_device_acc_attr_t *p_xge_dev_attr;
xgell_rx_buffer_t *rx_buffer;
- hldev = lldev->devh;
+ hldev = (xge_hal_device_t *)lldev->devh;
if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
0, &dma_handle) != DDI_SUCCESS) {
@@ -463,7 +470,6 @@ xgell_rx_create_buffer_pool(xgelldev_t *lldev)
lldev->bf_pool.free = 0;
lldev->bf_pool.post = 0;
lldev->bf_pool.post_hiwat = lldev->config.rx_buffer_post_hiwat;
- lldev->bf_pool.recycle_hiwat = lldev->config.rx_buffer_recycle_hiwat;
mutex_init(&lldev->bf_pool.pool_lock, NULL, MUTEX_DRIVER,
hldev->irqh);
@@ -576,9 +582,6 @@ xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
xge_hal_dtr_info_t *ext_info)
{
int cksum_flags = 0;
- int ip_off;
-
- ip_off = xgell_get_ip_offset(ext_info);
if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
@@ -599,6 +602,7 @@ xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
/*
* Just pass the partial cksum up to IP.
*/
+ int ip_off = xgell_get_ip_offset(ext_info);
int start, end = pkt_length - ip_off;
if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
@@ -621,12 +625,12 @@ xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
 * Allocate a message header for the data buffer, and decide whether to copy
 * the packet to a new buffer, releasing the big rx_buffer to save memory.
*
- * If the pkt_length <= XGELL_DMA_BUFFER_SIZE_LOWAT, call allocb() to allocate
+ * If the pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate
* new message and copy the payload in.
*/
static mblk_t *
-xgell_rx_1b_msg_alloc(xgell_rx_buffer_t *rx_buffer, int pkt_length,
- xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
+xgell_rx_1b_msg_alloc(xgelldev_t *lldev, xgell_rx_buffer_t *rx_buffer,
+ int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
{
mblk_t *mp;
mblk_t *nmp = NULL;
@@ -634,13 +638,12 @@ xgell_rx_1b_msg_alloc(xgell_rx_buffer_t *rx_buffer, int pkt_length,
int hdr_length = 0;
#ifdef XGELL_L3_ALIGNED
- int doalign = 1;
+ boolean_t doalign = B_TRUE;
struct ip *ip;
struct tcphdr *tcp;
int tcp_off;
int mp_align_len;
int ip_off;
-
#endif
vaddr = (char *)rx_buffer->vaddr + HEADROOM;
@@ -649,7 +652,7 @@ xgell_rx_1b_msg_alloc(xgell_rx_buffer_t *rx_buffer, int pkt_length,
/* Check ip_off with HEADROOM */
if ((ip_off & 3) == HEADROOM) {
- doalign = 0;
+ doalign = B_FALSE;
}
/*
@@ -658,7 +661,7 @@ xgell_rx_1b_msg_alloc(xgell_rx_buffer_t *rx_buffer, int pkt_length,
/* Is IPv4 or IPv6? */
if (doalign && !(ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6)) {
- doalign = 0;
+ doalign = B_FALSE;
}
/* Is TCP? */
@@ -672,15 +675,15 @@ xgell_rx_1b_msg_alloc(xgell_rx_buffer_t *rx_buffer, int pkt_length,
hdr_length = pkt_length;
}
} else {
- doalign = 0;
+ doalign = B_FALSE;
}
#endif
/*
* Copy packet into new allocated message buffer, if pkt_length
- * is less than XGELL_DMA_BUFFER_LOWAT
+ * is less than XGELL_RX_DMA_LOWAT
*/
- if (*copyit || pkt_length <= XGELL_DMA_BUFFER_SIZE_LOWAT) {
+ if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
/* Keep room for alignment */
if ((mp = allocb(pkt_length + HEADROOM + 4, 0)) == NULL) {
return (NULL);
@@ -766,13 +769,14 @@ xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
xgell_rx_buffer_t *rx_buffer;
mblk_t *mp_head = NULL;
mblk_t *mp_end = NULL;
+ int pkt_burst = 0;
+
+ mutex_enter(&lldev->bf_pool.pool_lock);
do {
- int ret;
int pkt_length;
dma_addr_t dma_data;
mblk_t *mp;
-
boolean_t copyit = B_FALSE;
xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
@@ -801,9 +805,8 @@ xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
/*
* Sync the DMA memory
*/
- ret = ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
- DDI_DMA_SYNC_FORKERNEL);
- if (ret != DDI_SUCCESS) {
+ if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
+ DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
XGELL_IFNAME, lldev->instance);
xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
@@ -820,8 +823,8 @@ xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
copyit = B_FALSE;
}
- mp = xgell_rx_1b_msg_alloc(rx_buffer, pkt_length, &ext_info,
- &copyit);
+ mp = xgell_rx_1b_msg_alloc(lldev, rx_buffer, pkt_length,
+ &ext_info, &copyit);
xge_hal_ring_dtr_free(channelh, dtr);
@@ -834,19 +837,18 @@ xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
/*
* Count it since the buffer should be loaned up.
*/
- mutex_enter(&lldev->bf_pool.pool_lock);
lldev->bf_pool.post++;
- mutex_exit(&lldev->bf_pool.pool_lock);
}
if (mp == NULL) {
xge_debug_ll(XGE_ERR,
- "%s%d: rx: can not allocate mp mblk", XGELL_IFNAME,
- lldev->instance);
+ "%s%d: rx: can not allocate mp mblk",
+ XGELL_IFNAME, lldev->instance);
continue;
}
/*
- * Associate cksum_flags per packet type and h/w cksum flags.
+ * Associate cksum_flags per packet type and h/w
+ * cksum flags.
*/
xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr +
HEADROOM, pkt_length, &ext_info);
@@ -859,20 +861,35 @@ xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
mp_end = mp;
}
+ if (++pkt_burst < lldev->config.rx_pkt_burst)
+ continue;
+
+ if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
+ /* Replenish rx buffers */
+ xgell_rx_buffer_replenish_all(lldev);
+ }
+ mutex_exit(&lldev->bf_pool.pool_lock);
+ if (mp_head != NULL) {
+ mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle,
+ mp_head);
+ }
+ mp_head = mp_end = NULL;
+ pkt_burst = 0;
+ mutex_enter(&lldev->bf_pool.pool_lock);
+
} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
XGE_HAL_OK);
- if (mp_head) {
- mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle, mp_head);
- }
-
/*
* Always call replenish_all to recycle rx_buffers.
*/
- mutex_enter(&lldev->bf_pool.pool_lock);
xgell_rx_buffer_replenish_all(lldev);
mutex_exit(&lldev->bf_pool.pool_lock);
+ if (mp_head != NULL) {
+ mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle, mp_head);
+ }
+
return (XGE_HAL_OK);
}
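The reworked completion loop above batches delivery: it accumulates up to rx_pkt_burst packets on an mblk chain, replenishes the buffer pool while still holding the pool lock, then drops the lock across the mac_rx() upcall. A condensed skeleton of that pattern (descriptor processing and error paths elided):

	mutex_enter(&lldev->bf_pool.pool_lock);
	do {
		/* ... process one descriptor, append its mblk to mp_head ... */

		if (++pkt_burst < lldev->config.rx_pkt_burst)
			continue;

		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat)
			xgell_rx_buffer_replenish_all(lldev);
		mutex_exit(&lldev->bf_pool.pool_lock);
		if (mp_head != NULL)
			mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle,
			    mp_head);
		mp_head = mp_end = NULL;
		pkt_burst = 0;
		mutex_enter(&lldev->bf_pool.pool_lock);
	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
	    XGE_HAL_OK);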
@@ -894,9 +911,7 @@ xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
xge_hal_fifo_dtr_private(dtr));
mblk_t *mp = txd_priv->mblk;
-#if !defined(XGELL_TX_NOMAP_COPY)
int i;
-#endif
if (t_code) {
xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
@@ -907,14 +922,12 @@ xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
t_code);
}
-#if !defined(XGELL_TX_NOMAP_COPY)
for (i = 0; i < txd_priv->handle_cnt; i++) {
xge_assert(txd_priv->dma_handles[i]);
(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
ddi_dma_free_handle(&txd_priv->dma_handles[i]);
txd_priv->dma_handles[i] = 0;
}
-#endif
xge_hal_fifo_dtr_free(channelh, dtr);
@@ -939,7 +952,7 @@ xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
/*
* xgell_send
- * @hldev: pointer to s2hal_device_t strucutre
+ * @lldev: pointer to xgelldev_t structure
* @mblk: pointer to network buffer, i.e. mblk_t structure
*
* Called by the xgell_m_tx to transmit the packet to the XFRAME firmware.
@@ -947,24 +960,22 @@ xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
* this routine.
*/
static boolean_t
-xgell_send(xge_hal_device_t *hldev, mblk_t *mp)
+xgell_send(xgelldev_t *lldev, mblk_t *mp)
{
mblk_t *bp;
- int retry, repeat;
+ boolean_t retry;
+ xge_hal_device_t *hldev = lldev->devh;
xge_hal_status_e status;
xge_hal_dtr_h dtr;
- xgelldev_t *lldev = xge_hal_device_private(hldev);
xgell_txd_priv_t *txd_priv;
- uint32_t pflags;
-#ifndef XGELL_TX_NOMAP_COPY
- int handle_cnt, frag_cnt, ret, i;
-#endif
+ uint32_t hckflags;
+ uint32_t mss;
+ int handle_cnt, frag_cnt, ret, i, copied;
+ boolean_t used_copy;
_begin:
- retry = repeat = 0;
-#ifndef XGELL_TX_NOMAP_COPY
+ retry = B_FALSE;
handle_cnt = frag_cnt = 0;
-#endif
if (!lldev->is_initialized || lldev->in_reset)
return (B_FALSE);
@@ -976,14 +987,14 @@ _begin:
* gld through gld_sched call, when the free dtrs count exceeds
* the higher threshold.
*/
- if (__hal_channel_dtr_count(lldev->fifo_channel)
+ if (xge_hal_channel_dtr_count(lldev->fifo_channel)
<= XGELL_TX_LEVEL_LOW) {
xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit,"
"free descriptors count at low threshold %d",
XGELL_IFNAME, lldev->instance,
((xge_hal_channel_t *)lldev->fifo_channel)->post_qid,
XGELL_TX_LEVEL_LOW);
- retry = 1;
+ retry = B_TRUE;
goto _exit;
}
@@ -996,7 +1007,7 @@ _begin:
lldev->instance,
((xge_hal_channel_t *)
lldev->fifo_channel)->post_qid);
- retry = 1;
+ retry = B_TRUE;
goto _exit;
case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
@@ -1004,7 +1015,7 @@ _begin:
lldev->instance,
((xge_hal_channel_t *)
lldev->fifo_channel)->post_qid);
- retry = 1;
+ retry = B_TRUE;
goto _exit;
default:
return (B_FALSE);
@@ -1029,31 +1040,17 @@ _begin:
*
* evhp = (struct ether_vlan_header *)mp->b_rptr;
* if (evhp->ether_tpid == htons(VLAN_TPID)) {
- * tci = ntohs(evhp->ether_tci);
- * (void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
+ * tci = ntohs(evhp->ether_tci);
+ * (void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
* 2 * ETHERADDRL);
- * mp->b_rptr += VLAN_TAGSZ;
+ * mp->b_rptr += VLAN_TAGSZ;
*
- * xge_hal_fifo_dtr_vlan_set(dtr, tci);
+ * xge_hal_fifo_dtr_vlan_set(dtr, tci);
* }
*/
-#ifdef XGELL_TX_NOMAP_COPY
- for (bp = mp; bp != NULL; bp = bp->b_cont) {
- int mblen;
- xge_hal_status_e rc;
-
- /* skip zero-length message blocks */
- mblen = MBLKL(bp);
- if (mblen == 0) {
- continue;
- }
- rc = xge_hal_fifo_dtr_buffer_append(lldev->fifo_channel, dtr,
- bp->b_rptr, mblen);
- xge_assert(rc == XGE_HAL_OK);
- }
- xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel, dtr, 0);
-#else
+ copied = 0;
+ used_copy = B_FALSE;
for (bp = mp; bp != NULL; bp = bp->b_cont) {
int mblen;
uint_t ncookies;
@@ -1066,12 +1063,36 @@ _begin:
continue;
}
+ /*
+		 * Check the message length to decide whether to DMA or
+		 * bcopy() the data into the tx descriptor(s).
+ */
+ if (mblen < lldev->config.tx_dma_lowat &&
+ (copied + mblen) < lldev->tx_copied_max) {
+ xge_hal_status_e rc;
+ rc = xge_hal_fifo_dtr_buffer_append(lldev->fifo_channel,
+ dtr, bp->b_rptr, mblen);
+ if (rc == XGE_HAL_OK) {
+ used_copy = B_TRUE;
+ copied += mblen;
+ continue;
+ } else if (used_copy) {
+ xge_hal_fifo_dtr_buffer_finalize(
+ lldev->fifo_channel, dtr, frag_cnt++);
+ used_copy = B_FALSE;
+ }
+ } else if (used_copy) {
+ xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel,
+ dtr, frag_cnt++);
+ used_copy = B_FALSE;
+ }
+
ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
DDI_DMA_DONTWAIT, 0, &dma_handle);
if (ret != DDI_SUCCESS) {
xge_debug_ll(XGE_ERR,
- "%s%d: can not allocate dma handle",
- XGELL_IFNAME, lldev->instance);
+ "%s%d: can not allocate dma handle", XGELL_IFNAME,
+ lldev->instance);
goto _exit_cleanup;
}
@@ -1104,7 +1125,7 @@ _begin:
goto _exit_cleanup;
}
- if (ncookies + frag_cnt > XGE_HAL_DEFAULT_FIFO_FRAGS) {
+ if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
"requested c:%d+f:%d", XGELL_IFNAME,
lldev->instance, ncookies, frag_cnt);
@@ -1128,7 +1149,7 @@ _begin:
if (bp->b_cont &&
(frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
- XGE_HAL_DEFAULT_FIFO_FRAGS)) {
+ hldev->config.fifo.max_frags)) {
mblk_t *nmp;
xge_debug_ll(XGE_TRACE,
@@ -1146,15 +1167,28 @@ _begin:
}
}
+ /* finalize unfinished copies */
+ if (used_copy) {
+ xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel, dtr,
+ frag_cnt++);
+ }
+
txd_priv->handle_cnt = handle_cnt;
-#endif /* XGELL_TX_NOMAP_COPY */
- hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &pflags);
- if (pflags & HCK_IPV4_HDRCKSUM) {
+ /*
+ * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
+ * do all necessary work.
+ */
+ hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, &mss, &hckflags);
+ if ((hckflags & HW_LSO) && (mss != 0)) {
+ xge_hal_fifo_dtr_mss_set(dtr, mss);
+ }
+
+ if (hckflags & HCK_IPV4_HDRCKSUM) {
xge_hal_fifo_dtr_cksum_set_bits(dtr,
XGE_HAL_TXD_TX_CKO_IPV4_EN);
}
- if (pflags & HCK_FULLCKSUM) {
+ if (hckflags & HCK_FULLCKSUM) {
xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
XGE_HAL_TXD_TX_CKO_UDP_EN);
}
@@ -1165,20 +1199,14 @@ _begin:
_exit_cleanup:
-#if !defined(XGELL_TX_NOMAP_COPY)
for (i = 0; i < handle_cnt; i++) {
(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
ddi_dma_free_handle(&txd_priv->dma_handles[i]);
txd_priv->dma_handles[i] = 0;
}
-#endif
xge_hal_fifo_dtr_free(lldev->fifo_channel, dtr);
- if (repeat) {
- goto _begin;
- }
-
_exit:
if (retry) {
if (lldev->resched_avail != lldev->resched_send &&
@@ -1197,7 +1225,7 @@ _exit:
/*
* xge_m_tx
- * @arg: pointer to the s2hal_device_t structure
+ * @arg: pointer to the xgelldev_t structure
* @resid: resource id
* @mp: pointer to the message buffer
*
@@ -1206,14 +1234,14 @@ _exit:
static mblk_t *
xgell_m_tx(void *arg, mblk_t *mp)
{
- xge_hal_device_t *hldev = arg;
+ xgelldev_t *lldev = arg;
mblk_t *next;
while (mp != NULL) {
next = mp->b_next;
mp->b_next = NULL;
- if (!xgell_send(hldev, mp)) {
+ if (!xgell_send(lldev, mp)) {
mp->b_next = next;
break;
}
@@ -1238,8 +1266,12 @@ xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
if (state == XGE_HAL_DTR_STATE_POSTED) {
+ xgelldev_t *lldev = rx_buffer->lldev;
+
+ mutex_enter(&lldev->bf_pool.pool_lock);
xge_hal_ring_dtr_free(channelh, dtrh);
xgell_rx_buffer_release(rx_buffer);
+ mutex_exit(&lldev->bf_pool.pool_lock);
}
}
@@ -1256,9 +1288,8 @@ xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
xgell_txd_priv_t *txd_priv =
((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
mblk_t *mp = txd_priv->mblk;
-#if !defined(XGELL_TX_NOMAP_COPY)
int i;
-#endif
+
/*
* for Tx we must clean up the DTR *only* if it has been
* posted!
@@ -1267,14 +1298,12 @@ xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
return;
}
-#if !defined(XGELL_TX_NOMAP_COPY)
for (i = 0; i < txd_priv->handle_cnt; i++) {
xge_assert(txd_priv->dma_handles[i]);
(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
ddi_dma_free_handle(&txd_priv->dma_handles[i]);
txd_priv->dma_handles[i] = 0;
}
-#endif
xge_hal_fifo_dtr_free(channelh, dtrh);
@@ -1390,6 +1419,17 @@ xgell_initiate_start(xgelldev_t *lldev)
return (EIO);
}
+ /* tune jumbo/normal frame UFC counters */
+	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_b =
+ maxpkt > XGE_HAL_DEFAULT_MTU ?
+ XGE_HAL_DEFAULT_RX_UFC_B_J :
+ XGE_HAL_DEFAULT_RX_UFC_B_N;
+
+	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_c =
+ maxpkt > XGE_HAL_DEFAULT_MTU ?
+ XGE_HAL_DEFAULT_RX_UFC_C_J :
+ XGE_HAL_DEFAULT_RX_UFC_C_N;
+
/* now, enable the device */
status = xge_hal_device_enable(lldev->devh);
if (status != XGE_HAL_OK) {
@@ -1414,10 +1454,6 @@ xgell_initiate_start(xgelldev_t *lldev)
return (ENOMEM);
}
-#ifdef XGELL_TX_NOMAP_COPY
- hldev->config.fifo.alignment_size = XGELL_MAX_FRAME_SIZE(hldev);
-#endif
-
if (!xgell_tx_open(lldev)) {
status = xge_hal_device_disable(lldev->devh);
if (status != XGE_HAL_OK) {
@@ -1487,8 +1523,8 @@ xgell_initiate_stop(xgelldev_t *lldev)
static int
xgell_m_start(void *arg)
{
- xge_hal_device_t *hldev = arg;
- xgelldev_t *lldev = xge_hal_device_private(hldev);
+ xgelldev_t *lldev = arg;
+ xge_hal_device_t *hldev = lldev->devh;
int ret;
xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
@@ -1511,12 +1547,6 @@ xgell_m_start(void *arg)
lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
- if (!lldev->timeout_id) {
- xgell_initiate_stop(lldev);
- mutex_exit(&lldev->genlock);
- return (EINVAL);
- }
-
mutex_exit(&lldev->genlock);
return (0);
@@ -1534,17 +1564,11 @@ xgell_m_start(void *arg)
static void
xgell_m_stop(void *arg)
{
- xge_hal_device_t *hldev;
- xgelldev_t *lldev;
+ xgelldev_t *lldev = arg;
+ xge_hal_device_t *hldev = lldev->devh;
xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
- hldev = arg;
- xge_assert(hldev);
-
- lldev = (xgelldev_t *)xge_hal_device_private(hldev);
- xge_assert(lldev);
-
mutex_enter(&lldev->genlock);
if (!lldev->is_initialized) {
xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
@@ -1560,7 +1584,9 @@ xgell_m_stop(void *arg)
mutex_exit(&lldev->genlock);
- (void) untimeout(lldev->timeout_id);
+ if (lldev->timeout_id != 0) {
+ (void) untimeout(lldev->timeout_id);
+ }
xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
}
@@ -1608,8 +1634,8 @@ static int
xgell_m_unicst(void *arg, const uint8_t *macaddr)
{
xge_hal_status_e status;
- xge_hal_device_t *hldev = arg;
- xgelldev_t *lldev = (xgelldev_t *)xge_hal_device_private(hldev);
+ xgelldev_t *lldev = (xgelldev_t *)arg;
+ xge_hal_device_t *hldev = lldev->devh;
xge_debug_ll(XGE_TRACE, "%s", "MAC_UNICST");
xge_debug_ll(XGE_TRACE, "%s", "M_UNICAST");
@@ -1648,8 +1674,8 @@ static int
xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
{
xge_hal_status_e status;
- xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
- xgelldev_t *lldev = xge_hal_device_private(hldev);
+ xgelldev_t *lldev = (xgelldev_t *)arg;
+ xge_hal_device_t *hldev = lldev->devh;
xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
@@ -1692,8 +1718,8 @@ xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
static int
xgell_m_promisc(void *arg, boolean_t on)
{
- xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
- xgelldev_t *lldev = xge_hal_device_private(hldev);
+ xgelldev_t *lldev = (xgelldev_t *)arg;
+ xge_hal_device_t *hldev = lldev->devh;
mutex_enter(&lldev->genlock);
@@ -1728,8 +1754,8 @@ static int
xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
{
xge_hal_stats_hw_info_t *hw_info;
- xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
- xgelldev_t *lldev = xge_hal_device_private(hldev);
+ xgelldev_t *lldev = (xgelldev_t *)arg;
+ xge_hal_device_t *hldev = lldev->devh;
xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");
@@ -1752,23 +1778,28 @@ xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
break;
case MAC_STAT_MULTIRCV:
- *val = hw_info->rmac_vld_mcst_frms;
+ *val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) |
+ hw_info->rmac_vld_mcst_frms;
break;
case MAC_STAT_BRDCSTRCV:
- *val = hw_info->rmac_vld_bcst_frms;
+ *val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) |
+ hw_info->rmac_vld_bcst_frms;
break;
case MAC_STAT_MULTIXMT:
- *val = hw_info->tmac_mcst_frms;
+ *val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) |
+ hw_info->tmac_mcst_frms;
break;
case MAC_STAT_BRDCSTXMT:
- *val = hw_info->tmac_bcst_frms;
+ *val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) |
+ hw_info->tmac_bcst_frms;
break;
case MAC_STAT_RBYTES:
- *val = hw_info->rmac_ttl_octets;
+ *val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) |
+ hw_info->rmac_ttl_octets;
break;
case MAC_STAT_NORCVBUF:
@@ -1776,11 +1807,13 @@ xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
break;
case MAC_STAT_IERRORS:
- *val = hw_info->rmac_discarded_frms;
+ *val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
+ hw_info->rmac_discarded_frms;
break;
case MAC_STAT_OBYTES:
- *val = hw_info->tmac_ttl_octets;
+ *val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) |
+ hw_info->tmac_ttl_octets;
break;
case MAC_STAT_NOXMTBUF:
@@ -1788,15 +1821,18 @@ xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
break;
case MAC_STAT_OERRORS:
- *val = hw_info->tmac_any_err_frms;
+ *val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) |
+ hw_info->tmac_any_err_frms;
break;
case MAC_STAT_IPACKETS:
- *val = hw_info->rmac_vld_frms;
+ *val = ((u64) hw_info->rmac_vld_frms_oflow << 32) |
+ hw_info->rmac_vld_frms;
break;
case MAC_STAT_OPACKETS:
- *val = hw_info->tmac_frms;
+ *val = ((u64) hw_info->tmac_frms_oflow << 32) |
+ hw_info->tmac_frms;
break;
case ETHER_STAT_FCS_ERRORS:
@@ -1839,7 +1875,6 @@ xgell_device_alloc(xge_hal_device_h devh,
lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
- /* allocate mac */
lldev->devh = hldev;
lldev->instance = instance;
lldev->dev_info = dev_info;
@@ -1869,8 +1904,7 @@ xgell_device_free(xgelldev_t *lldev)
static void
xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
- xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
- xgelldev_t *lldev = (xgelldev_t *)xge_hal_device_private(hldev);
+ xgelldev_t *lldev = arg;
struct iocblk *iocp;
int err = 0;
int cmd;
@@ -1945,6 +1979,8 @@ xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
static boolean_t
xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
+ xgelldev_t *lldev = arg;
+
switch (cap) {
case MAC_CAPAB_HCKSUM: {
uint32_t *hcksum_txflags = cap_data;
@@ -1952,13 +1988,17 @@ xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
HCKSUM_IPHDRCKSUM;
break;
}
- case MAC_CAPAB_POLL:
- /*
- * Fallthrough to default, as we don't support GLDv3
- * polling. When blanking is implemented, we will need to
- * change this to return B_TRUE in addition to registering
- * an mc_resources callback.
- */
+ case MAC_CAPAB_LSO: {
+ mac_capab_lso_t *cap_lso = cap_data;
+
+ if (lldev->config.lso_enable) {
+ cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
+ cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN;
+ break;
+ } else {
+ return (B_FALSE);
+ }
+ }
default:
return (B_FALSE);
}
@@ -2242,10 +2282,8 @@ xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
int
xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
{
- mac_register_t *macp;
+ mac_register_t *macp = NULL;
xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
- int err;
-
if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
(caddr_t)lldev) == B_FALSE)
@@ -2289,7 +2327,7 @@ xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
if ((macp = mac_alloc(MAC_VERSION)) == NULL)
goto xgell_register_fail;
macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
- macp->m_driver = hldev;
+ macp->m_driver = lldev;
macp->m_dip = lldev->dev_info;
macp->m_src_addr = hldev->macaddr[0];
macp->m_callbacks = &xgell_m_callbacks;
@@ -2299,11 +2337,15 @@ xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
* Finally, we're ready to register ourselves with the Nemo
* interface; if this succeeds, we're all ready to start()
*/
- err = mac_register(macp, &lldev->mh);
- mac_free(macp);
- if (err != 0)
+
+ if (mac_register(macp, &lldev->mh) != 0)
goto xgell_register_fail;
+	/* Calculate tx_copied_max, the bcopy() budget per TXDL */
+ lldev->tx_copied_max = hldev->config.fifo.max_frags *
+ hldev->config.fifo.alignment_size *
+ hldev->config.fifo.max_aligned_frags;
+
xge_debug_ll(XGE_TRACE, "etherenet device %s%d registered",
XGELL_IFNAME, lldev->instance);
@@ -2315,6 +2357,8 @@ xgell_ndd_fail:
return (DDI_FAILURE);
xgell_register_fail:
+ if (macp != NULL)
+ mac_free(macp);
nd_free(&lldev->ndp);
mutex_destroy(&lldev->genlock);
/* Ignore return value, since RX not start */
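The heart of the adaptive TX change (bug 6390838) is the per-fragment decision inside xgell_send(): fragments smaller than tx_dma_lowat are bcopy()'d into the TXDL's pre-aligned buffer, anything larger is DMA-bound, and a running copied counter enforces the tx_copied_max budget. A condensed restatement of that inner decision (loop context and error paths elided):

	if (mblen < lldev->config.tx_dma_lowat &&
	    (copied + mblen) < lldev->tx_copied_max) {
		/* COPY path: append into the TXDL's aligned buffer */
		if (xge_hal_fifo_dtr_buffer_append(lldev->fifo_channel,
		    dtr, bp->b_rptr, mblen) == XGE_HAL_OK) {
			used_copy = B_TRUE;
			copied += mblen;
			continue;	/* next fragment */
		}
	}
	/* DMA path: close any pending copied run, then bind a handle */
	if (used_copy) {
		xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel,
		    dtr, frag_cnt++);
		used_copy = B_FALSE;
	}
	/* ... ddi_dma_alloc_handle() + ddi_dma_addr_bind_handle() ... */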
diff --git a/usr/src/uts/common/io/xge/drv/xgell.h b/usr/src/uts/common/io/xge/drv/xgell.h
index 7b3fbb43f0..375571de52 100644
--- a/usr/src/uts/common/io/xge/drv/xgell.h
+++ b/usr/src/uts/common/io/xge/drv/xgell.h
@@ -80,19 +80,41 @@ extern "C" {
#define XGELL_L3_ALIGNED 1
#endif
-/* Control driver to copy or DMA received packets */
-#define XGELL_DMA_BUFFER_SIZE_LOWAT 256
+/*
+ * These default values can be overridden by values in xge.conf.
+ * In xge.conf, the user has to specify actual values (not percentages).
+ */
+#define XGELL_RX_BUFFER_TOTAL (XGE_HAL_RING_RXDS_PER_BLOCK(1) * 6)
+#define XGELL_RX_BUFFER_POST_HIWAT (XGE_HAL_RING_RXDS_PER_BLOCK(1) * 5)
-/* There default values can be overrided by vaules in xge.conf */
-#define XGELL_RX_BUFFER_TOTAL (1024 * 6) /* 6K */
-#define XGELL_RX_BUFFER_POST_HIWAT (1024 * 3) /* 3K */
-#define XGELL_RX_BUFFER_RECYCLE_HIWAT 64
+/* Control driver to copy or DMA received packets */
+#define XGELL_RX_DMA_LOWAT 256
#define XGELL_RING_MAIN_QID 0
+#if defined(__x86)
+#define XGELL_TX_DMA_LOWAT 128
+#else
+#define XGELL_TX_DMA_LOWAT 1024
+#endif
+
+/*
+ * Try to collapse up to XGELL_RX_PKT_BURST packets into a single mblk
+ * chain before mac_rx() is called.
+ */
+#define XGELL_RX_PKT_BURST 32
+
/* About 1s */
#define XGE_DEV_POLL_TICKS drv_usectohz(1000000)
+#define XGELL_LSO_MAXLEN 65535
+#define XGELL_CONF_ENABLE_BY_DEFAULT 1
+#define XGELL_CONF_DISABLE_BY_DEFAULT 0
+
+/* LRO configuration */
+#define XGE_HAL_DEFAULT_LRO_SG_SIZE 8 /* <=2 LRO fix not required */
+#define XGE_HAL_DEFAULT_LRO_FRM_LEN 65535
+
/*
 * If HAL could provide default values for all tunables, we'll remove the
 * following macros.
@@ -100,53 +122,35 @@ extern "C" {
*/
#define XGE_HAL_DEFAULT_USE_HARDCODE -1
-/*
- * The reason to define different values for Link Utilization interrupts is
- * different performance numbers between SPARC and x86 platforms.
- */
-#if defined(__sparc)
-#define XGE_HAL_DEFAULT_TX_URANGE_A 2
-#define XGE_HAL_DEFAULT_TX_UFC_A 1
-#define XGE_HAL_DEFAULT_TX_URANGE_B 5
-#define XGE_HAL_DEFAULT_TX_UFC_B 10
-#define XGE_HAL_DEFAULT_TX_URANGE_C 10
-#define XGE_HAL_DEFAULT_TX_UFC_C 40
-#define XGE_HAL_DEFAULT_TX_UFC_D 80
+/* bimodal adaptive schema defaults - ENABLED */
+#define XGE_HAL_DEFAULT_BIMODAL_INTERRUPTS -1
+#define XGE_HAL_DEFAULT_BIMODAL_TIMER_LO_US 24
+#define XGE_HAL_DEFAULT_BIMODAL_TIMER_HI_US 256
+
+/* interrupt moderation/utilization defaults */
+#define XGE_HAL_DEFAULT_TX_URANGE_A 5
+#define XGE_HAL_DEFAULT_TX_URANGE_B 15
+#define XGE_HAL_DEFAULT_TX_URANGE_C 30
+#define XGE_HAL_DEFAULT_TX_UFC_A 15
+#define XGE_HAL_DEFAULT_TX_UFC_B 30
+#define XGE_HAL_DEFAULT_TX_UFC_C 45
+#define XGE_HAL_DEFAULT_TX_UFC_D 60
#define XGE_HAL_DEFAULT_TX_TIMER_CI_EN 1
#define XGE_HAL_DEFAULT_TX_TIMER_AC_EN 1
-#define XGE_HAL_DEFAULT_TX_TIMER_VAL 4000
-#define XGE_HAL_DEFAULT_INDICATE_MAX_PKTS 128
-#define XGE_HAL_DEFAULT_RX_URANGE_A 2
-#define XGE_HAL_DEFAULT_RX_UFC_A 1
-#define XGE_HAL_DEFAULT_RX_URANGE_B 5
-#define XGE_HAL_DEFAULT_RX_UFC_B 10
-#define XGE_HAL_DEFAULT_RX_URANGE_C 10
-#define XGE_HAL_DEFAULT_RX_UFC_C 40
-#define XGE_HAL_DEFAULT_RX_UFC_D 80
-#define XGE_HAL_DEFAULT_RX_TIMER_AC_EN 1
-#define XGE_HAL_DEFAULT_RX_TIMER_VAL 24
-#else
-#define XGE_HAL_DEFAULT_TX_URANGE_A 10
-#define XGE_HAL_DEFAULT_TX_UFC_A 1
-#define XGE_HAL_DEFAULT_TX_URANGE_B 20
-#define XGE_HAL_DEFAULT_TX_UFC_B 10
-#define XGE_HAL_DEFAULT_TX_URANGE_C 50
-#define XGE_HAL_DEFAULT_TX_UFC_C 40
-#define XGE_HAL_DEFAULT_TX_UFC_D 80
-#define XGE_HAL_DEFAULT_TX_TIMER_CI_EN 1
-#define XGE_HAL_DEFAULT_TX_TIMER_AC_EN 1
-#define XGE_HAL_DEFAULT_TX_TIMER_VAL 4000
-#define XGE_HAL_DEFAULT_INDICATE_MAX_PKTS 128
+#define XGE_HAL_DEFAULT_TX_TIMER_VAL 10000
+#define XGE_HAL_DEFAULT_INDICATE_MAX_PKTS_B 512 /* bimodal */
+#define XGE_HAL_DEFAULT_INDICATE_MAX_PKTS_N 256 /* normal UFC */
#define XGE_HAL_DEFAULT_RX_URANGE_A 10
-#define XGE_HAL_DEFAULT_RX_UFC_A 1
-#define XGE_HAL_DEFAULT_RX_URANGE_B 20
-#define XGE_HAL_DEFAULT_RX_UFC_B 10
+#define XGE_HAL_DEFAULT_RX_URANGE_B 30
#define XGE_HAL_DEFAULT_RX_URANGE_C 50
-#define XGE_HAL_DEFAULT_RX_UFC_C 40
-#define XGE_HAL_DEFAULT_RX_UFC_D 80
+#define XGE_HAL_DEFAULT_RX_UFC_A 1
+#define XGE_HAL_DEFAULT_RX_UFC_B_J 2
+#define XGE_HAL_DEFAULT_RX_UFC_B_N 8
+#define XGE_HAL_DEFAULT_RX_UFC_C_J 4
+#define XGE_HAL_DEFAULT_RX_UFC_C_N 16
+#define XGE_HAL_DEFAULT_RX_UFC_D 32
#define XGE_HAL_DEFAULT_RX_TIMER_AC_EN 1
-#define XGE_HAL_DEFAULT_RX_TIMER_VAL 24
-#endif
+#define XGE_HAL_DEFAULT_RX_TIMER_VAL 384
#define XGE_HAL_DEFAULT_FIFO_QUEUE_LENGTH_J 2048
#define XGE_HAL_DEFAULT_FIFO_QUEUE_LENGTH_N 4096
@@ -154,31 +158,25 @@ extern "C" {
#define XGE_HAL_DEFAULT_FIFO_RESERVE_THRESHOLD 0
#define XGE_HAL_DEFAULT_FIFO_MEMBLOCK_SIZE PAGESIZE
-#ifdef XGELL_TX_NOMAP_COPY
-
-#define XGE_HAL_DEFAULT_FIFO_FRAGS 1
-#define XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD 0
-#define XGE_HAL_DEFAULT_FIFO_ALIGNMENT_SIZE (XGE_HAL_MAC_HEADER_MAX_SIZE + \
- XGE_HAL_DEFAULT_MTU)
+/*
+ * This will force the HAL to allocate an extra copy buffer per TXDL,
+ * whose size is calculated by the formula:
+ *
+ * (ALIGNMENT_SIZE * ALIGNED_FRAGS)
+ */
+#define XGE_HAL_DEFAULT_FIFO_ALIGNMENT_SIZE 4096
#define XGE_HAL_DEFAULT_FIFO_MAX_ALIGNED_FRAGS 1
-#else
-
#if defined(__x86)
-#define XGE_HAL_DEFAULT_FIFO_FRAGS 32
+#define XGE_HAL_DEFAULT_FIFO_FRAGS 128
#else
-#define XGE_HAL_DEFAULT_FIFO_FRAGS 16
+#define XGE_HAL_DEFAULT_FIFO_FRAGS 64
#endif
-#define XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD 4
-#define XGE_HAL_DEFAULT_FIFO_ALIGNMENT_SIZE sizeof (uint64_t)
-#define XGE_HAL_DEFAULT_FIFO_MAX_ALIGNED_FRAGS 6
-
-#endif /* XGELL_TX_NOMAP_COPY */
+#define XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD 18
-#define XGE_HAL_DEFAULT_RING_QUEUE_BLOCKS_J 16
-#define XGE_HAL_DEFAULT_RING_QUEUE_BLOCKS_N 32
+#define XGE_HAL_DEFAULT_RING_QUEUE_BLOCKS_J 2
+#define XGE_HAL_DEFAULT_RING_QUEUE_BLOCKS_N 2
#define XGE_HAL_RING_QUEUE_BUFFER_MODE_DEFAULT 1
-#define XGE_HAL_DEFAULT_RING_QUEUE_SIZE 64
-#define XGE_HAL_DEFAULT_BACKOFF_INTERVAL_US 35
+#define XGE_HAL_DEFAULT_BACKOFF_INTERVAL_US 64
#define XGE_HAL_DEFAULT_RING_PRIORITY 0
#define XGE_HAL_DEFAULT_RING_MEMBLOCK_SIZE PAGESIZE
@@ -188,15 +186,18 @@ extern "C" {
#define XGE_HAL_DEFAULT_RMAC_HIGH_PTIME 65535
#define XGE_HAL_DEFAULT_MC_PAUSE_THRESHOLD_Q0Q3 187
#define XGE_HAL_DEFAULT_MC_PAUSE_THRESHOLD_Q4Q7 187
+#define XGE_HAL_DEFAULT_RMAC_PAUSE_GEN_EN 1
+#define XGE_HAL_DEFAULT_RMAC_PAUSE_GEN_DIS 0
+#define XGE_HAL_DEFAULT_RMAC_PAUSE_RCV_EN 1
+#define XGE_HAL_DEFAULT_RMAC_PAUSE_RCV_DIS 0
#define XGE_HAL_DEFAULT_INITIAL_MTU XGE_HAL_DEFAULT_MTU /* 1500 */
-#define XGE_HAL_DEFAULT_ISR_POLLING_CNT 4
+#define XGE_HAL_DEFAULT_ISR_POLLING_CNT 0
#define XGE_HAL_DEFAULT_LATENCY_TIMER 255
-#define XGE_HAL_DEFAULT_SPLIT_TRANSACTION 1 /* 2 splits */
+#define XGE_HAL_DEFAULT_SPLIT_TRANSACTION XGE_HAL_TWO_SPLIT_TRANSACTION
#define XGE_HAL_DEFAULT_BIOS_MMRB_COUNT -1
#define XGE_HAL_DEFAULT_MMRB_COUNT 1 /* 1k */
-#define XGE_HAL_DEFAULT_SHARED_SPLITS 0
+#define XGE_HAL_DEFAULT_SHARED_SPLITS 1
#define XGE_HAL_DEFAULT_STATS_REFRESH_TIME 1
-#define XGE_HAL_PCI_FREQ_MHERZ_DEFAULT 133
/*
* default the size of buffers allocated for ndd interface functions
@@ -221,9 +222,13 @@ typedef enum xgell_event_e {
} xgell_event_e;
typedef struct {
+ int rx_pkt_burst;
int rx_buffer_total;
int rx_buffer_post_hiwat;
- int rx_buffer_recycle_hiwat;
+ int rx_dma_lowat;
+ int tx_dma_lowat;
+ int msi_enable;
+ int lso_enable;
} xgell_config_t;
typedef struct xgell_rx_buffer_t {
@@ -245,7 +250,6 @@ typedef struct xgell_rx_buffer_pool_t {
uint_t total; /* total buffers */
uint_t size; /* buffer size */
xgell_rx_buffer_t *head; /* header pointer */
- uint_t recycle_hiwat; /* hiwat to recycle */
uint_t free; /* free buffers */
uint_t post; /* posted buffers */
uint_t post_hiwat; /* hiwat to stop post */
@@ -271,6 +275,7 @@ struct xgelldev {
int resched_avail;
int resched_send;
int resched_retry;
+ int tx_copied_max;
xge_hal_channel_h fifo_channel;
volatile int is_initialized;
xgell_config_t config;
@@ -281,10 +286,8 @@ struct xgelldev {
typedef struct {
mblk_t *mblk;
-#if !defined(XGELL_TX_NOMAP_COPY)
ddi_dma_handle_t dma_handles[XGE_HAL_DEFAULT_FIFO_FRAGS];
int handle_cnt;
-#endif
} xgell_txd_priv_t;
typedef struct {
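With the new defaults above, the tx_copied_max budget computed at registration time (max_frags * alignment_size * max_aligned_frags, per the xgell.c hunk) works out as follows; the figures are illustrative and assume no xge.conf overrides:

	/* x86:   128 * 4096 * 1 = 524288 bytes of bcopy() budget per TXDL */
	/* SPARC:  64 * 4096 * 1 = 262144 bytes of bcopy() budget per TXDL */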
diff --git a/usr/src/uts/common/io/xge/hal/include/build-version.h b/usr/src/uts/common/io/xge/hal/include/build-version.h
index d3d70de8b1..35a54c808d 100644
--- a/usr/src/uts/common/io/xge/hal/include/build-version.h
+++ b/usr/src/uts/common/io/xge/hal/include/build-version.h
@@ -17,10 +17,12 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
+ *
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef BUILD_VERSION_H
#define BUILD_VERSION_H
/* Do not edit! Automatically generated when released.*/
-#define GENERATED_BUILD_VERSION "4254"
+#define GENERATED_BUILD_VERSION "6765"
#endif /* BUILD_VERSION_H */
diff --git a/usr/src/uts/common/io/xge/hal/include/version.h b/usr/src/uts/common/io/xge/hal/include/version.h
index 9a095ba7c4..07cf143953 100644
--- a/usr/src/uts/common/io/xge/hal/include/version.h
+++ b/usr/src/uts/common/io/xge/hal/include/version.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : version.h
- *
- * Description: versioning file
*
- * Created: 3 September 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef VERSION_H
@@ -36,7 +27,7 @@
#include "build-version.h"
#define XGE_HAL_VERSION_MAJOR "2"
-#define XGE_HAL_VERSION_MINOR "0"
+#define XGE_HAL_VERSION_MINOR "5"
#define XGE_HAL_VERSION_FIX "0"
#define XGE_HAL_VERSION_BUILD GENERATED_BUILD_VERSION
#define XGE_HAL_VERSION XGE_HAL_VERSION_MAJOR"."XGE_HAL_VERSION_MINOR"."\
diff --git a/usr/src/uts/common/io/xge/hal/include/xge-debug.h b/usr/src/uts/common/io/xge/hal/include/xge-debug.h
index 65ae6e60d4..008b7c1f98 100644
--- a/usr/src/uts/common/io/xge/hal/include/xge-debug.h
+++ b/usr/src/uts/common/io/xge/hal/include/xge-debug.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xge-debug.h
*
- * Description: debug facilities
- *
- * Created: 6 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_DEBUG_H
@@ -35,6 +26,8 @@
#include "xge-os-pal.h"
+__EXTERN_BEGIN_DECLS
+
/* to make some gcc versions happier */
#ifndef __func__
#define __func__ __FUNCTION__
@@ -87,6 +80,13 @@ extern int *g_level;
* @XGE_COMPONENT_HAL_RING: do debug for xge core ring module
* @XGE_COMPONENT_HAL_CHANNEL: do debug for xge core channel module
* @XGE_COMPONENT_HAL_DEVICE: do debug for xge core device module
+ * @XGE_COMPONENT_HAL_DMQ: do debug for xge core DMQ module
+ * @XGE_COMPONENT_HAL_UMQ: do debug for xge core UMQ module
+ * @XGE_COMPONENT_HAL_SQ: do debug for xge core SQ module
+ * @XGE_COMPONENT_HAL_SRQ: do debug for xge core SRQ module
+ * @XGE_COMPONENT_HAL_CRQ: do debug for xge core CRQ module
+ * @XGE_COMPONENT_HAL_LRQ: do debug for xge core LRQ module
+ * @XGE_COMPONENT_HAL_LCQ: do debug for xge core LCQ module
* @XGE_COMPONENT_CORE: do debug for xge KMA core module
* @XGE_COMPONENT_OSDEP: do debug for xge KMA os dependent parts
* @XGE_COMPONENT_LL: do debug for xge link layer module
@@ -96,17 +96,28 @@ extern int *g_level;
* or libraries during compilation and runtime. Makefile must declare
* XGE_DEBUG_MODULE_MASK macro and set it to proper value.
*/
-#define XGE_COMPONENT_HAL_CONFIG 0x1
-#define XGE_COMPONENT_HAL_FIFO 0x2
-#define XGE_COMPONENT_HAL_RING 0x4
-#define XGE_COMPONENT_HAL_CHANNEL 0x8
-#define XGE_COMPONENT_HAL_DEVICE 0x10
-#define XGE_COMPONENT_HAL_MM 0x20
-#define XGE_COMPONENT_HAL_QUEUE 0x40
-#define XGE_COMPONENT_HAL_STATS 0x100
+#define XGE_COMPONENT_HAL_CONFIG 0x00000001
+#define XGE_COMPONENT_HAL_FIFO 0x00000002
+#define XGE_COMPONENT_HAL_RING 0x00000004
+#define XGE_COMPONENT_HAL_CHANNEL 0x00000008
+#define XGE_COMPONENT_HAL_DEVICE 0x00000010
+#define XGE_COMPONENT_HAL_MM 0x00000020
+#define XGE_COMPONENT_HAL_QUEUE 0x00000040
+#define XGE_COMPONENT_HAL_STATS 0x00000100
+#ifdef XGEHAL_RNIC
+#define XGE_COMPONENT_HAL_DMQ 0x00000200
+#define XGE_COMPONENT_HAL_UMQ 0x00000400
+#define XGE_COMPONENT_HAL_SQ 0x00000800
+#define XGE_COMPONENT_HAL_SRQ 0x00001000
+#define XGE_COMPONENT_HAL_CQRQ 0x00002000
+#define XGE_COMPONENT_HAL_LRQ 0x00004000
+#define XGE_COMPONENT_HAL_LCQ 0x00008000
+#define XGE_COMPONENT_HAL_POOL 0x00010000
+#endif
+
/* space for CORE_XXX */
-#define XGE_COMPONENT_OSDEP 0x1000
-#define XGE_COMPONENT_LL 0x2000
+#define XGE_COMPONENT_OSDEP 0x10000000
+#define XGE_COMPONENT_LL 0x20000000
#define XGE_COMPONENT_ALL 0xffffffff
#ifndef XGE_DEBUG_MODULE_MASK
@@ -303,6 +314,154 @@ static inline void xge_debug_device(xge_debug_level_e level, char *fmt, ...) {}
#endif /* __GNUC__ */
#endif
+#ifdef XGEHAL_RNIC
+
+#if (XGE_COMPONENT_HAL_DMQ & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_DMQ;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_dmq(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_DMQ, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_dmq(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_UMQ & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_UMQ;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_umq(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_UMQ, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_umq(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_SQ & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_SQ;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_sq(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_SQ, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_sq(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_SRQ & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_SRQ;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_srq(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_SRQ, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_srq(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_CQRQ & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_CQRQ;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_cqrq(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_CQRQ, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_cqrq(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_LRQ & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_lrq(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_LRQ;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_lrq(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_LRQ, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_lrq(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_lrq(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_LCQ & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_lcq(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_LCQ;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_lcq(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_LCQ, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_lcq(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_lcq(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#if (XGE_COMPONENT_HAL_POOL & XGE_DEBUG_MODULE_MASK)
+#ifndef __GNUC__
+static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {
+ u32 module = XGE_COMPONENT_HAL_POOL;
+ xge_debug(module, level, fmt);
+}
+#else /* __GNUC__ */
+#define xge_debug_pool(level, fmt...) \
+ xge_debug(XGE_COMPONENT_HAL_POOL, level, fmt)
+#endif /* __GNUC__ */
+#else
+#ifndef __GNUC__
+static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {}
+#else /* __GNUC__ */
+#define xge_debug_pool(level, fmt...)
+#endif /* __GNUC__ */
+#endif
+
+#endif
+
#if (XGE_COMPONENT_OSDEP & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_osdep(xge_debug_level_e level, char *fmt, ...) {
@@ -350,6 +509,14 @@ static inline void xge_debug_fifo(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_ring(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_channel(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_device(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_lrq(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_lcq(xge_debug_level_e level, char *fmt, ...) {}
+static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_hal(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_osdep(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_ll(xge_debug_level_e level, char *fmt, ...) {}
@@ -375,4 +542,6 @@ static inline void xge_debug_ll(xge_debug_level_e level, char *fmt, ...) {}
#define xge_assert(test)
#endif /* end of XGE_DEBUG_ASSERT */
+__EXTERN_END_DECLS
+
#endif /* XGE_DEBUG_H */
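
The new per-component wrappers above (srq, cqrq, lrq, lcq, pool and friends) all reduce to the same compile-time masking pattern: when the component's bit is set in XGE_DEBUG_MODULE_MASK the wrapper forwards to xge_debug(), otherwise it collapses to an empty inline (non-GNU compilers) or an empty variadic macro (gcc). A minimal standalone sketch of the gcc branch, with hypothetical component and mask values; named variadic macros (fmt...) are the gcc extension the header's __GNUC__ branch relies on:

    #include <stdarg.h>
    #include <stdio.h>

    #define COMP_POOL 0x1
    #define COMP_SRQ  0x2
    #define DEBUG_MASK COMP_POOL            /* only POOL messages stay in the build */

    static void debug_printf(const char *fmt, ...)
    {
            va_list ap;
            va_start(ap, fmt);
            (void) vprintf(fmt, ap);
            va_end(ap);
    }

    #if (COMP_POOL & DEBUG_MASK)
    #define debug_pool(fmt...) debug_printf(fmt)    /* forwards to the printer */
    #else
    #define debug_pool(fmt...)
    #endif

    #if (COMP_SRQ & DEBUG_MASK)
    #define debug_srq(fmt...) debug_printf(fmt)
    #else
    #define debug_srq(fmt...)                       /* compiles to nothing */
    #endif

    int main(void)
    {
            debug_pool("pool event %d\n", 1);   /* printed */
            debug_srq("srq event %d\n", 2);     /* vanishes at preprocessing time */
            return (0);
    }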
diff --git a/usr/src/uts/common/io/xge/hal/include/xge-defs.h b/usr/src/uts/common/io/xge/hal/include/xge-defs.h
index b89903ba67..9f05f2b9ac 100644
--- a/usr/src/uts/common/io/xge/hal/include/xge-defs.h
+++ b/usr/src/uts/common/io/xge/hal/include/xge-defs.h
@@ -17,35 +17,36 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
*
- * FileName : xge-defs.h
- *
- * Description: global definitions
- *
- * Created: 13 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_DEFS_H
#define XGE_DEFS_H
-#define XGE_PCI_VENDOR_ID 0x17D5
+#define XGE_PCI_VENDOR_ID 0x17D5
#define XGE_PCI_DEVICE_ID_XENA_1 0x5731
#define XGE_PCI_DEVICE_ID_XENA_2 0x5831
#define XGE_PCI_DEVICE_ID_HERC_1 0x5732
#define XGE_PCI_DEVICE_ID_HERC_2 0x5832
-#define XGE_DRIVER_NAME "Xge driver"
-#define XGE_DRIVER_VENDOR "Neterion, Inc"
-#define XGE_CHIP_FAMILY "Xframe"
-#define XGE_SUPPORTED_MEDIA_0 "Fiber"
+#define XGE_DRIVER_NAME "Xge driver"
+#define XGE_DRIVER_VENDOR "Neterion, Inc"
+#define XGE_CHIP_FAMILY "Xframe"
+#define XGE_SUPPORTED_MEDIA_0 "Fiber"
#include "version.h"
+#if defined(__cplusplus)
+#define __EXTERN_BEGIN_DECLS extern "C" {
+#define __EXTERN_END_DECLS }
+#else
+#define __EXTERN_BEGIN_DECLS
+#define __EXTERN_END_DECLS
+#endif
+
+__EXTERN_BEGIN_DECLS
+
/*---------------------------- DMA attributes ------------------------------*/
/* Used in xge_os_dma_malloc() and xge_os_dma_map() */
/*---------------------------- DMA attributes ------------------------------*/
@@ -61,6 +62,8 @@
/*---------------------------- common stuffs -------------------------------*/
+#define XGE_OS_LLXFMT "%llx"
+
#ifdef XGE_OS_MEMORY_CHECK
typedef struct {
void *ptr;
@@ -105,7 +108,7 @@ extern int g_malloc_cnt;
g_malloc_arr[i].ptr = NULL; \
if(_check_size && g_malloc_arr[i].size!=_check_size) { \
xge_os_printf("OSPAL: freeing with wrong " \
- "size %d! allocated at %s:%d:%llx:%d", \
+ "size %d! allocated at %s:%d:"XGE_OS_LLXFMT":%d", \
(int)_check_size, \
g_malloc_arr[i].file, \
g_malloc_arr[i].line, \
@@ -117,7 +120,7 @@ extern int g_malloc_cnt;
} \
} \
if (i == XGE_OS_MALLOC_CNT_MAX) { \
- xge_os_printf("OSPAL: ptr %llx not found!", \
+ xge_os_printf("OSPAL: ptr "XGE_OS_LLXFMT" not found!", \
(unsigned long long)(ulong_t)_vaddr); \
} \
}
@@ -126,4 +129,6 @@ extern int g_malloc_cnt;
#define XGE_OS_MEMORY_CHECK_FREE(vaddr, check_size)
#endif
+__EXTERN_END_DECLS
+
#endif /* XGE_DEFS_H */
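
xge-defs.h now centralizes the C++ linkage guards that every HAL header uses, so a single macro pair replaces per-header #ifdef __cplusplus blocks. A minimal sketch of a consumer header; the header name and prototype are hypothetical:

    /* my-header.h: hypothetical consumer of the xge-defs.h linkage guards */
    #include "xge-defs.h"

    __EXTERN_BEGIN_DECLS        /* expands to extern "C" { under a C++ compiler */

    int my_query(void);         /* gets C linkage when included from C++ */

    __EXTERN_END_DECLS          /* closes the extern "C" block */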
diff --git a/usr/src/uts/common/io/xge/hal/include/xge-list.h b/usr/src/uts/common/io/xge/hal/include/xge-list.h
index 1f520accc7..a9abadf3b7 100644
--- a/usr/src/uts/common/io/xge/hal/include/xge-list.h
+++ b/usr/src/uts/common/io/xge/hal/include/xge-list.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xge-list.h
*
- * Description: Generic bi-directional linked list implementation
- *
- * Created: 14 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_LIST_H
@@ -35,6 +26,8 @@
#include "xge-debug.h"
+__EXTERN_BEGIN_DECLS
+
/**
* struct xge_list_t - List item.
* @prev: Previous list item.
@@ -76,6 +69,26 @@ static inline int xge_list_is_empty(xge_list_t *header)
}
/**
+ * xge_list_first_get - Return the first item from the linked list.
+ * header: first element of the list (head)
+ *
+ * Returns the next item from the header.
+ * Returns NULL if the next item is the header itself.
+ * See also: xge_list_remove(), xge_list_insert(), xge_list_t{}.
+ */
+static inline xge_list_t *xge_list_first_get(xge_list_t *header)
+{
+ xge_assert(header != NULL);
+ xge_assert(header->next != NULL);
+ xge_assert(header->prev != NULL);
+
+ if(header->next == header)
+ return NULL;
+ else
+ return header->next;
+}
+
+/**
* xge_list_remove - Remove the specified item from the linked list.
* item: element of the list
*
@@ -172,4 +185,6 @@ static inline void xge_list_insert_before (xge_list_t *new_item,
*/
#define xge_offsetof(t, m) ((size_t) (&((t *)0)->m))
+__EXTERN_END_DECLS
+
#endif /* XGE_LIST_H */
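
Since xge_list_first_get() returns NULL once the list is down to its self-linked header, draining a list is a simple loop. A hedged sketch against the API above; the payload struct is illustrative, and xge_container_of() is assumed to be defined alongside xge_offsetof() in this header:

    #include "xge-list.h"

    typedef struct my_node {
            xge_list_t item;        /* embedded link, as in xge_queue_item_t */
            int        payload;
    } my_node_t;

    /* Drain every node: first_get() returns NULL on the empty (self-linked) list. */
    static int drain(xge_list_t *head)
    {
            xge_list_t *entry;
            int drained = 0;

            while ((entry = xge_list_first_get(head)) != NULL) {
                    my_node_t *node = xge_container_of(entry, my_node_t, item);

                    xge_list_remove(entry);    /* unlink before reusing the node */
                    drained += (node->payload != 0);
            }
            return (drained);
    }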
diff --git a/usr/src/uts/common/io/xge/hal/include/xge-os-pal.h b/usr/src/uts/common/io/xge/hal/include/xge-os-pal.h
index f3d9fc40d3..c7b3459fab 100644
--- a/usr/src/uts/common/io/xge/hal/include/xge-os-pal.h
+++ b/usr/src/uts/common/io/xge/hal/include/xge-os-pal.h
@@ -17,18 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xge-os-pal.h
*
- * Description: top-level header file. works just like switching between
- * os-depndent parts
- *
- * Created: 6st May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_OS_PAL_H
@@ -36,6 +26,8 @@
#include "xge-defs.h"
+__EXTERN_BEGIN_DECLS
+
/*--------------------------- platform switch ------------------------------*/
/* platform specific header */
@@ -119,4 +111,6 @@ extern char *dmesg_start;
#endif /* __GNUC__ */
#endif
+__EXTERN_END_DECLS
+
#endif /* XGE_OS_PAL_H */
diff --git a/usr/src/uts/common/io/xge/hal/include/xge-queue.h b/usr/src/uts/common/io/xge/hal/include/xge-queue.h
index 0f007f398b..7853595db9 100644
--- a/usr/src/uts/common/io/xge/hal/include/xge-queue.h
+++ b/usr/src/uts/common/io/xge/hal/include/xge-queue.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xge-queue.h
*
- * Description: serialized event queue
- *
- * Created: 7 June 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_QUEUE_H
@@ -36,6 +27,9 @@
#include "xge-os-pal.h"
#include "xge-defs.h"
#include "xge-list.h"
+#include "xgehal-event.h"
+
+__EXTERN_BEGIN_DECLS
#define XGE_QUEUE_BUF_SIZE 0x1000
#define XGE_DEFAULT_EVENT_MAX_DATA_SIZE 16
@@ -79,11 +73,11 @@ typedef void* xge_queue_h;
* See also: xge_queue_t{}.
*/
typedef struct xge_queue_item_t {
- xge_list_t item;
- int event_type;
- int data_size;
- int is_critical;
- void *context;
+ xge_list_t item;
+ xge_hal_event_e event_type;
+ int data_size;
+ int is_critical;
+ void *context;
} xge_queue_item_t;
/**
@@ -173,4 +167,6 @@ xge_queue_status_e __io_queue_grow(xge_queue_h qh);
int __queue_get_reset_critical (xge_queue_h qh);
+__EXTERN_END_DECLS
+
#endif /* XGE_QUEUE_H */
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-channel.h b/usr/src/uts/common/io/xge/hal/include/xgehal-channel.h
index f792c5d3f1..5a4e7c2201 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-channel.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-channel.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-channel.h
*
- * Description: HAL channel object functionality
- *
- * Created: 19 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_CHANNEL_H
@@ -38,10 +29,19 @@
#include "xgehal-types.h"
#include "xgehal-stats.h"
+__EXTERN_BEGIN_DECLS
+
/**
* enum xge_hal_channel_type_e - Enumerated channel types.
* @XGE_HAL_CHANNEL_TYPE_FIFO: fifo.
* @XGE_HAL_CHANNEL_TYPE_RING: ring.
+ * @XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: TBD.
+ * @XGE_HAL_CHANNEL_TYPE_HW_RECEIVE_QUEUE: TBD.
+ * @XGE_HAL_CHANNEL_TYPE_HW_COMPLETION_QUEUE: TBD.
+ * @XGE_HAL_CHANNEL_TYPE_LRO_RECEIVE_QUEUE: TBD.
+ * @XGE_HAL_CHANNEL_TYPE_LRO_COMPLETION_QUEUE: TBD.
+ * @XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: TBD.
+ * @XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: TBD.
* @XGE_HAL_CHANNEL_TYPE_MAX: Maximum number of HAL-supported
* (and recognized) channel types. Currently: nine.
*
@@ -51,6 +51,13 @@
typedef enum xge_hal_channel_type_e {
XGE_HAL_CHANNEL_TYPE_FIFO,
XGE_HAL_CHANNEL_TYPE_RING,
+ XGE_HAL_CHANNEL_TYPE_SEND_QUEUE,
+ XGE_HAL_CHANNEL_TYPE_HW_RECEIVE_QUEUE,
+ XGE_HAL_CHANNEL_TYPE_HW_COMPLETION_QUEUE,
+ XGE_HAL_CHANNEL_TYPE_LRO_RECEIVE_QUEUE,
+ XGE_HAL_CHANNEL_TYPE_LRO_COMPLETION_QUEUE,
+ XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE,
+ XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE,
XGE_HAL_CHANNEL_TYPE_MAX
} xge_hal_channel_type_e;
@@ -131,7 +138,7 @@ typedef enum xge_hal_channel_reopen_e {
* Channel callback gets called by HAL if, and only if, there is at least
* one new completion on a given ring or fifo channel. Upon processing the
* first @dtrh ULD is _supposed_ to continue consuming completions
- * using one of the following HAL APIs:
+ * using one of the following HAL APIs:
* - xge_hal_fifo_dtr_next_completed()
* or
* - xge_hal_ring_dtr_next_completed().
@@ -228,6 +235,14 @@ typedef void (*xge_hal_channel_dtr_term_f) (xge_hal_channel_h channelh,
* See also xge_hal_channel_dtr_term_f{}.
* @userdata: User-defined "context" of _that_ channel. Passed back to the
* user as one of the @callback, @dtr_init, and @dtr_term arguments.
+ * @sq_config: Send queue config
+ * @hrq_config: HW receive queue config
+ * @hcq_config: HW completion queue config
+ * @lrq_config: LRO receive queue config
+ * @lcq_config: LRO completion queue config
+ * @dmq_config: Down Message queue config
+ * @umq_config: Up Message queue config
+ *
* @per_dtr_space: If specified (i.e., greater than zero): extra space
* reserved by HAL per each transmit or receive (depending on the
* channel type) descriptor. Can be used to store,
@@ -322,7 +337,6 @@ typedef struct xge_hal_channel_attr_t {
* @is_open: True, if channel is open; false - otherwise.
* @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
* to store per-operation control information.
- *
* HAL channel object. HAL devices (see xge_hal_device_t{}) contains
* zero or more channels. HAL channel contains zero or more descriptors. The
* latter are used by ULD(s) to manage the device and/or send and receive data
@@ -338,8 +352,14 @@ typedef struct {
void **free_arr;
int length;
int free_length;
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) || \
+ defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
spinlock_t free_lock;
+#endif
int compl_index;
+ unsigned int usage_cnt;
+ unsigned int poll_bytes;
+ int unused0;
/* reserve/post data path section */
#ifdef __XGE_WIN__
@@ -386,13 +406,13 @@ typedef struct {
u8 rti;
u8 tti;
u16 unused2;
-#endif
+#endif
#if defined(XGE_HAL_MSI_X)
u64 msix_address;
u32 msix_data;
int msix_idx;
#endif
- int magic;
+ unsigned int magic;
#ifdef __XGE_WIN__
} __xge_os_attr_cacheline_aligned xge_hal_channel_t ;
#else
@@ -422,8 +442,6 @@ __hal_channel_msix_idx(xge_hal_channel_h channelh)
return ((xge_hal_channel_t*)channelh)->msix_vect.idx;
}
-int __hal_channel_dtr_count(xge_hal_channel_h channelh);
-
#if defined(XGE_DEBUG_FP) && (XGE_DEBUG_FP & XGE_DEBUG_FP_CHANNEL)
#define __HAL_STATIC_CHANNEL
#define __HAL_INLINE_CHANNEL
@@ -452,6 +470,9 @@ __hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
/* ========================== CHANNEL PUBLIC API ========================= */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+xge_hal_channel_dtr_count(xge_hal_channel_h channelh);
+
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void*
xge_hal_channel_userdata(xge_hal_channel_h channelh);
@@ -479,4 +500,6 @@ void xge_hal_channel_close(xge_hal_channel_h channelh,
void xge_hal_channel_abort(xge_hal_channel_h channelh,
xge_hal_channel_reopen_e reopen);
+__EXTERN_END_DECLS
+
#endif /* XGE_HAL_CHANNEL_H */
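
Note that __hal_channel_dtr_count() is dropped from the private prototypes and resurfaces as the public fast-path call xge_hal_channel_dtr_count(). A hedged sketch of the kind of ULD-side check this enables; the semantics (descriptors currently available) and the watermark policy are assumptions:

    #include "xgehal-channel.h"

    /* Illustrative back-pressure test before reserving another descriptor. */
    static int can_post(xge_hal_channel_h channelh, int low_watermark)
    {
            return (xge_hal_channel_dtr_count(channelh) > low_watermark);
    }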
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-config.h b/usr/src/uts/common/io/xge/hal/include/xgehal-config.h
index 69cdb75aa1..55e4f3b2c0 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-config.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-config.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-config.h
- *
- * Description: Xframe configuration.
*
- * Created: 14 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_CONFIG_H
@@ -36,8 +27,8 @@
#include "xge-os-pal.h"
#include "xgehal-types.h"
-/*
- */
+__EXTERN_BEGIN_DECLS
+
#define XGE_HAL_DEFAULT_USE_HARDCODE -1
@@ -109,17 +100,13 @@ typedef struct xge_hal_tti_config_t {
#define XGE_HAL_MIN_TX_UFC_C 0
#define XGE_HAL_MAX_TX_UFC_C 65535
- int urange_d;
-#define XGE_HAL_MIN_TX_URANGE_D 0
-#define XGE_HAL_MAX_TX_URANGE_D 100
-
int ufc_d;
#define XGE_HAL_MIN_TX_UFC_D 0
#define XGE_HAL_MAX_TX_UFC_D 65535
int timer_val_us;
-#define XGE_HAL_MIN_TX_TIMER_VAL 0
-#define XGE_HAL_MAX_TX_TIMER_VAL 65535
+#define XGE_HAL_MIN_TX_TIMER_VAL 0
+#define XGE_HAL_MAX_TX_TIMER_VAL 65535
int timer_ac_en;
#define XGE_HAL_MIN_TX_TIMER_AC_EN 0
@@ -196,8 +183,8 @@ typedef struct xge_hal_rti_config_t {
#define XGE_HAL_MAX_RX_TIMER_AC_EN 1
int timer_val_us;
-#define XGE_HAL_MIN_RX_TIMER_VAL 0
-#define XGE_HAL_MAX_RX_TIMER_VAL 65535
+#define XGE_HAL_MIN_RX_TIMER_VAL 0
+#define XGE_HAL_MAX_RX_TIMER_VAL 65535
} xge_hal_rti_config_t;
@@ -238,10 +225,18 @@ typedef struct xge_hal_fifo_queue_t {
#define XGE_HAL_MAX_FIFO_QUEUE_NO_SNOOP_BUFFER 2
#define XGE_HAL_MAX_FIFO_QUEUE_NO_SNOOP_ALL 3
+ int priority;
+#define XGE_HAL_MIN_FIFO_PRIORITY 0
+#define XGE_HAL_MAX_FIFO_PRIORITY 63
+
int configured;
#define XGE_HAL_MIN_FIFO_CONFIGURED 0
#define XGE_HAL_MAX_FIFO_CONFIGURED 1
+#define XGE_HAL_MAX_FIFO_TTI_NUM 7
+#define XGE_HAL_MAX_FIFO_TTI_RING_0 56
+ xge_hal_tti_config_t tti[XGE_HAL_MAX_FIFO_TTI_NUM];
+
} xge_hal_fifo_queue_t;
/**
@@ -404,6 +399,7 @@ typedef struct xge_hal_fifo_config_t {
* upper limit.
* @scatter_mode: Xframe supports two receive scatter modes: A and B.
* For details please refer to Xframe User Guide.
+ * @strip_vlan_tag: TBD
* @queue: Array of all Xframe ring configurations.
*
* Array of ring configurations.
@@ -431,6 +427,7 @@ typedef struct xge_hal_ring_config_t {
/**
* struct xge_hal_mac_config_t - MAC configuration.
+ * @media: Transponder type.
* @tmac_util_period: The sampling period over which the transmit utilization
* is calculated.
* @rmac_util_period: The sampling period over which the receive utilization
@@ -439,6 +436,8 @@ typedef struct xge_hal_ring_config_t {
* the MAC or sent to the host.
* @rmac_bcast_en: Enable frames containing broadcast address to be
* passed to the host.
+ * @rmac_pause_gen_en: Received pause generation enable.
+ * @rmac_pause_rcv_en: Receive pause enable.
* @rmac_pause_time: The value to be inserted in outgoing pause frames.
* Has units of pause quanta (one pause quanta = 512 bit times).
* @mc_pause_threshold_q0q3: Contains thresholds for pause frame generation
@@ -573,19 +572,43 @@ typedef struct xge_hal_mac_config_t {
* @link_stability_period: Specify the period for which the link must be
* stable in order for the adapter to declare "LINK UP".
* The enumerated settings (see Xframe-II UG) are:
- * 0 ........... instantaneous
- * 1 ........... 500 µs
- * 2 ........... 1 ms
- * 3 ........... 64 ms
- * 4 ........... 256 ms
- * 5 ........... 512 ms
- * 6 ........... 1 s
+ * 0 ........... instantaneous
+ * 1 ........... 500 µs
+ * 2 ........... 1 ms
+ * 3 ........... 64 ms
+ * 4 ........... 256 ms
+ * 5 ........... 512 ms
+ * 6 ........... 1 s
* 7 ........... 2 s
- *
+ * @no_isr_events: TBD
* @device_poll_millis: Specify the interval (in milliseconds) between
* successive xge_hal_device_poll() runs.
* @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
+ * @lro_sg_size: TBD
+ * @lro_frm_len: TBD
+ * @bimodal_interrupts: Enable bimodal interrupts in device
+ * @bitmap_intr_num: Interrupt Number associated with the bitmap
+ * @max_cqe_groups: The maximum number of adapter CQE group blocks a CQRQ
+ * can own at any one time.
+ * @max_num_wqe_od_groups: The maximum number of WQE Headers/OD Groups that
+ * this S-RQ can own at any one time.
+ * @no_wqe_threshold: Maximum number of times adapter polls WQE Hdr blocks for
+ * WQEs before generating a message or interrupt.
+ * @max_cqrq_per_bit: Maximum number of CQRQs allowed to share a bitmap bit
+ * @max_sess_per_bit: Maximum number of sessions allowed to share a bitmap bit
+ * @refill_threshold_high: This field provides a hysteresis upper bound for
+ * automatic adapter refill operations.
+ * @refill_threshold_low: This field provides a hysteresis lower bound for
+ * automatic adapter refill operations.
+ * @eol_policy: This field sets the policy for handling the end of list condition.
+ * 2'b00 - When EOL is reached, poll until last block wrapper size is no longer 0.
+ * 2'b01 - Send UMQ message when EOL is reached.
+ * 2'b1x - Poll until the poll_count_max is reached and, if still EOL, send UMQ message.
+ * @eol_poll_count_max: Sets the maximum number of times the queue manager will poll for
+ * a non-zero block wrapper before giving up and sending a UMQ message.
+ * @ack_blk_limit: Limit on the maximum number of ACK list blocks that can be held
+ * by a session at any one time.
*
* Xframe configuration.
* Contains per-device configuration parameters, including:
@@ -671,7 +694,6 @@ typedef struct xge_hal_device_config_t {
xge_hal_ring_config_t ring;
xge_hal_mac_config_t mac;
- xge_hal_tti_config_t tti;
xge_hal_fifo_config_t fifo;
int dump_on_serr;
@@ -743,6 +765,77 @@ typedef struct xge_hal_device_config_t {
#define XGE_HAL_RTS_MAC_DISABLE 0
#define XGE_HAL_RTS_MAC_ENABLE 1
+ int lro_sg_size;
+#define XGE_HAL_LRO_DEFAULT_SG_SIZE 10
+#define XGE_HAL_LRO_MIN_SG_SIZE 1
+#define XGE_HAL_LRO_MAX_SG_SIZE 64
+
+ int lro_frm_len;
+#define XGE_HAL_LRO_DEFAULT_FRM_LEN 65536
+#define XGE_HAL_LRO_MIN_FRM_LEN 4096
+#define XGE_HAL_LRO_MAX_FRM_LEN 65536
+
+ int bimodal_interrupts;
+#define XGE_HAL_BIMODAL_INTR_MIN -1
+#define XGE_HAL_BIMODAL_INTR_MAX 1
+
+ int bimodal_timer_lo_us;
+#define XGE_HAL_BIMODAL_TIMER_LO_US_MIN 1
+#define XGE_HAL_BIMODAL_TIMER_LO_US_MAX 127
+
+ int bimodal_timer_hi_us;
+#define XGE_HAL_BIMODAL_TIMER_HI_US_MIN 128
+#define XGE_HAL_BIMODAL_TIMER_HI_US_MAX 65535
+
+ int rts_qos_steering_config;
+#define XGE_HAL_RTS_QOS_STEERING_DISABLE 0
+#define XGE_HAL_RTS_QOS_STEERING_ENABLE 1
+
+#ifdef XGEHAL_RNIC
+
+ int bitmap_intr_num;
+#define XGE_HAL_BITMAP_INTR_NUM_MIN 1
+#define XGE_HAL_BITMAP_INTR_NUM_MAX 64
+
+ int max_cqe_groups;
+#define XGE_HAL_MAX_CQE_GROUPS_MIN 1
+#define XGE_HAL_MAX_CQE_GROUPS_MAX 16
+
+ int max_num_wqe_od_groups;
+#define XGE_HAL_MAX_NUM_OD_GROUPS_MIN 1
+#define XGE_HAL_MAX_NUM_OD_GROUPS_MAX 16
+
+ int no_wqe_threshold;
+#define XGE_HAL_NO_WQE_THRESHOLD_MIN 1
+#define XGE_HAL_NO_WQE_THRESHOLD_MAX 16
+
+ int max_cqrq_per_bit;
+#define XGE_HAL_MAX_CQRQ_PER_BIT_MIN 1
+#define XGE_HAL_MAX_CQRQ_PER_BIT_MAX 16
+
+ int max_sess_per_bit;
+#define XGE_HAL_MAX_SESS_PER_BIT_MIN 1
+#define XGE_HAL_MAX_SESS_PER_BIT_MAX 16
+
+ int refill_threshold_high;
+#define XGE_HAL_REFILL_THRESHOLD_HIGH_MIN 1
+#define XGE_HAL_REFILL_THRESHOLD_HIGH_MAX 16
+
+ int refill_threshold_low;
+#define XGE_HAL_REFILL_THRESHOLD_LOW_MIN 1
+#define XGE_HAL_REFILL_THRESHOLD_LOW_MAX 16
+
+ int ack_blk_limit;
+#define XGE_HAL_ACK_BLOCK_LIMIT_MIN 1
+#define XGE_HAL_ACK_BLOCK_LIMIT_MAX 16
+
+ int poll_or_doorbell;
+#define XGE_HAL_POLL_OR_DOORBELL_POLL 1
+#define XGE_HAL_POLL_OR_DOORBELL_DOORBELL 0
+
+#endif
+
} xge_hal_device_config_t;
/**
@@ -800,4 +893,6 @@ __hal_device_config_check_herc (xge_hal_device_config_t *new_config);
xge_hal_status_e
__hal_driver_config_check (xge_hal_driver_config_t *new_config);
+__EXTERN_END_DECLS
+
#endif /* XGE_HAL_CONFIG_H */
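
The new LRO knobs are plain bounded ints, so configuring them is a matter of staying inside the MIN/MAX defines; values outside the range would be rejected by the config-check routines declared above. A hedged sketch, where the helper itself is hypothetical:

    #include "xgehal-config.h"

    /* Hypothetical helper: populate the new LRO knobs with their defaults. */
    static void set_lro_defaults(xge_hal_device_config_t *cfg)
    {
            cfg->lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE;   /* 10 fragments/session */
            cfg->lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN;   /* 65536-byte frame cap */
    }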
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-device.h b/usr/src/uts/common/io/xge/hal/include/xgehal-device.h
index e091b300b9..6f7f64f524 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-device.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-device.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-device.h
*
- * Description: HAL device object functionality
- *
- * Created: 14 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_DEVICE_H
@@ -40,6 +31,13 @@
#include "xgehal-regs.h"
#include "xgehal-channel.h"
#include "xgehal-stats.h"
+#include "xgehal-ring.h"
+#ifdef XGEHAL_RNIC
+#include "xgehal-lbwrapper.h"
+#include "xgehal-blockpool.h"
+#endif
+
+__EXTERN_BEGIN_DECLS
#define XGE_HAL_DEVICE_XMSI_WAIT_MAX_MILLIS 500
#define XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS 500
@@ -53,7 +51,7 @@
#define XGE_HAL_DEAD 0xDEADDEAD
#define XGE_HAL_DUMP_BUF_SIZE 0x4000
-#define XGE_HAL_MAX_LRO_SESSIONS 15
+#define XGE_HAL_LRO_MAX_BUCKETS 32
/**
* enum xge_hal_card_e - Xframe adapter type.
@@ -78,6 +76,7 @@ typedef enum xge_hal_card_e {
* @regh0: BAR0 mapped memory handle (Solaris), or simply PCI device @pdev
* (Linux and the rest.)
* @regh1: BAR1 mapped memory handle. Same comment as above.
+ * @regh2: BAR2 mapped memory handle. Same comment as above.
* @bar0: BAR0 virtual address.
* @bar1: BAR1 virtual address.
* @bar2: BAR2 virtual address.
@@ -161,6 +160,7 @@ typedef enum xge_hal_pci_mode_e {
* @XGE_HAL_PCI_BUS_FREQUENCY_66MHZ: PCI bus frequency 66MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_100MHZ: PCI bus frequency 100MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_133MHZ: PCI bus frequency 133MHZ
+ * @XGE_HAL_PCI_BUS_FREQUENCY_200MHZ: PCI bus frequency 200MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_266MHZ: PCI bus frequency 266MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN: Unrecognized PCI bus frequency value.
*
@@ -269,6 +269,10 @@ typedef struct lro {
/* Total length of the fragments clubbed with the initial frame */
u32 frags_len;
+
+ /* LRO frame contains time stamp, if (ts_off != -1) */
+ int ts_off;
+
} lro_t;
#endif
@@ -290,6 +294,23 @@ typedef struct xge_hal_spdm_entry_t {
u8 tgt_queue;
} xge_hal_spdm_entry_t;
+#ifdef XGEHAL_RNIC
+/*
+ * xge_hal_rnic_oid_db_t
+ *
+ * Database used to allocate object Ids.
+ */
+typedef struct xge_hal_rnic_oid_db_t {
+ u8 id_map[4096];
+ u32 id_next_byte;
+ u8 id_inst_number;
+#define XGE_HAL_RNIC_OID_DB_OID_GET(sid,sin) (((sin) << 24) | (sid))
+#define XGE_HAL_RNIC_OID_DB_SID_GET(id) ((id) & 0xFFFFFF)
+#define XGE_HAL_RNIC_OID_DB_SIN_GET(id) (((id) >> 24) & 0xFF)
+
+} xge_hal_rnic_oid_db_t;
+
+#endif
/*
* xge_hal_device_t
@@ -297,7 +318,7 @@ typedef struct xge_hal_spdm_entry_t {
* HAL device object. Represents Xframe.
*/
typedef struct {
- int magic;
+ unsigned int magic;
pci_reg_h regh0;
pci_reg_h regh1;
pci_reg_h regh2;
@@ -316,6 +337,22 @@ typedef struct {
xge_list_t free_channels;
xge_list_t fifo_channels;
xge_list_t ring_channels;
+#ifdef XGEHAL_RNIC
+ xge_hal_rnic_oid_db_t nce_oid_db;
+ xge_list_t sq_channels;
+ xge_hal_rnic_oid_db_t sq_oid_db;
+ xge_list_t hrq_channels;
+ xge_hal_rnic_oid_db_t hrq_oid_db;
+ xge_list_t hcq_channels;
+ xge_hal_rnic_oid_db_t hcq_oid_db;
+ xge_list_t lrq_channels;
+ xge_hal_rnic_oid_db_t lrq_oid_db;
+ xge_list_t lcq_channels;
+ xge_hal_rnic_oid_db_t lcq_oid_db;
+ xge_list_t umq_channels;
+ xge_list_t dmq_channels;
+ xge_hal_blockpool_t block_pool;
+#endif
volatile int is_initialized;
volatile int terminating;
xge_hal_stats_t stats;
@@ -335,6 +372,11 @@ typedef struct {
u8 inject_bad_tcode;
int inject_bad_tcode_for_chan_type;
int reset_needed_after_close;
+ int tti_enabled;
+ xge_hal_tti_config_t bimodal_tti[XGE_HAL_MAX_RING_NUM];
+ int bimodal_timer_val_us;
+ int bimodal_urange_a_en;
+ int bimodal_intr_cnt;
char *spdm_mem_base;
u16 spdm_max_entries;
xge_hal_spdm_entry_t **spdm_table;
@@ -343,18 +385,32 @@ typedef struct {
u32 msi_mask;
#endif
#if defined(XGE_HAL_CONFIG_LRO)
- lro_t g_lro_pool[XGE_HAL_MAX_LRO_SESSIONS];
+ lro_t lro_pool[XGE_HAL_LRO_MAX_BUCKETS];
+ int lro_next_idx;
+ lro_t *lro_recent;
#endif
spinlock_t xena_post_lock;
+
+ /* bimodal workload stats */
+ int irq_workload_rxd[XGE_HAL_MAX_RING_NUM];
+ int irq_workload_rxcnt[XGE_HAL_MAX_RING_NUM];
+ int irq_workload_rxlen[XGE_HAL_MAX_RING_NUM];
+ int irq_workload_txd[XGE_HAL_MAX_FIFO_NUM];
+ int irq_workload_txcnt[XGE_HAL_MAX_FIFO_NUM];
+ int irq_workload_txlen[XGE_HAL_MAX_FIFO_NUM];
+
+ int mtu_first_time_set;
u64 rxufca_lbolt;
u64 rxufca_lbolt_time;
u64 rxufca_intr_thres;
char* dump_buf;
- int mtu_first_time_set;
xge_hal_pci_mode_e pci_mode;
xge_hal_pci_bus_frequency_e bus_frequency;
xge_hal_pci_bus_width_e bus_width;
volatile int in_poll;
+#ifdef XGEHAL_RNIC
+ void *rnic_context;
+#endif
} xge_hal_device_t;
@@ -399,6 +455,12 @@ __hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg, int op, u64 mask,
xge_hal_status_e
__hal_device_rts_mac_configure(xge_hal_device_t *hldev);
+xge_hal_status_e
+__hal_device_rts_qos_configure(xge_hal_device_t *hldev);
+
+xge_hal_status_e
+__hal_device_rti_configure(xge_hal_device_t *hldev, int runtime);
+
/* =========================== PUBLIC API ================================= */
unsigned int
@@ -433,6 +495,14 @@ xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macadd
xge_hal_status_e
xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index);
+/**
+ * xge_hal_device_rti_reconfigure - Reconfigure RTI at runtime.
+ */
+static inline xge_hal_status_e
+xge_hal_device_rti_reconfigure(xge_hal_device_t *hldev)
+{
+ return __hal_device_rti_configure(hldev, 1);
+}
/**
* xge_hal_device_is_initialized - Returns 0 if device is not
@@ -608,7 +678,7 @@ xge_hal_device_private(xge_hal_device_h devh)
static inline xge_hal_device_h
xge_hal_device_from_private(void *info_ptr)
{
- return xge_container_of(info_ptr, xge_hal_device_t,
+ return xge_container_of((void * const *) info_ptr, xge_hal_device_t,
upper_layer_info);
}
@@ -706,11 +776,24 @@ xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
u8 is_tcp, u8 is_ipv4);
+xge_hal_status_e
+xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index);
+
u32 __hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value);
int
xge_hal_device_is_closed (xge_hal_device_h devh);
+#ifdef XGEHAL_RNIC
+
+xge_hal_status_e
+__hal_device_oid_allocate(xge_hal_rnic_oid_db_t *objdb, u32 *objid);
+
+xge_hal_status_e
+__hal_device_oid_free(xge_hal_rnic_oid_db_t *objdb, u32 objid);
+
+#endif
+
#if defined(XGE_HAL_MSI)
/* Some function protoypes for MSI implementation. */
xge_hal_status_e
@@ -787,10 +870,16 @@ __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_all(xge_hal_device_t *hldev);
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
-xge_hal_device_poll_tx_channels(xge_hal_device_t *hldev);
+xge_hal_device_poll_tx_channels(xge_hal_device_t *hldev, int *got_tx);
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
-xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev);
+xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev, int *got_rx);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_rx_channel(xge_hal_channel_t *channel, int *got_rx);
+
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_tx_channel(xge_hal_channel_t *channel, int *got_tx);
#if defined (XGE_HAL_CONFIG_LRO)
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u8
@@ -812,52 +901,60 @@ __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_header_update_u32(u8 *string, u16 offset, u32 val);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u16
-__hal_tcp_seg_len(u8 *ip, u8 *tcp);
+__hal_tcp_seg_len(iplro_t *ip, tcplro_t *tcp);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-__hal_ip_lro_capable(u8 *ip, xge_hal_dtr_info_t *ext_info);
+__hal_ip_lro_capable(iplro_t *ip, xge_hal_dtr_info_t *ext_info);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-__hal_tcp_lro_capable(u8 *ip, u8 *tcp);
+__hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp, lro_t *lro, int *ts_off);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-__hal_lro_capable(u8 *buffer, u8 **ip, u8 **tcp, xge_hal_dtr_info_t *ext_info,
- xge_hal_device_t *hldev);
+__hal_lro_capable(u8 *buffer, iplro_t **ip, tcplro_t **tcp,
+ xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-__hal_get_lro_session(u8 *buffer, u8 *ip, u8 *tcp, lro_t **lro,
- xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev);
+__hal_get_lro_session(u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro,
+ xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
+ lro_t **lro_end3);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-__hal_lro_under_optimal_thresh(u8 *ip, u8 *tcp, lro_t *lro,
+__hal_lro_under_optimal_thresh(iplro_t *ip, tcplro_t *tcp, lro_t *lro,
xge_hal_device_t *hldev);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-__hal_collapse_ip_hdr(u8 *ip, u8 *tcp, lro_t *lro, xge_hal_device_t *hldev);
+__hal_collapse_ip_hdr(iplro_t *ip, tcplro_t *tcp, lro_t *lro,
+ xge_hal_device_t *hldev);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-__hal_collapse_tcp_hdr(u8 *ip, u8 *tcp, lro_t *lro, xge_hal_device_t *hldev);
-
+__hal_collapse_tcp_hdr(iplro_t *ip, tcplro_t *tcp, lro_t *lro,
+ xge_hal_device_t *hldev);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-__hal_append_lro(u8 *ip, u8 *tcp, u32 *seg_len, lro_t *lro,
+__hal_append_lro(iplro_t *ip, tcplro_t **tcp, u32 *seg_len, lro_t *lro,
xge_hal_device_t *hldev);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-xge_hal_accumulate_large_rx(u8 *buffer, u8 **tcp, u32 *seglen, lro_t **lro,
- xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev);
-
-__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
-xge_hal_lro_free(lro_t *lro, xge_hal_device_t *hldev);
-
-__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
-__hal_lro_malloc(xge_hal_device_t *hldev);
+xge_hal_accumulate_large_rx(u8 *buffer, tcplro_t **tcp, u32 *seglen,
+ lro_t **lro, xge_hal_dtr_info_t *ext_info,
+ xge_hal_device_t *hldev, lro_t **lro_end3);
void
-xge_hal_lro_terminate(u32 lro_scale xge_hal_device_t *hldev);
+xge_hal_lro_terminate(u32 lro_scale, xge_hal_device_t *hldev);
xge_hal_status_e
xge_hal_lro_init(u32 lro_scale, xge_hal_device_t *hldev);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
+xge_hal_lro_get_next_session(xge_hal_device_t *hldev);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+__hal_open_lro_session (u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro,
+ xge_hal_device_t *hldev, int slot, u32 tcp_seg_len,
+ int ts_off);
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+__hal_lro_get_free_slot (xge_hal_device_t *hldev);
#endif
#else /* XGE_FASTPATH_EXTERN */
@@ -866,4 +963,7 @@ xge_hal_lro_init(u32 lro_scale, xge_hal_device_t *hldev);
#include "xgehal-device-fp.c"
#endif /* XGE_FASTPATH_INLINE */
+
+__EXTERN_END_DECLS
+
#endif /* XGE_HAL_DEVICE_H */
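
The polling entry points change shape in this file: xge_hal_device_poll_tx_channels() and xge_hal_device_poll_rx_channels() now report through got_tx/got_rx out-parameters whether any completion was found, and per-channel variants are added. A hedged sketch of one polling pass; the idle handling is a ULD policy choice, not something this header prescribes:

    #include "xgehal-device.h"

    static void poll_once(xge_hal_device_t *hldev)
    {
            int got_tx = 0, got_rx = 0;

            (void) xge_hal_device_poll_tx_channels(hldev, &got_tx);
            (void) xge_hal_device_poll_rx_channels(hldev, &got_rx);

            if (!got_tx && !got_rx) {
                    /* nothing completed: caller could back off or re-arm
                     * interrupts here (policy illustrative) */
            }
    }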
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-driver.h b/usr/src/uts/common/io/xge/hal/include/xgehal-driver.h
index 576de68ff4..5bf00a8a97 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-driver.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-driver.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
*
- * FileName : xgehal-driver.h
- *
- * Description: HAL driver object functionality
- *
- * Created: 14 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_DRIVER_H
@@ -40,6 +31,8 @@
#include "xgehal-config.h"
#include "xgehal-event.h"
+__EXTERN_BEGIN_DECLS
+
/* maximum number of events consumed in a single poll() cycle */
#define XGE_HAL_DRIVER_QUEUE_CONSUME_MAX 5
@@ -176,6 +169,15 @@ typedef int (*xge_uld_before_device_poll_f) (xge_hal_device_h devh);
typedef void (*xge_uld_after_device_poll_f) (xge_hal_device_h devh);
/**
+ * function xge_uld_xpak_alarm_log_f - ULD "XPAK alarm log" callback.
+ * @devh: HAL device handle.
+ * @type: XPAK alarm type.
+ *
+ * Unless NULL is specified, HAL invokes the callback
+ * from inside __hal_chk_xpak_counter().
+ */
+typedef void (*xge_uld_xpak_alarm_log_f) (xge_hal_device_h devh, xge_hal_xpak_alarm_type_e type);
+
+/**
* struct xge_hal_uld_cbs_t - Upper-layer driver "slow-path" callbacks.
* @link_up: See xge_uld_link_up_f{}.
* @link_down: See xge_uld_link_down_f{}.
@@ -207,6 +209,7 @@ typedef struct xge_hal_uld_cbs_t {
xge_uld_before_device_poll_f before_device_poll;
xge_uld_after_device_poll_f after_device_poll;
xge_uld_sched_timer_cb_f sched_timer;
+ xge_uld_xpak_alarm_log_f xpak_alarm_log;
} xge_hal_uld_cbs_t;
/**
@@ -306,5 +309,6 @@ void xge_hal_driver_tracebuf_dump(void);
#define xge_hal_driver_tracebuf_dump()
#endif
+__EXTERN_END_DECLS
#endif /* XGE_HAL_DRIVER_H */
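
The new xpak_alarm_log member of xge_hal_uld_cbs_t is optional; per the comment above, HAL only invokes it when non-NULL. A hedged registration sketch (C99 designated initializer used for brevity; the callback body is illustrative):

    #include "xgehal-driver.h"

    static void my_xpak_log(xge_hal_device_h devh, xge_hal_xpak_alarm_type_e type)
    {
            /* illustrative: forward the alarm to the OS log */
            xge_os_printf("XPAK alarm %d on device %p", (int)type, devh);
    }

    /* Hypothetical callback table; other members elided. */
    static xge_hal_uld_cbs_t uld_cbs = {
            .xpak_alarm_log = my_xpak_log,
    };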
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-event.h b/usr/src/uts/common/io/xge/hal/include/xgehal-event.h
index 9b827501b8..1b2413c213 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-event.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-event.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-event.h
*
- * Description: event types
- *
- * Created: 7 June 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_EVENT_H
@@ -35,6 +26,8 @@
#include "xge-os-pal.h"
+__EXTERN_BEGIN_DECLS
+
#define XGE_HAL_EVENT_BASE 0
#define XGE_LL_EVENT_BASE 100
@@ -74,4 +67,6 @@ typedef enum xge_hal_event_e {
XGE_HAL_EVENT_SLOT_FREEZE = XGE_HAL_EVENT_BASE + 7,
} xge_hal_event_e;
+__EXTERN_END_DECLS
+
#endif /* XGE_HAL_EVENT_H */
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-fifo.h b/usr/src/uts/common/io/xge/hal/include/xgehal-fifo.h
index 26dc74ad5f..31dd7a4d99 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-fifo.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-fifo.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-fifo.h
- *
- * Description: Tx fifo object functionality
*
- * Created: 19 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_FIFO_H
@@ -37,6 +28,8 @@
#include "xgehal-config.h"
#include "xgehal-mm.h"
+__EXTERN_BEGIN_DECLS
+
/* HW fifo configuration */
#define XGE_HAL_FIFO_INT_PER_LIST_THRESHOLD 65
#define XGE_HAL_FIFO_MAX_WRR 5
@@ -106,7 +99,7 @@ typedef struct xge_hal_fifo_txd_t {
#define XGE_HAL_TXD_LSO_COF_CTRL(val) vBIT(val,30,2)
#define XGE_HAL_TXD_TCP_LSO_MSS(val) vBIT(val,34,14)
#define XGE_HAL_TXD_BUFFER0_SIZE(val) vBIT(val,48,16)
-
+#define XGE_HAL_TXD_GET_LSO_BYTES_SENT(val) (((val) & vBIT(0xFFFF,16,16)) >> 32)
u64 control_2;
#define XGE_HAL_TXD_TX_CKO_CONTROL (BIT(5)|BIT(6)|BIT(7))
#define XGE_HAL_TXD_TX_CKO_IPV4_EN BIT(5)
@@ -162,6 +155,7 @@ typedef struct xge_hal_fifo_t {
int txdl_size;
int priv_size;
xge_hal_mempool_t *mempool;
+ int align_size;
} __xge_os_attr_cacheline_aligned xge_hal_fifo_t;
/**
@@ -225,6 +219,8 @@ typedef struct xge_hal_fifo_txdl_priv_t {
int align_used_frags;
int alloc_frags;
int dang_frags;
+ unsigned int bytes_sent;
+ int unused;
xge_hal_fifo_txd_t *dang_txdl;
struct xge_hal_fifo_txdl_priv_t *next_txdl_priv;
xge_hal_fifo_txd_t *first_txdp;
@@ -331,10 +327,15 @@ xge_hal_fifo_dtr_cksum_set_bits(xge_hal_dtr_h dtrh, u64 cksum_bits);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh, u16 vlan_tag);
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh);
+
#else /* XGE_FASTPATH_EXTERN */
#define __HAL_STATIC_FIFO static
#define __HAL_INLINE_FIFO inline
#include "xgehal-fifo-fp.c"
#endif /* XGE_FASTPATH_INLINE */
+__EXTERN_END_DECLS
+
#endif /* XGE_HAL_FIFO_H */
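
XGE_HAL_TXD_GET_LSO_BYTES_SENT() extracts the byte count the adapter writes back into control_1 of an LSO TxD, which pairs with the new bytes_sent field in xge_hal_fifo_txdl_priv_t. A hedged sketch; treating the descriptor handle as a pointer to the first TxD is an assumption about this HAL's txdl layout:

    #include "xgehal-fifo.h"

    static unsigned int lso_bytes_sent(xge_hal_dtr_h dtrh)
    {
            xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;

            /* field written back by the adapter on LSO completion */
            return ((unsigned int)XGE_HAL_TXD_GET_LSO_BYTES_SENT(txdp->control_1));
    }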
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-mgmt.h b/usr/src/uts/common/io/xge/hal/include/xgehal-mgmt.h
index ba5576cdb1..7755733a64 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-mgmt.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-mgmt.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-mgmt.h
- *
- * Description: management API
*
- * Created: 1 September 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_MGMT_H
@@ -39,6 +30,9 @@
#include "xgehal-config.h"
#include "xgehal-stats.h"
#include "xgehal-regs.h"
+#include "xgehal-device.h"
+
+__EXTERN_BEGIN_DECLS
/**
* struct xge_hal_mgmt_about_info_t - About info.
@@ -76,6 +70,7 @@ typedef struct xge_hal_mgmt_about_info_t {
char ll_minor[4];
char ll_fix[4];
char ll_build[16];
+ u32 transponder_temperature;
} xge_hal_mgmt_about_info_t;
typedef xge_hal_stats_hw_info_t xge_hal_mgmt_hw_stats_t;
@@ -133,6 +128,24 @@ xge_hal_status_e
xge_hal_mgmt_pci_config(xge_hal_device_h devh,
xge_hal_mgmt_pci_config_t *pci_config, int size);
+xge_hal_status_e
+xge_hal_pma_loopback( xge_hal_device_h devh, int enable );
+
+xge_hal_status_e
+xge_hal_rldram_test(xge_hal_device_h devh, u64 * data);
+
+u16
+xge_hal_mdio_read( xge_hal_device_h devh, u32 mmd_type, u64 addr );
+
+xge_hal_status_e
+xge_hal_mdio_write( xge_hal_device_h devh, u32 mmd_type, u64 addr, u32 value );
+
+u32
+xge_hal_read_xfp_current_temp(xge_hal_device_h devh);
+
+void
+__hal_updt_stats_xpak(xge_hal_device_t *hldev);
+
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
xge_hal_status_e
xge_hal_mgmt_trace_read(char *buffer, unsigned buf_size, unsigned *offset,
@@ -154,9 +167,12 @@ xge_hal_flick_link_led(xge_hal_device_h devh);
#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
((((subid >= 0x600B) && (subid <= 0x600D)) || \
((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0)
+#define CHECKBIT(value, nbit) ((value) & (1 << (nbit)))
#ifdef XGE_HAL_USE_MGMT_AUX
#include "xgehal-mgmtaux.h"
#endif
+__EXTERN_END_DECLS
+
#endif /* XGE_HAL_MGMT_H */
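
Among the new management entry points, xge_hal_read_xfp_current_temp() exposes the transceiver temperature that now also appears as transponder_temperature in the about-info structure. A hedged sketch; the wrapper is hypothetical and the units of the returned value are adapter-defined:

    #include "xgehal-mgmt.h"

    static void report_xfp_temp(xge_hal_device_h devh)
    {
            u32 temp = xge_hal_read_xfp_current_temp(devh);

            xge_os_printf("XFP transceiver temperature: %u", (unsigned int)temp);
    }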
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-mgmtaux.h b/usr/src/uts/common/io/xge/hal/include/xgehal-mgmtaux.h
index e86d41715e..a5ca1d133a 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-mgmtaux.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-mgmtaux.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-mgmtaux.h
*
- * Description: management auxiliary API
- *
- * Created: 1 September 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_MGMTAUX_H
@@ -35,6 +26,8 @@
#include "xgehal-mgmt.h"
+__EXTERN_BEGIN_DECLS
+
#define XGE_HAL_AUX_SEPA ' '
xge_hal_status_e xge_hal_aux_about_read(xge_hal_device_h devh, int bufsize,
@@ -83,4 +76,7 @@ xge_hal_status_e xge_hal_aux_driver_config_read(int bufsize, char *retbuf,
xge_hal_status_e xge_hal_aux_device_config_read(xge_hal_device_h devh,
int bufsize, char *retbuf, int *retsize);
+
+__EXTERN_END_DECLS
+
#endif /* XGE_HAL_MGMTAUX_H */
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-mm.h b/usr/src/uts/common/io/xge/hal/include/xgehal-mm.h
index 8fc2ab9615..eabd174504 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-mm.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-mm.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
*
- * FileName : xgehal-mm.h
- *
- * Description: memory pool object
- *
- * Created: 28 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_MM_H
@@ -38,6 +29,8 @@
#include "xgehal-types.h"
#include "xgehal-driver.h"
+__EXTERN_BEGIN_DECLS
+
typedef void* xge_hal_mempool_h;
/*
@@ -162,4 +155,19 @@ xge_hal_mempool_t* __hal_mempool_create(pci_dev_h pdev, int memblock_size,
void __hal_mempool_destroy(xge_hal_mempool_t *mempool);
+
+#ifdef XGEHAL_RNIC
+
+xge_hal_status_e
+__hal_allocate_dma_register(pci_dev_h pdev, int size,
+ void **dma_register, xge_hal_mempool_dma_t *dma_object);
+
+void
+__hal_free_dma_register(pci_dev_h pdev, int size,
+ void *dma_register, xge_hal_mempool_dma_t *dma_object);
+
+#endif
+
+__EXTERN_END_DECLS
+
#endif /* XGE_HAL_MM_H */
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-regs.h b/usr/src/uts/common/io/xge/hal/include/xgehal-regs.h
index 54a05459b1..0620cfccde 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-regs.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-regs.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-regs.h
*
- * Description: Xframe mem-mapped register space
- *
- * Created: 14 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_REGS_H
@@ -127,7 +118,16 @@ typedef struct {
u64 pci_info;
#define XGE_HAL_PCI_INFO vBIT(0xF,0,4)
#define XGE_HAL_PCI_32_BIT BIT(8)
- u8 unused_0[0x800 - 0x128];
+
+ u8 unused0_1[0x160 - 0x128];
+
+ u64 ric_status;
+
+ u8 unused0_2[0x558 - 0x168];
+
+ u64 mbist_status;
+
+ u8 unused0_3[0x800 - 0x560];
/* PCI-X Controller registers */
u64 pic_int_status;
@@ -190,7 +190,7 @@ typedef struct {
#define XGE_HAL_IIC_INT_REG_ACK_ERR BIT(8)
u64 iic_alarms;
- u8 unused4[0x08];
+ u64 msi_pending_reg;
u64 misc_int_reg;
#define XGE_HAL_MISC_INT_REG_DP_ERR_INT BIT(0)
@@ -199,7 +199,13 @@ typedef struct {
u64 misc_int_mask;
u64 misc_alarms;
- u8 unused5[0x38];
+ u64 msi_triggered_reg;
+
+ u64 xfp_gpio_int_reg;
+ u64 xfp_gpio_int_mask;
+ u64 xfp_alarms;
+
+ u8 unused5[0x8E0 - 0x8C8];
u64 tx_traffic_int;
#define XGE_HAL_TX_TRAFFIC_INT_n(n) BIT(n)
@@ -268,7 +274,7 @@ typedef struct {
u64 xmsi_control;
#define XGE_HAL_XMSI_EN BIT(0)
#define XGE_HAL_XMSI_DIS_TINT_SERR BIT(1)
-#define XGE_HAL_XMSI_BYTE_COUNT(val) vBIT(val,13,2)
+#define XGE_HAL_XMSI_BYTE_COUNT(val) vBIT(val,13,3)
u64 xmsi_access;
#define XGE_HAL_XMSI_WR_RDN BIT(7)
@@ -302,6 +308,34 @@ typedef struct {
/* General Configuration */
u64 mdio_control;
+#define XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(n) vBIT(n,0,16)
+#define XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(n) vBIT(n,19,5)
+#define XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(n) vBIT(n,27,5)
+#define XGE_HAL_MDIO_CONTROL_MMD_DATA(n) vBIT(n,32,16)
+#define XGE_HAL_MDIO_CONTROL_MMD_CTRL(n) vBIT(n,56,4)
+#define XGE_HAL_MDIO_CONTROL_MMD_OP(n) vBIT(n,60,2)
+#define XGE_HAL_MDIO_CONTROL_MMD_DATA_GET(n) (((n) >> 16) & 0xFFFF)
+#define XGE_HAL_MDIO_MMD_PMA_DEV_ADDR 0x01
+#define XGE_HAL_MDIO_DOM_REG_ADDR 0xA100
+#define XGE_HAL_MDIO_ALARM_FLAGS_ADDR 0xA070
+#define XGE_HAL_MDIO_WARN_FLAGS_ADDR 0xA074
+#define XGE_HAL_MDIO_CTRL_START 0xE
+#define XGE_HAL_MDIO_OP_ADDRESS 0x0
+#define XGE_HAL_MDIO_OP_WRITE 0x1
+#define XGE_HAL_MDIO_OP_READ 0x3
+#define XGE_HAL_MDIO_OP_READ_POST_INCREMENT 0x2
+#define XGE_HAL_MDIO_ALARM_TEMPHIGH 0x0080
+#define XGE_HAL_MDIO_ALARM_TEMPLOW 0x0040
+#define XGE_HAL_MDIO_ALARM_BIASHIGH 0x0008
+#define XGE_HAL_MDIO_ALARM_BIASLOW 0x0004
+#define XGE_HAL_MDIO_ALARM_POUTPUTHIGH 0x0002
+#define XGE_HAL_MDIO_ALARM_POUTPUTLOW 0x0001
+#define XGE_HAL_MDIO_WARN_TEMPHIGH 0x0080
+#define XGE_HAL_MDIO_WARN_TEMPLOW 0x0040
+#define XGE_HAL_MDIO_WARN_BIASHIGH 0x0008
+#define XGE_HAL_MDIO_WARN_BIASLOW 0x0004
+#define XGE_HAL_MDIO_WARN_POUTPUTHIGH 0x0002
+#define XGE_HAL_MDIO_WARN_POUTPUTLOW 0x0001
u64 dtx_control;
@@ -323,12 +357,13 @@ typedef struct {
u64 xfb_control;
u64 gpio_control;
-#define XGE_HAL_GPIO_CTRL_GPIO_0 BIT(8)
+#define XGE_HAL_GPIO_CTRL_GPIO_0 BIT(8)
u64 txfifo_dw_mask;
u64 split_table_line_no;
u64 sc_timeout;
u64 pic_control_2;
+#define XGE_HAL_TXD_WRITE_BC(n) vBIT(n, 13, 3)
u64 ini_dperr_ctrl;
u64 wreq_split_mask;
u64 qw_per_rxd;
@@ -499,9 +534,17 @@ typedef struct {
/* Recent add, used only debug purposes. */
u64 pcc_enable;
-
- u8 unused10[0x700 - 0x178];
-
+
+ u64 pfc_monitor_0;
+ u64 pfc_monitor_1;
+ u64 pfc_monitor_2;
+ u64 pfc_monitor_3;
+ u64 txd_ownership_ctrl;
+ u64 pfc_read_cntrl;
+ u64 pfc_read_data;
+
+ u8 unused10[0x1700 - 0x11B0];
+
u64 txdma_debug_ctrl;
u8 unused11[0x1800 - 0x1708];
@@ -578,8 +621,9 @@ typedef struct {
#define XGE_HAL_PRC_CTRL_RING_MODE_5 vBIT(2,14,2)
#define XGE_HAL_PRC_CTRL_RING_MODE_x vBIT(3,14,2)
#define XGE_HAL_PRC_CTRL_NO_SNOOP(n) vBIT(n,22,2)
-#define XGE_HAL_PRC_CTRL_RTH_DISABLE BIT(31)
-#define XGE_HAL_PRC_CTRL_GROUP_READS BIT(38)
+#define XGE_HAL_PRC_CTRL_RTH_DISABLE BIT(31)
+#define XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT BIT(37)
+#define XGE_HAL_PRC_CTRL_GROUP_READS BIT(38)
#define XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
u64 prc_alarm_action;
@@ -699,9 +743,11 @@ typedef struct {
u64 rmac_cfg_key;
#define XGE_HAL_RMAC_CFG_KEY(val) vBIT(val,0,16)
-#define XGE_HAL_MAX_MAC_ADDRESSES 64
-#define XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET 63 /* enables all multicast
-pkts */
+#define XGE_HAL_MAX_MAC_ADDRESSES 64
+#define XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET 63
+#define XGE_HAL_MAX_MAC_ADDRESSES_HERC 256
+#define XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC 255
+
u64 rmac_addr_cmd_mem;
#define XGE_HAL_RMAC_ADDR_CMD_MEM_WE BIT(7)
#define XGE_HAL_RMAC_ADDR_CMD_MEM_RD 0
@@ -789,7 +835,14 @@ pkts */
u64 rts_ds_mem_data;
#define XGE_HAL_RTS_DS_MEM_DATA(n) vBIT(n,0,8)
- u8 unused16_0[0x338 - 0x220];
+ u8 unused16_1[0x308 - 0x220];
+
+ u64 rts_vid_mem_ctrl;
+ u64 rts_vid_mem_data;
+ u64 rts_p0_p3_map;
+ u64 rts_p4_p7_map;
+ u64 rts_p8_p11_map;
+ u64 rts_p12_p15_map;
u64 rts_mac_cfg;
#define XGE_HAL_RTS_MAC_SECT0_EN BIT(0)
@@ -801,7 +854,7 @@ pkts */
#define XGE_HAL_RTS_MAC_SECT6_EN BIT(6)
#define XGE_HAL_RTS_MAC_SECT7_EN BIT(7)
- u8 unused16_1[0x380 - 0x340];
+ u8 unused16_2[0x380 - 0x340];
u64 rts_rth_cfg;
#define XGE_HAL_RTS_RTH_EN BIT(3)
@@ -844,7 +897,50 @@ pkts */
u64 rts_rth_status;
#define XGE_HAL_RTH_STATUS_SPDM_USE_L4 BIT(3)
- u8 unused17[0x700 - 0x3e8];
+ u8 unused17[0x400 - 0x3E8];
+
+ u64 rmac_red_fine_q0q3;
+ u64 rmac_red_fine_q4q7;
+ u64 rmac_pthresh_cross;
+ u64 rmac_rthresh_cross;
+ u64 rmac_pnum_range[32];
+
+ u64 rmac_mp_crc_0;
+ u64 rmac_mp_mask_a_0;
+ u64 rmac_mp_mask_b_0;
+
+ u64 rmac_mp_crc_1;
+ u64 rmac_mp_mask_a_1;
+ u64 rmac_mp_mask_b_1;
+
+ u64 rmac_mp_crc_2;
+ u64 rmac_mp_mask_a_2;
+ u64 rmac_mp_mask_b_2;
+
+ u64 rmac_mp_crc_3;
+ u64 rmac_mp_mask_a_3;
+ u64 rmac_mp_mask_b_3;
+
+ u64 rmac_mp_crc_4;
+ u64 rmac_mp_mask_a_4;
+ u64 rmac_mp_mask_b_4;
+
+ u64 rmac_mp_crc_5;
+ u64 rmac_mp_mask_a_5;
+ u64 rmac_mp_mask_b_5;
+
+ u64 rmac_mp_crc_6;
+ u64 rmac_mp_mask_a_6;
+ u64 rmac_mp_mask_b_6;
+
+ u64 rmac_mp_crc_7;
+ u64 rmac_mp_mask_a_7;
+ u64 rmac_mp_mask_b_7;
+
+ u64 mac_ctrl;
+ u64 activity_control;
+
+ u8 unused17_2[0x700 - 0x5F0];
u64 mac_debug_ctrl;
#define XGE_HAL_MAC_DBG_ACTIVITY_VALUE 0x411040400000000ULL
@@ -924,13 +1020,37 @@ pkts */
u64 mc_rldram_test_d1;
u8 unused25[0x300 - 0x288];
u64 mc_rldram_test_d2;
- u8 unused26_1[0x640 - 0x308];
+ u8 unused26_1[0x2C00 - 0x2B08];
+ u64 mc_rldram_test_read_d0;
+ u8 unused26_2[0x20 - 0x8];
+ u64 mc_rldram_test_read_d1;
+ u8 unused26_3[0x40 - 0x28];
+ u64 mc_rldram_test_read_d2;
+ u8 unused26_4[0x60 - 0x48];
+ u64 mc_rldram_test_add_bkg;
+ u8 unused26_5[0x80 - 0x68];
+ u64 mc_rldram_test_d0_bkg;
+ u8 unused26_6[0xD00 - 0xC88];
+ u64 mc_rldram_test_d1_bkg;
+ u8 unused26_7[0x20 - 0x8];
+ u64 mc_rldram_test_d2_bkg;
+ u8 unused26_8[0x40 - 0x28];
+ u64 mc_rldram_test_read_d0_bkg;
+ u8 unused26_9[0x60 - 0x48];
+ u64 mc_rldram_test_read_d1_bkg;
+ u8 unused26_10[0x80 - 0x68];
+ u64 mc_rldram_test_read_d2_bkg;
+ u8 unused26_11[0xE00 - 0xD88];
+ u64 mc_rldram_generation;
+ u8 unused26_12[0x20 - 0x8];
+ u64 mc_driver;
+ u8 unused26_13[0x40 - 0x28];
u64 mc_rldram_ref_per_herc;
#define XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(n) vBIT(n, 0, 16)
- u8 unused26_2[0x660 - 0x648];
+ u8 unused26_14[0x660 - 0x648];
u64 mc_rldram_mrs_herc;
#define XGE_HAL_MC_RLDRAM_MRS(n) vBIT(n, 14, 17)
- u8 unused26_3[0x700 - 0x668];
+ u8 unused26_15[0x700 - 0x668];
u64 mc_debug_ctrl;
u8 unused27[0x3000 - 0x2f08];
@@ -955,14 +1075,26 @@ pkts */
u64 xgxs_rxgxs_err_alarm;
u8 unused28[0x100 - 0x40];
+
+ u64 spi_err_reg;
+ u64 spi_err_mask;
+ u64 spi_err_alarm;
u64 xgxs_cfg;
u64 xgxs_status;
u64 xgxs_cfg_key;
- u64 xgxs_efifo_cfg; /* CHANGED */
- u64 rxgxs_ber_0; /* CHANGED */
- u64 rxgxs_ber_1; /* CHANGED */
+ u64 xgxs_efifo_cfg; /* CHANGED */
+ u64 rxgxs_ber_0; /* CHANGED */
+ u64 rxgxs_ber_1; /* CHANGED */
+
+ u64 spi_control;
+ u64 spi_data;
+ u64 spi_write_protect;
+
+ u8 unused29[0x80 - 0x48];
+
+ u64 xgxs_cfg_1;
} xge_hal_pci_bar0_t;
/* Using this structure to calculate offsets */
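
The new mdio_control field helpers compose one 64-bit MDIO command word from index address, MMD device, port, control and opcode fields. A hedged sketch of building a read of the PMA DOM register; the actual register write and completion wait are elided, and the port argument is illustrative:

    #include "xgehal-regs.h"

    static u64 mdio_dom_read_cmd(int port)
    {
            return (XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(XGE_HAL_MDIO_DOM_REG_ADDR) |
                XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(XGE_HAL_MDIO_MMD_PMA_DEV_ADDR) |
                XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(port) |
                XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START) |
                XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ));
    }
    /* After completion, XGE_HAL_MDIO_CONTROL_MMD_DATA_GET(readback) yields
     * the 16-bit result. */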
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-ring.h b/usr/src/uts/common/io/xge/hal/include/xgehal-ring.h
index 7f4ca91a6d..855a9d2b40 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-ring.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-ring.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-ring.h
*
- * Description: HAL Rx ring object functionality
- *
- * Created: 19 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_RING_H
@@ -37,6 +28,8 @@
#include "xgehal-config.h"
#include "xgehal-mm.h"
+__EXTERN_BEGIN_DECLS
+
/* HW ring configuration */
#define XGE_HAL_RING_RXDBLOCK_SIZE 0x1000
@@ -232,7 +225,7 @@ typedef struct {
#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6_EX 0x7
#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6_EX 0x8
#define XGE_HAL_RXD_HASH_TYPE_IPV6_EX 0x9
-
+
typedef u8 xge_hal_ring_block_t[XGE_HAL_RING_RXDBLOCK_SIZE];
#define XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET 0xFF8
@@ -450,11 +443,15 @@ xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
+xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh);
+
#else /* XGE_FASTPATH_EXTERN */
#define __HAL_STATIC_RING static
#define __HAL_INLINE_RING inline
#include "xgehal-ring-fp.c"
#endif /* XGE_FASTPATH_INLINE */
+__EXTERN_END_DECLS
#endif /* XGE_HAL_RING_H */
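
xge_hal_ring_is_next_dtr_completed() gives the ULD a non-destructive peek at whether the next RxD has completed, useful for bounding work per poll cycle. A hedged sketch; XGE_HAL_OK is assumed to be the success status from xgehal-types.h:

    #include "xgehal-ring.h"

    /* Non-destructive peek: the RxD stays on the channel until the ULD
     * consumes it via xge_hal_ring_dtr_next_completed(). */
    static int rx_work_pending(xge_hal_channel_h channelh)
    {
            return (xge_hal_ring_is_next_dtr_completed(channelh) == XGE_HAL_OK);
    }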
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-stats.h b/usr/src/uts/common/io/xge/hal/include/xgehal-stats.h
index 9604f9b19a..b11af70fce 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-stats.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-stats.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-stats.h
- *
- * Description: HW statistics object
*
- * Created: 2 June 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_STATS_H
@@ -38,6 +30,8 @@
#include "xgehal-types.h"
#include "xgehal-config.h"
+__EXTERN_BEGIN_DECLS
+
/**
* struct xge_hal_stats_hw_info_t - Xframe hardware statistics.
* Transmit MAC Statistics:
@@ -351,6 +345,58 @@
* across multiple PCI transactions.
* @rxf_wr_cnt: Count of receive frame write requests.
*
+ * @tmac_frms_oflow: tbd
+ * @tmac_data_octets_oflow: tbd
+ * @tmac_mcst_frms_oflow: tbd
+ * @tmac_bcst_frms_oflow: tbd
+ * @tmac_ttl_octets_oflow: tbd
+ * @tmac_ucst_frms_oflow: tbd
+ * @tmac_nucst_frms_oflow: tbd
+ * @tmac_any_err_frms_oflow: tbd
+ * @tmac_vlan_frms: tbd
+ * @tmac_vld_ip_oflow: tbd
+ * @tmac_drop_ip_oflow: tbd
+ * @tmac_icmp_oflow: tbd
+ * @tmac_rst_tcp_oflow: tbd
+ * @tmac_udp_oflow: tbd
+ * @tpa_unknown_protocol: tbd
+ * @tpa_parse_failure: tbd
+ * @rmac_vld_frms_oflow: tbd
+ * @rmac_data_octets_oflow: tbd
+ * @rmac_vld_mcst_frms_oflow: tbd
+ * @rmac_vld_bcst_frms_oflow: tbd
+ * @rmac_ttl_octets_oflow: tbd
+ * @rmac_accepted_ucst_frms_oflow: tbd
+ * @rmac_accepted_nucst_frms_oflow: tbd
+ * @rmac_discarded_frms_oflow: tbd
+ * @rmac_drop_events_oflow: tbd
+ * @rmac_usized_frms_oflow: tbd
+ * @rmac_osized_frms_oflow: tbd
+ * @rmac_frag_frms_oflow: tbd
+ * @rmac_jabber_frms_oflow: tbd
+ * @rmac_ip_oflow: tbd
+ * @rmac_drop_ip_oflow: tbd
+ * @rmac_icmp_oflow: tbd
+ * @rmac_udp_oflow: tbd
+ * @rmac_err_drp_udp_oflow: tbd
+ * @rmac_pause_cnt_oflow: tbd
+ * @rmac_ttl_1519_4095_frms: tbd
+ * @rmac_ttl_4096_8191_frms: tbd
+ * @rmac_ttl_8192_max_frms: tbd
+ * @rmac_ttl_gt_max_frms: tbd
+ * @rmac_osized_alt_frms: tbd
+ * @rmac_jabber_alt_frms: tbd
+ * @rmac_gt_max_alt_frms: tbd
+ * @rmac_vlan_frms: tbd
+ * @rmac_fcs_discard: tbd
+ * @rmac_len_discard: tbd
+ * @rmac_da_discard: tbd
+ * @rmac_pf_discard: tbd
+ * @rmac_rts_discard: tbd
+ * @rmac_red_discard: tbd
+ * @rmac_ingm_full_discard: tbd
+ * @rmac_accepted_ip_oflow: tbd
+ * @link_fault_cnt: TBD
* Xframe hardware statistics.
*/
typedef struct xge_hal_stats_hw_info_t {
@@ -486,6 +532,7 @@ typedef struct xge_hal_stats_hw_info_t {
u32 tmac_udp_oflow;
u32 tpa_unknown_protocol;
u32 tpa_parse_failure;
+ u32 reserved_10;
u32 rmac_vld_frms_oflow;
u32 rmac_data_octets_oflow;
u32 rmac_vld_mcst_frms_oflow;
@@ -505,6 +552,7 @@ typedef struct xge_hal_stats_hw_info_t {
u32 rmac_udp_oflow;
u32 rmac_err_drp_udp_oflow;
u32 rmac_pause_cnt_oflow;
+ u32 reserved_11;
u64 rmac_ttl_1519_4095_frms;
u64 rmac_ttl_4096_8191_frms;
u64 rmac_ttl_8192_max_frms;
@@ -518,10 +566,13 @@ typedef struct xge_hal_stats_hw_info_t {
u32 rmac_da_discard;
u32 rmac_pf_discard;
u32 rmac_rts_discard;
+ u32 rmac_wol_discard;
u32 rmac_red_discard;
u32 rmac_ingm_full_discard;
u32 rmac_accepted_ip_oflow;
+ u32 reserved_12;
u32 link_fault_cnt;
+ u32 reserved_13;
#else
/* Tx MAC statistics counters. */
u32 tmac_data_octets;
@@ -653,26 +704,28 @@ typedef struct xge_hal_stats_hw_info_t {
u32 tmac_icmp_oflow;
u32 tpa_unknown_protocol;
u32 tmac_udp_oflow;
- u32 rmac_vld_frms_oflow;
+ u32 reserved_10;
u32 tpa_parse_failure;
- u32 rmac_vld_mcst_frms_oflow;
u32 rmac_data_octets_oflow;
- u32 rmac_ttl_octets_oflow;
+ u32 rmac_vld_frms_oflow;
u32 rmac_vld_bcst_frms_oflow;
- u32 rmac_accepted_nucst_frms_oflow;
+ u32 rmac_vld_mcst_frms_oflow;
u32 rmac_accepted_ucst_frms_oflow;
- u32 rmac_drop_events_oflow;
+ u32 rmac_ttl_octets_oflow;
u32 rmac_discarded_frms_oflow;
- u32 rmac_osized_frms_oflow;
+ u32 rmac_accepted_nucst_frms_oflow;
u32 rmac_usized_frms_oflow;
- u32 rmac_jabber_frms_oflow;
+ u32 rmac_drop_events_oflow;
u32 rmac_frag_frms_oflow;
- u32 rmac_drop_ip_oflow;
+ u32 rmac_osized_frms_oflow;
u32 rmac_ip_oflow;
- u32 rmac_udp_oflow;
+ u32 rmac_jabber_frms_oflow;
u32 rmac_icmp_oflow;
- u32 rmac_pause_cnt_oflow;
+ u32 rmac_drop_ip_oflow;
u32 rmac_err_drp_udp_oflow;
+ u32 rmac_udp_oflow;
+ u32 reserved_11;
+ u32 rmac_pause_cnt_oflow;
u64 rmac_ttl_1519_4095_frms;
u64 rmac_ttl_4096_8191_frms;
u64 rmac_ttl_8192_max_frms;
@@ -685,10 +738,13 @@ typedef struct xge_hal_stats_hw_info_t {
u32 rmac_fcs_discard;
u32 rmac_pf_discard;
u32 rmac_da_discard;
- u32 rmac_red_discard;
+ u32 rmac_wol_discard;
u32 rmac_rts_discard;
- u32 rmac_accepted_ip_oflow;
u32 rmac_ingm_full_discard;
+ u32 rmac_red_discard;
+ u32 reserved_12;
+ u32 rmac_accepted_ip_oflow;
+ u32 reserved_13;
u32 link_fault_cnt;
#endif
} xge_hal_stats_hw_info_t;
@@ -739,13 +795,16 @@ typedef struct xge_hal_stats_hw_info_t {
* See also: xge_hal_stats_device_info_t{}.
*/
typedef struct xge_hal_stats_channel_info_t {
- u32 out_of_dtrs_cnt;
+ u32 full_cnt;
+ u32 usage_max;
u32 reserve_free_swaps_cnt;
u32 avg_compl_per_intr_cnt;
u32 total_compl_cnt;
u32 total_posts;
u32 total_posts_many;
u32 total_buffers;
+ u32 copied_frags;
+ u32 copied_buffers;
u32 avg_buffers_per_post;
u32 avg_buffer_size;
u32 avg_post_size;
@@ -756,9 +815,57 @@ typedef struct xge_hal_stats_channel_info_t {
u32 total_posts_dang_frags;
} xge_hal_stats_channel_info_t;
+/**
+ * struct xge_hal_xpak_counter_t - HAL xpak error counters
+ * @excess_temp: excess transceiver_temperature count
+ * @excess_bias_current: excess laser_bias_current count
+ * @excess_laser_output: excess laser_output_power count
+ * @tick_period: tick count for each cycle
+ */
+typedef struct xge_hal_xpak_counter_t {
+ u32 excess_temp;
+ u32 excess_bias_current;
+ u32 excess_laser_output;
+ u32 tick_period;
+} xge_hal_xpak_counter_t;
+
+/**
+ * struct xge_hal_stats_xpak_t - HAL xpak stats
+ * @alarm_transceiver_temp_high: alarm_transceiver_temp_high count value
+ * @alarm_transceiver_temp_low: alarm_transceiver_temp_low count value
+ * @alarm_laser_bias_current_high: alarm_laser_bias_current_high count value
+ * @alarm_laser_bias_current_low: alarm_laser_bias_current_low count value
+ * @alarm_laser_output_power_high: alarm_laser_output_power_high count value
+ * @alarm_laser_output_power_low: alarm_laser_output_power_low count value
+ * @warn_transceiver_temp_high: warn_transceiver_temp_high count value
+ * @warn_transceiver_temp_low: warn_transceiver_temp_low count value
+ * @warn_laser_bias_current_high: warn_laser_bias_current_high count value
+ * @warn_laser_bias_current_low: warn_laser_bias_current_low count value
+ * @warn_laser_output_power_high: warn_laser_output_power_high count value
+ * @warn_laser_output_power_low: warn_laser_output_power_low count value
+ */
+typedef struct xge_hal_stats_xpak_t {
+ u16 alarm_transceiver_temp_high;
+ u16 alarm_transceiver_temp_low;
+ u16 alarm_laser_bias_current_high;
+ u16 alarm_laser_bias_current_low;
+ u16 alarm_laser_output_power_high;
+ u16 alarm_laser_output_power_low;
+ u16 warn_transceiver_temp_high;
+ u16 warn_transceiver_temp_low;
+ u16 warn_laser_bias_current_high;
+ u16 warn_laser_bias_current_low;
+ u16 warn_laser_output_power_high;
+ u16 warn_laser_output_power_low;
+} xge_hal_stats_xpak_t;
+
+
/**
* struct xge_hal_stats_sw_err_t - HAL device error statistics.
+ * @sm_err_cnt: TBD
+ * @single_ecc_err_cnt: TBD
+ * @double_ecc_err_cnt: TBD
* @ecc_err_cnt: ECC error count.
* @parity_err_cnt: Parity error count.
* @serr_cnt: Number of exceptions indicated to the host via PCI SERR#.
@@ -783,10 +890,24 @@ typedef struct xge_hal_stats_sw_err_t {
u32 serr_cnt;
u32 rxd_t_code_err_cnt[16];
u32 txd_t_code_err_cnt[16];
+ xge_hal_stats_xpak_t stats_xpak;
+ xge_hal_xpak_counter_t xpak_counter;
} xge_hal_stats_sw_err_t;
/**
* struct xge_hal_stats_device_info_t - HAL own per-device statistics.
+ *
+ * @rx_traffic_intr_cnt: TBD
+ * @tx_traffic_intr_cnt: TBD
+ * @txpic_intr_cnt: TBD
+ * @txdma_intr_cnt: TBD
+ * @txmac_intr_cnt: TBD
+ * @txxgxs_intr_cnt: TBD
+ * @rxpic_intr_cnt: TBD
+ * @rxdma_intr_cnt: TBD
+ * @rxmac_intr_cnt: TBD
+ * @rxxgxs_intr_cnt: TBD
+ * @mc_intr_cnt: TBD
* @not_traffic_intr_cnt: Number of times the host was interrupted
* without new completions.
* "Non-traffic interrupt counter".
@@ -797,6 +918,13 @@ typedef struct xge_hal_stats_sw_err_t {
* @soft_reset_cnt: Number of times soft reset is done on this device.
* @rxufca_hi_adjust_cnt: TODO
* @rxufca_lo_adjust_cnt: TODO
+ *
+ * @tot_frms_lroised: TBD
+ * @tot_lro_sessions: TBD
+ * @lro_frm_len_exceed_cnt: TBD
+ * @lro_sg_exceed_cnt: TBD
+ * @lro_out_of_seq_pkt_cnt: TBD
+ * @lro_dup_pkt_cnt: TBD
*
* HAL per-device statistics.
* See also: xge_hal_stats_channel_info_t{}.
@@ -804,15 +932,30 @@ typedef struct xge_hal_stats_sw_err_t {
typedef struct xge_hal_stats_device_info_t {
u32 rx_traffic_intr_cnt;
u32 tx_traffic_intr_cnt;
+ u32 txpic_intr_cnt;
+ u32 txdma_intr_cnt;
+ u32 txmac_intr_cnt;
+ u32 txxgxs_intr_cnt;
+ u32 rxpic_intr_cnt;
+ u32 rxdma_intr_cnt;
+ u32 rxmac_intr_cnt;
+ u32 rxxgxs_intr_cnt;
+ u32 mc_intr_cnt;
u32 not_traffic_intr_cnt;
u32 traffic_intr_cnt;
u32 total_intr_cnt;
u32 soft_reset_cnt;
u32 rxufca_hi_adjust_cnt;
u32 rxufca_lo_adjust_cnt;
+ u32 bimodal_hi_adjust_cnt;
+ u32 bimodal_lo_adjust_cnt;
#ifdef XGE_HAL_CONFIG_LRO
u32 tot_frms_lroised;
u32 tot_lro_sessions;
+ u32 lro_frm_len_exceed_cnt;
+ u32 lro_sg_exceed_cnt;
+ u32 lro_out_of_seq_pkt_cnt;
+ u32 lro_dup_pkt_cnt;
#endif
} xge_hal_stats_device_info_t;
@@ -829,6 +972,8 @@ typedef struct xge_hal_stats_device_info_t {
* corresponding value will be simply pointer to PCI device.
*
* @hw_info: Xframe statistics maintained by the hardware.
+ * @hw_info_saved: TBD
+ * @hw_info_latest: TBD
* @sw_dev_info_stats: HAL's "soft" device informational statistics, e.g. number
* of completions per interrupt.
* @sw_dev_err_stats: HAL's "soft" device error statistics.
@@ -891,4 +1036,6 @@ xge_hal_status_e xge_hal_stats_channel(xge_hal_channel_h channelh,
xge_hal_status_e xge_hal_stats_reset(xge_hal_device_h devh);
+__EXTERN_END_DECLS
+
#endif /* XGE_HAL_STATS_H */
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal-types.h b/usr/src/uts/common/io/xge/hal/include/xgehal-types.h
index 293c780bb7..e1784ab401 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal-types.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal-types.h
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-types.h
- *
- * Description: HAL commonly used types and enumerations
*
- * Created: 19 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_TYPES_H
@@ -35,6 +26,8 @@
#include "xge-os-pal.h"
+__EXTERN_BEGIN_DECLS
+
/*
* BIT(loc) - set bit at offset
*/
@@ -46,6 +39,32 @@
#define vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
#define vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
+/*
+ * bVALx(bits, loc) - Get the value of x bits at location
+ */
+#define bVAL1(bits, loc) ((((u64)bits) >> (64-(loc+1))) & 0x1)
+#define bVAL2(bits, loc) ((((u64)bits) >> (64-(loc+2))) & 0x3)
+#define bVAL3(bits, loc) ((((u64)bits) >> (64-(loc+3))) & 0x7)
+#define bVAL4(bits, loc) ((((u64)bits) >> (64-(loc+4))) & 0xF)
+#define bVAL5(bits, loc) ((((u64)bits) >> (64-(loc+5))) & 0x1F)
+#define bVAL6(bits, loc) ((((u64)bits) >> (64-(loc+6))) & 0x3F)
+#define bVAL7(bits, loc) ((((u64)bits) >> (64-(loc+7))) & 0x7F)
+#define bVAL8(bits, loc) ((((u64)bits) >> (64-(loc+8))) & 0xFF)
+#define bVAL12(bits, loc) ((((u64)bits) >> (64-(loc+12))) & 0xFFF)
+#define bVAL16(bits, loc) ((((u64)bits) >> (64-(loc+16))) & 0xFFFF)
+#define bVAL20(bits, loc) ((((u64)bits) >> (64-(loc+20))) & 0xFFFFF)
+#define bVAL22(bits, loc) ((((u64)bits) >> (64-(loc+22))) & 0x3FFFFF)
+#define bVAL24(bits, loc) ((((u64)bits) >> (64-(loc+24))) & 0xFFFFFF)
+#define bVAL28(bits, loc) ((((u64)bits) >> (64-(loc+28))) & 0xFFFFFFF)
+#define bVAL32(bits, loc) ((((u64)bits) >> (64-(loc+32))) & 0xFFFFFFFF)
+#define bVAL36(bits, loc) ((((u64)bits) >> (64-(loc+36))) & 0xFFFFFFFFF)
+#define bVAL40(bits, loc) ((((u64)bits) >> (64-(loc+40))) & 0xFFFFFFFFFF)
+#define bVAL44(bits, loc) ((((u64)bits) >> (64-(loc+44))) & 0xFFFFFFFFFFF)
+#define bVAL48(bits, loc) ((((u64)bits) >> (64-(loc+48))) & 0xFFFFFFFFFFFF)
+#define bVAL52(bits, loc) ((((u64)bits) >> (64-(loc+52))) & 0xFFFFFFFFFFFFF)
+#define bVAL56(bits, loc) ((((u64)bits) >> (64-(loc+56))) & 0xFFFFFFFFFFFFFF)
+#define bVAL60(bits, loc) ((((u64)bits) >> (64-(loc+60))) & 0xFFFFFFFFFFFFFFF)
+
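The bVALx() macros count bit 0 as the most significant bit of the 64-bit word, matching the Xframe register layout. A minimal standalone sketch of extracting fields this way; the register value and field positions are hypothetical, and two macros are repeated so the sketch compiles on its own:

	#include <stdio.h>

	typedef unsigned long long u64;

	/* same shape as the macros above, repeated for a standalone build */
	#define bVAL8(bits, loc)	((((u64)bits) >> (64-(loc+8))) & 0xFF)
	#define bVAL16(bits, loc)	((((u64)bits) >> (64-(loc+16))) & 0xFFFF)

	int
	main(void)
	{
		u64 reg = 0xAB12000000000000ULL;	/* hypothetical register word */

		/* bits 0..7, counted from the MSB, hold 0xAB */
		printf("field8  = 0x%llx\n", bVAL8(reg, 0));
		/* bits 0..15 hold 0xAB12 */
		printf("field16 = 0x%llx\n", bVAL16(reg, 0));
		return (0);
	}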
#define XGE_HAL_BASE_INF 100
#define XGE_HAL_BASE_ERR 200
#define XGE_HAL_BASE_BADCFG 300
@@ -94,8 +113,12 @@
* @XGE_HAL_INF_LRO_END_2: Returned by ULD LRO module, when new
* frame triggers LRO flush. Lro frame should be flushed first then
* new frame should be flushed next.
+ * @XGE_HAL_INF_LRO_END_3: Returned by the ULD LRO module when a new
+ * frame triggers the close of the current LRO session and the opening
+ * of a new LRO session with that frame.
* @XGE_HAL_INF_LRO_SESSIONS_XCDED: Returned by ULD LRO module, when no
* more LRO sessions can be added.
+ * @XGE_HAL_INF_NOT_ENOUGH_HW_CQES: TBD
* @XGE_HAL_ERR_DRIVER_NOT_INITIALIZED: HAL is not initialized.
 * @XGE_HAL_ERR_OUT_OF_MEMORY: Out of memory (for example, when
 * allocating descriptors).
@@ -161,8 +184,6 @@
* the structure xge_hal_tti_config_t{} for valid values.
* @XGE_HAL_BADCFG_TX_UFC_C: Invalid frame count for Tx link utilization
* range C. See the structure xge_hal_tti_config_t{} for valid values.
- * @XGE_HAL_BADCFG_TX_URANGE_D: Invalid Tx link utilization range D. See
- * the structure xge_hal_tti_config_t{} for valid values.
* @XGE_HAL_BADCFG_TX_UFC_D: Invalid frame count for Tx link utilization
* range D. See the structure xge_hal_tti_config_t{} for valid values.
* @XGE_HAL_BADCFG_TX_TIMER_VAL: Invalid Tx timer value. See the
@@ -288,6 +309,10 @@
* See also xge_hal_device_config_t{}.
* @XGE_HAL_BADCFG_LINK_STABILITY_PERIOD: Invalid link stability period.
* @XGE_HAL_BADCFG_DEVICE_POLL_MILLIS: Invalid device poll interval.
+ * @XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN: TBD
+ * @XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN: TBD
+ * @XGE_HAL_BADCFG_MEDIA: TBD
+ * @XGE_HAL_BADCFG_NO_ISR_EVENTS: TBD
* See the structure xge_hal_device_config_t{} for valid values.
* @XGE_HAL_EOF_TRACE_BUF: End of the circular (in memory) trace buffer.
* Returned by xge_hal_mgmt_trace_read(), when user tries to read the trace
@@ -295,7 +320,12 @@
* or more reads.
* @XGE_HAL_BADCFG_RING_RTS_MAC_EN: Invalid value of RTS_MAC_EN enable. See
* the structure xge_hal_ring_queue_t for valid values.
- *
+ * @XGE_HAL_BADCFG_LRO_SG_SIZE: Invalid value of LRO scatter-gather size.
+ * See the structure xge_hal_device_config_t for valid values.
+ * @XGE_HAL_BADCFG_LRO_FRM_LEN: Invalid value of LRO frame length.
+ * See the structure xge_hal_device_config_t for valid values.
+ * @XGE_HAL_BADCFG_WQE_NUM_ODS: TBD
+ * @XGE_HAL_BADCFG_BIMODAL_INTR: Invalid bimodal interrupts configuration value.
* Enumerates status and error codes returned by HAL public
* API functions.
*/
@@ -316,8 +346,9 @@ typedef enum xge_hal_status_e {
XGE_HAL_INF_LRO_UNCAPABLE = XGE_HAL_BASE_INF + 10,
XGE_HAL_INF_LRO_END_1 = XGE_HAL_BASE_INF + 11,
XGE_HAL_INF_LRO_END_2 = XGE_HAL_BASE_INF + 12,
- XGE_HAL_INF_LRO_SESSIONS_XCDED = XGE_HAL_BASE_INF + 13,
-
+ XGE_HAL_INF_LRO_END_3 = XGE_HAL_BASE_INF + 13,
+ XGE_HAL_INF_LRO_SESSIONS_XCDED = XGE_HAL_BASE_INF + 14,
+ XGE_HAL_INF_NOT_ENOUGH_HW_CQES = XGE_HAL_BASE_INF + 15,
XGE_HAL_ERR_DRIVER_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 1,
XGE_HAL_ERR_OUT_OF_MEMORY = XGE_HAL_BASE_ERR + 4,
XGE_HAL_ERR_CHANNEL_NOT_FOUND = XGE_HAL_BASE_ERR + 5,
@@ -346,6 +377,8 @@ typedef enum xge_hal_status_e {
XGE_HAL_ERR_INVALID_PCI_INFO = XGE_HAL_BASE_ERR + 28,
XGE_HAL_ERR_CRITICAL = XGE_HAL_BASE_ERR + 29,
XGE_HAL_ERR_RESET_FAILED = XGE_HAL_BASE_ERR + 30,
+ XGE_HAL_ERR_INVALID_WR = XGE_HAL_BASE_ERR + 31,
+ XGE_HAL_ERR_TOO_MANY = XGE_HAL_BASE_ERR + 32,
XGE_HAL_BADCFG_TX_URANGE_A = XGE_HAL_BASE_BADCFG + 1,
XGE_HAL_BADCFG_TX_UFC_A = XGE_HAL_BASE_BADCFG + 2,
@@ -353,7 +386,6 @@ typedef enum xge_hal_status_e {
XGE_HAL_BADCFG_TX_UFC_B = XGE_HAL_BASE_BADCFG + 4,
XGE_HAL_BADCFG_TX_URANGE_C = XGE_HAL_BASE_BADCFG + 5,
XGE_HAL_BADCFG_TX_UFC_C = XGE_HAL_BASE_BADCFG + 6,
- XGE_HAL_BADCFG_TX_URANGE_D = XGE_HAL_BASE_BADCFG + 7,
XGE_HAL_BADCFG_TX_UFC_D = XGE_HAL_BASE_BADCFG + 8,
XGE_HAL_BADCFG_TX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 9,
XGE_HAL_BADCFG_TX_TIMER_CI_EN = XGE_HAL_BASE_BADCFG + 10,
@@ -417,7 +449,14 @@ typedef enum xge_hal_status_e {
XGE_HAL_BADCFG_MEDIA = XGE_HAL_BASE_BADCFG + 69,
XGE_HAL_BADCFG_NO_ISR_EVENTS = XGE_HAL_BASE_BADCFG + 70,
XGE_HAL_BADCFG_RING_RTS_MAC_EN = XGE_HAL_BASE_BADCFG + 71,
-
+ XGE_HAL_BADCFG_LRO_SG_SIZE = XGE_HAL_BASE_BADCFG + 72,
+ XGE_HAL_BADCFG_LRO_FRM_LEN = XGE_HAL_BASE_BADCFG + 73,
+ XGE_HAL_BADCFG_WQE_NUM_ODS = XGE_HAL_BASE_BADCFG + 74,
+ XGE_HAL_BADCFG_BIMODAL_INTR = XGE_HAL_BASE_BADCFG + 75,
+ XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US = XGE_HAL_BASE_BADCFG + 76,
+ XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US = XGE_HAL_BASE_BADCFG + 77,
+ XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED = XGE_HAL_BASE_BADCFG + 78,
+ XGE_HAL_BADCFG_RTS_QOS_STEERING_CONFIG = XGE_HAL_BASE_BADCFG + 79,
XGE_HAL_EOF_TRACE_BUF = -1
} xge_hal_status_e;
@@ -540,11 +579,28 @@ full */
typedef void* xge_hal_device_h;
typedef void* xge_hal_dtr_h;
typedef void* xge_hal_channel_h;
-
+#ifdef XGEHAL_RNIC
+typedef void* xge_hal_towi_h;
+typedef void* xge_hal_hw_wqe_h;
+typedef void* xge_hal_hw_cqe_h;
+typedef void* xge_hal_lro_wqe_h;
+typedef void* xge_hal_lro_cqe_h;
+typedef void* xge_hal_up_msg_h;
+typedef void* xge_hal_down_msg_h;
+#endif
/*
* I2C device id. Used in I2C control register for accessing EEPROM device
* memory.
*/
#define XGE_DEV_ID 5
+typedef enum xge_hal_xpak_alarm_type_e {
+ XGE_HAL_XPAK_ALARM_EXCESS_TEMP = 1,
+ XGE_HAL_XPAK_ALARM_EXCESS_BIAS_CURRENT = 2,
+ XGE_HAL_XPAK_ALARM_EXCESS_LASER_OUTPUT = 3,
+} xge_hal_xpak_alarm_type_e;
+
+
+__EXTERN_END_DECLS
+
#endif /* XGE_HAL_TYPES_H */
diff --git a/usr/src/uts/common/io/xge/hal/include/xgehal.h b/usr/src/uts/common/io/xge/hal/include/xgehal.h
index f19957fbe5..a4b9a78caf 100644
--- a/usr/src/uts/common/io/xge/hal/include/xgehal.h
+++ b/usr/src/uts/common/io/xge/hal/include/xgehal.h
@@ -17,18 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal.h
- *
- * Description: Consolidated header. Upper layers should include it to
- * avoid include order problems.
*
- * Created: 14 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifndef XGE_HAL_H
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xge-queue.c b/usr/src/uts/common/io/xge/hal/xgehal/xge-queue.c
index 928e8ce94c..a50a2095ca 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xge-queue.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xge-queue.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xge-queue.c
- *
- * Description: serialized event queue
*
- * Created: 7 June 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#include "xge-queue.h"
@@ -71,7 +62,8 @@ __queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
queue->head_ptr = (char *)queue->head_ptr + real_size;
xge_debug_queue(XGE_TRACE,
"event_type: %d removing from the head: "
- "0x%llx:0x%llx:0x%llx:0x%llx elem 0x%llx length %d",
+ "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
+ ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
elem->event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
@@ -83,7 +75,8 @@ __queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
queue->tail_ptr = (char *)queue->tail_ptr - real_size;
xge_debug_queue(XGE_TRACE,
"event_type: %d removing from the tail: "
- "0x%llx:0x%llx:0x%llx:0x%llx elem 0x%llx length %d",
+ "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
+ ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
elem->event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
@@ -94,7 +87,8 @@ __queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
} else {
xge_debug_queue(XGE_TRACE,
"event_type: %d removing from the list: "
- "0x%llx:0x%llx:0x%llx:0x%llx elem 0x%llx length %d",
+ "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
+ ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
elem->event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
@@ -167,11 +161,12 @@ xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
try_again:
if ((char *)queue->tail_ptr + real_size <= (char *)queue->end_ptr) {
- elem = queue->tail_ptr;
+ elem = (xge_queue_item_t *) queue->tail_ptr;
queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size);
xge_debug_queue(XGE_TRACE,
"event_type: %d adding to the tail: "
- "0x%llx:0x%llx:0x%llx:0x%llx elem 0x%llx length %d",
+ "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
+ ":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
@@ -181,11 +176,12 @@ try_again:
real_size);
} else if ((char *)queue->head_ptr - real_size >=
(char *)queue->start_ptr) {
- elem = (void *)((char *)queue->head_ptr - real_size);
+ elem = (xge_queue_item_t *) ((char *)queue->head_ptr - real_size);
queue->head_ptr = elem;
xge_debug_queue(XGE_TRACE,
"event_type: %d adding to the head: "
- "0x%llx:0x%llx:0x%llx:0x%llx length %d",
+ "0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
+ ":0x"XGE_OS_LLXFMT" length %d",
event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
@@ -218,7 +214,7 @@ try_again:
xge_assert(queue->head_ptr >= queue->start_ptr &&
queue->head_ptr < queue->end_ptr);
elem->data_size = data_size;
- elem->event_type = event_type;
+ elem->event_type = (xge_hal_event_e) event_type;
elem->is_critical = is_critical;
if (is_critical)
queue->has_critical_event = 1;
@@ -258,7 +254,7 @@ xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
{
xge_queue_t *queue;
- if ((queue = xge_os_malloc(pdev, sizeof(xge_queue_t))) == NULL)
+ if ((queue = (xge_queue_t *) xge_os_malloc(pdev, sizeof(xge_queue_t))) == NULL)
return NULL;
queue->queued_func = queued;
@@ -295,8 +291,8 @@ void xge_queue_destroy(xge_queue_h queueh)
{
xge_queue_t *queue = (xge_queue_t *)queueh;
if (!xge_list_is_empty(&queue->list_head)) {
- xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x%llx",
- (u64)(ulong_t)queue);
+ xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x"
+ XGE_OS_LLXFMT, (u64)(ulong_t)queue);
}
xge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
XGE_QUEUE_BUF_SIZE);
@@ -324,7 +320,7 @@ __io_queue_grow(xge_queue_h queueh)
xge_list_t *item;
xge_queue_item_t *elem;
- xge_debug_queue(XGE_TRACE, "queue 0x%llx:%d is growing",
+ xge_debug_queue(XGE_TRACE, "queue 0x"XGE_OS_LLXFMT":%d is growing",
(u64)(ulong_t)queue, queue->pages_current);
newbuf = xge_os_malloc(queue->pdev,
@@ -420,7 +416,7 @@ void xge_queue_flush(xge_queue_h queueh)
XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
item) != XGE_QUEUE_IS_EMPTY) {
/* do nothing */
- xge_debug_queue(XGE_TRACE, "item %llx(%d) flushed",
+ xge_debug_queue(XGE_TRACE, "item "XGE_OS_LLXFMT"(%d) flushed",
item, item->event_type);
}
(void) __queue_get_reset_critical (queueh);
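The %llx-to-XGE_OS_LLXFMT conversions throughout this file exist because the printf conversion for a 64-bit value is not portable across the platforms the HAL targets. The actual definition lives in the xge OS abstraction headers; the conditionals below are an illustrative assumption only:

	/* illustrative only -- not the HAL's actual definition */
	#if defined(_WIN32)
	#define XGE_OS_LLXFMT	"%I64x"
	#else
	#define XGE_OS_LLXFMT	"%llx"
	#endif

	/* adjacent string literals concatenate at compile time, so
	 * "0x"XGE_OS_LLXFMT" length %d" becomes "0x%llx length %d" */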
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-channel-fp.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-channel-fp.c
index bdb1262098..a37b20660f 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-channel-fp.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-channel-fp.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-channel-fp.c
*
- * Description: HAL channel object functionality (fast path)
- *
- * Created: 10 June 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifdef XGE_DEBUG_FP
@@ -35,20 +26,22 @@
#endif
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-__hal_channel_dtr_alloc(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
+__hal_channel_dtr_alloc(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
void **tmp_arr;
- xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
- unsigned long flags = 0;
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
+ unsigned long flags = 0;
+#endif
- if (channel->reserve_length - channel->reserve_top >
- channel->reserve_threshold) {
+ if (channel->reserve_length - channel->reserve_top >
+ channel->reserve_threshold) {
_alloc_after_swap:
- *dtrh = channel->reserve_arr[--channel->reserve_length];
+ *dtrh = channel->reserve_arr[--channel->reserve_length];
- xge_debug_channel(XGE_TRACE, "dtrh 0x%llx allocated, "
- "channel %d:%d:%d, reserve_idx %d",
+ xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" allocated, "
+ "channel %d:%d:%d, reserve_idx %d",
(unsigned long long)(ulong_t)*dtrh,
channel->type, channel->post_qid,
channel->compl_qid, channel->reserve_length);
@@ -56,107 +49,119 @@ _alloc_after_swap:
return XGE_HAL_OK;
}
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
xge_os_spin_lock_irq(&channel->free_lock, flags);
+#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
+ xge_os_spin_lock(&channel->free_lock);
+#endif
- /* switch between empty and full arrays */
+ /* switch between empty and full arrays */
- /* the idea behind such a design is that by having free and reserved
- * arrays separated we basically separated irq and non-irq parts.
- * i.e. no additional lock need to be done when we free a resource */
+ /* the idea behind this design is that keeping the free and reserve
+  * arrays separate effectively separates the irq and non-irq parts,
+  * i.e. no additional locking is needed when we free a resource */
- if (channel->reserve_initial - channel->free_length >
- channel->reserve_threshold) {
+ if (channel->reserve_initial - channel->free_length >
+ channel->reserve_threshold) {
- tmp_arr = channel->reserve_arr;
+ tmp_arr = channel->reserve_arr;
channel->reserve_arr = channel->free_arr;
- channel->reserve_length = channel->reserve_initial;
- channel->free_arr = tmp_arr;
+ channel->reserve_length = channel->reserve_initial;
+ channel->free_arr = tmp_arr;
channel->reserve_top = channel->free_length;
channel->free_length = channel->reserve_initial;
channel->stats.reserve_free_swaps_cnt++;
xge_debug_channel(XGE_TRACE,
- "switch on channel %d:%d:%d, reserve_length %d, "
- "free_length %d", channel->type, channel->post_qid,
+ "switch on channel %d:%d:%d, reserve_length %d, "
+ "free_length %d", channel->type, channel->post_qid,
channel->compl_qid, channel->reserve_length,
channel->free_length);
- xge_os_spin_unlock_irq(&channel->free_lock, flags);
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
+ xge_os_spin_unlock_irq(&channel->free_lock, flags);
+#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
+ xge_os_spin_unlock(&channel->free_lock);
+#endif
goto _alloc_after_swap;
}
- xge_os_spin_unlock_irq(&channel->free_lock, flags);
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
+ xge_os_spin_unlock_irq(&channel->free_lock, flags);
+#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
+ xge_os_spin_unlock(&channel->free_lock);
+#endif
xge_debug_channel(XGE_TRACE, "channel %d:%d:%d is empty!",
channel->type, channel->post_qid,
channel->compl_qid);
- channel->stats.out_of_dtrs_cnt++;
+ channel->stats.full_cnt++;
- *dtrh = NULL;
+ *dtrh = NULL;
return XGE_HAL_INF_OUT_OF_DESCRIPTORS;
}
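A condensed model of the two-array scheme implemented above. Types, names, and locking are simplified assumptions: the real code also tracks reserve_top and reserve_threshold, fills free_arr from the top down, and takes free_lock around the swap.

	#include <stddef.h>

	/* simplified channel, illustrative only */
	typedef struct {
		void	**reserve_arr;	/* popped by the allocator */
		void	**free_arr;	/* pushed to by the free path */
		int	reserve_count;	/* descriptors left to allocate */
		int	free_count;	/* descriptors returned so far */
	} demo_channel_t;

	/* free path: touches only free_arr, so it never contends with
	 * the allocator's fast path -- no lock needed here */
	static void
	demo_free(demo_channel_t *ch, void *dtr)
	{
		ch->free_arr[ch->free_count++] = dtr;
	}

	/* alloc path: swap the arrays (under free_lock in the real
	 * code) only when the reserve runs dry */
	static void *
	demo_alloc(demo_channel_t *ch)
	{
		if (ch->reserve_count == 0) {
			void **tmp;

			if (ch->free_count == 0)
				return (NULL);	/* out of descriptors */
			tmp = ch->reserve_arr;	/* swap the array roles */
			ch->reserve_arr = ch->free_arr;
			ch->free_arr = tmp;
			ch->reserve_count = ch->free_count;
			ch->free_count = 0;
		}
		return (ch->reserve_arr[--ch->reserve_count]);
	}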
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
-__hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+__hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int offset)
{
- xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
- /* restore a previously allocated dtrh at current offset and update
- * the available reserve length accordingly. If dtrh is null just
+ /* restore a previously allocated dtrh at current offset and update
+ * the available reserve length accordingly. If dtrh is null just
* update the reserve length, only */
if (dtrh) {
channel->reserve_arr[channel->reserve_length + offset] = dtrh;
- xge_debug_channel(XGE_TRACE, "dtrh 0x%llx restored for "
- "channel %d:%d:%d, offset %d at reserve index %d, ",
+ xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" restored for "
+ "channel %d:%d:%d, offset %d at reserve index %d, ",
(unsigned long long)(ulong_t)dtrh, channel->type,
channel->post_qid, channel->compl_qid, offset,
- channel->reserve_length + offset);
+ channel->reserve_length + offset);
}
else {
- channel->reserve_length += offset;
- xge_debug_channel(XGE_TRACE, "channel %d:%d:%d, restored "
- "for offset %d, new reserve_length %d, free length %d",
+ channel->reserve_length += offset;
+ xge_debug_channel(XGE_TRACE, "channel %d:%d:%d, restored "
+ "for offset %d, new reserve_length %d, free length %d",
channel->type, channel->post_qid, channel->compl_qid,
- offset, channel->reserve_length, channel->free_length);
+ offset, channel->reserve_length, channel->free_length);
}
}
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
- xge_hal_channel_t *channel = (xge_hal_channel_t*)channelh;
+ xge_hal_channel_t *channel = (xge_hal_channel_t*)channelh;
xge_assert(channel->work_arr[channel->post_index] == NULL);
channel->work_arr[channel->post_index++] = dtrh;
- /* wrap-around */
- if (channel->post_index == channel->length)
- channel->post_index = 0;
+ /* wrap-around */
+ if (channel->post_index == channel->length)
+ channel->post_index = 0;
}
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_try_complete(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
- xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
xge_assert(channel->work_arr);
- xge_assert(channel->compl_index < channel->length);
+ xge_assert(channel->compl_index < channel->length);
- *dtrh = channel->work_arr[channel->compl_index];
+ *dtrh = channel->work_arr[channel->compl_index];
}
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_complete(xge_hal_channel_h channelh)
{
- xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
- channel->work_arr[channel->compl_index] = NULL;
+ channel->work_arr[channel->compl_index] = NULL;
/* wrap-around */
if (++channel->compl_index == channel->length)
@@ -168,96 +173,113 @@ __hal_channel_dtr_complete(xge_hal_channel_h channelh)
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
- xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
- channel->free_arr[--channel->free_length] = dtrh;
+ channel->free_arr[--channel->free_length] = dtrh;
- xge_debug_channel(XGE_TRACE, "dtrh 0x%llx freed, "
- "channel %d:%d:%d, new free_length %d",
+ xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" freed, "
+ "channel %d:%d:%d, new free_length %d",
(unsigned long long)(ulong_t)dtrh,
channel->type, channel->post_qid,
channel->compl_qid, channel->free_length);
}
/**
- * xge_hal_channel_userdata - Get user-specified channel context.
+ * xge_hal_channel_dtr_count
+ *
+ * Retrieve the number of DTRs available. This function cannot be called
+ * from the data path; ring_initial_replenish() is the only user.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+xge_hal_channel_dtr_count(xge_hal_channel_h channelh)
+{
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+
+ return ((channel->reserve_length - channel->reserve_top) +
+ (channel->reserve_initial - channel->free_length) -
+ channel->reserve_threshold);
+}
+
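A sketch of how a ULD replenish routine might use the new helper. The loop body is hypothetical; only the count call is HAL API.

	#include "xgehal.h"	/* consolidated HAL header */

	/* hypothetical ULD helper, illustrative only */
	static int
	xgell_demo_replenish(xge_hal_channel_h channelh)
	{
		int i, free_dtrs = xge_hal_channel_dtr_count(channelh);

		for (i = 0; i < free_dtrs; i++) {
			/* reserve a descriptor, attach an rx buffer, post it */
		}
		return (free_dtrs);
	}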
+/**
+ * xge_hal_channel_userdata - Get user-specified channel context.
* @channelh: Channel handle. Obtained via xge_hal_channel_open().
*
- * Returns: per-channel "user data", which can be any ULD-defined context.
- * The %userdata "gets" into the channel at open time
- * (see xge_hal_channel_open()).
+ * Returns: per-channel "user data", which can be any ULD-defined context.
+ * The %userdata "gets" into the channel at open time
+ * (see xge_hal_channel_open()).
*
* See also: xge_hal_channel_open().
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void*
xge_hal_channel_userdata(xge_hal_channel_h channelh)
{
- xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
return channel->userdata;
}
/**
- * xge_hal_channel_id - Get channel ID.
+ * xge_hal_channel_id - Get channel ID.
* @channelh: Channel handle. Obtained via xge_hal_channel_open().
*
- * Returns: channel ID. For link layer channel id is the number
- * in the range from 0 to 7 that identifies hardware ring or fifo,
- * depending on the channel type.
+ * Returns: channel ID. For the link layer, the channel id is a number
+ * in the range from 0 to 7 that identifies a hardware ring or fifo,
+ * depending on the channel type.
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_channel_id(xge_hal_channel_h channelh)
{
- xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
return channel->post_qid;
}
/**
- * xge_hal_check_alignment - Check buffer alignment and calculate the
- * "misaligned" portion.
- * @dma_pointer: DMA address of the buffer.
+ * xge_hal_check_alignment - Check buffer alignment and calculate the
+ * "misaligned" portion.
+ * @dma_pointer: DMA address of the buffer.
* @size: Buffer size, in bytes.
- * @alignment: Alignment "granularity" (see below), in bytes.
- * @copy_size: Maximum number of bytes to "extract" from the buffer
- * (in order to spost it as a separate scatter-gather entry). See below.
+ * @alignment: Alignment "granularity" (see below), in bytes.
+ * @copy_size: Maximum number of bytes to "extract" from the buffer
+ * (in order to post it as a separate scatter-gather entry). See below.
*
- * Check buffer alignment and calculate "misaligned" portion, if exists.
- * The buffer is considered aligned if its address is multiple of
- * the specified @alignment. If this is the case,
+ * Check buffer alignment and calculate the "misaligned" portion, if one
+ * exists. The buffer is considered aligned if its address is a multiple of
+ * the specified @alignment. If this is the case,
* xge_hal_check_alignment() returns zero.
- * Otherwise, xge_hal_check_alignment() uses the last argument,
+ * Otherwise, xge_hal_check_alignment() uses the last argument,
* @copy_size,
- * to calculate the size to "extract" from the buffer. The @copy_size
- * may or may not be equal @alignment. The difference between these two
- * arguments is that the @alignment is used to make the decision: aligned
- * or not aligned. While the @copy_size is used to calculate the portion
- * of the buffer to "extract", i.e. to post as a separate entry in the
- * transmit descriptor. For example, the combination
- * @alignment=8 and @copy_size=64 will work okay on AMD Opteron boxes.
+ * to calculate the size to "extract" from the buffer. The @copy_size
+ * may or may not be equal to @alignment. The difference between these two
+ * arguments is that the @alignment is used to make the decision: aligned
+ * or not aligned, while the @copy_size is used to calculate the portion
+ * of the buffer to "extract", i.e. to post as a separate entry in the
+ * transmit descriptor. For example, the combination
+ * @alignment=8 and @copy_size=64 will work okay on AMD Opteron boxes.
*
- * Note: @copy_size should be a multiple of @alignment. In many practical
- * cases @copy_size and @alignment will probably be equal.
+ * Note: @copy_size should be a multiple of @alignment. In many practical
+ * cases @copy_size and @alignment will probably be equal.
*
* See also: xge_hal_fifo_dtr_buffer_set_aligned().
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
-xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment,
- int copy_size)
+xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment,
+ int copy_size)
{
- int misaligned_size;
+ int misaligned_size;
- misaligned_size = (int)(dma_pointer & (alignment - 1));
+ misaligned_size = (int)(dma_pointer & (alignment - 1));
if (!misaligned_size) {
return 0;
}
if (size > copy_size) {
- misaligned_size = (int)(dma_pointer & (copy_size - 1));
- misaligned_size = copy_size - misaligned_size;
+ misaligned_size = (int)(dma_pointer & (copy_size - 1));
+ misaligned_size = copy_size - misaligned_size;
} else {
- misaligned_size = size;
+ misaligned_size = size;
}
return misaligned_size;
}
+
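A sketch of the intended call pattern on the transmit path, using the alignment=8/copy_size=64 combination suggested in the comment above. The surrounding driver logic is hypothetical; only the alignment call is HAL API.

	#include "xgehal.h"	/* consolidated HAL header */

	/* hypothetical driver-side helper, illustrative only */
	static void
	demo_post_buffer(dma_addr_t dma_addr, int size)
	{
		int misaligned = xge_hal_check_alignment(dma_addr, size, 8, 64);

		if (misaligned == 0) {
			/* aligned: post the buffer as one scatter-gather entry */
		} else {
			/* copy the first 'misaligned' bytes into an aligned
			 * area and post them separately, then post the rest */
		}
	}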
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-channel.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-channel.c
index e523cd4472..b9fa1be245 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-channel.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-channel.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
*
- * FileName : xgehal-channel.c
- *
- * Description: chipset channel abstraction
- *
- * Created: 10 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#include "xgehal-channel.h"
@@ -35,25 +26,14 @@
#include "xgehal-ring.h"
#include "xgehal-device.h"
#include "xgehal-regs.h"
+#ifdef XGEHAL_RNIC
+#include "xgehal-types.h"
+#include "xgehal-rnic.h"
+#endif
static int msix_idx = 0;
/*
- * __hal_channel_dtr_count
- *
- * Retreive number of DTRs available. This function can not be called
- * from data path. ring_initial_replenishi() is the only user.
- */
-int __hal_channel_dtr_count(xge_hal_channel_h channelh)
-{
- xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
-
- return ((channel->reserve_length - channel->reserve_top) +
- (channel->reserve_initial - channel->free_length) -
- channel->reserve_threshold);
-}
-
-/*
* __hal_channel_dtr_next_reservelist
*
 * Walking through all the available DTRs.
@@ -93,7 +73,7 @@ __hal_channel_dtr_next_freelist(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
}
/*
- * __hal_ring_dtr_next_not_completed - Get the _next_ posted but
+ * __hal_channel_dtr_next_not_completed - Get the _next_ posted but
* not completed descriptor.
*
* Walking through the "not completed" DTRs.
@@ -102,15 +82,14 @@ static xge_hal_status_e
__hal_channel_dtr_next_not_completed(xge_hal_channel_h channelh,
xge_hal_dtr_h *dtrh)
{
- xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
-
__hal_channel_dtr_try_complete(channelh, dtrh);
- rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
- if (rxdp == NULL) {
+ if (*dtrh == NULL) {
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
- xge_assert(rxdp->host_control!=0);
+#ifndef XGEHAL_RNIC
+ xge_assert(((xge_hal_ring_rxd_1_t *)*dtrh)->host_control!=0);
+#endif
__hal_channel_dtr_complete(channelh);
@@ -125,22 +104,49 @@ __hal_channel_allocate(xge_hal_device_h devh, int post_qid,
xge_hal_channel_t *channel;
int size = 0;
- xge_assert((type == XGE_HAL_CHANNEL_TYPE_RING) ||
- (type == XGE_HAL_CHANNEL_TYPE_FIFO));
-
- if (type == XGE_HAL_CHANNEL_TYPE_RING) {
- xge_assert(post_qid + 1 >= XGE_HAL_MIN_RING_NUM &&
- post_qid + 1 <= XGE_HAL_MAX_RING_NUM);
- size = sizeof(xge_hal_ring_t);
- } else if (type == XGE_HAL_CHANNEL_TYPE_FIFO) {
- xge_assert(post_qid + 1 >= XGE_HAL_MIN_FIFO_NUM &&
- post_qid + 1 <= XGE_HAL_MAX_FIFO_NUM);
- size = sizeof(xge_hal_fifo_t);
+ switch(type) {
+ case XGE_HAL_CHANNEL_TYPE_FIFO:
+ xge_assert(post_qid + 1 >= XGE_HAL_MIN_FIFO_NUM &&
+ post_qid + 1 <= XGE_HAL_MAX_FIFO_NUM);
+ size = sizeof(xge_hal_fifo_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RING:
+ xge_assert(post_qid + 1 >= XGE_HAL_MIN_RING_NUM &&
+ post_qid + 1 <= XGE_HAL_MAX_RING_NUM);
+ size = sizeof(xge_hal_ring_t);
+ break;
+#ifdef XGEHAL_RNIC
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ size = sizeof(xge_hal_sq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_HW_RECEIVE_QUEUE:
+ size = sizeof(xge_hal_hrq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_HW_COMPLETION_QUEUE:
+ size = sizeof(xge_hal_hcq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_LRO_RECEIVE_QUEUE:
+ size = sizeof(xge_hal_lrq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_LRO_COMPLETION_QUEUE:
+ size = sizeof(xge_hal_lcq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ size = sizeof(xge_hal_umq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ size = sizeof(xge_hal_dmq_t);
+ break;
+#endif
+ default:
+ xge_assert(size);
+ break;
+
}
/* allocate FIFO channel */
- channel = xge_os_malloc(hldev->pdev, size);
+ channel = (xge_hal_channel_t *) xge_os_malloc(hldev->pdev, size);
if (channel == NULL) {
return NULL;
}
@@ -163,10 +169,48 @@ void __hal_channel_free(xge_hal_channel_t *channel)
xge_assert(channel->pdev);
- if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
- size = sizeof(xge_hal_ring_t);
- } else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
- size = sizeof(xge_hal_fifo_t);
+ switch(channel->type) {
+ case XGE_HAL_CHANNEL_TYPE_FIFO:
+ size = sizeof(xge_hal_fifo_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RING:
+ size = sizeof(xge_hal_ring_t);
+ break;
+#ifdef XGEHAL_RNIC
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ size = sizeof(xge_hal_sq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_HW_RECEIVE_QUEUE:
+ size = sizeof(xge_hal_hrq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_HW_COMPLETION_QUEUE:
+ size = sizeof(xge_hal_hcq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_LRO_RECEIVE_QUEUE:
+ size = sizeof(xge_hal_lrq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_LRO_COMPLETION_QUEUE:
+ size = sizeof(xge_hal_lcq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ size = sizeof(xge_hal_umq_t);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ size = sizeof(xge_hal_dmq_t);
+ break;
+#else
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_HW_RECEIVE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_HW_COMPLETION_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_LRO_RECEIVE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_LRO_COMPLETION_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ xge_assert(size);
+ break;
+#endif
+ default:
+ break;
}
xge_os_free(channel->pdev, channel, size);
@@ -195,7 +239,7 @@ __hal_channel_initialize (xge_hal_channel_h channelh,
channel->reserve_length = channel->reserve_initial;
channel->reserve_threshold = reserve_threshold;
channel->reserve_top = 0;
- channel->saved_arr = xge_os_malloc(hldev->pdev,
+ channel->saved_arr = (void **) xge_os_malloc(hldev->pdev,
sizeof(void*)*channel->reserve_max);
if (channel->saved_arr == NULL) {
return XGE_HAL_ERR_OUT_OF_MEMORY;
@@ -203,7 +247,7 @@ __hal_channel_initialize (xge_hal_channel_h channelh,
xge_os_memzero(channel->saved_arr, sizeof(void*)*channel->reserve_max);
channel->free_arr = channel->saved_arr;
channel->free_length = channel->reserve_initial;
- channel->work_arr = xge_os_malloc(hldev->pdev,
+ channel->work_arr = (void **) xge_os_malloc(hldev->pdev,
sizeof(void*)*channel->reserve_max);
if (channel->work_arr == NULL) {
return XGE_HAL_ERR_OUT_OF_MEMORY;
@@ -214,8 +258,8 @@ __hal_channel_initialize (xge_hal_channel_h channelh,
channel->compl_index = 0;
channel->length = channel->reserve_initial;
- channel->orig_arr = xge_os_malloc(hldev->pdev,
- sizeof(void*)*channel->reserve_max);
+ channel->orig_arr = (void **) xge_os_malloc(hldev->pdev,
+ sizeof(void*)*channel->reserve_max);
if (channel->orig_arr == NULL)
return XGE_HAL_ERR_OUT_OF_MEMORY;
@@ -223,7 +267,7 @@ __hal_channel_initialize (xge_hal_channel_h channelh,
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
xge_os_spin_lock_init_irq(&channel->free_lock, hldev->irqh);
-#else
+#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
xge_os_spin_lock_init(&channel->free_lock, hldev->pdev);
#endif
@@ -259,7 +303,7 @@ void __hal_channel_terminate(xge_hal_channel_h channelh)
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
xge_os_spin_lock_destroy_irq(&channel->free_lock, hldev->irqh);
-#else
+#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
xge_os_spin_lock_destroy(&channel->free_lock, hldev->pdev);
#endif
}
@@ -332,33 +376,95 @@ xge_hal_channel_open(xge_hal_device_h devh,
*channelh = NULL;
+#ifdef XGEHAL_RNIC
+ if((attr->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
+ (attr->type == XGE_HAL_CHANNEL_TYPE_RING)) {
+#endif
/* find channel */
- xge_list_for_each(item, &device->free_channels) {
- xge_hal_channel_t *tmp;
+ xge_list_for_each(item, &device->free_channels) {
+ xge_hal_channel_t *tmp;
+
+ tmp = xge_container_of(item, xge_hal_channel_t, item);
+ if (tmp->type == attr->type &&
+ tmp->post_qid == attr->post_qid &&
+ tmp->compl_qid == attr->compl_qid) {
+ channel = tmp;
+ break;
+ }
+ }
- tmp = xge_container_of(item, xge_hal_channel_t, item);
- if (tmp->type == attr->type &&
- tmp->post_qid == attr->post_qid &&
- tmp->compl_qid == attr->compl_qid) {
- channel = tmp;
- break;
+ if (channel == NULL) {
+ return XGE_HAL_ERR_CHANNEL_NOT_FOUND;
}
- }
- if (channel == NULL) {
- /* most likely configuration mistake */
- return XGE_HAL_ERR_CHANNEL_NOT_FOUND;
+#ifdef XGEHAL_RNIC
}
+ else {
+ channel = __hal_channel_allocate(devh, attr->post_qid, attr->type);
+ if (channel == NULL) {
+ xge_debug_device(XGE_ERR,
+ "__hal_channel_allocate failed");
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+ }
+#endif
+#ifndef XGEHAL_RNIC
xge_assert((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
(channel->type == XGE_HAL_CHANNEL_TYPE_RING));
+#endif
+#ifdef XGEHAL_RNIC
+ if((reopen == XGE_HAL_CHANNEL_OC_NORMAL) ||
+ ((channel->type != XGE_HAL_CHANNEL_TYPE_FIFO) &&
+ (channel->type != XGE_HAL_CHANNEL_TYPE_RING))) {
+#else
if (reopen == XGE_HAL_CHANNEL_OC_NORMAL) {
+#endif
/* allocate memory, initialize pointers, etc */
- if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO)
- status = __hal_fifo_open(channel, attr);
- else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING)
- status = __hal_ring_open(channel, attr);
+ switch(channel->type) {
+ case XGE_HAL_CHANNEL_TYPE_FIFO:
+ status = __hal_fifo_open(channel, attr);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RING:
+ status = __hal_ring_open(channel, attr);
+ break;
+#ifdef XGEHAL_RNIC
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ status = __hal_sq_open(channel, attr);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_HW_RECEIVE_QUEUE:
+ status = __hal_hrq_open(channel, attr);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_HW_COMPLETION_QUEUE:
+ status = __hal_hcq_open(channel, attr);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_LRO_RECEIVE_QUEUE:
+ status = __hal_lrq_open(channel, attr);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_LRO_COMPLETION_QUEUE:
+ status = __hal_lcq_open(channel, attr);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ status = __hal_umq_open(channel, attr);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ status = __hal_dmq_open(channel, attr);
+ break;
+#else
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_HW_RECEIVE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_HW_COMPLETION_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_LRO_RECEIVE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_LRO_COMPLETION_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ status = XGE_HAL_FAIL;
+ break;
+#endif
+ default:
+ break;
+ }
if (status == XGE_HAL_OK) {
for (i = 0; i < channel->reserve_initial; i++) {
@@ -389,13 +495,53 @@ xge_hal_channel_open(xge_hal_device_h devh,
}
/* move channel to the open state list */
- xge_list_remove(&channel->item);
- if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
- xge_list_insert(&channel->item, &device->fifo_channels);
- } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
- xge_list_insert(&channel->item, &device->ring_channels);
- }
+ switch(channel->type) {
+ case XGE_HAL_CHANNEL_TYPE_FIFO:
+ xge_list_remove(&channel->item);
+ xge_list_insert(&channel->item, &device->fifo_channels);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RING:
+ xge_list_remove(&channel->item);
+ xge_list_insert(&channel->item, &device->ring_channels);
+ break;
+#ifdef XGEHAL_RNIC
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ xge_list_insert(&channel->item, &device->sq_channels);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_HW_RECEIVE_QUEUE:
+ xge_list_insert(&channel->item, &device->hrq_channels);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_HW_COMPLETION_QUEUE:
+ xge_list_insert(&channel->item, &device->hcq_channels);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_LRO_RECEIVE_QUEUE:
+ xge_list_insert(&channel->item, &device->lrq_channels);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_LRO_COMPLETION_QUEUE:
+ xge_list_insert(&channel->item, &device->lcq_channels);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ xge_list_insert(&channel->item, &device->umq_channels);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ xge_list_insert(&channel->item, &device->dmq_channels);
+ break;
+#else
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_HW_RECEIVE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_HW_COMPLETION_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_LRO_RECEIVE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_LRO_COMPLETION_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO ||
+ channel->type == XGE_HAL_CHANNEL_TYPE_RING);
+ break;
+#endif
+ default:
+ break;
+ }
channel->is_open = 1;
/*
* The magic check the argument validity, has to be
@@ -442,7 +588,9 @@ void xge_hal_channel_abort(xge_hal_channel_h channelh,
if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
xge_assert(!__hal_fifo_txdl_priv(dtr)->allocated);
} else {
- xge_assert(!__hal_ring_rxd_priv(channelh, dtr)->allocated);
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
+ xge_assert(!__hal_ring_rxd_priv(channelh, dtr)->allocated);
+ }
}
#endif
check_cnt++;
@@ -459,14 +607,17 @@ void xge_hal_channel_abort(xge_hal_channel_h channelh,
if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
xge_assert(__hal_fifo_txdl_priv(dtr)->allocated);
} else {
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
xge_assert(__hal_ring_rxd_priv(channelh, dtr)
->allocated);
+ }
}
#endif
check_cnt++;
#endif
channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_POSTED,
channel->userdata, reopen);
+
}
reserve_top_sav = channel->reserve_top;
@@ -477,7 +628,9 @@ void xge_hal_channel_abort(xge_hal_channel_h channelh,
if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
xge_assert(!__hal_fifo_txdl_priv(dtr)->allocated);
} else {
- xge_assert(!__hal_ring_rxd_priv(channelh, dtr)->allocated);
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
+ xge_assert(!__hal_ring_rxd_priv(channelh, dtr)->allocated);
+ }
}
#endif
check_cnt++;
@@ -520,42 +673,98 @@ void xge_hal_channel_close(xge_hal_channel_h channelh,
channel->is_open = 0;
channel->magic = XGE_HAL_DEAD;
- /* sanity check: make sure channel is not in free list */
- xge_list_for_each(item, &hldev->free_channels) {
- xge_hal_channel_t *tmp;
-
- tmp = xge_container_of(item, xge_hal_channel_t, item);
- xge_assert(!tmp->is_open);
- if (channel == tmp) {
- return;
+#ifdef XGEHAL_RNIC
+ if((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
+ (channel->type == XGE_HAL_CHANNEL_TYPE_RING)) {
+#endif
+ /* sanity check: make sure channel is not in free list */
+ xge_list_for_each(item, &hldev->free_channels) {
+ xge_hal_channel_t *tmp;
+
+ tmp = xge_container_of(item, xge_hal_channel_t, item);
+ xge_assert(!tmp->is_open);
+ if (channel == tmp) {
+ return;
+ }
}
+#ifdef XGEHAL_RNIC
}
+#endif
xge_hal_channel_abort(channel, reopen);
+#ifndef XGEHAL_RNIC
xge_assert((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
(channel->type == XGE_HAL_CHANNEL_TYPE_RING));
+#endif
if (reopen == XGE_HAL_CHANNEL_OC_NORMAL) {
/* de-allocate */
- if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
- __hal_fifo_close(channelh);
- } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
- __hal_ring_close(channelh);
+ switch(channel->type) {
+ case XGE_HAL_CHANNEL_TYPE_FIFO:
+ __hal_fifo_close(channelh);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_RING:
+ __hal_ring_close(channelh);
+ break;
+#ifdef XGEHAL_RNIC
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ __hal_sq_close(channelh);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_HW_RECEIVE_QUEUE:
+ __hal_hrq_close(channelh);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_HW_COMPLETION_QUEUE:
+ __hal_hcq_close(channelh);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_LRO_RECEIVE_QUEUE:
+ __hal_lrq_close(channelh);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_LRO_COMPLETION_QUEUE:
+ __hal_lcq_close(channelh);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ __hal_umq_close(channelh);
+ break;
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ __hal_dmq_close(channelh);
+ break;
+#else
+ case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_HW_RECEIVE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_HW_COMPLETION_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_LRO_RECEIVE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_LRO_COMPLETION_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
+ case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
+ xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO ||
+ channel->type == XGE_HAL_CHANNEL_TYPE_RING);
+ break;
+#endif
+ default:
+ break;
}
}
- else
- xge_assert(reopen == XGE_HAL_CHANNEL_RESET_ONLY);
/* move channel back to free state list */
xge_list_remove(&channel->item);
- xge_list_insert(&channel->item, &hldev->free_channels);
+#ifdef XGEHAL_RNIC
+ if((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
+ (channel->type == XGE_HAL_CHANNEL_TYPE_RING)) {
+#endif
+ xge_list_insert(&channel->item, &hldev->free_channels);
- if (xge_list_is_empty(&hldev->fifo_channels) &&
- xge_list_is_empty(&hldev->ring_channels)) {
- /* clear msix_idx in case of following HW reset */
- msix_idx = 0;
- hldev->reset_needed_after_close = 1;
+ if (xge_list_is_empty(&hldev->fifo_channels) &&
+ xge_list_is_empty(&hldev->ring_channels)) {
+ /* clear msix_idx in case of following HW reset */
+ msix_idx = 0;
+ hldev->reset_needed_after_close = 1;
+ }
+#ifdef XGEHAL_RNIC
}
+ else {
+ __hal_channel_free(channel);
+ }
+#endif
}
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-config.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-config.c
index 2e83af3993..63e285605b 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-config.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-config.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
*
- * FileName : xgehal-config.c
- *
- * Description: configuration functionality
- *
- * Created: 14 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#include "xgehal-config.h"
@@ -73,11 +64,6 @@ __hal_tti_config_check (xge_hal_tti_config_t *new_config)
return XGE_HAL_BADCFG_TX_UFC_C;
}
- if ((new_config->urange_d < XGE_HAL_MIN_TX_URANGE_D) ||
- (new_config->urange_d > XGE_HAL_MAX_TX_URANGE_D)) {
- return XGE_HAL_BADCFG_TX_URANGE_D;
- }
-
if ((new_config->ufc_d < XGE_HAL_MIN_TX_UFC_D) ||
(new_config->ufc_d > XGE_HAL_MAX_TX_UFC_D)) {
return XGE_HAL_BADCFG_TX_UFC_D;
@@ -168,28 +154,50 @@ __hal_rti_config_check (xge_hal_rti_config_t *new_config)
* otherwise one of the xge_hal_status_e{} enumerated error codes.
*/
static xge_hal_status_e
-__hal_fifo_queue_check (xge_hal_fifo_queue_t *new_config)
+__hal_fifo_queue_check (xge_hal_fifo_config_t *new_config,
+ xge_hal_fifo_queue_t *new_queue)
{
- if ((new_config->initial < XGE_HAL_MIN_FIFO_QUEUE_LENGTH) ||
- (new_config->initial > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) {
+ int i;
+
+ if ((new_queue->initial < XGE_HAL_MIN_FIFO_QUEUE_LENGTH) ||
+ (new_queue->initial > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) {
return XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH;
}
/* FIXME: queue "grow" feature is not supported.
* Use "initial" queue size as the "maximum";
* Remove the next line when fixed. */
- new_config->max = new_config->initial;
+ new_queue->max = new_queue->initial;
- if ((new_config->max < XGE_HAL_MIN_FIFO_QUEUE_LENGTH) ||
- (new_config->max > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) {
+ if ((new_queue->max < XGE_HAL_MIN_FIFO_QUEUE_LENGTH) ||
+ (new_queue->max > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) {
return XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH;
}
- if ((new_config->intr < XGE_HAL_MIN_FIFO_QUEUE_INTR) ||
- (new_config->intr > XGE_HAL_MAX_FIFO_QUEUE_INTR)) {
+ if (new_queue->max < new_config->reserve_threshold) {
+ return XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD;
+ }
+
+ if ((new_queue->intr < XGE_HAL_MIN_FIFO_QUEUE_INTR) ||
+ (new_queue->intr > XGE_HAL_MAX_FIFO_QUEUE_INTR)) {
return XGE_HAL_BADCFG_FIFO_QUEUE_INTR;
}
+ for(i = 0; i < XGE_HAL_MAX_FIFO_TTI_NUM; i++) {
+ /*
+ * Validate the tti configuration parameters only if
+ * the TTI feature is enabled.
+ */
+ if (new_queue->tti[i].enabled) {
+ xge_hal_status_e status;
+
+ if ((status = __hal_tti_config_check(
+ &new_queue->tti[i])) != XGE_HAL_OK) {
+ return status;
+ }
+ }
+ }
+
return XGE_HAL_OK;
}
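For reference, a sketch of the per-queue TTI setup that this loop validates. Only tti[i].enabled gates the check; the remaining TTI fields (not shown here) must fall within the ranges documented in xge_hal_tti_config_t{}.

	#include "xgehal.h"	/* consolidated HAL header */

	/* hypothetical setup helper, illustrative only */
	static void
	demo_enable_tti(xge_hal_fifo_queue_t *queue)
	{
		int i;

		for (i = 0; i < XGE_HAL_MAX_FIFO_TTI_NUM; i++)
			queue->tti[i].enabled = 0;	/* default: disabled */

		queue->tti[0].enabled = 1;	/* only enabled entries are checked */
	}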
@@ -374,8 +382,8 @@ __hal_fifo_config_check (xge_hal_fifo_config_t *new_config)
if (!new_config->queue[i].configured)
continue;
- if ((status = __hal_fifo_queue_check(&new_config->queue[i]))
- != XGE_HAL_OK) {
+ if ((status = __hal_fifo_queue_check(new_config,
+ &new_config->queue[i])) != XGE_HAL_OK) {
return status;
}
}
@@ -438,6 +446,23 @@ __hal_device_config_check_common (xge_hal_device_config_t *new_config)
return XGE_HAL_BADCFG_MAX_MTU;
}
+ if ((new_config->bimodal_interrupts < XGE_HAL_BIMODAL_INTR_MIN) ||
+ (new_config->bimodal_interrupts > XGE_HAL_BIMODAL_INTR_MAX)) {
+ return XGE_HAL_BADCFG_BIMODAL_INTR;
+ }
+
+ if (new_config->bimodal_interrupts &&
+ ((new_config->bimodal_timer_lo_us < XGE_HAL_BIMODAL_TIMER_LO_US_MIN) ||
+ (new_config->bimodal_timer_lo_us > XGE_HAL_BIMODAL_TIMER_LO_US_MAX))) {
+ return XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US;
+ }
+
+ if (new_config->bimodal_interrupts &&
+ ((new_config->bimodal_timer_hi_us < XGE_HAL_BIMODAL_TIMER_HI_US_MIN) ||
+ (new_config->bimodal_timer_hi_us > XGE_HAL_BIMODAL_TIMER_HI_US_MAX))) {
+ return XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US;
+ }
+
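A sketch of a bimodal configuration that these checks accept. The numeric values are placeholders, not recommendations, and must fall within the _MIN/_MAX bounds tested above.

	#include "xgehal.h"	/* consolidated HAL header */

	/* hypothetical values, illustrative only */
	static void
	demo_bimodal_cfg(xge_hal_device_config_t *cfg)
	{
		cfg->bimodal_interrupts = 1;	/* enable the bimodal scheme */
		cfg->bimodal_timer_lo_us = 24;	/* placeholder; must be within */
		cfg->bimodal_timer_hi_us = 256;	/* the _MIN/_MAX bounds above  */
	}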
if ((new_config->no_isr_events < XGE_HAL_NO_ISR_EVENTS_MIN) ||
(new_config->no_isr_events > XGE_HAL_NO_ISR_EVENTS_MAX)) {
return XGE_HAL_BADCFG_NO_ISR_EVENTS;
@@ -505,7 +530,17 @@ __hal_device_config_check_common (xge_hal_device_config_t *new_config)
return XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT;
}
- if (new_config->sched_timer_us) {
+ /*
+ * Check adaptive-scheme parameters. Note that two configuration
+ * variables need to be set in the ULD:
+ *
+ * a) sched_timer_us should not be zero;
+ * b) rxufca_hi_lim should not be equal to rxufca_lo_lim.
+ *
+ * The code below checks for those conditions.
+ */
+ if (new_config->sched_timer_us &&
+ new_config->rxufca_hi_lim != new_config->rxufca_lo_lim) {
if ((new_config->rxufca_intr_thres <
XGE_HAL_RXUFCA_INTR_THRES_MIN) ||
(new_config->rxufca_intr_thres >
@@ -564,6 +599,29 @@ __hal_device_config_check_common (xge_hal_device_config_t *new_config)
}
}
+ if ((new_config->rts_qos_steering_config < XGE_HAL_RTS_QOS_STEERING_DISABLE) ||
+ (new_config->rts_qos_steering_config > XGE_HAL_RTS_QOS_STEERING_ENABLE)) {
+ return XGE_HAL_BADCFG_RTS_QOS_STEERING_CONFIG;
+ }
+
+#if defined(XGE_HAL_CONFIG_LRO)
+ if (new_config->lro_sg_size !=
+ XGE_HAL_DEFAULT_USE_HARDCODE) {
+ if ((new_config->lro_sg_size < XGE_HAL_LRO_MIN_SG_SIZE) ||
+ (new_config->lro_sg_size > XGE_HAL_LRO_MAX_SG_SIZE)) {
+ return XGE_HAL_BADCFG_LRO_SG_SIZE;
+ }
+ }
+
+ if (new_config->lro_frm_len !=
+ XGE_HAL_DEFAULT_USE_HARDCODE) {
+ if ((new_config->lro_frm_len < XGE_HAL_LRO_MIN_FRM_LEN) ||
+ (new_config->lro_frm_len > XGE_HAL_LRO_MAX_FRM_LEN)) {
+ return XGE_HAL_BADCFG_LRO_FRM_LEN;
+ }
+ }
+#endif
+
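A sketch of explicit LRO sizing that these checks validate. The values are placeholders; leaving either field at XGE_HAL_DEFAULT_USE_HARDCODE skips the range check and keeps the HAL's built-in default.

	#include "xgehal.h"	/* consolidated HAL header */

	/* hypothetical values, illustrative only */
	static void
	demo_lro_cfg(xge_hal_device_config_t *cfg)
	{
		cfg->lro_sg_size = 10;		/* placeholder scatter-gather limit */
		cfg->lro_frm_len = 65536;	/* placeholder accumulated length */
	}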
if ((status = __hal_ring_config_check(&new_config->ring))
!= XGE_HAL_OK) {
return status;
@@ -574,17 +632,6 @@ __hal_device_config_check_common (xge_hal_device_config_t *new_config)
return status;
}
- /*
- * Validate the tti configuration parameters only if the TTI
- * feature is enabled.
- */
- if (new_config->tti.enabled) {
- if ((status = __hal_tti_config_check(&new_config->tti)) !=
- XGE_HAL_OK) {
- return status;
- }
- }
-
if ((status = __hal_fifo_config_check(&new_config->fifo)) !=
XGE_HAL_OK) {
return status;
@@ -611,7 +658,8 @@ __hal_device_config_check_xena (xge_hal_device_config_t *new_config)
(new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_66) &&
(new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_100) &&
(new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_133) &&
- (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_266)) {
+ (new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_266) &&
+ (new_config->pci_freq_mherz != XGE_HAL_DEFAULT_USE_HARDCODE)) {
return XGE_HAL_BADCFG_PCI_FREQ_MHERZ;
}
@@ -636,7 +684,7 @@ __hal_device_config_check_herc (xge_hal_device_config_t *new_config)
}
-/**
+/*
* __hal_driver_config_check - Check HAL configuration
* @new_config: Driver configuration information
*
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-device-fp.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-device-fp.c
index d614143eaf..d3c127a3fa 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-device-fp.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-device-fp.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
*
- * FileName : xgehal-device-fp.c
- *
- * Description: HAL device object functionality (fast path)
- *
- * Created: 10 June 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifdef XGE_DEBUG_FP
@@ -41,22 +32,22 @@
* xge_hal_device_bar0 - Get BAR0 mapped address.
* @hldev: HAL device handle.
*
- * Returns: BAR0 address of the specified device.
+ * Returns: BAR0 address of the specified device.
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_bar0(xge_hal_device_t *hldev)
{
return hldev->bar0;
}
/**
- * xge_hal_device_isrbar0 - Get BAR0 mapped address.
+ * xge_hal_device_isrbar0 - Get BAR0 mapped address.
* @hldev: HAL device handle.
*
- * Returns: BAR0 address of the specified device.
+ * Returns: BAR0 address of the specified device.
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
-xge_hal_device_isrbar0(xge_hal_device_t *hldev)
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
+xge_hal_device_isrbar0(xge_hal_device_t *hldev)
{
return hldev->isrbar0;
}
@@ -65,9 +56,9 @@ xge_hal_device_isrbar0(xge_hal_device_t *hldev)
* xge_hal_device_bar1 - Get BAR1 mapped address.
* @hldev: HAL device handle.
*
- * Returns: BAR1 address of the specified device.
+ * Returns: BAR1 address of the specified device.
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_bar1(xge_hal_device_t *hldev)
{
return hldev->bar1;
@@ -77,23 +68,23 @@ xge_hal_device_bar1(xge_hal_device_t *hldev)
* xge_hal_device_bar0_set - Set BAR0 mapped address.
* @hldev: HAL device handle.
* @bar0: BAR0 mapped address.
- * * Set BAR0 address in the HAL device object.
+ * Set BAR0 address in the HAL device object.
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_bar0_set(xge_hal_device_t *hldev, char *bar0)
{
xge_assert(bar0);
- hldev->bar0 = bar0;
+ hldev->bar0 = bar0;
}
/**
- * xge_hal_device_isrbar0_set - Set BAR0 mapped address.
+ * xge_hal_device_isrbar0_set - Set BAR0 mapped address.
* @hldev: HAL device handle.
* @isrbar0: BAR0 mapped address.
- * * Set BAR0 address in the HAL device object.
+ * Set BAR0 address in the HAL device object.
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
-xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0)
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0)
{
xge_assert(isrbar0);
hldev->isrbar0 = isrbar0;
@@ -105,172 +96,182 @@ xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0)
* @channelh: Channel handle.
* @bar1: BAR1 mapped address.
*
- * Set BAR1 address for the given channel.
+ * Set BAR1 address for the given channel.
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_bar1_set(xge_hal_device_t *hldev, xge_hal_channel_h channelh,
- char *bar1)
+ char *bar1)
{
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
xge_assert(bar1);
xge_assert(fifo);
- /* Initializing the BAR1 address as the start of
- * the FIFO queue pointer and as a location of FIFO control
+ /* Initializing the BAR1 address as the start of
+ * the FIFO queue pointer and as a location of FIFO control
* word. */
fifo->hw_pair =
- (xge_hal_fifo_hw_pair_t *) (bar1 +
- (fifo->channel.post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
- hldev->bar1 = bar1;
+ (xge_hal_fifo_hw_pair_t *) (bar1 +
+ (fifo->channel.post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
+ hldev->bar1 = bar1;
}
/**
- * xge_hal_device_rev - Get Device revision number.
+ * xge_hal_device_rev - Get Device revision number.
* @hldev: HAL device handle.
*
- * Returns: Device revision number
+ * Returns: Device revision number
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE int
-xge_hal_device_rev(xge_hal_device_t *hldev)
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE int
+xge_hal_device_rev(xge_hal_device_t *hldev)
{
- return hldev->revision;
+ return hldev->revision;
}
/**
- * xge_hal_device_begin_irq - Begin IRQ processing.
+ * xge_hal_device_begin_irq - Begin IRQ processing.
* @hldev: HAL device handle.
- * @reason: "Reason" for the interrupt, the value of Xframe's
- * general_int_status register.
+ * @reason: "Reason" for the interrupt, the value of Xframe's
+ * general_int_status register.
*
- * The function performs two actions, It first checks whether (shared IRQ) the
- * interrupt was raised by the device. Next, it masks the device interrupts.
+ * The function performs two actions. First, it checks whether the (possibly
+ * shared) interrupt was raised by the device. Next, it masks the device interrupts.
*
* Note:
* xge_hal_device_begin_irq() does not flush MMIO writes through the
* bridge. Therefore, two back-to-back interrupts are potentially possible.
- * It is the responsibility of the ULD to make sure that only one
+ * It is the responsibility of the ULD to make sure that only one
* xge_hal_device_continue_irq() runs at a time.
*
- * Returns: 0, if the interrupt is not "ours" (note that in this case the
+ * Returns: 0, if the interrupt is not "ours" (note that in this case the
* device remains enabled).
* Otherwise, xge_hal_device_begin_irq() returns 64bit general adapter
* status.
* See also: xge_hal_device_handle_irq()
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_begin_irq(xge_hal_device_t *hldev, u64 *reason)
{
- u64 val64;
- xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+ u64 val64;
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
hldev->stats.sw_dev_info_stats.total_intr_cnt++;
- val64 = xge_os_pio_mem_read64(hldev->pdev,
- hldev->regh0, &isrbar0->general_int_status);
+ val64 = xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &isrbar0->general_int_status);
if (xge_os_unlikely(!val64)) {
- /* not Xframe interrupt */
+ /* not Xframe interrupt */
hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
- *reason = 0;
- return XGE_HAL_ERR_WRONG_IRQ;
+ *reason = 0;
+ return XGE_HAL_ERR_WRONG_IRQ;
}
if (xge_os_unlikely(val64 == XGE_HAL_ALL_FOXES)) {
- u64 adapter_status =
- xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
- &isrbar0->adapter_status);
- if (adapter_status == XGE_HAL_ALL_FOXES) {
- (void) xge_queue_produce(hldev->queueh,
+ u64 adapter_status =
+ xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &isrbar0->adapter_status);
+ if (adapter_status == XGE_HAL_ALL_FOXES) {
+ (void) xge_queue_produce(hldev->queueh,
XGE_HAL_EVENT_SLOT_FREEZE,
hldev,
- 1, /* critical: slot freeze */
+ 1, /* critical: slot freeze */
sizeof(u64),
(void*)&adapter_status);
- *reason = 0;
+ *reason = 0;
return XGE_HAL_ERR_CRITICAL;
}
}
- *reason = val64;
+ *reason = val64;
- /* separate fast path, i.e. no errors */
- if (val64 & XGE_HAL_GEN_INTR_RXTRAFFIC) {
+ /* separate fast path, i.e. no errors */
+ if (val64 & XGE_HAL_GEN_INTR_RXTRAFFIC) {
hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt++;
return XGE_HAL_OK;
}
- if (val64 & XGE_HAL_GEN_INTR_TXTRAFFIC) {
+ if (val64 & XGE_HAL_GEN_INTR_TXTRAFFIC) {
hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt++;
return XGE_HAL_OK;
}
- if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXPIC)) {
+ hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXPIC)) {
xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.txpic_intr_cnt++;
status = __hal_device_handle_txpic(hldev, val64);
if (status != XGE_HAL_OK) {
return status;
}
}
- if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXDMA)) {
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXDMA)) {
xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.txdma_intr_cnt++;
status = __hal_device_handle_txdma(hldev, val64);
if (status != XGE_HAL_OK) {
return status;
}
}
- if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXMAC)) {
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXMAC)) {
xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.txmac_intr_cnt++;
status = __hal_device_handle_txmac(hldev, val64);
if (status != XGE_HAL_OK) {
return status;
}
}
- if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXXGXS)) {
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXXGXS)) {
xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.txxgxs_intr_cnt++;
status = __hal_device_handle_txxgxs(hldev, val64);
if (status != XGE_HAL_OK) {
return status;
}
}
- if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXPIC)) {
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXPIC)) {
xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.rxpic_intr_cnt++;
status = __hal_device_handle_rxpic(hldev, val64);
if (status != XGE_HAL_OK) {
return status;
}
}
- if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXDMA)) {
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXDMA)) {
xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.rxdma_intr_cnt++;
status = __hal_device_handle_rxdma(hldev, val64);
if (status != XGE_HAL_OK) {
return status;
}
}
- if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXMAC)) {
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXMAC)) {
xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.rxmac_intr_cnt++;
status = __hal_device_handle_rxmac(hldev, val64);
if (status != XGE_HAL_OK) {
return status;
}
}
- if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXXGXS)) {
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXXGXS)) {
xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.rxxgxs_intr_cnt++;
status = __hal_device_handle_rxxgxs(hldev, val64);
if (status != XGE_HAL_OK) {
return status;
}
}
- if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_MC)) {
+ if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_MC)) {
xge_hal_status_e status;
+ hldev->stats.sw_dev_info_stats.mc_intr_cnt++;
status = __hal_device_handle_mc(hldev, val64);
if (status != XGE_HAL_OK) {
return status;
@@ -282,42 +283,137 @@ xge_hal_device_begin_irq(xge_hal_device_t *hldev, u64 *reason)
/**
* xge_hal_device_clear_rx - Acknowledge (that is, clear) the
- * condition that has caused the RX interrupt.
+ * condition that has caused the RX interrupt.
* @hldev: HAL device handle.
*
- * Acknowledge (that is, clear) the condition that has caused
+ * Acknowledge (that is, clear) the condition that has caused
* the Rx interrupt.
* See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
* xge_hal_device_clear_tx(), xge_hal_device_mask_rx().
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_clear_rx(xge_hal_device_t *hldev)
{
- xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
- 0xFFFFFFFFFFFFFFFFULL,
- &isrbar0->rx_traffic_int);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xFFFFFFFFFFFFFFFFULL,
+ &isrbar0->rx_traffic_int);
}
/**
* xge_hal_device_clear_tx - Acknowledge (that is, clear) the
- * condition that has caused the TX interrupt.
+ * condition that has caused the TX interrupt.
* @hldev: HAL device handle.
*
- * Acknowledge (that is, clear) the condition that has caused
+ * Acknowledge (that is, clear) the condition that has caused
* the Tx interrupt.
* See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
* xge_hal_device_clear_rx(), xge_hal_device_mask_tx().
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_clear_tx(xge_hal_device_t *hldev)
{
- xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xFFFFFFFFFFFFFFFFULL,
+ &isrbar0->tx_traffic_int);
+}
+
+/**
+ * xge_hal_device_poll_rx_channel - Poll Rx channel for completed
+ * descriptors and process the same.
+ * @channel: HAL channel.
+ *
+ * The function polls the Rx channel for the completed descriptors and calls
+ * the upper-layer driver (ULD) via supplied completion callback.
+ *
+ * Returns: XGE_HAL_OK, if the polling completed successfully.
+ * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
+ * descriptors available which are yet to be processed.
+ *
+ * See also: xge_hal_device_poll_tx_channel()
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_rx_channel(xge_hal_channel_t *channel, int *got_rx)
+{
+ xge_hal_status_e ret = XGE_HAL_OK;
+ xge_hal_dtr_h first_dtrh;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
+ u8 t_code;
+ int got_bytes;
+
+ /* for each opened rx channel */
+ got_bytes = *got_rx = 0;
+ ((xge_hal_ring_t *)channel)->cmpl_cnt = 0;
+ channel->poll_bytes = 0;
+ if ((ret = xge_hal_ring_dtr_next_completed (channel, &first_dtrh,
+ &t_code)) == XGE_HAL_OK) {
+ if (channel->callback(channel, first_dtrh,
+ t_code, channel->userdata) != XGE_HAL_OK) {
+ (*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1;
+ got_bytes += channel->poll_bytes + 1;
+ ret = XGE_HAL_COMPLETIONS_REMAIN;
+ } else {
+ (*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1;
+ got_bytes += channel->poll_bytes + 1;
+ }
+ }
+
+ if (*got_rx) {
+ hldev->irq_workload_rxd[channel->post_qid] += *got_rx;
+ hldev->irq_workload_rxcnt[channel->post_qid]++;
+ }
+ hldev->irq_workload_rxlen[channel->post_qid] += got_bytes;
+
+ return ret;
+}
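As a usage sketch (not part of this patch): a ULD receive path can drain a
single ring by re-invoking the poll primitive until it stops reporting
XGE_HAL_COMPLETIONS_REMAIN. The wrapper name below is hypothetical.

/* Illustrative only: drain one opened Rx ring channel. */
static void
my_drain_rx_ring(xge_hal_channel_t *channel)
{
	int got_rx;

	/* Re-poll while the HAL reports more completed descriptors. */
	while (xge_hal_device_poll_rx_channel(channel, &got_rx) ==
	    XGE_HAL_COMPLETIONS_REMAIN)
		;
}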
+
+/**
+ * xge_hal_device_poll_tx_channel - Poll Tx channel for completed
+ * descriptors and process the same.
+ * @channel: HAL channel.
+ *
+ * The function polls the Tx channel for the completed descriptors and calls
+ * the upper-layer driver (ULD) via supplied completion callback.
+ *
+ * Returns: XGE_HAL_OK, if the polling completed successfully.
+ * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
+ * descriptors available which are yet to be processed.
+ *
+ * See also: xge_hal_device_poll_rx_channel().
+ */
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_tx_channel(xge_hal_channel_t *channel, int *got_tx)
+{
+ xge_hal_dtr_h first_dtrh;
+ xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
+ u8 t_code;
+ int got_bytes;
+
+ /* for each opened tx channel */
+ got_bytes = *got_tx = 0;
+ channel->poll_bytes = 0;
+ if (xge_hal_fifo_dtr_next_completed (channel, &first_dtrh,
+ &t_code) == XGE_HAL_OK) {
+ if (channel->callback(channel, first_dtrh,
+ t_code, channel->userdata) != XGE_HAL_OK) {
+ (*got_tx)++;
+ got_bytes += channel->poll_bytes + 1;
+ return XGE_HAL_COMPLETIONS_REMAIN;
+ }
+ (*got_tx)++;
+ got_bytes += channel->poll_bytes + 1;
+ }
+
+ if (*got_tx) {
+ hldev->irq_workload_txd[channel->post_qid] += *got_tx;
+ hldev->irq_workload_txcnt[channel->post_qid]++;
+ }
+ hldev->irq_workload_txlen[channel->post_qid] += got_bytes;
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
- 0xFFFFFFFFFFFFFFFFULL,
- &isrbar0->tx_traffic_int);
+ return XGE_HAL_OK;
}
/**
@@ -325,36 +421,27 @@ xge_hal_device_clear_tx(xge_hal_device_t *hldev)
* descriptors and process the same.
* @hldev: HAL device handle.
*
- * The function polls the Rx channels for the completed descriptors and calls
- * the upper-layer driver (ULD) via supplied completion callback.
+ * The function polls the Rx channels for the completed descriptors and calls
+ * the upper-layer driver (ULD) via supplied completion callback.
*
- * Returns: XGE_HAL_OK, if the polling is completed successful.
- * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
+ * Returns: XGE_HAL_OK, if the polling completed successfully.
+ * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
* descriptors available which are yet to be processed.
*
- * See also: xge_hal_device_poll_tx_channels(), xge_hal_device_continue_irq().
+ * See also: xge_hal_device_poll_tx_channels(), xge_hal_device_continue_irq().
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
-xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev)
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev, int *got_rx)
{
xge_list_t *item;
xge_hal_channel_t *channel;
- xge_hal_dtr_h first_dtrh;
- u8 t_code;
/* for each opened rx channel */
- xge_list_for_each(item, &hldev->ring_channels) {
- channel = xge_container_of(item,
- xge_hal_channel_t, item);
-
- ((xge_hal_ring_t*)channel)->cmpl_cnt = 0;
- if (xge_hal_ring_dtr_next_completed (channel, &first_dtrh,
- &t_code) == XGE_HAL_OK) {
- if (channel->callback(channel, first_dtrh,
- t_code, channel->userdata) != XGE_HAL_OK) {
- return XGE_HAL_COMPLETIONS_REMAIN;
- }
- }
+ xge_list_for_each(item, &hldev->ring_channels) {
+ if (hldev->terminating)
+ return XGE_HAL_OK;
+ channel = xge_container_of(item, xge_hal_channel_t, item);
+ (void) xge_hal_device_poll_rx_channel(channel, got_rx);
}
return XGE_HAL_OK;
@@ -365,130 +452,122 @@ xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev)
* descriptors and process the same.
* @hldev: HAL device handle.
*
- * The function polls the Tx channels for the completed descriptors and calls
- * the upper-layer driver (ULD) via supplied completion callback.
+ * The function polls the Tx channels for the completed descriptors and calls
+ * the upper-layer driver (ULD) via supplied completion callback.
*
- * Returns: XGE_HAL_OK, if the polling is completed successful.
- * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
+ * Returns: XGE_HAL_OK, if the polling completed successfully.
+ * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
* descriptors available which are yet to be processed.
*
- * See also: xge_hal_device_poll_rx_channels(), xge_hal_device_continue_irq().
+ * See also: xge_hal_device_poll_rx_channels(), xge_hal_device_continue_irq().
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
-xge_hal_device_poll_tx_channels(xge_hal_device_t *hldev)
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+xge_hal_device_poll_tx_channels(xge_hal_device_t *hldev, int *got_tx)
{
xge_list_t *item;
xge_hal_channel_t *channel;
- xge_hal_dtr_h first_dtrh;
- u8 t_code;
/* for each opened tx channel */
- xge_list_for_each(item, &hldev->fifo_channels) {
- channel = xge_container_of(item,
- xge_hal_channel_t, item);
-
- if (xge_hal_fifo_dtr_next_completed (channel, &first_dtrh,
- &t_code) == XGE_HAL_OK) {
- if (channel->callback(channel, first_dtrh,
- t_code, channel->userdata) != XGE_HAL_OK) {
- return XGE_HAL_COMPLETIONS_REMAIN;
- }
- }
+ xge_list_for_each(item, &hldev->fifo_channels) {
+ if (hldev->terminating)
+ return XGE_HAL_OK;
+ channel = xge_container_of(item, xge_hal_channel_t, item);
+ (void) xge_hal_device_poll_tx_channel(channel, got_tx);
}
return XGE_HAL_OK;
}
/**
- * xge_hal_device_mask_tx - Mask Tx interrupts.
+ * xge_hal_device_mask_tx - Mask Tx interrupts.
* @hldev: HAL device handle.
*
- * Mask Tx device interrupts.
+ * Mask Tx device interrupts.
*
* See also: xge_hal_device_unmask_tx(), xge_hal_device_mask_rx(),
* xge_hal_device_clear_tx().
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
-xge_hal_device_mask_tx(xge_hal_device_t *hldev)
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_mask_tx(xge_hal_device_t *hldev)
{
- xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
- 0xFFFFFFFFFFFFFFFFULL,
- &isrbar0->tx_traffic_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xFFFFFFFFFFFFFFFFULL,
+ &isrbar0->tx_traffic_mask);
}
/**
- * xge_hal_device_mask_rx - Mask Rx interrupts.
+ * xge_hal_device_mask_rx - Mask Rx interrupts.
* @hldev: HAL device handle.
*
- * Mask Rx device interrupts.
+ * Mask Rx device interrupts.
*
* See also: xge_hal_device_unmask_rx(), xge_hal_device_mask_tx(),
* xge_hal_device_clear_rx().
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
-xge_hal_device_mask_rx(xge_hal_device_t *hldev)
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+xge_hal_device_mask_rx(xge_hal_device_t *hldev)
{
- xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
- 0xFFFFFFFFFFFFFFFFULL,
- &isrbar0->rx_traffic_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xFFFFFFFFFFFFFFFFULL,
+ &isrbar0->rx_traffic_mask);
}
/**
* xge_hal_device_mask_all - Mask all device interrupts.
* @hldev: HAL device handle.
*
- * Mask all device interrupts.
+ * Mask all device interrupts.
*
* See also: xge_hal_device_unmask_all()
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_mask_all(xge_hal_device_t *hldev)
{
- xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
- 0xFFFFFFFFFFFFFFFFULL,
- &isrbar0->general_int_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0xFFFFFFFFFFFFFFFFULL,
+ &isrbar0->general_int_mask);
}
/**
- * xge_hal_device_unmask_tx - Unmask Tx interrupts.
+ * xge_hal_device_unmask_tx - Unmask Tx interrupts.
* @hldev: HAL device handle.
*
- * Unmask Tx device interrupts.
+ * Unmask Tx device interrupts.
*
* See also: xge_hal_device_mask_tx(), xge_hal_device_clear_tx().
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_tx(xge_hal_device_t *hldev)
{
- xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
- 0x0ULL,
- &isrbar0->tx_traffic_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0ULL,
+ &isrbar0->tx_traffic_mask);
}
/**
- * xge_hal_device_unmask_rx - Unmask Rx interrupts.
+ * xge_hal_device_unmask_rx - Unmask Rx interrupts.
* @hldev: HAL device handle.
*
- * Unmask Rx device interrupts.
+ * Unmask Rx device interrupts.
*
* See also: xge_hal_device_mask_rx(), xge_hal_device_clear_rx().
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_rx(xge_hal_device_t *hldev)
{
- xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
- 0x0ULL,
- &isrbar0->rx_traffic_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0ULL,
+ &isrbar0->rx_traffic_mask);
}
/**
@@ -499,109 +578,63 @@ xge_hal_device_unmask_rx(xge_hal_device_t *hldev)
*
* See also: xge_hal_device_mask_all()
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_all(xge_hal_device_t *hldev)
{
- xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
+ xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
- 0x0ULL,
- &isrbar0->general_int_mask);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ 0x0ULL,
+ &isrbar0->general_int_mask);
}
/**
- * xge_hal_device_continue_irq - Continue handling IRQ: process all
+ * xge_hal_device_continue_irq - Continue handling IRQ: process all
* completed descriptors.
* @hldev: HAL device handle.
*
- * Process completed descriptors and unmask the device interrupts.
+ * Process completed descriptors and unmask the device interrupts.
*
- * The xge_hal_device_continue_irq() walks all open channels
- * and calls upper-layer driver (ULD) via supplied completion
- * callback. Note that the completion callback is specified at channel open
+ * The xge_hal_device_continue_irq() walks all open channels
+ * and calls upper-layer driver (ULD) via supplied completion
+ * callback. Note that the completion callback is specified at channel open
* time, see xge_hal_channel_open().
*
- * Note that the xge_hal_device_continue_irq is part of the _fast_ path.
- * To optimize the processing, the function does _not_ check for
+ * Note that the xge_hal_device_continue_irq is part of the _fast_ path.
+ * To optimize the processing, the function does _not_ check for
* errors and alarms.
*
- * The latter is done in a polling fashion, via xge_hal_device_poll().
+ * The latter is done in a polling fashion, via xge_hal_device_poll().
*
- * Returns: XGE_HAL_OK.
+ * Returns: XGE_HAL_OK.
*
* See also: xge_hal_device_handle_irq(), xge_hal_device_poll(),
* xge_hal_ring_dtr_next_completed(),
* xge_hal_fifo_dtr_next_completed(), xge_hal_channel_callback_f{}.
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_continue_irq(xge_hal_device_t *hldev)
{
- xge_list_t *item;
- xge_hal_channel_t *channel;
- xge_hal_dtr_h first_dtrh;
- int got_rx = 0, got_tx = 0;
- unsigned int isr_polling_cnt = (unsigned int) hldev->config.isr_polling_cnt;
- u8 t_code;
+ int got_rx = 1, got_tx = 1;
+ int isr_polling_cnt = hldev->config.isr_polling_cnt;
+ int count = 0;
-_try_again:
+ do {
+ if (got_rx)
+ (void) xge_hal_device_poll_rx_channels(hldev, &got_rx);
+ if (got_tx && hldev->tti_enabled)
+ (void) xge_hal_device_poll_tx_channels(hldev, &got_tx);
- /* for each opened rx channel */
- xge_list_for_each(item, &hldev->ring_channels) {
- channel = xge_container_of(item,
- xge_hal_channel_t, item);
-
- ((xge_hal_ring_t*)channel)->cmpl_cnt = 0;
- if (xge_hal_ring_dtr_next_completed (channel, &first_dtrh,
- &t_code) == XGE_HAL_OK) {
- channel->callback(channel, first_dtrh,
- t_code, channel->userdata);
- got_rx++;
- }
+ if (!got_rx && !got_tx)
+ break;
- if (hldev->terminating)
- return XGE_HAL_OK;
+ count += (got_rx + got_tx);
+ } while (isr_polling_cnt--);
- }
-
- /* Note.
- * All interrupts are masked by general_int_status at this point,
- * i.e. no new interrupts going to be produced by the adapter.
- * We intentionally do not mask rx/tx interrupts right after
- * walking to continue processing new descriptors on next
- * interation if configured. */
-
- /* for each opened tx channel */
- xge_list_for_each(item, &hldev->fifo_channels) {
- channel = xge_container_of(item,
- xge_hal_channel_t, item);
-
- if (xge_hal_fifo_dtr_next_completed (channel, &first_dtrh,
- &t_code) == XGE_HAL_OK) {
- channel->callback(channel, first_dtrh,
- t_code, channel->userdata);
- got_tx++;
- }
-
- if (hldev->terminating)
- return XGE_HAL_OK;
-
- }
-
- if (got_rx || got_tx) {
- xge_hal_pci_bar0_t *isrbar0 =
- (xge_hal_pci_bar0_t *)hldev->isrbar0;
- got_tx = got_rx = 0;
- if (isr_polling_cnt--)
- goto _try_again;
- /* to avoid interrupt loss, we force bridge to flush cached
- * writes, in simple case OSDEP needs to just readl(), some
- * OSes (e.g. M$ Windows) has special bridge flush API */
- (void) xge_os_flush_bridge(hldev->pdev, hldev->regh0,
- &isrbar0->general_int_status);
- } else if (isr_polling_cnt == hldev->config.isr_polling_cnt) {
+ if (!count)
hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
- }
return XGE_HAL_OK;
}
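In the rewritten loop above, each pass polls the Rx rings and, when TTI
interrupts are in use, the Tx fifos; it exits early as soon as a pass finds no
work in either direction, and otherwise repeats for at most isr_polling_cnt
extra passes. The accumulated descriptor count only feeds the
not_traffic_intr_cnt statistic, which now means "the interrupt fired but no
completions were found".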
@@ -610,37 +643,37 @@ _try_again:
* xge_hal_device_handle_irq - Handle device IRQ.
* @hldev: HAL device handle.
*
- * Perform the complete handling of the line interrupt. The function
- * performs two calls.
- * First it uses xge_hal_device_begin_irq() to check the reason for
+ * Perform the complete handling of the line interrupt. The function
+ * performs two calls.
+ * First it uses xge_hal_device_begin_irq() to check the reason for
* the interrupt and mask the device interrupts.
- * Second, it calls xge_hal_device_continue_irq() to process all
+ * Second, it calls xge_hal_device_continue_irq() to process all
* completed descriptors and re-enable the interrupts.
*
- * Returns: XGE_HAL_OK - success;
- * XGE_HAL_ERR_WRONG_IRQ - (shared) IRQ produced by other device.
+ * Returns: XGE_HAL_OK - success;
+ * XGE_HAL_ERR_WRONG_IRQ - (shared) IRQ produced by other device.
*
* See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq().
*/
-__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
+__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_handle_irq(xge_hal_device_t *hldev)
{
- u64 reason;
+ u64 reason;
xge_hal_status_e status;
xge_hal_device_mask_all(hldev);
- status = xge_hal_device_begin_irq(hldev, &reason);
- if (status != XGE_HAL_OK) {
+ status = xge_hal_device_begin_irq(hldev, &reason);
+ if (status != XGE_HAL_OK) {
xge_hal_device_unmask_all(hldev);
- return status;
+ return status;
}
if (reason & XGE_HAL_GEN_INTR_RXTRAFFIC) {
xge_hal_device_clear_rx(hldev);
}
- status = xge_hal_device_continue_irq(hldev);
+ status = xge_hal_device_continue_irq(hldev);
xge_hal_device_clear_tx(hldev);
@@ -649,55 +682,79 @@ xge_hal_device_handle_irq(xge_hal_device_t *hldev)
return status;
}
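For context, the complete line-interrupt path a ULD builds on top of this
helper reduces to a single call; the sketch below is illustrative only (the
my_isr name and its 0/1 claim convention are assumptions, not part of the
HAL):

/* Sketch of a ULD interrupt service routine. */
static int
my_isr(void *arg)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)arg;

	/* XGE_HAL_ERR_WRONG_IRQ means a shared IRQ raised by another device. */
	if (xge_hal_device_handle_irq(hldev) == XGE_HAL_ERR_WRONG_IRQ)
		return 0;	/* not claimed */
	return 1;		/* claimed and serviced */
}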
-#if defined(XGE_HAL_CONFIG_LRO)
+#if defined(XGE_HAL_CONFIG_LRO)
+
+
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+__hal_lro_check_for_session_match(lro_t *lro, tcplro_t *tcp, iplro_t *ip)
+{
+
+ /* Match Source address field */
+ if ((lro->ip_hdr->saddr != ip->saddr))
+ return XGE_HAL_FAIL;
+
+ /* Match Destination address field */
+ if ((lro->ip_hdr->daddr != ip->daddr))
+ return XGE_HAL_FAIL;
+
+ /* Match Source Port field */
+ if ((lro->tcp_hdr->source != tcp->source))
+ return XGE_HAL_FAIL;
+
+ /* Match Destination Port field */
+ if ((lro->tcp_hdr->dest != tcp->dest))
+ return XGE_HAL_FAIL;
+
+ return XGE_HAL_OK;
+}
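Note that all four comparisons above are made on the raw network-byte-order
fields; since both the cached headers and the incoming ones come straight off
the wire, equality tests need no ntohs/ntohl conversion.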
/*
* __hal_tcp_seg_len: Find the tcp seg len.
- * @ip: ip header.
+ * @ip: ip header.
* @tcp: tcp header.
- * returns: Tcp seg length.
+ * returns: Tcp seg length.
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u16
-__hal_tcp_seg_len(iplro_t *ip, tcplro_t *tcp)
+__hal_tcp_seg_len(iplro_t *ip, tcplro_t *tcp)
{
- u16 ret;
+ u16 ret;
- ret = (xge_os_ntohs(ip->tot_len) -
- ((ip->version_ihl & 0x0F)<<2) -
- ((tcp->doff_res)>>2));
+ ret = (xge_os_ntohs(ip->tot_len) -
+ ((ip->version_ihl & 0x0F)<<2) -
+ ((tcp->doff_res)>>2));
return (ret);
}
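As a concrete check of the arithmetic: for a full-sized Ethernet TCP segment,
tot_len = 1500, version_ihl = 0x45 so (0x45 & 0x0F) << 2 = 20 bytes of IP
header, and doff_res = 0x50 so 0x50 >> 2 = 20 bytes of TCP header, giving
1500 - 20 - 20 = 1460 payload bytes, one standard MSS.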
/*
* __hal_ip_lro_capable: Finds whether ip is lro capable.
- * @ip: ip header.
+ * @ip: ip header.
* @ext_info: descriptor info.
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_ip_lro_capable(iplro_t *ip,
- xge_hal_dtr_info_t *ext_info)
+ xge_hal_dtr_info_t *ext_info)
{
#ifdef XGE_LL_DEBUG_DUMP_PKT
{
- u16 i;
- u8 ch, *iph = (u8 *)ip;
+ u16 i;
+ u8 ch, *iph = (u8 *)ip;
- xge_debug_ring(XGE_TRACE, "Dump Ip:" );
- for (i =0; i < 40; i++) {
- ch = ntohs(*((u8 *)(iph + i)) );
+ xge_debug_ring(XGE_TRACE, "Dump Ip:" );
+ for (i =0; i < 40; i++) {
+ ch = ntohs(*((u8 *)(iph + i)) );
printf("i:%d %02x, ",i,ch);
}
}
#endif
- if (ip->version_ihl != IP_FAST_PATH_HDR_MASK) {
- xge_debug_ring(XGE_ERR, "iphdr !=45 :%d",ip->version_ihl);
+ if (ip->version_ihl != IP_FAST_PATH_HDR_MASK) {
+ xge_debug_ring(XGE_ERR, "iphdr !=45 :%d",ip->version_ihl);
return XGE_HAL_FAIL;
}
- if (ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) {
- xge_debug_ring(XGE_ERR, "IP fragmented");
+ if (ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) {
+ xge_debug_ring(XGE_ERR, "IP fragmented");
return XGE_HAL_FAIL;
}
@@ -705,39 +762,123 @@ __hal_ip_lro_capable(iplro_t *ip,
}
/*
- * __hal_tcp_lro_capable: Finds whether tcp is lro capable.
- * @ip: ip header.
+ * __hal_tcp_lro_capable: Finds whether tcp is lro capable.
+ * @ip: ip header.
* @tcp: tcp header.
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-__hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp)
+__hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp, lro_t *lro, int *ts_off)
{
#ifdef XGE_LL_DEBUG_DUMP_PKT
{
u8 ch;
- u16 i;
+ u16 i;
- xge_debug_ring(XGE_TRACE, "Dump Tcp:" );
- for (i =0; i < 20; i++) {
- ch = ntohs(*((u8 *)((u8 *)tcp + i)) );
- xge_os_printf("i:%d %02x, ",i,ch);
+ xge_debug_ring(XGE_TRACE, "Dump Tcp:" );
+ for (i =0; i < 20; i++) {
+ ch = ntohs(*((u8 *)((u8 *)tcp + i)) );
+ xge_os_printf("i:%d %02x, ",i,ch);
}
}
#endif
- if ((TCP_FAST_PATH_HDR_MASK1 != tcp->doff_res) ||
- ((TCP_FAST_PATH_HDR_MASK2 != tcp->ctrl) &&
- (TCP_FAST_PATH_HDR_MASK3 != tcp->ctrl))) {
- xge_debug_ring(XGE_ERR, "tcphdr not fastpth %02x %02x \n", tcp->doff_res, tcp->ctrl);
- return XGE_HAL_FAIL;
+ if ((TCP_FAST_PATH_HDR_MASK2 != tcp->ctrl) &&
+ (TCP_FAST_PATH_HDR_MASK3 != tcp->ctrl))
+ goto _exit_fail;
+
+ *ts_off = -1;
+ if (TCP_FAST_PATH_HDR_MASK1 != tcp->doff_res) {
+ u16 tcp_hdr_len = tcp->doff_res >> 2; /* TCP header len */
+ u16 off = 20; /* Start of tcp options */
+ int i, diff;
+
+ /* Can the packet contain a time stamp? */
+ if (tcp_hdr_len < 32) {
+ /*
+ * If the session is not opened, we can consider
+ * this packet for LRO
+ */
+ if (lro == NULL)
+ return XGE_HAL_OK;
+
+ goto _exit_fail;
+ }
+
+ /* Ignore No-operation 0x1 */
+ while (((u8 *)tcp)[off] == 0x1)
+ off++;
+
+ /* Next option == Timestamp */
+ if (((u8 *)tcp)[off] != 0x8) {
+ /*
+ * If the session is not opened, we can consider
+ * this packet for LRO
+ */
+ if (lro == NULL)
+ return XGE_HAL_OK;
+
+ goto _exit_fail;
+ }
+
+ *ts_off = off;
+ if (lro == NULL)
+ return XGE_HAL_OK;
+
+ /*
+ * The session is already open. If the LRO frame doesn't
+ * carry a time stamp, we cannot consider the current
+ * packet for LRO.
+ */
+ if (lro->ts_off == -1) {
+ xge_debug_ring(XGE_ERR, "Pkt received with time stamp after session opened with no time stamp : %02x %02x\n", tcp->doff_res, tcp->ctrl);
+ return XGE_HAL_FAIL;
+ }
+
+ /*
+ * If the difference is greater than three, then there are
+ * more options possible.
+ * else, there are two cases:
+ * case 1: remaining are padding bytes.
+ * case 2: remaining can contain options or padding
+ */
+ off += ((u8 *)tcp)[off+1];
+ diff = tcp_hdr_len - off;
+ if (diff > 3) {
+ /*
+ * Probably contains more options.
+ */
+ xge_debug_ring(XGE_ERR, "tcphdr not fastpth : pkt received with tcp options in addition to time stamp after the session is opened %02x %02x \n", tcp->doff_res, tcp->ctrl);
+ return XGE_HAL_FAIL;
+ }
+
+ for (i = 0; i < diff; i++) {
+ u8 byte = ((u8 *)tcp)[off+i];
+
+ /* Ignore No-operation 0x1 */
+ if ((byte == 0x0) || (byte == 0x1))
+ continue;
+ xge_debug_ring(XGE_ERR, "tcphdr not fastpth : pkt received with tcp options in addition to time stamp after the session is opened %02x %02x \n", tcp->doff_res, tcp->ctrl);
+ return XGE_HAL_FAIL;
+ }
+
+ /*
+ * Update the time stamp of LRO frame.
+ */
+ xge_os_memcpy(((char *)lro->tcp_hdr + lro->ts_off + 2),
+ (char *)((char *)tcp + (*ts_off) + 2), 8);
}
return XGE_HAL_OK;
+
+_exit_fail:
+ xge_debug_ring(XGE_ERR, "tcphdr not fastpth %02x %02x\n", tcp->doff_res, tcp->ctrl);
+ return XGE_HAL_FAIL;
+
}
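The option walk above assumes the standard TCP timestamp layout from RFC 1323:
kind 8, length 10, then the two 4-byte timestamp fields, normally NOP-padded to
a 32-byte header. That is why a tcp_hdr_len below 32 rules the option out, why
0x1 (NOP) bytes are skipped, and why the xge_os_memcpy() copies 8 bytes
starting at ts_off + 2, stepping over kind and length to refresh TSval/TSecr:

/*
 * Assumed TCP timestamp option layout (RFC 1323):
 *
 *	offset:	 0	 1	 2..5	 6..9
 *		kind=8	len=10	TSval	TSecr
 *
 * Typical header: 20-byte base header + NOP + NOP + 10-byte
 * timestamp option = 32 bytes, i.e. doff = 8.
 */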
/*
- * __hal_lro_capable: Finds whether frame is lro capable.
- * @buffer: Ethernet frame.
- * @ip: ip frame.
+ * __hal_lro_capable: Finds whether frame is lro capable.
+ * @buffer: Ethernet frame.
+ * @ip: ip frame.
* @tcp: tcp frame.
* @ext_info: Descriptor info.
* @hldev: Hal context.
@@ -745,25 +886,25 @@ __hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp)
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_lro_capable( u8 *buffer,
iplro_t **ip,
- tcplro_t **tcp,
+ tcplro_t **tcp,
xge_hal_dtr_info_t *ext_info,
- xge_hal_device_t *hldev)
+ xge_hal_device_t *hldev)
{
u8 ip_off, ip_length;
- if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_TCP)) {
- xge_debug_ring(XGE_ERR, "Cant do lro %d", ext_info->proto);
+ if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_TCP)) {
+ xge_debug_ring(XGE_ERR, "Cant do lro %d", ext_info->proto);
return XGE_HAL_FAIL;
}
#ifdef XGE_LL_DEBUG_DUMP_PKT
{
u8 ch;
- u16 i;
+ u16 i;
- xge_os_printf("Dump Eth:" );
- for (i =0; i < 60; i++) {
+ xge_os_printf("Dump Eth:" );
+ for (i =0; i < 60; i++) {
ch = ntohs(*((u8 *)(buffer + i)) );
- xge_os_printf("i:%d %02x, ",i,ch);
+ xge_os_printf("i:%d %02x, ",i,ch);
}
}
#endif
@@ -773,201 +914,269 @@ __hal_lro_capable( u8 *buffer,
ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
break;
case XGE_HAL_FRAME_TYPE_LLC:
- ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
+ ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
XGE_HAL_HEADER_802_2_SIZE);
break;
case XGE_HAL_FRAME_TYPE_SNAP:
- ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
+ ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
XGE_HAL_HEADER_SNAP_SIZE);
break;
- default: // XGE_HAL_FRAME_TYPE_IPX, etc.
+ default: // XGE_HAL_FRAME_TYPE_IPX, etc.
return XGE_HAL_FAIL;
}
- if (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED) {
+ if (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED) {
ip_off += XGE_HAL_HEADER_VLAN_SIZE;
}
- /* Grab ip, tcp headers */
- *ip = (iplro_t *)((char*)buffer + ip_off);
+ /* Grab ip, tcp headers */
+ *ip = (iplro_t *)((char*)buffer + ip_off);
- ip_length = (u8)((*ip)->version_ihl & 0x0F);
- ip_length = ip_length <<2;
+ ip_length = (u8)((*ip)->version_ihl & 0x0F);
+ ip_length = ip_length <<2;
*tcp = (tcplro_t *)((unsigned long)*ip + ip_length);
- xge_debug_ring(XGE_TRACE, "ip_length:%d ip:%llx tcp:%llx", (int)ip_length,
- (u64)(unsigned long)*ip, (u64)(unsigned long)*tcp);
+ xge_debug_ring(XGE_TRACE, "ip_length:%d ip:"XGE_OS_LLXFMT
+ " tcp:"XGE_OS_LLXFMT"", (int)ip_length,
+ (unsigned long long)(long)*ip, (unsigned long long)(long)*tcp);
return XGE_HAL_OK;
}
-/**
- * xge_hal_lro_free - Used to recycle lro memory.
- * @lro: LRO memory.
- * @hldev: Hal device structure.
- *
+
+/*
+ * __hal_open_lro_session: Open a new LRO session.
+ * @buffer: Ethernet frame.
+ * @ip: ip header.
+ * @tcp: tcp header.
+ * @lro: lro pointer
+ * @ext_info: Descriptor info.
+ * @hldev: Hal context.
+ * @slot: Bucket no.
+ * @tcp_seg_len: Length of tcp segment.
+ * @ts_off: time stamp offset in the packet.
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
-xge_hal_lro_free(lro_t *lro, xge_hal_device_t *hldev)
+__hal_open_lro_session (u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro,
+ xge_hal_device_t *hldev, int slot, u32 tcp_seg_len,
+ int ts_off)
{
- lro->in_use = 0;
-#if 1 // For debug.
- xge_os_memzero(lro, sizeof(lro_t));
-#endif
+
+ lro_t *lro_new = &hldev->lro_pool[slot];
+
+ lro_new->in_use = 1;
+ lro_new->ll_hdr = buffer;
+ lro_new->ip_hdr = ip;
+ lro_new->tcp_hdr = tcp;
+ lro_new->tcp_next_seq_num = tcp_seg_len + xge_os_ntohl(tcp->seq);
+ lro_new->tcp_seq_num = tcp->seq;
+ lro_new->tcp_ack_num = tcp->ack_seq;
+ lro_new->sg_num = 1;
+ lro_new->total_length = xge_os_ntohs(ip->tot_len);
+ lro_new->frags_len = 0;
+ lro_new->ts_off = ts_off;
+
+ hldev->stats.sw_dev_info_stats.tot_frms_lroised++;
+ hldev->stats.sw_dev_info_stats.tot_lro_sessions++;
+
+ *lro = hldev->lro_recent = lro_new;
+ return;
}
/*
- * __hal_lro_malloc - Gets LRO from free memory pool.
- * @hldev: Hal device structure.
+ * __hal_lro_get_free_slot: Get a free LRO bucket.
+ * @hldev: Hal context.
*/
-__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
-__hal_lro_malloc(xge_hal_device_t *hldev)
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
+__hal_lro_get_free_slot (xge_hal_device_t *hldev)
{
- hldev->g_lro_pool->in_use = 1;
- return (hldev->g_lro_pool);
-}
+ int i;
+ for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
+ lro_t *lro_temp = &hldev->lro_pool[i];
+
+ if (!lro_temp->in_use)
+ return i;
+ }
+ return -1;
+}
/*
- * __hal_get_lro_session: Gets matching LRO session or creates one.
- * @buffer: Ethernet frame.
- * @ip: ip header.
+ * __hal_get_lro_session: Gets matching LRO session or creates one.
+ * @buffer: Ethernet frame.
+ * @ip: ip header.
* @tcp: tcp header.
* @lro: lro pointer
* @ext_info: Descriptor info.
* @hldev: Hal context.
- * Note: Current implementation will contain only one LRO session.
- * Global lro will not exist once more LRO sessions are permitted.
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_get_lro_session (u8 *buffer,
- iplro_t *ip,
- tcplro_t *tcp,
- lro_t **lro,
- xge_hal_dtr_info_t *ext_info,
- xge_hal_device_t *hldev)
+ iplro_t *ip,
+ tcplro_t *tcp,
+ lro_t **lro,
+ xge_hal_dtr_info_t *ext_info,
+ xge_hal_device_t *hldev,
+ lro_t **lro_end3 /* Valid only when ret=END_3 */)
{
- xge_hal_status_e ret;
- lro_t *g_lro;
- int i, free_slot = -1;
-
- /***********************************************************
- Search in the pool of LROs for the session that matches the incoming
- frame.
- ************************************************************/
- *lro = g_lro = NULL;
- for (i = 0; i < XGE_HAL_MAX_LRO_SESSIONS; i++) {
- g_lro = &hldev->g_lro_pool[i];
-
- if (!g_lro->in_use) {
- if (free_slot == -1)
- free_slot = i;
- continue;
- }
-
- /* Match Source address field */
- if ((g_lro->ip_hdr->saddr != ip->saddr))
- continue;
-
- /* Match Destination address field */
- if ((g_lro->ip_hdr->daddr != ip->daddr))
- continue;
-
+ lro_t *lro_match;
+ int i, free_slot = -1;
+ u32 tcp_seg_len;
+ int ts_off = -1;
- /* Match Source Port field */
- if ((g_lro->tcp_hdr->source != tcp->source))
- continue;
+ *lro = lro_match = NULL;
+ /*
+ * Compare the incoming frame with the lro session left from the
+ * previous call. There is a good chance that this incoming frame
+ * matches the lro session.
+ */
+ if (hldev->lro_recent && hldev->lro_recent->in_use) {
+ if (__hal_lro_check_for_session_match(hldev->lro_recent,
+ tcp, ip)
+ == XGE_HAL_OK)
+ lro_match = hldev->lro_recent;
+ }
-
- /* Match Destination Port field */
- if ((g_lro->tcp_hdr->dest != tcp->dest))
- continue;
-
- *lro = g_lro;
+ if (!lro_match) {
+ /*
+ * Search in the pool of LROs for the session that matches
+ * the incoming frame.
+ */
+ for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
+ lro_t *lro_temp = &hldev->lro_pool[i];
+
+ if (!lro_temp->in_use) {
+ if (free_slot == -1)
+ free_slot = i;
+ continue;
+ }
+
+ if (__hal_lro_check_for_session_match(lro_temp, tcp,
+ ip) == XGE_HAL_OK) {
+ lro_match = lro_temp;
+ break;
+ }
+ }
+ }
- if (g_lro->tcp_next_seq_num != xge_os_ntohl(tcp->seq)) {
- xge_debug_ring(XGE_ERR, "**retransmit **"
+
+ if (lro_match) {
+ /*
+ * Matching LRO Session found
+ */
+ *lro = lro_match;
+
+ if (lro_match->tcp_next_seq_num != xge_os_ntohl(tcp->seq)) {
+ xge_debug_ring(XGE_ERR, "**retransmit **"
"found***");
+ hldev->stats.sw_dev_info_stats.lro_out_of_seq_pkt_cnt++;
return XGE_HAL_INF_LRO_END_2;
}
if (XGE_HAL_OK != __hal_ip_lro_capable(ip, ext_info))
return XGE_HAL_INF_LRO_END_2;
- if (XGE_HAL_OK != __hal_tcp_lro_capable(ip, tcp))
- return XGE_HAL_INF_LRO_END_2;
+ if (XGE_HAL_OK != __hal_tcp_lro_capable(ip, tcp, lro_match,
+ &ts_off)) {
+ /*
+ * Close the current session and open a new
+ * LRO session with this packet,
+ * provided it has tcp payload
+ */
+ tcp_seg_len = __hal_tcp_seg_len(ip, tcp);
+ if (tcp_seg_len == 0)
+ return XGE_HAL_INF_LRO_END_2;
+
+ /* Get a free bucket */
+ free_slot = __hal_lro_get_free_slot(hldev);
+ if (free_slot == -1)
+ return XGE_HAL_INF_LRO_END_2;
- /*
- * The frame is good, in-sequence, can be LRO-ed;
- * take its (latest) ACK - unless it is a dupack.
- * Note: to be exact need to check window size as well..
- */
- if (g_lro->tcp_ack_num == tcp->ack_seq &&
- g_lro->tcp_seq_num == tcp->seq)
+ /*
+ * Open a new LRO session
+ */
+ __hal_open_lro_session (buffer, ip, tcp, lro_end3,
+ hldev, free_slot, tcp_seg_len,
+ ts_off);
+
+ return XGE_HAL_INF_LRO_END_3;
+ }
+
+ /*
+ * The frame is good, in-sequence, can be LRO-ed;
+ * take its (latest) ACK - unless it is a dupack.
+ * Note: to be exact need to check window size as well..
+ */
+ if (lro_match->tcp_ack_num == tcp->ack_seq &&
+ lro_match->tcp_seq_num == tcp->seq) {
+ hldev->stats.sw_dev_info_stats.lro_dup_pkt_cnt++;
return XGE_HAL_INF_LRO_END_2;
+ }
- g_lro->tcp_seq_num = tcp->seq;
- g_lro->tcp_ack_num = tcp->ack_seq;
- g_lro->frags_len += __hal_tcp_seg_len(ip, tcp);
+ lro_match->tcp_seq_num = tcp->seq;
+ lro_match->tcp_ack_num = tcp->ack_seq;
+ lro_match->frags_len += __hal_tcp_seg_len(ip, tcp);
+ hldev->lro_recent = lro_match;
+
return XGE_HAL_INF_LRO_CONT;
}
+ /* ********** New Session ***************/
if (free_slot == -1)
return XGE_HAL_INF_LRO_UNCAPABLE;
- g_lro = &hldev->g_lro_pool[free_slot];
- if (XGE_HAL_FAIL == __hal_ip_lro_capable(ip, ext_info))
+ if (XGE_HAL_FAIL == __hal_ip_lro_capable(ip, ext_info))
return XGE_HAL_INF_LRO_UNCAPABLE;
- if (XGE_HAL_FAIL == __hal_tcp_lro_capable(ip, tcp))
+ if (XGE_HAL_FAIL == __hal_tcp_lro_capable(ip, tcp, NULL, &ts_off))
return XGE_HAL_INF_LRO_UNCAPABLE;
- *lro = g_lro;
- xge_debug_ring(XGE_TRACE, "Creating lro session.");
-
- g_lro->in_use = 1;
- g_lro->ll_hdr = buffer;
- g_lro->ip_hdr = ip;
- g_lro->tcp_hdr = tcp;
- g_lro->tcp_next_seq_num = __hal_tcp_seg_len(ip, tcp) +
- xge_os_ntohl(tcp->seq);
- g_lro->tcp_seq_num = tcp->seq;
- g_lro->tcp_ack_num = tcp->ack_seq;
- g_lro->sg_num = 1;
- g_lro->total_length = xge_os_ntohs(ip->tot_len);
- g_lro->frags_len = 0;
- hldev->stats.sw_dev_info_stats.tot_frms_lroised++;
- hldev->stats.sw_dev_info_stats.tot_lro_sessions++;
+ xge_debug_ring(XGE_TRACE, "Creating lro session.");
+
+ /*
+ * Open a LRO session, provided the packet contains payload.
+ */
+ tcp_seg_len = __hal_tcp_seg_len(ip, tcp);
+ if (tcp_seg_len == 0)
+ return XGE_HAL_INF_LRO_UNCAPABLE;
+
+ __hal_open_lro_session (buffer, ip, tcp, lro, hldev, free_slot,
+ tcp_seg_len, ts_off);
return XGE_HAL_INF_LRO_BEGIN;
}
/*
* __hal_lro_under_optimal_thresh: Finds whether combined session is optimal.
- * @ip: ip header.
+ * @ip: ip header.
* @tcp: tcp header.
* @lro: lro pointer
* @hldev: Hal context.
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-__hal_lro_under_optimal_thresh (iplro_t *ip,
- tcplro_t *tcp,
+__hal_lro_under_optimal_thresh (iplro_t *ip,
+ tcplro_t *tcp,
lro_t *lro,
xge_hal_device_t *hldev)
{
if (!lro) return XGE_HAL_FAIL;
- if ((lro->total_length + __hal_tcp_seg_len(ip, tcp) ) >
- CONFIG_LRO_MAX_ACCUM_LENGTH) {
- xge_debug_ring(XGE_TRACE, "Max accumulation length exceeded: max length %d \n", CONFIG_LRO_MAX_ACCUM_LENGTH);
+ if ((lro->total_length + __hal_tcp_seg_len(ip, tcp) ) >
+ hldev->config.lro_frm_len) {
+ xge_debug_ring(XGE_TRACE, "Max LRO frame len exceeded:"
+ "max length %d \n", hldev->config.lro_frm_len);
+ hldev->stats.sw_dev_info_stats.lro_frm_len_exceed_cnt++;
return XGE_HAL_FAIL;
}
- if (lro->sg_num == CONFIG_LRO_MAX_SG_NUM) {
- xge_debug_ring(XGE_TRACE, "Max sg count exceeded: max sg %d \n", CONFIG_LRO_MAX_SG_NUM);
+ if (lro->sg_num == hldev->config.lro_sg_size) {
+ xge_debug_ring(XGE_TRACE, "Max sg count exceeded:"
+ "max sg %d \n", hldev->config.lro_sg_size);
+ hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++;
return XGE_HAL_FAIL;
}
@@ -975,14 +1184,14 @@ __hal_lro_under_optimal_thresh (iplro_t *ip,
}
/*
- * __hal_collapse_ip_hdr: Collapses ip header.
- * @ip: ip header.
+ * __hal_collapse_ip_hdr: Collapses ip header.
+ * @ip: ip header.
* @tcp: tcp header.
* @lro: lro pointer
* @hldev: Hal context.
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-__hal_collapse_ip_hdr ( iplro_t *ip,
+__hal_collapse_ip_hdr ( iplro_t *ip,
tcplro_t *tcp,
lro_t *lro,
xge_hal_device_t *hldev)
@@ -990,7 +1199,7 @@ __hal_collapse_ip_hdr ( iplro_t *ip,
lro->total_length += __hal_tcp_seg_len(ip, tcp);
- /* May be we have to handle time stamps or more options */
+ /* May be we have to handle time stamps or more options */
return XGE_HAL_OK;
@@ -998,7 +1207,7 @@ __hal_collapse_ip_hdr ( iplro_t *ip,
/*
* __hal_collapse_tcp_hdr: Collapses tcp header.
- * @ip: ip header.
+ * @ip: ip header.
* @tcp: tcp header.
* @lro: lro pointer
* @hldev: Hal context.
@@ -1009,7 +1218,6 @@ __hal_collapse_tcp_hdr ( iplro_t *ip,
lro_t *lro,
xge_hal_device_t *hldev)
{
-
lro->tcp_next_seq_num += __hal_tcp_seg_len(ip, tcp);
return XGE_HAL_OK;
@@ -1017,8 +1225,8 @@ __hal_collapse_tcp_hdr ( iplro_t *ip,
/*
* __hal_append_lro: Appends new frame to existing LRO session.
- * @ip: ip header.
- * @tcp: tcp header.
+ * @ip: ip header.
+ * @tcp: IN tcp header, OUT tcp payload.
* @seg_len: tcp payload length.
* @lro: lro pointer
* @hldev: Hal context.
@@ -1030,112 +1238,129 @@ __hal_append_lro(iplro_t *ip,
lro_t *lro,
xge_hal_device_t *hldev)
{
- __hal_collapse_ip_hdr(ip, *tcp, lro, hldev);
- __hal_collapse_tcp_hdr(ip, *tcp, lro, hldev);
- // Update mbuf chain will be done in ll driver.
+ (void) __hal_collapse_ip_hdr(ip, *tcp, lro, hldev);
+ (void) __hal_collapse_tcp_hdr(ip, *tcp, lro, hldev);
+ // Updating the mbuf chain is done in the ll driver.
// xge_hal_accumulate_large_rx on success of appending new frame to
- // lro will return to ll driver tcpdata pointer, and tcp payload length.
- // along with return code lro frame appended.
+ // lro will return the tcpdata pointer and the tcp payload length to
+ // the ll driver, along with the "lro frame appended" return code.
lro->sg_num++;
*seg_len = __hal_tcp_seg_len(ip, *tcp);
- *tcp = (tcplro_t *)((unsigned long)*tcp + (((*tcp)->doff_res)>>2));
+ *tcp = (tcplro_t *)((unsigned long)*tcp + (((*tcp)->doff_res)>>2));
return XGE_HAL_OK;
}
/**
- * xge_hal_accumulate_large_rx: LRO a given frame
+ * xge_hal_accumulate_large_rx: LRO a given frame
* frames
- * @buffer: Ethernet frame.
+ * @buffer: Ethernet frame.
* @tcp: tcp header.
- * @seglen: packet length.
+ * @seglen: packet length.
* @p_lro: lro pointer.
* @ext_info: descriptor info, see xge_hal_dtr_info_t{}.
* @hldev: HAL device.
*
- * LRO the newly received frame, i.e. attach it (if possible) to the
+ * LRO the newly received frame, i.e. attach it (if possible) to the
* already accumulated (i.e., already LRO-ed) received frames (if any),
- * to form one super-sized frame for the subsequent processing
+ * to form one super-sized frame for the subsequent processing
* by the stack.
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
-xge_hal_accumulate_large_rx(u8 *buffer,
- u8 **tcp,
- u32 *seglen,
- lro_t **p_lro,
- xge_hal_dtr_info_t *ext_info,
- xge_hal_device_t *hldev)
+xge_hal_accumulate_large_rx(u8 *buffer, tcplro_t **tcp, u32 *seglen,
+lro_t **p_lro, xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
+lro_t **lro_end3)
{
- iplro_t *ip;
+ iplro_t *ip;
xge_hal_status_e ret;
lro_t *lro;
xge_debug_ring(XGE_TRACE, "Entered accumu lro. ");
if (XGE_HAL_OK != __hal_lro_capable(buffer, &ip, (tcplro_t **)tcp,
- ext_info, hldev))
+ ext_info, hldev))
return XGE_HAL_INF_LRO_UNCAPABLE;
/*
- * This function shall get matching LRO or else
+ * This function shall get matching LRO or else
* create one and return it
*/
- ret = __hal_get_lro_session(buffer, ip,
- (tcplro_t *)*tcp,
- p_lro, ext_info, hldev);
+ ret = __hal_get_lro_session(buffer, ip, (tcplro_t *)*tcp,
+ p_lro, ext_info, hldev, lro_end3);
xge_debug_ring(XGE_TRACE, "ret from get_lro:%d ",ret);
lro = *p_lro;
if (XGE_HAL_INF_LRO_CONT == ret) {
if (XGE_HAL_OK == __hal_lro_under_optimal_thresh(ip,
- (tcplro_t *)*tcp, lro, hldev)) {
- __hal_append_lro(ip,(tcplro_t **) tcp, seglen,
- lro,
+ (tcplro_t *)*tcp, lro, hldev)) {
+ (void) __hal_append_lro(ip,(tcplro_t **) tcp, seglen,
+ lro,
hldev);
hldev->stats.sw_dev_info_stats.tot_frms_lroised++;
- if (lro->sg_num >= CONFIG_LRO_MAX_SG_NUM)
- ret = XGE_HAL_INF_LRO_END_1;
+ if (lro->sg_num >= hldev->config.lro_sg_size) {
+ hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++;
+ ret = XGE_HAL_INF_LRO_END_1;
+ }
} else ret = XGE_HAL_INF_LRO_END_2;
}
/*
* Since its time to flush,
- * update ip header so that it can be sent up
+ * update ip header so that it can be sent up
*/
if ((ret == XGE_HAL_INF_LRO_END_1) ||
- (ret == XGE_HAL_INF_LRO_END_2)) {
+ (ret == XGE_HAL_INF_LRO_END_2) ||
+ (ret == XGE_HAL_INF_LRO_END_3)) {
lro->ip_hdr->tot_len = xge_os_htons((*p_lro)->total_length);
lro->ip_hdr->check = xge_os_htons(0);
- lro->ip_hdr->check =
- XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
- (lro->ip_hdr->version_ihl & 0x0F));
- lro->tcp_hdr->ack_seq = lro->tcp_ack_num;
+ lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
+ (lro->ip_hdr->version_ihl & 0x0F));
+ lro->tcp_hdr->ack_seq = lro->tcp_ack_num;
}
return (ret);
}
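A hedged sketch of the consumer side follows (my_rx_frame() and deliver_up()
are placeholders for the ULD's own routines; the exact flush behaviour per
return code is the ULD's choice, this only shows the branching the API
implies):

/* deliver_up(): hypothetical ULD routine that passes a frame up the stack. */
extern void deliver_up(lro_t *lro);

static void
my_rx_frame(xge_hal_device_t *hldev, u8 *buffer,
    xge_hal_dtr_info_t *ext_info)
{
	lro_t *lro = NULL, *lro_end3 = NULL;
	tcplro_t *tcp;
	u32 seglen;

	switch (xge_hal_accumulate_large_rx(buffer, &tcp, &seglen,
	    &lro, ext_info, hldev, &lro_end3)) {
	case XGE_HAL_INF_LRO_BEGIN:	/* new session opened */
	case XGE_HAL_INF_LRO_CONT:	/* frame appended, keep accumulating */
		break;
	case XGE_HAL_INF_LRO_END_1:	/* sg limit reached */
	case XGE_HAL_INF_LRO_END_2:	/* frame could not be appended */
		deliver_up(lro);	/* flush the accumulated super-frame */
		xge_hal_lro_close_session(lro);
		break;
	case XGE_HAL_INF_LRO_END_3:	/* old session flushed, new in lro_end3 */
		deliver_up(lro);
		xge_hal_lro_close_session(lro);
		break;
	case XGE_HAL_INF_LRO_UNCAPABLE:	/* pass the frame up unmodified */
	default:
		break;
	}
}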
/**
- * xge_hal_lro_exist: Returns LRO list head if any.
+ * xge_hal_lro_close_session: Close LRO session
+ * @lro: LRO Session.
+ */
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
+xge_hal_lro_close_session (lro_t *lro)
+{
+ lro->in_use = 0;
+}
+
+/**
+ * xge_hal_lro_get_next_session: Returns next LRO session in the list or NULL
+ * if none exists.
* @hldev: Hal context.
*/
-__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
-xge_hal_lro_exist (xge_hal_device_t *hldev)
+__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
+xge_hal_lro_get_next_session (xge_hal_device_t *hldev)
{
+ int i;
+ int start_idx = hldev->lro_next_idx;
+
+ for (i = start_idx; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
+ lro_t *lro = &hldev->lro_pool[i];
+
+ if (!lro->in_use)
+ continue;
- if (hldev->g_lro_pool->in_use) {
- /* Since its time to flush, Update ip header so that it can be sent up*/
- lro_t *lro;
- lro = hldev->g_lro_pool;
lro->ip_hdr->tot_len = xge_os_htons(lro->total_length);
lro->ip_hdr->check = xge_os_htons(0);
lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
- (lro->ip_hdr->version_ihl & 0x0F));
- return (hldev->g_lro_pool);
+ (lro->ip_hdr->version_ihl & 0x0F));
+ hldev->lro_next_idx = i + 1;
+ return lro;
}
+ hldev->lro_next_idx = 0;
return NULL;
+
}
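At the end of receive processing (for example, once the poll loop runs dry),
the ULD flushes whatever sessions remain open using the two accessors above; a
minimal sketch, again with deliver_up() as a placeholder:

static void
my_flush_lro(xge_hal_device_t *hldev)
{
	lro_t *lro;

	/* Iterate open sessions; the accessor fixes up each ip header. */
	while ((lro = xge_hal_lro_get_next_session(hldev)) != NULL) {
		deliver_up(lro);
		xge_hal_lro_close_session(lro);
	}
}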
#endif
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-device.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-device.c
index 1d98aba5c4..7f58b8ee7d 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-device.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-device.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-device.c
- *
- * Description: HAL device object functionality
*
- * Created: 10 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#include "xgehal-device.h"
@@ -132,21 +123,29 @@ xge_hal_status_e
__hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg,
int op, u64 mask, int max_millis)
{
- xge_hal_status_e ret = XGE_HAL_FAIL;
u64 val64;
int i = 0;
+ xge_hal_status_e ret = XGE_HAL_FAIL;
+
+ xge_os_udelay(10);
do {
- xge_os_udelay(1000);
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
- if (op == 0 && !(val64 & mask)) {
- ret = XGE_HAL_OK;
- break;
- } else if (op == 1 && (val64 & mask) == mask) {
- ret = XGE_HAL_OK;
- break;
- }
- } while (++i <= max_millis);
+ if (op == 0 && !(val64 & mask))
+ return XGE_HAL_OK;
+ else if (op == 1 && (val64 & mask) == mask)
+ return XGE_HAL_OK;
+ xge_os_udelay(100);
+ } while (++i <= 9);
+
+ do {
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
+ if (op == 0 && !(val64 & mask))
+ return XGE_HAL_OK;
+ else if (op == 1 && (val64 & mask) == mask)
+ return XGE_HAL_OK;
+ xge_os_udelay(1000);
+ } while (++i < max_millis);
return ret;
}
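The rewrite splits the wait into three phases: a 10 us settle, ten polls at
100 us intervals (covering the first millisecond, where most command-memory
strobes complete), then the original 1 ms cadence until max_millis expires.
Fast completions are now detected in tens of microseconds rather than after a
mandatory 1 ms sleep.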
@@ -194,7 +193,7 @@ __hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status)
/**
* xge_hal_device_is_slot_freeze
- * @hldev: the device
+ * @devh: the device
*
* Returns non-zero if the slot is freezed.
* The determination is made based on the adapter_status
@@ -210,24 +209,20 @@ xge_hal_device_is_slot_freeze(xge_hal_device_h devh)
u64 adapter_status =
xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->adapter_status);
- u64 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
- &bar0->pif_rd_swapper_fb);
xge_os_pci_read16(hldev->pdev,hldev->cfgh,
xge_offsetof(xge_hal_pci_config_le_t, device_id),
&device_id);
#ifdef TX_DEBUG
- if (adapter_status == XGE_HAL_ALL_FOXES &&
- val64 == XGE_HAL_ALL_FOXES)
+ if (adapter_status == XGE_HAL_ALL_FOXES)
{
- u64 dummy;
- dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
- &bar0->pcc_enable);
- printf(">>> Slot is frozen!\n");
- brkpoint(0);
+ u64 dummy;
+ dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->pcc_enable);
+ printf(">>> Slot is frozen!\n");
+ brkpoint(0);
}
#endif
- return ((adapter_status == XGE_HAL_ALL_FOXES &&
- val64 == XGE_HAL_ALL_FOXES) || (device_id == 0xffff));
+ return((adapter_status == XGE_HAL_ALL_FOXES) || (device_id == 0xffff));
}
@@ -360,7 +355,7 @@ xge_hal_device_bcast_enable(xge_hal_device_h devh)
__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
(u32)(val64 >> 32), &bar0->mac_cfg);
- xge_debug_device(XGE_TRACE, "mac_cfg 0x%llx: broadcast %s",
+ xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
(unsigned long long)val64,
hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
}
@@ -390,7 +385,7 @@ xge_hal_device_bcast_disable(xge_hal_device_h devh)
__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
(u32)(val64 >> 32), &bar0->mac_cfg);
- xge_debug_device(XGE_TRACE, "mac_cfg 0x%llx: broadcast %s",
+ xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
(unsigned long long)val64,
hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
}
@@ -455,8 +450,8 @@ __hal_device_rmac_padding_configure(xge_hal_device_t *hldev)
xge_os_mdelay(1);
xge_debug_device(XGE_TRACE,
- "mac_cfg 0x%llx: frame padding configured",
- (unsigned long long)val64);
+ "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured",
+ (unsigned long long)val64);
}
/*
@@ -647,7 +642,7 @@ __hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag)
hldev->regh0, temp64,
&bar0->misc_int_mask);
xge_debug_device(XGE_TRACE,
- "unmask link up flag %llx",
+ "unmask link up flag "XGE_OS_LLXFMT,
(unsigned long long)temp64);
}
#endif
@@ -667,7 +662,7 @@ __hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag)
hldev->regh0, temp64,
&bar0->misc_int_mask);
xge_debug_device(XGE_TRACE,
- "mask link up/down flag %llx",
+ "mask link up/down flag "XGE_OS_LLXFMT,
(unsigned long long)temp64);
}
#endif
@@ -880,28 +875,231 @@ __hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag)
if (gim != gim_saved) {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim,
&bar0->general_int_mask);
- xge_debug_device(XGE_TRACE, "general_int_mask updated %llx => %llx",
+ xge_debug_device(XGE_TRACE, "general_int_mask updated "
+ XGE_OS_LLXFMT" => "XGE_OS_LLXFMT,
(unsigned long long)gim_saved, (unsigned long long)gim);
}
}
/*
+ * __hal_device_bimodal_configure
+ * @hldev: HAL device handle.
+ *
+ * Bimodal parameters initialization.
+ */
+static void
+__hal_device_bimodal_configure(xge_hal_device_t *hldev)
+{
+ int i;
+
+ for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
+ xge_hal_tti_config_t *tti;
+ xge_hal_rti_config_t *rti;
+
+ if (!hldev->config.ring.queue[i].configured)
+ continue;
+ rti = &hldev->config.ring.queue[i].rti;
+ tti = &hldev->bimodal_tti[i];
+
+ tti->enabled = 1;
+ tti->urange_a = hldev->bimodal_urange_a_en * 10;
+ tti->urange_b = 20;
+ tti->urange_c = 30;
+ tti->ufc_a = hldev->bimodal_urange_a_en * 8;
+ tti->ufc_b = 16;
+ tti->ufc_c = 32;
+ tti->ufc_d = 64;
+ tti->timer_val_us = hldev->bimodal_timer_val_us;
+ tti->timer_ac_en = 1;
+ tti->timer_ci_en = 0;
+
+ rti->urange_a = 10;
+ rti->urange_b = 20;
+ rti->urange_c = 30;
+ rti->ufc_a = 1; /* <= for netpipe type of tests */
+ rti->ufc_b = 4;
+ rti->ufc_c = 4;
+		rti->ufc_d = 4; /* <= 99% of full-bandwidth traffic counts here */
+ rti->timer_ac_en = 1;
+ rti->timer_val_us = 5; /* for optimal bus efficiency usage */
+ }
+}
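
The urange/ufc pairs above set up banded interrupt coalescing: at or below urange_a percent utilization the adapter interrupts after ufc_a frames, between urange_a and urange_b after ufc_b frames, and so on, with ufc_d applying above urange_c. A minimal sketch of that banding as a hypothetical helper over the same config fields:

    /* Pick the frame-count threshold for a utilization percentage,
     * mirroring the urange_a/b/c banding configured above (sketch only). */
    static int
    ufc_for_utilization(const xge_hal_tti_config_t *tti, int util_pct)
    {
            if (util_pct <= tti->urange_a)
                    return (tti->ufc_a);
            if (util_pct <= tti->urange_b)
                    return (tti->ufc_b);
            if (util_pct <= tti->urange_c)
                    return (tti->ufc_c);
            return (tti->ufc_d);    /* above urange_c */
    }
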
+
+/*
+ * __hal_device_tti_apply
+ * @hldev: HAL device handle.
+ *
+ * Apply a TTI configuration.
+ */
+static xge_hal_status_e
+__hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti,
+ int num, int runtime)
+{
+ u64 val64, data1 = 0, data2 = 0;
+ xge_hal_pci_bar0_t *bar0;
+
+ if (runtime)
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
+ else
+ bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ if (tti->timer_val_us) {
+ unsigned int tx_interval;
+
+ if (hldev->config.pci_freq_mherz) {
+ tx_interval = hldev->config.pci_freq_mherz *
+ tti->timer_val_us / 64;
+ tx_interval =
+ __hal_fix_time_ival_herc(hldev,
+ tx_interval);
+ } else {
+ tx_interval = tti->timer_val_us;
+ }
+ data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval);
+ if (tti->timer_ac_en) {
+ data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN;
+ }
+ if (tti->timer_ci_en) {
+ data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN;
+ }
+
+ if (!runtime) {
+ xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s",
+ num, tx_interval, tti->timer_ci_en ?
+ "enabled": "disabled");
+ }
+ }
+
+ if (tti->urange_a ||
+ tti->urange_b ||
+ tti->urange_c ||
+ tti->ufc_a ||
+ tti->ufc_b ||
+ tti->ufc_c ||
+ tti->ufc_d ) {
+ data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) |
+ XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) |
+ XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c);
+
+ data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) |
+ XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) |
+ XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) |
+ XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d);
+ }
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
+ &bar0->tti_data1_mem);
+ (void)xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->tti_data1_mem);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
+ &bar0->tti_data2_mem);
+ (void)xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->tti_data2_mem);
+ xge_os_wmb();
+
+ val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD |
+ XGE_HAL_TTI_CMD_MEM_OFFSET(num);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->tti_command_mem);
+
+ if (!runtime && __hal_device_register_poll(hldev, &bar0->tti_command_mem,
+ 0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+ /* upper layer may require to repeat */
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ if (!runtime) {
+ xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x"
+ XGE_OS_LLXFMT, num,
+ (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->tti_data1_mem));
+ }
+
+ return XGE_HAL_OK;
+}
+
+/*
+ * __hal_device_tti_configure
+ * @hldev: HAL device handle.
+ *
+ * TTI Initialization.
+ * Initialize Transmit Traffic Interrupt Scheme.
+ */
+static xge_hal_status_e
+__hal_device_tti_configure(xge_hal_device_t *hldev, int runtime)
+{
+ int i;
+
+ for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
+ int j;
+
+ if (!hldev->config.fifo.queue[i].configured)
+ continue;
+
+ for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
+ xge_hal_status_e status;
+
+ if (!hldev->config.fifo.queue[i].tti[j].enabled)
+ continue;
+
+			/* at least one TTI is enabled; record it. */
+ hldev->tti_enabled = 1;
+
+ status = __hal_device_tti_apply(hldev,
+ &hldev->config.fifo.queue[i].tti[j],
+ i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime);
+ if (status != XGE_HAL_OK)
+ return status;
+ }
+ }
+
+ /* processing bimodal TTIs */
+ for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
+ xge_hal_status_e status;
+
+ if (!hldev->bimodal_tti[i].enabled)
+ continue;
+
+		/* at least one bimodal TTI is enabled; record it. */
+ hldev->tti_enabled = 1;
+
+ status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i],
+ XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime);
+ if (status != XGE_HAL_OK)
+ return status;
+
+ }
+
+ return XGE_HAL_OK;
+}
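
In the runtime case __hal_device_tti_apply skips the completion poll and may hand back XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING, leaving the retry policy to the caller. A bounded-retry wrapper a caller might use, sketched here hypothetically:

    /* Hypothetical bounded retry around the runtime TTI path. */
    static xge_hal_status_e
    tti_configure_with_retry(xge_hal_device_t *hldev, int attempts)
    {
            xge_hal_status_e status = XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;

            while (attempts-- > 0) {
                    status = __hal_device_tti_configure(hldev, 1);
                    if (status != XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING)
                            break;  /* done, or a hard error */
            }
            return (status);
    }
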
+
+/*
* __hal_device_rti_configure
* @hldev: HAL device handle.
*
* RTI Initialization.
* Initialize Receive Traffic Interrupt Scheme.
*/
-static xge_hal_status_e
+xge_hal_status_e
__hal_device_rti_configure(xge_hal_device_t *hldev, int runtime)
{
xge_hal_pci_bar0_t *bar0;
u64 val64, data1 = 0, data2 = 0;
int i;
- if (runtime)
+ if (runtime) {
+ /*
+	 * we don't want to re-configure RTI while bimodal
+	 * interrupts are in use; instead, reconfigure TTI
+	 * with the new RTI values.
+ */
+ if (hldev->config.bimodal_interrupts) {
+ __hal_device_bimodal_configure(hldev);
+ return __hal_device_tti_configure(hldev, 1);
+ }
bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
- else
+ } else
bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
@@ -948,8 +1146,12 @@ __hal_device_rti_configure(xge_hal_device_t *hldev, int runtime)
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
&bar0->rti_data1_mem);
+ (void)xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->rti_data1_mem);
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
&bar0->rti_data2_mem);
+ (void)xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->rti_data2_mem);
xge_os_wmb();
val64 = XGE_HAL_RTI_CMD_MEM_WE |
@@ -965,97 +1167,16 @@ __hal_device_rti_configure(xge_hal_device_t *hldev, int runtime)
/* upper layer may require to repeat */
return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
}
- }
- if (!runtime) {
- xge_debug_device(XGE_TRACE,
- "RTI configured: rti_data1_mem 0x%llx",
- (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
- hldev->regh0, &bar0->rti_data1_mem));
- }
-
- return XGE_HAL_OK;
-}
-
-/*
- * __hal_device_tti_configure
- * @hldev: HAL device handle.
- *
- * TTI Initialization.
- * Initialize Transmit Traffic Interrupt Scheme.
- */
-static xge_hal_status_e
-__hal_device_tti_configure(xge_hal_device_t *hldev)
-{
- xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
- xge_hal_tti_config_t *tti = &hldev->config.tti;
- u64 val64, data1 = 0, data2 = 0;
-
- if (tti->timer_val_us) {
- unsigned int tx_interval;
-
- if (hldev->config.pci_freq_mherz) {
- tx_interval = hldev->config.pci_freq_mherz *
- tti->timer_val_us / 64;
- tx_interval =
- __hal_fix_time_ival_herc(hldev,
- tx_interval);
- } else {
- tx_interval = tti->timer_val_us;
- }
- data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval);
- if (tti->timer_ac_en) {
- data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN;
- }
- if (tti->timer_ci_en) {
- data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN;
+ if (!runtime) {
+ xge_debug_device(XGE_TRACE,
+ "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT,
+ i,
+ (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
+ hldev->regh0, &bar0->rti_data1_mem));
}
-
- xge_debug_device(XGE_TRACE, "TTI timer enabled to %d, ci %s",
- tx_interval, tti->timer_ci_en ?
- "enabled": "disabled");
}
- if (tti->urange_a ||
- tti->urange_b ||
- tti->urange_c ||
- tti->ufc_a ||
- tti->ufc_b ||
- tti->ufc_c ||
- tti->ufc_d ) {
- data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) |
- XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) |
- XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c);
-
- data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) |
- XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) |
- XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) |
- XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d);
-
- xge_debug_device(XGE_TRACE, "%s", "TTI utiliz. enabled");
- }
-
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
- &bar0->tti_data1_mem);
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
- &bar0->tti_data2_mem);
-
- val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD;
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
- &bar0->tti_command_mem);
-
- if (__hal_device_register_poll(hldev, &bar0->tti_command_mem, 0,
- XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD,
- XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
- /* upper layer may require to repeat */
- return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
- }
-
- xge_debug_device(XGE_TRACE, "TTI configured: tti_data1_mem 0x%llx",
- (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
- hldev->regh0,
- &bar0->tti_data1_mem));
-
return XGE_HAL_OK;
}
@@ -1157,8 +1278,8 @@ __hal_device_xaui_configure(xge_hal_device_t *hldev)
} else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
default_dtx_cfg = default_herc_dtx_cfg;
default_mdio_cfg = default_herc_mdio_cfg;
- }
- xge_assert(default_dtx_cfg);
+ } else
+ xge_assert(default_dtx_cfg);
do {
dtx_cfg:
@@ -1263,7 +1384,7 @@ __hal_device_set_swapper(xge_hal_device_t *hldev)
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->swapper_ctrl);
- xge_debug_device(XGE_TRACE, "using custom HW swapper 0x%llx",
+ xge_debug_device(XGE_TRACE, "using custom HW swapper 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
#elif !defined(XGE_OS_HOST_BIG_ENDIAN)
@@ -1319,7 +1440,7 @@ __hal_device_set_swapper(xge_hal_device_t *hldev)
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->pif_rd_swapper_fb);
if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) {
- xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read %llx",
+ xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT,
(unsigned long long) val64);
return XGE_HAL_ERR_SWAPPER_CTRL;
}
@@ -1358,6 +1479,84 @@ __hal_device_rts_mac_configure(xge_hal_device_t *hldev)
}
/*
+ * __hal_device_rts_qos_configure - Configure RTS steering based on
+ * qos.
+ * @hldev: HAL device handle.
+ *
+ */
+xge_hal_status_e
+__hal_device_rts_qos_configure(xge_hal_device_t *hldev)
+{
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+ u64 val64;
+ int j;
+
+ if (!hldev->config.rts_qos_steering_config) {
+ return XGE_HAL_OK;
+ }
+
+ /* First clear the RTS_DS_MEM_DATA */
+ val64 = 0;
+ for (j = 0; j < 64; j++ )
+ {
+ /* First clear the value */
+ val64 = XGE_HAL_RTS_DS_MEM_DATA(0);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_ds_mem_data);
+
+ val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE |
+ XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
+ XGE_HAL_RTS_DS_MEM_CTRL_OFFSET ( j );
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_ds_mem_ctrl);
+
+
+ /* poll until done */
+ if (__hal_device_register_poll(hldev,
+ &bar0->rts_ds_mem_ctrl, 0,
+ XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
+ XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
+ /* upper layer may require to repeat */
+ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
+ }
+
+ }
+ /* Check for enhanced mode */
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_ctrl);
+
+ /* Check to see if QOS Steering is turned ON and adapter is in classic mode */
+ if (!(val64 & XGE_HAL_RTS_CTRL_ENHANCED_MODE))
+ {
+ /* Set the priority calendar - hard coded as all rings should be enabled */
+ val64 = 0x0706050407030602;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rx_w_round_robin_0);
+
+ val64 = 0x0507040601070503;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rx_w_round_robin_1);
+
+ val64 = 0x0604070205060700;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rx_w_round_robin_2);
+
+ val64 = 0x0403060705010207;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rx_w_round_robin_3);
+
+ val64 = 0x0604050300000000;
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rx_w_round_robin_4);
+
+ }
+ return XGE_HAL_OK;
+}
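
Each rx_w_round_robin register packs eight ring numbers, one per byte, so the hard-coded calendar word 0x0706050407030602 above grants receive slots to rings 7, 6, 5, 4, 7, 3, 6, 2 in order, assuming the hardware consumes entries most-significant byte first. A decoding sketch under that assumption:

    #include <stdint.h>

    /* Unpack one priority-calendar word into its eight ring slots (MSB first). */
    static void
    decode_calendar(uint64_t word, uint8_t slots[8])
    {
            int i;

            for (i = 0; i < 8; i++)
                    slots[i] = (word >> (56 - i * 8)) & 0xff;
    }
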
+
+/*
 * xge_hal_device_rts_mac_enable
*
* @devh: HAL device handle.
@@ -1379,14 +1578,15 @@ __hal_device_rts_mac_configure(xge_hal_device_t *hldev)
xge_hal_status_e
xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr)
{
- u64 val64;
- int section;
+ int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
xge_hal_status_e status;
xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
- xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
- if ( index >= XGE_HAL_MAX_MAC_ADDRESSES )
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
+
+ if ( index >= max_addr )
return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
/*
@@ -1399,49 +1599,7 @@ xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macadd
return status;
}
- /*
- * Calculate the section value
- */
- section = index / 32;
-
- xge_debug_device(XGE_TRACE, "the Section value is %d \n", section);
-
- val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
- &bar0->rts_mac_cfg);
- switch(section)
- {
- case 0:
- val64 |= XGE_HAL_RTS_MAC_SECT0_EN;
- break;
- case 1:
- val64 |= XGE_HAL_RTS_MAC_SECT1_EN;
- break;
- case 2:
- val64 |= XGE_HAL_RTS_MAC_SECT2_EN;
- break;
- case 3:
- val64 |= XGE_HAL_RTS_MAC_SECT3_EN;
- break;
- case 4:
- val64 |= XGE_HAL_RTS_MAC_SECT4_EN;
- break;
- case 5:
- val64 |= XGE_HAL_RTS_MAC_SECT5_EN;
- break;
- case 6:
- val64 |= XGE_HAL_RTS_MAC_SECT6_EN;
- break;
- case 7:
- val64 |= XGE_HAL_RTS_MAC_SECT7_EN;
- break;
- default:
- xge_debug_device(XGE_ERR, "Invalid Section value %d \n"
- , section);
- }
-
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
- val64, &bar0->rts_mac_cfg);
- return XGE_HAL_OK;
+ return xge_hal_device_rts_section_enable(hldev, index);
}
/*
@@ -1458,11 +1616,16 @@ xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index)
{
xge_hal_status_e status;
u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
xge_debug_ll(XGE_TRACE, "the index value is %d \n", index);
- if ( index >= XGE_HAL_MAX_MAC_ADDRESSES )
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
+
+ if ( index >= max_addr )
return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
/*
@@ -1589,23 +1752,9 @@ __hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip,
u8 line_no;
/*
- * Poll the rxpic_int_reg register until spdm ready bit is set or
- * timeout happens.
- */
- if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
- XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
- XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
-
- /* upper layer may require to repeat */
- return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
- }
-
- /*
* Clear the SPDM READY bit
*/
- val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
- &bar0->rxpic_int_reg);
- val64 &= ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
+ val64 = XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->rxpic_int_reg);
@@ -2064,7 +2213,7 @@ __hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
default:
rc_status = XGE_HAL_ERR_INVALID_PCI_INFO;
xge_debug_device(XGE_ERR,
- "invalid pci info %llx",
+ "invalid pci info "XGE_OS_LLXFMT,
(unsigned long long)pci_info);
break;
}
@@ -2072,8 +2221,10 @@ __hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
xge_debug_device(XGE_TRACE, "PCI info: mode %d width "
"%d frequency %d", *pci_mode, *bus_width,
*bus_frequency);
-
- hldev->config.pci_freq_mherz = *bus_frequency;
+ if (hldev->config.pci_freq_mherz ==
+ XGE_HAL_DEFAULT_USE_HARDCODE) {
+ hldev->config.pci_freq_mherz = *bus_frequency;
+ }
}
/* for XENA, we report PCI mode, only. PCI bus frequency, and bus width
* are set to unknown */
@@ -2320,6 +2471,7 @@ __hal_device_handle_link_state_change(xge_hal_device_t *hldev)
int retcode;
xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
u64 val64;
+ int i = 0;
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->adapter_control);
@@ -2332,11 +2484,20 @@ __hal_device_handle_link_state_change(xge_hal_device_t *hldev)
return(__hal_device_handle_link_down_ind(hldev));
}
- (void) xge_hal_device_status(hldev, &hw_status);
- hw_link_state = (hw_status &
- (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
- XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ?
- XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP;
+ do {
+ xge_os_mdelay(1);
+ (void) xge_hal_device_status(hldev, &hw_status);
+ hw_link_state = (hw_status &
+ (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
+ XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ?
+ XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP;
+
+		/* check whether the link state change persists across
+		 * samples; this filters out transient noise instead of
+		 * reporting it as a real link transition */
+ if (hldev->link_state == hw_link_state)
+ break;
+ } while (i++ < hldev->config.link_valid_cnt);
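
The new loop re-samples the adapter status for up to link_valid_cnt milliseconds and stops as soon as the hardware agrees with the previously recorded state, so a transient flap never reaches the event queue. The same debounce idea as a standalone sketch, assuming a hypothetical sample_link() probe:

    /* hypothetical probe: returns 1 when the PHY reports link up */
    extern int sample_link(void);
    extern void sleep_ms(unsigned int ms);

    /* Re-sample until the reading reverts to `current' or the budget
     * runs out; only a persistent change survives. */
    static int
    debounced_link_state(int current, int valid_cnt)
    {
            int i, sampled = current;

            for (i = 0; i < valid_cnt; i++) {
                    sleep_ms(1);
                    sampled = sample_link();
                    if (sampled == current) /* flap filtered out */
                            break;
            }
            return (sampled);
    }
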
/* If the current link state is same as previous, just return */
if (hldev->link_state == hw_link_state)
@@ -2365,7 +2526,7 @@ __hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value)
(void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev,
1, sizeof(u64), (void *)&value);
- xge_debug_device(XGE_ERR, "%s: read %llx", reg,
+ xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
(unsigned long long) value);
}
@@ -2388,7 +2549,7 @@ __hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value)
1, sizeof(u64), (void *)&value);
}
- xge_debug_device(XGE_ERR, "%s: read %llx", reg,
+ xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
(unsigned long long) value);
}
@@ -2406,7 +2567,7 @@ __hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value)
(void) xge_queue_produce_context(hldev->queueh,
XGE_HAL_EVENT_PARITYERR, hldev);
- xge_debug_device(XGE_ERR, "%s: read %llx", reg,
+ xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
(unsigned long long) value);
}
@@ -2476,6 +2637,13 @@ __hal_device_hw_initialize(xge_hal_device_t *hldev)
xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode");
}
+	/* set the number of bytes the hardware uses to update
+	   lso_bytes_sent in the returned TxD0 */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->pic_control_2);
+ val64 |= XGE_HAL_TXD_WRITE_BC(0x4);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->pic_control_2);
/* added this to clear the EOI_RESET field while leaving XGXS_RESET
* in reset, then a 1-second delay */
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
@@ -2525,34 +2693,64 @@ __hal_device_hw_initialize(xge_hal_device_t *hldev)
val64, &bar0->misc_control);
/*
- * Initialize the device tti registers only if the TTI feature is
- * enabled.
+	 * In bimodal mode all Rx traffic interrupts are routed to
+	 * TTI, so we need to adjust the RTI settings and use an
+	 * adaptive TTI timer. We must make sure RTI is configured
+	 * to a sane value that will not disrupt the bimodal
+	 * behavior.
*/
- if (hldev->config.tti.enabled) {
- if ((status = __hal_device_tti_configure(hldev)) !=
- XGE_HAL_OK) {
- return status;
+ if (hldev->config.bimodal_interrupts) {
+ int i;
+
+		/* force polling_cnt to be "0", otherwise the IRQ
+		 * workload statistics will be skewed. This could be
+		 * handled in the TXPIC handler later. */
+ hldev->config.isr_polling_cnt = 0;
+ hldev->config.sched_timer_us = 10000;
+
+ /* disable all TTI < 56 */
+ for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
+ int j;
+ if (!hldev->config.fifo.queue[i].configured)
+ continue;
+ for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
+ if (hldev->config.fifo.queue[i].tti[j].enabled)
+ hldev->config.fifo.queue[i].tti[j].enabled = 0;
+ }
}
+
+ /* now configure bimodal interrupts */
+ __hal_device_bimodal_configure(hldev);
}
+ status = __hal_device_tti_configure(hldev, 0);
+ if (status != XGE_HAL_OK)
+ return status;
+
status = __hal_device_rti_configure(hldev, 0);
- if (status != XGE_HAL_OK) {
+ if (status != XGE_HAL_OK)
return status;
- }
+
status = __hal_device_rth_it_configure(hldev);
- if (status != XGE_HAL_OK) {
+ if (status != XGE_HAL_OK)
return status;
- }
+
status = __hal_device_rth_spdm_configure(hldev);
- if (status != XGE_HAL_OK) {
+ if (status != XGE_HAL_OK)
return status;
- }
+
status = __hal_device_rts_mac_configure(hldev);
if (status != XGE_HAL_OK) {
xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed \n");
return status;
}
+ status = __hal_device_rts_qos_configure(hldev);
+ if (status != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed \n");
+ return status;
+ }
+
__hal_device_pause_frames_configure(hldev);
__hal_device_rmac_padding_configure(hldev);
__hal_device_shared_splits_configure(hldev);
@@ -2581,7 +2779,7 @@ __hal_device_hw_initialize(xge_hal_device_t *hldev)
return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
}
- xge_debug_device(XGE_TRACE, "device 0x%llx is quiescent",
+ xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is quiescent",
(unsigned long long)(ulong_t)hldev);
#if defined(XGE_HAL_MSI)
@@ -2608,11 +2806,12 @@ __hal_device_hw_initialize(xge_hal_device_t *hldev)
* Reset the device, and subsequently restore
* the previously saved PCI configuration space.
*/
+#define XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT 50
static xge_hal_status_e
__hal_device_reset(xge_hal_device_t *hldev)
{
xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
- int i, swap_done, pcisize = 0;
+ int i, j, swap_done, pcisize = 0;
u64 val64, rawval = 0ULL;
#if defined(XGE_HAL_MSI_X)
@@ -2651,6 +2850,36 @@ __hal_device_reset(xge_hal_device_t *hldev)
&bar0->sw_reset);
}
+ pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
+ XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
+
+ xge_os_mdelay(20); /* Wait for 20 ms after reset */
+
+ {
+ /* Poll for no more than 1 second */
+ for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++)
+ {
+ for (j = 0; j < pcisize; j++) {
+ xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
+ *((u32*)&hldev->pci_config_space + j));
+ }
+
+ xge_os_pci_read16(hldev->pdev,hldev->cfgh,
+ xge_offsetof(xge_hal_pci_config_le_t, device_id),
+ &hldev->device_id);
+
+ if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN)
+ break;
+ xge_os_mdelay(20);
+ }
+ }
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_UNKNOWN)
+ {
+ xge_debug_device(XGE_ERR, "device reset failed");
+ return XGE_HAL_ERR_RESET_FAILED;
+ }
+
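
The reworked reset path rewrites the saved PCI configuration space dword by dword and then reads device_id back to confirm the adapter is decoding config cycles again, retrying for up to a second. One restore-and-verify pass, condensed into a hypothetical helper built from the same accessors:

    /* Hypothetical: returns non-zero once the device answers config reads. */
    static int
    restore_pci_config(xge_hal_device_t *hldev, const u32 *saved, int ndwords)
    {
            int j;

            for (j = 0; j < ndwords; j++)
                    xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
                        saved[j]);

            xge_os_pci_read16(hldev->pdev, hldev->cfgh,
                xge_offsetof(xge_hal_pci_config_le_t, device_id),
                &hldev->device_id);

            return (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN);
    }
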
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
int cnt = 0;
@@ -2672,11 +2901,6 @@ __hal_device_reset(xge_hal_device_t *hldev)
xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS);
}
- for (i = 0; i < pcisize; i++) {
- xge_os_pci_write32(hldev->pdev, hldev->cfgh, i * 4,
- *((u32*)&hldev->pci_config_space + i));
- }
-
#if defined(XGE_HAL_MSI_X)
/* Restore MSI-X vector table */
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
@@ -2706,7 +2930,7 @@ __hal_device_reset(xge_hal_device_t *hldev)
if (val64 != rawval) {
xge_debug_device(XGE_ERR, "device has not been reset "
- "got 0x%llx, expected 0x%llx",
+ "got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT,
(unsigned long long)val64, (unsigned long long)rawval);
return XGE_HAL_ERR_RESET_FAILED;
}
@@ -2838,12 +3062,160 @@ __hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status)
}
static void
+__hal_update_bimodal(xge_hal_device_t *hldev, int ring_no)
+{
+ int tval, d, iwl_avg, len_avg, bytes_avg, bytes_hist, d_hist;
+ int iwl_rxcnt, iwl_txcnt, iwl_txavg, len_rxavg, iwl_rxavg, len_txavg;
+ int iwl_cnt, i;
+
+#define _HIST_SIZE 50 /* 0.5 sec history */
+#define _HIST_ADJ_TIMER 1
+#define _STEP 2
+
+ static int bytes_avg_history[_HIST_SIZE] = {0};
+ static int d_avg_history[_HIST_SIZE] = {0};
+ static int history_idx = 0;
+ static int pstep = 1;
+ static int hist_adj_timer = 0;
+
+ /*
+ * tval - current value of this bimodal timer
+ */
+ tval = hldev->bimodal_tti[ring_no].timer_val_us;
+
+ /*
+ * d - how many interrupts we were getting since last
+	 * d - how many interrupts arrived since the last
+	 * bimodal timer tick.
+ d = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt -
+ hldev->bimodal_intr_cnt;
+
+ /* advance bimodal interrupt counter */
+ hldev->bimodal_intr_cnt =
+ hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt;
+
+ /*
+ * iwl_cnt - how many interrupts we've got since last
+ * bimodal timer tick.
+ */
+ iwl_rxcnt = (hldev->irq_workload_rxcnt[ring_no] ?
+ hldev->irq_workload_rxcnt[ring_no] : 1);
+ iwl_txcnt = (hldev->irq_workload_txcnt[ring_no] ?
+ hldev->irq_workload_txcnt[ring_no] : 1);
+ iwl_cnt = iwl_rxcnt + iwl_txcnt;
+ iwl_cnt = iwl_cnt; /* just to remove the lint warning */
+
+ /*
+ * we need to take hldev->config.isr_polling_cnt into account
+ * but for some reason this line causing GCC to produce wrong
+ * code on Solaris. As of now, if bimodal_interrupts is configured
+ * hldev->config.isr_polling_cnt is forced to be "0".
+ *
+ * iwl_cnt = iwl_cnt / (hldev->config.isr_polling_cnt + 1); */
+
+ /*
+	 * iwl_avg - how many RXDs have been processed on average
+	 * since the last bimodal timer tick. This indirectly
+	 * reflects CPU utilization.
+ */
+ iwl_rxavg = hldev->irq_workload_rxd[ring_no] / iwl_rxcnt;
+ iwl_txavg = hldev->irq_workload_txd[ring_no] / iwl_txcnt;
+ iwl_avg = iwl_rxavg + iwl_txavg;
+ iwl_avg = iwl_avg == 0 ? 1 : iwl_avg;
+
+ /*
+	 * len_avg - how many bytes have been processed on average
+	 * since the last bimodal timer tick, i.e. the average
+	 * frame size.
+ */
+ len_rxavg = 1 + hldev->irq_workload_rxlen[ring_no] /
+ (hldev->irq_workload_rxd[ring_no] ?
+ hldev->irq_workload_rxd[ring_no] : 1);
+ len_txavg = 1 + hldev->irq_workload_txlen[ring_no] /
+ (hldev->irq_workload_txd[ring_no] ?
+ hldev->irq_workload_txd[ring_no] : 1);
+ len_avg = len_rxavg + len_txavg;
+ if (len_avg < 60)
+ len_avg = 60;
+
+ /* align on low boundary */
+ if ((tval -_STEP) < hldev->config.bimodal_timer_lo_us)
+ tval = hldev->config.bimodal_timer_lo_us;
+
+ /* reset faster */
+ if (iwl_avg == 1) {
+ tval = hldev->config.bimodal_timer_lo_us;
+ /* reset history */
+ for (i = 0; i < _HIST_SIZE; i++)
+ bytes_avg_history[i] = d_avg_history[i] = 0;
+ history_idx = 0;
+ pstep = 1;
+ hist_adj_timer = 0;
+ }
+
+	/* always try to adjust the timer toward the best throughput value */
+ bytes_avg = iwl_avg * len_avg;
+ history_idx %= _HIST_SIZE;
+ bytes_avg_history[history_idx] = bytes_avg;
+ d_avg_history[history_idx] = d;
+ history_idx++;
+ d_hist = bytes_hist = 0;
+ for (i = 0; i < _HIST_SIZE; i++) {
+ /* do not re-configure until history is gathered */
+ if (!bytes_avg_history[i]) {
+ tval = hldev->config.bimodal_timer_lo_us;
+ goto _end;
+ }
+ bytes_hist += bytes_avg_history[i];
+ d_hist += d_avg_history[i];
+ }
+ bytes_hist /= _HIST_SIZE;
+ d_hist /= _HIST_SIZE;
+
+// xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d",
+// d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg,
+// d_hist*bytes_hist, pstep);
+
+ /* make an adaptive step */
+ if (d * bytes_avg < d_hist * bytes_hist && hist_adj_timer++ > _HIST_ADJ_TIMER) {
+ pstep = !pstep;
+ hist_adj_timer = 0;
+ }
+
+ if (pstep &&
+ (tval + _STEP) <= hldev->config.bimodal_timer_hi_us) {
+ tval += _STEP;
+ hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++;
+ } else if ((tval - _STEP) >= hldev->config.bimodal_timer_lo_us) {
+ tval -= _STEP;
+ hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++;
+ }
+
+ /* enable TTI range A for better latencies */
+ hldev->bimodal_urange_a_en = 0;
+ if (tval <= hldev->config.bimodal_timer_lo_us && iwl_avg > 2)
+ hldev->bimodal_urange_a_en = 1;
+
+_end:
+ /* reset workload statistics counters */
+ hldev->irq_workload_rxcnt[ring_no] = 0;
+ hldev->irq_workload_rxd[ring_no] = 0;
+ hldev->irq_workload_rxlen[ring_no] = 0;
+ hldev->irq_workload_txcnt[ring_no] = 0;
+ hldev->irq_workload_txd[ring_no] = 0;
+ hldev->irq_workload_txlen[ring_no] = 0;
+
+ /* reconfigure TTI56 + ring_no with new timer value */
+ hldev->bimodal_timer_val_us = tval;
+ (void) __hal_device_rti_configure(hldev, 1);
+}
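
Stripped of the statistics bookkeeping, __hal_update_bimodal is a hill-climb: it keeps stepping the timer in its current direction while the throughput estimate (interrupts times average bytes per interrupt) beats the history window, and flips direction once it stops improving. The step logic in isolation, with hypothetical parameters:

    /* One hill-climb step for an adaptive timer: keep the current
     * direction while throughput improves, reverse when it degrades. */
    static int
    adapt_timer(int tval, int step, int lo, int hi, int improved, int *dir_up)
    {
            if (!improved)
                    *dir_up = !*dir_up;     /* reverse direction */

            if (*dir_up && tval + step <= hi)
                    tval += step;
            else if (tval - step >= lo)
                    tval -= step;
            return (tval);
    }
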
+
+static void
__hal_update_rxufca(xge_hal_device_t *hldev, int ring_no)
{
- int ufc = hldev->config.ring.queue[ring_no].rti.ufc_a;
- int ic = hldev->stats.sw_dev_info_stats.total_intr_cnt -
- hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt;
- int i;
+ int ufc, ic, i;
+
+ ufc = hldev->config.ring.queue[ring_no].rti.ufc_a;
+ ic = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt;
/* urange_a adaptive coalescing */
if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) {
@@ -2982,7 +3354,7 @@ __hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason)
u64 temp64;
xge_debug_device(XGE_TRACE,
- "both link up and link down detected %llx",
+ "both link up and link down detected "XGE_OS_LLXFMT,
(unsigned long long)val64);
temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT |
@@ -2993,13 +3365,13 @@ __hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason)
}
else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) {
xge_debug_device(XGE_TRACE,
- "link up call request, misc_int %llx",
+ "link up call request, misc_int "XGE_OS_LLXFMT,
(unsigned long long)val64);
__hal_device_handle_link_up_ind(hldev);
}
else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){
xge_debug_device(XGE_TRACE,
- "link down request, misc_int %llx",
+ "link down request, misc_int "XGE_OS_LLXFMT,
(unsigned long long)val64);
__hal_device_handle_link_down_ind(hldev);
}
@@ -3053,7 +3425,7 @@ __hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
g_xge_hal_driver->uld_callbacks.sched_timer(
hldev, hldev->upper_layer_info);
/*
- * This feature implement adaptive receive interrupt
+ * This feature implements adaptive receive interrupt
* coalecing. It is disabled by default. To enable it
* set hldev->config.rxufca_lo_lim to be not equal to
* hldev->config.rxufca_hi_lim.
@@ -3065,13 +3437,31 @@ __hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
 * For those familiar with Linux, lbolt means jiffies
 * of this timer, i.e. the timer tick.
*/
- for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
- if (hldev->config.ring.queue[i].rti.urange_a) {
- if (hldev->config.rxufca_lo_lim !=
- hldev->config.rxufca_hi_lim)
+ if (hldev->config.rxufca_lo_lim !=
+ hldev->config.rxufca_hi_lim &&
+ hldev->config.rxufca_lo_lim != 0) {
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ if (!hldev->config.ring.queue[i].configured)
+ continue;
+ if (hldev->config.ring.queue[i].rti.urange_a)
__hal_update_rxufca(hldev, i);
}
}
+
+ /*
+ * This feature implements adaptive TTI timer re-calculation
+ * based on host utilization, number of interrupt processed,
+ * number of RXD per tick and avarage length of packets per
+ * tick.
+ */
+ if (hldev->config.bimodal_interrupts) {
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ if (!hldev->config.ring.queue[i].configured)
+ continue;
+ if (hldev->bimodal_tti[i].enabled)
+ __hal_update_bimodal(hldev, i);
+ }
+ }
}
return XGE_HAL_OK;
@@ -3430,7 +3820,8 @@ xge_hal_device_enable(xge_hal_device_t *hldev)
XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
}
xge_debug_device(XGE_TRACE,
- "adp_status: %llx, link is up on "
+ "adp_status: "XGE_OS_LLXFMT
+ ", link is up on "
"adapter enable!",
(unsigned long long)adp_status);
val64 = xge_os_pio_mem_read64(
@@ -3596,7 +3987,9 @@ xge_hal_device_disable(xge_hal_device_t *hldev)
xge_assert(!hldev->stats.is_enabled);
#endif
+#ifndef XGE_HAL_DONT_DISABLE_BUS_MASTER_ON_STOP
__hal_device_bus_master_disable(hldev);
+#endif
return status;
}
@@ -3623,7 +4016,7 @@ xge_hal_device_reset(xge_hal_device_t *hldev)
/* increment the soft reset counter */
u32 reset_cnt = hldev->stats.sw_dev_info_stats.soft_reset_cnt;
- xge_debug_device(XGE_ERR, "%s (%d)", "resetting the device", reset_cnt);
+ xge_debug_device(XGE_TRACE, "%s (%d)", "resetting the device", reset_cnt);
if (!hldev->is_initialized)
return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED;
@@ -3739,31 +4132,34 @@ xge_hal_device_intr_enable(xge_hal_device_t *hldev)
}
/* enable traffic only interrupts */
-#if defined(XGE_HAL_MSI)
- /*
- * make sure all interrupts going to be disabled if MSI
- * is enabled.
- */
- __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
-#else
+ if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_IRQLINE) {
+ /*
+ * make sure all interrupts going to be disabled if MSI
+ * is enabled.
+ */
+ __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
+ } else {
- /*
- * Enable the Tx traffic interrupts only if the TTI feature is
- * enabled.
- */
- val64 = 0;
- if (hldev->config.tti.enabled) {
- val64 = XGE_HAL_TX_TRAFFIC_INTR;
- }
+ /*
+ * Enable the Tx traffic interrupts only if the TTI feature is
+ * enabled.
+ */
+ val64 = 0;
+ if (hldev->tti_enabled)
+ val64 = XGE_HAL_TX_TRAFFIC_INTR;
- val64 |= XGE_HAL_RX_TRAFFIC_INTR |
- XGE_HAL_TX_PIC_INTR |
- XGE_HAL_MC_INTR |
- (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ?
- XGE_HAL_SCHED_INTR : 0);
- __hal_device_intr_mgmt(hldev, val64, 1);
+ if (!hldev->config.bimodal_interrupts)
+ val64 |= XGE_HAL_RX_TRAFFIC_INTR;
-#endif
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
+ val64 |= XGE_HAL_RX_TRAFFIC_INTR;
+
+ val64 |=XGE_HAL_TX_PIC_INTR |
+ XGE_HAL_MC_INTR |
+ (hldev->config.sched_timer_us !=
+ XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0);
+ __hal_device_intr_mgmt(hldev, val64, 1);
+ }
xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled");
}
@@ -3791,9 +4187,8 @@ xge_hal_device_intr_disable(xge_hal_device_t *hldev)
* enabled.
*/
val64 = 0;
- if (hldev->config.tti.enabled) {
+ if (hldev->tti_enabled)
val64 = XGE_HAL_TX_TRAFFIC_INTR;
- }
val64 |= XGE_HAL_RX_TRAFFIC_INTR |
XGE_HAL_TX_PIC_INTR |
@@ -3835,6 +4230,7 @@ xge_hal_device_mcast_enable(xge_hal_device_t *hldev)
{
u64 val64;
xge_hal_pci_bar0_t *bar0;
+ int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
if (hldev == NULL)
return XGE_HAL_ERR_INVALID_DEVICE;
@@ -3842,6 +4238,9 @@ xge_hal_device_mcast_enable(xge_hal_device_t *hldev)
if (hldev->mcast_refcnt)
return XGE_HAL_OK;
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
+
hldev->mcast_refcnt = 1;
bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
@@ -3855,8 +4254,7 @@ xge_hal_device_mcast_enable(xge_hal_device_t *hldev)
&bar0->rmac_addr_data1_mem);
val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(
- XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET);
+ XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->rmac_addr_cmd_mem);
@@ -3887,6 +4285,7 @@ xge_hal_device_mcast_disable(xge_hal_device_t *hldev)
{
u64 val64;
xge_hal_pci_bar0_t *bar0;
+ int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
if (hldev == NULL)
return XGE_HAL_ERR_INVALID_DEVICE;
@@ -3894,6 +4293,9 @@ xge_hal_device_mcast_disable(xge_hal_device_t *hldev)
if (hldev->mcast_refcnt == 0)
return XGE_HAL_OK;
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
+
hldev->mcast_refcnt = 0;
bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
@@ -3908,8 +4310,7 @@ xge_hal_device_mcast_disable(xge_hal_device_t *hldev)
val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
- XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(
- XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET);
+ XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->rmac_addr_cmd_mem);
@@ -3958,7 +4359,7 @@ xge_hal_device_promisc_enable(xge_hal_device_t *hldev)
hldev->is_promisc = 1;
xge_debug_device(XGE_TRACE,
- "mac_cfg 0x%llx: promisc enabled",
+ "mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled",
(unsigned long long)val64);
}
}
@@ -3997,7 +4398,7 @@ xge_hal_device_promisc_disable(xge_hal_device_t *hldev)
hldev->is_promisc = 0;
xge_debug_device(XGE_TRACE,
- "mac_cfg 0x%llx: promisc disabled",
+ "mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled",
(unsigned long long)val64);
}
}
@@ -4267,7 +4668,7 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
int total_dram_size, ring_auto_dram_cfg, left_dram_size;
int total_dram_size_max = 0;
- xge_debug_device(XGE_TRACE, "device 0x%llx is initializing",
+ xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is initializing",
(unsigned long long)(ulong_t)hldev);
/* sanity check */
@@ -4307,6 +4708,9 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
hldev->irqh = attr->irqh;
hldev->cfgh = attr->cfgh;
+ /* set initial bimodal timer for bimodal adaptive schema */
+ hldev->bimodal_timer_val_us = hldev->config.bimodal_timer_lo_us;
+
hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh,
g_xge_hal_driver->config.queue_size_initial,
g_xge_hal_driver->config.queue_size_max,
@@ -4334,6 +4738,15 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
xge_list_init(&hldev->free_channels);
xge_list_init(&hldev->fifo_channels);
xge_list_init(&hldev->ring_channels);
+#ifdef XGEHAL_RNIC
+ xge_list_init(&hldev->sq_channels);
+ xge_list_init(&hldev->hrq_channels);
+ xge_list_init(&hldev->hcq_channels);
+ xge_list_init(&hldev->lrq_channels);
+ xge_list_init(&hldev->lcq_channels);
+ xge_list_init(&hldev->umq_channels);
+ xge_list_init(&hldev->dmq_channels);
+#endif
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
/* fixups for xena */
@@ -4347,6 +4760,12 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
xge_hal_device_terminate(hldev);
return status;
}
+ if (hldev->config.bimodal_interrupts == 1) {
+ xge_hal_device_terminate(hldev);
+ return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED;
+ } else if (hldev->config.bimodal_interrupts ==
+ XGE_HAL_DEFAULT_USE_HARDCODE)
+ hldev->config.bimodal_interrupts = 0;
} else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
/* fixups for herc */
total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC;
@@ -4355,6 +4774,9 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
xge_hal_device_terminate(hldev);
return status;
}
+ if (hldev->config.bimodal_interrupts ==
+ XGE_HAL_DEFAULT_USE_HARDCODE)
+ hldev->config.bimodal_interrupts = 1;
} else {
xge_debug_device(XGE_ERR,
"detected unknown device_id 0x%x", hldev->device_id);
@@ -4362,6 +4784,18 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
return XGE_HAL_ERR_BAD_DEVICE_ID;
}
+#ifdef XGEHAL_RNIC
+
+ if(__hal_blockpool_create(hldev,&hldev->block_pool,
+ XGE_HAL_BLOCKPOOL_SIZE) != XGE_HAL_OK) {
+ xge_debug_device(XGE_ERR,
+ "block pool: __hal_blockpool_create failed");
+ xge_hal_device_terminate(hldev);
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+#endif
+
/* allocate and initialize FIFO types of channels according to
* configuration */
for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
@@ -4504,8 +4938,7 @@ xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
xge_hal_device_terminate(hldev);
return status;
}
-
- hldev->dump_buf = xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE);
+ hldev->dump_buf=(char*)xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE);
if (hldev->dump_buf == NULL) {
xge_debug_device(XGE_ERR,
"__hal_device_hw_initialize failed");
@@ -4570,7 +5003,7 @@ xge_hal_device_terminate(xge_hal_device_t *hldev)
xge_os_spin_lock_destroy_irq(&hldev->xena_post_lock, hldev->pdev);
#endif
- xge_debug_device(XGE_TRACE, "device %llx is terminating",
+ xge_debug_device(XGE_TRACE, "device "XGE_OS_LLXFMT" is terminating",
(unsigned long long)(ulong_t)hldev);
xge_assert(xge_list_is_empty(&hldev->fifo_channels));
@@ -4612,6 +5045,7 @@ xge_hal_device_terminate(xge_hal_device_t *hldev)
XGE_HAL_DUMP_BUF_SIZE);
hldev->dump_buf = NULL;
}
+
}
/**
@@ -4647,9 +5081,10 @@ xge_hal_device_handle_tcode (xge_hal_channel_h channelh,
#if defined(XGE_HAL_DEBUG_BAD_TCODE)
xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
- xge_os_printf("%llx:%llx:%llx:%llx",
- txdp->control_1, txdp->control_2, txdp->buffer_pointer,
- txdp->host_control);
+ xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
+ XGE_OS_LLXFMT":"XGE_OS_LLXFMT,
+ txdp->control_1, txdp->control_2, txdp->buffer_pointer,
+ txdp->host_control);
#endif
/* handle link "down" immediately without going through
@@ -4685,9 +5120,10 @@ xge_hal_device_handle_tcode (xge_hal_channel_h channelh,
#if defined(XGE_HAL_DEBUG_BAD_TCODE)
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
- xge_os_printf("%llx:%llx:%llx:%llx", rxdp->control_1,
- rxdp->control_2, rxdp->buffer0_ptr,
- rxdp->host_control);
+ xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT
+ ":"XGE_OS_LLXFMT, rxdp->control_1,
+ rxdp->control_2, rxdp->buffer0_ptr,
+ rxdp->host_control);
#endif
if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) {
hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
@@ -4767,7 +5203,7 @@ void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us,
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
val64, &bar0->scheduled_int_ctrl);
- xge_debug_device(XGE_TRACE, "sched_timer 0x%llx: %s",
+ xge_debug_device(XGE_TRACE, "sched_timer 0x"XGE_OS_LLXFMT": %s",
(unsigned long long)val64,
interval ? "enabled" : "disabled");
}
@@ -5659,9 +6095,17 @@ xge_hal_status_e
xge_hal_lro_init(u32 lro_scale,
xge_hal_device_t *hldev)
{
- int i;
- for(i = 0; i < XGE_HAL_MAX_LRO_SESSIONS; i++)
- hldev->g_lro_pool[i].in_use = 0;
+ xge_os_memzero(hldev->lro_pool,
+ sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS);
+
+ if (hldev->config.lro_sg_size == XGE_HAL_DEFAULT_USE_HARDCODE)
+ hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE;
+
+ if (hldev->config.lro_frm_len == XGE_HAL_DEFAULT_USE_HARDCODE)
+ hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN;
+
+ hldev->lro_next_idx = 0;
+ hldev->lro_recent = NULL;
return XGE_HAL_OK;
}
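
The XGE_HAL_DEFAULT_USE_HARDCODE sentinel recurs throughout this patch (lro_sg_size, lro_frm_len, pci_freq_mherz, bimodal_interrupts): a knob the user never set is resolved into a built-in default at init time. The pattern in miniature, as a hypothetical helper:

    /* Resolve "left at default" config values into hard-coded defaults. */
    static int
    resolve_default(int configured, int hardcoded)
    {
            return (configured == XGE_HAL_DEFAULT_USE_HARDCODE ?
                hardcoded : configured);
    }
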
@@ -5670,7 +6114,7 @@ xge_hal_lro_init(u32 lro_scale,
/**
* xge_hal_device_poll - HAL device "polling" entry point.
- * @hldev: HAL device.
+ * @devh: HAL device.
*
* HAL "polling" entry point. Note that this is part of HAL public API.
* Upper-Layer driver _must_ periodically poll HAL via
@@ -5713,6 +6157,21 @@ _again:
hldev->magic != XGE_HAL_MAGIC)
return;
+ if(hldev->stats.sw_dev_err_stats.xpak_counter.tick_period < 72000)
+ {
+ /*
+		 * Wait for an hour
+ */
+ hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++;
+ } else {
+ /*
+		 * Log error messages for excess temperature,
+		 * bias current and laser output for three cycles
+ */
+ __hal_updt_stats_xpak(hldev);
+ hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0;
+ }
+
if (!queue_has_critical_event)
queue_has_critical_event =
__queue_get_reset_critical(hldev->queueh);
@@ -5727,8 +6186,8 @@ _again:
break;
xge_debug_queue(XGE_TRACE,
- "queueh 0x%llx consumed event: %d ctxt 0x%llx",
- (u64)(ulong_t)hldev->queueh, item->event_type,
+ "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x"
+ XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type,
(u64)(ulong_t)item->context);
if (!hldev->is_initialized ||
@@ -5760,7 +6219,7 @@ _again:
case XGE_HAL_EVENT_TARGETABORT:
case XGE_HAL_EVENT_SLOT_FREEZE: {
void *item_data = xge_queue_item_data(item);
- int event_type = item->event_type;
+ xge_hal_event_e event_type = item->event_type;
u64 val64 = *((u64*)item_data);
if (event_type != XGE_HAL_EVENT_SLOT_FREEZE)
@@ -6041,7 +6500,7 @@ xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key)
/**
* xge_hal_device_is_closed - Device is closed
*
- * @hldev: HAL device handle.
+ * @devh: HAL device handle.
*/
int
xge_hal_device_is_closed(xge_hal_device_h devh)
@@ -6055,3 +6514,143 @@ xge_hal_device_is_closed(xge_hal_device_h devh)
return 0;
}
+xge_hal_status_e
+xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index)
+{
+ u64 val64;
+ int section;
+ int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
+
+ xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
+
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
+
+ if ( index >= max_addr )
+ return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
+
+ /*
+ * Calculate the section value
+ */
+ section = index / 32;
+
+ xge_debug_device(XGE_TRACE, "the Section value is %d \n", section);
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_mac_cfg);
+ switch(section)
+ {
+ case 0:
+ val64 |= XGE_HAL_RTS_MAC_SECT0_EN;
+ break;
+ case 1:
+ val64 |= XGE_HAL_RTS_MAC_SECT1_EN;
+ break;
+ case 2:
+ val64 |= XGE_HAL_RTS_MAC_SECT2_EN;
+ break;
+ case 3:
+ val64 |= XGE_HAL_RTS_MAC_SECT3_EN;
+ break;
+ case 4:
+ val64 |= XGE_HAL_RTS_MAC_SECT4_EN;
+ break;
+ case 5:
+ val64 |= XGE_HAL_RTS_MAC_SECT5_EN;
+ break;
+ case 6:
+ val64 |= XGE_HAL_RTS_MAC_SECT6_EN;
+ break;
+ case 7:
+ val64 |= XGE_HAL_RTS_MAC_SECT7_EN;
+ break;
+ default:
+ xge_debug_device(XGE_ERR, "Invalid Section value %d \n"
+ , section);
+ }
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
+ val64, &bar0->rts_mac_cfg);
+ return XGE_HAL_OK;
+}
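
Because section = index / 32 selects one of eight consecutive enable flags, the switch above could equally be a table lookup; a hypothetical equivalent, not part of the patch:

    /* Table-driven equivalent of the SECT*_EN switch above (sketch). */
    static u64
    section_enable_bit(int section)
    {
            static const u64 sect_en[8] = {
                    XGE_HAL_RTS_MAC_SECT0_EN, XGE_HAL_RTS_MAC_SECT1_EN,
                    XGE_HAL_RTS_MAC_SECT2_EN, XGE_HAL_RTS_MAC_SECT3_EN,
                    XGE_HAL_RTS_MAC_SECT4_EN, XGE_HAL_RTS_MAC_SECT5_EN,
                    XGE_HAL_RTS_MAC_SECT6_EN, XGE_HAL_RTS_MAC_SECT7_EN,
            };

            return ((section >= 0 && section < 8) ? sect_en[section] : 0);
    }
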
+
+#ifdef XGEHAL_RNIC
+
+static u8 __hal_device_free_bit[256] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 7, 8 };
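
__hal_device_free_bit[b] gives the index, counted from the most significant bit, of the first clear bit in byte b, with 8 meaning the byte is full (b == 0xff). A sketch that computes the same value directly, handy for sanity-checking the table:

    /* Index (from MSB) of the first clear bit in a byte; 8 if none.
     * Equivalent to the __hal_device_free_bit lookup table above. */
    static unsigned int
    first_free_bit(unsigned int b)
    {
            unsigned int fb;

            for (fb = 0; fb < 8; fb++) {
                    if (!(b & (0x80u >> fb)))
                            break;
            }
            return (fb);
    }
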
+
+xge_hal_status_e
+__hal_device_oid_allocate(xge_hal_rnic_oid_db_t *objdb, u32 *objid)
+{
+ u32 i;
+ u32 fb;
+
+ if(objid == NULL)
+ return XGE_HAL_FAIL;
+
+ for( i = objdb->id_next_byte; i < sizeof(objdb->id_map); i++ )
+ {
+ fb = __hal_device_free_bit[objdb->id_map[i]];
+
+ if(fb < 8){
+ *objid = XGE_HAL_RNIC_OID_DB_OID_GET((i*8+fb),
+ objdb->id_inst_number);
+ objdb->id_next_byte = i;
+ objdb->id_map[i] |= (0x80 >> fb);
+ return XGE_HAL_OK;
+ }
+ }
+
+ objdb->id_inst_number++;
+
+ for( i = 0; i < objdb->id_next_byte; i++ )
+ {
+ fb = __hal_device_free_bit[objdb->id_map[i]];
+
+ if(fb < 8){
+ *objid = XGE_HAL_RNIC_OID_DB_OID_GET((i*8+fb),
+ objdb->id_inst_number);
+ objdb->id_next_byte = i;
+ objdb->id_map[i] |= (0x80 >> fb);
+ return XGE_HAL_OK;
+ }
+ }
+
+ return XGE_HAL_FAIL;
+}
+
+xge_hal_status_e
+__hal_device_oid_free(xge_hal_rnic_oid_db_t *objdb, u32 objid)
+{
+ u32 i;
+ u32 fb;
+
+ i = XGE_HAL_RNIC_OID_DB_SID_GET(objid) / 8;
+ fb = XGE_HAL_RNIC_OID_DB_SID_GET(objid) - i * 8;
+
+ if( i >= sizeof(objdb->id_map) )
+ return XGE_HAL_FAIL;
+
+ objdb->id_map[i] &= ~(0x80 >> fb);
+
+ return XGE_HAL_OK;
+}
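
Together the two routines form a wrapping bitmap allocator: allocation scans from id_next_byte to the end of id_map, then bumps id_inst_number and wraps to the front; freeing just clears the bit. A hypothetical usage sketch:

    /* Hypothetical caller of the OID bitmap allocator above. */
    static void
    oid_example(xge_hal_rnic_oid_db_t *db)
    {
            u32 id;

            if (__hal_device_oid_allocate(db, &id) != XGE_HAL_OK)
                    return; /* bitmap exhausted */

            /* ... use the object id ... */

            (void) __hal_device_oid_free(db, id);
    }
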
+
+#endif
+
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-driver.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-driver.c
index e084617f87..e04dcf85fa 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-driver.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-driver.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-driver.c
- *
- * Description: HAL driver object functionality
*
- * Created: 10 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#include "xgehal-driver.h"
@@ -221,7 +212,7 @@ xge_hal_driver_terminate(void)
for (i=0; i<g_malloc_cnt; i++) {
if (g_malloc_arr[i].ptr != NULL) {
xge_os_printf("OSPAL: memory leak detected at "
- "%s:%d:%llx:%d",
+ "%s:%d:"XGE_OS_LLXFMT":%d",
g_malloc_arr[i].file,
g_malloc_arr[i].line,
(unsigned long long)(ulong_t)
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-fifo-fp.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-fifo-fp.c
index 14fa996b7a..a8fe1bf590 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-fifo-fp.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-fifo-fp.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-fifo-fp.c
- *
- * Description: Tx fifo object functionality (fast path)
*
- * Created: 10 June 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifdef XGE_DEBUG_FP
@@ -116,18 +107,22 @@ __hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
xge_os_pio_mem_write64(fifo->channel.pdev, fifo->channel.regh1,
ctrl, &hw_pair->list_control);
- xge_debug_fifo(XGE_TRACE, "posted txdl 0x%llx ctrl 0x%llx "
- "into 0x%llx", (unsigned long long)txdl_priv->dma_addr,
+ xge_debug_fifo(XGE_TRACE, "posted txdl 0x"XGE_OS_LLXFMT" ctrl 0x"XGE_OS_LLXFMT" "
+ "into 0x"XGE_OS_LLXFMT"", (unsigned long long)txdl_priv->dma_addr,
(unsigned long long)ctrl,
(unsigned long long)(ulong_t)&hw_pair->txdl_pointer);
#ifdef XGE_HAL_FIFO_DUMP_TXD
- xge_os_printf("%llx:%llx:%llx:%llx dma %llx",
- txdp->control_1, txdp->control_2, txdp->buffer_pointer,
- txdp->host_control, txdl_priv->dma_addr);
+ xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
+ XGE_OS_LLXFMT" dma "XGE_OS_LLXFMT,
+ txdp->control_1, txdp->control_2, txdp->buffer_pointer,
+ txdp->host_control, txdl_priv->dma_addr);
#endif
fifo->channel.stats.total_posts++;
+ fifo->channel.usage_cnt++;
+ if (fifo->channel.stats.usage_max < fifo->channel.usage_cnt)
+ fifo->channel.stats.usage_max = fifo->channel.usage_cnt;
}
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
@@ -282,7 +277,7 @@ xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
#endif
while(alloc_frags < frags) {
status = __hal_channel_dtr_alloc(channelh,
- (xge_hal_dtr_h *)&next_txdp);
+ (xge_hal_dtr_h *)(void*)&next_txdp);
if (status != XGE_HAL_OK){
xge_debug_fifo(XGE_ERR,
"failed to allocate linked fragments rc %d",
@@ -291,12 +286,12 @@ xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
if (*dtrh) {
xge_assert(alloc_frags/max_frags);
__hal_fifo_txdl_restore_many(channelh,
- *dtrh, alloc_frags/max_frags);
+ (xge_hal_fifo_txd_t *) *dtrh, alloc_frags/max_frags);
}
if (dang_dtrh) {
xge_assert(dang_frags/max_frags);
__hal_fifo_txdl_restore_many(channelh,
- dang_dtrh, dang_frags/max_frags);
+ (xge_hal_fifo_txd_t *) dang_dtrh, dang_frags/max_frags);
}
break;
}
@@ -336,7 +331,7 @@ xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
xge_debug_fifo(XGE_TRACE,
"dangling dtrh %p, linked with dtrh %p",
*dtrh, next_txdp);
- next_txdl_priv->dang_txdl = *dtrh;
+ next_txdl_priv->dang_txdl = (xge_hal_fifo_txd_t *) *dtrh;
next_txdl_priv->dang_frags = alloc_frags;
alloc_frags = max_frags;
*dtrh = next_txdp;
@@ -361,6 +356,7 @@ xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
txdl_priv->align_used_frags = 0;
txdl_priv->frags = 0;
+ txdl_priv->bytes_sent = 0;
txdl_priv->alloc_frags = alloc_frags;
/* reset TxD0 */
txdp->control_1 = txdp->control_2 = 0;
@@ -442,6 +438,7 @@ xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
txdl_priv->dang_txdl = NULL;
txdl_priv->dang_frags = 0;
txdl_priv->next_txdl_priv = NULL;
+ txdl_priv->bytes_sent = 0;
/* reset TxD0 */
txdp->control_1 = txdp->control_2 = 0;
@@ -651,8 +648,8 @@ xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh,
xge_hal_dtr_h *dtrh, u8 *t_code)
{
xge_hal_fifo_txd_t *txdp;
-#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
xge_hal_fifo_txdl_priv_t *txdl_priv;
#endif
@@ -688,6 +685,9 @@ xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh,
/* see XGE_HAL_SET_TXD_T_CODE() above.. */
xge_assert(*t_code != XGE_HAL_TXD_T_CODE_UNUSED_5);
+ if (fifo->channel.usage_cnt > 0)
+ fifo->channel.usage_cnt--;
+
return XGE_HAL_OK;
}
@@ -769,6 +769,8 @@ xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr)
else
__hal_channel_dtr_free(channelh, dtr);
+ ((xge_hal_channel_t *)channelh)->poll_bytes += txdl_priv->bytes_sent;
+
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
__hal_fifo_txdl_priv(dtr)->allocated = 0;
#endif
@@ -787,7 +789,7 @@ xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr)
* in fifo descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
- * @frag_idx: Index of the data buffer in the caller's scatter-gather list 
+ * @frag_idx: Index of the data buffer in the caller's scatter-gather list
* (of buffers).
* @vaddr: Virtual address of the data buffer.
* @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
@@ -867,6 +869,7 @@ xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
txdp->control_1 |= XGE_HAL_TXD_BUFFER0_SIZE(misaligned_size);
+ txdl_priv->bytes_sent += misaligned_size;
fifo->channel.stats.total_buffers++;
txdl_priv->frags++;
txdl_priv->align_used_frags++;
@@ -890,6 +893,7 @@ xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
misaligned_size;
txdp->control_1 =
XGE_HAL_TXD_BUFFER0_SIZE(remaining_size);
+ txdl_priv->bytes_sent += remaining_size;
txdp->control_2 = 0;
fifo->channel.stats.total_buffers++;
txdl_priv->frags++;
@@ -923,21 +927,22 @@ xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
{
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
xge_hal_fifo_txdl_priv_t *txdl_priv;
+ ptrdiff_t used;
xge_assert(size > 0);
txdl_priv = __hal_fifo_txdl_priv(dtrh);
- if (txdl_priv->align_dma_offset + (unsigned int)size > (unsigned int)fifo->config->alignment_size)
- return XGE_HAL_ERR_OUT_ALIGNED_FRAGS; /* FIXME */
-
- if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
+ used = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
+ used += txdl_priv->align_dma_offset;
+ if (used + (unsigned int)size > (unsigned int)fifo->align_size)
return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
- }
xge_os_memcpy((char*)txdl_priv->align_vaddr_start +
txdl_priv->align_dma_offset, vaddr, size);
+ fifo->channel.stats.copied_frags++;
+
txdl_priv->align_dma_offset += size;
return XGE_HAL_OK;
}
@@ -966,11 +971,11 @@ xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
xge_hal_fifo_txd_t *txdp;
ptrdiff_t prev_boff;
+ xge_assert(frag_idx < fifo->config->max_frags);
+
txdl_priv = __hal_fifo_txdl_priv(dtrh);
txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;
- xge_assert(frag_idx < fifo->config->max_frags);
-
if (frag_idx != 0) {
txdp->control_1 = txdp->control_2 = 0;
}
@@ -979,7 +984,9 @@ xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
txdp->control_1 |=
XGE_HAL_TXD_BUFFER0_SIZE(txdl_priv->align_dma_offset);
+ txdl_priv->bytes_sent += (unsigned int)txdl_priv->align_dma_offset;
fifo->channel.stats.total_buffers++;
+ fifo->channel.stats.copied_buffers++;
txdl_priv->frags++;
txdl_priv->align_used_frags++;
@@ -1003,7 +1010,7 @@ xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
* descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
- * @frag_idx: Index of the data buffer in the caller's scatter-gather list 
+ * @frag_idx: Index of the data buffer in the caller's scatter-gather list
* (of buffers).
* @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
* @size: Size of the data buffer (in bytes).
@@ -1050,6 +1057,7 @@ xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
txdp->buffer_pointer = (u64)dma_pointer;
txdp->control_1 |= XGE_HAL_TXD_BUFFER0_SIZE(size);
+ txdl_priv->bytes_sent += size;
fifo->channel.stats.total_buffers++;
txdl_priv->frags++;
}
@@ -1127,4 +1135,28 @@ xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh, u16 vlan_tag)
txdp->control_2 |= XGE_HAL_TXD_VLAN_TAG(vlan_tag);
}
+/**
+ * xge_hal_fifo_is_next_dtr_completed - Checks if the next dtr is completed
+ * @channelh: Channel handle.
+ */
+__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
+xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh)
+{
+ xge_hal_fifo_txd_t *txdp;
+ xge_hal_dtr_h dtrh;
+
+ __hal_channel_dtr_try_complete(channelh, &dtrh);
+ txdp = (xge_hal_fifo_txd_t *)dtrh;
+ if (txdp == NULL) {
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+ }
+
+ /* check whether host owns it */
+ if ( !(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA) ) {
+ xge_assert(txdp->host_control!=0);
+ return XGE_HAL_OK;
+ }
+ /* no more completions */
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+}
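A sketch of how a ULD might use the new predicate to drain completions without a premature fetch; xge_hal_fifo_dtr_next_completed() and xge_hal_fifo_dtr_free() are assumed from the HAL's existing completion API:

	xge_hal_dtr_h dtrh;
	u8 t_code;

	while (xge_hal_fifo_is_next_dtr_completed(channelh) == XGE_HAL_OK) {
		if (xge_hal_fifo_dtr_next_completed(channelh, &dtrh,
		    &t_code) != XGE_HAL_OK)
			break;
		/* t_code (transfer-code) handling elided */
		xge_hal_fifo_dtr_free(channelh, dtrh);
	}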
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-fifo.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-fifo.c
index 5985b7d893..92b7512b0a 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-fifo.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-fifo.c
@@ -17,16 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/* Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-fifo.c
- *
- * Description: fifo object implementation
*
- * Created: 10 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#include "xgehal-fifo.h"
@@ -48,9 +40,11 @@ __hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh,
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata;
xge_assert(item);
- txdl_priv = __hal_mempool_item_priv(mempoolh, memblock_index,
- item, &memblock_item_idx);
-
+ txdl_priv = (xge_hal_fifo_txdl_priv_t *) \
+ __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
+ memblock_index,
+ item,
+ &memblock_item_idx);
xge_assert(txdl_priv);
/* pre-format HAL's TxDL's private */
@@ -82,8 +76,7 @@ __hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh,
xge_debug_mm(XGE_ERR,
"align buffer[%d] %d bytes, status %d",
index,
- fifo->config->alignment_size *
- fifo->config->max_aligned_frags,
+ fifo->align_size,
status);
return status;
}
@@ -119,8 +112,11 @@ __hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh,
xge_assert(item);
- txdl_priv = __hal_mempool_item_priv(mempoolh, memblock_index,
- item, &memblock_item_idx);
+ txdl_priv = (xge_hal_fifo_txdl_priv_t *) \
+ __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
+ memblock_index,
+ item,
+ &memblock_item_idx);
xge_assert(txdl_priv);
#ifdef XGE_HAL_ALIGN_XMIT
@@ -129,8 +125,7 @@ __hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh,
xge_os_dma_unmap(fifo->channel.pdev,
txdl_priv->align_dma_handle,
txdl_priv->align_dma_addr,
- fifo->config->alignment_size *
- fifo->config->max_aligned_frags,
+ fifo->align_size,
XGE_OS_DMA_DIR_TODEVICE);
txdl_priv->align_dma_addr = 0;
@@ -139,8 +134,7 @@ __hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh,
if (txdl_priv->align_vaddr != NULL) {
xge_os_dma_free(fifo->channel.pdev,
txdl_priv->align_vaddr,
- fifo->config->alignment_size *
- fifo->config->max_aligned_frags,
+ fifo->align_size,
&txdl_priv->align_dma_acch,
&txdl_priv->align_dma_handle);
@@ -188,6 +182,9 @@ __hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
}
#endif
+ fifo->align_size =
+ fifo->config->alignment_size * fifo->config->max_aligned_frags;
+
/* Initializing the BAR1 address as the start of
* the FIFO queue pointer and as a location of FIFO control
* word. */
@@ -266,7 +263,7 @@ __hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
}
status = __hal_channel_initialize(channelh, attr,
- __hal_mempool_items_arr(fifo->mempool),
+ (void **) __hal_mempool_items_arr(fifo->mempool),
queue->initial, queue->max,
fifo->config->reserve_threshold);
if (status != XGE_HAL_OK) {
@@ -348,7 +345,6 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh)
u64* tx_fifo_partitions[4];
u64* tx_fifo_wrr[5];
u64 val64, part0;
- int priority = 0;
int i;
/* Tx DMA Initialization */
@@ -385,9 +381,8 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh)
int reg_half = i % 2;
int reg_num = i / 2;
- priority = 0;
-
if (hldev->config.fifo.queue[i].configured) {
+ int priority = hldev->config.fifo.queue[i].priority;
val64 |=
vBIT((hldev->config.fifo.queue[i].max-1),
(((reg_half) * 32) + 19),
@@ -407,9 +402,9 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh)
val64, tx_fifo_partitions[reg_num]);
xge_debug_fifo(XGE_TRACE,
"fifo partition_%d at: "
- "0x%llx is: 0x%llx", reg_num,
- (unsigned long long)(ulong_t)
- tx_fifo_partitions[reg_num],
+ "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
+ reg_num, (unsigned long long)(ulong_t)
+ tx_fifo_partitions[reg_num],
(unsigned long long)val64);
}
val64 = 0;
@@ -423,7 +418,7 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh)
__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(part0>>32),
tx_fifo_partitions[0]);
xge_debug_fifo(XGE_TRACE, "fifo partition_0 at: "
- "0x%llx is: 0x%llx",
+ "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
(unsigned long long)(ulong_t)
tx_fifo_partitions[0],
(unsigned long long) part0);
@@ -457,8 +452,7 @@ __hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
xge_os_dma_unmap(fifo->channel.pdev,
txdl_priv->align_dma_handle,
txdl_priv->align_dma_addr,
- fifo->config->alignment_size *
- fifo->config->max_aligned_frags,
+ fifo->align_size,
XGE_OS_DMA_DIR_TODEVICE);
txdl_priv->align_dma_addr = 0;
@@ -467,8 +461,7 @@ __hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
if (txdl_priv->align_vaddr != NULL) {
xge_os_dma_free(fifo->channel.pdev,
txdl_priv->align_vaddr,
- fifo->config->alignment_size *
- fifo->config->max_aligned_frags,
+ fifo->align_size,
&txdl_priv->align_dma_acch,
&txdl_priv->align_dma_handle);
@@ -490,8 +483,7 @@ __hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
/* allocate alignment DMA-buffer */
txdl_priv->align_vaddr = xge_os_dma_malloc(fifo->channel.pdev,
- fifo->config->alignment_size *
- fifo->config->max_aligned_frags,
+ fifo->align_size,
XGE_OS_DMA_CACHELINE_ALIGNED |
XGE_OS_DMA_STREAMING,
&txdl_priv->align_dma_handle,
@@ -503,8 +495,7 @@ __hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
/* map it */
txdl_priv->align_dma_addr = xge_os_dma_map(fifo->channel.pdev,
txdl_priv->align_dma_handle, txdl_priv->align_vaddr,
- fifo->config->alignment_size *
- fifo->config->max_aligned_frags,
+ fifo->align_size,
XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING);
if (txdl_priv->align_dma_addr == XGE_OS_INVALID_DMA_ADDR) {
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-mgmt.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-mgmt.c
index 59912f0f70..b07c025cf4 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-mgmt.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-mgmt.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
*
- * FileName : xgehal-mgmt.c
- *
- * Description: Xframe-family management facility implementation
- *
- * Created: 1 September 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#include "xgehal-mgmt.h"
@@ -99,6 +90,9 @@ xge_hal_mgmt_about(xge_hal_device_h devh, xge_hal_mgmt_about_info_t *about_info,
xge_os_strcpy(about_info->ll_fix, XGELL_VERSION_FIX);
xge_os_strcpy(about_info->ll_build, XGELL_VERSION_BUILD);
+ about_info->transponder_temperature =
+ xge_hal_read_xfp_current_temp(devh);
+
return XGE_HAL_OK;
}
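With this change the about-info block carries the live transponder temperature; a hedged usage sketch (the printing routine is illustrative):

	xge_hal_mgmt_about_info_t about_info;

	if (xge_hal_mgmt_about(devh, &about_info,
	    sizeof(about_info)) == XGE_HAL_OK) {
		xge_os_printf("XFP temperature: %d C",
		    about_info.transponder_temperature);
	}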
@@ -246,7 +240,17 @@ xge_hal_mgmt_hw_stats(xge_hal_device_h devh, xge_hal_mgmt_hw_stats_t *hw_stats,
}
/**
- * FIXME: document
+ * xge_hal_mgmt_hw_stats_off - TBD.
+ * @devh: HAL device handle.
+ * @off: TBD
+ * @size: TBD
+ * @out: TBD
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_ERR_INVALID_DEVICE - Device is not valid.
+ * XGE_HAL_ERR_VERSION_CONFLICT - Version does not match.
+ *
+ * See also: xge_hal_mgmt_sw_stats().
*/
xge_hal_status_e
xge_hal_mgmt_hw_stats_off(xge_hal_device_h devh, int off, int size, char *out)
@@ -308,6 +312,9 @@ xge_hal_mgmt_sw_stats(xge_hal_device_h devh, xge_hal_mgmt_sw_stats_t *sw_stats,
return XGE_HAL_INF_STATS_IS_NOT_READY;
}
+ /* Updating xpak stats value */
+ __hal_updt_stats_xpak(hldev);
+
xge_os_memcpy(sw_stats, &hldev->stats.sw_dev_err_stats,
sizeof(xge_hal_stats_sw_err_t));
@@ -409,7 +416,7 @@ xge_hal_mgmt_channel_stats(xge_hal_channel_h channelh,
{
xge_hal_status_e status;
xge_hal_stats_channel_info_t *channel_info;
- xge_hal_channel_t *channel = channelh;
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
if (size != sizeof(xge_hal_stats_channel_info_t)) {
return XGE_HAL_ERR_VERSION_CONFLICT;
@@ -421,8 +428,7 @@ xge_hal_mgmt_channel_stats(xge_hal_channel_h channelh,
}
if (xge_hal_device_check_id(channel->devh) == XGE_HAL_CARD_HERC) {
- __hal_update_ring_bump(channel->devh, channel->post_qid,
- channel_info);
+ __hal_update_ring_bump((xge_hal_device_t *)channel->devh,
+ channel->post_qid, channel_info);
}
xge_os_memcpy(channel_stats, channel_info,
@@ -757,15 +763,14 @@ xge_hal_flick_link_led(xge_hal_device_h devh)
* Returns: -1 on failure, 0 on success.
*/
xge_hal_status_e
-xge_hal_read_eeprom(xge_hal_device_h devh, xge_hal_status_e off, u32* data)
+xge_hal_read_eeprom(xge_hal_device_h devh, int off, u32* data)
{
xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
- int ret = XGE_HAL_FAIL;
+ xge_hal_status_e ret = XGE_HAL_FAIL;
u32 exit_cnt = 0;
u64 val64;
xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
-
val64 = XGE_HAL_I2C_CONTROL_DEV_ID(XGE_DEV_ID) |
XGE_HAL_I2C_CONTROL_ADDR(off) |
XGE_HAL_I2C_CONTROL_BYTE_CNT(0x3) |
@@ -810,7 +815,8 @@ xge_hal_status_e
xge_hal_write_eeprom(xge_hal_device_h devh, int off, u32 data, int cnt)
{
xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
- int exit_cnt = 0, ret = XGE_HAL_FAIL;
+ xge_hal_status_e ret = XGE_HAL_FAIL;
+ u32 exit_cnt = 0;
u64 val64;
xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
@@ -924,7 +930,7 @@ xge_hal_register_test(xge_hal_device_h devh, u64 *data)
* 0 on success.
*/
xge_hal_status_e
-xge_hal_rldram_test(xge_hal_device_h devh, uint64_t * data)
+xge_hal_rldram_test(xge_hal_device_h devh, u64 *data)
{
xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
@@ -1042,6 +1048,246 @@ xge_hal_rldram_test(xge_hal_device_h devh, uint64_t * data)
}
/*
+ * xge_hal_pma_loopback - Enable or disable PMA loopback
+ * @devh: HAL device handle.
+ * @enable: Boolean set to 1 to enable and 0 to disable.
+ *
+ * Enable or disable PMA loopback.
+ * Return value:
+ * 0 on success.
+ */
+xge_hal_status_e
+xge_hal_pma_loopback(xge_hal_device_h devh, int enable)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64;
+ u16 data;
+
+ /*
+ * This code is for MAC loopback.
+ * It should be enabled through another parameter.
+ */
+#if 0
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mac_cfg);
+ if ( enable )
+ {
+ val64 |= ( XGE_HAL_MAC_CFG_TMAC_LOOPBACK | XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE );
+ }
+ __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
+ (u32)(val64 >> 32), (char*)&bar0->mac_cfg);
+ xge_os_mdelay(1);
+#endif
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ xge_os_mdelay(100);
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ xge_os_mdelay(100);
+
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mdio_control);
+
+ data = (u16)XGE_HAL_MDIO_CONTROL_MMD_DATA_GET(val64);
+
+#define _HAL_LOOPBK_PMA 1
+
+ if (enable)
+ data |= 1;
+ else
+ data &= 0xfe;
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ xge_os_mdelay(100);
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DATA(data) |
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(0x0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_WRITE);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ xge_os_mdelay(100);
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(1) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(0x0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ xge_os_mdelay(100);
+
+ return XGE_HAL_OK;
+}
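Typical use brackets a self-test; a minimal sketch:

	(void) xge_hal_pma_loopback(devh, 1);	/* loop TX back to RX */
	/* ... send and verify test frames ... */
	(void) xge_hal_pma_loopback(devh, 0);	/* restore normal mode */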
+
+u16
+xge_hal_mdio_read(xge_hal_device_h devh, u32 mmd_type, u64 addr)
+{
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u64 val64 = 0x0;
+ u16 rval16 = 0x0;
+ u8 i = 0;
+
+ /* address transaction */
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ do
+ {
+ xge_os_mdelay(100);
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mdio_control);
+ if (i++ > 10)
+ {
+ break;
+ }
+ } while ((val64 & XGE_HAL_MDIO_CONTROL_MMD_CTRL(0xF)) !=
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(1));
+
+ /* Data transaction */
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_READ);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+
+ i = 0;
+
+ do
+ {
+ xge_os_mdelay(100);
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mdio_control);
+ if (i++ > 10)
+ {
+ break;
+ }
+ } while ((val64 & XGE_HAL_MDIO_CONTROL_MMD_CTRL(0xF)) !=
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(1));
+
+ rval16 = (u16)XGE_HAL_MDIO_CONTROL_MMD_DATA_GET(val64);
+
+ return rval16;
+}
+
+xge_hal_status_e
+xge_hal_mdio_write(xge_hal_device_h devh, u32 mmd_type, u64 addr, u32 value)
+{
+ u64 val64 = 0x0;
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
+ u8 i = 0;
+ /* address transaction */
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_ADDRESS);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ do
+ {
+ xge_os_mdelay(100);
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mdio_control);
+ if (i++ > 10)
+ {
+ break;
+ }
+ } while((val64 & XGE_HAL_MDIO_CONTROL_MMD_CTRL(0xF)) !=
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(1));
+
+ /* Data transaction */
+
+ val64 = 0x0;
+
+ val64 = XGE_HAL_MDIO_CONTROL_MMD_INDX_ADDR(addr) |
+ XGE_HAL_MDIO_CONTROL_MMD_DEV_ADDR(mmd_type) |
+ XGE_HAL_MDIO_CONTROL_MMD_PRT_ADDR(0) |
+ XGE_HAL_MDIO_CONTROL_MMD_DATA(value) |
+ XGE_HAL_MDIO_CONTROL_MMD_OP(XGE_HAL_MDIO_OP_WRITE);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+
+ val64 |= XGE_HAL_MDIO_CONTROL_MMD_CTRL(XGE_HAL_MDIO_CTRL_START);
+
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->mdio_control);
+ i = 0;
+
+ do
+ {
+ xge_os_mdelay(100);
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->mdio_control);
+ if (i++ > 10)
+ {
+ break;
+ }
+ } while ((val64 & XGE_HAL_MDIO_CONTROL_MMD_CTRL(0xF)) !=
+ XGE_HAL_MDIO_CONTROL_MMD_CTRL(1));
+
+ return XGE_HAL_OK;
+}
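Together the two helpers give a read-modify-write primitive over MDIO. A sketch that sets bit 0 of a PMA register (the register address and bit chosen here are illustrative assumptions, not taken from this patch):

	u16 val;

	val = xge_hal_mdio_read(devh, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x0000);
	(void) xge_hal_mdio_write(devh, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR,
	    0x0000, (u32)(val | 0x1));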
+
+/*
* xge_hal_eeprom_test - to verify that EEprom in the xena can be
programmed.
* @devh: HAL device handle.
@@ -1130,7 +1376,8 @@ xge_hal_bist_test(xge_hal_device_h devh, u64 *data)
{
xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
u8 bist = 0;
- int cnt = 0, ret = XGE_HAL_FAIL;
+ int cnt = 0;
+ xge_hal_status_e ret = XGE_HAL_FAIL;
xge_os_pci_read8(hldev->pdev, hldev->cfgh, 0x0f, &bist);
bist |= 0x40;
@@ -1236,3 +1483,232 @@ int xge_hal_setpause_data(xge_hal_device_h devh, int tx, int rx)
return 0;
}
+/**
+ * xge_hal_read_xfp_current_temp -
+ * @hldev: HAL device handle.
+ *
+ * This routine only gets the temperature for XFP modules. Also, updating the
+ * NVRAM can sometimes fail, so the reading we get may not be up to date.
+ */
+u32 xge_hal_read_xfp_current_temp(xge_hal_device_h hldev)
+{
+ u16 val_1, val_2, i = 0;
+ u32 actual;
+
+ /* First update the NVRAM table of XFP. */
+
+ (void) xge_hal_mdio_write(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8000, 0x3);
+
+
+ /* Now wait for the transfer to complete */
+ do
+ {
+ xge_os_mdelay(50); // wait 50 milliseconds
+
+ val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8000);
+
+ if ( i++ > 10 )
+ {
+ // waited 500 ms which should be plenty of time.
+ break;
+ }
+ } while ((val_1 & 0x000C) != 0x0004);
+
+ /* Now NVRAM table of XFP should be updated, so read the temp */
+ val_1 = (u8) xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8067);
+ val_2 = (u8) xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, 0x8068);
+
+ actual = ((val_1 << 8) | val_2);
+
+ if (actual >= 32768)
+ actual = actual - 65536;
+ actual = actual / 256;
+
+ return actual;
+}
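The two byte reads form a signed value scaled by 256. A worked example: val_1 = 0x1A and val_2 = 0x80 give actual = (0x1A << 8) | 0x80 = 6784, which is below 32768, so the routine returns 6784 / 256 = 26 (degrees Celsius, fraction truncated). Readings at or above 32768 are rebased by 65536 to recover negative temperatures.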
+
+/**
+ * __hal_chk_xpak_counter - check the Xpak error count and log the msg.
+ * @hldev: pointer to xge_hal_device_t structure
+ * @type: xpak stats error type
+ * @value: xpak stats value
+ *
+ * It is used to log the error message based on the xpak stats value
+ * Return value:
+ * None
+ */
+
+void __hal_chk_xpak_counter(xge_hal_device_t *hldev, int type, u32 value)
+{
+ /*
+ * If the value is high for three consecutive cycles,
+ * log an error message.
+ */
+ if(value == 3)
+ {
+ switch(type)
+ {
+ case 1:
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_temp = 0;
+
+ /*
+ * Notify the ULD on Excess Xpak temperature alarm msg
+ */
+ if (g_xge_hal_driver->uld_callbacks.xpak_alarm_log) {
+ g_xge_hal_driver->uld_callbacks.xpak_alarm_log(
+ hldev->upper_layer_info,
+ XGE_HAL_XPAK_ALARM_EXCESS_TEMP);
+ }
+ break;
+ case 2:
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_bias_current = 0;
+
+ /*
+ * Notify the ULD on Excess xpak bias current alarm msg
+ */
+ if (g_xge_hal_driver->uld_callbacks.xpak_alarm_log) {
+ g_xge_hal_driver->uld_callbacks.xpak_alarm_log(
+ hldev->upper_layer_info,
+ XGE_HAL_XPAK_ALARM_EXCESS_BIAS_CURRENT);
+ }
+ break;
+ case 3:
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_laser_output = 0;
+
+ /*
+ * Notify the ULD on Excess Xpak Laser o/p power
+ * alarm msg
+ */
+ if (g_xge_hal_driver->uld_callbacks.xpak_alarm_log) {
+ g_xge_hal_driver->uld_callbacks.xpak_alarm_log(
+ hldev->upper_layer_info,
+ XGE_HAL_XPAK_ALARM_EXCESS_LASER_OUTPUT);
+ }
+ break;
+ default:
+ xge_debug_osdep(XGE_TRACE, "Incorrect XPAK Alarm "
+ "type \n");
+ }
+ }
+
+}
+
+/**
+ * __hal_updt_stats_xpak - update the Xpak error count.
+ * @hldev: pointer to xge_hal_device_t structure
+ *
+ * It is used to update the xpak stats value
+ * Return value:
+ * None
+ */
+void __hal_updt_stats_xpak(xge_hal_device_t *hldev)
+{
+ u16 val_1;
+ u64 addr;
+
+ /* Check the communication with the MDIO slave */
+ addr = 0x0000;
+ val_1 = 0x0;
+ val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr);
+ if((val_1 == 0xFFFF) || (val_1 == 0x0000))
+ {
+ xge_debug_osdep(XGE_TRACE, "ERR: MDIO slave access failed - "
+ "Returned %x\n", val_1);
+ return;
+ }
+
+ /* Check for the expected value of 2040 at PMA address 0x0000 */
+ if(val_1 != 0x2040)
+ {
+ xge_debug_osdep(XGE_TRACE, "Incorrect value at PMA address 0x0000 - ");
+ xge_debug_osdep(XGE_TRACE, "Returned: %llx- Expected: 0x2040\n", val_1);
+ return;
+ }
+
+ /* Loading the DOM register to MDIO register */
+ addr = 0xA100;
+ (void) xge_hal_mdio_write(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr, 0x0);
+ val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr);
+
+ /*
+ * Reading the Alarm flags
+ */
+ addr = 0xA070;
+ val_1 = 0x0;
+ val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr);
+ if(CHECKBIT(val_1, 0x7))
+ {
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ alarm_transceiver_temp_high++;
+ hldev->stats.sw_dev_err_stats.xpak_counter.excess_temp++;
+ __hal_chk_xpak_counter(hldev, 0x1,
+ hldev->stats.sw_dev_err_stats.xpak_counter.excess_temp);
+ } else {
+ hldev->stats.sw_dev_err_stats.xpak_counter.excess_temp = 0;
+ }
+ if(CHECKBIT(val_1, 0x6))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ alarm_transceiver_temp_low++;
+
+ if(CHECKBIT(val_1, 0x3))
+ {
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ alarm_laser_bias_current_high++;
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_bias_current++;
+ __hal_chk_xpak_counter(hldev, 0x2,
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_bias_current);
+ } else {
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_bias_current = 0;
+ }
+ if(CHECKBIT(val_1, 0x2))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ alarm_laser_bias_current_low++;
+
+ if(CHECKBIT(val_1, 0x1))
+ {
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ alarm_laser_output_power_high++;
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_laser_output++;
+ __hal_chk_xpak_counter(hldev, 0x3,
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_laser_output);
+ } else {
+ hldev->stats.sw_dev_err_stats.xpak_counter.
+ excess_laser_output = 0;
+ }
+ if(CHECKBIT(val_1, 0x0))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ alarm_laser_output_power_low++;
+
+ /*
+ * Reading the warning flags
+ */
+ addr = 0xA074;
+ val_1 = 0x0;
+ val_1 = xge_hal_mdio_read(hldev, XGE_HAL_MDIO_MMD_PMA_DEV_ADDR, addr);
+ if(CHECKBIT(val_1, 0x7))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ warn_transceiver_temp_high++;
+ if(CHECKBIT(val_1, 0x6))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ warn_transceiver_temp_low++;
+ if(CHECKBIT(val_1, 0x3))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ warn_laser_bias_current_high++;
+ if(CHECKBIT(val_1, 0x2))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ warn_laser_bias_current_low++;
+ if(CHECKBIT(val_1, 0x1))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ warn_laser_output_power_high++;
+ if(CHECKBIT(val_1, 0x0))
+ hldev->stats.sw_dev_err_stats.stats_xpak.
+ warn_laser_output_power_low++;
+}
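CHECKBIT() is defined outside this hunk; the decoding above assumes the conventional single-bit test, sketched here for reference:

	/* assumed form; the real definition lives in a HAL header */
	#define CHECKBIT(value, nbit)	((value) & (1 << (nbit)))

Each set alarm bit therefore bumps both the cumulative stats_xpak counter and the consecutive-cycle xpak_counter; __hal_chk_xpak_counter() fires the ULD callback once the latter reaches three and then resets it.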
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-mgmtaux.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-mgmtaux.c
index 8b569237a7..16b727a26a 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-mgmtaux.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-mgmtaux.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-mgmtaux.c
- *
- * Description: Xframe-family management auxiliary API implementation
*
- * Created: 1 September 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#include "xgehal-mgmt.h"
@@ -219,6 +210,9 @@ xge_hal_status_e xge_hal_aux_about_read(xge_hal_device_h devh, int bufsize,
__HAL_AUX_ENTRY("ll_fix", about_info.ll_fix, "%s");
__HAL_AUX_ENTRY("ll_build", about_info.ll_build, "%s");
+ __HAL_AUX_ENTRY("transponder_temperature",
+ about_info.transponder_temperature, "%d C");
+
__HAL_AUX_ENTRY_END(bufsize, retsize);
return XGE_HAL_OK;
@@ -626,22 +620,54 @@ xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh,
return status;
}
- __HAL_AUX_ENTRY("rx_traffic_intr_cnt", devstat.rx_traffic_intr_cnt, "%u");
+ if (!hldev->config.bimodal_interrupts) {
+ __HAL_AUX_ENTRY("rx_traffic_intr_cnt",
+ devstat.rx_traffic_intr_cnt, "%u");
+ }
__HAL_AUX_ENTRY("tx_traffic_intr_cnt", devstat.tx_traffic_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("txpic_intr_cnt", devstat.txpic_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("txdma_intr_cnt", devstat.txdma_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("txmac_intr_cnt", devstat.txmac_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("txxgxs_intr_cnt", devstat.txxgxs_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("rxpic_intr_cnt", devstat.rxpic_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("rxdma_intr_cnt", devstat.rxdma_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("rxmac_intr_cnt", devstat.rxmac_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("rxxgxs_intr_cnt", devstat.rxxgxs_intr_cnt, "%u");
+ __HAL_AUX_ENTRY("mc_intr_cnt", devstat.mc_intr_cnt, "%u");
__HAL_AUX_ENTRY("not_traffic_intr_cnt",
devstat.not_traffic_intr_cnt, "%u");
__HAL_AUX_ENTRY("traffic_intr_cnt", devstat.traffic_intr_cnt, "%u");
__HAL_AUX_ENTRY("total_intr_cnt", devstat.total_intr_cnt, "%u");
__HAL_AUX_ENTRY("soft_reset_cnt", devstat.soft_reset_cnt, "%u");
- __HAL_AUX_ENTRY("rxufca_lo_adjust_cnt",
- devstat.rxufca_lo_adjust_cnt, "%u");
- __HAL_AUX_ENTRY("rxufca_hi_adjust_cnt",
- devstat.rxufca_hi_adjust_cnt, "%u");
+
+ if (hldev->config.rxufca_hi_lim != hldev->config.rxufca_lo_lim &&
+ hldev->config.rxufca_lo_lim != 0) {
+ __HAL_AUX_ENTRY("rxufca_lo_adjust_cnt",
+ devstat.rxufca_lo_adjust_cnt, "%u");
+ __HAL_AUX_ENTRY("rxufca_hi_adjust_cnt",
+ devstat.rxufca_hi_adjust_cnt, "%u");
+ }
+
+ if (hldev->config.bimodal_interrupts) {
+ __HAL_AUX_ENTRY("bimodal_lo_adjust_cnt",
+ devstat.bimodal_lo_adjust_cnt, "%u");
+ __HAL_AUX_ENTRY("bimodal_hi_adjust_cnt",
+ devstat.bimodal_hi_adjust_cnt, "%u");
+ }
+
#if defined(XGE_HAL_CONFIG_LRO)
__HAL_AUX_ENTRY("tot_frms_lroised",
devstat.tot_frms_lroised, "%u");
__HAL_AUX_ENTRY("tot_lro_sessions",
devstat.tot_lro_sessions, "%u");
+ __HAL_AUX_ENTRY("lro_frm_len_exceed_cnt",
+ devstat.lro_frm_len_exceed_cnt, "%u");
+ __HAL_AUX_ENTRY("lro_sg_exceed_cnt",
+ devstat.lro_sg_exceed_cnt, "%u");
+ __HAL_AUX_ENTRY("lro_out_of_seq_pkt_cnt",
+ devstat.lro_out_of_seq_pkt_cnt, "%u");
+ __HAL_AUX_ENTRY("lro_dup_pkt_cnt",
+ devstat.lro_dup_pkt_cnt, "%u");
#endif
/* for each opened rx channel */
@@ -658,11 +684,17 @@ xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh,
(void) xge_os_sprintf(key, "ring%d_", channel->post_qid);
xge_os_strcpy(key+6, "full_cnt");
- __HAL_AUX_ENTRY(key, chstat.out_of_dtrs_cnt, "%u");
+ __HAL_AUX_ENTRY(key, chstat.full_cnt, "%u");
+ xge_os_strcpy(key+6, "usage_max");
+ __HAL_AUX_ENTRY(key, chstat.usage_max, "%u");
+ xge_os_strcpy(key+6, "usage_cnt");
+ __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u");
xge_os_strcpy(key+6, "reserve_free_swaps_cnt");
__HAL_AUX_ENTRY(key, chstat.reserve_free_swaps_cnt, "%u");
- xge_os_strcpy(key+6, "avg_compl_per_intr_cnt");
- __HAL_AUX_ENTRY(key, chstat.avg_compl_per_intr_cnt, "%u");
+ if (!hldev->config.bimodal_interrupts) {
+ xge_os_strcpy(key+6, "avg_compl_per_intr_cnt");
+ __HAL_AUX_ENTRY(key, chstat.avg_compl_per_intr_cnt, "%u");
+ }
xge_os_strcpy(key+6, "total_compl_cnt");
__HAL_AUX_ENTRY(key, chstat.total_compl_cnt, "%u");
xge_os_strcpy(key+6, "bump_cnt");
@@ -683,7 +715,11 @@ xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh,
(void) xge_os_sprintf(key, "fifo%d_", channel->post_qid);
xge_os_strcpy(key+6, "full_cnt");
- __HAL_AUX_ENTRY(key, chstat.out_of_dtrs_cnt, "%u");
+ __HAL_AUX_ENTRY(key, chstat.full_cnt, "%u");
+ xge_os_strcpy(key+6, "usage_max");
+ __HAL_AUX_ENTRY(key, chstat.usage_max, "%u");
+ xge_os_strcpy(key+6, "usage_cnt");
+ __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u");
xge_os_strcpy(key+6, "reserve_free_swaps_cnt");
__HAL_AUX_ENTRY(key, chstat.reserve_free_swaps_cnt, "%u");
xge_os_strcpy(key+6, "avg_compl_per_intr_cnt");
@@ -694,6 +730,10 @@ xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh,
__HAL_AUX_ENTRY(key, chstat.total_posts, "%u");
xge_os_strcpy(key+6, "total_posts_many");
__HAL_AUX_ENTRY(key, chstat.total_posts_many, "%u");
+ xge_os_strcpy(key+6, "copied_frags");
+ __HAL_AUX_ENTRY(key, chstat.copied_frags, "%u");
+ xge_os_strcpy(key+6, "copied_buffers");
+ __HAL_AUX_ENTRY(key, chstat.copied_buffers, "%u");
xge_os_strcpy(key+6, "total_buffers");
__HAL_AUX_ENTRY(key, chstat.total_buffers, "%u");
xge_os_strcpy(key+6, "avg_buffers_per_post");
@@ -772,6 +812,30 @@ xge_hal_status_e xge_hal_aux_stats_sw_dev_read(xge_hal_device_h devh,
__HAL_AUX_ENTRY(buf, t_code_cnt, "%u");
}
}
+ __HAL_AUX_ENTRY("alarm_transceiver_temp_high",sw_dev_err_stats.
+ stats_xpak.alarm_transceiver_temp_high, "%u");
+ __HAL_AUX_ENTRY("alarm_transceiver_temp_low",sw_dev_err_stats.
+ stats_xpak.alarm_transceiver_temp_low, "%u");
+ __HAL_AUX_ENTRY("alarm_laser_bias_current_high",sw_dev_err_stats.
+ stats_xpak.alarm_laser_bias_current_high, "%u");
+ __HAL_AUX_ENTRY("alarm_laser_bias_current_low",sw_dev_err_stats.
+ stats_xpak.alarm_laser_bias_current_low, "%u");
+ __HAL_AUX_ENTRY("alarm_laser_output_power_high",sw_dev_err_stats.
+ stats_xpak.alarm_laser_output_power_high, "%u");
+ __HAL_AUX_ENTRY("alarm_laser_output_power_low",sw_dev_err_stats.
+ stats_xpak.alarm_laser_output_power_low, "%u");
+ __HAL_AUX_ENTRY("warn_transceiver_temp_high",sw_dev_err_stats.
+ stats_xpak.warn_transceiver_temp_high, "%u");
+ __HAL_AUX_ENTRY("warn_transceiver_temp_low",sw_dev_err_stats.
+ stats_xpak.warn_transceiver_temp_low, "%u");
+ __HAL_AUX_ENTRY("warn_laser_bias_current_high",sw_dev_err_stats.
+ stats_xpak.warn_laser_bias_current_high, "%u");
+ __HAL_AUX_ENTRY("warn_laser_bias_current_low",sw_dev_err_stats.
+ stats_xpak.warn_laser_bias_current_low, "%u");
+ __HAL_AUX_ENTRY("warn_laser_output_power_high",sw_dev_err_stats.
+ stats_xpak.warn_laser_output_power_high, "%u");
+ __HAL_AUX_ENTRY("warn_laser_output_power_low",sw_dev_err_stats.
+ stats_xpak.warn_laser_output_power_low, "%u");
__HAL_AUX_ENTRY_END(bufsize, retsize);
@@ -917,26 +981,28 @@ xge_hal_status_e xge_hal_aux_channel_read(xge_hal_device_h devh,
__HAL_AUX_ENTRY(key, channel->type, "%u");
xge_os_strcpy(key+6, "length");
__HAL_AUX_ENTRY(key, channel->length, "%u");
- xge_os_strcpy(key+6, "is open");
+ xge_os_strcpy(key+6, "is_open");
__HAL_AUX_ENTRY(key, channel->is_open, "%u");
- xge_os_strcpy(key+6, "reserve initial");
+ xge_os_strcpy(key+6, "reserve_initial");
__HAL_AUX_ENTRY(key, channel->reserve_initial, "%u");
- xge_os_strcpy(key+6, "reserve max");
+ xge_os_strcpy(key+6, "reserve_max");
__HAL_AUX_ENTRY(key, channel->reserve_max, "%u");
- xge_os_strcpy(key+6, "reserve length");
+ xge_os_strcpy(key+6, "reserve_length");
__HAL_AUX_ENTRY(key, channel->reserve_length, "%u");
- xge_os_strcpy(key+6, "reserve top");
+ xge_os_strcpy(key+6, "reserve_top");
__HAL_AUX_ENTRY(key, channel->reserve_top, "%u");
- xge_os_strcpy(key+6, "reserve threshold");
+ xge_os_strcpy(key+6, "reserve_threshold");
__HAL_AUX_ENTRY(key, channel->reserve_threshold, "%u");
- xge_os_strcpy(key+6, "free length");
+ xge_os_strcpy(key+6, "free_length");
__HAL_AUX_ENTRY(key, channel->free_length, "%u");
- xge_os_strcpy(key+6, "post index");
+ xge_os_strcpy(key+6, "post_index");
__HAL_AUX_ENTRY(key, channel->post_index, "%u");
- xge_os_strcpy(key+6, "compl index");
+ xge_os_strcpy(key+6, "compl_index");
__HAL_AUX_ENTRY(key, channel->compl_index, "%u");
- xge_os_strcpy(key+6, "per dtr space");
+ xge_os_strcpy(key+6, "per_dtr_space");
__HAL_AUX_ENTRY(key, channel->per_dtr_space, "%u");
+ xge_os_strcpy(key+6, "usage_cnt");
+ __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u");
}
/* for each opened tx channel */
@@ -952,26 +1018,28 @@ xge_hal_status_e xge_hal_aux_channel_read(xge_hal_device_h devh,
__HAL_AUX_ENTRY(key, channel->type, "%u");
xge_os_strcpy(key+6, "length");
__HAL_AUX_ENTRY(key, channel->length, "%u");
- xge_os_strcpy(key+6, "is open");
+ xge_os_strcpy(key+6, "is_open");
__HAL_AUX_ENTRY(key, channel->is_open, "%u");
- xge_os_strcpy(key+6, "reserve initial");
+ xge_os_strcpy(key+6, "reserve_initial");
__HAL_AUX_ENTRY(key, channel->reserve_initial, "%u");
- xge_os_strcpy(key+6, "reserve max");
+ xge_os_strcpy(key+6, "reserve_max");
__HAL_AUX_ENTRY(key, channel->reserve_max, "%u");
- xge_os_strcpy(key+6, "reserve length");
+ xge_os_strcpy(key+6, "reserve_length");
__HAL_AUX_ENTRY(key, channel->reserve_length, "%u");
- xge_os_strcpy(key+6, "reserve top");
+ xge_os_strcpy(key+6, "reserve_top");
__HAL_AUX_ENTRY(key, channel->reserve_top, "%u");
- xge_os_strcpy(key+6, "reserve threshold");
+ xge_os_strcpy(key+6, "reserve_threshold");
__HAL_AUX_ENTRY(key, channel->reserve_threshold, "%u");
- xge_os_strcpy(key+6, "free length");
+ xge_os_strcpy(key+6, "free_length");
__HAL_AUX_ENTRY(key, channel->free_length, "%u");
- xge_os_strcpy(key+6, "post index");
+ xge_os_strcpy(key+6, "post_index");
__HAL_AUX_ENTRY(key, channel->post_index, "%u");
- xge_os_strcpy(key+6, "compl index");
+ xge_os_strcpy(key+6, "compl_index");
__HAL_AUX_ENTRY(key, channel->compl_index, "%u");
- xge_os_strcpy(key+6, "per dtr space");
+ xge_os_strcpy(key+6, "per_dtr_space");
__HAL_AUX_ENTRY(key, channel->per_dtr_space, "%u");
+ xge_os_strcpy(key+6, "usage_cnt");
+ __HAL_AUX_ENTRY(key, channel->usage_cnt, "%u");
}
__HAL_AUX_ENTRY_END(bufsize, retsize);
@@ -1167,22 +1235,27 @@ xge_hal_status_e xge_hal_aux_device_config_read(xge_hal_device_h devh,
}
__HAL_AUX_ENTRY("mtu", dev_config.mtu, "%u");
- __HAL_AUX_ENTRY("isr polling count", dev_config.isr_polling_cnt, "%u");
- __HAL_AUX_ENTRY("latency timer", dev_config.latency_timer, "%u");
- __HAL_AUX_ENTRY("max split transactions",
+ __HAL_AUX_ENTRY("isr_polling_count", dev_config.isr_polling_cnt, "%u");
+ __HAL_AUX_ENTRY("latency_timer", dev_config.latency_timer, "%u");
+ __HAL_AUX_ENTRY("max_splits_trans",
dev_config.max_splits_trans, "%u");
- __HAL_AUX_ENTRY("mmrb count", dev_config.mmrb_count, "%d");
- __HAL_AUX_ENTRY("shared splits", dev_config.shared_splits, "%u");
- __HAL_AUX_ENTRY("statistics refresh time(in sec)",
+ __HAL_AUX_ENTRY("mmrb_count", dev_config.mmrb_count, "%d");
+ __HAL_AUX_ENTRY("shared_splits", dev_config.shared_splits, "%u");
+ __HAL_AUX_ENTRY("stats_refresh_time_sec",
dev_config.stats_refresh_time_sec, "%u");
- __HAL_AUX_ENTRY("pci freq(in mherz)", dev_config.pci_freq_mherz, "%u");
- __HAL_AUX_ENTRY("intr mode", dev_config.intr_mode, "%u");
- __HAL_AUX_ENTRY("sched timer(in us)", dev_config.sched_timer_us, "%u");
- __HAL_AUX_ENTRY("sched timer one shot(flag)",
- dev_config.sched_timer_one_shot, "%u");
- __HAL_AUX_ENTRY("ring memblock size",
+ __HAL_AUX_ENTRY("pci_freq_mherz", dev_config.pci_freq_mherz, "%u");
+ __HAL_AUX_ENTRY("intr_mode", dev_config.intr_mode, "%u");
+ __HAL_AUX_ENTRY("ring_memblock_size",
dev_config.ring.memblock_size, "%u");
+ __HAL_AUX_ENTRY("sched_timer_us", dev_config.sched_timer_us, "%u");
+ __HAL_AUX_ENTRY("sched_timer_one_shot",
+ dev_config.sched_timer_one_shot, "%u");
+ __HAL_AUX_ENTRY("rxufca_intr_thres", dev_config.rxufca_intr_thres, "%u");
+ __HAL_AUX_ENTRY("rxufca_lo_lim", dev_config.rxufca_lo_lim, "%u");
+ __HAL_AUX_ENTRY("rxufca_hi_lim", dev_config.rxufca_hi_lim, "%u");
+ __HAL_AUX_ENTRY("rxufca_lbolt_period", dev_config.rxufca_lbolt_period, "%u");
+
for(i = 0; i < XGE_HAL_MAX_RING_NUM; i++)
{
xge_hal_ring_queue_t *ring = &dev_config.ring.queue[i];
@@ -1192,24 +1265,27 @@ xge_hal_status_e xge_hal_aux_device_config_read(xge_hal_device_h devh,
continue;
(void) xge_os_sprintf(key, "ring%d_", i);
- xge_os_strcpy(key+6, "inital rxd blocks");
+ xge_os_strcpy(key+6, "inital");
__HAL_AUX_ENTRY(key, ring->initial, "%u");
- xge_os_strcpy(key+6, "max rxd blocks");
+ xge_os_strcpy(key+6, "max");
__HAL_AUX_ENTRY(key, ring->max, "%u");
- xge_os_strcpy(key+6, "buffer mode");
+ xge_os_strcpy(key+6, "buffer_mode");
__HAL_AUX_ENTRY(key, ring->buffer_mode, "%u");
- xge_os_strcpy(key+6, "dram size(in mb)");
+ xge_os_strcpy(key+6, "dram_size_mb");
__HAL_AUX_ENTRY(key, ring->dram_size_mb, "%u");
- xge_os_strcpy(key+6, "backoff interval(in us)");
+ xge_os_strcpy(key+6, "backoff_interval_us");
__HAL_AUX_ENTRY(key, ring->backoff_interval_us, "%u");
- xge_os_strcpy(key+6, "max frame len");
+ xge_os_strcpy(key+6, "max_frame_len");
__HAL_AUX_ENTRY(key, ring->max_frm_len, "%d");
xge_os_strcpy(key+6, "priority");
__HAL_AUX_ENTRY(key, ring->priority, "%u");
- xge_os_strcpy(key+6, "rth en");
+ xge_os_strcpy(key+6, "rth_en");
__HAL_AUX_ENTRY(key, ring->rth_en, "%u");
- xge_os_strcpy(key+6, "no snoop bits");
+ xge_os_strcpy(key+6, "no_snoop_bits");
__HAL_AUX_ENTRY(key, ring->no_snoop_bits, "%u");
+ xge_os_strcpy(key+6, "indicate_max_pkts");
+ __HAL_AUX_ENTRY(key, ring->indicate_max_pkts, "%u");
+
xge_os_strcpy(key+6, "urange_a");
__HAL_AUX_ENTRY(key, rti->urange_a, "%u");
xge_os_strcpy(key+6, "ufc_a");
@@ -1224,83 +1300,129 @@ xge_hal_status_e xge_hal_aux_device_config_read(xge_hal_device_h devh,
__HAL_AUX_ENTRY(key, rti->ufc_c, "%u");
xge_os_strcpy(key+6, "ufc_d");
__HAL_AUX_ENTRY(key, rti->ufc_d, "%u");
- xge_os_strcpy(key+6, "timer val(in us)");
+ xge_os_strcpy(key+6, "timer_val_us");
__HAL_AUX_ENTRY(key, rti->timer_val_us, "%u");
}
+
{
xge_hal_mac_config_t *mac= &dev_config.mac;
- __HAL_AUX_ENTRY("tmac util period",
+ __HAL_AUX_ENTRY("tmac_util_period",
mac->tmac_util_period, "%u");
- __HAL_AUX_ENTRY("rmac util period",
+ __HAL_AUX_ENTRY("rmac_util_period",
mac->rmac_util_period, "%u");
- __HAL_AUX_ENTRY("rmac bcast enable(flag)",
+ __HAL_AUX_ENTRY("rmac_bcast_en",
mac->rmac_bcast_en, "%u");
- __HAL_AUX_ENTRY("rmac pause generator enable(flag)",
+ __HAL_AUX_ENTRY("rmac_pause_gen_en",
mac->rmac_pause_gen_en, "%d");
- __HAL_AUX_ENTRY("rmac pause receive enable(flag)",
+ __HAL_AUX_ENTRY("rmac_pause_rcv_en",
mac->rmac_pause_rcv_en, "%d");
- __HAL_AUX_ENTRY("rmac pause time",
+ __HAL_AUX_ENTRY("rmac_pause_time",
mac->rmac_pause_time, "%u");
- __HAL_AUX_ENTRY("mc pause threshold qoq3",
+ __HAL_AUX_ENTRY("mc_pause_threshold_q0q3",
mac->mc_pause_threshold_q0q3, "%u");
- __HAL_AUX_ENTRY("mc pause threshold q4q7",
+ __HAL_AUX_ENTRY("mc_pause_threshold_q4q7",
mac->mc_pause_threshold_q4q7, "%u");
}
- {
- xge_hal_tti_config_t *tti = &dev_config.tti;
- __HAL_AUX_ENTRY("tti enabled", tti->enabled, "%u");
- __HAL_AUX_ENTRY("tti urange_a", tti->urange_a, "%u");
- __HAL_AUX_ENTRY("tti ufc_a", tti->ufc_a, "%u");
- __HAL_AUX_ENTRY("tti urange_b", tti->urange_b, "%u");
- __HAL_AUX_ENTRY("tti ufc_b", tti->ufc_b, "%u");
- __HAL_AUX_ENTRY("tti urange_c", tti->urange_c, "%u");
- __HAL_AUX_ENTRY("tti ufc_c", tti->ufc_c, "%u");
- __HAL_AUX_ENTRY("tti urange_d", tti->urange_d, "%u");
- __HAL_AUX_ENTRY("tti ufc_d", tti->ufc_d, "%u");
- __HAL_AUX_ENTRY("tti timer val(in us)",
- tti->timer_val_us, "%u");
- __HAL_AUX_ENTRY("tti timer ci en(flag)",
- tti->timer_ci_en, "%u");
- }
-
- __HAL_AUX_ENTRY("fifo max frags", dev_config.fifo.max_frags, "%u");
- __HAL_AUX_ENTRY("fifo reserve threshold",
+ __HAL_AUX_ENTRY("fifo_max_frags", dev_config.fifo.max_frags, "%u");
+ __HAL_AUX_ENTRY("fifo_reserve_threshold",
dev_config.fifo.reserve_threshold, "%u");
- __HAL_AUX_ENTRY("fifo memblock size",
+ __HAL_AUX_ENTRY("fifo_memblock_size",
dev_config.fifo.memblock_size, "%u");
#ifdef XGE_HAL_ALIGN_XMIT
- __HAL_AUX_ENTRY("fifo alignment size",
+ __HAL_AUX_ENTRY("fifo_alignment_size",
dev_config.fifo.alignment_size, "%u");
#endif
- for(i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++)
- {
+ for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
+ int j;
xge_hal_fifo_queue_t *fifo = &dev_config.fifo.queue[i];
if (!fifo->configured)
continue;
(void) xge_os_sprintf(key, "fifo%d_", i);
- xge_os_strcpy(key+6, "initial len");
+ xge_os_strcpy(key+6, "initial");
__HAL_AUX_ENTRY(key, fifo->initial, "%u");
- xge_os_strcpy(key+6, "max len");
+ xge_os_strcpy(key+6, "max");
__HAL_AUX_ENTRY(key, fifo->max, "%u");
- xge_os_strcpy(key+6, "intr mode");
+ xge_os_strcpy(key+6, "intr");
__HAL_AUX_ENTRY(key, fifo->intr, "%u");
- xge_os_strcpy(key+6, "no snoop bits");
+ xge_os_strcpy(key+6, "no_snoop_bits");
__HAL_AUX_ENTRY(key, fifo->no_snoop_bits, "%u");
+
+ for (j = 0; j < XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
+ xge_hal_tti_config_t *tti =
+ &dev_config.fifo.queue[i].tti[j];
+
+ if (!tti->enabled)
+ continue;
+
+ (void) xge_os_sprintf(key, "fifo%d_tti%02d_", i,
+ i * XGE_HAL_MAX_FIFO_TTI_NUM + j);
+ xge_os_strcpy(key+12, "urange_a");
+ __HAL_AUX_ENTRY(key, tti->urange_a, "%u");
+ xge_os_strcpy(key+12, "ufc_a");
+ __HAL_AUX_ENTRY(key, tti->ufc_a, "%u");
+ xge_os_strcpy(key+12, "urange_b");
+ __HAL_AUX_ENTRY(key, tti->urange_b, "%u");
+ xge_os_strcpy(key+12, "ufc_b");
+ __HAL_AUX_ENTRY(key, tti->ufc_b, "%u");
+ xge_os_strcpy(key+12, "urange_c");
+ __HAL_AUX_ENTRY(key, tti->urange_c, "%u");
+ xge_os_strcpy(key+12, "ufc_c");
+ __HAL_AUX_ENTRY(key, tti->ufc_c, "%u");
+ xge_os_strcpy(key+12, "ufc_d");
+ __HAL_AUX_ENTRY(key, tti->ufc_d, "%u");
+ xge_os_strcpy(key+12, "timer_val_us");
+ __HAL_AUX_ENTRY(key, tti->timer_val_us, "%u");
+ xge_os_strcpy(key+12, "timer_ci_en");
+ __HAL_AUX_ENTRY(key, tti->timer_ci_en, "%u");
+ }
+ }
+
+ /* and bimodal TTIs */
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
+ xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
+ xge_hal_tti_config_t *tti = &hldev->bimodal_tti[i];
+
+ if (!tti->enabled)
+ continue;
+
+ (void) xge_os_sprintf(key, "tti%02d_",
+ XGE_HAL_MAX_FIFO_TTI_RING_0 + i);
+
+ xge_os_strcpy(key+6, "urange_a");
+ __HAL_AUX_ENTRY(key, tti->urange_a, "%u");
+ xge_os_strcpy(key+6, "ufc_a");
+ __HAL_AUX_ENTRY(key, tti->ufc_a, "%u");
+ xge_os_strcpy(key+6, "urange_b");
+ __HAL_AUX_ENTRY(key, tti->urange_b, "%u");
+ xge_os_strcpy(key+6, "ufc_b");
+ __HAL_AUX_ENTRY(key, tti->ufc_b, "%u");
+ xge_os_strcpy(key+6, "urange_c");
+ __HAL_AUX_ENTRY(key, tti->urange_c, "%u");
+ xge_os_strcpy(key+6, "ufc_c");
+ __HAL_AUX_ENTRY(key, tti->ufc_c, "%u");
+ xge_os_strcpy(key+6, "ufc_d");
+ __HAL_AUX_ENTRY(key, tti->ufc_d, "%u");
+ xge_os_strcpy(key+6, "timer_val_us");
+ __HAL_AUX_ENTRY(key, tti->timer_val_us, "%u");
+ xge_os_strcpy(key+6, "timer_ac_en");
+ __HAL_AUX_ENTRY(key, tti->timer_ac_en, "%u");
+ xge_os_strcpy(key+6, "timer_ci_en");
+ __HAL_AUX_ENTRY(key, tti->timer_ci_en, "%u");
}
- __HAL_AUX_ENTRY("dump on serr(flag)", dev_config.dump_on_serr, "%u");
- __HAL_AUX_ENTRY("dump on ecc err(flag)",
+ __HAL_AUX_ENTRY("dump_on_serr", dev_config.dump_on_serr, "%u");
+ __HAL_AUX_ENTRY("dump_on_eccerr",
dev_config.dump_on_eccerr, "%u");
- __HAL_AUX_ENTRY("dump on parity err(flag)",
+ __HAL_AUX_ENTRY("dump_on_parityerr",
dev_config.dump_on_parityerr, "%u");
- __HAL_AUX_ENTRY("rth en(flag)", dev_config.rth_en, "%u");
- __HAL_AUX_ENTRY("rth bucket size", dev_config.rth_bucket_size, "%u");
+ __HAL_AUX_ENTRY("rth_en", dev_config.rth_en, "%u");
+ __HAL_AUX_ENTRY("rth_bucket_size", dev_config.rth_bucket_size, "%u");
__HAL_AUX_ENTRY_END(bufsize, retsize);
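All of these keys surface through the aux read interface as "name value" text lines. A minimal caller sketch, assuming the (devh, bufsize, retbuf, retsize) signature used throughout this file; the buffer size and printing routine are illustrative:

	char buf[4096];
	int retsize;

	if (xge_hal_aux_device_config_read(devh, (int)sizeof(buf),
	    buf, &retsize) == XGE_HAL_OK) {
		/* buf now holds retsize bytes of key/value lines */
		xge_os_printf("%s", buf);
	}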
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-mm.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-mm.c
index 8259692ba9..22442de612 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-mm.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-mm.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
*
- * FileName : hal-mm.c
- *
- * Description: chipset memory pool object implementation
- *
- * Created: 10 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#include "xge-os-pal.h"
@@ -197,8 +188,8 @@ __hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate,
}
xge_debug_mm(XGE_TRACE,
- "memblock%d: allocated %dk, vaddr 0x%llx, "
- "dma_addr 0x%llx", i, mempool->memblock_size / 1024,
+ "memblock%d: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", "
+ "dma_addr 0x"XGE_OS_LLXFMT, i, mempool->memblock_size / 1024,
(unsigned long long)(ulong_t)mempool->memblocks_arr[i],
(unsigned long long)dma_object->addr);
@@ -248,7 +239,8 @@ __hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
return NULL;
}
- mempool = xge_os_malloc(pdev, sizeof(xge_hal_mempool_t));
+ mempool = (xge_hal_mempool_t *) \
+ xge_os_malloc(pdev, sizeof(xge_hal_mempool_t));
if (mempool == NULL) {
xge_debug_mm(XGE_ERR, "mempool allocation failure");
return NULL;
@@ -273,7 +265,7 @@ __hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
mempool->items_per_memblock;
/* allocate array of memblocks */
- mempool->memblocks_arr = xge_os_malloc(mempool->pdev,
+ mempool->memblocks_arr = (void ** ) xge_os_malloc(mempool->pdev,
sizeof(void*) * mempool->memblocks_max);
if (mempool->memblocks_arr == NULL) {
xge_debug_mm(XGE_ERR, "memblocks_arr allocation failure");
@@ -284,7 +276,7 @@ __hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
sizeof(void*) * mempool->memblocks_max);
/* allocate array of private parts of items per memblocks */
- mempool->memblocks_priv_arr = xge_os_malloc(mempool->pdev,
+ mempool->memblocks_priv_arr = (void **) xge_os_malloc(mempool->pdev,
sizeof(void*) * mempool->memblocks_max);
if (mempool->memblocks_priv_arr == NULL) {
xge_debug_mm(XGE_ERR, "memblocks_priv_arr allocation failure");
@@ -295,8 +287,10 @@ __hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
sizeof(void*) * mempool->memblocks_max);
/* allocate array of memblocks DMA objects */
- mempool->memblocks_dma_arr = xge_os_malloc(mempool->pdev,
- sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
+ mempool->memblocks_dma_arr =
+ (xge_hal_mempool_dma_t *) xge_os_malloc(mempool->pdev,
+ sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
+
if (mempool->memblocks_dma_arr == NULL) {
xge_debug_mm(XGE_ERR, "memblocks_dma_arr allocation failure");
__hal_mempool_destroy(mempool);
@@ -306,7 +300,7 @@ __hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
/* allocate hash array of items */
- mempool->items_arr = xge_os_malloc(mempool->pdev,
+ mempool->items_arr = (void **) xge_os_malloc(mempool->pdev,
sizeof(void*) * mempool->items_max);
if (mempool->items_arr == NULL) {
xge_debug_mm(XGE_ERR, "items_arr allocation failure");
@@ -315,8 +309,8 @@ __hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
}
xge_os_memzero(mempool->items_arr, sizeof(void *) * mempool->items_max);
- mempool->shadow_items_arr = xge_os_malloc(mempool->pdev,sizeof(void*) *
- mempool->items_max);
+ mempool->shadow_items_arr = (void **) xge_os_malloc(mempool->pdev,
+ sizeof(void*) * mempool->items_max);
if (mempool->shadow_items_arr == NULL) {
xge_debug_mm(XGE_ERR, "shadow_items_arr allocation failure");
__hal_mempool_destroy(mempool);
@@ -427,3 +421,89 @@ __hal_mempool_destroy(xge_hal_mempool_t *mempool)
xge_os_free(mempool->pdev, mempool, sizeof(xge_hal_mempool_t));
}
+
+#ifdef XGEHAL_RNIC
+
+/*
+ * __hal_allocate_dma_register
+ *
+ * Will allocate dmable memory for register.
+ */
+xge_hal_status_e
+__hal_allocate_dma_register(pci_dev_h pdev, int size,
+ void **dma_register, xge_hal_mempool_dma_t *dma_object)
+{
+ int dma_flags;
+
+ dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
+#ifdef XGE_HAL_DMA_DTR_CONSISTENT
+ dma_flags |= XGE_OS_DMA_CONSISTENT;
+#else
+ dma_flags |= XGE_OS_DMA_STREAMING;
+#endif
+
+ xge_os_memzero(dma_object, sizeof(xge_hal_mempool_dma_t));
+
+ /* allocate DMA-capable memblock */
+ *dma_register = xge_os_dma_malloc(pdev,
+ size,
+ dma_flags,
+ &dma_object->handle,
+ &dma_object->acc_handle);
+ if (*dma_register == NULL) {
+ xge_debug_mm(XGE_ERR, "dma_register: out of DMA memory");
+ return XGE_HAL_ERR_OUT_OF_MEMORY;
+ }
+
+ xge_os_memzero(*dma_register, size);
+
+ /* map memblock to physical memory */
+ dma_object->addr = xge_os_dma_map(pdev,
+ dma_object->handle,
+ *dma_register,
+ size,
+ XGE_OS_DMA_DIR_BIDIRECTIONAL,
+#ifdef XGE_HAL_DMA_DTR_CONSISTENT
+ XGE_OS_DMA_CONSISTENT
+#else
+ XGE_OS_DMA_STREAMING
+#endif
+ );
+ if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) {
+ xge_os_dma_free(pdev,
+ *dma_register,
+ size,
+ &dma_object->acc_handle,
+ &dma_object->handle);
+ return XGE_HAL_ERR_OUT_OF_MAPPING;
+ }
+
+ xge_debug_mm(XGE_TRACE,
+ "dmareg: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", "
+ "dma_addr 0x"XGE_OS_LLXFMT, size / 1024,
+ (unsigned long long)(ulong_t)*dma_register,
+ (unsigned long long)dma_object->addr);
+
+
+ return XGE_HAL_OK;
+}
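A sketch of the intended pairing with the matching free routine defined just below; the size is illustrative, and this path is compiled only under XGEHAL_RNIC:

	void *reg;
	xge_hal_mempool_dma_t dma;

	if (__hal_allocate_dma_register(pdev, 256, &reg, &dma) ==
	    XGE_HAL_OK) {
		/* hand dma.addr to the hardware; access reg from the host */
		__hal_free_dma_register(pdev, 256, reg, &dma);
	}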
+
+/*
+ * __hal_free_dma_register
+ */
+void
+__hal_free_dma_register(pci_dev_h pdev, int size,
+ void *dma_register, xge_hal_mempool_dma_t *dma_object)
+
+{
+
+ xge_os_dma_unmap(pdev,
+ dma_object->handle, dma_object->addr,
+ size, XGE_OS_DMA_DIR_BIDIRECTIONAL);
+
+ xge_os_dma_free(pdev, dma_register, size,
+ &dma_object->acc_handle, &dma_object->handle);
+
+}
+
+#endif
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-ring-fp.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-ring-fp.c
index b35db4f969..63d6cf555c 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-ring-fp.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-ring-fp.c
@@ -17,138 +17,129 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
*
- * FileName : xgehal-ring-fp.c
- *
- * Description: HAL Rx ring object functionality (fast path)
- *
- * Created: 10 June 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifdef XGE_DEBUG_FP
#include "xgehal-ring.h"
#endif
-__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
-__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh)
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
+__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh)
{
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
- xge_hal_ring_rxd_priv_t *rxd_priv;
+ xge_hal_ring_rxd_priv_t *rxd_priv;
xge_assert(rxdp);
-#if defined(XGE_HAL_USE_5B_MODE)
+#if defined(XGE_HAL_USE_5B_MODE)
xge_assert(ring);
if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)dtrh;
-#if defined (XGE_OS_PLATFORM_64BIT)
- int memblock_idx = rxdp_5->host_control >> 16;
- int i = rxdp_5->host_control & 0xFFFF;
- rxd_priv = (xge_hal_ring_rxd_priv_t *)
- ((char*)ring->mempool->memblocks_priv_arr[memblock_idx] + ring->rxd_priv_size * i);
+#if defined (XGE_OS_PLATFORM_64BIT)
+ int memblock_idx = rxdp_5->host_control >> 16;
+ int i = rxdp_5->host_control & 0xFFFF;
+ rxd_priv = (xge_hal_ring_rxd_priv_t *)
+ ((char*)ring->mempool->memblocks_priv_arr[memblock_idx] + ring->rxd_priv_size * i);
#else
/* 32-bit case */
- rxd_priv = (xge_hal_ring_rxd_priv_t *)rxdp_5->host_control;
+ rxd_priv = (xge_hal_ring_rxd_priv_t *)rxdp_5->host_control;
#endif
} else
#endif
{
- rxd_priv = (xge_hal_ring_rxd_priv_t *)
+ rxd_priv = (xge_hal_ring_rxd_priv_t *)
(ulong_t)rxdp->host_control;
}
xge_assert(rxd_priv);
xge_assert(rxd_priv->dma_object);
- xge_assert(rxd_priv->dma_object->handle == rxd_priv->dma_handle);
+ xge_assert(rxd_priv->dma_object->handle == rxd_priv->dma_handle);
- xge_assert(rxd_priv->dma_object->addr + rxd_priv->dma_offset ==
+ xge_assert(rxd_priv->dma_object->addr + rxd_priv->dma_offset ==
rxd_priv->dma_addr);
return rxd_priv;
}
-__HAL_STATIC_RING __HAL_INLINE_RING int
+__HAL_STATIC_RING __HAL_INLINE_RING int
__hal_ring_block_memblock_idx(xge_hal_ring_block_t *block)
{
- return (int)*((u64 *)(void *)((char *)block +
- XGE_HAL_RING_MEMBLOCK_IDX_OFFSET));
+ return (int)*((u64 *)(void *)((char *)block +
+ XGE_HAL_RING_MEMBLOCK_IDX_OFFSET));
}
-__HAL_STATIC_RING __HAL_INLINE_RING void
+__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_memblock_idx_set(xge_hal_ring_block_t*block, int memblock_idx)
{
- *((u64 *)(void *)((char *)block +
- XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)) =
- memblock_idx;
+ *((u64 *)(void *)((char *)block +
+ XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)) =
+ memblock_idx;
}
-__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t
+__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t
__hal_ring_block_next_pointer(xge_hal_ring_block_t *block)
{
return (dma_addr_t)*((u64 *)(void *)((char *)block +
XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET));
}
-__HAL_STATIC_RING __HAL_INLINE_RING void
+__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_next_pointer_set(xge_hal_ring_block_t *block,
dma_addr_t dma_next)
{
- *((u64 *)(void *)((char *)block +
+ *((u64 *)(void *)((char *)block +
XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}
/**
- * xge_hal_ring_dtr_private - Get ULD private per-descriptor data.
+ * xge_hal_ring_dtr_private - Get ULD private per-descriptor data.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
*
- * Returns: private ULD info associated with the descriptor.
- * ULD requests per-descriptor space via xge_hal_channel_open().
+ * Returns: private ULD info associated with the descriptor.
+ * ULD requests per-descriptor space via xge_hal_channel_open().
*
* See also: xge_hal_fifo_dtr_private().
* Usage: See ex_rx_compl{}.
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void*
+__HAL_STATIC_RING __HAL_INLINE_RING void*
xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
- return (char *)__hal_ring_rxd_priv(channelh, dtrh) +
+ return (char *)__hal_ring_rxd_priv((xge_hal_ring_t *) channelh, dtrh) +
sizeof(xge_hal_ring_rxd_priv_t);
}
/**
- * xge_hal_ring_dtr_reserve - Reserve ring descriptor.
+ * xge_hal_ring_dtr_reserve - Reserve ring descriptor.
* @channelh: Channel handle.
- * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
- * with a valid handle.
+ * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
+ * with a valid handle.
*
- * Reserve Rx descriptor for the subsequent filling-in (by upper layer
- * driver (ULD)) and posting on the corresponding channel (@channelh)
+ * Reserve Rx descriptor for the subsequent filling-in (by upper layer
+ * driver (ULD)) and posting on the corresponding channel (@channelh)
* via xge_hal_ring_dtr_post().
*
- * Returns: XGE_HAL_OK - success.
- * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
*
* See also: xge_hal_fifo_dtr_reserve(), xge_hal_ring_dtr_free(),
* xge_hal_fifo_dtr_reserve_sp(), xge_hal_status_e{}.
* Usage: See ex_post_all_rx{}.
*/
-__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
xge_hal_status_e status;
-#if defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
+#if defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
unsigned long flags;
#endif
-#if defined(XGE_HAL_RX_MULTI_RESERVE)
+#if defined(XGE_HAL_RX_MULTI_RESERVE)
xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
@@ -157,20 +148,20 @@ xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
status = __hal_channel_dtr_alloc(channelh, dtrh);
-#if defined(XGE_HAL_RX_MULTI_RESERVE)
+#if defined(XGE_HAL_RX_MULTI_RESERVE)
xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
- flags);
+ flags);
#endif
if (status == XGE_HAL_OK) {
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
- /* instead of memset: reset this RxD */
- rxdp->control_1 = rxdp->control_2 = 0;
+ /* instead of memset: reset this RxD */
+ rxdp->control_1 = rxdp->control_2 = 0;
-#if defined(XGE_OS_MEMORY_CHECK)
+#if defined(XGE_OS_MEMORY_CHECK)
__hal_ring_rxd_priv(channelh, rxdp)->allocated = 1;
#endif
}
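A sketch of the reserve/post pairing this comment describes (cf. ex_post_all_rx{}), assuming the 1-buffer-mode setter xge_hal_ring_dtr_1b_set() and xge_hal_ring_dtr_post() from the existing ring API; the buffer address and size variables are placeholders:

	xge_hal_dtr_h dtrh;

	while (xge_hal_ring_dtr_reserve(channelh, &dtrh) == XGE_HAL_OK) {
		xge_hal_ring_dtr_1b_set(dtrh, buf_dma_addr, buf_size);
		xge_hal_ring_dtr_post(channelh, dtrh);
	}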
@@ -179,105 +170,105 @@ xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
}
/**
- * xge_hal_ring_dtr_info_get - Get extended information associated with
+ * xge_hal_ring_dtr_info_get - Get extended information associated with
* a completed receive descriptor for 1b mode.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
- * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
+ * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
*
- * Retrieve extended information associated with a completed receive descriptor.
+ * Retrieve extended information associated with a completed receive descriptor.
*
* See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
* xge_hal_ring_dtr_5b_get().
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void
-xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
xge_hal_dtr_info_t *ext_info)
{
- /* cast to 1-buffer mode RxD: the code below relies on the fact
- * that control_1 and control_2 are formatted the same way.. */
+ /* cast to 1-buffer mode RxD: the code below relies on the fact
+ * that control_1 and control_2 are formatted the same way.. */
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
- ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
- ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
+ ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
+ ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);
- /* Herc only, a few extra cycles imposed on Xena and/or
- * when RTH is not enabled.
- * Alternatively, could check
- * xge_hal_device_check_id(), hldev->config.rth_en, queue->rth_en */
+ /* Herc only, a few extra cycles imposed on Xena and/or
+ * when RTH is not enabled.
+ * Alternatively, could check
+ * xge_hal_device_check_id(), hldev->config.rth_en, queue->rth_en */
ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
ext_info->rth_spdm_hit =
XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
- ext_info->rth_hash_type =
+ ext_info->rth_hash_type =
XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
- ext_info->rth_value = XGE_HAL_RXD_1_GET_RTH_VALUE(rxdp->control_2);
+ ext_info->rth_value = XGE_HAL_RXD_1_GET_RTH_VALUE(rxdp->control_2);
}
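
A completion handler can consume the extended info along these lines; a sketch assuming the checksum-OK and frame-protocol constants (XGE_HAL_L3_CKSUM_OK, XGE_HAL_L4_CKSUM_OK, XGE_HAL_FRAME_PROTO_TCP_OR_UDP) that the Solaris xgell driver checks:

    xge_hal_dtr_info_t ext_info;

    xge_hal_ring_dtr_info_get(channelh, dtrh, &ext_info);
    if ((ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
        ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK &&
        ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK) {
            /* both checksums verified by hardware; skip software cksum */
    }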
/**
- * xge_hal_ring_dtr_info_nb_get - Get extended information associated
- * with a completed receive descriptor for 3b or 5b
+ * xge_hal_ring_dtr_info_nb_get - Get extended information associated
+ * with a completed receive descriptor for 3b or 5b
* modes.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
- * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
+ * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
*
- * Retrieve extended information associated with a completed receive descriptor.
+ * Retrieve extended information associated with a completed receive descriptor.
*
* See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
- * xge_hal_ring_dtr_5b_get().
+ * xge_hal_ring_dtr_5b_get().
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void
+__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_nb_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
xge_hal_dtr_info_t *ext_info)
{
- /* cast to 1-buffer mode RxD: the code below relies on the fact
- * that control_1 and control_2 are formatted the same way.. */
+ /* cast to 1-buffer mode RxD: the code below relies on the fact
+ * that control_1 and control_2 are formatted the same way.. */
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
- ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
- ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
- ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);
- /* Herc only, a few extra cycles imposed on Xena and/or
- * when RTH is not enabled. Same comment as above. */
+ ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
+ ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
+ ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);
+ /* Herc only, a few extra cycles imposed on Xena and/or
+ * when RTH is not enabled. Same comment as above. */
ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
ext_info->rth_spdm_hit =
XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
- ext_info->rth_hash_type =
+ ext_info->rth_hash_type =
XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
- ext_info->rth_value = (u32)rxdp->buffer0_ptr;
+ ext_info->rth_value = (u32)rxdp->buffer0_ptr;
}
/**
* xge_hal_ring_dtr_1b_set - Prepare 1-buffer-mode descriptor.
* @dtrh: Descriptor handle.
- * @dma_pointer: DMA address of a single receive buffer this descriptor
- * should carry. Note that by the time
- * xge_hal_ring_dtr_1b_set
- * is called, the receive buffer should be already mapped
- * to the corresponding Xframe device.
+ * @dma_pointer: DMA address of a single receive buffer this descriptor
+ * should carry. Note that by the time
+ * xge_hal_ring_dtr_1b_set
+ * is called, the receive buffer should be already mapped
+ * to the corresponding Xframe device.
* @size: Size of the receive @dma_pointer buffer.
*
- * Prepare 1-buffer-mode Rx descriptor for posting
- * (via xge_hal_ring_dtr_post()).
+ * Prepare 1-buffer-mode Rx descriptor for posting
+ * (via xge_hal_ring_dtr_post()).
*
- * This inline helper-function does not return any parameters and always
+ * This inline helper-function does not return any parameters and always
* succeeds.
*
- * See also: xge_hal_ring_dtr_3b_set(), xge_hal_ring_dtr_5b_set().
+ * See also: xge_hal_ring_dtr_3b_set(), xge_hal_ring_dtr_5b_set().
* Usage: See ex_post_all_rx{}.
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void
-xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size)
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size)
{
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
- rxdp->buffer0_ptr = dma_pointer;
- rxdp->control_2 &= (~XGE_HAL_RXD_1_MASK_BUFFER0_SIZE);
- rxdp->control_2 |= XGE_HAL_RXD_1_SET_BUFFER0_SIZE(size);
+ rxdp->buffer0_ptr = dma_pointer;
+ rxdp->control_2 &= (~XGE_HAL_RXD_1_MASK_BUFFER0_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_1_SET_BUFFER0_SIZE(size);
}
/**
@@ -285,60 +276,62 @@ xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size)
* descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
- * @dma_pointer: DMA address of a single receive buffer _this_ descriptor
- * carries. Returned by HAL.
- * @pkt_length: Length (in bytes) of the data in the buffer pointed by
- * @dma_pointer. Returned by HAL.
+ * @dma_pointer: DMA address of a single receive buffer _this_ descriptor
+ * carries. Returned by HAL.
+ * @pkt_length: Length (in bytes) of the data in the buffer pointed to by
+ * @dma_pointer. Returned by HAL.
*
- * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
- * This inline helper-function uses completed descriptor to populate receive
- * buffer pointer and other "out" parameters. The function always succeeds.
+ * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
+ * This inline helper-function uses the completed descriptor to populate receive
+ * buffer pointer and other "out" parameters. The function always succeeds.
*
- * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get().
+ * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get().
* Usage: See ex_rx_compl{}.
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void
-xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
dma_addr_t *dma_pointer, int *pkt_length)
{
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
*pkt_length = XGE_HAL_RXD_1_GET_BUFFER0_SIZE(rxdp->control_2);
*dma_pointer = rxdp->buffer0_ptr;
+
+ ((xge_hal_channel_t *)channelh)->poll_bytes += *pkt_length;
}
/**
* xge_hal_ring_dtr_3b_set - Prepare 3-buffer-mode descriptor.
* @dtrh: Descriptor handle.
- * @dma_pointers: Array of DMA addresses. Contains exactly 3 receive buffers
- * _this_ descriptor should carry.
- * Note that by the time xge_hal_ring_dtr_3b_set
- * is called, the receive buffers should be mapped
- * to the corresponding Xframe device.
- * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
- * buffer from @dma_pointers.
- *
- * Prepare 3-buffer-mode Rx descriptor for posting (via
+ * @dma_pointers: Array of DMA addresses. Contains exactly 3 receive buffers
+ * _this_ descriptor should carry.
+ * Note that by the time xge_hal_ring_dtr_3b_set
+ * is called, the receive buffers should be mapped
+ * to the corresponding Xframe device.
+ * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
+ * buffer from @dma_pointers.
+ *
+ * Prepare 3-buffer-mode Rx descriptor for posting (via
* xge_hal_ring_dtr_post()).
- * This inline helper-function does not return any parameters and always
+ * This inline helper-function does not return any parameters and always
* succeeds.
*
- * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_5b_set().
+ * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_5b_set().
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void
-xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
- int sizes[])
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
+ int sizes[])
{
xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;
- rxdp->buffer0_ptr = dma_pointers[0];
- rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER0_SIZE);
- rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER0_SIZE(sizes[0]);
- rxdp->buffer1_ptr = dma_pointers[1];
- rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER1_SIZE);
- rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER1_SIZE(sizes[1]);
- rxdp->buffer2_ptr = dma_pointers[2];
- rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER2_SIZE);
- rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER2_SIZE(sizes[2]);
+ rxdp->buffer0_ptr = dma_pointers[0];
+ rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER0_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER0_SIZE(sizes[0]);
+ rxdp->buffer1_ptr = dma_pointers[1];
+ rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER1_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER1_SIZE(sizes[1]);
+ rxdp->buffer2_ptr = dma_pointers[2];
+ rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER2_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER2_SIZE(sizes[2]);
}
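
Filling a 3-buffer descriptor follows the same pattern with parallel arrays; a sketch in which the three DMA addresses and lengths are hypothetical and the buffers are assumed already mapped to the device:

    dma_addr_t ptrs[3] = { eth_hdr_dma, l3l4_hdr_dma, payload_dma };
    int sizes[3] = { eth_hdr_len, l3l4_hdr_len, payload_len };

    xge_hal_ring_dtr_3b_set(dtrh, ptrs, sizes);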
/**
@@ -346,74 +339,77 @@ xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
* descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
- * @dma_pointers: DMA addresses of the 3 receive buffers _this_ descriptor
- * carries. The first two buffers contain ethernet and
- * (IP + transport) headers. The 3rd buffer contains packet
- * data.
- * Returned by HAL.
- * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
+ * @dma_pointers: DMA addresses of the 3 receive buffers _this_ descriptor
+ * carries. The first two buffers contain ethernet and
+ * (IP + transport) headers. The 3rd buffer contains packet
+ * data.
+ * Returned by HAL.
+ * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
* buffer from @dma_pointers. Returned by HAL.
*
- * Retrieve protocol data from the completed 3-buffer-mode Rx descriptor.
- * This inline helper-function uses completed descriptor to populate receive
- * buffer pointer and other "out" parameters. The function always succeeds.
+ * Retrieve protocol data from the completed 3-buffer-mode Rx descriptor.
+ * This inline helper-function uses the completed descriptor to populate receive
+ * buffer pointer and other "out" parameters. The function always succeeds.
*
- * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get().
+ * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_5b_get().
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void
-xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
dma_addr_t dma_pointers[], int sizes[])
{
xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;
- dma_pointers[0] = rxdp->buffer0_ptr;
+ dma_pointers[0] = rxdp->buffer0_ptr;
sizes[0] = XGE_HAL_RXD_3_GET_BUFFER0_SIZE(rxdp->control_2);
- dma_pointers[1] = rxdp->buffer1_ptr;
+ dma_pointers[1] = rxdp->buffer1_ptr;
sizes[1] = XGE_HAL_RXD_3_GET_BUFFER1_SIZE(rxdp->control_2);
- dma_pointers[2] = rxdp->buffer2_ptr;
+ dma_pointers[2] = rxdp->buffer2_ptr;
sizes[2] = XGE_HAL_RXD_3_GET_BUFFER2_SIZE(rxdp->control_2);
+
+ ((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
+ sizes[2];
}
/**
* xge_hal_ring_dtr_5b_set - Prepare 5-buffer-mode descriptor.
* @dtrh: Descriptor handle.
- * @dma_pointers: Array of DMA addresses. Contains exactly 5 receive buffers
- * _this_ descriptor should carry.
- * Note that by the time xge_hal_ring_dtr_5b_set
- * is called, the receive buffers should be mapped
- * to the corresponding Xframe device.
- * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
- * buffer from @dma_pointers.
- *
- * Prepare 3-buffer-mode Rx descriptor for posting (via
+ * @dma_pointers: Array of DMA addresses. Contains exactly 5 receive buffers
+ * _this_ descriptor should carry.
+ * Note that by the time xge_hal_ring_dtr_5b_set
+ * is called, the receive buffers should be mapped
+ * to the corresponding Xframe device.
+ * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
+ * buffer from @dma_pointers.
+ *
+ * Prepare 5-buffer-mode Rx descriptor for posting (via
* xge_hal_ring_dtr_post()).
- * This inline helper-function does not return any parameters and always
+ * This inline helper-function does not return any parameters and always
* succeeds.
*
- * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_3b_set().
+ * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_3b_set().
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void
-xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
- int sizes[])
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
+ int sizes[])
{
xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;
- rxdp->buffer0_ptr = dma_pointers[0];
- rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER0_SIZE);
- rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER0_SIZE(sizes[0]);
- rxdp->buffer1_ptr = dma_pointers[1];
- rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER1_SIZE);
- rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER1_SIZE(sizes[1]);
- rxdp->buffer2_ptr = dma_pointers[2];
- rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER2_SIZE);
- rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER2_SIZE(sizes[2]);
- rxdp->buffer3_ptr = dma_pointers[3];
- rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER3_SIZE);
- rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER3_SIZE(sizes[3]);
- rxdp->buffer4_ptr = dma_pointers[4];
- rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER4_SIZE);
- rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER3_SIZE(sizes[4]);
+ rxdp->buffer0_ptr = dma_pointers[0];
+ rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER0_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER0_SIZE(sizes[0]);
+ rxdp->buffer1_ptr = dma_pointers[1];
+ rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER1_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER1_SIZE(sizes[1]);
+ rxdp->buffer2_ptr = dma_pointers[2];
+ rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER2_SIZE);
+ rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER2_SIZE(sizes[2]);
+ rxdp->buffer3_ptr = dma_pointers[3];
+ rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER3_SIZE);
+ rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER3_SIZE(sizes[3]);
+ rxdp->buffer4_ptr = dma_pointers[4];
+ rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER4_SIZE);
+ rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER4_SIZE(sizes[4]);
}
/**
@@ -421,91 +417,99 @@ xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
* descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
- * @dma_pointers: DMA addresses of the 5 receive buffers _this_ descriptor
- * carries. The first 4 buffers contains L2 (ethernet) through
- * L5 headers. The 5th buffer contain received (applicaion)
- * data. Returned by HAL.
- * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
+ * @dma_pointers: DMA addresses of the 5 receive buffers _this_ descriptor
+ * carries. The first 4 buffers contain L2 (ethernet) through
+ * L5 headers. The 5th buffer contains received (application)
+ * data. Returned by HAL.
+ * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
* buffer from @dma_pointers. Returned by HAL.
*
- * Retrieve protocol data from the completed 5-buffer-mode Rx descriptor.
- * This inline helper-function uses completed descriptor to populate receive
- * buffer pointer and other "out" parameters. The function always succeeds.
+ * Retrieve protocol data from the completed 5-buffer-mode Rx descriptor.
+ * This inline helper-function uses the completed descriptor to populate receive
+ * buffer pointer and other "out" parameters. The function always succeeds.
*
- * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get().
+ * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_3b_get().
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void
-xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
dma_addr_t dma_pointers[], int sizes[])
{
xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;
- dma_pointers[0] = rxdp->buffer0_ptr;
+ dma_pointers[0] = rxdp->buffer0_ptr;
sizes[0] = XGE_HAL_RXD_5_GET_BUFFER0_SIZE(rxdp->control_2);
- dma_pointers[1] = rxdp->buffer1_ptr;
+ dma_pointers[1] = rxdp->buffer1_ptr;
sizes[1] = XGE_HAL_RXD_5_GET_BUFFER1_SIZE(rxdp->control_2);
- dma_pointers[2] = rxdp->buffer2_ptr;
+ dma_pointers[2] = rxdp->buffer2_ptr;
sizes[2] = XGE_HAL_RXD_5_GET_BUFFER2_SIZE(rxdp->control_2);
- dma_pointers[3] = rxdp->buffer3_ptr;
+ dma_pointers[3] = rxdp->buffer3_ptr;
sizes[3] = XGE_HAL_RXD_5_GET_BUFFER3_SIZE(rxdp->control_3);
- dma_pointers[4] = rxdp->buffer4_ptr;
+ dma_pointers[4] = rxdp->buffer4_ptr;
sizes[4] = XGE_HAL_RXD_5_GET_BUFFER4_SIZE(rxdp->control_3);
+
+ ((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
+ sizes[2] + sizes[3] + sizes[4];
}
/**
- * FIXME - document
+ * xge_hal_ring_dtr_pre_post - Prepare descriptor for posting.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ *
+ * Marks the descriptor not-completed and enqueues it on the channel's
+ * work array; xge_hal_ring_dtr_post_post() then hands it to the device.
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void
-xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
-#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
- xge_hal_ring_rxd_priv_t *priv;
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ xge_hal_ring_rxd_priv_t *priv;
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#endif
-#if defined(XGE_HAL_RX_MULTI_POST_IRQ)
+#if defined(XGE_HAL_RX_MULTI_POST_IRQ)
unsigned long flags;
#endif
- rxdp->control_2 |= XGE_HAL_RXD_NOT_COMPLETED;
+ rxdp->control_2 |= XGE_HAL_RXD_NOT_COMPLETED;
#ifdef XGE_DEBUG_ASSERT
- /* make sure Xena overwrites the (illegal) t_code on completion */
- XGE_HAL_RXD_SET_T_CODE(rxdp->control_1, XGE_HAL_RXD_T_CODE_UNUSED_C);
+ /* make sure Xena overwrites the (illegal) t_code on completion */
+ XGE_HAL_RXD_SET_T_CODE(rxdp->control_1, XGE_HAL_RXD_T_CODE_UNUSED_C);
#endif
- xge_debug_ring(XGE_TRACE, "posted %d rxd 0x%llx post_qid %d",
+ xge_debug_ring(XGE_TRACE, "posted %d rxd 0x"XGE_OS_LLXFMT" post_qid %d",
((xge_hal_ring_t *)channelh)->channel.post_index,
(unsigned long long)(ulong_t)dtrh,
((xge_hal_ring_t *)channelh)->channel.post_qid);
-#if defined(XGE_HAL_RX_MULTI_POST)
+#if defined(XGE_HAL_RX_MULTI_POST)
xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
flags);
#endif
-#if defined(XGE_DEBUG_ASSERT) && defined(XGE_HAL_RING_ENFORCE_ORDER)
+#if defined(XGE_DEBUG_ASSERT) && defined(XGE_HAL_RING_ENFORCE_ORDER)
{
- xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
+ xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
- if (channel->post_index != 0) {
+ if (channel->post_index != 0) {
xge_hal_dtr_h prev_dtrh;
- xge_hal_ring_rxd_priv_t *rxdp_priv;
+ xge_hal_ring_rxd_priv_t *rxdp_priv;
- rxdp_priv = __hal_ring_rxd_priv(channelh, rxdp);
- prev_dtrh = channel->work_arr[channel->post_index - 1];
+ rxdp_priv = __hal_ring_rxd_priv(channelh, rxdp);
+ prev_dtrh = channel->work_arr[channel->post_index - 1];
- if ((rxdp_priv->dma_offset & (~0xFFF)) !=
+ if (prev_dtrh != NULL &&
+ (rxdp_priv->dma_offset & (~0xFFF)) !=
rxdp_priv->dma_offset) {
xge_assert((char *)prev_dtrh +
- ((xge_hal_ring_t*)channel)->rxd_size == dtrh);
+ ((xge_hal_ring_t*)channel)->rxd_size == dtrh);
}
}
}
@@ -513,137 +517,143 @@ xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
__hal_channel_dtr_post(channelh, dtrh);
-#if defined(XGE_HAL_RX_MULTI_POST)
+#if defined(XGE_HAL_RX_MULTI_POST)
xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
- flags);
+ flags);
#endif
}
/**
- * FIXME - document
+ * xge_hal_ring_dtr_post_post - Complete the descriptor post.
+ * @channelh: Channel handle.
+ * @dtrh: Descriptor handle.
+ *
+ * Sets the POSTED bit (and, where required, syncs the RxD to the device),
+ * transferring ownership of a descriptor previously prepared by
+ * xge_hal_ring_dtr_pre_post() to the Xframe device.
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void
+__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
-#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
- xge_hal_ring_rxd_priv_t *priv;
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ xge_hal_ring_rxd_priv_t *priv;
#endif
/* do POST */
- rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;
+ rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;
-#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
priv = __hal_ring_rxd_priv(ring, rxdp);
xge_os_dma_sync(ring->channel.pdev,
- priv->dma_handle, priv->dma_addr,
- priv->dma_offset, ring->rxd_size,
- XGE_OS_DMA_DIR_TODEVICE);
+ priv->dma_handle, priv->dma_addr,
+ priv->dma_offset, ring->rxd_size,
+ XGE_OS_DMA_DIR_TODEVICE);
#endif
+ if (ring->channel.usage_cnt > 0)
+ ring->channel.usage_cnt--;
}
/**
- * xge_hal_ring_dtr_post - Post descriptor on the ring channel.
+ * xge_hal_ring_dtr_post - Post descriptor on the ring channel.
* @channelh: Channel handle.
* @dtrh: Descriptor obtained via xge_hal_ring_dtr_reserve().
*
- * Post descriptor on the 'ring' type channel.
- * Prior to posting the descriptor should be filled in accordance with
- * Host/Xframe interface specification for a given service (LL, etc.).
+ * Post descriptor on the 'ring' type channel.
+ * Prior to posting the descriptor should be filled in accordance with
+ * Host/Xframe interface specification for a given service (LL, etc.).
*
* See also: xge_hal_fifo_dtr_post_many(), xge_hal_fifo_dtr_post().
* Usage: See ex_post_all_rx{}.
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void
-xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
- xge_hal_ring_dtr_pre_post(channelh, dtrh);
+ xge_hal_ring_dtr_pre_post(channelh, dtrh);
xge_hal_ring_dtr_post_post(channelh, dtrh);
}
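
The pre_post/post_post split lets a ULD prepare several descriptors first and then hand them to the device in order; xge_hal_ring_dtr_post() above is simply the single-descriptor composition of the two. A sketch of one plausible batched usage, assuming dtrs[0..n-1] were reserved and filled:

    int i;

    for (i = 0; i < n; i++)
            xge_hal_ring_dtr_pre_post(channelh, dtrs[i]);   /* enqueue */
    for (i = 0; i < n; i++)
            xge_hal_ring_dtr_post_post(channelh, dtrs[i]);  /* hand to device */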
/**
- * xge_hal_ring_dtr_next_completed - Get the _next_ completed
+ * xge_hal_ring_dtr_next_completed - Get the _next_ completed
* descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle. Returned by HAL.
- * @t_code: Transfer code, as per Xframe User Guide,
- * Receive Descriptor Format. Returned by HAL.
+ * @t_code: Transfer code, as per Xframe User Guide,
+ * Receive Descriptor Format. Returned by HAL.
*
- * Retrieve the _next_ completed descriptor.
- * HAL uses channel callback (*xge_hal_channel_callback_f) to notifiy
- * upper-layer driver (ULD) of new completed descriptors. After that
+ * Retrieve the _next_ completed descriptor.
+ * HAL uses channel callback (*xge_hal_channel_callback_f) to notify
+ * upper-layer driver (ULD) of new completed descriptors. After that
* the ULD can use xge_hal_ring_dtr_next_completed to retrieve the remaining
- * completions (the very first completion is passed by HAL via
+ * completions (the very first completion is passed by HAL via
* xge_hal_channel_callback_f).
*
- * Implementation-wise, the upper-layer driver is free to call
+ * Implementation-wise, the upper-layer driver is free to call
* xge_hal_ring_dtr_next_completed either immediately from inside the
- * channel callback, or in a deferred fashion and separate (from HAL)
+ * channel callback, or in a deferred fashion and separate (from HAL)
* context.
*
- * Non-zero @t_code means failure to fill-in receive buffer(s)
+ * Non-zero @t_code means failure to fill-in receive buffer(s)
* of the descriptor.
- * For instance, parity error detected during the data transfer.
- * In this case Xframe will complete the descriptor and indicate
- * for the host that the received data is not to be used.
- * For details please refer to Xframe User Guide.
+ * For instance, a parity error detected during the data transfer.
+ * In this case Xframe will complete the descriptor and indicate
+ * to the host that the received data is not to be used.
+ * For details please refer to the Xframe User Guide.
*
- * Returns: XGE_HAL_OK - success.
- * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
* are currently available for processing.
*
* See also: xge_hal_channel_callback_f{},
* xge_hal_fifo_dtr_next_completed(), xge_hal_status_e{}.
* Usage: See ex_rx_compl{}.
*/
-__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
-xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
+xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
u8 *t_code)
{
- xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
+ xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
-#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
- xge_hal_ring_rxd_priv_t *priv;
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ xge_hal_ring_rxd_priv_t *priv;
#endif
__hal_channel_dtr_try_complete(ring, dtrh);
rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
- if (rxdp == NULL) {
+ if (rxdp == NULL) {
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
-#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
- /* Note: 24 bytes at most means:
- * - Control_3 in case of 5-buffer mode
- * - Control_1 and Control_2
+#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
+ /* Note: 24 bytes at most means:
+ * - Control_3 in case of 5-buffer mode
+ * - Control_1 and Control_2
*
- * This is the only length needs to be invalidated
- * type of channels.*/
+ * This is the only length that needs to be invalidated
+ * for this type of channel. */
priv = __hal_ring_rxd_priv(ring, rxdp);
xge_os_dma_sync(ring->channel.pdev,
- priv->dma_handle, priv->dma_addr,
- priv->dma_offset, 24,
- XGE_OS_DMA_DIR_FROMDEVICE);
+ priv->dma_handle, priv->dma_addr,
+ priv->dma_offset, 24,
+ XGE_OS_DMA_DIR_FROMDEVICE);
#endif
- /* check whether it is not the end */
- if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
- !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {
-#ifndef XGE_HAL_IRQ_POLLING
- if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
- /* reset it. since we don't want to return
+ /* check whether it is not the end */
+ if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
+ !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {
+#ifndef XGE_HAL_IRQ_POLLING
+ if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
+ /* reset it. since we don't want to return
* garbage to the ULD */
- *dtrh = 0;
- return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+ *dtrh = 0;
+ return XGE_HAL_COMPLETIONS_REMAIN;
}
#endif
#ifdef XGE_DEBUG_ASSERT
-#if defined(XGE_HAL_USE_5B_MODE)
-#if !defined(XGE_OS_PLATFORM_64BIT)
+#if defined(XGE_HAL_USE_5B_MODE)
+#if !defined(XGE_OS_PLATFORM_64BIT)
if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
xge_assert(((xge_hal_ring_rxd_5_t *)
rxdp)->host_control!=0);
@@ -657,45 +667,49 @@ xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
__hal_channel_dtr_complete(ring);
- *t_code = (u8)XGE_HAL_RXD_GET_T_CODE(rxdp->control_1);
+ *t_code = (u8)XGE_HAL_RXD_GET_T_CODE(rxdp->control_1);
- /* see XGE_HAL_SET_RXD_T_CODE() above.. */
+ /* see XGE_HAL_SET_RXD_T_CODE() above.. */
xge_assert(*t_code != XGE_HAL_RXD_T_CODE_UNUSED_C);
xge_debug_ring(XGE_TRACE,
- "compl_index %d post_qid %d rxd 0x%llx",
+ "compl_index %d post_qid %d rxd 0x"XGE_OS_LLXFMT,
((xge_hal_channel_t*)ring)->compl_index,
((xge_hal_channel_t*)ring)->post_qid,
(unsigned long long)(ulong_t)rxdp);
+ ring->channel.usage_cnt++;
+ if (ring->channel.stats.usage_max < ring->channel.usage_cnt)
+ ring->channel.stats.usage_max = ring->channel.usage_cnt;
+
return XGE_HAL_OK;
}
- /* reset it. since we don't want to return
+ /* reset it. since we don't want to return
* garbage to the ULD */
- *dtrh = 0;
+ *dtrh = 0;
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
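
A sketch of the ex_rx_compl{} drain loop the documentation refers to, for 1-buffer mode; uld_rx_packet() is a hypothetical ULD routine, and a real driver would also replenish the ring with fresh buffers:

    xge_hal_dtr_h dtrh;
    u8 t_code;

    while (xge_hal_ring_dtr_next_completed(channelh, &dtrh, &t_code) ==
        XGE_HAL_OK) {
            dma_addr_t dma;
            int len;

            xge_hal_ring_dtr_1b_get(channelh, dtrh, &dma, &len);
            if (t_code == 0)                /* non-zero: data is bad, drop */
                    uld_rx_packet(dma, len);
            xge_hal_ring_dtr_free(channelh, dtrh);  /* recycle the RxD */
    }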
/**
- * xge_hal_ring_dtr_free - Free descriptor.
+ * xge_hal_ring_dtr_free - Free descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
*
- * Free the reserved descriptor. This operation is "symmetrical" to
- * xge_hal_ring_dtr_reserve. The "free-ing" completes the descriptor's
+ * Free the reserved descriptor. This operation is "symmetrical" to
+ * xge_hal_ring_dtr_reserve. The "free-ing" completes the descriptor's
* lifecycle.
*
- * After free-ing (see xge_hal_ring_dtr_free()) the descriptor again can
+ * After free-ing (see xge_hal_ring_dtr_free()) the descriptor again can
* be:
*
* - reserved (xge_hal_ring_dtr_reserve);
*
- * - posted (xge_hal_ring_dtr_post);
+ * - posted (xge_hal_ring_dtr_post);
*
* - completed (xge_hal_ring_dtr_next_completed);
*
- * - and recycled again (xge_hal_ring_dtr_free).
+ * - and recycled again (xge_hal_ring_dtr_free).
*
* For alternative state transitions and more details please refer to
* the design doc.
@@ -703,14 +717,14 @@ xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
* See also: xge_hal_ring_dtr_reserve(), xge_hal_fifo_dtr_free().
* Usage: See ex_rx_compl{}.
*/
-__HAL_STATIC_RING __HAL_INLINE_RING void
-xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
+__HAL_STATIC_RING __HAL_INLINE_RING void
+xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
-#if defined(XGE_HAL_RX_MULTI_FREE_IRQ)
+#if defined(XGE_HAL_RX_MULTI_FREE_IRQ)
unsigned long flags;
#endif
-#if defined(XGE_HAL_RX_MULTI_FREE)
+#if defined(XGE_HAL_RX_MULTI_FREE)
xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
@@ -718,14 +732,60 @@ xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
#endif
__hal_channel_dtr_free(channelh, dtrh);
-#if defined(XGE_OS_MEMORY_CHECK)
+#if defined(XGE_OS_MEMORY_CHECK)
__hal_ring_rxd_priv(channelh, dtrh)->allocated = 0;
#endif
-#if defined(XGE_HAL_RX_MULTI_FREE)
+#if defined(XGE_HAL_RX_MULTI_FREE)
xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
flags);
#endif
}
+
+/**
+ * xge_hal_ring_is_next_dtr_completed - Check if the next dtr is completed
+ * @channelh: Channel handle.
+ *
+ * Checks if the _next_ completed descriptor is in host memory.
+ *
+ * Returns: XGE_HAL_OK - success.
+ * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
+ * are currently available for processing.
+ */
+__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
+xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh)
+{
+ xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
+ xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
+ xge_hal_dtr_h dtrh;
+
+ __hal_channel_dtr_try_complete(ring, &dtrh);
+ rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
+ if (rxdp == NULL) {
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+ }
+
+ /* check whether it is not the end */
+ if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
+ !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {
+
+#ifdef XGE_DEBUG_ASSERT
+#if defined(XGE_HAL_USE_5B_MODE)
+#if !defined(XGE_OS_PLATFORM_64BIT)
+ if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
+ xge_assert(((xge_hal_ring_rxd_5_t *)
+ rxdp)->host_control!=0);
+ }
+#endif
+
+#else
+ xge_assert(rxdp->host_control!=0);
+#endif
+#endif
+ return XGE_HAL_OK;
+ }
+
+ return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+}
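
A sketch of using the new peek from a polling loop: check whether work is pending before committing to a drain pass (uld_drain_ring() is a hypothetical routine built on xge_hal_ring_dtr_next_completed()):

    if (xge_hal_ring_is_next_dtr_completed(channelh) == XGE_HAL_OK) {
            /* at least one completed RxD is already in host memory */
            uld_drain_ring(channelh);
    }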
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-ring.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-ring.c
index 96231542c0..fed0e99aee 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-ring.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-ring.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : hal-ring.c
*
- * Description: Rx ring object implementation
- *
- * Created: 10 May 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#include "xgehal-ring.h"
@@ -61,14 +52,16 @@ __hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
ptrdiff_t dma_item_offset;
/* get owner memblock index */
- memblock_idx = __hal_ring_block_memblock_idx(item);
+ memblock_idx = __hal_ring_block_memblock_idx((xge_hal_ring_block_t *) item);
/* get owner memblock by memblock index */
- memblock = __hal_mempool_memblock(mempoolh, memblock_idx);
+ memblock = __hal_mempool_memblock((xge_hal_mempool_t *) mempoolh,
+ memblock_idx);
/* get memblock DMA object by memblock index */
memblock_dma_object =
- __hal_mempool_memblock_dma(mempoolh, memblock_idx);
+ __hal_mempool_memblock_dma((xge_hal_mempool_t *) mempoolh,
+ memblock_idx);
/* calculate offset in the memblock of this item */
dma_item_offset = (char*)item - (char*)memblock;
@@ -87,11 +80,13 @@ __hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
pci_dma_h to_dma_handle, from_dma_handle;
/* get "from" RxD block */
- from_item = __hal_mempool_item(mempoolh, from);
+ from_item = (xge_hal_ring_block_t *)
+ __hal_mempool_item((xge_hal_mempool_t *) mempoolh, from);
xge_assert(from_item);
/* get "to" RxD block */
- to_item = __hal_mempool_item(mempoolh, to);
+ to_item = (xge_hal_ring_block_t *)
+ __hal_mempool_item((xge_hal_mempool_t *) mempoolh, to);
xge_assert(to_item);
/* return address of the beginning of previous RxD block */
@@ -116,7 +111,7 @@ __hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
XGE_OS_DMA_DIR_TODEVICE);
#endif
- xge_debug_ring(XGE_TRACE, "block%d:0x%llx => block%d:0x%llx",
+ xge_debug_ring(XGE_TRACE, "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT,
from, (unsigned long long)from_dma, to,
(unsigned long long)to_dma);
}
@@ -153,8 +148,9 @@ __hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
* the memblock. For instance, in case of three RxD-blocks
* per memblock this value can be 0,1 or 2. */
rxdblock_priv =
- __hal_mempool_item_priv(mempoolh, memblock_index, item,
- &memblock_item_idx);
+ __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
+ memblock_index, item,
+ &memblock_item_idx);
rxdp = (xge_hal_ring_rxd_1_t *)
ring->reserved_rxds_arr[reserve_index];
rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *)
@@ -194,7 +190,7 @@ __hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
#endif
}
- __hal_ring_block_memblock_idx_set(item, memblock_index);
+ __hal_ring_block_memblock_idx_set((xge_hal_ring_block_t *) item, memblock_index);
if (is_last) {
/* link last one with first one */
@@ -215,7 +211,7 @@ __hal_ring_initial_replenish(xge_hal_channel_t *channel,
{
xge_hal_dtr_h dtr;
- while (__hal_channel_dtr_count(channel) > 0) {
+ while (xge_hal_channel_dtr_count(channel) > 0) {
xge_hal_status_e status;
status = xge_hal_ring_dtr_reserve(channel, &dtr);
@@ -282,8 +278,9 @@ __hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
/* calculate actual RxD block private size */
ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
- ring->reserved_rxds_arr = xge_os_malloc(ring->channel.pdev,
+ ring->reserved_rxds_arr = (void **) xge_os_malloc(ring->channel.pdev,
sizeof(void*) * queue->max * ring->rxds_per_block);
+
if (ring->reserved_rxds_arr == NULL) {
__hal_ring_close(channelh);
return XGE_HAL_ERR_OUT_OF_MEMORY;
@@ -327,14 +324,19 @@ __hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
* Currently we don't have a case when the 1) is done without the 2).
*/
if (ring->channel.dtr_init) {
- if ((status = __hal_ring_initial_replenish(channelh,
- XGE_HAL_CHANNEL_OC_NORMAL))
- != XGE_HAL_OK) {
+ if ((status = __hal_ring_initial_replenish(
+ (xge_hal_channel_t *) channelh,
+ XGE_HAL_CHANNEL_OC_NORMAL))
+ != XGE_HAL_OK) {
__hal_ring_close(channelh);
return status;
}
}
+ /* initial replenish will increment the counter in its post() routine,
+ * we have to reset it */
+ ring->channel.usage_cnt = 0;
+
return XGE_HAL_OK;
}
@@ -408,7 +410,7 @@ __hal_ring_prc_enable(xge_hal_channel_h channelh)
xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);
- xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x%llx initialized",
+ xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized",
ring->channel.post_qid, (unsigned long long)val64);
val64 = xge_os_pio_mem_read64(ring->channel.pdev,
@@ -429,8 +431,12 @@ __hal_ring_prc_enable(xge_hal_channel_h channelh)
/* Herc: always use group_reads */
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
- val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;
+ val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;
+ if (hldev->config.bimodal_interrupts)
+ if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
+ val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT;
+
xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
@@ -489,7 +495,7 @@ __hal_ring_hw_initialize(xge_hal_device_h devh)
}
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->rx_queue_priority);
- xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x%llx",
+ xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
/* Configuring ring queues according to per-ring configuration */
@@ -501,25 +507,30 @@ __hal_ring_hw_initialize(xge_hal_device_h devh)
}
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->rx_queue_cfg);
- xge_debug_ring(XGE_TRACE, "DRAM configured to 0x%llx",
- (unsigned long long)val64);
-
- /* Activate Rx steering */
- val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
- &bar0->rts_qos_steering);
- for (j = 0; j < 8 /* QoS max */; j++) {
- for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
- if (!hldev->config.ring.queue[i].configured)
- continue;
- if (!hldev->config.ring.queue[i].rth_en)
- val64 |= (BIT(i) >> (j*8));
- }
- }
- xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
- &bar0->rts_qos_steering);
- xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x%llx",
+ xge_debug_ring(XGE_TRACE, "DRAM configured to 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
+ if (!hldev->config.rts_qos_steering_config) {
+
+ /* Activate Rx steering */
+ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
+ &bar0->rts_qos_steering);
+ for (j = 0; j < 8 /* QoS max */; j++)
+ {
+ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++)
+ {
+ if (!hldev->config.ring.queue[i].configured)
+ continue;
+ if (!hldev->config.ring.queue[i].rth_en)
+ val64 |= (BIT(i) >> (j*8));
+ }
+ }
+ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
+ &bar0->rts_qos_steering);
+ xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x"XGE_OS_LLXFMT,
+ (unsigned long long)val64);
+
+ }
/* Note: If a queue does not exist, it should be assigned a maximum
* length of zero. Otherwise, packet loss could occur.
* P. 4-4 User guide.
@@ -568,7 +579,7 @@ __hal_ring_hw_initialize(xge_hal_device_h devh)
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->mc_rldram_mrs_herc);
- xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x%llx",
+ xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
val64 = 0x0003570003010300ULL;
diff --git a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-stats.c b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-stats.c
index 830fb367ed..cc2b65f770 100644
--- a/usr/src/uts/common/io/xge/hal/xgehal/xgehal-stats.c
+++ b/usr/src/uts/common/io/xge/hal/xgehal/xgehal-stats.c
@@ -17,17 +17,8 @@
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2002-2005 Neterion, Inc.
- * All right Reserved.
- *
- * FileName : xgehal-stats.c
- *
- * Description: statistics object implementation
*
- * Created: 2 June 2004
+ * Copyright (c) 2002-2006 Neterion, Inc.
*/
#include "xgehal-stats.h"
@@ -57,11 +48,12 @@ __hal_stats_initialize (xge_hal_stats_t *stats, xge_hal_device_h devh)
dma_flags |= XGE_OS_DMA_STREAMING;
#endif
- stats->hw_info = xge_os_dma_malloc(hldev->pdev,
- sizeof(xge_hal_stats_hw_info_t),
+ stats->hw_info = (xge_hal_stats_hw_info_t *) xge_os_dma_malloc(hldev->pdev,
+ sizeof(xge_hal_stats_hw_info_t),
dma_flags,
&stats->hw_info_dmah,
- &stats->hw_info_dma_acch);
+ &stats->hw_info_dma_acch);
+
if (stats->hw_info == NULL) {
xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
return XGE_HAL_ERR_OUT_OF_MEMORY;
@@ -85,7 +77,7 @@ __hal_stats_initialize (xge_hal_stats_t *stats, xge_hal_device_h devh)
#endif
);
if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
- xge_debug_stats(XGE_ERR, "can not map vaddr 0x%llx to DMA",
+ xge_debug_stats(XGE_ERR, "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
(unsigned long long)(ulong_t)stats->hw_info);
xge_os_dma_free(hldev->pdev,
stats->hw_info,
@@ -146,7 +138,7 @@ __hal_stats_disable (xge_hal_stats_t *stats)
(void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->stat_cfg);
- xge_debug_stats(XGE_TRACE, "stats disabled at 0x%llx",
+ xge_debug_stats(XGE_TRACE, "stats disabled at 0x"XGE_OS_LLXFMT,
(unsigned long long)stats->dma_addr);
stats->is_enabled = 0;
@@ -220,12 +212,12 @@ __hal_stats_enable (xge_hal_stats_t *stats)
#ifdef XGE_HAL_HERC_EMULATION
/*
- * The clocks in the emulator are running ~1000 times slower than real world,
+ * The clocks in the emulator are running ~1000 times slower than real world,
* so the stats transfer will occur ~1000 times less frequent.
- * STAT_CFG.STAT_TRSF_PERIOD should be set to 0x20C for Hercules emulation
+ * STAT_CFG.STAT_TRSF_PERIOD should be set to 0x20C for Hercules emulation
* (stats transferred every 0.5 sec).
- */
-
+ */
+
val64 = (0x20C | XGE_HAL_STAT_CFG_STAT_RO | XGE_HAL_STAT_CFG_STAT_EN);
#else
val64 = XGE_HAL_SET_UPDT_PERIOD(refresh_time_pci_clocks) |
@@ -236,7 +228,7 @@ __hal_stats_enable (xge_hal_stats_t *stats)
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->stat_cfg);
- xge_debug_stats(XGE_TRACE, "stats enabled at 0x%llx",
+ xge_debug_stats(XGE_TRACE, "stats enabled at 0x"XGE_OS_LLXFMT,
(unsigned long long)stats->dma_addr);
stats->is_enabled = 1;
@@ -555,9 +547,20 @@ xge_hal_stats_channel(xge_hal_channel_h channelh,
hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt;
if (hldev->stats.sw_dev_info_stats.traffic_intr_cnt) {
- channel->stats.avg_compl_per_intr_cnt =
- channel->stats.total_compl_cnt /
- hldev->stats.sw_dev_info_stats.traffic_intr_cnt;
+ int rxcnt = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt;
+ int txcnt = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt;
+ if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
+ if (!txcnt)
+ txcnt = 1;
+ channel->stats.avg_compl_per_intr_cnt =
+ channel->stats.total_compl_cnt / txcnt;
+ } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING &&
+ !hldev->config.bimodal_interrupts) {
+ if (!rxcnt)
+ rxcnt = 1;
+ channel->stats.avg_compl_per_intr_cnt =
+ channel->stats.total_compl_cnt / rxcnt;
+ }
if (channel->stats.avg_compl_per_intr_cnt == 0) {
/* to not confuse user */
channel->stats.avg_compl_per_intr_cnt = 1;
diff --git a/usr/src/uts/common/os/strsubr.c b/usr/src/uts/common/os/strsubr.c
index ae99e5198a..db039e241e 100644
--- a/usr/src/uts/common/os/strsubr.c
+++ b/usr/src/uts/common/os/strsubr.c
@@ -8301,7 +8301,8 @@ hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
*end = (uint32_t)DB_CKSUMEND(mp);
if (value != NULL)
*value = (uint32_t)DB_CKSUM16(mp);
- }
+ } else if ((*flags & HW_LSO) && (value != NULL))
+ *value = (uint32_t)DB_LSOMSS(mp);
}
} else {
pattrinfo_t hck_attr = {PATTR_HCKSUM};
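
With this branch in place, a transmitting driver recovers the LSO MSS through the same hcksum_retrieve() call it already uses for checksum offload. A sketch, where mp is the outbound message:

    uint32_t start, stuff, end, value, flags;

    hcksum_retrieve(mp, NULL, NULL, &start, &stuff, &end, &value, &flags);
    if (flags & HW_LSO) {
            uint32_t mss = value;   /* DB_LSOMSS(mp), per the new branch */
            /* program the hardware to segment mp into MSS-sized frames */
    }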
diff --git a/usr/src/uts/common/sys/dld_impl.h b/usr/src/uts/common/sys/dld_impl.h
index c3274aea0b..bd61db8dc4 100644
--- a/usr/src/uts/common/sys/dld_impl.h
+++ b/usr/src/uts/common/sys/dld_impl.h
@@ -170,6 +170,12 @@ struct dld_str {
boolean_t ds_soft_ring;
/*
+ * LSO is enabled if ds_lso is set.
+ */
+ boolean_t ds_lso;
+ uint64_t ds_lso_max;
+
+ /*
* State of DLPI user: may be active (regular network layer),
* passive (snoop-like monitoring), or unknown (not yet
* determined).
diff --git a/usr/src/uts/common/sys/dlpi.h b/usr/src/uts/common/sys/dlpi.h
index b6f3715289..3ffb8b5762 100644
--- a/usr/src/uts/common/sys/dlpi.h
+++ b/usr/src/uts/common/sys/dlpi.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -580,6 +579,8 @@ union DL_qos_types {
/* dl_data is dl_capab_dls_t */
#define DL_CAPAB_SOFT_RING 0x07 /* Soft ring capable */
/* dl_data is dl_capab_dls_t */
+#define DL_CAPAB_LSO 0x08 /* Large Send Offload capability */
+ /* dl_data is dl_capab_lso_t */
typedef struct {
t_uscalar_t dl_cap; /* capability type */
@@ -774,6 +775,30 @@ typedef struct {
#define DL_CAPAB_VMSAFE_MEM 0x01 /* Driver is zero-copy safe */
/* wrt VM named buffers on */
/* transmit */
+
+/*
+ * Large Send Offload sub-capability (follows dl_capability_sub_t)
+ */
+typedef struct {
+ t_uscalar_t lso_version; /* interface version */
+ t_uscalar_t lso_flags; /* capability flags */
+ t_uscalar_t lso_max; /* maximum payload */
+ t_uscalar_t reserved[1]; /* reserved fields */
+ dl_mid_t lso_mid; /* module ID */
+} dl_capab_lso_t;
+
+/*
+ * Large Send Offload revision definition history
+ */
+#define LSO_CURRENT_VERSION 0x01
+#define LSO_VERSION_1 0x01
+
+/*
+ * Currently supported values of lso_flags
+ */
+#define LSO_TX_ENABLE 0x01 /* to enable LSO */
+#define LSO_TX_BASIC_TCP_IPV4 0x02 /* TCP LSO capability */
+
/*
* DLPI interface primitive definitions.
*
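
A sketch of how a provider might fill the new sub-capability when advertising LSO in a DL_CAPABILITY_ACK; the 64 KB maximum is illustrative:

    dl_capab_lso_t lso;

    lso.lso_version = LSO_VERSION_1;
    lso.lso_flags = LSO_TX_ENABLE | LSO_TX_BASIC_TCP_IPV4;
    lso.lso_max = 65535;            /* largest TCP payload per LSO send */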
diff --git a/usr/src/uts/common/sys/mac.h b/usr/src/uts/common/sys/mac.h
index ee51f24423..ed11df7e4a 100644
--- a/usr/src/uts/common/sys/mac.h
+++ b/usr/src/uts/common/sys/mac.h
@@ -162,6 +162,27 @@ typedef struct mac_info_s {
} mac_info_t;
/*
+ * LSO capability
+ */
+typedef struct lso_basic_tcp_ipv4_s {
+ t_uscalar_t lso_max; /* maximum payload */
+} lso_basic_tcp_ipv4_t;
+
+/*
+ * Future LSO capabilities can be added at the end of the mac_capab_lso_t.
+ * When such capability is added to the GLDv3 framework, the size of the
+ * mac_capab_lso_t it allocates and passes to the drivers increases. Older
+ * drivers will access only the (upper) sections of that structure, that is the
+ * sections carrying the capabilities they understand. This ensures the
+ * interface can be safely extended in a binary compatible way.
+ */
+typedef struct mac_capab_lso_s {
+ t_uscalar_t lso_flags;
+ lso_basic_tcp_ipv4_t lso_basic_tcp_ipv4;
+ /* Add future lso capabilities here */
+} mac_capab_lso_t;
+
+/*
* MAC layer capabilities. These capabilities are handled by the drivers'
* mc_capab_get() callbacks. Some capabilities require the driver to fill
* in a given data structure, and others are simply boolean capabilities.
@@ -172,7 +193,8 @@ typedef struct mac_info_s {
typedef enum {
MAC_CAPAB_HCKSUM = 0x01, /* data is a uint32_t for the txflags */
MAC_CAPAB_POLL = 0x02, /* boolean only, no data */
- MAC_CAPAB_MULTIADDRESS = 0x04 /* data is multiaddress_capab_t */
+ MAC_CAPAB_MULTIADDRESS = 0x04, /* data is multiaddress_capab_t */
+ MAC_CAPAB_LSO = 0x08 /* data is mac_capab_lso_t */
/* add new capabilities here */
} mac_capab_t;
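
On the GLDv3 side, a driver answers the new capability query from its mc_getcapab() entry point. A sketch; the function name and the 64 KB limit are illustrative, and xgell does the equivalent for the Xframe adapter:

    static boolean_t
    drv_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
    {
            switch (cap) {
            case MAC_CAPAB_LSO: {
                    mac_capab_lso_t *cap_lso = (mac_capab_lso_t *)cap_data;

                    cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
                    cap_lso->lso_basic_tcp_ipv4.lso_max = 65535;
                    return (B_TRUE);
            }
            default:
                    return (B_FALSE);
            }
    }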
diff --git a/usr/src/uts/common/sys/pattr.h b/usr/src/uts/common/sys/pattr.h
index 638f759c57..cac046d675 100644
--- a/usr/src/uts/common/sys/pattr.h
+++ b/usr/src/uts/common/sys/pattr.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -93,6 +92,12 @@ typedef struct pattr_hcksum_s {
/* check the attached h/w computed */
/* checksum value to determine if */
/* checksum was bad */
+/*
+ * Extended hardware offloading flags that also use hcksum_flags
+ */
+#define HW_LSO 0x10 /* On Transmit: hardware does LSO */
+ /* On Receive: N/A */
+
/*
* Structure used for zerocopy attribute.
diff --git a/usr/src/uts/common/sys/strsubr.h b/usr/src/uts/common/sys/strsubr.h
index 6301848676..10ce2e4012 100644
--- a/usr/src/uts/common/sys/strsubr.h
+++ b/usr/src/uts/common/sys/strsubr.h
@@ -1283,6 +1283,8 @@ extern int SAMESTR(queue_t *);
#define DB_CKSUMFLAGS(mp) ((mp)->b_datap->db_struioun.cksum.flags)
#define DB_CKSUM16(mp) ((mp)->b_datap->db_cksum16)
#define DB_CKSUM32(mp) ((mp)->b_datap->db_cksum32)
+#define DB_LSOFLAGS(mp) ((mp)->b_datap->db_struioun.cksum.flags)
+#define DB_LSOMSS(mp) ((mp)->b_datap->db_struioun.cksum.pad)
#endif /* _KERNEL */
#ifdef __cplusplus
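
These macros give the transmit path a place to stash the MSS alongside the existing checksum flags; a sketch of tagging an outbound message for LSO, where mss is the connection's send MSS:

    DB_LSOFLAGS(mp) |= HW_LSO;      /* ask the NIC to segment this message */
    DB_LSOMSS(mp) = (uint16_t)mss;  /* segment (MSS) size to use */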
diff --git a/usr/src/uts/intel/xge/Makefile b/usr/src/uts/intel/xge/Makefile
index 438f36ac65..6689f7a758 100644
--- a/usr/src/uts/intel/xge/Makefile
+++ b/usr/src/uts/intel/xge/Makefile
@@ -82,22 +82,13 @@ TRACE_CFLAGS = -DXGE_DEBUG_MODULE_MASK=0x00003010 \
-DXGE_DEBUG_TRACE_MASK=0x00000000 \
-DXGE_DEBUG_ERR_MASK=0x00003010
-#
-# ASSERT/DEBUG SECTION: Disable/enable assert and debug mode
-#
-ASSERT_CFLAGS = -DXGE_DEBUG_ASSERT
-
-#
-# FAST PATH SECTION: Will activate usage of inlines as a regular functions
-# on fast data path
-#FP_CFLAGS = -DXGE_DEBUG_FP=0xff
-
-CPPFLAGS += $(HAL_CFLAGS) $(TRACE_CFLAGS) $(ASSERT_CFLAGS) $(CCVERBOSE) \
+XGE_CFLAGS = $(HAL_CFLAGS) $(TRACE_CFLAGS) $(CCVERBOSE) \
-I$(UTSBASE)/common/io/xge/hal/include \
-I$(UTSBASE)/common/io/xge/hal/xgehal \
-I$(UTSBASE)/common/io/xge/drv -DSOLARIS
-CFLAGS += $(CPPFLAGS) -xc99=%all
+CFLAGS += $(XGE_CFLAGS) -xO4 -xcrossfile -xc99=%all
+CFLAGS64 += $(XGE_CFLAGS) -xO4 -xcrossfile -xc99=%all
#
# Driver depends on MAC & IP
@@ -106,7 +97,7 @@ LDFLAGS += -dy -N misc/mac -N drv/ip
# Lint flag
#
-LINTFLAGS += $(CPPFLAGS) -Xc99=%all
+LINTFLAGS += $(XGE_CFLAGS) -Xc99=%all
#
# For now, disable these lint checks; maintainers should endeavor
diff --git a/usr/src/uts/sparc/xge/Makefile b/usr/src/uts/sparc/xge/Makefile
index a8ec183a01..2d66030c07 100644
--- a/usr/src/uts/sparc/xge/Makefile
+++ b/usr/src/uts/sparc/xge/Makefile
@@ -82,22 +82,13 @@ TRACE_CFLAGS = -DXGE_DEBUG_MODULE_MASK=0x00003010 \
-DXGE_DEBUG_TRACE_MASK=0x00000000 \
-DXGE_DEBUG_ERR_MASK=0x00003010
-#
-# ASSERT/DEBUG SECTION: Disable/enable assert and debug mode
-#
-ASSERT_CFLAGS = -DXGE_DEBUG_ASSERT
-
-#
-# FAST PATH SECTION: Will activate usage of inlines as a regular functions
-# on fast data path
-#FP_CFLAGS = -DXGE_DEBUG_FP=0xff
-
-CPPFLAGS += $(HAL_CFLAGS) $(TRACE_CFLAGS) $(ASSERT_CFLAGS) $(CCVERBOSE) \
+XGE_CFLAGS = $(HAL_CFLAGS) $(TRACE_CFLAGS) $(CCVERBOSE) \
-I$(UTSBASE)/common/io/xge/hal/include \
-I$(UTSBASE)/common/io/xge/hal/xgehal \
-I$(UTSBASE)/common/io/xge/drv -DSOLARIS
-CFLAGS += $(CPPFLAGS) -xc99=%all
+CFLAGS += $(XGE_CFLAGS) -xO4 -xcrossfile -xc99=%all
+CFLAGS64 += $(XGE_CFLAGS) -xO4 -xcrossfile -xc99=%all
#
# Driver depends on MAC & IP
@@ -106,7 +97,7 @@ LDFLAGS += -dy -N misc/mac -N drv/ip
# Lint flag
#
-LINTFLAGS += $(CPPFLAGS) -Xc99=%all
+LINTFLAGS += $(XGE_CFLAGS) -Xc99=%all
#
# For now, disable these lint checks; maintainers should endeavor