author    Paul Winder <paul@winder.uk.net>  2020-07-06 12:24:40 +0100
committer Paul Winder <paul@winder.uk.net>  2020-07-17 16:39:13 +0100
commit    93f1cac532b220b8db1e7715dbb2ae7ec5fe8fe7 (patch)
tree      f43617035f3d143752ebeaa0bb6277256ceaadfc
parent    2ba19bafbe44c6a57d09e79cc5e11875088875bf (diff)
download  illumos-joyent-93f1cac532b220b8db1e7715dbb2ae7ec5fe8fe7.tar.gz
12933 Update i40e core code to support NVM v1.7
Portions contributed by: Krzysztof Galazka <krzysztof.galazka@intel.com>
Reviewed by: Robert Mustacchi <rm@fingolfin.org>
Reviewed by: Randy Fishel <randyf@sibernet.com>
Approved by: Garrett D'Amore <garrett@damore.org>
-rw-r--r--  exception_lists/cstyle | 4
-rw-r--r--  usr/src/uts/common/Makefile.files | 2
-rw-r--r--  usr/src/uts/common/io/i40e/core/README.illumos | 167
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_adminq.c | 44
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_adminq.h | 4
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_adminq_cmd.h | 180
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_alloc.h | 2
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_common.c | 366
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_dcb.c | 1385
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_dcb.h | 224
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_devids.h | 7
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_hmc.c | 3
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_hmc.h | 2
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_lan_hmc.c | 2
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_lan_hmc.h | 2
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_nvm.c | 450
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_prototype.h | 58
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_register.h | 4
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_status.h | 3
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_type.h | 89
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_virtchnl.h | 424
-rw-r--r--  usr/src/uts/common/io/i40e/core/virtchnl.h | 747
-rw-r--r--  usr/src/uts/common/io/i40e/i40e_main.c | 8
23 files changed, 3341 insertions, 836 deletions
diff --git a/exception_lists/cstyle b/exception_lists/cstyle
index 53a89f6406..7c20c11d6d 100644
--- a/exception_lists/cstyle
+++ b/exception_lists/cstyle
@@ -1031,6 +1031,8 @@ usr/src/uts/common/io/i40e/core/i40e_adminq.c
usr/src/uts/common/io/i40e/core/i40e_adminq.h
usr/src/uts/common/io/i40e/core/i40e_alloc.h
usr/src/uts/common/io/i40e/core/i40e_common.c
+usr/src/uts/common/io/i40e/core/i40e_dcb.c
+usr/src/uts/common/io/i40e/core/i40e_dcb.h
usr/src/uts/common/io/i40e/core/i40e_devids.h
usr/src/uts/common/io/i40e/core/i40e_hmc.c
usr/src/uts/common/io/i40e/core/i40e_hmc.h
@@ -1041,7 +1043,7 @@ usr/src/uts/common/io/i40e/core/i40e_prototype.h
usr/src/uts/common/io/i40e/core/i40e_register.h
usr/src/uts/common/io/i40e/core/i40e_status.h
usr/src/uts/common/io/i40e/core/i40e_type.h
-usr/src/uts/common/io/i40e/core/i40e_virtchnl.h
+usr/src/uts/common/io/i40e/core/virtchnl.h
usr/src/uts/common/io/iwn/if_iwn.c
usr/src/uts/common/io/iwn/if_iwnreg.h
usr/src/uts/common/io/iwn/if_iwnvar.h
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index 0e9d931d1a..39ee71dc58 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -2042,7 +2042,7 @@ I40E_OBJS = i40e_main.o i40e_osdep.o i40e_intr.o i40e_transceiver.o \
i40e_stats.o i40e_gld.o
# Intel-written ones.
I40E_INTC_OBJS = i40e_adminq.o i40e_common.o i40e_hmc.o i40e_lan_hmc.o \
- i40e_nvm.o
+ i40e_nvm.o i40e_dcb.o
#
# Solarflare 1/10/40GbE NIC driver module
diff --git a/usr/src/uts/common/io/i40e/core/README.illumos b/usr/src/uts/common/io/i40e/core/README.illumos
index 47cb1fdf2d..5e81eadfed 100644
--- a/usr/src/uts/common/io/i40e/core/README.illumos
+++ b/usr/src/uts/common/io/i40e/core/README.illumos
@@ -10,82 +10,99 @@
#
This directory contains files extracted from the Intel ixl-1.6.10 driver for
-FreeBSD with the following modifications/differences. The following two
-changes each modified the common code.
+FreeBSD which was later merged with ixl-1.9.9.
+
+The following change modified the common code.
-9805 i40e should read SFP data when firmware supports it
9601 Divide by zero in i40e_get_available_resources()
-The following diff was originally applied to add support for Studio and the
-32-bit kernel:
+During the merge to ixl-1.9.9 the following patches were not applied to
+the illumos core.
+
+We don't explicitly recognise/use the _VF mac types.
+====================================================
+--- i40e_common.c
++++ i40e_common.c
+@@ -77,11 +76,11 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
+ hw->mac.type = I40E_MAC_X722;
+ break;
+ case I40E_DEV_ID_X722_VF:
+- case I40E_DEV_ID_X722_A0_VF:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
++ case I40E_DEV_ID_ADAPTIVE_VF:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+
+We don't have these functions in Illumos core.
+==============================================
+--- i40e_common.c
++++ i40e_common.c
+@@ -4537,7 +4687,6 @@ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add in Host byte order
+- * @header_len: length of the tunneling header length in DWords
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+@@ -6910,7 +7185,7 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+ * completion before returning.
+ **/
+ enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+- enum i40e_virtchnl_ops v_opcode,
++ enum virtchnl_ops v_opcode,
+ enum i40e_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+@@ -6949,9 +7224,9 @@ enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ * with appropriate information.
+ **/
+ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+- struct i40e_virtchnl_vf_resource *msg)
++ struct virtchnl_vf_resource *msg)
+ {
+- struct i40e_virtchnl_vsi_resource *vsi_res;
++ struct virtchnl_vsi_resource *vsi_res;
+ int i;
---- ixl-1.6.10/src/i40e_common.c
-+++ illumos-gate/usr/src/uts/common/io/i40e/core/i40e_common.c
-@@ -4037,8 +4037,8 @@
-
- cmd->type = mib_type;
- cmd->length = CPU_TO_LE16(buff_size);
-- cmd->address_high = CPU_TO_LE32(I40E_HI_WORD((u64)buff));
-- cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff));
-+ cmd->address_high = CPU_TO_LE32(I40E_HI_WORD((uintptr_t)buff));
-+ cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((uintptr_t)buff));
-
- status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
- return status;
-@@ -6585,9 +6585,9 @@
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
-
- desc.params.external.addr_high =
-- CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
-+ CPU_TO_LE32(I40E_HI_DWORD((uintptr_t)proxy_config));
- desc.params.external.addr_low =
-- CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
-+ CPU_TO_LE32(I40E_LO_DWORD((uintptr_t)proxy_config));
-
- status = i40e_asq_send_command(hw, &desc, proxy_config,
- sizeof(struct i40e_aqc_arp_proxy_data),
-@@ -6619,9 +6619,9 @@
- i40e_aqc_opc_set_ns_proxy_table_entry);
-
- desc.params.external.addr_high =
-- CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
-+ CPU_TO_LE32(I40E_HI_DWORD((uintptr_t)ns_proxy_table_entry));
- desc.params.external.addr_low =
-- CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
-+ CPU_TO_LE32(I40E_LO_DWORD((uintptr_t)ns_proxy_table_entry));
-
- status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
- sizeof(struct i40e_aqc_ns_proxy_data),
-@@ -6681,8 +6681,8 @@
- valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
- cmd->valid_flags = CPU_TO_LE16(valid_flags);
-
-- cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
-- cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
-+ cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((uintptr_t)filter));
-+ cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((uintptr_t)filter));
-
- status = i40e_asq_send_command(hw, &desc, filter,
- buff_len, cmd_details);
---- ixl-1.6.10/src/i40e_register.h
-+++ illumos-gate/usr/src/uts/common/io/i40e/core/i40e_register.h
-@@ -113,7 +113,7 @@
- #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
- #define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
- #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
--#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
-+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1UL, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
- #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
- #define I40E_PF_ATQT_ATQT_SHIFT 0
- #define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
---- ixl-1.6.10/src/i40e_type.h
-+++ illumos-gate/usr/src/uts/common/io/i40e/core/i40e_type.h
-@@ -49,7 +49,7 @@
-
- #ifndef I40E_MASK
- /* I40E_MASK is a macro used on 32 bit registers */
--#define I40E_MASK(mask, shift) (mask << shift)
-+#define I40E_MASK(mask, shift) (((uint32_t)(mask)) << ((uint32_t)(shift)))
- #endif
-
- #define I40E_MAX_PF 16
+ vsi_res = &msg->vsi_res[0];
+@@ -6960,20 +7235,18 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+- hw->dev_caps.dcb = msg->vf_offload_flags &
+- I40E_VIRTCHNL_VF_OFFLOAD_L2;
+- hw->dev_caps.fcoe = (msg->vf_offload_flags &
+- I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+- hw->dev_caps.iwarp = (msg->vf_offload_flags &
+- I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
++ hw->dev_caps.dcb = msg->vf_cap_flags &
++ VIRTCHNL_VF_OFFLOAD_L2;
++ hw->dev_caps.iwarp = (msg->vf_cap_flags &
++ VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+- if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
++ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
+ i40e_memcpy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr,
+- I40E_ETH_LENGTH_OF_ADDRESS,
++ ETH_ALEN,
+ I40E_NONDMA_TO_NONDMA);
+ i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+- I40E_ETH_LENGTH_OF_ADDRESS,
++ ETH_ALEN,
+ I40E_NONDMA_TO_NONDMA);
+ }
+ vsi_res++;
+@@ -6990,14 +7263,14 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ **/
+ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
+ {
+- return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
++ return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
+ I40E_SUCCESS, NULL, 0, NULL);
+ }
diff --git a/usr/src/uts/common/io/i40e/core/i40e_adminq.c b/usr/src/uts/common/io/i40e/core/i40e_adminq.c
index 1485244e7a..85d6b34b2c 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_adminq.c
+++ b/usr/src/uts/common/io/i40e/core/i40e_adminq.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -633,6 +633,24 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+ /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
+ if ((hw->aq.api_maj_ver > 1) ||
+ ((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver >= 7)))
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ }
+
+ /* Newer versions of firmware require lock when reading the NVM */
+ if ((hw->aq.api_maj_ver > 1) ||
+ ((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver >= 5)))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
@@ -888,8 +906,8 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- i40e_msec_delay(1);
- total_delay++;
+ i40e_usec_delay(50);
+ total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
@@ -930,10 +948,15 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
}
asq_send_command_error:
@@ -995,7 +1018,10 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (!i40e_is_vf(hw))
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+ else
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -1053,7 +1079,7 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
- i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
+ i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
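The new I40E_HW_FLAG_802_1AD_CAPABLE and I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK flags above are gated purely on the admin-queue API version reported by the firmware (1.7 and 1.5 respectively). A minimal standalone sketch of that version comparison; the helper name and simplified struct here are illustrative assumptions, not driver code:

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the version fields the driver keeps in hw->aq. */
struct aq_api_version {
	uint16_t api_maj_ver;
	uint16_t api_min_ver;
};

/*
 * True when the firmware AQ API is at least maj.min; this mirrors the
 * checks above that set I40E_HW_FLAG_802_1AD_CAPABLE (>= 1.7) and
 * I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK (>= 1.5).
 */
static bool
aq_api_at_least(const struct aq_api_version *aq, uint16_t maj, uint16_t min)
{
	return (aq->api_maj_ver > maj ||
	    (aq->api_maj_ver == maj && aq->api_min_ver >= min));
}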
diff --git a/usr/src/uts/common/io/i40e/core/i40e_adminq.h b/usr/src/uts/common/io/i40e/core/i40e_adminq.h
index a0279273bd..f58dc98289 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_adminq.h
+++ b/usr/src/uts/common/io/i40e/core/i40e_adminq.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -159,7 +159,7 @@ static INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
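I40E_ASQ_CMD_TIMEOUT is now expressed in microseconds to match the 50 usec polling step introduced in i40e_asq_send_command() above; the overall 250 ms budget is unchanged. A rough, self-contained sketch of the resulting loop shape, with function and parameter names that are illustrative rather than the driver's own:

#include <stdbool.h>
#include <unistd.h>

/*
 * Poll a completion predicate every 50 usecs, giving up after 250000 usecs
 * (the new I40E_ASQ_CMD_TIMEOUT value).  Returns true if the command
 * completed before the timeout.
 */
static bool
poll_asq_completion(bool (*done)(void))
{
	unsigned int total_delay = 0;

	do {
		if (done())
			return (true);
		(void) usleep(50);	/* i40e_usec_delay(50) in the driver */
		total_delay += 50;
	} while (total_delay < 250000);

	return (false);	/* caller reports timeout or AQ critical error */
}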
diff --git a/usr/src/uts/common/io/i40e/core/i40e_adminq_cmd.h b/usr/src/uts/common/io/i40e/core/i40e_adminq_cmd.h
index 49eb7caf4b..ee3f852f64 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_adminq_cmd.h
+++ b/usr/src/uts/common/io/i40e/core/i40e_adminq_cmd.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -41,8 +41,17 @@
* This file needs to comply with the Linux Kernel coding style.
*/
+
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0005
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+ I40E_FW_API_VERSION_MINOR_X710 : \
+ I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
struct i40e_aq_desc {
__le16 flags;
@@ -201,6 +210,7 @@ enum i40e_admin_queue_opc {
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
@@ -251,6 +261,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_nvm_update = 0x0703,
i40e_aqc_opc_nvm_config_read = 0x0704,
i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_nvm_progress = 0x0706,
i40e_aqc_opc_oem_post_update = 0x0720,
i40e_aqc_opc_thermal_sensor = 0x0721,
@@ -774,8 +785,52 @@ struct i40e_aqc_set_switch_config {
/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
+#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
__le16 valid_flags;
- u8 reserved[12];
+ /* The ethertype in switch_tag is dropped on ingress and used
+ * internally by the switch. Set this to zero for the default
+ * of 0x88a8 (802.1ad). Should be zero for firmware API
+ * versions lower than 1.7.
+ */
+ __le16 switch_tag;
+ /* The ethertypes in first_tag and second_tag are used to
+ * match the outer and inner VLAN tags (respectively) when HW
+ * double VLAN tagging is enabled via the set port parameters
+ * AQ command. Otherwise these are both ignored. Set them to
+ * zero for their defaults of 0x8100 (802.1Q). Should be zero
+ * for firmware API versions lower than 1.7.
+ */
+ __le16 first_tag;
+ __le16 second_tag;
+ /* Next byte is split into following:
+ * Bit 7 : 0 : No action, 1: Switch to mode defined by bits 6:0
+ * Bit 6 : 0 : Destination Port, 1: source port
+ * Bit 5..4 : L4 type
+ * 0: rsvd
+ * 1: TCP
+ * 2: UDP
+ * 3: Both TCP and UDP
+ * Bits 3:0 Mode
+ * 0: default mode
+ * 1: L4 port only mode
+ * 2: non-tunneled mode
+ * 3: tunneled mode
+ */
+#define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80
+
+#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40
+
+#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00
+#define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10
+#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20
+#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30
+
+#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00
+#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01
+#define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02
+#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03
+ u8 mode;
+ u8 rsvd5[5];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
@@ -1706,6 +1761,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
I40E_PHY_TYPE_10GBASE_AOC = 0xC,
I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
+ I40E_PHY_TYPE_UNSUPPORTED = 0xF,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1724,8 +1781,50 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_CR = 0x20,
I40E_PHY_TYPE_25GBASE_SR = 0x21,
I40E_PHY_TYPE_25GBASE_LR = 0x22,
- I40E_PHY_TYPE_MAX
-};
+ I40E_PHY_TYPE_25GBASE_AOC = 0x23,
+ I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_MAX,
+ I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
+ I40E_PHY_TYPE_EMPTY = 0xFE,
+ I40E_PHY_TYPE_DEFAULT = 0xFF,
+};
+
+#define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \
+ BIT_ULL(I40E_PHY_TYPE_XAUI) | \
+ BIT_ULL(I40E_PHY_TYPE_XFI) | \
+ BIT_ULL(I40E_PHY_TYPE_SFI) | \
+ BIT_ULL(I40E_PHY_TYPE_XLAUI) | \
+ BIT_ULL(I40E_PHY_TYPE_XLPPI) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \
+ BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \
+ BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \
+ BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC))
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
@@ -1781,7 +1880,18 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
- u8 mod_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
+#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
@@ -1811,6 +1921,8 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
#define I40E_AQ_SET_FEC_REQUEST_KR (1 << 2)
#define I40E_AQ_SET_FEC_REQUEST_RS (1 << 3)
#define I40E_AQ_SET_FEC_AUTO (1 << 4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
u8 reserved;
};
@@ -1899,19 +2011,31 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+/* Since firmware API 1.7 loopback field keeps power class info as well */
+#define I40E_AQ_LOOPBACK_MASK 0x07
+#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
+#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 power_desc;
+ union {
+ struct {
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
#define I40E_AQ_PWR_CLASS_MASK 0x03
- u8 reserved[4];
+ u8 reserved[4];
+ };
+ struct {
+ u8 link_type[4];
+ u8 link_type_ext;
+ };
+ };
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -1948,11 +2072,28 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
/* Set Loopback mode (0x0618) */
struct i40e_aqc_set_lb_mode {
- __le16 lb_mode;
+ u8 lb_level;
+#define I40E_AQ_LB_NONE 0
+#define I40E_AQ_LB_MAC 1
+#define I40E_AQ_LB_SERDES 2
+#define I40E_AQ_LB_PHY_INT 3
+#define I40E_AQ_LB_PHY_EXT 4
+#define I40E_AQ_LB_CPVL_PCS 5
+#define I40E_AQ_LB_CPVL_EXT 6
#define I40E_AQ_LB_PHY_LOCAL 0x01
#define I40E_AQ_LB_PHY_REMOTE 0x02
#define I40E_AQ_LB_MAC_LOCAL 0x04
- u8 reserved[14];
+ u8 lb_type;
+#define I40E_AQ_LB_LOCAL 0
+#define I40E_AQ_LB_FAR 0x01
+ u8 speed;
+#define I40E_AQ_LB_SPEED_NONE 0
+#define I40E_AQ_LB_SPEED_1G 1
+#define I40E_AQ_LB_SPEED_10G 2
+#define I40E_AQ_LB_SPEED_40G 3
+#define I40E_AQ_LB_SPEED_20G 4
+ u8 force_speed;
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
@@ -2018,8 +2159,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
__le16 length;
__le32 offset;
@@ -2279,6 +2424,17 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+ u8 command;
+#define I40E_AQ_DCB_SET_AGENT 0x1
+#define I40E_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
/* Get CEE DCBX Oper Config (0x0A07)
* uses the generic descriptor struct
* returns below as indirect response
diff --git a/usr/src/uts/common/io/i40e/core/i40e_alloc.h b/usr/src/uts/common/io/i40e/core/i40e_alloc.h
index b09570855e..292016b81f 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_alloc.h
+++ b/usr/src/uts/common/io/i40e/core/i40e_alloc.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/usr/src/uts/common/io/i40e/core/i40e_common.c b/usr/src/uts/common/io/i40e/core/i40e_common.c
index 0e0dc285ae..fd32e0204c 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_common.c
+++ b/usr/src/uts/common/io/i40e/core/i40e_common.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,7 @@
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
-#include "i40e_virtchnl.h"
+#include "virtchnl.h"
/**
@@ -68,7 +68,6 @@ static enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
- case I40E_DEV_ID_X722_A0:
case I40E_DEV_ID_KX_X722:
case I40E_DEV_ID_QSFP_X722:
case I40E_DEV_ID_SFP_X722:
@@ -290,6 +289,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
return "I40E_NOT_SUPPORTED";
case I40E_ERR_FIRMWARE_API_VERSION:
return "I40E_ERR_FIRMWARE_API_VERSION";
+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -310,13 +311,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
- u16 len = LE16_TO_CPU(aq_desc->datalen);
u8 *buf = (u8 *)buffer;
+ u16 len;
u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
+ len = LE16_TO_CPU(aq_desc->datalen);
+
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
LE16_TO_CPU(aq_desc->opcode),
@@ -996,7 +999,8 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
hw->pf_id = (u8)(func_rid & 0x7);
if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
status = i40e_init_nvm(hw);
return status;
@@ -1230,6 +1234,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_CR:
+ case I40E_PHY_TYPE_25GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_ACC:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1312,6 +1318,8 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
* we don't need to do the PF Reset
*/
if (!cnt) {
+ u32 reg2 = 0;
+
reg = rd32(hw, I40E_PFGEN_CTRL);
wr32(hw, I40E_PFGEN_CTRL,
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
@@ -1319,6 +1327,12 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ DEBUGOUT("Core reset upcoming. Skipping PF reset request.\n");
+ DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2);
+ return I40E_ERR_NOT_READY;
+ }
i40e_msec_delay(1);
}
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
@@ -1507,6 +1521,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1555,6 +1570,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1565,9 +1581,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
- if (mode == I40E_LINK_ACTIVITY)
- blink = FALSE;
-
if (blink)
gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
@@ -1597,35 +1610,58 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
{
struct i40e_aq_desc desc;
enum i40e_status_code status;
+ u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
if (!abilities)
return I40E_ERR_PARAM;
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_phy_abilities);
+ do {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_abilities);
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
- if (abilities_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (abilities_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- if (qualified_modules)
- desc.params.external.param0 |=
+ if (qualified_modules)
+ desc.params.external.param0 |=
CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
- if (report_init)
- desc.params.external.param0 |=
+ if (report_init)
+ desc.params.external.param0 |=
CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
- status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
- cmd_details);
+ status = i40e_asq_send_command(hw, &desc, abilities,
+ abilities_size, cmd_details);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
- status = I40E_ERR_UNKNOWN_PHY;
+ if (status != I40E_SUCCESS)
+ break;
+
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
+ i40e_msec_delay(1);
+ total_delay++;
+ status = I40E_ERR_TIMEOUT;
+ }
+ } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
+ (total_delay < max_delay));
+
+ if (status != I40E_SUCCESS)
+ return status;
if (report_init) {
- hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
- hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ } else {
+ hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+ hw->phy.phy_types |=
+ ((u64)abilities->phy_type_ext << 32);
+ }
}
return status;
@@ -1668,6 +1704,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
/**
* i40e_set_fc
* @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to enable atomic link restart
*
* Set the requested flow control mode using set_phy_config.
**/
@@ -1883,7 +1921,7 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
hw_link_info->ext_info = resp->ext_info;
- hw_link_info->loopback = resp->loopback;
+ hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
@@ -1914,6 +1952,16 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= 7) {
+ __le32 tmp;
+
+ i40e_memcpy(&tmp, resp->link_type, sizeof(tmp),
+ I40E_NONDMA_TO_NONDMA);
+ hw->phy.phy_types = LE32_TO_CPU(tmp);
+ hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
+ }
+
/* save link status information */
if (link)
i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
@@ -2053,9 +2101,9 @@ aq_get_partner_advt_exit:
*
* Sets loopback modes.
**/
-enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
- u16 lb_modes,
- struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code
+i40e_aq_set_lb_modes(struct i40e_hw *hw, u8 lb_level, u8 lb_type, u8 speed,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_lb_mode *cmd =
@@ -2065,7 +2113,11 @@ enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_lb_modes);
- cmd->lb_mode = CPU_TO_LE16(lb_modes);
+ cmd->lb_level = lb_level;
+ cmd->lb_type = lb_type;
+ cmd->speed = speed;
+ if (speed)
+ cmd->force_speed = 1;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2554,13 +2606,14 @@ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
* i40e_aq_set_switch_config
* @hw: pointer to the hardware structure
* @flags: bit flag values to set
+ * @mode: cloud filter mode
* @valid_flags: which bit flags to set
* @cmd_details: pointer to command details structure or NULL
*
* Set switch configuration bits
**/
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags, u16 valid_flags,
+ u16 flags, u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -2572,7 +2625,12 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
i40e_aqc_opc_set_switch_config);
scfg->flags = CPU_TO_LE16(flags);
scfg->valid_flags = CPU_TO_LE16(valid_flags);
-
+ scfg->mode = mode;
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ scfg->switch_tag = CPU_TO_LE16(hw->switch_tag);
+ scfg->first_tag = CPU_TO_LE16(hw->first_tag);
+ scfg->second_tag = CPU_TO_LE16(hw->second_tag);
+ }
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -2718,6 +2776,10 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
+ hw->phy.link_info.req_fec_info =
+ abilities.fec_cfg_curr_mod_ext_info &
+ (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+
i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
@@ -2966,8 +3028,8 @@ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
* @mr_list: list of mirrored VSI SEIDs or VLAN IDs
* @cmd_details: pointer to command details structure or NULL
* @rule_id: Rule ID returned from FW
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
* VEBs/VEPA elements only
@@ -3027,8 +3089,8 @@ static enum i40e_status_code i40e_mirrorrule_op(struct i40e_hw *hw,
* @mr_list: list of mirrored VSI SEIDs or VLAN IDs
* @cmd_details: pointer to command details structure or NULL
* @rule_id: Rule ID returned from FW
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
**/
@@ -3058,8 +3120,8 @@ enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
* add_mirrorrule.
* @mr_list: list of mirrored VLAN IDs to be removed
* @cmd_details: pointer to command details structure or NULL
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
**/
@@ -3462,6 +3524,8 @@ enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
/**
* i40e_aq_oem_post_update - triggers an OEM specific flow after update
* @hw: pointer to the hw struct
+ * @buff: buffer for result
+ * @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
**/
enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
@@ -3540,9 +3604,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
+ enum i40e_status_code status;
+ u16 id, ocp_cfg_word0;
u8 major_rev;
u32 i = 0;
- u16 id;
cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
@@ -3836,6 +3901,26 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
hw->num_ports++;
}
+ /* OCP cards case: if a mezz is removed the ethernet port is at
+ * disabled state in PRTGEN_CNF register. Additional NVM read is
+ * needed in order to check if we are dealing with OCP card.
+ * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting
+ * physical ports results in wrong partition id calculation and thus
+ * not supporting WoL.
+ */
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) {
+ status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
+ 2 * I40E_SR_OCP_CFG_WORD0,
+ sizeof(ocp_cfg_word0),
+ &ocp_cfg_word0, TRUE, NULL);
+ if (status == I40E_SUCCESS &&
+ (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
+ hw->num_ports = 4;
+ i40e_release_nvm(hw);
+ }
+ }
+
valid_functions = p->valid_functions;
num_functions = 0;
while (valid_functions) {
@@ -3913,13 +3998,14 @@ exit:
* @length: length of the section to be written (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
+ * @preservation_flags: Preservation mode flags
* @cmd_details: pointer to command details structure or NULL
*
* Update the NVM using the admin queue commands
**/
enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
- bool last_command,
+ bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -3940,6 +4026,16 @@ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ }
cmd->module_pointer = module_pointer;
cmd->offset = CPU_TO_LE32(offset);
cmd->length = CPU_TO_LE16(length);
@@ -3955,6 +4051,28 @@ i40e_aq_update_nvm_exit:
}
/**
+ * i40e_aq_nvm_progress
+ * @hw: pointer to the hw struct
+ * @progress: pointer to progress returned from AQ
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Gets progress of flash rearrangement process
+ **/
+enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+
+ DEBUGFUNC("i40e_aq_nvm_progress");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_progress);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ *progress = desc.params.raw[0];
+ return status;
+}
+
+/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
* @bridge_type: type of bridge requested
@@ -4268,7 +4386,39 @@ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_dcb_parameters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ * @dcb_enable: True if DCB configuration needs to be applied
+ *
+ **/
+enum i40e_status_code
+i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_dcb_parameters *cmd =
+ (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if ((hw->mac.type != I40E_MAC_XL710) ||
+ ((hw->aq.api_maj_ver < 1) ||
+ ((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 6))))
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_dcb_parameters);
+ if (dcb_enable) {
+ cmd->valid_flags = I40E_DCB_VALID;
+ cmd->command = I40E_AQ_DCB_SET_AGENT;
+ }
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -5379,7 +5529,7 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
if (mac_addr)
- i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS,
+ i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
cmd->etype = CPU_TO_LE16(ethtype);
@@ -5403,10 +5553,10 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @seid: VSI seid to add ethertype filter from
**/
-#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 seid)
{
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
@@ -5837,6 +5987,7 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
* @ret_buff_size: actual buffer size returned
* @ret_next_table: next block to read
* @ret_next_index: next index to read
+ * @cmd_details: pointer to command details structure or NULL
*
* Dump internal FW/HW data for debug purposes.
*
@@ -5959,7 +6110,7 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
* i40e_read_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -6004,7 +6155,7 @@ enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
* i40e_write_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes specified PHY register value
@@ -6045,7 +6196,7 @@ enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -6119,7 +6270,7 @@ phy_read_end:
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
@@ -6186,7 +6337,7 @@ phy_write_end:
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
@@ -6222,7 +6373,7 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -6257,7 +6408,6 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
- * @phy_addr: Returned PHY address
*
* Gets PHY address for current port
**/
@@ -6344,6 +6494,64 @@ phy_blinking_end:
}
/**
+ * i40e_led_get_reg - read LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: read register value
+ **/
+static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 *reg_val)
+{
+ enum i40e_status_code status;
+ u8 phy_addr = 0;
+
+ *reg_val = 0;
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ reg_val, NULL);
+ } else {
+ phy_addr = i40e_get_phy_address(hw, hw->port);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16 *)reg_val);
+ }
+ return status;
+}
+
+/**
+ * i40e_led_set_reg - write LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: register value to write
+ **/
+static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 reg_val)
+{
+ enum i40e_status_code status;
+ u8 phy_addr = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ reg_val, NULL);
+ } else {
+ phy_addr = i40e_get_phy_address(hw, hw->port);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)reg_val);
+ }
+
+ return status;
+}
+
+/**
* i40e_led_get_phy - return current on/off mode
* @hw: pointer to the hw struct
* @led_addr: address of led register to use
@@ -6355,17 +6563,23 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
{
enum i40e_status_code status = I40E_SUCCESS;
u16 gpio_led_port;
+ u32 reg_val_aq;
+ u16 temp_addr;
u8 phy_addr = 0;
u16 reg_val;
- u16 temp_addr;
- u8 port_num;
- u32 i;
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &reg_val_aq, NULL);
+ if (status == I40E_SUCCESS)
+ *val = (u16)reg_val_aq;
+ return status;
+ }
temp_addr = I40E_PHY_LED_PROV_REG_1;
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
-
+ phy_addr = i40e_get_phy_address(hw, hw->port);
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
temp_addr++) {
status = i40e_read_phy_register_clause45(hw,
@@ -6387,7 +6601,9 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
* i40e_led_set_phy
* @hw: pointer to the HW structure
* @on: TRUE or FALSE
+ * @led_addr: address of led register to use
* @mode: original val plus bit for set or ignore
+ *
* Set led's on or off when controlled by the PHY
*
**/
@@ -6395,51 +6611,37 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode)
{
enum i40e_status_code status = I40E_SUCCESS;
- u16 led_ctl = 0;
- u16 led_reg = 0;
- u8 phy_addr = 0;
- u8 port_num;
- u32 i;
+ u32 led_ctl = 0;
+ u32 led_reg = 0;
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ status = i40e_led_set_reg(hw, led_addr, led_reg);
if (status)
return status;
}
- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_led_set_reg(hw, led_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
}
return status;
+
restore_config:
- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
return status;
}
@@ -6489,7 +6691,9 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
int retry = 5;
u32 val = 0;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -6548,7 +6752,9 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
bool use_register;
int retry = 5;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
@@ -6638,7 +6844,7 @@ enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
/**
* i40e_aq_set_arp_proxy_config
* @hw: pointer to the HW structure
- * @proxy_config - pointer to proxy config command table struct
+ * @proxy_config: pointer to proxy config command table struct
* @cmd_details: pointer to command details
*
* Set ARP offload parameters from pre-populated
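The i40e_read_rx_ctl()/i40e_write_rx_ctl() change above widens the fallback test: direct register access is used not only on firmware older than API 1.5 but now also unconditionally on X722 parts. A small illustrative helper expressing just that predicate (the function name is an assumption; the driver computes this inline as use_register):

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the inline use_register test in i40e_read_rx_ctl()/i40e_write_rx_ctl(). */
static bool
rx_ctl_use_direct_register(uint16_t api_maj_ver, uint16_t api_min_ver,
    bool is_x722)
{
	return ((api_maj_ver == 1 && api_min_ver < 5) || is_x722);
}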
diff --git a/usr/src/uts/common/io/i40e/core/i40e_dcb.c b/usr/src/uts/common/io/i40e/core/i40e_dcb.c
new file mode 100644
index 0000000000..46ea4e184f
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_dcb.c
@@ -0,0 +1,1385 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2017, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+ u32 reg;
+
+ if (!status)
+ return I40E_ERR_PARAM;
+
+ reg = rd32(hw, I40E_PRTDCB_GENS);
+ *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+ I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+ I40E_IEEE_ETS_WILLING_SHIFT);
+ etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+ I40E_IEEE_ETS_CBS_SHIFT);
+ etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+ I40E_IEEE_ETS_MAXTC_SHIFT);
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* Move offset to priority table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+ I40E_IEEE_PFC_WILLING_SHIFT);
+ dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+ I40E_IEEE_PFC_MBC_SHIFT);
+ dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+ I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.pfcenable = buf[1];
+}
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength;
+ u16 offset = 0;
+ u16 length;
+ int i = 0;
+ u8 *buf;
+
+ typelength = I40E_NTOHS(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ buf = tlv->tlvinfo;
+
+ /* The App priority table starts 5 octets after TLV header */
+ length -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
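+	/*
+	 * Illustrative example: an FCoE entry (priority 3, selector 1 for
+	 * Ethertype, protocol ID 0x8906) is encoded as 0x61 0x89 0x06.
+	 */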
+ while (offset < length) {
+ dcbcfg->app[i].priority = (u8)((buf[offset] &
+ I40E_IEEE_APP_PRIO_MASK) >>
+ I40E_IEEE_APP_PRIO_SHIFT);
+ dcbcfg->app[i].selector = (u8)((buf[offset] &
+ I40E_IEEE_APP_SEL_MASK) >>
+ I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ switch (subtype) {
+ case I40E_IEEE_SUBTYPE_ETS_CFG:
+ i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_ETS_REC:
+ i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_PFC_CFG:
+ i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_APP_PRI:
+ i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ **/
+static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * i40e_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ **/
+static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcenable = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * i40e_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ **/
+static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, typelength, offset = 0;
+ struct i40e_cee_app_prio *app;
+ u8 i;
+
+ typelength = I40E_NTOHS(tlv->hdr.typelen);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+
+ dcbcfg->numapps = length / sizeof(*app);
+ if (!dcbcfg->numapps)
+ return;
+ if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
+ dcbcfg->numapps = I40E_DCBX_MAX_APPS;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ u8 up, selector;
+
+ app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
+ for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
+ if (app->prio_map & BIT(up))
+ break;
+ }
+ dcbcfg->app[i].priority = up;
+
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
+ switch (selector) {
+ case I40E_CEE_APP_SEL_ETHTYPE:
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ break;
+ case I40E_CEE_APP_SEL_TCPIP:
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ break;
+ default:
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+ }
+
+ dcbcfg->app[i].protocolid = I40E_NTOHS(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
+
+/**
+ * i40e_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 len, tlvlen, sublen, typelength;
+ struct i40e_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u32 ouisubtype;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ /* Return if not CEE DCBX */
+ if (subtype != I40E_CEE_DCBX_TYPE)
+ return;
+
+ typelength = I40E_NTOHS(tlv->typelength);
+ tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
+ sizeof(struct i40e_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
+ typelength = I40E_NTOHS(sub_tlv->hdr.typelen);
+ sublen = (u16)((typelength &
+ I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ switch (subtype) {
+ case I40E_CEE_SUBTYPE_PG_CFG:
+ i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_PFC_CFG:
+ i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_APP_PRI:
+ i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Invalid Sub-type return */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
+ sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update DCBX configuration data
+ *
+ * Dispatch on the organization OUI; currently the IEEE 802.1Qaz and
+ * CEE DCBX TLVs are handled, all others are ignored
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+ I40E_LLDP_TLV_OUI_SHIFT);
+ switch (oui) {
+ case I40E_IEEE_8021QAZ_OUI:
+ i40e_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ case I40E_CEE_DCBX_OUI:
+ i40e_parse_cee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 type;
+ u16 length;
+ u16 typelength;
+ u16 offset = 0;
+
+ if (!lldpmib || !dcbcfg)
+ return I40E_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += I40E_LLDP_MIB_HLEN;
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelength = I40E_NTOHS(tlv->typelength);
+ type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ offset += sizeof(typelength) + length;
+
+ /* END TLV or beyond LLDPDU size */
+ if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
+ break;
+
+ switch (type) {
+ case I40E_TLV_TYPE_ORG:
+ i40e_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) +
+ length);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+ (void *)lldpmib, I40E_LLDPDU_SIZE,
+ NULL, NULL, NULL);
+ if (ret)
+ goto free_mem;
+
+ /* Parse LLDP MIB to get dcb configuration */
+ ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_cee_to_dcb_v1_config
+ * @cee_cfg: pointer to CEE v1 response configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE v1 configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_v1_config(
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status);
+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+ u8 i, tc, err;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
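+	/*
+	 * Illustrative example: oper_prio_tc[0] == 0x21 maps priority 0 to
+	 * TC 1 (lower nibble) and priority 1 to TC 2 (upper nibble), the
+	 * reverse of the nibble order used in the on-the-wire sub-TLV.
+	 */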
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+	/* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
+ I40E_AQC_CEE_APP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ /* Add APPs if Error is False */
+ if (!err) {
+ /* CEE operating configuration supports FCoE/iSCSI/FIP only */
+ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
+
+ /* FCoE APP */
+ dcbcfg->app[0].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ /* iSCSI APP */
+ dcbcfg->app[1].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+
+ /* FIP APP */
+ dcbcfg->app[2].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+ }
+}
+
+/**
+ * i40e_cee_to_dcb_config
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_config(
+ struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status);
+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+ u8 i, tc, err, sync, oper;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+	/* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ i = 0;
+ status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
+ I40E_AQC_CEE_FCOE_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FCoE APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* FCoE APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
+ i++;
+ }
+
+ status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+ I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add iSCSI APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* iSCSI APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
+ i++;
+ }
+
+ status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+ I40E_AQC_CEE_FIP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FIP APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* FIP APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
+ i++;
+ }
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_get_ieee_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get IEEE mode DCB configuration from the Firmware
+ **/
+static enum i40e_status_code i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = I40E_SUCCESS;
+
+out:
+ return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+
+ /* If Firmware version < v4.33 on X710/XL710, IEEE only */
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)))
+ return i40e_get_ieee_dcb_config(hw);
+
+ /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
+ sizeof(cee_v1_cfg), NULL);
+ if (ret == I40E_SUCCESS) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ LE16_TO_CPU(cee_v1_cfg.tlv_status);
+ i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
+ &hw->local_dcbx_config);
+ }
+ } else {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg,
+ sizeof(cee_cfg), NULL);
+ if (ret == I40E_SUCCESS) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ LE32_TO_CPU(cee_cfg.tlv_status);
+ i40e_cee_to_dcb_config(&cee_cfg,
+ &hw->local_dcbx_config);
+ }
+ }
+
+	/* CEE mode not enabled, try querying IEEE data */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ return i40e_get_ieee_dcb_config(hw);
+
+ if (ret != I40E_SUCCESS)
+ goto out;
+
+ /* Get CEE DCB Desired Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->desired_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = I40E_SUCCESS;
+
+out:
+ return ret;
+}
+
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ *
+ * Update DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_variables lldp_cfg;
+ u8 adminstatus = 0;
+
+ if (!hw->func_caps.dcb)
+ return ret;
+
+ /* Read LLDP NVM area */
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (ret)
+ return ret;
+
+ /* Get the LLDP AdminStatus for the current port */
+ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
+ adminstatus &= 0xF;
+
+ /* LLDP agent disabled */
+ if (!adminstatus) {
+ hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
+ return ret;
+ }
+
+ /* Get DCBX status */
+ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+ if (ret)
+ return ret;
+
+ /* Check the DCBX Status */
+ switch (hw->dcbx_status) {
+ case I40E_DCBX_STATUS_DONE:
+ case I40E_DCBX_STATUS_IN_PROGRESS:
+ /* Get current DCBX configuration */
+ ret = i40e_get_dcb_config(hw);
+ if (ret)
+ return ret;
+ break;
+ case I40E_DCBX_STATUS_DISABLED:
+ return ret;
+ case I40E_DCBX_STATUS_NOT_STARTED:
+ case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+ default:
+ break;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, TRUE, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
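+
+/*
+ * Illustrative usage sketch (an assumption, not part of the imported common
+ * code): a driver would typically call i40e_init_dcb() during attach once
+ * hw->func_caps has been filled in, e.g.
+ *
+ *	if (i40e_init_dcb(hw) == I40E_SUCCESS &&
+ *	    hw->dcbx_status == I40E_DCBX_STATUS_DONE) {
+ *		... hw->local_dcbx_config now holds the operational DCBX
+ *		... configuration reported by the firmware LLDP agent
+ *	}
+ */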
+
+/**
+ * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 priority0, priority1, maxtcwilling = 0;
+ struct i40e_dcb_ets_config *etscfg;
+ u16 offset = 0, typelength, i;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
+ maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
+ buf[offset] = maxtcwilling;
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etscfg->prioritytable[i * 2] & 0xF;
+ priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etsrec;
+ u16 offset = 0, typelength, i;
+ u8 priority0, priority1;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+ /* First Octet is reserved */
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etsrec->prioritytable[i * 2] & 0xF;
+ priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store to get PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelength;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_PFC_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
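+	/*
+	 * Illustrative example: willing = 1, mbc = 0, pfccap = 8 and PFC
+	 * enabled on priority 3 only encodes as buf[0] = 0x88, buf[1] = 0x08.
+	 */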
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcenable;
+}
+
+/**
+ * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store to get APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ **/
+static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength, length, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ /* No APP TLVs then just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+ /* length includes size of ouisubtype + 1 reserved + 3*numapps */
+ length = sizeof(tlv->ouisubtype) + 1 + (i*3);
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ (length & 0x1FF));
+ tlv->typelength = I40E_HTONS(typelength);
+}
+
+/**
+ * i40e_add_dcb_tlv - Add one IEEE TLV to the LLDPDU
+ * @tlv: pointer to org tlv
+ * @dcbcfg: Local store which holds the DCB Config
+ * @tlvid: type of TLV to add
+ *
+ * Fill the requested IEEE 802.1Qaz TLV from the DCB configuration
+ **/
+static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case I40E_IEEE_TLV_ID_ETS_CFG:
+ i40e_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_ETS_REC:
+ i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_PFC_CFG:
+ i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_APP_PRI:
+ i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_set_dcb_config - Set the local LLDP MIB to FW
+ * @hw: pointer to the hw struct
+ *
+ * Set DCB configuration to the Firmware
+ **/
+enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_dcbx_config *dcbcfg;
+ struct i40e_virt_mem mem;
+ u8 mib_type, *lldpmib;
+ u16 miblen;
+
+ /* update the hw local config */
+ dcbcfg = &hw->local_dcbx_config;
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
+ mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
+ }
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL);
+
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_dcb_config_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: pointer to the buffer that receives the LLDPDU
+ * @miblen: length in bytes of the LLDPDU that was built
+ * @dcbcfg: DCB configuration to convert
+ *
+ * Build an LLDP MIB from the local DCB configuration
+ **/
+enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, offset = 0, tlvid = I40E_TLV_ID_START;
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 typelength;
+
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelength = I40E_NTOHS(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ if (length)
+ offset += length + 2;
+ /* END TLV or beyond LLDPDU size */
+ if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) ||
+ (offset > I40E_LLDPDU_SIZE))
+ break;
+ /* Move to next TLV */
+ if (length)
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) + length);
+ }
+ *miblen = offset;
+ return ret;
+}
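+
+/*
+ * Illustrative sizing example: with both ETS TLVs (25 value octets each),
+ * the PFC TLV (6 value octets) and a single APP entry (8 value octets),
+ * the LLDPDU built above is (25 + 2) + (25 + 2) + (6 + 2) + (8 + 2) = 72
+ * bytes, which is what *miblen reports.
+ */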
+
+/**
+ * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ * @module: address of the module pointer
+ * @word_offset: offset of LLDP configuration
+ *
+ * Reads the LLDP configuration data from NVM using passed addresses
+ **/
+static enum i40e_status_code _i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg,
+ u8 module, u32 word_offset)
+{
+ u32 address, offset = (2 * word_offset);
+ enum i40e_status_code ret;
+ __le16 raw_mem;
+ u16 mem;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem,
+ TRUE, NULL);
+ i40e_release_nvm(hw);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ mem = LE16_TO_CPU(raw_mem);
+ /* Check if this pointer needs to be read in word size or 4K sector
+ * units.
+ */
+ if (mem & I40E_PTR_TYPE)
+ address = (0x7FFF & mem) * 4096;
+ else
+ address = (0x7FFF & mem) * 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem,
+ TRUE, NULL);
+ i40e_release_nvm(hw);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ mem = LE16_TO_CPU(raw_mem);
+ offset = mem + word_offset;
+ offset *= 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, 0, address + offset,
+ sizeof(struct i40e_lldp_variables), lldp_cfg,
+ TRUE, NULL);
+ i40e_release_nvm(hw);
+
+err_lldp_cfg:
+ return ret;
+}
+
+/**
+ * i40e_read_lldp_cfg - read LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ *
+ * Reads the LLDP configuration data from NVM
+ **/
+enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ u32 mem;
+
+ if (!lldp_cfg)
+ return I40E_ERR_PARAM;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem),
+ &mem, TRUE, NULL);
+ i40e_release_nvm(hw);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ /* Read a bit that holds information whether we are running flat or
+ * structured NVM image. Flat image has LLDP configuration in shadow
+ * ram, so there is a need to pass different addresses for both cases.
+ */
+ if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) {
+ /* Flat NVM case */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR,
+ I40E_SR_LLDP_CFG_PTR);
+ } else {
+ /* Good old structured NVM image */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR,
+ I40E_NVM_LLDP_CFG_PTR);
+ }
+
+ return ret;
+}
diff --git a/usr/src/uts/common/io/i40e/core/i40e_dcb.h b/usr/src/uts/common/io/i40e/core/i40e_dcb.h
new file mode 100644
index 0000000000..8bb011623c
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_dcb.h
@@ -0,0 +1,224 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2017, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_DCB_H_
+#define _I40E_DCB_H_
+
+#include "i40e_type.h"
+
+#define I40E_DCBX_OFFLOAD_DISABLED 0
+#define I40E_DCBX_OFFLOAD_ENABLED 1
+
+#define I40E_DCBX_STATUS_NOT_STARTED 0
+#define I40E_DCBX_STATUS_IN_PROGRESS 1
+#define I40E_DCBX_STATUS_DONE 2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
+#define I40E_DCBX_STATUS_DISABLED 7
+
+#define I40E_TLV_TYPE_END 0
+#define I40E_TLV_TYPE_ORG 127
+
+#define I40E_IEEE_8021QAZ_OUI 0x0080C2
+#define I40E_IEEE_SUBTYPE_ETS_CFG 9
+#define I40E_IEEE_SUBTYPE_ETS_REC 10
+#define I40E_IEEE_SUBTYPE_PFC_CFG 11
+#define I40E_IEEE_SUBTYPE_APP_PRI 12
+
+#define I40E_CEE_DCBX_OUI 0x001b21
+#define I40E_CEE_DCBX_TYPE 2
+
+#define I40E_CEE_SUBTYPE_CTRL 1
+#define I40E_CEE_SUBTYPE_PG_CFG 2
+#define I40E_CEE_SUBTYPE_PFC_CFG 3
+#define I40E_CEE_SUBTYPE_APP_PRI 4
+
+#define I40E_CEE_MAX_FEAT_TYPE 3
+#define I40E_LLDP_ADMINSTATUS_DISABLED 0
+#define I40E_LLDP_ADMINSTATUS_ENABLED_RX 1
+#define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2
+#define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX 3
+
+/* Defines for LLDP TLV header */
+#define I40E_LLDP_MIB_HLEN 14
+#define I40E_LLDP_TLV_LEN_SHIFT 0
+#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+#define I40E_LLDP_TLV_TYPE_SHIFT 9
+#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
+#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
+#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
+#define I40E_LLDP_TLV_OUI_SHIFT 8
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
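+/*
+ * Illustrative example: an IEEE ETS CFG TLV carries typelength 0xFE19, i.e.
+ * type = (0xFE19 & I40E_LLDP_TLV_TYPE_MASK) >> 9 = 127 (ORG) and
+ * length = 0xFE19 & I40E_LLDP_TLV_LEN_MASK = 25 octets of TLV information.
+ */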
+
+/* Defines for IEEE ETS TLV */
+#define I40E_IEEE_ETS_MAXTC_SHIFT 0
+#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
+#define I40E_IEEE_ETS_CBS_SHIFT 6
+#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_WILLING_SHIFT 7
+#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
+#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
+#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
+#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_PRIO_0_SHIFT 0
+#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT)
+#define I40E_CEE_PGID_PRIO_1_SHIFT 4
+#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_STRICT 15
+
+/* Defines for IEEE TSA types */
+#define I40E_IEEE_TSA_STRICT 0
+#define I40E_IEEE_TSA_CBS 1
+#define I40E_IEEE_TSA_ETS 2
+#define I40E_IEEE_TSA_VENDOR 255
+
+/* Defines for IEEE PFC TLV */
+#define I40E_IEEE_PFC_CAP_SHIFT 0
+#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
+#define I40E_IEEE_PFC_MBC_SHIFT 6
+#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_WILLING_SHIFT 7
+#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT)
+
+/* Defines for IEEE APP TLV */
+#define I40E_IEEE_APP_SEL_SHIFT 0
+#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT)
+#define I40E_IEEE_APP_PRIO_SHIFT 5
+#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+
+/* TLV definitions for preparing MIB */
+#define I40E_TLV_ID_CHASSIS_ID 0
+#define I40E_TLV_ID_PORT_ID 1
+#define I40E_TLV_ID_TIME_TO_LIVE 2
+#define I40E_IEEE_TLV_ID_ETS_CFG 3
+#define I40E_IEEE_TLV_ID_ETS_REC 4
+#define I40E_IEEE_TLV_ID_PFC_CFG 5
+#define I40E_IEEE_TLV_ID_APP_PRI 6
+#define I40E_TLV_ID_END_OF_LLDPPDU 7
+#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG
+
+#define I40E_IEEE_ETS_TLV_LENGTH 25
+#define I40E_IEEE_PFC_TLV_LENGTH 6
+#define I40E_IEEE_APP_TLV_LENGTH 11
+
+#pragma pack(1)
+
+/* IEEE 802.1AB LLDP TLV structure */
+struct i40e_lldp_generic_tlv {
+ __be16 typelength;
+ u8 tlvinfo[1];
+};
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct i40e_lldp_org_tlv {
+ __be16 typelength;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct i40e_cee_ctrl_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct i40e_cee_feat_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
+#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define I40E_CEE_APP_SELECTOR_MASK 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+};
+#pragma pack()
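+
+/*
+ * Illustrative example: a CEE APP entry for FCoE would carry protocol 0x8906,
+ * selector I40E_CEE_APP_SEL_ETHTYPE in the low two bits of upper_oui_sel and,
+ * for priority 3, prio_map == 0x08 (only bit 3 set).
+ */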
+
+/*
+ * TODO: The structures below define LLDP/DCBX variables and statistics.
+ * They are declared here, but we still need to determine how to obtain
+ * the required information from the Firmware in order to use them.
+ */
+
+/* IEEE 802.1AB LLDP Agent Statistics */
+struct i40e_lldp_stats {
+ u64 remtablelastchangetime;
+ u64 remtableinserts;
+ u64 remtabledeletes;
+ u64 remtabledrops;
+ u64 remtableageouts;
+ u64 txframestotal;
+ u64 rxframesdiscarded;
+ u64 rxportframeerrors;
+ u64 rxportframestotal;
+ u64 rxporttlvsdiscardedtotal;
+ u64 rxporttlvsunrecognizedtotal;
+ u64 remtoomanyneighbors;
+};
+
+/* IEEE 802.1Qaz DCBX variables */
+struct i40e_dcbx_variables {
+ u32 defmaxtrafficclasses;
+ u32 defprioritytcmapping;
+ u32 deftcbandwidth;
+ u32 deftsaassignment;
+};
+
+enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw);
+enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw);
+enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg);
+
+#endif /* _I40E_DCB_H_ */
diff --git a/usr/src/uts/common/io/i40e/core/i40e_devids.h b/usr/src/uts/common/io/i40e/core/i40e_devids.h
index 01487115b7..11e6e59fdd 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_devids.h
+++ b/usr/src/uts/common/io/i40e/core/i40e_devids.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -52,7 +52,7 @@
#define I40E_DEV_ID_10G_BASE_T4 0x1589
#define I40E_DEV_ID_25G_B 0x158A
#define I40E_DEV_ID_25G_SFP28 0x158B
-#define I40E_DEV_ID_X722_A0 0x374C
+#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
@@ -64,4 +64,7 @@
(d) == I40E_DEV_ID_QSFP_B || \
(d) == I40E_DEV_ID_QSFP_C)
+#define i40e_is_25G_device(d) ((d) == I40E_DEV_ID_25G_B || \
+ (d) == I40E_DEV_ID_25G_SFP28)
+
#endif /* _I40E_DEVIDS_H_ */
diff --git a/usr/src/uts/common/io/i40e/core/i40e_hmc.c b/usr/src/uts/common/io/i40e/core/i40e_hmc.c
index 4f0de109c3..f8072751d9 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_hmc.c
+++ b/usr/src/uts/common/io/i40e/core/i40e_hmc.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -211,7 +211,6 @@ exit:
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
- * @is_pf: distinguishes a VF from a PF
*
* This function:
* 1. Marks the entry in pd tabe (for paged address mode) or in sd table
diff --git a/usr/src/uts/common/io/i40e/core/i40e_hmc.h b/usr/src/uts/common/io/i40e/core/i40e_hmc.h
index 1a9995ab6a..398332effa 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_hmc.h
+++ b/usr/src/uts/common/io/i40e/core/i40e_hmc.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.c b/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.c
index a6716a913c..5472352b5d 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.c
+++ b/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.h b/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.h
index cce4a3931c..e6d0ab6644 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.h
+++ b/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/usr/src/uts/common/io/i40e/core/i40e_nvm.c b/usr/src/uts/common/io/i40e/core/i40e_nvm.c
index a32b4646c7..81fd5a97b3 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_nvm.c
+++ b/usr/src/uts/common/io/i40e/core/i40e_nvm.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -34,18 +34,6 @@
#include "i40e_prototype.h"
-enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data);
-enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
- u16 *data);
-enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data);
-enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data);
-enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
- bool last_command);
-
/**
* i40e_init_nvm_ops - Initialize NVM function pointers
* @hw: pointer to the HW structure
@@ -208,52 +196,6 @@ static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
}
/**
- * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
- * @data: word read from the Shadow RAM
- *
- * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
- **/
-enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
-{
- enum i40e_status_code ret_code = I40E_SUCCESS;
-
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- i40e_release_nvm(hw);
- }
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
- }
- return ret_code;
-}
-
-/**
- * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
- * @data: word read from the Shadow RAM
- *
- * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
- **/
-enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
- u16 offset,
- u16 *data)
-{
- enum i40e_status_code ret_code = I40E_SUCCESS;
-
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
- return ret_code;
-}
-
-/**
* i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -304,15 +246,68 @@ read_nvm_exit:
}
/**
+ * i40e_read_nvm_aq - Read Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer that receives the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
+ **/
+static enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data,
+ bool last_command)
+{
+ enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ DEBUGFUNC("i40e_read_nvm_aq");
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+		/* We can read only up to 4KB (one sector), in one AQ read */
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read fail error: tried to read %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+		/* A single read cannot spread over two sectors */
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
+
+/**
* i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
- * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ
**/
-enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
- u16 *data)
+static enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
@@ -325,55 +320,49 @@ enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
}
/**
- * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
+ * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
* @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @words: (in) number of words to read; (out) number of words actually read
- * @data: words read from the Shadow RAM
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
*
- * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
- * method. The buffer read is preceded by the NVM ownership take
- * and followed by the release.
+ * Reads one 16 bit word from the Shadow RAM.
+ *
+ * Do not use this function except in cases where the nvm lock is already
+ * taken via i40e_acquire_nvm().
**/
-enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
- u16 offset,
- u16 *words, u16 *data)
+enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
+ u16 offset,
+ u16 *data)
{
- enum i40e_status_code ret_code = I40E_SUCCESS;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
- else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
- return ret_code;
+ return i40e_read_nvm_word_aq(hw, offset, data);
+
+ return i40e_read_nvm_word_srctl(hw, offset, data);
}
/**
- * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acuire lock if necessary
+ * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
* @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @words: (in) number of words to read; (out) number of words actually read
- * @data: words read from the Shadow RAM
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
*
- * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
- * method. The buffer read is preceded by the NVM ownership take
- * and followed by the release.
+ * Reads one 16 bit word from the Shadow RAM.
**/
-enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
- data);
- i40e_release_nvm(hw);
- }
- } else {
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
- }
+
+ if (ret_code)
+ return ret_code;
+ ret_code = __i40e_read_nvm_word(hw, offset, data);
+
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ i40e_release_nvm(hw);
return ret_code;
}
@@ -388,8 +377,8 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
u16 index, word;
@@ -421,8 +410,8 @@ enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
enum i40e_status_code ret_code;
u16 read_size = *words;
@@ -470,57 +459,55 @@ read_nvm_buffer_aq_exit:
}
/**
- * i40e_read_nvm_aq - Read Shadow RAM.
- * @hw: pointer to the HW structure.
- * @module_pointer: module pointer location in words from the NVM beginning
- * @offset: offset in words from module start
- * @words: number of words to write
- * @data: buffer with words to write to the Shadow RAM
- * @last_command: tells the AdminQ that this is the last command
+ * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
*
- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method.
**/
-enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
- bool last_command)
+enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
+ u16 offset,
+ u16 *words, u16 *data)
{
- enum i40e_status_code ret_code = I40E_ERR_NVM;
- struct i40e_asq_cmd_details cmd_details;
-
- DEBUGFUNC("i40e_read_nvm_aq");
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
- /* Here we are checking the SR limit only for the flat memory model.
- * We cannot do it for the module-based model, as we did not acquire
- * the NVM resource yet (we cannot get the module pointer value).
- * Firmware will check the module-based model.
- */
- if ((offset + words) > hw->nvm.sr_size)
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write error: offset %d beyond Shadow RAM limit %d\n",
- (offset + words), hw->nvm.sr_size);
- else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
- /* We can write only up to 4KB (one sector), in one AQ write */
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write fail error: tried to write %d words, limit is %d.\n",
- words, I40E_SR_SECTOR_SIZE_IN_WORDS);
- else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
- != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
- /* A single write cannot spread over two sectors */
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
- offset, words);
- else
- ret_code = i40e_aq_read_nvm(hw, module_pointer,
- 2 * offset, /*bytes*/
- 2 * words, /*bytes*/
- data, last_command, &cmd_details);
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
return ret_code;
}
+
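The reworked i40e_read_nvm_buffer() hides the ownership handling from callers. A minimal caller sketch, assuming the usual i40e shared-code environment (the function name is made up for illustration):

/*
 * Illustrative caller only, not part of the commit: read two Shadow RAM
 * words starting at word offset 0x10.  With AQ SRCTL access enabled the
 * routine above acquires and releases NVM ownership on its own.
 */
static enum i40e_status_code
example_read_two_words(struct i40e_hw *hw, u16 *buf)
{
	u16 nwords = 2;

	return (i40e_read_nvm_buffer(hw, 0x10, &nwords, buf));
}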
/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
@@ -562,7 +549,8 @@ enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
- data, last_command, &cmd_details);
+ data, last_command, 0,
+ &cmd_details);
return ret_code;
}
@@ -651,16 +639,14 @@ enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
data = (u16 *)vmem.va;
/* read pointer to VPD area */
- ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
- &vpd_module);
+ ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
/* read pointer to PCIe Alt Auto-load module */
- ret_code = __i40e_read_nvm_word(hw,
- I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
&pcie_alt_module);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
@@ -750,19 +736,19 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
DEBUGFUNC("i40e_validate_nvm_checksum");
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- i40e_release_nvm(hw);
- if (ret_code != I40E_SUCCESS)
- goto i40e_validate_nvm_checksum_exit;
- } else {
- goto i40e_validate_nvm_checksum_exit;
- }
-
- i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+ /* We must acquire the NVM lock in order to correctly synchronize the
+ * NVM accesses across multiple PFs. Without doing so it is possible
+ * for one of the PFs to read invalid data potentially indicating that
+ * the checksum is invalid.
+ */
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_code)
+ return ret_code;
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+ i40e_release_nvm(hw);
+ if (ret_code)
+ return ret_code;
/* Verify read checksum from EEPROM is the same as
* calculated checksum
@@ -774,7 +760,6 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
if (checksum)
*checksum = checksum_local;
-i40e_validate_nvm_checksum_exit:
return ret_code;
}
@@ -805,6 +790,9 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *perrno);
+static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
static INLINE u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -814,6 +802,12 @@ static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
+static INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
+{
+ return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
+ I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+}
+
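The preservation flags occupy bits 12-13 of the nvm_access config word, next to the module pointer (bits 0-7) and transaction type (bits 8-11). A minimal packing sketch, assuming the I40E_NVM_* shifts introduced in i40e_type.h later in this change (the helper name is illustrative):

/*
 * Illustrative only: compose a config word the way the helper above and
 * i40e_nvmupd_get_module()/_get_transaction() take it apart.
 */
static u32
example_pack_nvm_config(u8 module, u8 trans, u8 preserve)
{
	return ((u32)module |
	    ((u32)trans << I40E_NVM_TRANS_SHIFT) |
	    ((u32)preserve << I40E_NVM_PRESERVATION_FLAGS_SHIFT));
}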
static const char *i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_INVALID",
"I40E_NVMUPD_READ_CON",
@@ -831,6 +825,7 @@ static const char *i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_STATUS",
"I40E_NVMUPD_EXEC_AQ",
"I40E_NVMUPD_GET_AQ_RESULT",
+ "I40E_NVMUPD_GET_AQ_EVENT",
};
/**
@@ -900,6 +895,15 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
+ /* Acquire lock to prevent race condition where adminq_task
+ * can execute after i40e_nvmupd_nvm_read/write but before state
+ * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
+ *
+ * During NVMUpdate, it is observed that lock could be held for
+ * ~5ms for most commands. However lock is held for ~60ms for
+ * NVMUPD_CSUM_LCB command.
+ */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -919,8 +923,9 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
* the wait info and return before doing anything else
*/
if (cmd->offset == 0xffff) {
- i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
- return I40E_SUCCESS;
+ i40e_nvmupd_clear_wait_state(hw);
+ status = I40E_SUCCESS;
+ break;
}
status = I40E_ERR_NOT_READY;
@@ -935,6 +940,8 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*perrno = -ESRCH;
break;
}
+
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
return status;
}
@@ -1064,6 +1071,10 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
break;
+ case I40E_NVMUPD_GET_AQ_EVENT:
+ status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
+ break;
+
default:
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in init state\n",
@@ -1242,39 +1253,55 @@ retry:
}
/**
- * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
* @hw: pointer to the hardware structure
- * @opcode: the event that just happened
**/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
- if (opcode == hw->nvm_wait_opcode) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n",
+ hw->nvm_wait_opcode);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
- if (hw->nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->nvm_release_on_done = FALSE;
- }
- hw->nvm_wait_opcode = 0;
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = FALSE;
+ }
+ hw->nvm_wait_opcode = 0;
- if (hw->aq.arq_last_status) {
- hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
- return;
- }
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
- default:
- break;
- }
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ * @desc: AdminQ descriptor
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc)
+{
+ u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+
+ if (opcode == hw->nvm_wait_opcode) {
+ i40e_memcpy(&hw->nvm_aq_event_desc, desc,
+ aq_desc_len, I40E_NONDMA_TO_NONDMA);
+ i40e_nvmupd_clear_wait_state(hw);
}
}
@@ -1332,6 +1359,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
else if (module == 0)
upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
break;
+ case I40E_NVM_AQE:
+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+ break;
}
break;
@@ -1394,6 +1424,9 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
u32 aq_data_len;
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ if (cmd->offset == 0xffff)
+ return I40E_SUCCESS;
+
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -1430,6 +1463,9 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
}
}
+ if (cmd->offset)
+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
/* and away we go! */
status = i40e_asq_send_command(hw, aq_desc, buff,
buff_size, &cmd_details);
@@ -1439,6 +1475,7 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
}
/* should we wait for a followup event? */
@@ -1520,6 +1557,41 @@ static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
}
/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
+
+ /* check copylength range */
+ if (cmd->data_size > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, aq_total_len);
+ cmd->data_size = aq_total_len;
+ }
+
+ i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
+ I40E_NONDMA_TO_NONDMA);
+
+ return I40E_SUCCESS;
+}
+
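A sketch of the command that would reach this path, assuming the new I40E_NVM_AQE transaction code added in i40e_type.h below; the helper name is made up, and a data_size larger than descriptor plus datalen is simply trimmed by the routine above:

/*
 * Illustrative only: build an nvm_access command that asks for the stored
 * AdminQ event descriptor.
 */
static void
example_build_aqe_request(struct i40e_nvm_access *cmd)
{
	memset(cmd, 0, sizeof (*cmd));
	cmd->config = (u32)I40E_NVM_AQE << I40E_NVM_TRANS_SHIFT;
	cmd->data_size = sizeof (struct i40e_aq_desc);
}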
+/**
* i40e_nvmupd_nvm_read - Read NVM
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1614,18 +1686,20 @@ static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
enum i40e_status_code status = I40E_SUCCESS;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
+ u8 preservation_flags;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
status = i40e_aq_update_nvm(hw, module, cmd->offset,
(u16)cmd->data_size, bytes, last,
- &cmd_details);
+ preservation_flags, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
diff --git a/usr/src/uts/common/io/i40e/core/i40e_prototype.h b/usr/src/uts/common/io/i40e/core/i40e_prototype.h
index 08433ebd11..784870378d 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_prototype.h
+++ b/usr/src/uts/common/io/i40e/core/i40e_prototype.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -37,7 +37,7 @@
#include "i40e_type.h"
#include "i40e_alloc.h"
-#include "i40e_virtchnl.h"
+#include "virtchnl.h"
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
@@ -139,8 +139,9 @@ enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
u64 *advt_reg,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
- struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_set_lb_modes(struct i40e_hw *hw, u8 lb_level, u8 lb_type, u8 speed,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
@@ -222,7 +223,7 @@ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags, u16 valid_flags,
+ u16 flags, u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
@@ -257,7 +258,9 @@ enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
- bool last_command,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
@@ -284,6 +287,10 @@ enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+ bool dcb_enable,
+ struct i40e_asq_cmd_details
+ *cmd_details);
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
@@ -455,7 +462,9 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc);
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
@@ -465,6 +474,37 @@ static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
return i40e_ptype_lookup[ptype];
}
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Returns the link_speed in terms of the virtchnl interface, for use in
+ * converting link_speed as reported by the AdminQ into the format used for
+ * talking to virtchnl devices. If we can't represent the link speed properly,
+ * report LINK_SPEED_UNKNOWN.
+ **/
+static INLINE enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+ switch (link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ return VIRTCHNL_LINK_SPEED_100MB;
+ case I40E_LINK_SPEED_1GB:
+ return VIRTCHNL_LINK_SPEED_1GB;
+ case I40E_LINK_SPEED_10GB:
+ return VIRTCHNL_LINK_SPEED_10GB;
+ case I40E_LINK_SPEED_40GB:
+ return VIRTCHNL_LINK_SPEED_40GB;
+ case I40E_LINK_SPEED_20GB:
+ return VIRTCHNL_LINK_SPEED_20GB;
+ case I40E_LINK_SPEED_25GB:
+ return VIRTCHNL_LINK_SPEED_25GB;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
+ }
+}
+
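A short usage sketch for the new conversion helper, assuming the usual hw->phy.link_info path carries the speed last reported by the AdminQ (the wrapper name is illustrative):

/*
 * Illustrative only: translate the AdminQ-reported speed into the value
 * carried in a virtchnl link event sent to a VF.
 */
static enum virtchnl_link_speed
example_vf_link_speed(struct i40e_hw *hw)
{
	return (i40e_virtchnl_link_speed(hw->phy.link_info.link_speed));
}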
/* prototype for functions used for SW spinlocks */
void i40e_init_spinlock(struct i40e_spinlock *sp);
void i40e_acquire_spinlock(struct i40e_spinlock *sp);
@@ -473,10 +513,10 @@ void i40e_destroy_spinlock(struct i40e_spinlock *sp);
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg);
+ struct virtchnl_vf_resource *msg);
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
diff --git a/usr/src/uts/common/io/i40e/core/i40e_register.h b/usr/src/uts/common/io/i40e/core/i40e_register.h
index 00b00c8488..65b9506126 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_register.h
+++ b/usr/src/uts/common/io/i40e/core/i40e_register.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -2803,7 +2803,7 @@
#define I40E_GLV_RUPP_MAX_INDEX 383
#define I40E_GLV_RUPP_RUPP_SHIFT 0
#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_TEPC_MAX_INDEX 383
#define I40E_GLV_TEPC_TEPC_SHIFT 0
#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
diff --git a/usr/src/uts/common/io/i40e/core/i40e_status.h b/usr/src/uts/common/io/i40e/core/i40e_status.h
index 08f09d0c8a..bed25f14b4 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_status.h
+++ b/usr/src/uts/common/io/i40e/core/i40e_status.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -103,6 +103,7 @@ enum i40e_status_code {
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
+ I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _I40E_STATUS_H_ */
diff --git a/usr/src/uts/common/io/i40e/core/i40e_type.h b/usr/src/uts/common/io/i40e/core/i40e_type.h
index 33314193bb..1bfdb3f7aa 100644
--- a/usr/src/uts/common/io/i40e/core/i40e_type.h
+++ b/usr/src/uts/common/io/i40e/core/i40e_type.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -66,6 +66,9 @@
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
+/* Max timeout in ms for the phy to respond */
+#define I40E_MAX_PHY_TIMEOUT 500
+
/* Check whether address is multicast. */
#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
@@ -81,7 +84,7 @@
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define I40E_ETH_LENGTH_OF_ADDRESS 6
+#define ETH_ALEN 6
/* Data type manipulation macros. */
#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
@@ -185,9 +188,6 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0005
-
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
@@ -258,6 +258,8 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 req_fec_info;
+ u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
@@ -328,6 +330,8 @@ struct i40e_phy_info {
#define I40E_CAP_PHY_TYPE_25GBASE_CR I40E_MASK((u64)I40E_AQ_PHY_TYPE_EXT_25G_CR, 32)
#define I40E_CAP_PHY_TYPE_25GBASE_SR I40E_MASK((u64)I40E_AQ_PHY_TYPE_EXT_25G_SR, 32)
#define I40E_CAP_PHY_TYPE_25GBASE_LR I40E_MASK((u64)I40E_AQ_PHY_TYPE_EXT_25G_LR, 32)
+#define I40E_CAP_PHY_TYPE_25GBASE_AOC I40E_MASK((u64)I40E_AQ_PHY_TYPE_EXT_25G_AOLRC, 32)
+#define I40E_CAP_PHY_TYPE_25GBASE_ACC I40E_MASK((u64)I40E_AQ_PHY_TYPE_EXT_25G_ACLRC, 32)
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
@@ -348,6 +352,15 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+ /* Cloud filter modes:
+ * Mode1: Filter on L4 port only
+ * Mode2: Filter for non-tunneled traffic
+ * Mode3: Filter for tunnel traffic
+ */
+#define I40E_CLOUD_FILTER_MODE1 0x6
+#define I40E_CLOUD_FILTER_MODE2 0x7
+#define I40E_CLOUD_FILTER_MODE3 0x8
+
u32 management_mode;
u32 mng_protocols_over_mctp;
#define I40E_MNG_PROTOCOL_PLDM 0x2
@@ -415,10 +428,10 @@ struct i40e_hw_capabilities {
struct i40e_mac_info {
enum i40e_mac_type type;
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
u16 max_fcoeq;
};
@@ -460,6 +473,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_STATUS,
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
+ I40E_NVMUPD_GET_AQ_EVENT,
};
enum i40e_nvmupd_state {
@@ -479,15 +493,21 @@ enum i40e_nvmupd_state {
#define I40E_NVM_MOD_PNT_MASK 0xFF
-#define I40E_NVM_TRANS_SHIFT 8
-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON 0x0
-#define I40E_NVM_SNT 0x1
-#define I40E_NVM_LCB 0x2
-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA 0x4
-#define I40E_NVM_CSUM 0x8
-#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
@@ -503,6 +523,19 @@ struct i40e_nvm_access {
u8 data[1];
};
+/* (Q)SFP module access definitions */
+#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
+#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
+#define I40E_MODULE_TYPE_ADDR 0x00
+#define I40E_MODULE_REVISION_ADDR 0x01
+#define I40E_MODULE_SFF_8472_COMP 0x5E
+#define I40E_MODULE_SFF_8472_SWAP 0x5C
+#define I40E_MODULE_SFF_ADDR_MODE 0x04
+#define I40E_MODULE_SFF_DIAG_CAPAB 0x40
+#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
+#define I40E_MODULE_TYPE_QSFP28 0x11
+#define I40E_MODULE_QSFP_MAX_LEN 640
+
/* PCI bus types */
enum i40e_bus_type {
i40e_bus_type_unknown = 0,
@@ -657,6 +690,7 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -677,8 +711,16 @@ struct i40e_hw {
u16 wol_proxy_vsi_seid;
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
u64 flags;
+ /* Used in set switch config AQ command */
+ u16 switch_tag;
+ u16 first_tag;
+ u16 second_tag;
+
/* debug mask */
u32 debug_mask;
char err_str[16];
@@ -1421,7 +1463,8 @@ struct i40e_hw_port_stats {
#define I40E_SR_PE_IMAGE_PTR 0x0C
#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
#define I40E_SR_MNG_CONFIG_PTR 0x0E
-#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_EMP_MODULE_PTR 0x0F
+#define I40E_SR_EMP_MODULE_PTR 0x48
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_BOOT_CONFIG_PTR 0x17
@@ -1462,6 +1505,11 @@ struct i40e_hw_port_stats {
#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
+#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
+#define I40E_PTR_TYPE BIT(15)
+#define I40E_SR_OCP_CFG_WORD0 0x2B
+#define I40E_SR_OCP_ENABLED BIT(15)
/* Shadow RAM related */
#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
@@ -1577,7 +1625,8 @@ enum i40e_reset_type {
};
/* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR 0xD
+#define I40E_NVM_LLDP_CFG_PTR 0x06
+#define I40E_SR_LLDP_CFG_PTR 0x31
struct i40e_lldp_variables {
u16 length;
u16 adminstatus;
diff --git a/usr/src/uts/common/io/i40e/core/i40e_virtchnl.h b/usr/src/uts/common/io/i40e/core/i40e_virtchnl.h
deleted file mode 100644
index 32af9c8e28..0000000000
--- a/usr/src/uts/common/io/i40e/core/i40e_virtchnl.h
+++ /dev/null
@@ -1,424 +0,0 @@
-/******************************************************************************
-
- Copyright (c) 2013-2015, Intel Corporation
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _I40E_VIRTCHNL_H_
-#define _I40E_VIRTCHNL_H_
-
-#include "i40e_type.h"
-
-/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the various i40e drivers.
- *
- * Admin queue buffer usage:
- * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
- * flags, retval, datalen, and data addr are all used normally.
- * Firmware copies the cookie fields when sending messages between the PF and
- * VF, but uses all other fields internally. Due to this limitation, we
- * must send all messages as "indirect", i.e. using an external buffer.
- *
- * All the vsi indexes are relative to the VF. Each VF can have maximum of
- * three VSIs. All the queue indexes are relative to the VSI. Each VF can
- * have a maximum of sixteen queues for all of its VSIs.
- *
- * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value is of
- * i40e_status_code type, defined in the i40e_type.h.
- *
- * In general, VF driver initialization should roughly follow the order of these
- * opcodes. The VF driver must first validate the API version of the PF driver,
- * then request a reset, then get resources, then configure queues and
- * interrupts. After these operations are complete, the VF driver may start
- * its queues, optionally add MAC and VLAN filters, and process traffic.
- */
-
-/* Opcodes for VF-PF communication. These are placed in the v_opcode field
- * of the virtchnl_msg structure.
- */
-enum i40e_virtchnl_ops {
-/* The PF sends status change events to VFs using
- * the I40E_VIRTCHNL_OP_EVENT opcode.
- * VFs send requests to the PF using the other ops.
- */
- I40E_VIRTCHNL_OP_UNKNOWN = 0,
- I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF = 2,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
- I40E_VIRTCHNL_OP_ADD_VLAN = 12,
- I40E_VIRTCHNL_OP_DEL_VLAN = 13,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
- I40E_VIRTCHNL_OP_GET_STATS = 15,
- I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
- I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
- I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
-
-};
-
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct i40e_virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- enum i40e_status_code v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-/* Message descriptions and data structures.*/
-
-/* I40E_VIRTCHNL_OP_VERSION
- * VF posts its version number to the PF. PF responds with its version number
- * in the same format, along with a return code.
- * Reply from PF has its major/minor versions also in param0 and param1.
- * If there is a major version mismatch, then the VF cannot operate.
- * If there is a minor version mismatch, then the VF can operate but should
- * add a warning to the system log.
- *
- * This enum element MUST always be specified as == 1, regardless of other
- * changes in the API. The PF must always respond to this message without
- * error regardless of version mismatch.
- */
-#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
-
-struct i40e_virtchnl_version_info {
- u32 major;
- u32 minor;
-};
-
-/* I40E_VIRTCHNL_OP_RESET_VF
- * VF sends this request to PF with no parameters
- * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
- * until reset completion is indicated. The admin queue must be reinitialized
- * after this operation.
- *
- * When reset is complete, PF must ensure that all queues in all VSIs associated
- * with the VF are stopped, all queue configurations in the HMC are set to 0,
- * and all MAC and VLAN filters (except the default MAC address) on all VSIs
- * are cleared.
- */
-
-/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * Version 1.0 VF sends this request to PF with no parameters
- * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
- * PF responds with an indirect message containing
- * i40e_virtchnl_vf_resource and one or more
- * i40e_virtchnl_vsi_resource structures.
- */
-
-struct i40e_virtchnl_vsi_resource {
- u16 vsi_id;
- u16 num_queue_pairs;
- enum i40e_vsi_type vsi_type;
- u16 qset_handle;
- u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
-};
-/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
-
-#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
-
-struct i40e_virtchnl_vf_resource {
- u16 num_vsis;
- u16 num_queue_pairs;
- u16 max_vectors;
- u16 max_mtu;
-
- u32 vf_offload_flags;
- u32 rss_key_size;
- u32 rss_lut_size;
-
- struct i40e_virtchnl_vsi_resource vsi_res[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
- * VF sends this message to set up parameters for one TX queue.
- * External data buffer contains one instance of i40e_virtchnl_txq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Tx queue config info */
-struct i40e_virtchnl_txq_info {
- u16 vsi_id;
- u16 queue_id;
- u16 ring_len; /* number of descriptors, multiple of 8 */
- u16 headwb_enabled;
- u64 dma_ring_addr;
- u64 dma_headwb_addr;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
- * VF sends this message to set up parameters for one RX queue.
- * External data buffer contains one instance of i40e_virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Rx queue config info */
-struct i40e_virtchnl_rxq_info {
- u16 vsi_id;
- u16 queue_id;
- u32 ring_len; /* number of descriptors, multiple of 32 */
- u16 hdr_size;
- u16 splithdr_enabled;
- u32 databuffer_size;
- u32 max_pkt_size;
- u64 dma_ring_addr;
- enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
- * VF sends this message to set parameters for all active TX and RX queues
- * associated with the specified VSI.
- * PF configures queues and returns status.
- * If the number of queues specified is greater than the number of queues
- * associated with the VSI, an error is returned and no queues are configured.
- */
-struct i40e_virtchnl_queue_pair_info {
- /* NOTE: vsi_id and queue_id should be identical for both queues. */
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
-};
-
-struct i40e_virtchnl_vsi_queue_config_info {
- u16 vsi_id;
- u16 num_queue_pairs;
- struct i40e_virtchnl_queue_pair_info qpair[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
- * VF uses this message to map vectors to queues.
- * The rxq_map and txq_map fields are bitmaps used to indicate which queues
- * are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
- * PF configures interrupt mapping and returns status.
- */
-struct i40e_virtchnl_vector_map {
- u16 vsi_id;
- u16 vector_id;
- u16 rxq_map;
- u16 txq_map;
- u16 rxitr_idx;
- u16 txitr_idx;
-};
-
-struct i40e_virtchnl_irq_map_info {
- u16 num_vectors;
- struct i40e_virtchnl_vector_map vecmap[1];
-};
-
-/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
- * I40E_VIRTCHNL_OP_DISABLE_QUEUES
- * VF sends these message to enable or disable TX/RX queue pairs.
- * The queues fields are bitmaps indicating which queues to act upon.
- * (Currently, we only support 16 queues per VF, but we make the field
- * u32 to allow for expansion.)
- * PF performs requested action and returns status.
- */
-struct i40e_virtchnl_queue_select {
- u16 vsi_id;
- u16 pad;
- u32 rx_queues;
- u32 tx_queues;
-};
-
-/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
- * VF sends this message in order to add one or more unicast or multicast
- * address filters for the specified VSI.
- * PF adds the filters and returns status.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
- * VF sends this message in order to remove one or more unicast or multicast
- * filters for the specified VSI.
- * PF removes the filters and returns status.
- */
-
-struct i40e_virtchnl_ether_addr {
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 pad[2];
-};
-
-struct i40e_virtchnl_ether_addr_list {
- u16 vsi_id;
- u16 num_elements;
- struct i40e_virtchnl_ether_addr list[1];
-};
-
-/* I40E_VIRTCHNL_OP_ADD_VLAN
- * VF sends this message to add one or more VLAN tag filters for receives.
- * PF adds the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_VLAN
- * VF sends this message to remove one or more VLAN tag filters for receives.
- * PF removes the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-struct i40e_virtchnl_vlan_filter_list {
- u16 vsi_id;
- u16 num_elements;
- u16 vlan_id[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
- * VF sends VSI id and flags.
- * PF returns status code in retval.
- * Note: we assume that broadcast accept mode is always enabled.
- */
-struct i40e_virtchnl_promisc_info {
- u16 vsi_id;
- u16 flags;
-};
-
-#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
-#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
-
-/* I40E_VIRTCHNL_OP_GET_STATS
- * VF sends this message to request stats for the selected VSI. VF uses
- * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
- * field is ignored by the PF.
- *
- * PF replies with struct i40e_eth_stats in an external buffer.
- */
-
-/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
- * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
- * VF sends these messages to configure RSS. Only supported if both PF
- * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
- * configuration negotiation. If this is the case, then the rss fields in
- * the vf resource struct are valid.
- * Both the key and LUT are initialized to 0 by the PF, meaning that
- * RSS is effectively disabled until set up by the VF.
- */
-struct i40e_virtchnl_rss_key {
- u16 vsi_id;
- u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
-};
-
-struct i40e_virtchnl_rss_lut {
- u16 vsi_id;
- u16 lut_entries;
- u8 lut[1]; /* RSS lookup table*/
-};
-
-/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * I40E_VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
- * By default, the PF sets these to all possible traffic types that the
- * hardware supports. The VF can query this value if it wants to change the
- * traffic types that are hashed by the hardware.
- * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
- */
-struct i40e_virtchnl_rss_hena {
- u64 hena;
-};
-
-/* I40E_VIRTCHNL_OP_EVENT
- * PF sends this message to inform the VF driver of events that may affect it.
- * No direct response is expected from the VF, though it may generate other
- * messages in response to this one.
- */
-enum i40e_virtchnl_event_codes {
- I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
- I40E_VIRTCHNL_EVENT_LINK_CHANGE,
- I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
- I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
-};
-#define I40E_PF_EVENT_SEVERITY_INFO 0
-#define I40E_PF_EVENT_SEVERITY_ATTENTION 1
-#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
-#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
-
-struct i40e_virtchnl_pf_event {
- enum i40e_virtchnl_event_codes event;
- union {
- struct {
- enum i40e_aq_link_speed link_speed;
- bool link_status;
- } link_event;
- } event_data;
-
- int severity;
-};
-
-/* VF reset states - these are written into the RSTAT register:
- * I40E_VFGEN_RSTAT1 on the PF
- * I40E_VFGEN_RSTAT on the VF
- * When the PF initiates a reset, it writes 0
- * When the reset is complete, it writes 1
- * When the PF detects that the VF has recovered, it writes 2
- * VF checks this register periodically to determine if a reset has occurred,
- * then polls it to know when the reset is complete.
- * If either the PF or VF reads the register while the hardware
- * is in a reset state, it will return DEADBEEF, which, when masked
- * will result in 3.
- */
-enum i40e_vfr_states {
- I40E_VFR_INPROGRESS = 0,
- I40E_VFR_COMPLETED,
- I40E_VFR_VFACTIVE,
- I40E_VFR_UNKNOWN,
-};
-
-#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/usr/src/uts/common/io/i40e/core/virtchnl.h b/usr/src/uts/common/io/i40e/core/virtchnl.h
new file mode 100644
index 0000000000..917f9d7d31
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/virtchnl.h
@@ -0,0 +1,747 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2017, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+};
+
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and are not considered part of base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ {virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)}
+
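A hedged illustration of the size check; the struct below is made up and not part of the header:

/*
 * Illustrative only: with the right size the macro expands to a harmless
 * enum; with the wrong size the divisor becomes zero and the build fails.
 */
struct example_two_words {
	u16 a;
	u16 b;
};

VIRTCHNL_CHECK_STRUCT_LEN(4, example_two_words);	/* compiles */
/* VIRTCHNL_CHECK_STRUCT_LEN(8, example_two_words);	   would not compile */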
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures.*/
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
+
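A small sketch of how the version macros are typically consumed; the function name is illustrative:

/*
 * Illustrative only: a 1.0 peer never sends the capability bitmap with
 * GET_VF_RESOURCES, so gate on the negotiated version.
 */
static bool
example_peer_sends_caps(const struct virtchnl_version_info *ver)
{
	return (!VF_IS_V10(ver));
}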
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF capability flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_cap_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u32 pad1;
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated. This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support; otherwise it will respond with the
+ * number requested.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
+
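For reference, a minimal request body, assuming the normal send-to-PF plumbing around it; the variable name is illustrative:

/* Illustrative only: a VF asking the PF for eight queue pairs. */
struct virtchnl_vf_res_request example_req = {
	.num_queue_pairs = 8,
};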
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
+
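A minimal sketch of the bitmap usage described above; the variable name is illustrative:

/*
 * Illustrative only: enable the first four Tx/Rx queue pairs of VSI 1;
 * rx_queues and tx_queues are per-queue bitmaps.
 */
struct virtchnl_queue_select example_sel = {
	.vsi_id = 1,
	.rx_queues = 0xf,
	.tx_queues = 0xf,
};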
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct eth_stats in an external buffer.
+ */
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
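+
+/*
+ * Illustrative sketch, not part of the interface: message lengths that
+ * virtchnl_vc_validate_vf_msg() below expects for the RSS key and LUT
+ * messages; the structures already carry one byte of payload, hence the
+ * '- 1' adjustments.
+ */
+static inline u32
+virtchnl_example_rss_key_len(u16 key_len)
+{
+	return (sizeof(struct virtchnl_rss_key) + key_len - 1);
+}
+
+static inline u32
+virtchnl_example_rss_lut_len(u16 lut_entries)
+{
+	return (sizeof(struct virtchnl_rss_lut) + lut_entries - 1);
+}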
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_ATTENTION 1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+ struct {
+ enum virtchnl_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
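+
+/*
+ * Illustrative sketch, not part of the interface: how a PF might fill in
+ * a link-change event before sending VIRTCHNL_OP_EVENT to a VF.
+ */
+static inline void
+virtchnl_example_link_event(struct virtchnl_pf_event *vpe,
+    enum virtchnl_link_speed speed, bool link_up)
+{
+	vpe->event = VIRTCHNL_EVENT_LINK_CHANGE;
+	vpe->event_data.link_event.link_speed = speed;
+	vpe->event_data.link_event.link_status = link_up;
+	vpe->severity = PF_EVENT_SEVERITY_INFO;
+}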
+
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request that the PF map IWARP vectors to IWARP
+ * queues. The request originates from the VF IWARP driver through a client
+ * interface between the VF LAN and VF IWARP drivers.
+ * A vector could have both an AEQ and a CEQ attached to it; however, since
+ * there is only a single AEQ per VF IWARP instance, most vectors will have
+ * an INVALID_IDX for the AEQ and a valid index for the CEQ.
+ * Multiple CEQs are never attached to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define QUEUE_TYPE_PE_AEQ 0x80
+#define QUEUE_INVALID_IDX 0xFFFF
+
+struct virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+
+struct virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct virtchnl_iwarp_qv_info qv_info[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
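+
+/*
+ * Illustrative sketch, not part of the interface: the message length that
+ * virtchnl_vc_validate_vf_msg() below expects for a
+ * VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP request carrying 'count' entries; the
+ * structure already holds one entry, hence the '- 1'.
+ */
+static inline u32
+virtchnl_example_iwarp_qvlist_len(u32 count)
+{
+	return (sizeof(struct virtchnl_iwarp_qvlist_info) +
+	    (count - 1) * sizeof(struct virtchnl_iwarp_qv_info));
+}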
+
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return 0xDEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
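+
+/*
+ * Illustrative sketch, not part of the interface: decode a raw VFGEN_RSTAT
+ * value read by the VF into one of the states above. Only the low two bits
+ * carry the state, which is why a 0xDEADBEEF read while the hardware is
+ * still in reset decodes to 3.
+ */
+static inline enum virtchnl_vfr_states
+virtchnl_example_decode_vfr_state(u32 rstat)
+{
+	return ((enum virtchnl_vfr_states)(rstat & 0x3));
+}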
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Validate the message format and length against the expected structure
+ * for each opcode.
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = FALSE;
+ int valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = TRUE;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = TRUE;
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = TRUE;
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = TRUE;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_IWARP:
+ /* These messages are opaque to us and will be validated in
+ * the RDMA client code. We just need to check for nonzero
+ * length. The firmware will enforce max length restrictions.
+ */
+ if (msglen)
+ valid_len = msglen;
+ else
+ err_msg_format = TRUE;
+ break;
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ break;
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_iwarp_qvlist_info *qv =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
+ if (qv->num_vectors == 0) {
+ err_msg_format = TRUE;
+ break;
+ }
+ valid_len += ((qv->num_vectors - 1) *
+ sizeof(struct virtchnl_iwarp_qv_info));
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_ERR_PARAM;
+ }
+ /* few more checks */
+ if (err_msg_format || valid_len != msglen)
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
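+
+/*
+ * Illustrative sketch, not part of the interface: a PF message handler
+ * would typically validate an incoming VF message like this before
+ * dispatching it, rejecting the message when validation fails.
+ */
+static inline bool
+virtchnl_example_msg_ok(struct virtchnl_version_info *ver, u32 opcode,
+    u8 *msg, u16 msglen)
+{
+	return (virtchnl_vc_validate_vf_msg(ver, opcode, msg, msglen) == 0);
+}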
+#endif /* _VIRTCHNL_H_ */
diff --git a/usr/src/uts/common/io/i40e/i40e_main.c b/usr/src/uts/common/io/i40e/i40e_main.c
index dd2edfd379..8971866d35 100644
--- a/usr/src/uts/common/io/i40e/i40e_main.c
+++ b/usr/src/uts/common/io/i40e/i40e_main.c
@@ -1281,19 +1281,19 @@ i40e_common_code_init(i40e_t *i40e, i40e_hw_t *hw)
}
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
+ hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
i40e_log(i40e, "The driver for the device detected a newer "
"version of the NVM image (%d.%d) than expected (%d.%d).\n"
"Please install the most recent version of the network "
"driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver,
- I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
+ I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw));
} else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
- hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) {
+ hw->aq.api_min_ver < (I40E_FW_MINOR_VERSION(hw) - 1)) {
i40e_log(i40e, "The driver for the device detected an older"
" version of the NVM image (%d.%d) than expected (%d.%d)."
"\nPlease update the NVM image.\n",
hw->aq.api_maj_ver, hw->aq.api_min_ver,
- I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR - 1);
+ I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw) - 1);
}
i40e_clear_pxe_mode(hw);