Diffstat (limited to 'usr/src/uts/common/io')
-rw-r--r--  usr/src/uts/common/io/aggr/aggr_port.c | 8
-rw-r--r--  usr/src/uts/common/io/aggr/aggr_recv.c | 36
-rw-r--r--  usr/src/uts/common/io/axf/ax88172reg.h | 163
-rw-r--r--  usr/src/uts/common/io/axf/axf_usbgem.c | 1539
-rw-r--r--  usr/src/uts/common/io/cons.c | 16
-rw-r--r--  usr/src/uts/common/io/devpoll.c | 45
-rw-r--r--  usr/src/uts/common/io/dld/dld_drv.c | 9
-rw-r--r--  usr/src/uts/common/io/dld/dld_proto.c | 87
-rw-r--r--  usr/src/uts/common/io/dld/dld_str.c | 104
-rw-r--r--  usr/src/uts/common/io/dls/dls.c | 128
-rw-r--r--  usr/src/uts/common/io/dls/dls_link.c | 72
-rw-r--r--  usr/src/uts/common/io/dls/dls_mgmt.c | 259
-rw-r--r--  usr/src/uts/common/io/dls/dls_stat.c | 172
-rw-r--r--  usr/src/uts/common/io/dr_sas/THIRDPARTYLICENSE | 32
-rw-r--r--  usr/src/uts/common/io/dr_sas/THIRDPARTYLICENSE.descrip | 1
-rw-r--r--  usr/src/uts/common/io/dr_sas/dr_sas.c | 5506
-rw-r--r--  usr/src/uts/common/io/dr_sas/dr_sas.conf | 15
-rw-r--r--  usr/src/uts/common/io/dr_sas/dr_sas.h | 1766
-rw-r--r--  usr/src/uts/common/io/dr_sas/dr_sas_list.h | 212
-rw-r--r--  usr/src/uts/common/io/fibre-channel/impl/fctl.c | 6
-rw-r--r--  usr/src/uts/common/io/gsqueue/gsqueue.c | 612
-rw-r--r--  usr/src/uts/common/io/i40e/core/README | 410
-rw-r--r--  usr/src/uts/common/io/i40e/core/THIRDPARTYLICENSE | 29
-rw-r--r--  usr/src/uts/common/io/i40e/core/THIRDPARTYLICENSE.descrip | 1
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_adminq.c | 1101
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_adminq.h | 125
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_adminq_cmd.h | 2424
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_alloc.h | 66
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_common.c | 5708
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_devids.h | 68
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_hmc.c | 373
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_hmc.h | 246
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_lan_hmc.c | 1412
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_lan_hmc.h | 201
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_nvm.c | 712
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_prototype.h | 478
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_register.h | 5317
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_status.h | 108
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_type.h | 1581
-rw-r--r--  usr/src/uts/common/io/i40e/core/i40e_virtchnl.h | 378
-rw-r--r--  usr/src/uts/common/io/i40e/i40e.conf | 19
-rw-r--r--  usr/src/uts/common/io/i40e/i40e_gld.c | 1097
-rw-r--r--  usr/src/uts/common/io/i40e/i40e_intr.c | 757
-rw-r--r--  usr/src/uts/common/io/i40e/i40e_main.c | 2883
-rw-r--r--  usr/src/uts/common/io/i40e/i40e_osdep.c | 236
-rw-r--r--  usr/src/uts/common/io/i40e/i40e_osdep.h | 201
-rw-r--r--  usr/src/uts/common/io/i40e/i40e_stats.c | 1310
-rw-r--r--  usr/src/uts/common/io/i40e/i40e_sw.h | 974
-rw-r--r--  usr/src/uts/common/io/i40e/i40e_transceiver.c | 2266
-rw-r--r--  usr/src/uts/common/io/i40e/i40e_xregs.h | 53
-rw-r--r--  usr/src/uts/common/io/inotify.c | 1504
-rw-r--r--  usr/src/uts/common/io/inotify.conf | 16
-rw-r--r--  usr/src/uts/common/io/ixgbe/ixgbe_main.c | 8
-rw-r--r--  usr/src/uts/common/io/ksocket/ksocket.c | 14
-rw-r--r--  usr/src/uts/common/io/ksocket/ksocket_impl.h | 6
-rw-r--r--  usr/src/uts/common/io/ksyms.c | 9
-rw-r--r--  usr/src/uts/common/io/mac/mac.c | 3
-rw-r--r--  usr/src/uts/common/io/mac/mac_client.c | 68
-rw-r--r--  usr/src/uts/common/io/mac/mac_datapath_setup.c | 12
-rw-r--r--  usr/src/uts/common/io/mac/mac_protect.c | 3
-rw-r--r--  usr/src/uts/common/io/mac/mac_provider.c | 8
-rw-r--r--  usr/src/uts/common/io/mac/mac_sched.c | 10
-rw-r--r--  usr/src/uts/common/io/mac/mac_stat.c | 9
-rw-r--r--  usr/src/uts/common/io/mem.c | 13
-rw-r--r--  usr/src/uts/common/io/mr_sas/mr_sas.conf | 8
-rw-r--r--  usr/src/uts/common/io/nfp/THIRDPARTYLICENSE | 19
-rw-r--r--  usr/src/uts/common/io/nfp/THIRDPARTYLICENSE.descrip | 1
-rw-r--r--  usr/src/uts/common/io/nfp/autoversion.h | 21
-rw-r--r--  usr/src/uts/common/io/nfp/drvlist.c | 19
-rw-r--r--  usr/src/uts/common/io/nfp/hostif.c | 1192
-rw-r--r--  usr/src/uts/common/io/nfp/i21285.c | 310
-rw-r--r--  usr/src/uts/common/io/nfp/i21285.h | 43
-rw-r--r--  usr/src/uts/common/io/nfp/i21555.c | 423
-rw-r--r--  usr/src/uts/common/io/nfp/i21555.h | 51
-rw-r--r--  usr/src/uts/common/io/nfp/i21555d.c | 28
-rw-r--r--  usr/src/uts/common/io/nfp/nfdev-common.h | 141
-rw-r--r--  usr/src/uts/common/io/nfp/nfdev-solaris.h | 37
-rw-r--r--  usr/src/uts/common/io/nfp/nfp.h | 113
-rw-r--r--  usr/src/uts/common/io/nfp/nfp_cmd.h | 68
-rw-r--r--  usr/src/uts/common/io/nfp/nfp_common.h | 68
-rw-r--r--  usr/src/uts/common/io/nfp/nfp_error.h | 48
-rw-r--r--  usr/src/uts/common/io/nfp/nfp_hostif.h | 54
-rw-r--r--  usr/src/uts/common/io/nfp/nfp_ifvers.c | 51
-rw-r--r--  usr/src/uts/common/io/nfp/nfp_osif.h | 105
-rw-r--r--  usr/src/uts/common/io/nfp/nfpci.h | 171
-rw-r--r--  usr/src/uts/common/io/nfp/osif.c | 184
-rw-r--r--  usr/src/uts/common/io/overlay/overlay.c | 2184
-rw-r--r--  usr/src/uts/common/io/overlay/overlay.conf | 16
-rw-r--r--  usr/src/uts/common/io/overlay/overlay.mapfile | 46
-rw-r--r--  usr/src/uts/common/io/overlay/overlay_fm.c | 82
-rw-r--r--  usr/src/uts/common/io/overlay/overlay_mux.c | 354
-rw-r--r--  usr/src/uts/common/io/overlay/overlay_plugin.c | 281
-rw-r--r--  usr/src/uts/common/io/overlay/overlay_prop.c | 122
-rw-r--r--  usr/src/uts/common/io/overlay/overlay_target.c | 1651
-rw-r--r--  usr/src/uts/common/io/overlay/plugins/overlay_vxlan.c | 372
-rw-r--r--  usr/src/uts/common/io/physmem.c | 8
-rw-r--r--  usr/src/uts/common/io/pseudo.conf | 9
-rw-r--r--  usr/src/uts/common/io/pseudonex.c | 26
-rw-r--r--  usr/src/uts/common/io/ptm.c | 47
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c | 4
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas_hash.c | 215
-rw-r--r--  usr/src/uts/common/io/scsi/targets/sd.c | 79
-rw-r--r--  usr/src/uts/common/io/signalfd.c | 488
-rw-r--r--  usr/src/uts/common/io/stream.c | 10
-rw-r--r--  usr/src/uts/common/io/udmf/dm9601reg.h | 348
-rw-r--r--  usr/src/uts/common/io/udmf/udmf_usbgem.c | 1036
-rw-r--r--  usr/src/uts/common/io/upf/adm8511reg.h | 205
-rw-r--r--  usr/src/uts/common/io/upf/upf_usbgem.c | 1213
-rw-r--r--  usr/src/uts/common/io/urf/rtl8150reg.h | 218
-rw-r--r--  usr/src/uts/common/io/urf/urf_usbgem.c | 1039
-rw-r--r--  usr/src/uts/common/io/usbgem/usbgem.c | 6389
-rw-r--r--  usr/src/uts/common/io/usbgem/usbgem.h | 428
-rw-r--r--  usr/src/uts/common/io/usbgem/usbgem_mii.h | 242
-rw-r--r--  usr/src/uts/common/io/vioif/vioif.c | 120
-rw-r--r--  usr/src/uts/common/io/vnd/frameio.c | 464
-rw-r--r--  usr/src/uts/common/io/vnd/vnd.c | 5800
-rw-r--r--  usr/src/uts/common/io/vnd/vnd.conf | 16
-rw-r--r--  usr/src/uts/common/io/vnic/vnic_dev.c | 87
-rw-r--r--  usr/src/uts/common/io/vscan/vscan_svc.c | 4
-rw-r--r--  usr/src/uts/common/io/zfd.c | 1154
120 files changed, 70201 insertions, 716 deletions
diff --git a/usr/src/uts/common/io/aggr/aggr_port.c b/usr/src/uts/common/io/aggr/aggr_port.c
index 00545d2c03..a39110255a 100644
--- a/usr/src/uts/common/io/aggr/aggr_port.c
+++ b/usr/src/uts/common/io/aggr/aggr_port.c
@@ -21,6 +21,7 @@
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright 2012 OmniTI Computer Consulting, Inc All rights reserved.
*/
/*
@@ -528,8 +529,13 @@ aggr_port_promisc(aggr_port_t *port, boolean_t on)
if (on) {
mac_rx_clear(port->lp_mch);
+ /* We use the promisc callback because, without hardware
+  * rings, we deliver through flows, which would cause duplicate
+  * packet delivery once we have flipped into this mode to
+  * compensate for the lack of hardware MAC matching.
+  */
rc = mac_promisc_add(port->lp_mch, MAC_CLIENT_PROMISC_ALL,
- aggr_recv_cb, port, &port->lp_mphp,
+ aggr_recv_promisc_cb, port, &port->lp_mphp,
MAC_PROMISC_FLAGS_NO_TX_LOOP);
if (rc != 0) {
mac_rx_set(port->lp_mch, aggr_recv_cb, port);
diff --git a/usr/src/uts/common/io/aggr/aggr_recv.c b/usr/src/uts/common/io/aggr/aggr_recv.c
index 2bdb7872e3..0dfe234b70 100644
--- a/usr/src/uts/common/io/aggr/aggr_recv.c
+++ b/usr/src/uts/common/io/aggr/aggr_recv.c
@@ -21,6 +21,7 @@
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright 2012 OmniTI Computer Consulting, Inc All rights reserved.
*/
/*
@@ -68,16 +69,27 @@ aggr_recv_lacp(aggr_port_t *port, mac_resource_handle_t mrh, mblk_t *mp)
/*
* Callback function invoked by MAC service module when packets are
- * made available by a MAC port.
+ * made available by a MAC port, whether in promiscuous mode or not.
*/
/* ARGSUSED */
-void
-aggr_recv_cb(void *arg, mac_resource_handle_t mrh, mblk_t *mp,
- boolean_t loopback)
+static void
+aggr_recv_path_cb(void *arg, mac_resource_handle_t mrh, mblk_t *mp,
+ boolean_t loopback, boolean_t promisc_path)
{
aggr_port_t *port = (aggr_port_t *)arg;
aggr_grp_t *grp = port->lp_grp;
+ /* When lp_promisc_on has been turned on to compensate for
+  * insufficient hardware MAC matching and hardware rings are
+  * not in use, we fall back to using flows for delivery, which
+  * can push duplicates up the stack. Only respect the chosen
+  * path.
+  */
+ if (port->lp_promisc_on != promisc_path) {
+ freemsgchain(mp);
+ return;
+ }
+
if (grp->lg_lacp_mode == AGGR_LACP_OFF) {
aggr_mac_rx(grp->lg_mh, mrh, mp);
} else {
@@ -161,3 +173,19 @@ aggr_recv_cb(void *arg, mac_resource_handle_t mrh, mblk_t *mp,
}
}
}
+
+/* ARGSUSED */
+void
+aggr_recv_cb(void *arg, mac_resource_handle_t mrh, mblk_t *mp,
+ boolean_t loopback)
+{
+ aggr_recv_path_cb(arg, mrh, mp, loopback, B_FALSE);
+}
+
+/* ARGSUSED */
+void
+aggr_recv_promisc_cb(void *arg, mac_resource_handle_t mrh, mblk_t *mp,
+ boolean_t loopback)
+{
+ aggr_recv_path_cb(arg, mrh, mp, loopback, B_TRUE);
+}
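
[The mechanics above reduce to one shared handler plus two thin tagging wrappers: each wrapper records which path a packet arrived on, and the handler frees anything from the path the port is not currently honoring, so flipping lp_promisc_on can never double-deliver. A self-contained user-space sketch of the pattern follows; all names are illustrative stand-ins, not the aggr API.]

    #include <stdio.h>

    typedef struct port {
        int promisc_on;     /* nonzero while the promisc path is live */
    } port_t;

    /* stand-ins for freemsgchain() and aggr_mac_rx() */
    static void drop(const char *pkt)    { printf("dropped:   %s\n", pkt); }
    static void deliver(const char *pkt) { printf("delivered: %s\n", pkt); }

    static void
    common_rx(port_t *p, const char *pkt, int promisc_path)
    {
        /* only the currently chosen path may deliver; the other is a dup */
        if (p->promisc_on != promisc_path) {
            drop(pkt);
            return;
        }
        deliver(pkt);
    }

    /* the two registered callbacks differ only in the path tag */
    static void rx_cb(port_t *p, const char *pkt)         { common_rx(p, pkt, 0); }
    static void rx_promisc_cb(port_t *p, const char *pkt) { common_rx(p, pkt, 1); }

    int
    main(void)
    {
        port_t port = { 1 };                    /* promisc path active */
        rx_cb(&port, "flow-path copy");         /* duplicate: dropped */
        rx_promisc_cb(&port, "promisc copy");   /* delivered */
        return (0);
    }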
diff --git a/usr/src/uts/common/io/axf/ax88172reg.h b/usr/src/uts/common/io/axf/ax88172reg.h
new file mode 100644
index 0000000000..8ca6ebc187
--- /dev/null
+++ b/usr/src/uts/common/io/axf/ax88172reg.h
@@ -0,0 +1,163 @@
+/*
+ * @(#)ax88172reg.h 1.1 09/06/15
+ * Macro definitions for ASIX AX88172 USB to fast ethernet controller
+ * based on ASIX AX88172/88772 data sheet
+ * This file is public domain. Coded by M.Murayama (KHF04453@nifty.com)
+ */
+
+#ifndef __AX88172_H__
+#define __AX88172_H__
+
+/*
+ * Vendor command definitions
+ */
+#define VCMD_READ_SRAM 0x02
+#define VCMD_WRITE_RXSRAM 0x03
+#define VCMD_WRITE_TXSRAM 0x04
+#define VCMD_SOFTWARE_MII_OP 0x06
+#define VCMD_READ_MII_REG 0x07
+#define VCMD_WRITE_MII_REG 0x08
+#define VCMD_READ_MII_OPMODE 0x09
+#define VCMD_HARDWARE_MII_OP 0x0a
+#define VCMD_READ_SROM 0x0b
+#define VCMD_WRITE_SROM 0x0c
+#define VCMD_WRITE_SROM_ENABLE 0x0d
+#define VCMD_WRITE_SROM_DISABLE 0x0e
+#define VCMD_READ_RXCTRL 0x0f
+#define VCMD_WRITE_RXCTRL 0x10
+#define VCMD_READ_IPGS 0x11
+#define VCMD_WRITE_IPG 0x12
+#define VCMD_WRITE_IPG1 0x13
+#define VCMD_WRITE_IPG2 0x14
+#define VCMD_READ_MCAST_FILTER 0x15
+#define VCMD_WRITE_MCAST_FILTER 0x16
+#define VCMD_READ_NODE_ID 0x17
+#define VCMD_READ_PHY_IDS 0x19
+#define VCMD_READ_MEDIUM_STATUS 0x1a
+#define VCMD_WRITE_MEDIUM_STATUS 0x1b
+#define VCMD_SET_MONITOR_MODE 0x1c
+#define VCMD_GET_MONITOR_MODE 0x1d
+#define VCMD_READ_GPIO 0x1e
+#define VCMD_WRITE_GPIO 0x1f
+
+/* ax88772 only, currently not supported */
+#define VCMD_WRITE_IPGS_88772 0x12
+#define VCMD_READ_NODE_ID_88772 0x13
+#define VCMD_WRITE_NODE_ID_88772 0x14
+#define VCMD_WRITE_TEST_REG_88772 0x17
+#define VCMD_SOFTWARE_RESET_88772 0x20
+#define VCMD_READ_PHY_SELECT_88772 0x21
+#define VCMD_WRITE_PHY_SELECT_88772 0x22
+
+
+/*
+ * Register definitions
+ */
+
+/* Rx control register */
+#define RCR_SO 0x80 /* Start Operation */
+#define RCR_AP_88772 0x20 /* accept physical address from mcast filter */
+#define RCR_AM 0x10 /* accept multicast address */
+#define RCR_AB 0x08 /* accept broadcast address */
+#define RCR_SEP 0x04 /* save error packet */
+#define RCR_AMALL 0x02 /* accept all multicast address */
+#define RCR_PRO 0x01 /* promiscuous, all frames received */
+
+#define RCR_MFB 0x0300
+#define RCR_MFB_SHIFT 8
+#define RCR_MFB_2K (0U << RCR_MFB_SHIFT)
+#define RCR_MFB_4K (1U << RCR_MFB_SHIFT)
+#define RCR_MFB_8K (2U << RCR_MFB_SHIFT)
+#define RCR_MFB_16K (3U << RCR_MFB_SHIFT)
+
+#define RCR_BITS \
+ "\020" \
+ "\010SO" \
+ "\006AP" \
+ "\005AM" \
+ "\004AB" \
+ "\003SEP" \
+ "\002AMALL" \
+ "\001PRO"
+
+/* Medium status register */
+#define MSR_SM 0x1000 /* super mac support */
+#define MSR_SBP 0x0800 /* stop backpressure */
+#define MSR_PS 0x0200 /* port speed in mii mode */
+#define MSR_RE 0x0100 /* rx enable */
+#define MSR_PF 0x0080 /* check only length/type for pause frame */
+#define MSR_JFE 0x0040 /* jumbo frame enable */
+#define MSR_TFC 0x0020 /* tx flow control enable */
+#define MSR_RFC 0x0010 /* rx flow control enable (178) */
+#define MSR_FCEN 0x0010 /* flow control enable (172/772) */
+#define MSR_ENCK 0x0008 /* Enable GTX_CLK and TXC clock output (178) */
+#define MSR_TXABT 0x0004 /* Tx abort allow, always set */
+#define MSR_FDPX 0x0002 /* full duplex */
+#define MSR_GM 0x0001 /* Gigabit mode (178) */
+
+#define MSR_BITS \
+ "\020" \
+ "\015SM" \
+ "\014SBP" \
+ "\012PS" \
+ "\011RE" \
+ "\005FCEN" \
+ "\004ENCK" \
+ "\003TXABT" \
+ "\002FDPX" \
+ "\001GM"
+
+/* monitor mode register */
+#define MMR_RWMP 0x04 /* remote wakeup by magic pkt */
+#define MMR_RWLU 0x02 /* remote wakeup by linkup */
+#define MMR_MOM 0x01 /* monitor mode 1:en, 0:dis */
+
+#define MMR_BITS \
+ "\020" \
+ "\003RWMP" \
+ "\002RWLU" \
+ "\001MOM"
+
+/* GPIO register */
+#define GPIO_RSE 0x80 /* reload serial eeprom (88772)*/
+#define GPIO_DATA2 0x20
+#define GPIO_EN2 0x10
+#define GPIO_DATA1 0x08
+#define GPIO_EN1 0x04
+#define GPIO_DATA0 0x02
+#define GPIO_EN0 0x01
+
+#define GPIO_BITS \
+ "\020" \
+ "\010RSE" \
+ "\006DATA2" \
+ "\005EN2" \
+ "\004DATA1" \
+ "\003EN1" \
+ "\002DATA0" \
+ "\001EN0"
+
+/* Software reset register */
+#define SWRST_IPPD 0x40 /* internal phy power down control */
+#define SWRST_IPRL 0x20 /* internal phy reset control */
+#define SWRST_BZ 0x10 /* force Bulk In to return zero-length pkt */
+#define SWRST_PRL 0x08 /* external phy reset pin level */
+#define SWRST_PRTE 0x04 /* external phy tri-state enable */
+#define SWRST_RT 0x02 /* clear frame length error for Bulk-Out */
+#define SWRST_RR 0x01 /* clear frame length error for Bulk-In */
+
+#define SWRST_BITS \
+ "\020" \
+ "\007IPPD" \
+ "\006IPRL" \
+ "\005BZ" \
+ "\004PRL" \
+ "\003PRTE" \
+ "\002RT" \
+ "\001RR"
+
+/* Software PHY Select Status register */
+#define SPSS_ASEL 0x02 /* 1:auto select 0:manual select */
+#define SPSS_PSEL 0x01 /* 1:internal phy, 0:external (when ASEL=0) */
+
+#endif /* __AX88172_H__ */
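
[The *_BITS strings above are arguments for the %b conversion understood by the illumos cmn_err()/printf() family: the leading octal byte gives the output base (\020 means hexadecimal) and each subsequent \NNN byte names bit NNN, counted from 1 at the least-significant bit. For example, mirroring the DPRINTF usage in the driver that follows:]

    /*
     * 0x99 == RCR_SO | RCR_AM | RCR_AB | RCR_PRO, so this prints:
     *   rcr: 99<SO,AM,AB,PRO>
     */
    cmn_err(CE_CONT, "rcr: %b", 0x99, RCR_BITS);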
diff --git a/usr/src/uts/common/io/axf/axf_usbgem.c b/usr/src/uts/common/io/axf/axf_usbgem.c
new file mode 100644
index 0000000000..28963f6849
--- /dev/null
+++ b/usr/src/uts/common/io/axf/axf_usbgem.c
@@ -0,0 +1,1539 @@
+/*
+ * axf_usbgem.c : ASIX AX88172/772 USB to Fast Ethernet Driver for Solaris
+ *
+ * Copyright (c) 2004-2012 Masayuki Murayama. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the author nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#pragma ident "@(#)axf_usbgem.c 1.3 12/02/09"
+
+/*
+ * Changelog:
+ */
+
+/*
+ * TODO
+ * handle RXMODE_ENABLE in set_rx_filter()
+ */
+/* ======================================================= */
+
+/*
+ * Solaris system header files and macros
+ */
+
+/* minimum kernel headers for drivers */
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/debug.h>
+#include <sys/kmem.h>
+#include <sys/modctl.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/byteorder.h>
+
+/* ethernet stuff */
+#include <sys/ethernet.h>
+
+/* interface card depend stuff */
+#include <sys/stropts.h>
+#include <sys/stream.h>
+#include <sys/strlog.h>
+#include <sys/usb/usba.h>
+#include "usbgem.h"
+
+/* hardware stuff */
+#include "usbgem_mii.h"
+#include "ax88172reg.h"
+
+char ident[] = "ax88x72 usbnic driver v" VERSION;
+
+/*
+ * Useful macros
+ */
+#define CHECK_AND_JUMP(err, label) if (err != USB_SUCCESS) goto label
+#define LE16P(p) ((((uint8_t *)(p))[1] << 8) | ((uint8_t *)(p))[0])
+
+#define AX88172(dp) \
+ (((struct axf_dev *)(dp)->private)->chip->type == CHIP_TYPE_AX88172)
+
+#define AX88772(dp) \
+ (((struct axf_dev *)(dp)->private)->chip->type == CHIP_TYPE_AX88772)
+
+/*
+ * Debugging
+ */
+#ifdef DEBUG_LEVEL
+static int axf_debug = DEBUG_LEVEL;
+#define DPRINTF(n, args) if (axf_debug > (n)) cmn_err args
+#else
+#define DPRINTF(n, args)
+#endif
+
+/*
+ * Our configuration for ax88172
+ */
+/* timeouts */
+#define ONESEC (drv_usectohz(1*1000000))
+
+/*
+ * RX/TX buffer size
+ */
+
+/*
+ * Local device definitions
+ */
+struct chip_info {
+ uint16_t vid; /* usb vendor id */
+ uint16_t pid; /* usb product id */
+ int type;
+ uint8_t gpio_reset[2];
+ uint8_t gpio_speed[2];
+ uint8_t gpio_duplex[2];
+ char *name;
+#define CHIP_TYPE_AX88172 0
+#define CHIP_TYPE_AX88772 1
+#define CHIP_TYPE_AX88178 2
+};
+
+#define GPIO_DEFAULT {0x00, 0x15}, {0, 0}, {0, 0}
+struct chip_info chiptbl_88x7x[] = {
+/* AX88172 */
+{
+ /* Planex UE2-100TX, Hawking UF200, TrendNet TU2-ET100 */
+ 0x07b8, 0x420a, CHIP_TYPE_AX88172,
+
+ /*
+ * the default setting covers below:
+ * gpio bit2 has to be 0 and gpio bit0 has to be 1
+ */
+ {0, 0},
+ {GPIO_EN1, GPIO_DATA1 | GPIO_EN1},
+ {0, 0},
+ "Planex UE2-100TX", /* tested */
+},
+{
+ 0x2001, 0x1a00, CHIP_TYPE_AX88172,
+ {0x9f, 0x9e}, {0, 0}, {0, 0},
+ "D-Link dube100", /* XXX */
+},
+{
+ 0x077b, 0x2226, CHIP_TYPE_AX88172,
+ GPIO_DEFAULT,
+ "Linksys USB200M",
+},
+{
+ 0x0846, 0x1040, CHIP_TYPE_AX88172,
+ GPIO_DEFAULT,
+ "Netgear FA120",
+},
+{
+ 0x0b95, 0x1720, CHIP_TYPE_AX88172,
+ GPIO_DEFAULT,
+ "Intellinet, ST Lab USB Ethernet",
+},
+{
+ 0x08dd, 0x90ff, CHIP_TYPE_AX88172,
+ GPIO_DEFAULT,
+ "Billionton Systems, USB2AR",
+},
+{
+ 0x0557, 0x2009, CHIP_TYPE_AX88172,
+ GPIO_DEFAULT,
+ "ATEN UC210T",
+},
+{
+ 0x0411, 0x003d, CHIP_TYPE_AX88172,
+ GPIO_DEFAULT,
+ "Buffalo LUA-U2-KTX",
+},
+{
+ 0x6189, 0x182d, CHIP_TYPE_AX88172,
+ GPIO_DEFAULT,
+ "Sitecom LN-029 USB 2.0 10/100 Ethernet adapter",
+},
+{
+ 0x07aa, 0x0017, CHIP_TYPE_AX88172,
+ GPIO_DEFAULT,
+ "corega FEther USB2-TX",
+},
+{
+ 0x1189, 0x0893, CHIP_TYPE_AX88172,
+ GPIO_DEFAULT,
+ "Surecom EP-1427X-2",
+},
+{
+ 0x1631, 0x6200, CHIP_TYPE_AX88172,
+ GPIO_DEFAULT,
+ "goodway corp usb gwusb2e",
+},
+/* AX88772 and AX88178 */
+{
+ 0x13b1, 0x0018, CHIP_TYPE_AX88772,
+ {0, 0}, {0, 0}, {0, 0},
+ "Linksys USB200M rev.2",
+},
+{
+ 0x1557, 0x7720, CHIP_TYPE_AX88772,
+ {0, 0}, {0, 0}, {0, 0},
+ "0Q0 cable ethernet",
+},
+{
+ 0x07d1, 0x3c05, CHIP_TYPE_AX88772,
+ {0, 0}, {0, 0}, {0, 0},
+ "DLink DUB E100 ver B1",
+},
+{
+ 0x2001, 0x3c05, CHIP_TYPE_AX88772,
+ {0, 0}, {0, 0}, {0, 0},
+ "DLink DUB E100 ver B1(2)",
+},
+{
+ 0x05ac, 0x1402, CHIP_TYPE_AX88772,
+ {0, 0}, {0, 0}, {0, 0},
+ "Apple Ethernet USB Adapter",
+},
+{
+ 0x1737, 0x0039, CHIP_TYPE_AX88178,
+ {0, 0}, {0, 0}, {0, 0},
+ "Linksys USB1000",
+},
+{
+ 0x0411, 0x006e, CHIP_TYPE_AX88178,
+ {0, 0}, {0, 0}, {0, 0},
+ "Buffalo LUA-U2-KGT/LUA-U2-GT",
+},
+{
+ 0x04bb, 0x0930, CHIP_TYPE_AX88178,
+ {0, 0}, {0, 0}, {0, 0},
+ "I/O DATA ETG-US2",
+},
+{
+ 0x050d, 0x5055, CHIP_TYPE_AX88178,
+ {0, 0}, {0, 0}, {0, 0},
+ "Belkin F5D5055",
+},
+{
+ /* generic ax88772 must be the last entry */
+ /* planex UE-200TX-G */
+ 0x0b95, 0x7720, CHIP_TYPE_AX88772,
+ {0, 0}, {0, 0}, {0, 0},
+ "ASIX AX88772/AX88178", /* tested */
+},
+};
+
+#define CHIPTABLESIZE (sizeof (chiptbl_88x7x) / sizeof (struct chip_info))
+
+struct axf_dev {
+ /*
+ * Misc HW information
+ */
+ struct chip_info *chip;
+ uint8_t ipg[3];
+ uint8_t gpio;
+ uint16_t rcr;
+ uint16_t msr;
+ uint8_t last_link_state;
+ boolean_t phy_has_reset;
+};
+
+/*
+ * private functions
+ */
+
+/* mii operations */
+static uint16_t axf_mii_read(struct usbgem_dev *, uint_t, int *errp);
+static void axf_mii_write(struct usbgem_dev *, uint_t, uint16_t, int *errp);
+
+/* nic operations */
+static int axf_reset_chip(struct usbgem_dev *);
+static int axf_init_chip(struct usbgem_dev *);
+static int axf_start_chip(struct usbgem_dev *);
+static int axf_stop_chip(struct usbgem_dev *);
+static int axf_set_media(struct usbgem_dev *);
+static int axf_set_rx_filter(struct usbgem_dev *);
+static int axf_get_stats(struct usbgem_dev *);
+static void axf_interrupt(struct usbgem_dev *, mblk_t *);
+
+/* packet operations */
+static mblk_t *axf_tx_make_packet(struct usbgem_dev *, mblk_t *);
+static mblk_t *axf_rx_make_packet(struct usbgem_dev *, mblk_t *);
+
+/* =============================================================== */
+/*
+ * I/O functions
+ */
+/* =============================================================== */
+#define OUT(dp, req, val, ix, len, buf, errp, label) \
+ if ((*(errp) = usbgem_ctrl_out((dp), \
+ /* bmRequestType */ USB_DEV_REQ_HOST_TO_DEV \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ (req), \
+ /* wValue */ (val), \
+ /* wIndex */ (ix), \
+ /* wLength */ (len), \
+ /* value */ (buf), \
+ /* size */ (len))) != USB_SUCCESS) goto label
+
+#define IN(dp, req, val, ix, len, buf, errp, label) \
+ if ((*(errp) = usbgem_ctrl_in((dp), \
+ /* bmRequestType */ USB_DEV_REQ_DEV_TO_HOST \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ (req), \
+ /* wValue */ (val), \
+ /* wIndex */ (ix), \
+ /* wLength */ (len), \
+ /* valuep */ (buf), \
+ /* size */ (len))) != USB_SUCCESS) goto label
+
+/* =============================================================== */
+/*
+ * Hardware manipulation
+ */
+/* =============================================================== */
+static int
+axf_reset_phy(struct usbgem_dev *dp)
+{
+ uint8_t phys[2];
+ uint8_t val8;
+ int err;
+ struct axf_dev *lp = dp->private;
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ if (AX88172(dp)) {
+ delay(drv_usectohz(5000));
+ IN(dp, VCMD_READ_GPIO, 0, 0, 1, &val8, &err, usberr);
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: gpio 0x%b",
+ dp->name, __func__, val8, GPIO_BITS));
+
+ /* reset MII PHY */
+ val8 = lp->chip->gpio_reset[1]
+ | lp->chip->gpio_speed[dp->speed]
+ | lp->chip->gpio_duplex[dp->full_duplex];
+
+ OUT(dp, VCMD_WRITE_GPIO,
+ val8, 0, 0, NULL, &err, usberr);
+ delay(drv_usectohz(5000));
+
+ val8 = lp->chip->gpio_reset[0]
+ | lp->chip->gpio_speed[dp->speed]
+ | lp->chip->gpio_duplex[dp->full_duplex];
+
+ OUT(dp, VCMD_WRITE_GPIO,
+ val8, 0, 0, NULL, &err, usberr);
+ delay(drv_usectohz(5000));
+ } else {
+ lp->gpio = GPIO_RSE | GPIO_DATA2 | GPIO_EN2;
+ OUT(dp, VCMD_WRITE_GPIO, lp->gpio, 0,
+ 0, NULL, &err, usberr);
+ drv_usecwait(1000);
+
+ OUT(dp, VCMD_WRITE_PHY_SELECT_88772,
+ dp->mii_phy_addr == 16 ? 1 : 0, 0, 0, NULL, &err, usberr);
+
+ OUT(dp, VCMD_SOFTWARE_RESET_88772,
+ SWRST_IPPD | SWRST_PRL, 0, 0, NULL, &err, usberr);
+ delay(drv_usectohz(150*1000));
+ OUT(dp, VCMD_SOFTWARE_RESET_88772,
+ 0, 0, 0, NULL, &err, usberr);
+
+ OUT(dp, VCMD_SOFTWARE_RESET_88772,
+ dp->mii_phy_addr == 16 ? SWRST_IPRL : SWRST_PRTE,
+ 0, 0, NULL, &err, usberr);
+ delay(drv_usectohz(150*1000));
+ }
+
+
+ return (USB_SUCCESS);
+
+usberr:
+ return (USB_FAILURE);
+}
+
+static int
+axf_reset_chip(struct usbgem_dev *dp)
+{
+ int err = USB_SUCCESS;
+
+ if (AX88172(dp)) {
+ /* there is no way to reset the nic */
+ return (USB_SUCCESS);
+ }
+#ifdef NEVER
+ OUT(dp, VCMD_SOFTWARE_RESET_88772,
+ SWRST_RR | SWRST_RT, 0, 0, NULL, &err, usberr);
+ OUT(dp, VCMD_SOFTWARE_RESET_88772,
+ 0, 0, 0, NULL, &err, usberr);
+usberr:
+#endif
+ return (err);
+}
+
+/*
+ * Setup ax88172
+ */
+static int
+axf_init_chip(struct usbgem_dev *dp)
+{
+ int i;
+ uint32_t val;
+ int err = USB_SUCCESS;
+ uint16_t reg;
+ uint8_t buf[2];
+ uint16_t tmp16;
+ struct axf_dev *lp = dp->private;
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /* rx control register: read default value */
+ if (!AX88172(dp)) {
+ /* clear rx control */
+ OUT(dp, VCMD_WRITE_RXCTRL, 0, 0, 0, NULL, &err, usberr);
+ }
+
+ IN(dp, VCMD_READ_RXCTRL, 0, 0, 2, buf, &err, usberr);
+ lp->rcr = LE16P(buf);
+ DPRINTF(0, (CE_CONT, "!%s: %s: rcr(default):%b",
+ dp->name, __func__, lp->rcr, RCR_BITS));
+
+ lp->rcr &= ~RCR_SO;
+
+ /* Media status register */
+ if (AX88172(dp)) {
+#ifdef notdef
+ lp->msr = MSR_TXABT;
+#else
+ lp->msr = 0;
+#endif
+ } else {
+ lp->msr = MSR_RE | MSR_TXABT;
+ }
+ DPRINTF(0, (CE_CONT, "!%s: %s: msr:%b",
+ dp->name, __func__, lp->msr, MSR_BITS));
+ err = axf_set_media(dp);
+ CHECK_AND_JUMP(err, usberr);
+
+ /* write IPG0-2 registers */
+ if (AX88172(dp)) {
+ OUT(dp, VCMD_WRITE_IPG, lp->ipg[0], 0, 0, NULL, &err, usberr);
+ OUT(dp, VCMD_WRITE_IPG1, lp->ipg[1], 0, 0, NULL, &err, usberr);
+ OUT(dp, VCMD_WRITE_IPG2, lp->ipg[2], 0, 0, NULL, &err, usberr);
+ } else {
+ /* EMPTY */
+ }
+#ifdef ENABLE_RX_IN_INIT_CHIP
+ /* enable Rx */
+ lp->rcr |= RCR_SO;
+ OUT(dp, VCMD_WRITE_RXCTRL, lp->rcr, 0, 0, NULL, &err, usberr);
+#endif
+usberr:
+ DPRINTF(2, (CE_CONT, "!%s: %s: end (%d: %s)",
+ dp->name, __func__,
+ err, err == USB_SUCCESS ? "success" : "error"));
+ return (err);
+}
+
+static int
+axf_start_chip(struct usbgem_dev *dp)
+{
+ int err = USB_SUCCESS;
+ struct axf_dev *lp = dp->private;
+#ifndef ENABLE_RX_IN_INIT_CHIP
+ /* enable Rx */
+ lp->rcr |= RCR_SO;
+ OUT(dp, VCMD_WRITE_RXCTRL, lp->rcr, 0, 0, NULL, &err, usberr);
+
+usberr:
+ DPRINTF(2, (CE_CONT, "!%s: %s: end (%d: %s)",
+ dp->name, __func__,
+ err, err == USB_SUCCESS ? "success" : "error"));
+#endif
+ return (err);
+}
+
+static int
+axf_stop_chip(struct usbgem_dev *dp)
+{
+ int err = USB_SUCCESS;
+ struct axf_dev *lp = dp->private;
+
+ /* Disable Rx */
+ lp->rcr &= ~RCR_SO;
+ OUT(dp, VCMD_WRITE_RXCTRL, lp->rcr, 0, 0, NULL, &err, usberr);
+
+ /*
+ * Restore factory mac address
+ * if we have changed current mac address
+ */
+ if (!AX88172(dp) &&
+ bcmp(dp->dev_addr.ether_addr_octet,
+ dp->cur_addr.ether_addr_octet,
+ ETHERADDRL) != 0) {
+ OUT(dp, VCMD_WRITE_NODE_ID_88772, 0, 0,
+ ETHERADDRL, dp->cur_addr.ether_addr_octet, &err, usberr);
+ }
+usberr:
+ return (axf_reset_chip(dp));
+}
+
+static int
+axf_get_stats(struct usbgem_dev *dp)
+{
+ /* EMPTY */
+ return (USB_SUCCESS);
+}
+
+static uint_t
+axf_mcast_hash(struct usbgem_dev *dp, const uint8_t *addr)
+{
+ return (usbgem_ether_crc_be(addr) >> (32 - 6));
+}
+
+static int
+axf_set_rx_filter(struct usbgem_dev *dp)
+{
+ int i;
+ uint8_t mode;
+ uint8_t mhash[8];
+ uint8_t buf[2];
+ uint_t h;
+ int err = USB_SUCCESS;
+ struct axf_dev *lp = dp->private;
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: called, rxmode:%x",
+ dp->name, __func__, dp->rxmode));
+
+ if (lp->rcr & RCR_SO) {
+ /* set promiscuous mode before changing it. */
+ OUT(dp, VCMD_WRITE_RXCTRL,
+ lp->rcr | RCR_PRO, 0, 0, NULL, &err, usberr);
+ }
+
+ lp->rcr &= ~(RCR_AP_88772 | RCR_AM | RCR_SEP | RCR_AMALL | RCR_PRO);
+ mode = RCR_AB; /* accept broadcast packets */
+
+ bzero(mhash, sizeof (mhash));
+
+ if (dp->rxmode & RXMODE_PROMISC) {
+ /* promiscuous mode implies all multicast and all physical */
+ mode |= RCR_PRO;
+ } else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 32) {
+ /* accept all multicast packets */
+ mode |= RCR_AMALL;
+ } else if (dp->mc_count > 0) {
+ /*
+ * make a hash table to select only the
+ * interesting multicast addresses.
+ */
+ mode |= RCR_AM;
+ for (i = 0; i < dp->mc_count; i++) {
+ h = dp->mc_list[i].hash;
+ mhash[h / 8] |= 1 << (h % 8);
+ }
+ }
+ if (AX88172(dp)) {
+ if (bcmp(dp->dev_addr.ether_addr_octet,
+ dp->cur_addr.ether_addr_octet, ETHERADDRL) != 0) {
+ /*
+ * we use promiscuous mode instead of changing the
+ * mac address in ax88172
+ */
+ mode |= RCR_PRO;
+ }
+ } else {
+ OUT(dp, VCMD_WRITE_NODE_ID_88772, 0, 0,
+ ETHERADDRL, dp->cur_addr.ether_addr_octet, &err, usberr);
+ }
+ lp->rcr |= mode;
+
+ /* set multicast hash table */
+ if (mode & RCR_AM) {
+ /* need to set up multicast hash table */
+ OUT(dp, VCMD_WRITE_MCAST_FILTER, 0, 0,
+ sizeof (mhash), mhash, &err, usberr);
+ }
+
+ /* update rcr */
+ OUT(dp, VCMD_WRITE_RXCTRL, lp->rcr, 0,
+ 0, NULL, &err, usberr);
+
+#if DEBUG_LEVEL > 1
+ /* verify rxctrl reg */
+ IN(dp, VCMD_READ_RXCTRL, 0, 0, 2, buf, &err, usberr);
+ cmn_err(CE_CONT, "!%s: %s: rcr:%b returned",
+ dp->name, __func__, LE16P(buf), RCR_BITS);
+#endif
+usberr:
+ DPRINTF(2, (CE_CONT, "!%s: %s: end (%d: %s)",
+ dp->name, __func__,
+ err, err == USB_SUCCESS ? "success" : "error"));
+ return (err);
+}
+
+static int
+axf_set_media(struct usbgem_dev *dp)
+{
+ uint8_t val8;
+ uint8_t gpio;
+ uint8_t gpio_old;
+ int err = USB_SUCCESS;
+ uint16_t msr;
+ struct axf_dev *lp = dp->private;
+
+ IN(dp, VCMD_READ_GPIO, 0, 0, 1, &gpio, &err, usberr);
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called, gpio:%b",
+ dp->name, __func__, gpio, GPIO_BITS));
+
+ msr = lp->msr;
+ gpio_old = gpio;
+ gpio = lp->chip->gpio_reset[0];
+
+ /* setup speed */
+ if (AX88172(dp)) {
+ /* EMPTY */
+ } else {
+ msr &= ~(MSR_PS | MSR_GM | MSR_ENCK);
+
+ switch (dp->speed) {
+ case USBGEM_SPD_1000:
+ msr |= MSR_GM | MSR_ENCK;
+ break;
+
+ case USBGEM_SPD_100:
+ msr |= MSR_PS;
+ break;
+
+ case USBGEM_SPD_10:
+ break;
+ }
+ }
+ gpio |= lp->chip->gpio_speed[dp->speed == USBGEM_SPD_100 ? 1 : 0];
+
+ /* select duplex */
+ msr &= ~MSR_FDPX;
+ if (dp->full_duplex) {
+ msr |= MSR_FDPX;
+
+ /* select flow control */
+ if (AX88172(dp)) {
+ msr &= ~MSR_FCEN;
+ switch (dp->flow_control) {
+ case FLOW_CONTROL_TX_PAUSE:
+ case FLOW_CONTROL_SYMMETRIC:
+ case FLOW_CONTROL_RX_PAUSE:
+ msr |= MSR_FCEN;
+ break;
+ }
+ } else {
+ msr &= ~(MSR_RFC | MSR_TFC);
+ switch (dp->flow_control) {
+ case FLOW_CONTROL_TX_PAUSE:
+ msr |= MSR_TFC;
+ break;
+
+ case FLOW_CONTROL_SYMMETRIC:
+ msr |= MSR_TFC | MSR_RFC;
+ break;
+
+ case FLOW_CONTROL_RX_PAUSE:
+ msr |= MSR_RFC;
+ break;
+ }
+ }
+ }
+ gpio |= lp->chip->gpio_duplex[dp->full_duplex ? 1 : 0];
+
+ /* update medium status register */
+ lp->msr = msr;
+ OUT(dp, VCMD_WRITE_MEDIUM_STATUS, lp->msr, 0,
+ 0, NULL, &err, usberr);
+
+ if (gpio != gpio_old) {
+ /* LED control required for some products */
+ OUT(dp, VCMD_WRITE_GPIO,
+ gpio, 0, 0, NULL, &err, usberr);
+ }
+
+usberr:
+ DPRINTF(2, (CE_CONT, "!%s: %s: end (%d: %s)",
+ dp->name, __func__,
+ err, err == USB_SUCCESS ? "success" : "error"));
+ return (err);
+}
+
+#define FILL_PKT_HEADER(bp, len) { \
+ (bp)[0] = (uint8_t)(len); \
+ (bp)[1] = (uint8_t)((len) >> 8); \
+ (bp)[2] = (uint8_t)(~(len)); \
+ (bp)[3] = (uint8_t)((~(len)) >> 8); \
+}
+
+#define PKT_HEADER_SIZE 4
+
+/*
+ * send/receive packet check
+ */
+static mblk_t *
+axf_tx_make_packet(struct usbgem_dev *dp, mblk_t *mp)
+{
+ int n;
+ size_t len;
+ size_t pkt_size;
+ mblk_t *new;
+ mblk_t *tp;
+ uint8_t *bp;
+ uint8_t *last_pos;
+ uint_t align_mask;
+ size_t header_size;
+ int pad_size;
+
+ len = msgdsize(mp);
+
+ if (AX88172(dp)) {
+#ifdef notdef
+ align_mask = 63;
+#else
+ align_mask = 511;
+#endif
+ header_size = 0;
+
+ if (len >= ETHERMIN && mp->b_cont == NULL &&
+ (len & align_mask) != 0) {
+ /* use the mp "as is" */
+ return (mp);
+ }
+ } else {
+ align_mask = 511;
+ header_size = PKT_HEADER_SIZE;
+ }
+
+ /*
+ * re-allocate the mp
+ */
+ /* minimum ethernet packet size of ETHERMIN */
+ pkt_size = max(len, ETHERMIN);
+
+ if (((pkt_size + header_size) & align_mask) == 0) {
+ /* padding is required in usb communication */
+ pad_size = PKT_HEADER_SIZE;
+ } else {
+ pad_size = 0;
+ }
+
+ if ((new = allocb(header_size + pkt_size + pad_size, 0)) == NULL) {
+ return (NULL);
+ }
+
+ bp = new->b_rptr;
+ if (header_size) {
+ uint16_t tmp;
+
+ /* add a header */
+ tmp = (uint16_t)pkt_size;
+ FILL_PKT_HEADER(bp, tmp);
+ bp += header_size;
+ }
+
+ /* copy contents of the buffer */
+ for (tp = mp; tp; tp = tp->b_cont) {
+ n = tp->b_wptr - tp->b_rptr;
+ bcopy(tp->b_rptr, bp, n);
+ bp += n;
+ }
+
+ /* add pads for ethernet packets */
+ last_pos = new->b_rptr + header_size + pkt_size;
+ while (bp < last_pos) {
+ *bp++ = 0;
+ }
+
+ /* add a zero-length pad segment for usb communications */
+ if (pad_size) {
+ /* add a dummy header for zero-length packet */
+ FILL_PKT_HEADER(bp, 0);
+ bp += pad_size;
+ }
+
+ /* close the payload of the packet */
+ new->b_wptr = bp;
+
+ return (new);
+}
+
+static void
+axf_dump_packet(struct usbgem_dev *dp, uint8_t *bp, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i += 8, bp += 8) {
+ cmn_err(CE_CONT, "%02x %02x %02x %02x %02x %02x %02x %02x",
+ bp[0], bp[1], bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]);
+ }
+}
+
+static mblk_t *
+axf_rx_make_packet(struct usbgem_dev *dp, mblk_t *mp)
+{
+ mblk_t *tp;
+ int rest;
+
+ if (AX88172(dp)) {
+ return (mp);
+ }
+
+ tp = mp;
+ rest = tp->b_wptr - tp->b_rptr;
+
+ if (rest <= PKT_HEADER_SIZE) {
+ /*
+ * the usb bulk-in frame doesn't include any valid
+ * ethernet packets.
+ */
+ return (NULL);
+ }
+
+ for (; ; ) {
+ uint16_t len;
+ uint16_t cksum;
+
+ /* analyse the header of the received usb frame */
+ len = LE16P(tp->b_rptr + 0);
+ cksum = LE16P(tp->b_rptr + 2);
+
+ /* test if the header is valid */
+ if (len + cksum != 0xffff) {
+ /* discard the whole packet */
+ cmn_err(CE_WARN,
+ "!%s: %s: corrupted header:%04x %04x",
+ dp->name, __func__, len, cksum);
+ return (NULL);
+ }
+#if DEBUG_LEVEL > 0
+ if (len < ETHERMIN || len > ETHERMAX) {
+ cmn_err(CE_NOTE,
+ "!%s: %s: incorrect pktsize:%d",
+ dp->name, __func__, len);
+ }
+#endif
+ /* extract an ethernet packet from the bulk-in frame */
+ tp->b_rptr += PKT_HEADER_SIZE;
+ tp->b_wptr = tp->b_rptr + len;
+
+ if (len & 1) {
+ /*
+ * skip a trailing pad byte if the packet
+ * length is odd
+ */
+ len++;
+ }
+ rest -= len + PKT_HEADER_SIZE;
+
+ if (rest <= PKT_HEADER_SIZE) {
+ /* no more valid ethernet packets */
+ break;
+ }
+
+#if DEBUG_LEVEL > 10
+ axf_dump_packet(dp, tp->b_wptr, 18);
+#endif
+ /* allocate a mblk_t header for the next ethernet packet */
+ tp->b_next = dupb(mp);
+ tp->b_next->b_rptr = tp->b_rptr + len;
+ tp = tp->b_next;
+ }
+
+ return (mp);
+}
+
+/*
+ * MII Interfaces
+ */
+static uint16_t
+axf_mii_read(struct usbgem_dev *dp, uint_t index, int *errp)
+{
+ uint8_t buf[2];
+ uint16_t val;
+
+ DPRINTF(4, (CE_CONT, "!%s: %s: called, ix:%d",
+ dp->name, __func__, index));
+
+ /* switch to software MII operation mode */
+ OUT(dp, VCMD_SOFTWARE_MII_OP, 0, 0, 0, NULL, errp, usberr);
+
+ /* Read MII register */
+ IN(dp, VCMD_READ_MII_REG, dp->mii_phy_addr, index,
+ 2, buf, errp, usberr);
+
+ /* switch to hardware MII operation mode */
+ OUT(dp, VCMD_HARDWARE_MII_OP, 0, 0, 0, NULL, errp, usberr);
+
+ return (LE16P(buf));
+
+usberr:
+ cmn_err(CE_CONT,
+ "!%s: %s: usberr(%d) detected", dp->name, __func__, *errp);
+ return (0);
+}
+
+static void
+axf_mii_write(struct usbgem_dev *dp, uint_t index, uint16_t val, int *errp)
+{
+ uint8_t buf[2];
+
+ DPRINTF(4, (CE_CONT, "!%s: %s called, reg:%x val:%x",
+ dp->name, __func__, index, val));
+
+ /* switch to software MII operation mode */
+ OUT(dp, VCMD_SOFTWARE_MII_OP, 0, 0, 0, NULL, errp, usberr);
+
+ /* Write to the specified MII register */
+ buf[0] = (uint8_t)val;
+ buf[1] = (uint8_t)(val >> 8);
+ OUT(dp, VCMD_WRITE_MII_REG, dp->mii_phy_addr, index,
+ 2, buf, errp, usberr);
+
+ /* switch to hardware MII operation mode */
+ OUT(dp, VCMD_HARDWARE_MII_OP, 0, 0, 0, NULL, errp, usberr);
+
+usberr:
+ ;
+}
+
+static void
+axf_interrupt(struct usbgem_dev *dp, mblk_t *mp)
+{
+ uint8_t *bp;
+ struct axf_dev *lp = dp->private;
+
+ bp = mp->b_rptr;
+
+ DPRINTF(2, (CE_CONT,
+ "!%s: %s: size:%d, %02x %02x %02x %02x %02x %02x %02x %02x",
+ dp->name, __func__, mp->b_wptr - mp->b_rptr,
+ bp[0], bp[1], bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]));
+
+ if (lp->last_link_state ^ bp[2]) {
+ usbgem_mii_update_link(dp);
+ }
+
+ lp->last_link_state = bp[2];
+}
+
+/* ======================================================== */
+/*
+ * OS depend (device driver DKI) routine
+ */
+/* ======================================================== */
+#ifdef DEBUG_LEVEL
+static void
+axf_eeprom_dump(struct usbgem_dev *dp, int size)
+{
+ int i;
+ int err;
+ uint8_t w0[2], w1[2], w2[2], w3[2];
+
+ cmn_err(CE_CONT, "!%s: eeprom dump:", dp->name);
+
+ err = USB_SUCCESS;
+
+ for (i = 0; i < size; i += 4) {
+ IN(dp, VCMD_READ_SROM, i + 0, 0, 2, w0, &err, usberr);
+ IN(dp, VCMD_READ_SROM, i + 1, 0, 2, w1, &err, usberr);
+ IN(dp, VCMD_READ_SROM, i + 2, 0, 2, w2, &err, usberr);
+ IN(dp, VCMD_READ_SROM, i + 3, 0, 2, w3, &err, usberr);
+ cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x",
+ i,
+ (w0[1] << 8) | w0[0],
+ (w1[1] << 8) | w1[0],
+ (w2[1] << 8) | w2[0],
+ (w3[1] << 8) | w3[0]);
+ }
+usberr:
+ ;
+}
+#endif
+
+static int
+axf_attach_chip(struct usbgem_dev *dp)
+{
+ uint8_t phys[2];
+ int err;
+ uint_t vcmd;
+ int ret;
+#ifdef CONFIG_FULLSIZE_VLAN
+ uint8_t maxpktsize[2];
+ uint16_t vlan_pktsize;
+#endif
+#ifdef DEBUG_LEVEL
+ uint8_t val8;
+#endif
+ struct axf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s enter", dp->name, __func__));
+
+ ret = USB_SUCCESS;
+ /*
+ * the mac address in EEPROM has been loaded into the ID registers.
+ */
+ vcmd = AX88172(dp) ? VCMD_READ_NODE_ID : VCMD_READ_NODE_ID_88772;
+ IN(dp, vcmd, 0, 0,
+ ETHERADDRL, dp->dev_addr.ether_addr_octet, &err, usberr);
+
+ /*
+ * setup IPG values
+ */
+ lp->ipg[0] = 0x15;
+ lp->ipg[1] = 0x0c;
+ lp->ipg[2] = 0x12;
+
+ /*
+ * We cannot scan for the phy because the nic returns an
+ * undefined value (i.e. leftover garbage) when no MII phy is
+ * at the specified index.
+ */
+#ifdef DEBUG_LEVELx
+ if (lp->chip->vid == 0x07b8 && lp->chip->pid == 0x420a) {
+ /*
+ * restore the original phy address of brain
+ * damaged Planex UE2-100TX
+ */
+ OUT(dp, VCMD_WRITE_SROM_ENABLE, 0, 0, 0, NULL, &err, usberr);
+ OUT(dp, VCMD_WRITE_SROM, 0x11, 0xe004, 0, NULL, &err, usberr);
+ OUT(dp, VCMD_WRITE_SROM_DISABLE, 0, 0, 0, NULL, &err, usberr);
+ }
+#endif
+ if (AX88172(dp)) {
+ IN(dp, VCMD_READ_PHY_IDS, 0, 0, 2, &phys, &err, usberr);
+ dp->mii_phy_addr = phys[1];
+ DPRINTF(0, (CE_CONT, "!%s: %s: phys_addr:%d %d",
+ dp->name, __func__, phys[0], phys[1]));
+ } else {
+ /* use built-in phy */
+ dp->mii_phy_addr = 0x10;
+ }
+
+ dp->misc_flag |= USBGEM_VLAN;
+#ifdef CONFIG_FULLSIZE_VLAN
+ if (AX88172(dp) || AX88772(dp)) {
+ /* check max packet size in srom */
+ IN(dp, VCMD_READ_SROM, 0x10, 0, 2, maxpktsize, &err, usberr);
+ vlan_pktsize = ETHERMAX + ETHERFCSL + 4 /* VTAG_SIZE */;
+
+ if (LE16P(maxpktsize) < vlan_pktsize) {
+ cmn_err(CE_CONT,
+ "!%s: %s: max packet size in srom is too small, "
+ "changing %d -> %d, do power cycle for the device",
+ dp->name, __func__,
+ LE16P(maxpktsize), vlan_pktsize);
+ OUT(dp, VCMD_WRITE_SROM_ENABLE,
+ 0, 0, 0, NULL, &err, usberr);
+ OUT(dp, VCMD_WRITE_SROM, 0x10,
+ vlan_pktsize, 0, NULL, &err, usberr);
+ OUT(dp, VCMD_WRITE_SROM_DISABLE,
+ 0, 0, 0, NULL, &err, usberr);
+
+ /* need to power off the device */
+ ret = USB_FAILURE;
+ }
+ }
+#endif
+#ifdef DEBUG_LEVEL
+ IN(dp, VCMD_READ_GPIO, 0, 0, 1, &val8, &err, usberr);
+ cmn_err(CE_CONT,
+ "!%s: %s: ipg 0x%02x 0x%02x 0x%02x, gpio 0x%b",
+ dp->name, __func__, lp->ipg[0], lp->ipg[1], lp->ipg[2],
+ val8, GPIO_BITS);
+#endif
+ /* fix rx buffer size */
+ if (!AX88172(dp)) {
+ dp->rx_buf_len = 2048;
+ }
+
+#if DEBUG_LEVEL > 0
+ axf_eeprom_dump(dp, 0x20);
+#endif
+ return (ret);
+
+usberr:
+ cmn_err(CE_WARN, "%s: %s: usb error detected (%d)",
+ dp->name, __func__, err);
+ return (USB_FAILURE);
+}
+
+static int
+axf_scan_phy(struct usbgem_dev *dp)
+{
+ int i;
+ int err;
+ uint16_t val;
+ int phy_addr_saved;
+ struct axf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ phy_addr_saved = dp->mii_phy_addr;
+
+ /* special probe routine for unreliable MII addr */
+#define PROBE_PAT \
+ (MII_ABILITY_100BASE_TX_FD | \
+ MII_ABILITY_100BASE_TX | \
+ MII_ABILITY_10BASE_T_FD | \
+ MII_ABILITY_10BASE_T)
+
+ for (i = 0; i < 32; i++) {
+ dp->mii_phy_addr = i;
+ axf_mii_write(dp, MII_AN_ADVERT, 0, &err);
+ if (err != USBGEM_SUCCESS) {
+ break;
+ }
+ val = axf_mii_read(dp, MII_AN_ADVERT, &err);
+ if (err != USBGEM_SUCCESS) {
+ break;
+ }
+ if (val != 0) {
+ DPRINTF(0, (CE_CONT, "!%s: %s: index:%d, val %b != 0",
+ dp->name, __func__, i, val, MII_ABILITY_BITS));
+ continue;
+ }
+
+ axf_mii_write(dp, MII_AN_ADVERT, PROBE_PAT, &err);
+ if (err != USBGEM_SUCCESS) {
+ break;
+ }
+ val = axf_mii_read(dp, MII_AN_ADVERT, &err);
+ if (err != USBGEM_SUCCESS) {
+ break;
+ }
+ if ((val & MII_ABILITY_TECH) != PROBE_PAT) {
+ DPRINTF(0, (CE_CONT, "!%s: %s: "
+ "index:%d, pat:%x != val:%b",
+ dp->name, __func__, i,
+ PROBE_PAT, val, MII_ABILITY_BITS));
+ continue;
+ }
+
+ /* found */
+ dp->mii_phy_addr = phy_addr_saved;
+ return (i);
+ }
+#undef PROBE_PAT
+ if (i == 32) {
+ cmn_err(CE_CONT, "!%s: %s: no mii phy found",
+ dp->name, __func__);
+ } else {
+ cmn_err(CE_CONT, "!%s: %s: i/o error while scanning phy",
+ dp->name, __func__);
+ }
+ dp->mii_phy_addr = phy_addr_saved;
+ return (-1);
+}
+
+static int
+axf_mii_probe(struct usbgem_dev *dp)
+{
+ int my_guess;
+ int err;
+ uint8_t old_11th[2];
+ uint8_t new_11th[2];
+ struct axf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+ (void) axf_reset_phy(dp);
+ lp->phy_has_reset = B_TRUE;
+
+ if (AX88172(dp)) {
+ my_guess = axf_scan_phy(dp);
+ if (my_guess >= 0 && my_guess < 32 &&
+ my_guess != dp->mii_phy_addr) {
+ /*
+ * phy addr in srom is wrong, need to fix it
+ */
+ IN(dp, VCMD_READ_SROM,
+ 0x11, 0, 2, old_11th, &err, usberr);
+
+ new_11th[0] = my_guess;
+ new_11th[1] = old_11th[1];
+
+ OUT(dp, VCMD_WRITE_SROM_ENABLE,
+ 0, 0, 0, NULL, &err, usberr);
+ OUT(dp, VCMD_WRITE_SROM,
+ 0x11, LE16P(new_11th), 0, NULL, &err, usberr);
+ OUT(dp, VCMD_WRITE_SROM_DISABLE,
+ 0, 0, 0, NULL, &err, usberr);
+#if 1
+ /* XXX - read back, but it doesn't work, why? */
+ delay(drv_usectohz(1000*1000));
+ IN(dp, VCMD_READ_SROM,
+ 0x11, 0, 2, new_11th, &err, usberr);
+#endif
+ cmn_err(CE_NOTE, "!%s: %s: phy addr in srom fixed: "
+ "%04x -> %04x",
+ dp->name, __func__,
+ LE16P(old_11th), LE16P(new_11th));
+ return (USBGEM_FAILURE);
+usberr:
+ cmn_err(CE_NOTE,
+ "!%s: %s: failed to patch phy addr, "
+ "current: %04x",
+ dp->name, __func__, LE16P(old_11th));
+ return (USBGEM_FAILURE);
+ }
+ }
+ return (usbgem_mii_probe_default(dp));
+}
+
+static int
+axf_mii_init(struct usbgem_dev *dp)
+{
+ struct axf_dev *lp = dp->private;
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ if (!lp->phy_has_reset) {
+ (void) axf_reset_phy(dp);
+ }
+
+ /* prepare to reset phy on the next reconnect or resume */
+ lp->phy_has_reset = B_FALSE;
+
+ return (USB_SUCCESS);
+}
+
+static int
+axfattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int i;
+ ddi_iblock_cookie_t c;
+ int ret;
+ int revid;
+ int unit;
+ int vid;
+ int pid;
+ struct chip_info *p;
+ int len;
+ const char *drv_name;
+ struct usbgem_dev *dp;
+ void *base;
+ struct usbgem_conf *ugcp;
+ struct axf_dev *lp;
+
+ unit = ddi_get_instance(dip);
+ drv_name = ddi_driver_name(dip);
+
+ DPRINTF(3, (CE_CONT, "!%s%d: %s: called, cmd:%d",
+ drv_name, unit, __func__, cmd));
+
+ if (cmd == DDI_ATTACH) {
+ /*
+ * Check if the chip is supported.
+ */
+ vid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+ "usb-vendor-id", -1);
+ pid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+ "usb-product-id", -1);
+ revid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+ "usb-revision-id", -1);
+
+ for (i = 0, p = chiptbl_88x7x; i < CHIPTABLESIZE; i++, p++) {
+ if (p->vid == vid && p->pid == pid) {
+ /* found */
+ cmn_err(CE_CONT, "!%s%d: %s "
+ "(vid: 0x%04x, did: 0x%04x, revid: 0x%02x)",
+ drv_name, unit, p->name, vid, pid, revid);
+ goto chip_found;
+ }
+ }
+
+ /* Not found */
+ cmn_err(CE_WARN, "!%s: %s: wrong usb venid/prodid (0x%x, 0x%x)",
+ drv_name, __func__, vid, pid);
+
+ /* assume 88772 */
+ p = &chiptbl_88x7x[CHIPTABLESIZE - 1];
+chip_found:
+ /*
+ * construct usbgem configuration
+ */
+ ugcp = kmem_zalloc(sizeof (*ugcp), KM_SLEEP);
+
+ /* name */
+ /*
+ * softmac requires that ppa is the instance number
+ * of the device; otherwise it hangs while searching for the device.
+ */
+ sprintf(ugcp->usbgc_name, "%s%d", drv_name, unit);
+ ugcp->usbgc_ppa = unit;
+
+ ugcp->usbgc_ifnum = 0;
+ ugcp->usbgc_alt = 0;
+
+ ugcp->usbgc_tx_list_max = 64;
+
+ ugcp->usbgc_rx_header_len = 0;
+ ugcp->usbgc_rx_list_max = 64;
+
+ /* time out parameters */
+ ugcp->usbgc_tx_timeout = USBGEM_TX_TIMEOUT;
+ ugcp->usbgc_tx_timeout_interval = USBGEM_TX_TIMEOUT_INTERVAL;
+
+ /* flow control */
+ /*
+ * XXX - flow control caused link down frequently under
+ * heavy traffic
+ */
+ ugcp->usbgc_flow_control = FLOW_CONTROL_RX_PAUSE;
+
+ /* MII timeout parameters */
+ ugcp->usbgc_mii_link_watch_interval = ONESEC;
+ ugcp->usbgc_mii_an_watch_interval = ONESEC/5;
+ ugcp->usbgc_mii_reset_timeout = MII_RESET_TIMEOUT; /* 1 sec */
+ ugcp->usbgc_mii_an_timeout = MII_AN_TIMEOUT; /* 5 sec */
+ ugcp->usbgc_mii_an_wait = 0;
+ ugcp->usbgc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT;
+
+ ugcp->usbgc_mii_an_delay = ONESEC/10;
+ ugcp->usbgc_mii_linkdown_action = MII_ACTION_RSA;
+ ugcp->usbgc_mii_linkdown_timeout_action = MII_ACTION_RESET;
+ ugcp->usbgc_mii_dont_reset = B_FALSE;
+ ugcp->usbgc_mii_hw_link_detection = B_TRUE;
+ ugcp->usbgc_mii_stop_mac_on_linkdown = B_FALSE;
+
+ /* I/O methods */
+
+ /* mac operation */
+ ugcp->usbgc_attach_chip = &axf_attach_chip;
+ ugcp->usbgc_reset_chip = &axf_reset_chip;
+ ugcp->usbgc_init_chip = &axf_init_chip;
+ ugcp->usbgc_start_chip = &axf_start_chip;
+ ugcp->usbgc_stop_chip = &axf_stop_chip;
+ ugcp->usbgc_multicast_hash = &axf_mcast_hash;
+
+ ugcp->usbgc_set_rx_filter = &axf_set_rx_filter;
+ ugcp->usbgc_set_media = &axf_set_media;
+ ugcp->usbgc_get_stats = &axf_get_stats;
+ ugcp->usbgc_interrupt = &axf_interrupt;
+
+ /* packet operation */
+ ugcp->usbgc_tx_make_packet = &axf_tx_make_packet;
+ ugcp->usbgc_rx_make_packet = &axf_rx_make_packet;
+
+ /* mii operations */
+ ugcp->usbgc_mii_probe = &axf_mii_probe;
+ ugcp->usbgc_mii_init = &axf_mii_init;
+ ugcp->usbgc_mii_config = &usbgem_mii_config_default;
+ ugcp->usbgc_mii_read = &axf_mii_read;
+ ugcp->usbgc_mii_write = &axf_mii_write;
+
+ /* mtu */
+ ugcp->usbgc_min_mtu = ETHERMTU;
+ ugcp->usbgc_max_mtu = ETHERMTU;
+ ugcp->usbgc_default_mtu = ETHERMTU;
+
+ lp = kmem_zalloc(sizeof (struct axf_dev), KM_SLEEP);
+ lp->chip = p;
+ lp->last_link_state = 0;
+ lp->phy_has_reset = B_FALSE;
+
+ dp = usbgem_do_attach(dip, ugcp, lp, sizeof (struct axf_dev));
+
+ kmem_free(ugcp, sizeof (*ugcp));
+
+ if (dp != NULL) {
+ return (DDI_SUCCESS);
+ }
+
+err_free_mem:
+ kmem_free(lp, sizeof (struct axf_dev));
+err_close_pipe:
+err:
+ return (DDI_FAILURE);
+ }
+
+ if (cmd == DDI_RESUME) {
+ return (usbgem_resume(dip));
+ }
+
+ return (DDI_FAILURE);
+}
+
+static int
+axfdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int ret;
+
+ if (cmd == DDI_DETACH) {
+ ret = usbgem_do_detach(dip);
+ if (ret != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+ }
+ if (cmd == DDI_SUSPEND) {
+ return (usbgem_suspend(dip));
+ }
+ return (DDI_FAILURE);
+}
+
+/* ======================================================== */
+/*
+ * OS depend (loadable streams driver) routine
+ */
+/* ======================================================== */
+#ifdef USBGEM_CONFIG_GLDv3
+USBGEM_STREAM_OPS(axf_ops, axfattach, axfdetach);
+#else
+static struct module_info axfminfo = {
+ 0, /* mi_idnum */
+ "axf", /* mi_idname */
+ 0, /* mi_minpsz */
+ ETHERMTU, /* mi_maxpsz */
+ ETHERMTU*128, /* mi_hiwat */
+ 1, /* mi_lowat */
+};
+
+static struct qinit axfrinit = {
+ (int (*)()) NULL, /* qi_putp */
+ usbgem_rsrv, /* qi_srvp */
+ usbgem_open, /* qi_qopen */
+ usbgem_close, /* qi_qclose */
+ (int (*)()) NULL, /* qi_qadmin */
+ &axfminfo, /* qi_minfo */
+ NULL /* qi_mstat */
+};
+
+static struct qinit axfwinit = {
+ usbgem_wput, /* qi_putp */
+ usbgem_wsrv, /* qi_srvp */
+ (int (*)()) NULL, /* qi_qopen */
+ (int (*)()) NULL, /* qi_qclose */
+ (int (*)()) NULL, /* qi_qadmin */
+ &axfminfo, /* qi_minfo */
+ NULL /* qi_mstat */
+};
+
+static struct streamtab axf_info = {
+ &axfrinit, /* st_rdinit */
+ &axfwinit, /* st_wrinit */
+ NULL, /* st_muxrinit */
+ NULL /* st_muxwrinit */
+};
+
+static struct cb_ops cb_axf_ops = {
+ nulldev, /* cb_open */
+ nulldev, /* cb_close */
+ nodev, /* cb_strategy */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ nodev, /* cb_read */
+ nodev, /* cb_write */
+ nodev, /* cb_ioctl */
+ nodev, /* cb_devmap */
+ nodev, /* cb_mmap */
+ nodev, /* cb_segmap */
+ nochpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ &axf_info, /* cb_stream */
+ D_NEW|D_MP /* cb_flag */
+};
+
+static struct dev_ops axf_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ usbgem_getinfo, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ axfattach, /* devo_attach */
+ axfdetach, /* devo_detach */
+ nodev, /* devo_reset */
+ &cb_axf_ops, /* devo_cb_ops */
+ NULL, /* devo_bus_ops */
+ usbgem_power, /* devo_power */
+#if DEVO_REV >= 4
+ usbgem_quiesce, /* devo_quiesce */
+#endif
+};
+#endif
+
+static struct modldrv modldrv = {
+ &mod_driverops, /* Type of module. This one is a driver */
+ ident,
+ &axf_ops, /* driver ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, &modldrv, NULL
+};
+
+/* ======================================================== */
+/*
+ * _init : done
+ */
+/* ======================================================== */
+int
+_init(void)
+{
+ int status;
+
+ DPRINTF(2, (CE_CONT, "!axf: _init: called"));
+
+ status = usbgem_mod_init(&axf_ops, "axf");
+ if (status != DDI_SUCCESS) {
+ return (status);
+ }
+ status = mod_install(&modlinkage);
+ if (status != DDI_SUCCESS) {
+ usbgem_mod_fini(&axf_ops);
+ }
+ return (status);
+}
+
+/*
+ * _fini : done
+ */
+int
+_fini(void)
+{
+ int status;
+
+ DPRINTF(2, (CE_CONT, "!axf: _fini: called"));
+ status = mod_remove(&modlinkage);
+ if (status == DDI_SUCCESS) {
+ usbgem_mod_fini(&axf_ops);
+ }
+ return (status);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
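
[To make the 88772 framing concrete: each ethernet packet inside a bulk-in transfer is preceded by a four-byte header holding the 16-bit little-endian packet length and its one's complement (so len + cksum must equal 0xffff), and odd-length packets are followed by one pad byte. A self-contained user-space sketch of the same walk axf_rx_make_packet() performs; this is illustrative only, not the driver's code.]

    #include <stdio.h>
    #include <stdint.h>

    /* little-endian 16-bit load, as LE16P() in the driver */
    static uint16_t
    le16(const uint8_t *p)
    {
        return ((uint16_t)(p[0] | (p[1] << 8)));
    }

    static void
    walk_frames(const uint8_t *buf, size_t n)
    {
        size_t off = 0;

        while (off + 4 < n) {
            uint16_t len = le16(buf + off);
            uint16_t cksum = le16(buf + off + 2);

            /* a header is valid only if len + cksum == 0xffff */
            if ((uint16_t)(len + cksum) != 0xffff) {
                printf("corrupted header at offset %zu\n", off);
                return;
            }
            printf("packet at offset %zu, %u bytes\n", off + 4, len);
            off += 4 + len + (len & 1);     /* skip pad byte if odd */
        }
    }

    int
    main(void)
    {
        /* one 5-byte payload, padded to an even 6 bytes */
        uint8_t buf[10] = { 0x05, 0x00, 0xfa, 0xff, 'h', 'e', 'l', 'l', 'o', 0 };

        walk_frames(buf, sizeof (buf));
        return (0);
    }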
diff --git a/usr/src/uts/common/io/cons.c b/usr/src/uts/common/io/cons.c
index 6ef1b0b9f7..495ae93cf9 100644
--- a/usr/src/uts/common/io/cons.c
+++ b/usr/src/uts/common/io/cons.c
@@ -21,6 +21,7 @@
/*
* Copyright (c) 1982, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2015, Joyent, Inc. All rights reserved.
*/
/*
@@ -53,6 +54,7 @@
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/stat.h>
+#include <sys/limits.h>
#include <sys/console.h>
#include <sys/consdev.h>
@@ -414,14 +416,24 @@ cnwrite(dev_t dev, struct uio *uio, struct cred *cred)
*/
if (vsconsvp != NULL && vsconsvp->v_stream != NULL) {
struiod_t uiod;
+ struct iovec buf[IOV_MAX_STACK];
+ int iovlen = 0;
+
+ if (uio->uio_iovcnt > IOV_MAX_STACK) {
+ iovlen = uio->uio_iovcnt * sizeof (iovec_t);
+ uiod.d_iov = kmem_alloc(iovlen, KM_SLEEP);
+ } else {
+ uiod.d_iov = buf;
+ }
/*
* strwrite modifies uio so need to make copy.
*/
- (void) uiodup(uio, &uiod.d_uio, uiod.d_iov,
- sizeof (uiod.d_iov) / sizeof (*uiod.d_iov));
+ (void) uiodup(uio, &uiod.d_uio, uiod.d_iov, uio->uio_iovcnt);
(void) strwrite(vsconsvp, &uiod.d_uio, cred);
+ if (iovlen != 0)
+ kmem_free(uiod.d_iov, iovlen);
}
if (rconsvp->v_stream != NULL)
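
[The cnwrite() change above is the common stack-or-heap pattern: a fixed on-stack iovec array handles the typical small uio_iovcnt, and only oversized vectors pay for an allocation and free. A self-contained user-space sketch of the same shape; NSTACK stands in for IOV_MAX_STACK and all names are illustrative.]

    #include <stdlib.h>
    #include <string.h>

    #define NSTACK  16      /* illustrative stand-in for IOV_MAX_STACK */

    struct iov {
        void    *base;
        size_t  len;
    };

    /*
     * Use the on-stack array for the common small case, fall back
     * to the allocator for large vectors, and free only what was
     * actually allocated.
     */
    static int
    process_iov(const struct iov *iov, int cnt)
    {
        struct iov stackbuf[NSTACK];
        struct iov *v = stackbuf;
        size_t heaplen = 0;

        if (cnt > NSTACK) {
            heaplen = (size_t)cnt * sizeof (struct iov);
            if ((v = malloc(heaplen)) == NULL)
                return (-1);
        }
        memcpy(v, iov, (size_t)cnt * sizeof (struct iov));

        /* ... operate on the private copy, as uiodup() does ... */

        if (heaplen != 0)
            free(v);
        return (0);
    }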
diff --git a/usr/src/uts/common/io/devpoll.c b/usr/src/uts/common/io/devpoll.c
index e00ac1d1e9..f8d9f1cff8 100644
--- a/usr/src/uts/common/io/devpoll.c
+++ b/usr/src/uts/common/io/devpoll.c
@@ -670,15 +670,26 @@ dpwrite(dev_t dev, struct uio *uiop, cred_t *credp)
uiosize = uiop->uio_resid;
pollfdnum = uiosize / size;
- mutex_enter(&curproc->p_lock);
- if (pollfdnum > (uint_t)rctl_enforced_value(
- rctlproc_legacy[RLIMIT_NOFILE], curproc->p_rctls, curproc)) {
- (void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
- curproc->p_rctls, curproc, RCA_SAFE);
+
+ /*
+ * We want to make sure that pollfdnum isn't large enough to DoS us,
+ * but we also don't want to grab p_lock unnecessarily -- so we
+ * perform the full check against our resource limits if and only if
+ * pollfdnum is larger than the known-to-be-sane value of UINT8_MAX.
+ */
+ if (pollfdnum > UINT8_MAX) {
+ mutex_enter(&curproc->p_lock);
+ if (pollfdnum >
+ (uint_t)rctl_enforced_value(rctlproc_legacy[RLIMIT_NOFILE],
+ curproc->p_rctls, curproc)) {
+ (void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
+ curproc->p_rctls, curproc, RCA_SAFE);
+ mutex_exit(&curproc->p_lock);
+ return (EINVAL);
+ }
mutex_exit(&curproc->p_lock);
- return (EINVAL);
}
- mutex_exit(&curproc->p_lock);
+
/*
* Copy in the pollfd array. Walk through the array and add
* each polled fd to the cached set.
@@ -1112,14 +1123,18 @@ dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
void *setp = STRUCT_FGETP(dvpoll, dp_setp);
if (setp != NULL) {
- if (copyin(setp, &set, sizeof (set))) {
- DP_REFRELE(dpep);
- return (EFAULT);
+ if ((mode & FKIOCTL) != 0) {
+ /* Use the signal set directly */
+ ksetp = (k_sigset_t *)setp;
+ } else {
+ if (copyin(setp, &set, sizeof (set))) {
+ DP_REFRELE(dpep);
+ return (EFAULT);
+ }
+ sigutok(&set, &kset);
+ ksetp = &kset;
}
- sigutok(&set, &kset);
- ksetp = &kset;
-
mutex_enter(&p->p_lock);
schedctl_finish_sigblock(t);
lwp->lwp_sigoldmask = t->t_hold;
@@ -1268,6 +1283,10 @@ dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
DP_SIGMASK_RESTORE(ksetp);
if (error == 0 && fdcnt > 0) {
+ /*
+ * It should be noted that FKIOCTL does not influence
+ * the copyout (vs bcopy) of dp_fds at this time.
+ */
if (copyout(ps->ps_dpbuf,
STRUCT_FGETP(dvpoll, dp_fds), fdcnt * fdsize)) {
DP_REFRELE(dpep);
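
[The dpwrite() change above is a fast-path bound check: any pollfdnum at or below UINT8_MAX is trivially within any enforceable RLIMIT_NOFILE value, so only oversized requests pay for taking p_lock and querying the rctl. A compact sketch of that shape; limit_locked is a hypothetical stand-in for the locked rctl lookup, not a kernel API.]

    #include <stdint.h>

    /*
     * Small requests are known sane and skip the locked limit
     * lookup entirely; only large requests pay for the lock.
     */
    static int
    check_request(unsigned int n, unsigned int (*limit_locked)(void))
    {
        if (n <= UINT8_MAX)     /* trivially sane, no lock needed */
            return (0);
        return (n > limit_locked() ? -1 : 0);
    }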
diff --git a/usr/src/uts/common/io/dld/dld_drv.c b/usr/src/uts/common/io/dld/dld_drv.c
index 40cbe86170..62bc4a8ecf 100644
--- a/usr/src/uts/common/io/dld/dld_drv.c
+++ b/usr/src/uts/common/io/dld/dld_drv.c
@@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2015, Joyent Inc.
*/
/*
@@ -701,7 +702,8 @@ drv_ioc_prop_common(dld_ioc_macprop_t *prop, intptr_t arg, boolean_t set,
err = EACCES;
goto done;
}
- err = dls_devnet_setzid(dlh, dzp->diz_zid);
+ err = dls_devnet_setzid(dlh, dzp->diz_zid,
+ dzp->diz_transient);
} else {
kprop->pr_perm_flags = MAC_PROP_PERM_RW;
(*(zoneid_t *)kprop->pr_val) = dls_devnet_getzid(dlh);
@@ -865,7 +867,7 @@ drv_ioc_rename(void *karg, intptr_t arg, int mode, cred_t *cred, int *rvalp)
return (err);
if ((err = dls_devnet_rename(dir->dir_linkid1, dir->dir_linkid2,
- dir->dir_link)) != 0)
+ dir->dir_link, dir->dir_zoneinit)) != 0)
return (err);
if (dir->dir_linkid2 == DATALINK_INVALID_LINKID)
@@ -1376,7 +1378,8 @@ static dld_ioc_modentry_t dld_ioc_modtable[] = {
{SIMNET_IOC, "simnet", 0, NULL, 0},
{BRIDGE_IOC, "bridge", 0, NULL, 0},
{IPTUN_IOC, "iptun", 0, NULL, 0},
- {IBPART_IOC, "ibp", -1, NULL, 0}
+ {IBPART_IOC, "ibp", -1, NULL, 0},
+ {OVERLAY_IOC, "overlay", 0, NULL, 0}
};
#define DLDIOC_CNT \
(sizeof (dld_ioc_modtable) / sizeof (dld_ioc_modentry_t))
diff --git a/usr/src/uts/common/io/dld/dld_proto.c b/usr/src/uts/common/io/dld/dld_proto.c
index a438e43d91..661d8b2f4f 100644
--- a/usr/src/uts/common/io/dld/dld_proto.c
+++ b/usr/src/uts/common/io/dld/dld_proto.c
@@ -41,7 +41,7 @@ static proto_reqfunc_t proto_info_req, proto_attach_req, proto_detach_req,
proto_bind_req, proto_unbind_req, proto_promiscon_req, proto_promiscoff_req,
proto_enabmulti_req, proto_disabmulti_req, proto_physaddr_req,
proto_setphysaddr_req, proto_udqos_req, proto_req, proto_capability_req,
- proto_notify_req, proto_passive_req;
+ proto_notify_req, proto_passive_req, proto_exclusive_req;
static void proto_capability_advertise(dld_str_t *, mblk_t *);
static int dld_capab_poll_disable(dld_str_t *, dld_capab_poll_t *);
@@ -121,6 +121,9 @@ dld_proto(dld_str_t *dsp, mblk_t *mp)
case DL_PASSIVE_REQ:
proto_passive_req(dsp, mp);
break;
+ case DL_EXCLUSIVE_REQ:
+ proto_exclusive_req(dsp, mp);
+ break;
default:
proto_req(dsp, mp);
break;
@@ -605,6 +608,14 @@ proto_promiscon_req(dld_str_t *dsp, mblk_t *mp)
new_flags |= DLS_PROMISC_PHYS;
break;
+ case DL_PROMISC_RX_ONLY:
+ new_flags |= DLS_PROMISC_RX_ONLY;
+ break;
+
+ case DL_PROMISC_FIXUPS:
+ new_flags |= DLS_PROMISC_FIXUPS;
+ break;
+
default:
dl_err = DL_NOTSUPPORTED;
goto failed2;
@@ -692,6 +703,22 @@ proto_promiscoff_req(dld_str_t *dsp, mblk_t *mp)
new_flags &= ~DLS_PROMISC_PHYS;
break;
+ case DL_PROMISC_RX_ONLY:
+ if (!(dsp->ds_promisc & DLS_PROMISC_RX_ONLY)) {
+ dl_err = DL_NOTENAB;
+ goto failed;
+ }
+ new_flags &= ~DLS_PROMISC_RX_ONLY;
+ break;
+
+ case DL_PROMISC_FIXUPS:
+ if (!(dsp->ds_promisc & DLS_PROMISC_FIXUPS)) {
+ dl_err = DL_NOTENAB;
+ goto failed;
+ }
+ new_flags &= ~DLS_PROMISC_FIXUPS;
+ break;
+
default:
dl_err = DL_NOTSUPPORTED;
mac_perim_exit(mph);
@@ -1295,7 +1322,8 @@ proto_passive_req(dld_str_t *dsp, mblk_t *mp)
* If we've already become active by issuing an active primitive,
* then it's too late to try to become passive.
*/
- if (dsp->ds_passivestate == DLD_ACTIVE) {
+ if (dsp->ds_passivestate == DLD_ACTIVE ||
+ dsp->ds_passivestate == DLD_EXCLUSIVE) {
dl_err = DL_OUTSTATE;
goto failed;
}
@@ -1354,7 +1382,12 @@ dld_capab_direct(dld_str_t *dsp, void *data, uint_t flags)
dls_rx_set(dsp, (dls_rx_t)direct->di_rx_cf,
direct->di_rx_ch);
- direct->di_tx_df = (uintptr_t)str_mdata_fastpath_put;
+ if (direct->di_flags & DI_DIRECT_RAW) {
+ direct->di_tx_df =
+ (uintptr_t)str_mdata_raw_fastpath_put;
+ } else {
+ direct->di_tx_df = (uintptr_t)str_mdata_fastpath_put;
+ }
direct->di_tx_dh = dsp;
direct->di_tx_cb_df = (uintptr_t)mac_client_tx_notify;
direct->di_tx_cb_dh = dsp->ds_mch;
@@ -1516,8 +1549,9 @@ dld_capab(dld_str_t *dsp, uint_t type, void *data, uint_t flags)
* completes. So we limit the check to DLD_ENABLE case.
*/
if ((flags == DLD_ENABLE && type != DLD_CAPAB_PERIM) &&
- (dsp->ds_sap != ETHERTYPE_IP ||
- !check_mod_above(dsp->ds_rq, "ip"))) {
+ ((dsp->ds_sap != ETHERTYPE_IP ||
+ !check_mod_above(dsp->ds_rq, "ip")) &&
+ !check_mod_above(dsp->ds_rq, "vnd"))) {
return (ENOTSUP);
}
@@ -1599,9 +1633,15 @@ proto_capability_advertise(dld_str_t *dsp, mblk_t *mp)
}
/*
- * Direct capability negotiation interface between IP and DLD
+ * Direct capability negotiation interface between IP/VND and DLD. Note
+ * that for vnd we only allow the case where the media type is the
+ * native media type, so we know that no transformations need to be
+ * applied to the mac header that it receives.
*/
- if (dsp->ds_sap == ETHERTYPE_IP && check_mod_above(dsp->ds_rq, "ip")) {
+ if ((dsp->ds_sap == ETHERTYPE_IP &&
+ check_mod_above(dsp->ds_rq, "ip")) ||
+ (check_mod_above(dsp->ds_rq, "vnd") &&
+ dsp->ds_mip->mi_media == dsp->ds_mip->mi_nativemedia)) {
dld_capable = B_TRUE;
subsize += sizeof (dl_capability_sub_t) +
sizeof (dl_capab_dld_t);
@@ -1720,3 +1760,36 @@ dld_capabilities_disable(dld_str_t *dsp)
if (dsp->ds_polling)
(void) dld_capab_poll_disable(dsp, NULL);
}
+
+static void
+proto_exclusive_req(dld_str_t *dsp, mblk_t *mp)
+{
+ int ret = 0;
+ t_uscalar_t dl_err;
+ mac_perim_handle_t mph;
+
+ if (dsp->ds_passivestate != DLD_UNINITIALIZED) {
+ dl_err = DL_OUTSTATE;
+ goto failed;
+ }
+
+ if (MBLKL(mp) < DL_EXCLUSIVE_REQ_SIZE) {
+ dl_err = DL_BADPRIM;
+ goto failed;
+ }
+
+ mac_perim_enter_by_mh(dsp->ds_mh, &mph);
+ ret = dls_exclusive_set(dsp, B_TRUE);
+ mac_perim_exit(mph);
+
+ if (ret != 0) {
+ dl_err = DL_SYSERR;
+ goto failed;
+ }
+
+ dsp->ds_passivestate = DLD_EXCLUSIVE;
+ dlokack(dsp->ds_wq, mp, DL_EXCLUSIVE_REQ);
+ return;
+failed:
+ dlerrorack(dsp->ds_wq, mp, DL_EXCLUSIVE_REQ, dl_err, (t_uscalar_t)ret);
+}
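
proto_exclusive_req() lets a consumer claim exclusive ownership of the link
before any active primitive has been issued. A hedged sketch of how a kernel
STREAMS consumer might send the primitive (this assumes dl_exclusive_req_t
from <sys/dlpi.h> carries only dl_primitive, as DL_EXCLUSIVE_REQ_SIZE
suggests; error handling is elided):

static void
send_dl_exclusive_req(queue_t *wq)
{
	mblk_t *mp;
	dl_exclusive_req_t *reqp;

	if ((mp = allocb(DL_EXCLUSIVE_REQ_SIZE, BPRI_MED)) == NULL)
		return;
	mp->b_datap->db_type = M_PROTO;
	reqp = (dl_exclusive_req_t *)mp->b_wptr;
	reqp->dl_primitive = DL_EXCLUSIVE_REQ;
	mp->b_wptr += DL_EXCLUSIVE_REQ_SIZE;
	putnext(wq, mp);	/* expect DL_OK_ACK or DL_ERROR_ACK in reply */
}
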
diff --git a/usr/src/uts/common/io/dld/dld_str.c b/usr/src/uts/common/io/dld/dld_str.c
index 6f0d0b9a6c..f5308e70ff 100644
--- a/usr/src/uts/common/io/dld/dld_str.c
+++ b/usr/src/uts/common/io/dld/dld_str.c
@@ -854,6 +854,77 @@ i_dld_ether_header_update_tag(mblk_t *mp, uint_t pri, uint16_t vid,
return (mp);
}
+static boolean_t
+i_dld_raw_ether_check(dld_str_t *dsp, mac_header_info_t *mhip, mblk_t **mpp)
+{
+ mblk_t *mp = *mpp;
+ mblk_t *newmp;
+ uint_t pri, vid, dvid;
+
+ dvid = mac_client_vid(dsp->ds_mch);
+
+ /*
+ * Discard the packet if this is a VLAN stream but the VID in
+ * the packet is not correct.
+ */
+ vid = VLAN_ID(mhip->mhi_tci);
+ if ((dvid != VLAN_ID_NONE) && (vid != VLAN_ID_NONE))
+ return (B_FALSE);
+
+ /*
+ * Discard the packet if this packet is a tagged packet
+ * but both pri and VID are 0.
+ */
+ pri = VLAN_PRI(mhip->mhi_tci);
+ if (mhip->mhi_istagged && !mhip->mhi_ispvid && pri == 0 &&
+ vid == VLAN_ID_NONE)
+ return (B_FALSE);
+
+ /*
+ * Update the priority bits to the per-stream priority if
+ * priority is not set in the packet. Update the VID for
+ * packets on a VLAN stream.
+ */
+ pri = (pri == 0) ? dsp->ds_pri : 0;
+ if ((pri != 0) || (dvid != VLAN_ID_NONE)) {
+ if ((newmp = i_dld_ether_header_update_tag(mp, pri,
+ dvid, dsp->ds_dlp->dl_tagmode)) == NULL) {
+ return (B_FALSE);
+ }
+ *mpp = newmp;
+ }
+
+ return (B_TRUE);
+}
+
+mac_tx_cookie_t
+str_mdata_raw_fastpath_put(dld_str_t *dsp, mblk_t *mp, uintptr_t f_hint,
+ uint16_t flag)
+{
+ boolean_t is_ethernet = (dsp->ds_mip->mi_media == DL_ETHER);
+ mac_header_info_t mhi;
+ mac_tx_cookie_t cookie;
+
+ if (mac_vlan_header_info(dsp->ds_mh, mp, &mhi) != 0)
+ goto discard;
+
+ if (is_ethernet) {
+ if (i_dld_raw_ether_check(dsp, &mhi, &mp) == B_FALSE)
+ goto discard;
+ }
+
+ if ((cookie = DLD_TX(dsp, mp, f_hint, flag)) != NULL) {
+ DLD_SETQFULL(dsp);
+ }
+ return (cookie);
+discard:
+ /* TODO: bump kstat? */
+ freemsg(mp);
+ return (NULL);
+}
+
/*
* M_DATA put (IP fast-path mode)
*/
@@ -902,7 +973,6 @@ str_mdata_raw_put(dld_str_t *dsp, mblk_t *mp)
mblk_t *bp, *newmp;
size_t size;
mac_header_info_t mhi;
- uint_t pri, vid, dvid;
uint_t max_sdu;
/*
@@ -948,38 +1018,8 @@ str_mdata_raw_put(dld_str_t *dsp, mblk_t *mp)
goto discard;
if (is_ethernet) {
- dvid = mac_client_vid(dsp->ds_mch);
-
- /*
- * Discard the packet if this is a VLAN stream but the VID in
- * the packet is not correct.
- */
- vid = VLAN_ID(mhi.mhi_tci);
- if ((dvid != VLAN_ID_NONE) && (vid != VLAN_ID_NONE))
- goto discard;
-
- /*
- * Discard the packet if this packet is a tagged packet
- * but both pri and VID are 0.
- */
- pri = VLAN_PRI(mhi.mhi_tci);
- if (mhi.mhi_istagged && !mhi.mhi_ispvid && pri == 0 &&
- vid == VLAN_ID_NONE)
+ if (i_dld_raw_ether_check(dsp, &mhi, &mp) == B_FALSE)
goto discard;
-
- /*
- * Update the priority bits to the per-stream priority if
- * priority is not set in the packet. Update the VID for
- * packets on a VLAN stream.
- */
- pri = (pri == 0) ? dsp->ds_pri : 0;
- if ((pri != 0) || (dvid != VLAN_ID_NONE)) {
- if ((newmp = i_dld_ether_header_update_tag(mp, pri,
- dvid, dsp->ds_dlp->dl_tagmode)) == NULL) {
- goto discard;
- }
- mp = newmp;
- }
}
if (DLD_TX(dsp, mp, 0, 0) != NULL) {
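
Both the new i_dld_raw_ether_check() helper and the code it replaced key off
the 802.1Q tag control information (TCI) of the incoming header. For
reference, a self-contained sketch of the field extraction that VLAN_ID()
and VLAN_PRI() perform (the real macros live in <sys/vlan.h>; the EX_ names
are local to this example):

#include <stdint.h>

#define	EX_VLAN_ID(tci)		((tci) & 0x0fff)	/* low 12 bits: VID */
#define	EX_VLAN_PRI(tci)	(((tci) >> 13) & 0x07)	/* high 3 bits: PCP */

int
main(void)
{
	uint16_t tci = 0xa00a;	/* pri = 5, cfi = 0, vid = 10 */

	return ((EX_VLAN_ID(tci) == 10 && EX_VLAN_PRI(tci) == 5) ? 0 : 1);
}
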
diff --git a/usr/src/uts/common/io/dls/dls.c b/usr/src/uts/common/io/dls/dls.c
index 92993ada58..0f8dbcb57a 100644
--- a/usr/src/uts/common/io/dls/dls.c
+++ b/usr/src/uts/common/io/dls/dls.c
@@ -25,7 +25,7 @@
*/
/*
- * Copyright (c) 2013 Joyent, Inc. All rights reserved.
+ * Copyright 2016 Joyent, Inc.
*/
/*
@@ -248,19 +248,69 @@ dls_promisc(dld_str_t *dsp, uint32_t new_flags)
{
int err = 0;
uint32_t old_flags = dsp->ds_promisc;
+ uint32_t new_type = new_flags &
+ ~(DLS_PROMISC_RX_ONLY | DLS_PROMISC_FIXUPS);
mac_client_promisc_type_t mptype = MAC_CLIENT_PROMISC_ALL;
+ uint16_t mac_flags = 0;
+ boolean_t doremove = B_FALSE;
ASSERT(MAC_PERIM_HELD(dsp->ds_mh));
ASSERT(!(new_flags & ~(DLS_PROMISC_SAP | DLS_PROMISC_MULTI |
- DLS_PROMISC_PHYS)));
+ DLS_PROMISC_PHYS | DLS_PROMISC_RX_ONLY | DLS_PROMISC_FIXUPS)));
+
+ /*
+ * If the old and new flag sets contain nothing beyond the non-data
+ * receive flags (DLS_PROMISC_RX_ONLY and DLS_PROMISC_FIXUPS), then
+ * there's nothing to do other than update ds_promisc here: those
+ * flags only change how promiscuous packets are handled, not which
+ * packets we receive, so there's no need to talk to MAC.
+ */
+ if ((old_flags & ~(DLS_PROMISC_RX_ONLY | DLS_PROMISC_FIXUPS)) == 0 &&
+ (new_flags & ~(DLS_PROMISC_RX_ONLY | DLS_PROMISC_FIXUPS)) == 0) {
+ dsp->ds_promisc = new_flags;
+ return (0);
+ }
/*
* If the user has only requested DLS_PROMISC_MULTI then we need to make
* sure that they don't see all packets.
*/
- if (new_flags == DLS_PROMISC_MULTI)
+ if (new_type == DLS_PROMISC_MULTI)
mptype = MAC_CLIENT_PROMISC_MULTI;
+ /*
+ * Look at new flags and figure out the correct mac promisc flags.
+ * If we've only requested DLS_PROMISC_SAP and not _MULTI or _PHYS,
+ * don't turn on physical promisc mode.
+ */
+ if (new_flags & DLS_PROMISC_RX_ONLY)
+ mac_flags |= MAC_PROMISC_FLAGS_NO_TX_LOOP;
+ if (new_flags & DLS_PROMISC_FIXUPS)
+ mac_flags |= MAC_PROMISC_FLAGS_DO_FIXUPS;
+ if (new_type == DLS_PROMISC_SAP)
+ mac_flags |= MAC_PROMISC_FLAGS_NO_PHYS;
+
+ /*
+ * If we're being asked to transition to a state where the only
+ * enabled DLS flags are those that change what we do with
+ * promiscuous packets (DLS_PROMISC_RX_ONLY and DLS_PROMISC_FIXUPS)
+ * rather than which packets we should receive, then we need to
+ * remove the MAC layer promiscuous handler.
+ */
+ if ((new_flags & ~(DLS_PROMISC_RX_ONLY | DLS_PROMISC_FIXUPS)) == 0 &&
+ (old_flags & ~(DLS_PROMISC_RX_ONLY | DLS_PROMISC_FIXUPS)) != 0 &&
+ new_flags != 0) {
+ doremove = B_TRUE;
+ }
+
+ /*
+ * With respect to MAC there are three cases we care about: going
+ * from nothing to something, from something to nothing, and from
+ * something to something else that changes how we receive packets
+ * from MAC. In the last case, as long as the flags are not equal,
+ * we must assume something has changed and re-register accordingly.
+ */
if (dsp->ds_promisc == 0 && new_flags != 0) {
/*
* If only DLS_PROMISC_SAP, we don't turn on the
@@ -268,9 +318,7 @@ dls_promisc(dld_str_t *dsp, uint32_t new_flags)
*/
dsp->ds_promisc = new_flags;
err = mac_promisc_add(dsp->ds_mch, mptype,
- dls_rx_promisc, dsp, &dsp->ds_mph,
- (new_flags != DLS_PROMISC_SAP) ? 0 :
- MAC_PROMISC_FLAGS_NO_PHYS);
+ dls_rx_promisc, dsp, &dsp->ds_mph, mac_flags);
if (err != 0) {
dsp->ds_promisc = old_flags;
return (err);
@@ -281,7 +329,8 @@ dls_promisc(dld_str_t *dsp, uint32_t new_flags)
mac_promisc_remove(dsp->ds_vlan_mph);
dsp->ds_vlan_mph = NULL;
}
- } else if (dsp->ds_promisc != 0 && new_flags == 0) {
+ } else if (dsp->ds_promisc != 0 &&
+ (new_flags == 0 || doremove == B_TRUE)) {
ASSERT(dsp->ds_mph != NULL);
mac_promisc_remove(dsp->ds_mph);
@@ -296,19 +345,13 @@ dls_promisc(dld_str_t *dsp, uint32_t new_flags)
MAC_CLIENT_PROMISC_ALL, dls_rx_vlan_promisc, dsp,
&dsp->ds_vlan_mph, MAC_PROMISC_FLAGS_NO_PHYS);
}
- } else if (dsp->ds_promisc == DLS_PROMISC_SAP && new_flags != 0 &&
- new_flags != dsp->ds_promisc) {
- /*
- * If the old flag is PROMISC_SAP, but the current flag has
- * changed to some new non-zero value, we need to turn the
- * physical promiscuous mode.
- */
+ } else if (new_flags != 0 && new_flags != old_flags) {
ASSERT(dsp->ds_mph != NULL);
mac_promisc_remove(dsp->ds_mph);
/* Honors both after-remove and before-add semantics! */
dsp->ds_promisc = new_flags;
err = mac_promisc_add(dsp->ds_mch, mptype,
- dls_rx_promisc, dsp, &dsp->ds_mph, 0);
+ dls_rx_promisc, dsp, &dsp->ds_mph, mac_flags);
if (err != 0)
dsp->ds_promisc = old_flags;
} else {
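
The early-return case at the top of dls_promisc() reduces to a simple
predicate: if neither the old nor the new flag set contains anything beyond
the two non-data receive flags, only ds_promisc needs updating and MAC is
left untouched. A minimal model of that test (flag values are illustrative;
the real definitions come from the DLS headers):

#include <sys/types.h>

#define	EX_PROMISC_RX_ONLY	0x08
#define	EX_PROMISC_FIXUPS	0x10
#define	EX_SOFT_FLAGS		(EX_PROMISC_RX_ONLY | EX_PROMISC_FIXUPS)

/* B_TRUE when only the soft flags are involved and MAC can be skipped */
static boolean_t
promisc_soft_only(uint32_t old_flags, uint32_t new_flags)
{
	return (((old_flags & ~EX_SOFT_FLAGS) == 0 &&
	    (new_flags & ~EX_SOFT_FLAGS) == 0) ? B_TRUE : B_FALSE);
}
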
@@ -629,6 +672,22 @@ boolean_t
dls_accept_promisc(dld_str_t *dsp, mac_header_info_t *mhip, dls_rx_t *ds_rx,
void **ds_rx_arg, boolean_t loopback)
{
+ if (dsp->ds_promisc == 0) {
+ /*
+ * If there are active walkers of the mi_promisc_list when
+ * promiscuousness is disabled, ds_promisc will be cleared,
+ * but the DLS will remain on the mi_promisc_list until the
+ * walk is completed. If we do not recognize this case here,
+ * we won't properly execute the ds_promisc case in the common
+ * accept routine -- and we will potentially accept a packet
+ * that has originated with this DLS (which in turn can
+ * induce recursion and death by stack overflow). If
+ * ds_promisc is zero, we know that we are in this window --
+ * and we refuse to accept the packet.
+ */
+ return (B_FALSE);
+ }
+
return (dls_accept_common(dsp, mhip, ds_rx, ds_rx_arg, B_TRUE,
loopback));
}
@@ -659,7 +718,10 @@ dls_mac_active_set(dls_link_t *dlp)
* Set the function to start receiving packets.
*/
mac_rx_set(dlp->dl_mch, i_dls_link_rx, dlp);
+ } else if (dlp->dl_exclusive == B_TRUE) {
+ return (EBUSY);
}
+
dlp->dl_nactive++;
return (0);
}
@@ -685,7 +747,11 @@ dls_active_set(dld_str_t *dsp)
if (dsp->ds_passivestate == DLD_PASSIVE)
return (0);
- /* If we're already active, then there's nothing more to do. */
+ if (dsp->ds_dlp->dl_exclusive == B_TRUE &&
+ dsp->ds_passivestate != DLD_EXCLUSIVE)
+ return (EBUSY);
+
+ /* If we're already active, we need to check the link's exclusivity */
if ((dsp->ds_nactive == 0) &&
((err = dls_mac_active_set(dsp->ds_dlp)) != 0)) {
/* except for ENXIO all other errors are mapped to EBUSY */
@@ -694,7 +760,8 @@ dls_active_set(dld_str_t *dsp)
return (err);
}
- dsp->ds_passivestate = DLD_ACTIVE;
+ dsp->ds_passivestate = dsp->ds_dlp->dl_exclusive == B_TRUE ?
+ DLD_EXCLUSIVE : DLD_ACTIVE;
dsp->ds_nactive++;
return (0);
}
@@ -725,7 +792,32 @@ dls_active_clear(dld_str_t *dsp, boolean_t all)
if (dsp->ds_nactive != 0)
return;
- ASSERT(dsp->ds_passivestate == DLD_ACTIVE);
+ ASSERT(dsp->ds_passivestate == DLD_ACTIVE ||
+ dsp->ds_passivestate == DLD_EXCLUSIVE);
dls_mac_active_clear(dsp->ds_dlp);
+ /*
+ * We verify below to ensure that no other part of DLS has mucked with
+ * our exclusive state.
+ */
+ if (dsp->ds_passivestate == DLD_EXCLUSIVE)
+ VERIFY(dls_exclusive_set(dsp, B_FALSE) == 0);
dsp->ds_passivestate = DLD_UNINITIALIZED;
}
+
+int
+dls_exclusive_set(dld_str_t *dsp, boolean_t enable)
+{
+ ASSERT(MAC_PERIM_HELD(dsp->ds_mh));
+
+ if (enable == B_FALSE) {
+ dsp->ds_dlp->dl_exclusive = B_FALSE;
+ return (0);
+ }
+
+ if (dsp->ds_dlp->dl_nactive != 0)
+ return (EBUSY);
+
+ dsp->ds_dlp->dl_exclusive = B_TRUE;
+
+ return (0);
+}
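
Taken together, dls_exclusive_set() and the checks added to
dls_mac_active_set() and dls_active_set() enforce one invariant: exclusive
access can only be claimed while no streams are active on the link, and no
other stream can go active while it is held. A toy model of that state
machine (the names are illustrative, not from the source):

#include <errno.h>

typedef struct ex_link {
	unsigned int	el_nactive;	/* active streams on the link */
	int		el_exclusive;	/* nonzero while exclusively owned */
} ex_link_t;

static int
ex_claim(ex_link_t *lp)
{
	if (lp->el_nactive != 0)
		return (EBUSY);		/* someone is already active */
	lp->el_exclusive = 1;
	return (0);
}

static int
ex_activate(ex_link_t *lp, int is_owner)
{
	if (lp->el_exclusive && !is_owner)
		return (EBUSY);		/* link is exclusively owned */
	lp->el_nactive++;
	return (0);
}
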
diff --git a/usr/src/uts/common/io/dls/dls_link.c b/usr/src/uts/common/io/dls/dls_link.c
index 6b92a81e77..4a735d870e 100644
--- a/usr/src/uts/common/io/dls/dls_link.c
+++ b/usr/src/uts/common/io/dls/dls_link.c
@@ -21,6 +21,7 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright 2015 Joyent, Inc.
*/
/*
@@ -34,6 +35,9 @@
#include <sys/dld_impl.h>
#include <sys/sdt.h>
#include <sys/atomic.h>
+#include <sys/sysevent.h>
+#include <sys/sysevent/eventdefs.h>
+#include <sys/sysevent/datalink.h>
static kmem_cache_t *i_dls_link_cachep;
mod_hash_t *i_dls_link_hash;
@@ -579,6 +583,67 @@ drop:
freemsg(mp);
}
+/*
+ * We'd like to notify via sysevents that a link state change has occurred.
+ * There are a couple of challenges associated with this. The first is that if
+ * the link is flapping a lot, we may not see an accurate state when we launch
+ * the notification: we're told that it changed, not what it changed to.
+ *
+ * The next problem is that all of the information that a user has
+ * associated with this device is the exact opposite of what we have on
+ * the dls_link_t. We have the name of the mac device, which has no
+ * bearing on what users see; likewise, we don't have the datalink id.
+ * So we're going to have to get both from dls.
+ *
+ * This is all further complicated by the fact that this could be going on in
+ * another thread at the same time as someone is tearing down the dls_link_t
+ * that we're associated with. We need to be careful not to grab the mac
+ * perimeter; otherwise we stand a good chance of deadlock.
+ */
+static void
+dls_link_notify(void *arg, mac_notify_type_t type)
+{
+ dls_link_t *dlp = arg;
+ dls_dl_handle_t dhp;
+ nvlist_t *nvp;
+ sysevent_t *event;
+ sysevent_id_t eid;
+
+ if (type != MAC_NOTE_LINK && type != MAC_NOTE_LOWLINK)
+ return;
+
+ /*
+ * If we can't find a devnet handle for this link, then there is no
+ * user-knowable device for it at the moment, and there's nothing we
+ * can really share with them that will make sense.
+ */
+ if (dls_devnet_hold_tmp_by_link(dlp, &dhp) != 0)
+ return;
+
+ /*
+ * Because we're attaching this nvlist_t to the sysevent, it'll get
+ * cleaned up when we call sysevent_free.
+ */
+ VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY(nvlist_add_int32(nvp, DATALINK_EV_LINK_ID,
+ dls_devnet_linkid(dhp)) == 0);
+ VERIFY(nvlist_add_string(nvp, DATALINK_EV_LINK_NAME,
+ dls_devnet_link(dhp)) == 0);
+ VERIFY(nvlist_add_int32(nvp, DATALINK_EV_ZONE_ID,
+ dls_devnet_getzid(dhp)) == 0);
+
+ dls_devnet_rele_tmp(dhp);
+
+ event = sysevent_alloc(EC_DATALINK, ESC_DATALINK_LINK_STATE,
+ ILLUMOS_KERN_PUB"dls", SE_SLEEP);
+ VERIFY(event != NULL);
+ (void) sysevent_attach_attributes(event, (sysevent_attr_list_t *)nvp);
+
+ (void) log_sysevent(event, SE_SLEEP, &eid);
+ sysevent_free(event);
+}
+
static void
i_dls_link_destroy(dls_link_t *dlp)
{
@@ -589,6 +654,9 @@ i_dls_link_destroy(dls_link_t *dlp)
/*
* Free the structure back to the cache.
*/
+ if (dlp->dl_mnh != NULL)
+ mac_notify_remove(dlp->dl_mnh, B_TRUE);
+
if (dlp->dl_mch != NULL)
mac_client_close(dlp->dl_mch, 0);
@@ -600,8 +668,10 @@ i_dls_link_destroy(dls_link_t *dlp)
dlp->dl_mh = NULL;
dlp->dl_mch = NULL;
dlp->dl_mip = NULL;
+ dlp->dl_mnh = NULL;
dlp->dl_unknowns = 0;
dlp->dl_nonip_cnt = 0;
+ dlp->dl_exclusive = B_FALSE;
kmem_cache_free(i_dls_link_cachep, dlp);
}
@@ -640,6 +710,8 @@ i_dls_link_create(const char *name, dls_link_t **dlpp)
if (err != 0)
goto bail;
+ dlp->dl_mnh = mac_notify_add(dlp->dl_mh, dls_link_notify, dlp);
+
DTRACE_PROBE2(dls__primary__client, char *, dlp->dl_name, void *,
dlp->dl_mch);
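
On the consumption side, the events published by dls_link_notify() can be
observed from userland with libsysevent. A hedged sketch of a subscriber
(this assumes the class, subclass, and attribute macros from
<sys/sysevent/eventdefs.h> and <sys/sysevent/datalink.h> introduced here are
visible to userland; compile with -lsysevent -lnvpair):

#include <libsysevent.h>
#include <libnvpair.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/datalink.h>

static void
handler(sysevent_t *ev)
{
	nvlist_t *nvp;
	char *name;
	int32_t linkid;

	if (sysevent_get_attr_list(ev, &nvp) != 0)
		return;
	if (nvlist_lookup_string(nvp, DATALINK_EV_LINK_NAME, &name) == 0 &&
	    nvlist_lookup_int32(nvp, DATALINK_EV_LINK_ID, &linkid) == 0)
		(void) printf("link state change: %s (id %d)\n", name, linkid);
	nvlist_free(nvp);
}

int
main(void)
{
	const char *subclass = ESC_DATALINK_LINK_STATE;
	sysevent_handle_t *shp;

	if ((shp = sysevent_bind_handle(handler)) == NULL)
		return (1);
	if (sysevent_subscribe_event(shp, EC_DATALINK, &subclass, 1) != 0)
		return (1);
	for (;;)
		(void) pause();
}
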
diff --git a/usr/src/uts/common/io/dls/dls_mgmt.c b/usr/src/uts/common/io/dls/dls_mgmt.c
index 049c4bd757..105c55c7ce 100644
--- a/usr/src/uts/common/io/dls/dls_mgmt.c
+++ b/usr/src/uts/common/io/dls/dls_mgmt.c
@@ -21,6 +21,7 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright 2015 Joyent, Inc.
*/
/*
@@ -105,12 +106,13 @@ typedef struct dls_devnet_s {
zoneid_t dd_zid; /* current zone */
boolean_t dd_prop_loaded;
taskqid_t dd_prop_taskid;
+ boolean_t dd_transient; /* link goes away when zone does */
} dls_devnet_t;
static int i_dls_devnet_create_iptun(const char *, const char *,
datalink_id_t *);
static int i_dls_devnet_destroy_iptun(datalink_id_t);
-static int i_dls_devnet_setzid(dls_devnet_t *, zoneid_t, boolean_t);
+static int i_dls_devnet_setzid(dls_devnet_t *, zoneid_t, boolean_t, boolean_t);
static int dls_devnet_unset(const char *, datalink_id_t *, boolean_t);
/*ARGSUSED*/
@@ -145,7 +147,12 @@ dls_zone_remove(datalink_id_t linkid, void *arg)
dls_devnet_t *ddp;
if (dls_devnet_hold_tmp(linkid, &ddp) == 0) {
- (void) dls_devnet_setzid(ddp, GLOBAL_ZONEID);
+ /*
+ * Don't bother moving transient links back to the global zone
+ * since we will simply delete them in dls_devnet_unset.
+ */
+ if (!ddp->dd_transient)
+ (void) dls_devnet_setzid(ddp, GLOBAL_ZONEID, B_FALSE);
dls_devnet_rele_tmp(ddp);
}
return (0);
@@ -526,6 +533,7 @@ dls_mgmt_get_linkid(const char *link, datalink_id_t *linkid)
getlinkid.ld_cmd = DLMGMT_CMD_GETLINKID;
(void) strlcpy(getlinkid.ld_link, link, MAXLINKNAMELEN);
+ getlinkid.ld_zoneid = getzoneid();
if ((err = i_dls_mgmt_upcall(&getlinkid, sizeof (getlinkid), &retval,
sizeof (retval))) == 0) {
@@ -534,6 +542,27 @@ dls_mgmt_get_linkid(const char *link, datalink_id_t *linkid)
return (err);
}
+int
+dls_mgmt_get_linkid_in_zone(const char *link, datalink_id_t *linkid,
+ zoneid_t zid)
+{
+ dlmgmt_door_getlinkid_t getlinkid;
+ dlmgmt_getlinkid_retval_t retval;
+ int err;
+
+ ASSERT(getzoneid() == GLOBAL_ZONEID || zid == getzoneid());
+ getlinkid.ld_cmd = DLMGMT_CMD_GETLINKID;
+ (void) strlcpy(getlinkid.ld_link, link, MAXLINKNAMELEN);
+ getlinkid.ld_zoneid = zid;
+
+ if ((err = i_dls_mgmt_upcall(&getlinkid, sizeof (getlinkid), &retval,
+ sizeof (retval))) == 0) {
+ *linkid = retval.lr_linkid;
+ }
+ return (err);
+}
+
datalink_id_t
dls_mgmt_get_next(datalink_id_t linkid, datalink_class_t class,
datalink_media_t dmedia, uint32_t flags)
@@ -740,12 +769,23 @@ dls_devnet_stat_update(kstat_t *ksp, int rw)
* Create the "link" kstats.
*/
static void
-dls_devnet_stat_create(dls_devnet_t *ddp, zoneid_t zoneid)
+dls_devnet_stat_create(dls_devnet_t *ddp, zoneid_t zoneid, zoneid_t newzoneid)
{
kstat_t *ksp;
+ char *nm;
+ char kname[MAXLINKNAMELEN];
+
+ if (zoneid != newzoneid) {
+ ASSERT(zoneid == GLOBAL_ZONEID);
+ (void) snprintf(kname, sizeof (kname), "z%d_%s", newzoneid,
+ ddp->dd_linkname);
+ nm = kname;
+ } else {
+ nm = ddp->dd_linkname;
+ }
- if (dls_stat_create("link", 0, ddp->dd_linkname, zoneid,
- dls_devnet_stat_update, ddp, &ksp) == 0) {
+ if (dls_stat_create("link", 0, nm, zoneid,
+ dls_devnet_stat_update, ddp, &ksp, newzoneid) == 0) {
ASSERT(ksp != NULL);
if (zoneid == ddp->dd_owner_zid) {
ASSERT(ddp->dd_ksp == NULL);
@@ -765,12 +805,12 @@ dls_devnet_stat_destroy(dls_devnet_t *ddp, zoneid_t zoneid)
{
if (zoneid == ddp->dd_owner_zid) {
if (ddp->dd_ksp != NULL) {
- kstat_delete(ddp->dd_ksp);
+ dls_stat_delete(ddp->dd_ksp);
ddp->dd_ksp = NULL;
}
} else {
if (ddp->dd_zone_ksp != NULL) {
- kstat_delete(ddp->dd_zone_ksp);
+ dls_stat_delete(ddp->dd_zone_ksp);
ddp->dd_zone_ksp = NULL;
}
}
@@ -781,15 +821,25 @@ dls_devnet_stat_destroy(dls_devnet_t *ddp, zoneid_t zoneid)
* and create the new set using the new name.
*/
static void
-dls_devnet_stat_rename(dls_devnet_t *ddp)
+dls_devnet_stat_rename(dls_devnet_t *ddp, boolean_t zoneinit)
{
if (ddp->dd_ksp != NULL) {
- kstat_delete(ddp->dd_ksp);
+ dls_stat_delete(ddp->dd_ksp);
ddp->dd_ksp = NULL;
}
- /* We can't rename a link while it's assigned to a non-global zone. */
+ if (zoneinit && ddp->dd_zone_ksp != NULL) {
+ dls_stat_delete(ddp->dd_zone_ksp);
+ ddp->dd_zone_ksp = NULL;
+ }
+ /*
+ * We can't rename a link while it's assigned to a non-global zone
+ * unless we're first initializing the zone while readying it.
+ */
ASSERT(ddp->dd_zone_ksp == NULL);
- dls_devnet_stat_create(ddp, ddp->dd_owner_zid);
+ dls_devnet_stat_create(ddp, ddp->dd_owner_zid,
+ (zoneinit ? ddp->dd_zid : ddp->dd_owner_zid));
+ if (zoneinit)
+ dls_devnet_stat_create(ddp, ddp->dd_zid, ddp->dd_zid);
}
/*
@@ -878,7 +928,8 @@ done:
rw_exit(&i_dls_devnet_lock);
if (err == 0) {
if (zoneid != GLOBAL_ZONEID &&
- (err = i_dls_devnet_setzid(ddp, zoneid, B_FALSE)) != 0)
+ (err = i_dls_devnet_setzid(ddp, zoneid, B_FALSE,
+ B_FALSE)) != 0)
(void) dls_devnet_unset(macname, &linkid, B_TRUE);
/*
* The kstat subsystem holds its own locks (rather perimeter)
@@ -887,7 +938,7 @@ done:
* lock hierarchy is kstat locks -> i_dls_devnet_lock.
*/
if (stat_create)
- dls_devnet_stat_create(ddp, zoneid);
+ dls_devnet_stat_create(ddp, zoneid, zoneid);
if (ddpp != NULL)
*ddpp = ddp;
}
@@ -924,17 +975,78 @@ dls_devnet_unset(const char *macname, datalink_id_t *id, boolean_t wait)
ASSERT(ddp->dd_ref != 0);
if ((ddp->dd_ref != 1) || (!wait &&
(ddp->dd_tref != 0 || ddp->dd_prop_taskid != NULL))) {
- mutex_exit(&ddp->dd_mutex);
- rw_exit(&i_dls_devnet_lock);
- return (EBUSY);
+ int zstatus = 0;
+
+ /*
+ * There are two possibilities here: a) the zone is shutting down and
+ * has a transient link assigned, in which case we want to clean it up
+ * instead of moving it back to the global zone, or b) it's possible
+ * that we're trying to clean up an orphaned VNIC that was delegated to
+ * a zone and wasn't cleaned up properly when the zone went away. Check
+ * for either of these cases before we simply return EBUSY.
+ *
+ * zstatus indicates which situation we are dealing with:
+ *	 0 - return EBUSY
+ *	 1 - case (a), clean up the transient link
+ *	-1 - case (b), orphaned VNIC
+ */
+ if (ddp->dd_ref > 1 && ddp->dd_zid != GLOBAL_ZONEID) {
+ zone_t *zp;
+
+ if ((zp = zone_find_by_id(ddp->dd_zid)) == NULL) {
+ zstatus = -1;
+ } else {
+ if (ddp->dd_transient) {
+ zone_status_t s = zone_status_get(zp);
+
+ if (s >= ZONE_IS_SHUTTING_DOWN)
+ zstatus = 1;
+ }
+ zone_rele(zp);
+ }
+ }
+
+ if (zstatus == 0) {
+ mutex_exit(&ddp->dd_mutex);
+ rw_exit(&i_dls_devnet_lock);
+ return (EBUSY);
+ }
+
+ /*
+ * We want to delete the link, so reset ref to 1.
+ */
+ if (zstatus == -1)
+ /* Log a warning, but continue in this case */
+ cmn_err(CE_WARN, "clear orphaned datalink: %s\n",
+ ddp->dd_linkname);
+ ddp->dd_ref = 1;
}
ddp->dd_flags |= DD_CONDEMNED;
ddp->dd_ref--;
*id = ddp->dd_linkid;
- if (ddp->dd_zid != GLOBAL_ZONEID)
- (void) i_dls_devnet_setzid(ddp, GLOBAL_ZONEID, B_FALSE);
+ if (ddp->dd_zid != GLOBAL_ZONEID) {
+ /*
+ * We need to release the dd_mutex before we try to destroy the
+ * stat. When we destroy it, we'll need to grab the lock for the
+ * kstat but if there's a concurrent reader of the kstat, we'll
+ * be blocked on it. This will lead to deadlock because these
+ * kstats employ a ks_update function (dls_devnet_stat_update)
+ * which needs the dd_mutex that we currently hold.
+ *
+ * Because we've already flagged the dls_devnet_t as
+ * DD_CONDEMNED and we still have a write lock on
+ * i_dls_devnet_lock, we should be able to release the dd_mutex.
+ */
+ mutex_exit(&ddp->dd_mutex);
+ dls_devnet_stat_destroy(ddp, ddp->dd_zid);
+ mutex_enter(&ddp->dd_mutex);
+ (void) i_dls_devnet_setzid(ddp, GLOBAL_ZONEID, B_FALSE,
+ B_FALSE);
+ }
/*
* Remove this dls_devnet_t from the hash table.
@@ -960,8 +1072,15 @@ dls_devnet_unset(const char *macname, datalink_id_t *id, boolean_t wait)
ASSERT(ddp->dd_tref == 0 && ddp->dd_prop_taskid == NULL);
}
- if (ddp->dd_linkid != DATALINK_INVALID_LINKID)
+ if (ddp->dd_linkid != DATALINK_INVALID_LINKID) {
+ /*
+ * See the earlier call in this function for an explanation.
+ */
+ mutex_exit(&ddp->dd_mutex);
dls_devnet_stat_destroy(ddp, ddp->dd_owner_zid);
+ mutex_enter(&ddp->dd_mutex);
+ }
+
ddp->dd_prop_loaded = B_FALSE;
ddp->dd_linkid = DATALINK_INVALID_LINKID;
@@ -972,6 +1091,39 @@ dls_devnet_unset(const char *macname, datalink_id_t *id, boolean_t wait)
return (0);
}
+/*
+ * This is a private hold routine used when we already have the dls_link_t, thus
+ * we know that it cannot go away.
+ */
+int
+dls_devnet_hold_tmp_by_link(dls_link_t *dlp, dls_dl_handle_t *ddhp)
+{
+ int err;
+ dls_devnet_t *ddp = NULL;
+
+ rw_enter(&i_dls_devnet_lock, RW_WRITER);
+ if ((err = mod_hash_find(i_dls_devnet_hash,
+ (mod_hash_key_t)dlp->dl_name, (mod_hash_val_t *)&ddp)) != 0) {
+ ASSERT(err == MH_ERR_NOTFOUND);
+ rw_exit(&i_dls_devnet_lock);
+ return (ENOENT);
+ }
+
+ mutex_enter(&ddp->dd_mutex);
+ ASSERT(ddp->dd_ref > 0);
+ if (ddp->dd_flags & DD_CONDEMNED) {
+ mutex_exit(&ddp->dd_mutex);
+ rw_exit(&i_dls_devnet_lock);
+ return (ENOENT);
+ }
+ ddp->dd_tref++;
+ mutex_exit(&ddp->dd_mutex);
+ rw_exit(&i_dls_devnet_lock);
+
+ *ddhp = ddp;
+ return (0);
+}
+
static int
dls_devnet_hold_common(datalink_id_t linkid, dls_devnet_t **ddpp,
boolean_t tmp_hold)
@@ -1111,7 +1263,7 @@ dls_devnet_rele(dls_devnet_t *ddp)
}
static int
-dls_devnet_hold_by_name(const char *link, dls_devnet_t **ddpp)
+dls_devnet_hold_by_name(const char *link, dls_devnet_t **ddpp, zoneid_t zid)
{
char drv[MAXLINKNAMELEN];
uint_t ppa;
@@ -1121,7 +1273,7 @@ dls_devnet_hold_by_name(const char *link, dls_devnet_t **ddpp)
dls_dev_handle_t ddh;
int err;
- if ((err = dls_mgmt_get_linkid(link, &linkid)) == 0)
+ if ((err = dls_mgmt_get_linkid_in_zone(link, &linkid, zid)) == 0)
return (dls_devnet_hold(linkid, ddpp));
/*
@@ -1261,9 +1413,15 @@ dls_devnet_phydev(datalink_id_t vlanid, dev_t *devp)
*
* This case does not change the <link name, linkid> mapping, so the link's
* kstats need to be updated with using name associated the given id2.
+ *
+ * The zoneinit parameter is used to allow us to create a VNIC in the global
+ * zone which is assigned to a non-global zone. Since there is a race condition
+ * in the create process if two VNICs have the same name, we need to rename the
+ * link after it has been assigned to the zone.
*/
int
-dls_devnet_rename(datalink_id_t id1, datalink_id_t id2, const char *link)
+dls_devnet_rename(datalink_id_t id1, datalink_id_t id2, const char *link,
+ boolean_t zoneinit)
{
dls_dev_handle_t ddh = NULL;
int err = 0;
@@ -1313,13 +1471,16 @@ dls_devnet_rename(datalink_id_t id1, datalink_id_t id2, const char *link)
* is currently accessing the link kstats, or if the link is on-loan
* to a non-global zone. Then set the DD_KSTAT_CHANGING flag to
* prevent any access to the kstats while we delete and recreate
- * kstats below.
+ * kstats below. However, we skip this check if we're renaming the
+ * vnic as part of bringing it up for a zone.
*/
mutex_enter(&ddp->dd_mutex);
- if (ddp->dd_ref > 1) {
- mutex_exit(&ddp->dd_mutex);
- err = EBUSY;
- goto done;
+ if (!zoneinit) {
+ if (ddp->dd_ref > 1) {
+ mutex_exit(&ddp->dd_mutex);
+ err = EBUSY;
+ goto done;
+ }
}
ddp->dd_flags |= DD_KSTAT_CHANGING;
@@ -1333,7 +1494,15 @@ dls_devnet_rename(datalink_id_t id1, datalink_id_t id2, const char *link)
/* rename mac client name and its flow if exists */
if ((err = mac_open(ddp->dd_mac, &mh)) != 0)
goto done;
- (void) mac_rename_primary(mh, link);
+ if (zoneinit) {
+ char tname[MAXLINKNAMELEN];
+
+ (void) snprintf(tname, sizeof (tname), "z%d_%s",
+ ddp->dd_zid, link);
+ (void) mac_rename_primary(mh, tname);
+ } else {
+ (void) mac_rename_primary(mh, link);
+ }
mac_close(mh);
goto done;
}
@@ -1406,7 +1575,7 @@ done:
*/
rw_exit(&i_dls_devnet_lock);
if (err == 0)
- dls_devnet_stat_rename(ddp);
+ dls_devnet_stat_rename(ddp, zoneinit);
if (clear_dd_flag) {
mutex_enter(&ddp->dd_mutex);
@@ -1421,7 +1590,8 @@ done:
}
static int
-i_dls_devnet_setzid(dls_devnet_t *ddp, zoneid_t new_zoneid, boolean_t setprop)
+i_dls_devnet_setzid(dls_devnet_t *ddp, zoneid_t new_zoneid, boolean_t setprop,
+ boolean_t transient)
{
int err;
mac_perim_handle_t mph;
@@ -1454,6 +1624,7 @@ i_dls_devnet_setzid(dls_devnet_t *ddp, zoneid_t new_zoneid, boolean_t setprop)
}
if ((err = dls_link_setzid(ddp->dd_mac, new_zoneid)) == 0) {
ddp->dd_zid = new_zoneid;
+ ddp->dd_transient = transient;
devnet_need_rebuild = B_TRUE;
}
@@ -1468,7 +1639,7 @@ done:
}
int
-dls_devnet_setzid(dls_dl_handle_t ddh, zoneid_t new_zid)
+dls_devnet_setzid(dls_dl_handle_t ddh, zoneid_t new_zid, boolean_t transient)
{
dls_devnet_t *ddp;
int err;
@@ -1490,7 +1661,7 @@ dls_devnet_setzid(dls_dl_handle_t ddh, zoneid_t new_zid)
refheld = B_TRUE;
}
- if ((err = i_dls_devnet_setzid(ddh, new_zid, B_TRUE)) != 0) {
+ if ((err = i_dls_devnet_setzid(ddh, new_zid, B_TRUE, transient)) != 0) {
if (refheld)
dls_devnet_rele(ddp);
return (err);
@@ -1507,7 +1678,7 @@ dls_devnet_setzid(dls_dl_handle_t ddh, zoneid_t new_zid)
if (old_zid != GLOBAL_ZONEID)
dls_devnet_stat_destroy(ddh, old_zid);
if (new_zid != GLOBAL_ZONEID)
- dls_devnet_stat_create(ddh, new_zid);
+ dls_devnet_stat_create(ddh, new_zid, new_zid);
return (0);
}
@@ -1545,15 +1716,19 @@ dls_devnet_islinkvisible(datalink_id_t linkid, zoneid_t zoneid)
* Access a vanity naming node.
*/
int
-dls_devnet_open(const char *link, dls_dl_handle_t *dhp, dev_t *devp)
+dls_devnet_open_in_zone(const char *link, dls_dl_handle_t *dhp, dev_t *devp,
+ zoneid_t zid)
{
dls_devnet_t *ddp;
dls_link_t *dlp;
- zoneid_t zid = getzoneid();
+ zoneid_t czid = getzoneid();
int err;
mac_perim_handle_t mph;
- if ((err = dls_devnet_hold_by_name(link, &ddp)) != 0)
+ if (czid != GLOBAL_ZONEID && czid != zid)
+ return (ENOENT);
+
+ if ((err = dls_devnet_hold_by_name(link, &ddp, zid)) != 0)
return (err);
dls_devnet_prop_task_wait(ddp);
@@ -1586,6 +1761,12 @@ dls_devnet_open(const char *link, dls_dl_handle_t *dhp, dev_t *devp)
return (0);
}
+int
+dls_devnet_open(const char *link, dls_dl_handle_t *dhp, dev_t *devp)
+{
+ return (dls_devnet_open_in_zone(link, dhp, devp, getzoneid()));
+}
+
/*
* Close access to a vanity naming node.
*/
@@ -1765,6 +1946,12 @@ i_dls_devnet_destroy_iptun(datalink_id_t linkid)
}
const char *
+dls_devnet_link(dls_dl_handle_t ddh)
+{
+ return (ddh->dd_linkname);
+}
+
+const char *
dls_devnet_mac(dls_dl_handle_t ddh)
{
return (ddh->dd_mac);
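
The kstat naming convention introduced in dls_devnet_stat_create() and
dls_devnet_rename() prefixes a link's kstat name with "z<zoneid>_" when the
kstat is created in the global zone on behalf of a non-global zone. A
trivial standalone illustration of the format:

#include <stdio.h>

#define	EX_MAXLINKNAMELEN	32	/* stand-in for MAXLINKNAMELEN */

int
main(void)
{
	char kname[EX_MAXLINKNAMELEN];

	(void) snprintf(kname, sizeof (kname), "z%d_%s", 4, "net0");
	(void) puts(kname);	/* prints "z4_net0" */
	return (0);
}
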
diff --git a/usr/src/uts/common/io/dls/dls_stat.c b/usr/src/uts/common/io/dls/dls_stat.c
index 51e4be7260..82dceff278 100644
--- a/usr/src/uts/common/io/dls/dls_stat.c
+++ b/usr/src/uts/common/io/dls/dls_stat.c
@@ -21,6 +21,7 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright 2011 Joyent, Inc. All rights reserved.
*/
/*
@@ -30,30 +31,33 @@
#include <sys/dld_impl.h>
#include <sys/mac_ether.h>
-static mac_stat_info_t i_dls_si[] = {
- { MAC_STAT_IFSPEED, "ifspeed", KSTAT_DATA_UINT64, 0 },
- { MAC_STAT_MULTIRCV, "multircv", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_BRDCSTRCV, "brdcstrcv", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_MULTIXMT, "multixmt", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_BRDCSTXMT, "brdcstxmt", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_NORCVBUF, "norcvbuf", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_IERRORS, "ierrors", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_NOXMTBUF, "noxmtbuf", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_OERRORS, "oerrors", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_COLLISIONS, "collisions", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_RBYTES, "rbytes", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_IPACKETS, "ipackets", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_OBYTES, "obytes", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_OPACKETS, "opackets", KSTAT_DATA_UINT32, 0 },
- { MAC_STAT_RBYTES, "rbytes64", KSTAT_DATA_UINT64, 0 },
- { MAC_STAT_IPACKETS, "ipackets64", KSTAT_DATA_UINT64, 0 },
- { MAC_STAT_OBYTES, "obytes64", KSTAT_DATA_UINT64, 0 },
- { MAC_STAT_OPACKETS, "opackets64", KSTAT_DATA_UINT64, 0 },
- { MAC_STAT_LINK_STATE, "link_state", KSTAT_DATA_UINT32,
- (uint64_t)LINK_STATE_UNKNOWN}
-};
-
-#define STAT_INFO_COUNT (sizeof (i_dls_si) / sizeof (i_dls_si[0]))
+/*
+ * structure for link kstats
+ */
+typedef struct {
+ kstat_named_t dk_ifspeed;
+ kstat_named_t dk_multircv;
+ kstat_named_t dk_brdcstrcv;
+ kstat_named_t dk_multixmt;
+ kstat_named_t dk_brdcstxmt;
+ kstat_named_t dk_norcvbuf;
+ kstat_named_t dk_ierrors;
+ kstat_named_t dk_noxmtbuf;
+ kstat_named_t dk_oerrors;
+ kstat_named_t dk_collisions;
+ kstat_named_t dk_rbytes;
+ kstat_named_t dk_ipackets;
+ kstat_named_t dk_obytes;
+ kstat_named_t dk_opackets;
+ kstat_named_t dk_rbytes64;
+ kstat_named_t dk_ipackets64;
+ kstat_named_t dk_obytes64;
+ kstat_named_t dk_opackets64;
+ kstat_named_t dk_link_state;
+ kstat_named_t dk_link_duplex;
+ kstat_named_t dk_unknowns;
+ kstat_named_t dk_zonename;
+} dls_kstat_t;
/*
* Exported functions.
@@ -61,42 +65,54 @@ static mac_stat_info_t i_dls_si[] = {
int
dls_stat_update(kstat_t *ksp, dls_link_t *dlp, int rw)
{
- kstat_named_t *knp;
- uint_t i;
- uint64_t val;
+ dls_kstat_t *dkp = ksp->ks_data;
if (rw != KSTAT_READ)
return (EACCES);
- knp = (kstat_named_t *)ksp->ks_data;
- for (i = 0; i < STAT_INFO_COUNT; i++) {
- val = mac_stat_get(dlp->dl_mh, i_dls_si[i].msi_stat);
-
- switch (i_dls_si[i].msi_type) {
- case KSTAT_DATA_UINT64:
- knp->value.ui64 = val;
- break;
- case KSTAT_DATA_UINT32:
- knp->value.ui32 = (uint32_t)val;
- break;
- default:
- ASSERT(B_FALSE);
- }
-
- knp++;
- }
+ dkp->dk_ifspeed.value.ui64 = mac_stat_get(dlp->dl_mh, MAC_STAT_IFSPEED);
+ dkp->dk_multircv.value.ui32 = mac_stat_get(dlp->dl_mh,
+ MAC_STAT_MULTIRCV);
+ dkp->dk_brdcstrcv.value.ui32 = mac_stat_get(dlp->dl_mh,
+ MAC_STAT_BRDCSTRCV);
+ dkp->dk_multixmt.value.ui32 = mac_stat_get(dlp->dl_mh,
+ MAC_STAT_MULTIXMT);
+ dkp->dk_brdcstxmt.value.ui32 = mac_stat_get(dlp->dl_mh,
+ MAC_STAT_BRDCSTXMT);
+ dkp->dk_norcvbuf.value.ui32 = mac_stat_get(dlp->dl_mh,
+ MAC_STAT_NORCVBUF);
+ dkp->dk_ierrors.value.ui32 = mac_stat_get(dlp->dl_mh, MAC_STAT_IERRORS);
+ dkp->dk_noxmtbuf.value.ui32 = mac_stat_get(dlp->dl_mh,
+ MAC_STAT_NOXMTBUF);
+ dkp->dk_oerrors.value.ui32 = mac_stat_get(dlp->dl_mh, MAC_STAT_OERRORS);
+ dkp->dk_collisions.value.ui32 = mac_stat_get(dlp->dl_mh,
+ MAC_STAT_COLLISIONS);
+ dkp->dk_rbytes.value.ui32 = mac_stat_get(dlp->dl_mh, MAC_STAT_RBYTES);
+ dkp->dk_ipackets.value.ui32 = mac_stat_get(dlp->dl_mh,
+ MAC_STAT_IPACKETS);
+ dkp->dk_obytes.value.ui32 = mac_stat_get(dlp->dl_mh, MAC_STAT_OBYTES);
+ dkp->dk_opackets.value.ui32 = mac_stat_get(dlp->dl_mh,
+ MAC_STAT_OPACKETS);
+ dkp->dk_rbytes64.value.ui64 = mac_stat_get(dlp->dl_mh, MAC_STAT_RBYTES);
+ dkp->dk_ipackets64.value.ui64 = mac_stat_get(dlp->dl_mh,
+ MAC_STAT_IPACKETS);
+ dkp->dk_obytes64.value.ui64 = mac_stat_get(dlp->dl_mh, MAC_STAT_OBYTES);
+ dkp->dk_opackets64.value.ui64 = mac_stat_get(dlp->dl_mh,
+ MAC_STAT_OPACKETS);
+ dkp->dk_link_state.value.ui32 = mac_stat_get(dlp->dl_mh,
+ MAC_STAT_LINK_STATE);
/*
* Ethernet specific kstat "link_duplex"
*/
if (dlp->dl_mip->mi_nativemedia != DL_ETHER) {
- knp->value.ui32 = LINK_DUPLEX_UNKNOWN;
+ dkp->dk_link_duplex.value.ui32 = LINK_DUPLEX_UNKNOWN;
} else {
- val = mac_stat_get(dlp->dl_mh, ETHER_STAT_LINK_DUPLEX);
- knp->value.ui32 = (uint32_t)val;
+ dkp->dk_link_duplex.value.ui32 =
+ (uint32_t)mac_stat_get(dlp->dl_mh, ETHER_STAT_LINK_DUPLEX);
}
- knp++;
- knp->value.ui32 = dlp->dl_unknowns;
+
+ dkp->dk_unknowns.value.ui32 = dlp->dl_unknowns;
return (0);
}
@@ -104,30 +120,66 @@ dls_stat_update(kstat_t *ksp, dls_link_t *dlp, int rw)
int
dls_stat_create(const char *module, int instance, const char *name,
zoneid_t zoneid, int (*update)(struct kstat *, int), void *private,
- kstat_t **kspp)
+ kstat_t **kspp, zoneid_t newzoneid)
{
kstat_t *ksp;
- kstat_named_t *knp;
- uint_t i;
+ zone_t *zone;
+ dls_kstat_t *dkp;
if ((ksp = kstat_create_zone(module, instance, name, "net",
- KSTAT_TYPE_NAMED, STAT_INFO_COUNT + 2, 0, zoneid)) == NULL) {
+ KSTAT_TYPE_NAMED, sizeof (dls_kstat_t) / sizeof (kstat_named_t),
+ KSTAT_FLAG_VIRTUAL, zoneid)) == NULL) {
return (EINVAL);
}
ksp->ks_update = update;
ksp->ks_private = private;
+ dkp = ksp->ks_data = kmem_zalloc(sizeof (dls_kstat_t), KM_SLEEP);
+ if ((zone = zone_find_by_id(newzoneid)) != NULL) {
+ ksp->ks_data_size += strlen(zone->zone_name) + 1;
+ }
- knp = (kstat_named_t *)ksp->ks_data;
- for (i = 0; i < STAT_INFO_COUNT; i++) {
- kstat_named_init(knp, i_dls_si[i].msi_name,
- i_dls_si[i].msi_type);
- knp++;
+ kstat_named_init(&dkp->dk_ifspeed, "ifspeed", KSTAT_DATA_UINT64);
+ kstat_named_init(&dkp->dk_multircv, "multircv", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_brdcstrcv, "brdcstrcv", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_multixmt, "multixmt", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_brdcstxmt, "brdcstxmt", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_norcvbuf, "norcvbuf", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_ierrors, "ierrors", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_noxmtbuf, "noxmtbuf", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_oerrors, "oerrors", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_collisions, "collisions", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_rbytes, "rbytes", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_ipackets, "ipackets", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_obytes, "obytes", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_opackets, "opackets", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_rbytes64, "rbytes64", KSTAT_DATA_UINT64);
+ kstat_named_init(&dkp->dk_ipackets64, "ipackets64", KSTAT_DATA_UINT64);
+ kstat_named_init(&dkp->dk_obytes64, "obytes64", KSTAT_DATA_UINT64);
+ kstat_named_init(&dkp->dk_opackets64, "opackets64", KSTAT_DATA_UINT64);
+ kstat_named_init(&dkp->dk_link_state, "link_state", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_link_duplex, "link_duplex",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_unknowns, "unknowns", KSTAT_DATA_UINT32);
+ kstat_named_init(&dkp->dk_zonename, "zonename", KSTAT_DATA_STRING);
+
+ if (zone != NULL) {
+ kstat_named_setstr(&dkp->dk_zonename, zone->zone_name);
+ zone_rele(zone);
}
- kstat_named_init(knp++, "link_duplex", KSTAT_DATA_UINT32);
- kstat_named_init(knp, "unknowns", KSTAT_DATA_UINT32);
kstat_install(ksp);
*kspp = ksp;
return (0);
}
+
+void
+dls_stat_delete(kstat_t *ksp)
+{
+ void *data;
+ if (ksp != NULL) {
+ data = ksp->ks_data;
+ kstat_delete(ksp);
+ kmem_free(data, sizeof (dls_kstat_t));
+ }
+}
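
The switch to KSTAT_FLAG_VIRTUAL above changes who owns ks_data: the kstat
framework no longer allocates it, so the caller allocates it at create time
and, as dls_stat_delete() shows, frees it only after kstat_delete() has
returned. A condensed sketch of that lifecycle (kernel context assumed; one
stat shown; vstat_create/vstat_delete are illustrative names):

static kstat_t *
vstat_create(zoneid_t zoneid)
{
	kstat_t *ksp;
	dls_kstat_t *dkp;

	ksp = kstat_create_zone("link", 0, "net0", "net", KSTAT_TYPE_NAMED,
	    sizeof (dls_kstat_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL, zoneid);
	if (ksp == NULL)
		return (NULL);
	/* With KSTAT_FLAG_VIRTUAL, ks_data is ours to allocate and free. */
	dkp = ksp->ks_data = kmem_zalloc(sizeof (dls_kstat_t), KM_SLEEP);
	kstat_named_init(&dkp->dk_ifspeed, "ifspeed", KSTAT_DATA_UINT64);
	kstat_install(ksp);
	return (ksp);
}

static void
vstat_delete(kstat_t *ksp)
{
	void *data = ksp->ks_data;

	kstat_delete(ksp);	/* detach from the framework first */
	kmem_free(data, sizeof (dls_kstat_t));
}
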
diff --git a/usr/src/uts/common/io/dr_sas/THIRDPARTYLICENSE b/usr/src/uts/common/io/dr_sas/THIRDPARTYLICENSE
new file mode 100644
index 0000000000..00aefb6f51
--- /dev/null
+++ b/usr/src/uts/common/io/dr_sas/THIRDPARTYLICENSE
@@ -0,0 +1,32 @@
+/*
+ * MegaRAID device driver for SAS2.0 controllers
+ * Copyright (c) 2009, LSI Logic Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the author nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
diff --git a/usr/src/uts/common/io/dr_sas/THIRDPARTYLICENSE.descrip b/usr/src/uts/common/io/dr_sas/THIRDPARTYLICENSE.descrip
new file mode 100644
index 0000000000..ac6d2d1b15
--- /dev/null
+++ b/usr/src/uts/common/io/dr_sas/THIRDPARTYLICENSE.descrip
@@ -0,0 +1 @@
+DR_SAS DRIVER
diff --git a/usr/src/uts/common/io/dr_sas/dr_sas.c b/usr/src/uts/common/io/dr_sas/dr_sas.c
new file mode 100644
index 0000000000..5b1dc82938
--- /dev/null
+++ b/usr/src/uts/common/io/dr_sas/dr_sas.c
@@ -0,0 +1,5506 @@
+/*
+ * dr_sas.c: source for dr_sas driver
+ *
+ * MegaRAID device driver for SAS2.0 controllers
+ * Copyright (c) 2008-2009, LSI Logic Corporation.
+ * All rights reserved.
+ *
+ * Version:
+ * Author:
+ * Arun Chandrashekhar
+ * Manju R
+ * Rajesh Prabhakaran
+ * Seokmann Ju
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the author nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/file.h>
+#include <sys/errno.h>
+#include <sys/open.h>
+#include <sys/cred.h>
+#include <sys/modctl.h>
+#include <sys/conf.h>
+#include <sys/devops.h>
+#include <sys/cmn_err.h>
+#include <sys/kmem.h>
+#include <sys/stat.h>
+#include <sys/mkdev.h>
+#include <sys/pci.h>
+#include <sys/scsi/scsi.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/atomic.h>
+#include <sys/signal.h>
+#include <sys/fs/dv_node.h> /* devfs_clean */
+
+#include "dr_sas.h"
+
+/*
+ * FMA header files
+ */
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/fm/io/ddi.h>
+
+/*
+ * Local static data
+ */
+static void *drsas_state = NULL;
+static int debug_level_g = CL_NONE;
+
+#pragma weak scsi_hba_open
+#pragma weak scsi_hba_close
+#pragma weak scsi_hba_ioctl
+
+static ddi_dma_attr_t drsas_generic_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* low DMA address range */
+ 0xFFFFFFFFU, /* high DMA address range */
+ 0xFFFFFFFFU, /* DMA counter register */
+ 8, /* DMA address alignment */
+ 0x07, /* DMA burstsizes */
+ 1, /* min DMA size */
+ 0xFFFFFFFFU, /* max DMA size */
+ 0xFFFFFFFFU, /* segment boundary */
+ DRSAS_MAX_SGE_CNT, /* dma_attr_sglen */
+ 512, /* granularity of device */
+ 0 /* bus specific DMA flags */
+};
+
+int32_t drsas_max_cap_maxxfer = 0x1000000;
+
+/*
+ * cb_ops contains base level routines
+ */
+static struct cb_ops drsas_cb_ops = {
+ drsas_open, /* open */
+ drsas_close, /* close */
+ nodev, /* strategy */
+ nodev, /* print */
+ nodev, /* dump */
+ nodev, /* read */
+ nodev, /* write */
+ drsas_ioctl, /* ioctl */
+ nodev, /* devmap */
+ nodev, /* mmap */
+ nodev, /* segmap */
+ nochpoll, /* poll */
+ nodev, /* cb_prop_op */
+ 0, /* streamtab */
+ D_NEW | D_HOTPLUG, /* cb_flag */
+ CB_REV, /* cb_rev */
+ nodev, /* cb_aread */
+ nodev /* cb_awrite */
+};
+
+/*
+ * dev_ops contains configuration routines
+ */
+static struct dev_ops drsas_ops = {
+ DEVO_REV, /* rev, */
+ 0, /* refcnt */
+ drsas_getinfo, /* getinfo */
+ nulldev, /* identify */
+ nulldev, /* probe */
+ drsas_attach, /* attach */
+ drsas_detach, /* detach */
+ drsas_reset, /* reset */
+ &drsas_cb_ops, /* char/block ops */
+ NULL, /* bus ops */
+ NULL, /* power */
+ ddi_quiesce_not_supported, /* quiesce */
+};
+
+char _depends_on[] = "misc/scsi";
+
+static struct modldrv modldrv = {
+ &mod_driverops, /* module type - driver */
+ DRSAS_VERSION,
+ &drsas_ops, /* driver ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, /* ml_rev - must be MODREV_1 */
+ &modldrv, /* ml_linkage */
+ NULL /* end of driver linkage */
+};
+
+static struct ddi_device_acc_attr endian_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+
+/*
+ * ************************************************************************** *
+ * *
+ * common entry points - for loadable kernel modules *
+ * *
+ * ************************************************************************** *
+ */
+
+int
+_init(void)
+{
+ int ret;
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ ret = ddi_soft_state_init(&drsas_state,
+ sizeof (struct drsas_instance), 0);
+
+ if (ret != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN, "dr_sas: could not init state"));
+ return (ret);
+ }
+
+ if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN, "dr_sas: could not init scsi hba"));
+ ddi_soft_state_fini(&drsas_state);
+ return (ret);
+ }
+
+ ret = mod_install(&modlinkage);
+
+ if (ret != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN, "dr_sas: mod_install failed"));
+ scsi_hba_fini(&modlinkage);
+ ddi_soft_state_fini(&drsas_state);
+ }
+
+ return (ret);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ return (mod_info(&modlinkage, modinfop));
+}
+
+int
+_fini(void)
+{
+ int ret;
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS)
+ return (ret);
+
+ scsi_hba_fini(&modlinkage);
+
+ ddi_soft_state_fini(&drsas_state);
+
+ return (ret);
+}
+
+
+/*
+ * ************************************************************************** *
+ * *
+ * common entry points - for autoconfiguration *
+ * *
+ * ************************************************************************** *
+ */
+
+static int
+drsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int instance_no;
+ int nregs;
+ uint8_t added_isr_f = 0;
+ uint8_t added_soft_isr_f = 0;
+ uint8_t create_devctl_node_f = 0;
+ uint8_t create_scsi_node_f = 0;
+ uint8_t create_ioc_node_f = 0;
+ uint8_t tran_alloc_f = 0;
+ uint8_t irq;
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint16_t subsysvid;
+ uint16_t subsysid;
+ uint16_t command;
+ off_t reglength = 0;
+ int intr_types = 0;
+ char *data;
+ int msi_enable = 0;
+
+ scsi_hba_tran_t *tran;
+ ddi_dma_attr_t tran_dma_attr;
+ struct drsas_instance *instance;
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ /* CONSTCOND */
+ ASSERT(NO_COMPETING_THREADS);
+
+ instance_no = ddi_get_instance(dip);
+
+ /*
+ * check to see whether this device is in a DMA-capable slot.
+ */
+ if (ddi_slaveonly(dip) == DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas%d: Device in slave-only slot, unused",
+ instance_no));
+ return (DDI_FAILURE);
+ }
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ con_log(CL_DLEVEL1, (CE_NOTE, "dr_sas: DDI_ATTACH"));
+ /* allocate the soft state for the instance */
+ if (ddi_soft_state_zalloc(drsas_state, instance_no)
+ != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas%d: Failed to allocate soft state",
+ instance_no));
+
+ return (DDI_FAILURE);
+ }
+
+ instance = (struct drsas_instance *)ddi_get_soft_state
+ (drsas_state, instance_no);
+
+ if (instance == NULL) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas%d: Bad soft state", instance_no));
+
+ ddi_soft_state_free(drsas_state, instance_no);
+
+ return (DDI_FAILURE);
+ }
+
+ bzero((caddr_t)instance,
+ sizeof (struct drsas_instance));
+
+ instance->func_ptr = kmem_zalloc(
+ sizeof (struct drsas_func_ptr), KM_SLEEP);
+ ASSERT(instance->func_ptr);
+
+ /* Setup the PCI configuration space handles */
+ if (pci_config_setup(dip, &instance->pci_handle) !=
+ DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas%d: pci config setup failed ",
+ instance_no));
+
+ kmem_free(instance->func_ptr,
+ sizeof (struct drsas_func_ptr));
+ ddi_soft_state_free(drsas_state, instance_no);
+
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas: failed to get registers."));
+
+ pci_config_teardown(&instance->pci_handle);
+ kmem_free(instance->func_ptr,
+ sizeof (struct drsas_func_ptr));
+ ddi_soft_state_free(drsas_state, instance_no);
+
+ return (DDI_FAILURE);
+ }
+
+ vendor_id = pci_config_get16(instance->pci_handle,
+ PCI_CONF_VENID);
+ device_id = pci_config_get16(instance->pci_handle,
+ PCI_CONF_DEVID);
+
+ subsysvid = pci_config_get16(instance->pci_handle,
+ PCI_CONF_SUBVENID);
+ subsysid = pci_config_get16(instance->pci_handle,
+ PCI_CONF_SUBSYSID);
+
+ pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
+ (pci_config_get16(instance->pci_handle,
+ PCI_CONF_COMM) | PCI_COMM_ME));
+ irq = pci_config_get8(instance->pci_handle,
+ PCI_CONF_ILINE);
+
+ con_log(CL_DLEVEL1, (CE_CONT, "dr_sas%d: "
+ "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
+ instance_no, vendor_id, device_id, subsysvid,
+ subsysid, irq, DRSAS_VERSION));
+
+ /* enable bus-mastering */
+ command = pci_config_get16(instance->pci_handle,
+ PCI_CONF_COMM);
+
+ if (!(command & PCI_COMM_ME)) {
+ command |= PCI_COMM_ME;
+
+ pci_config_put16(instance->pci_handle,
+ PCI_CONF_COMM, command);
+
+ con_log(CL_ANN, (CE_CONT, "dr_sas%d: "
+ "enable bus-mastering", instance_no));
+ } else {
+ con_log(CL_DLEVEL1, (CE_CONT, "dr_sas%d: "
+ "bus-mastering already set", instance_no));
+ }
+
+ /* initialize function pointers */
+ if ((device_id == PCI_DEVICE_ID_LSI_2108VDE) ||
+ (device_id == PCI_DEVICE_ID_LSI_2108V)) {
+ con_log(CL_DLEVEL1, (CE_CONT, "dr_sas%d: "
+ "2108V/DE detected", instance_no));
+ instance->func_ptr->read_fw_status_reg =
+ read_fw_status_reg_ppc;
+ instance->func_ptr->issue_cmd = issue_cmd_ppc;
+ instance->func_ptr->issue_cmd_in_sync_mode =
+ issue_cmd_in_sync_mode_ppc;
+ instance->func_ptr->issue_cmd_in_poll_mode =
+ issue_cmd_in_poll_mode_ppc;
+ instance->func_ptr->enable_intr =
+ enable_intr_ppc;
+ instance->func_ptr->disable_intr =
+ disable_intr_ppc;
+ instance->func_ptr->intr_ack = intr_ack_ppc;
+ } else {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas: Invalid device detected"));
+
+ pci_config_teardown(&instance->pci_handle);
+ kmem_free(instance->func_ptr,
+ sizeof (struct drsas_func_ptr));
+ ddi_soft_state_free(drsas_state, instance_no);
+
+ return (DDI_FAILURE);
+ }
+
+ instance->baseaddress = pci_config_get32(
+ instance->pci_handle, PCI_CONF_BASE0);
+ instance->baseaddress &= 0x0fffc;
+
+ instance->dip = dip;
+ instance->vendor_id = vendor_id;
+ instance->device_id = device_id;
+ instance->subsysvid = subsysvid;
+ instance->subsysid = subsysid;
+ instance->instance = instance_no;
+
+ /* Initialize FMA */
+ instance->fm_capabilities = ddi_prop_get_int(
+ DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
+ "fm-capable", DDI_FM_EREPORT_CAPABLE |
+ DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
+ | DDI_FM_ERRCB_CAPABLE);
+
+ drsas_fm_init(instance);
+
+ /* Initialize Interrupts */
+ if ((ddi_dev_regsize(instance->dip,
+ REGISTER_SET_IO_2108, &reglength) != DDI_SUCCESS) ||
+ reglength < MINIMUM_MFI_MEM_SZ) {
+ return (DDI_FAILURE);
+ }
+ if (reglength > DEFAULT_MFI_MEM_SZ) {
+ reglength = DEFAULT_MFI_MEM_SZ;
+ con_log(CL_DLEVEL1, (CE_NOTE,
+ "dr_sas: register length to map is "
+ "0x%lx bytes", reglength));
+ }
+ if (ddi_regs_map_setup(instance->dip,
+ REGISTER_SET_IO_2108, &instance->regmap, 0,
+ reglength, &endian_attr, &instance->regmap_handle)
+ != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_NOTE,
+ "dr_sas: couldn't map control registers"));
+ goto fail_attach;
+ }
+
+ /*
+ * Disable Interrupt Now.
+ * Setup Software interrupt
+ */
+ instance->func_ptr->disable_intr(instance);
+
+ msi_enable = 0;
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
+ "drsas-enable-msi", &data) == DDI_SUCCESS) {
+ if (strncmp(data, "yes", 3) == 0) {
+ msi_enable = 1;
+ con_log(CL_ANN, (CE_WARN,
+ "msi_enable = %d ENABLED",
+ msi_enable));
+ }
+ ddi_prop_free(data);
+ }
+
+ con_log(CL_DLEVEL1, (CE_WARN, "msi_enable = %d",
+ msi_enable));
+
+ /* Check for all supported interrupt types */
+ if (ddi_intr_get_supported_types(
+ dip, &intr_types) != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN,
+ "ddi_intr_get_supported_types() failed"));
+ goto fail_attach;
+ }
+
+ con_log(CL_DLEVEL1, (CE_NOTE,
+ "ddi_intr_get_supported_types() ret: 0x%x",
+ intr_types));
+
+ /* Initialize and Setup Interrupt handler */
+ if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
+ if (drsas_add_intrs(instance,
+ DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN,
+ "MSIX interrupt query failed"));
+ goto fail_attach;
+ }
+ instance->intr_type = DDI_INTR_TYPE_MSIX;
+ } else if (msi_enable && (intr_types &
+ DDI_INTR_TYPE_MSI)) {
+ if (drsas_add_intrs(instance,
+ DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN,
+ "MSI interrupt query failed"));
+ goto fail_attach;
+ }
+ instance->intr_type = DDI_INTR_TYPE_MSI;
+ } else if (intr_types & DDI_INTR_TYPE_FIXED) {
+ msi_enable = 0;
+ if (drsas_add_intrs(instance,
+ DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN,
+ "FIXED interrupt query failed"));
+ goto fail_attach;
+ }
+ instance->intr_type = DDI_INTR_TYPE_FIXED;
+ } else {
+ con_log(CL_ANN, (CE_WARN, "Device cannot "
+ "suppport either FIXED or MSI/X "
+ "interrupts"));
+ goto fail_attach;
+ }
+
+ added_isr_f = 1;
+
+ /* setup the mfi based low level driver */
+ if (init_mfi(instance) != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN, "dr_sas: "
+ "could not initialize the low level driver"));
+
+ goto fail_attach;
+ }
+
+ /* Initialize all Mutex */
+ INIT_LIST_HEAD(&instance->completed_pool_list);
+ mutex_init(&instance->completed_pool_mtx,
+ "completed_pool_mtx", MUTEX_DRIVER,
+ DDI_INTR_PRI(instance->intr_pri));
+
+ mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
+ MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
+ cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
+
+ mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx",
+ MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
+
+ /* Register our soft-isr for highlevel interrupts. */
+ instance->isr_level = instance->intr_pri;
+ if (instance->isr_level == HIGH_LEVEL_INTR) {
+ if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH,
+ &instance->soft_intr_id, NULL, NULL,
+ drsas_softintr, (caddr_t)instance) !=
+ DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN,
+ " Software ISR did not register"));
+
+ goto fail_attach;
+ }
+
+ added_soft_isr_f = 1;
+ }
+
+ /* Allocate a transport structure */
+ tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
+
+ if (tran == NULL) {
+ con_log(CL_ANN, (CE_WARN,
+ "scsi_hba_tran_alloc failed"));
+ goto fail_attach;
+ }
+
+ tran_alloc_f = 1;
+
+ instance->tran = tran;
+
+ tran->tran_hba_private = instance;
+ tran->tran_tgt_init = drsas_tran_tgt_init;
+ tran->tran_tgt_probe = scsi_hba_probe;
+ tran->tran_tgt_free = drsas_tran_tgt_free;
+ tran->tran_init_pkt = drsas_tran_init_pkt;
+ tran->tran_start = drsas_tran_start;
+ tran->tran_abort = drsas_tran_abort;
+ tran->tran_reset = drsas_tran_reset;
+ tran->tran_getcap = drsas_tran_getcap;
+ tran->tran_setcap = drsas_tran_setcap;
+ tran->tran_destroy_pkt = drsas_tran_destroy_pkt;
+ tran->tran_dmafree = drsas_tran_dmafree;
+ tran->tran_sync_pkt = drsas_tran_sync_pkt;
+ tran->tran_bus_config = drsas_tran_bus_config;
+
+ tran_dma_attr = drsas_generic_dma_attr;
+ tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
+
+ /* Attach this instance of the hba */
+ if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
+ != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN,
+ "scsi_hba_attach failed"));
+
+ goto fail_attach;
+ }
+
+ /* create devctl node for cfgadm command */
+ if (ddi_create_minor_node(dip, "devctl",
+ S_IFCHR, INST2DEVCTL(instance_no),
+ DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas: failed to create devctl node."));
+
+ goto fail_attach;
+ }
+
+ create_devctl_node_f = 1;
+
+ /* create scsi node for cfgadm command */
+ if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
+ INST2SCSI(instance_no),
+ DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
+ DDI_FAILURE) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas: failed to create scsi node."));
+
+ goto fail_attach;
+ }
+
+ create_scsi_node_f = 1;
+
+ (void) sprintf(instance->iocnode, "%d:lsirdctl",
+ instance_no);
+
+ /*
+ * Create a node for applications
+ * for issuing ioctl to the driver.
+ */
+ if (ddi_create_minor_node(dip, instance->iocnode,
+ S_IFCHR, INST2LSIRDCTL(instance_no),
+ DDI_PSEUDO, 0) == DDI_FAILURE) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas: failed to create ioctl node."));
+
+ goto fail_attach;
+ }
+
+ create_ioc_node_f = 1;
+
+ /* Create a taskq to handle dr events */
+ if ((instance->taskq = ddi_taskq_create(dip,
+ "drsas_dr_taskq", 1,
+ TASKQ_DEFAULTPRI, 0)) == NULL) {
+ con_log(CL_ANN, (CE_WARN,
+		    "dr_sas: failed to create taskq"));
+ instance->taskq = NULL;
+ goto fail_attach;
+ }
+
+ /* enable interrupt */
+ instance->func_ptr->enable_intr(instance);
+
+ /* initiate AEN */
+ if (start_mfi_aen(instance)) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas: failed to initiate AEN."));
+ goto fail_initiate_aen;
+ }
+
+ con_log(CL_DLEVEL1, (CE_NOTE,
+ "AEN started for instance %d.", instance_no));
+
+ /* Finally! We are on the air. */
+ ddi_report_dev(dip);
+
+ if (drsas_check_acc_handle(instance->regmap_handle) !=
+ DDI_SUCCESS) {
+ goto fail_attach;
+ }
+ if (drsas_check_acc_handle(instance->pci_handle) !=
+ DDI_SUCCESS) {
+ goto fail_attach;
+ }
+ instance->dr_ld_list =
+ kmem_zalloc(MRDRV_MAX_LD * sizeof (struct drsas_ld),
+ KM_SLEEP);
+ break;
+ case DDI_PM_RESUME:
+ con_log(CL_ANN, (CE_NOTE,
+ "dr_sas: DDI_PM_RESUME"));
+ break;
+ case DDI_RESUME:
+ con_log(CL_ANN, (CE_NOTE,
+ "dr_sas: DDI_RESUME"));
+ break;
+ default:
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas: invalid attach cmd=%x", cmd));
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+
+fail_initiate_aen:
+fail_attach:
+ if (create_devctl_node_f) {
+ ddi_remove_minor_node(dip, "devctl");
+ }
+
+ if (create_scsi_node_f) {
+ ddi_remove_minor_node(dip, "scsi");
+ }
+
+ if (create_ioc_node_f) {
+ ddi_remove_minor_node(dip, instance->iocnode);
+ }
+
+ if (tran_alloc_f) {
+ scsi_hba_tran_free(tran);
+ }
+
+ if (added_soft_isr_f) {
+ ddi_remove_softintr(instance->soft_intr_id);
+ }
+
+ if (added_isr_f) {
+ drsas_rem_intrs(instance);
+ }
+
+ if (instance && instance->taskq) {
+ ddi_taskq_destroy(instance->taskq);
+ }
+
+ drsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+
+ drsas_fm_fini(instance);
+
+ pci_config_teardown(&instance->pci_handle);
+
+ ddi_soft_state_free(drsas_state, instance_no);
+
+ con_log(CL_ANN, (CE_NOTE,
+ "dr_sas: return failure from drsas_attach"));
+
+ return (DDI_FAILURE);
+}
+
+/*ARGSUSED*/
+static int
+drsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
+{
+ int rval;
+ int drsas_minor = getminor((dev_t)arg);
+
+ struct drsas_instance *instance;
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ switch (cmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ instance = (struct drsas_instance *)
+ ddi_get_soft_state(drsas_state,
+ MINOR2INST(drsas_minor));
+
+ if (instance == NULL) {
+ *resultp = NULL;
+ rval = DDI_FAILURE;
+ } else {
+ *resultp = instance->dip;
+ rval = DDI_SUCCESS;
+ }
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+		*resultp = (void *)(intptr_t)MINOR2INST(drsas_minor);
+ rval = DDI_SUCCESS;
+ break;
+ default:
+ *resultp = NULL;
+ rval = DDI_FAILURE;
+ }
+
+ return (rval);
+}
+
+static int
+drsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int instance_no;
+
+ struct drsas_instance *instance;
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ /* CONSTCOND */
+ ASSERT(NO_COMPETING_THREADS);
+
+ instance_no = ddi_get_instance(dip);
+
+ instance = (struct drsas_instance *)ddi_get_soft_state(drsas_state,
+ instance_no);
+
+ if (!instance) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas:%d could not get instance in detach",
+ instance_no));
+
+ return (DDI_FAILURE);
+ }
+
+ con_log(CL_ANN, (CE_NOTE,
+ "dr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
+ instance_no, instance->vendor_id, instance->device_id,
+ instance->subsysvid, instance->subsysid));
+
+ switch (cmd) {
+ case DDI_DETACH:
+ con_log(CL_ANN, (CE_NOTE,
+ "drsas_detach: DDI_DETACH"));
+
+ if (scsi_hba_detach(dip) != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas:%d failed to detach",
+ instance_no));
+
+ return (DDI_FAILURE);
+ }
+
+ scsi_hba_tran_free(instance->tran);
+
+ flush_cache(instance);
+
+ if (abort_aen_cmd(instance, instance->aen_cmd)) {
+ con_log(CL_ANN, (CE_WARN, "drsas_detach: "
+			    "failed to abort previous AEN command"));
+
+ return (DDI_FAILURE);
+ }
+
+ instance->func_ptr->disable_intr(instance);
+
+ if (instance->isr_level == HIGH_LEVEL_INTR) {
+ ddi_remove_softintr(instance->soft_intr_id);
+ }
+
+ drsas_rem_intrs(instance);
+
+ if (instance->taskq) {
+ ddi_taskq_destroy(instance->taskq);
+ }
+ kmem_free(instance->dr_ld_list, MRDRV_MAX_LD
+ * sizeof (struct drsas_ld));
+ free_space_for_mfi(instance);
+
+ drsas_fm_fini(instance);
+
+ pci_config_teardown(&instance->pci_handle);
+
+ kmem_free(instance->func_ptr,
+ sizeof (struct drsas_func_ptr));
+
+ ddi_soft_state_free(drsas_state, instance_no);
+ break;
+ case DDI_PM_SUSPEND:
+ con_log(CL_ANN, (CE_NOTE,
+ "drsas_detach: DDI_PM_SUSPEND"));
+
+ break;
+ case DDI_SUSPEND:
+ con_log(CL_ANN, (CE_NOTE,
+ "drsas_detach: DDI_SUSPEND"));
+
+ break;
+ default:
+ con_log(CL_ANN, (CE_WARN,
+ "invalid detach command:0x%x", cmd));
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * ************************************************************************** *
+ * *
+ * common entry points - for character driver types *
+ * *
+ * ************************************************************************** *
+ */
+static int
+drsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
+{
+ int rval = 0;
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ /* Check root permissions */
+ if (drv_priv(credp) != 0) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas: Non-root ioctl access denied!"));
+ return (EPERM);
+ }
+
+ /* Verify we are being opened as a character device */
+ if (otyp != OTYP_CHR) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas: ioctl node must be a char node"));
+ return (EINVAL);
+ }
+
+ if (ddi_get_soft_state(drsas_state, MINOR2INST(getminor(*dev)))
+ == NULL) {
+ return (ENXIO);
+ }
+
+ if (scsi_hba_open) {
+ rval = scsi_hba_open(dev, openflags, otyp, credp);
+ }
+
+ return (rval);
+}
+
+static int
+drsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
+{
+ int rval = 0;
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ /* no need for locks! */
+
+ if (scsi_hba_close) {
+ rval = scsi_hba_close(dev, openflags, otyp, credp);
+ }
+
+ return (rval);
+}
+
+static int
+drsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
+ int *rvalp)
+{
+ int rval = 0;
+
+ struct drsas_instance *instance;
+ struct drsas_ioctl *ioctl;
+ struct drsas_aen aen;
+ int i;
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ instance = ddi_get_soft_state(drsas_state, MINOR2INST(getminor(dev)));
+
+ if (instance == NULL) {
+ /* invalid minor number */
+ con_log(CL_ANN, (CE_WARN, "dr_sas: adapter not found."));
+ return (ENXIO);
+ }
+
+ ioctl = (struct drsas_ioctl *)kmem_zalloc(sizeof (struct drsas_ioctl),
+ KM_SLEEP);
+ ASSERT(ioctl);
+
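+	/*
+	 * DRSAS_IOCTL_FIRMWARE carries either a driver-private ioctl
+	 * (DRSAS_DRIVER_IOCTL_COMMON) or an MFI pass-through command;
+	 * DRSAS_IOCTL_AEN registers for asynchronous event notification.
+	 * Anything else is passed through to the SCSA framework.
+	 */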
+ switch ((uint_t)cmd) {
+ case DRSAS_IOCTL_FIRMWARE:
+ for (i = 0; i < sizeof (struct drsas_ioctl); i++) {
+ if (ddi_copyin((uint8_t *)arg+i,
+ (uint8_t *)ioctl+i, 1, mode)) {
+ con_log(CL_ANN, (CE_WARN, "drsas_ioctl "
+ "ERROR IOCTL copyin"));
+ kmem_free(ioctl,
+ sizeof (struct drsas_ioctl));
+ return (EFAULT);
+ }
+ }
+ if (ioctl->control_code == DRSAS_DRIVER_IOCTL_COMMON) {
+ rval = handle_drv_ioctl(instance, ioctl, mode);
+ } else {
+ rval = handle_mfi_ioctl(instance, ioctl, mode);
+ }
+ for (i = 0; i < sizeof (struct drsas_ioctl) - 1; i++) {
+ if (ddi_copyout((uint8_t *)ioctl+i,
+ (uint8_t *)arg+i, 1, mode)) {
+ con_log(CL_ANN, (CE_WARN,
+ "drsas_ioctl: ddi_copyout "
+ "failed"));
+ rval = 1;
+ break;
+ }
+ }
+
+ break;
+ case DRSAS_IOCTL_AEN:
+ for (i = 0; i < sizeof (struct drsas_aen); i++) {
+ if (ddi_copyin((uint8_t *)arg+i,
+ (uint8_t *)&aen+i, 1, mode)) {
+ con_log(CL_ANN, (CE_WARN,
+ "drsas_ioctl: "
+ "ERROR AEN copyin"));
+ kmem_free(ioctl,
+ sizeof (struct drsas_ioctl));
+ return (EFAULT);
+ }
+ }
+
+ rval = handle_mfi_aen(instance, &aen);
+ for (i = 0; i < sizeof (struct drsas_aen); i++) {
+ if (ddi_copyout((uint8_t *)&aen + i,
+ (uint8_t *)arg + i, 1, mode)) {
+ con_log(CL_ANN, (CE_WARN,
+ "drsas_ioctl: "
+ "ddi_copyout failed"));
+ rval = 1;
+ break;
+ }
+ }
+
+ break;
+ default:
+ rval = scsi_hba_ioctl(dev, cmd, arg,
+ mode, credp, rvalp);
+
+ con_log(CL_DLEVEL1, (CE_NOTE, "drsas_ioctl: "
+ "scsi_hba_ioctl called, ret = %x.", rval));
+ }
+
+ kmem_free(ioctl, sizeof (struct drsas_ioctl));
+ return (rval);
+}
+
+/*
+ * ************************************************************************** *
+ * *
+ * common entry points - for block driver types *
+ * *
+ * ************************************************************************** *
+ */
+/*ARGSUSED*/
+static int
+drsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
+{
+ int instance_no;
+
+ struct drsas_instance *instance;
+
+ instance_no = ddi_get_instance(dip);
+	instance = (struct drsas_instance *)ddi_get_soft_state(
+	    drsas_state, instance_no);
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ if (!instance) {
+ con_log(CL_ANN, (CE_WARN, "dr_sas:%d could not get adapter "
+ "in reset", instance_no));
+ return (DDI_FAILURE);
+ }
+
+ instance->func_ptr->disable_intr(instance);
+
+ con_log(CL_ANN1, (CE_NOTE, "flushing cache for instance %d",
+ instance_no));
+
+ flush_cache(instance);
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * ************************************************************************** *
+ * *
+ * entry points (SCSI HBA) *
+ * *
+ * ************************************************************************** *
+ */
+/*ARGSUSED*/
+static int
+drsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
+ scsi_hba_tran_t *tran, struct scsi_device *sd)
+{
+ struct drsas_instance *instance;
+ uint16_t tgt = sd->sd_address.a_target;
+ uint8_t lun = sd->sd_address.a_lun;
+
+ con_log(CL_ANN1, (CE_NOTE, "drsas_tgt_init target %d lun %d",
+ tgt, lun));
+
+ instance = ADDR2MR(&sd->sd_address);
+
+ if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
+ (void) ndi_merge_node(tgt_dip, drsas_name_node);
+ ddi_set_name_addr(tgt_dip, NULL);
+
+ con_log(CL_ANN1, (CE_NOTE, "drsas_tgt_init in "
+ "ndi_dev_is_persistent_node DDI_FAILURE t = %d l = %d",
+ tgt, lun));
+ return (DDI_FAILURE);
+ }
+
+	/* validate tgt before indexing dr_ld_list to avoid an OOB access */
+	if (tgt < MRDRV_MAX_LD && lun == 0) {
+		con_log(CL_ANN1, (CE_NOTE,
+		    "drsas_tgt_init dev_dip %p tgt_dip %p",
+		    (void *)instance->dr_ld_list[tgt].dip, (void *)tgt_dip));
+
+		if (instance->dr_ld_list[tgt].dip == NULL &&
+		    strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
+			instance->dr_ld_list[tgt].dip = tgt_dip;
+			instance->dr_ld_list[tgt].lun_type = DRSAS_LD_LUN;
+		}
+	}
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static void
+drsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
+ scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
+{
+ struct drsas_instance *instance;
+ int tgt = sd->sd_address.a_target;
+ int lun = sd->sd_address.a_lun;
+
+ instance = ADDR2MR(&sd->sd_address);
+
+ con_log(CL_ANN1, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));
+
+ if (tgt < MRDRV_MAX_LD && lun == 0) {
+ if (instance->dr_ld_list[tgt].dip == tgt_dip) {
+ instance->dr_ld_list[tgt].dip = NULL;
+ }
+ }
+}
+
+static dev_info_t *
+drsas_find_child(struct drsas_instance *instance, uint16_t tgt, uint8_t lun)
+{
+ dev_info_t *child = NULL;
+ char addr[SCSI_MAXNAMELEN];
+ char tmp[MAXNAMELEN];
+
+ (void) sprintf(addr, "%x,%x", tgt, lun);
+ for (child = ddi_get_child(instance->dip); child;
+ child = ddi_get_next_sibling(child)) {
+
+ if (drsas_name_node(child, tmp, MAXNAMELEN) !=
+ DDI_SUCCESS) {
+ continue;
+ }
+
+ if (strcmp(addr, tmp) == 0) {
+ break;
+ }
+ }
+ con_log(CL_ANN1, (CE_NOTE, "drsas_find_child: return child = %p",
+ (void *)child));
+ return (child);
+}
+
+static int
+drsas_name_node(dev_info_t *dip, char *name, int len)
+{
+ int tgt, lun;
+
+ tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "target", -1);
+ con_log(CL_ANN1, (CE_NOTE,
+ "drsas_name_node: dip %p tgt %d", (void *)dip, tgt));
+ if (tgt == -1) {
+ return (DDI_FAILURE);
+ }
+ lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+ "lun", -1);
+ con_log(CL_ANN1,
+ (CE_NOTE, "drsas_name_node: tgt %d lun %d", tgt, lun));
+ if (lun == -1) {
+ return (DDI_FAILURE);
+ }
+ (void) snprintf(name, len, "%x,%x", tgt, lun);
+ return (DDI_SUCCESS);
+}
+
+static struct scsi_pkt *
+drsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
+ struct buf *bp, int cmdlen, int statuslen, int tgtlen,
+ int flags, int (*callback)(), caddr_t arg)
+{
+ struct scsa_cmd *acmd;
+ struct drsas_instance *instance;
+ struct scsi_pkt *new_pkt;
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ instance = ADDR2MR(ap);
+
+ /* step #1 : pkt allocation */
+ if (pkt == NULL) {
+ pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
+ tgtlen, sizeof (struct scsa_cmd), callback, arg);
+ if (pkt == NULL) {
+ return (NULL);
+ }
+
+ acmd = PKT2CMD(pkt);
+
+ /*
+ * Initialize the new pkt - we redundantly initialize
+ * all the fields for illustrative purposes.
+ */
+ acmd->cmd_pkt = pkt;
+ acmd->cmd_flags = 0;
+ acmd->cmd_scblen = statuslen;
+ acmd->cmd_cdblen = cmdlen;
+ acmd->cmd_dmahandle = NULL;
+ acmd->cmd_ncookies = 0;
+ acmd->cmd_cookie = 0;
+ acmd->cmd_cookiecnt = 0;
+ acmd->cmd_nwin = 0;
+
+ pkt->pkt_address = *ap;
+ pkt->pkt_comp = (void (*)())NULL;
+ pkt->pkt_flags = 0;
+ pkt->pkt_time = 0;
+ pkt->pkt_resid = 0;
+ pkt->pkt_state = 0;
+ pkt->pkt_statistics = 0;
+ pkt->pkt_reason = 0;
+ new_pkt = pkt;
+ } else {
+ acmd = PKT2CMD(pkt);
+ new_pkt = NULL;
+ }
+
+ /* step #2 : dma allocation/move */
+ if (bp && bp->b_bcount != 0) {
+ if (acmd->cmd_dmahandle == NULL) {
+ if (drsas_dma_alloc(instance, pkt, bp, flags,
+ callback) == DDI_FAILURE) {
+ if (new_pkt) {
+ scsi_hba_pkt_free(ap, new_pkt);
+ }
+ return ((struct scsi_pkt *)NULL);
+ }
+ } else {
+ if (drsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
+ return ((struct scsi_pkt *)NULL);
+ }
+ }
+ }
+
+ return (pkt);
+}
+
+static int
+drsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
+{
+ uchar_t cmd_done = 0;
+
+ struct drsas_instance *instance = ADDR2MR(ap);
+ struct drsas_cmd *cmd;
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
+ __func__, __LINE__, pkt->pkt_cdbp[0]));
+
+ pkt->pkt_reason = CMD_CMPLT;
+ *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
+
+ cmd = build_cmd(instance, ap, pkt, &cmd_done);
+
+	/*
+	 * Check whether the command was already completed inside
+	 * build_cmd(); in that case cmd_done is set, no command
+	 * structure is returned, and the appropriate reason is provided
+	 * in the pkt_reason field.
+	 */
+ if (cmd_done) {
+ pkt->pkt_reason = CMD_CMPLT;
+ pkt->pkt_scbp[0] = STATUS_GOOD;
+ pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
+ | STATE_SENT_CMD;
+ if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
+ (*pkt->pkt_comp)(pkt);
+ }
+
+ return (TRAN_ACCEPT);
+ }
+
+ if (cmd == NULL) {
+ return (TRAN_BUSY);
+ }
+
+ if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
+ if (instance->fw_outstanding > instance->max_fw_cmds) {
+ con_log(CL_ANN, (CE_CONT, "dr_sas:Firmware busy"));
+ return_mfi_pkt(instance, cmd);
+ return (TRAN_BUSY);
+ }
+
+ /* Synchronize the Cmd frame for the controller */
+ (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
+ DDI_DMA_SYNC_FORDEV);
+
+ instance->func_ptr->issue_cmd(cmd, instance);
+
+ } else {
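+		/*
+		 * FLAG_NOINTR: issue the command in polled mode and
+		 * complete the packet right here instead of from the ISR.
+		 */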
+ struct drsas_header *hdr = &cmd->frame->hdr;
+
+ cmd->sync_cmd = DRSAS_TRUE;
+
+		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
+
+ pkt->pkt_reason = CMD_CMPLT;
+ pkt->pkt_statistics = 0;
+ pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
+
+ switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
+ &hdr->cmd_status)) {
+ case MFI_STAT_OK:
+ pkt->pkt_scbp[0] = STATUS_GOOD;
+ break;
+
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+
+ pkt->pkt_reason = CMD_CMPLT;
+ pkt->pkt_statistics = 0;
+
+ ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
+ break;
+
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ pkt->pkt_reason = CMD_DEV_GONE;
+ pkt->pkt_statistics = STAT_DISCON;
+ break;
+
+ default:
+ ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
+ }
+
+ return_mfi_pkt(instance, cmd);
+ (void) drsas_common_check(instance, cmd);
+
+ if (pkt->pkt_comp) {
+ (*pkt->pkt_comp)(pkt);
+ }
+
+ }
+
+ return (TRAN_ACCEPT);
+}
+
+/*ARGSUSED*/
+static int
+drsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
+{
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ /* abort command not supported by H/W */
+
+ return (DDI_FAILURE);
+}
+
+/*ARGSUSED*/
+static int
+drsas_tran_reset(struct scsi_address *ap, int level)
+{
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ /* reset command not supported by H/W */
+
+ return (DDI_FAILURE);
+
+}
+
+/*ARGSUSED*/
+static int
+drsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
+{
+ int rval = 0;
+
+ struct drsas_instance *instance = ADDR2MR(ap);
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ /* we do allow inquiring about capabilities for other targets */
+ if (cap == NULL) {
+ return (-1);
+ }
+
+ switch (scsi_hba_lookup_capstr(cap)) {
+ case SCSI_CAP_DMA_MAX:
+ /* Limit to 16MB max transfer */
+ rval = drsas_max_cap_maxxfer;
+ break;
+ case SCSI_CAP_MSG_OUT:
+ rval = 1;
+ break;
+ case SCSI_CAP_DISCONNECT:
+ rval = 0;
+ break;
+ case SCSI_CAP_SYNCHRONOUS:
+ rval = 0;
+ break;
+ case SCSI_CAP_WIDE_XFER:
+ rval = 1;
+ break;
+ case SCSI_CAP_TAGGED_QING:
+ rval = 1;
+ break;
+ case SCSI_CAP_UNTAGGED_QING:
+ rval = 1;
+ break;
+ case SCSI_CAP_PARITY:
+ rval = 1;
+ break;
+ case SCSI_CAP_INITIATOR_ID:
+ rval = instance->init_id;
+ break;
+ case SCSI_CAP_ARQ:
+ rval = 1;
+ break;
+ case SCSI_CAP_LINKED_CMDS:
+ rval = 0;
+ break;
+ case SCSI_CAP_RESET_NOTIFICATION:
+ rval = 1;
+ break;
+ case SCSI_CAP_GEOMETRY:
+		rval = -1;
+		break;
+ default:
+ con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
+ scsi_hba_lookup_capstr(cap)));
+ rval = -1;
+ break;
+ }
+
+ return (rval);
+}
+
+/*ARGSUSED*/
+static int
+drsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
+{
+ int rval = 1;
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ /* We don't allow setting capabilities for other targets */
+ if (cap == NULL || whom == 0) {
+ return (-1);
+ }
+
+ switch (scsi_hba_lookup_capstr(cap)) {
+ case SCSI_CAP_DMA_MAX:
+ case SCSI_CAP_MSG_OUT:
+ case SCSI_CAP_PARITY:
+ case SCSI_CAP_LINKED_CMDS:
+ case SCSI_CAP_RESET_NOTIFICATION:
+ case SCSI_CAP_DISCONNECT:
+ case SCSI_CAP_SYNCHRONOUS:
+ case SCSI_CAP_UNTAGGED_QING:
+ case SCSI_CAP_WIDE_XFER:
+ case SCSI_CAP_INITIATOR_ID:
+ case SCSI_CAP_ARQ:
+ /*
+ * None of these are settable via
+ * the capability interface.
+ */
+ break;
+	case SCSI_CAP_TAGGED_QING:
+	case SCSI_CAP_SECTOR_SIZE:
+	case SCSI_CAP_TOTAL_SECTORS:
+		rval = 1;
+		break;
+ default:
+ rval = -1;
+ break;
+ }
+
+ return (rval);
+}
+
+static void
+drsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
+{
+ struct scsa_cmd *acmd = PKT2CMD(pkt);
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ if (acmd->cmd_flags & CFLAG_DMAVALID) {
+ acmd->cmd_flags &= ~CFLAG_DMAVALID;
+
+ (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
+
+ ddi_dma_free_handle(&acmd->cmd_dmahandle);
+
+ acmd->cmd_dmahandle = NULL;
+ }
+
+ /* free the pkt */
+ scsi_hba_pkt_free(ap, pkt);
+}
+
+/*ARGSUSED*/
+static void
+drsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
+{
+ register struct scsa_cmd *acmd = PKT2CMD(pkt);
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ if (acmd->cmd_flags & CFLAG_DMAVALID) {
+ acmd->cmd_flags &= ~CFLAG_DMAVALID;
+
+ (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
+
+ ddi_dma_free_handle(&acmd->cmd_dmahandle);
+
+ acmd->cmd_dmahandle = NULL;
+ }
+}
+
+/*ARGSUSED*/
+static void
+drsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
+{
+ register struct scsa_cmd *acmd = PKT2CMD(pkt);
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ if (acmd->cmd_flags & CFLAG_DMAVALID) {
+ (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
+ acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
+ DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
+ }
+}
+
+/*
+ * drsas_isr(caddr_t)
+ *
+ * The Interrupt Service Routine
+ *
+ * Collect status for all completed commands and do callback
+ *
+ */
+static uint_t
+drsas_isr(struct drsas_instance *instance)
+{
+ int need_softintr;
+ uint32_t producer;
+ uint32_t consumer;
+ uint32_t context;
+
+ struct drsas_cmd *cmd;
+
+ con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
+
+ ASSERT(instance);
+ if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
+ !instance->func_ptr->intr_ack(instance)) {
+ return (DDI_INTR_UNCLAIMED);
+ }
+
+ (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
+ 0, 0, DDI_DMA_SYNC_FORCPU);
+
+ if (drsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
+ != DDI_SUCCESS) {
+ drsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+ return (DDI_INTR_UNCLAIMED);
+ }
+
+ producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
+ instance->producer);
+ consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
+ instance->consumer);
+
+ con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
+ producer, consumer));
+ if (producer == consumer) {
+ con_log(CL_ANN1, (CE_WARN, "producer = consumer case"));
+ return (DDI_INTR_UNCLAIMED);
+ }
+ mutex_enter(&instance->completed_pool_mtx);
+
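+	/*
+	 * Walk the reply queue from consumer to producer.  Each entry
+	 * holds the context (command index) of a completed command; the
+	 * queue has max_fw_cmds + 1 slots, hence the wrap-around below.
+	 */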
+ while (consumer != producer) {
+ context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
+ &instance->reply_queue[consumer]);
+ cmd = instance->cmd_list[context];
+ mlist_add_tail(&cmd->list, &instance->completed_pool_list);
+
+ consumer++;
+ if (consumer == (instance->max_fw_cmds + 1)) {
+ consumer = 0;
+ }
+ }
+
+ mutex_exit(&instance->completed_pool_mtx);
+
+ ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
+ instance->consumer, consumer);
+ (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
+ 0, 0, DDI_DMA_SYNC_FORDEV);
+
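+	/*
+	 * If the soft ISR is already draining the completed list there
+	 * is no need to trigger it again.
+	 */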
+ if (instance->softint_running) {
+ need_softintr = 0;
+ } else {
+ need_softintr = 1;
+ }
+
+ if (instance->isr_level == HIGH_LEVEL_INTR) {
+ if (need_softintr) {
+ ddi_trigger_softintr(instance->soft_intr_id);
+ }
+ } else {
+ /*
+ * Not a high-level interrupt, therefore call the soft level
+ * interrupt explicitly
+ */
+ (void) drsas_softintr(instance);
+ }
+
+ return (DDI_INTR_CLAIMED);
+}
+
+
+/*
+ * ************************************************************************** *
+ * *
+ * libraries *
+ * *
+ * ************************************************************************** *
+ */
+/*
+ * get_mfi_pkt : Get a command from the free pool
+ * After successful allocation, the caller of this routine
+ * must clear the frame buffer (memset to zero) before
+ * using the packet further.
+ *
+ * ***** Note *****
+ * After clearing the frame buffer the context id of the
+ * frame buffer SHOULD be restored.
+ */
+static struct drsas_cmd *
+get_mfi_pkt(struct drsas_instance *instance)
+{
+ mlist_t *head = &instance->cmd_pool_list;
+ struct drsas_cmd *cmd = NULL;
+
+ mutex_enter(&instance->cmd_pool_mtx);
+ ASSERT(mutex_owned(&instance->cmd_pool_mtx));
+
+ if (!mlist_empty(head)) {
+ cmd = mlist_entry(head->next, struct drsas_cmd, list);
+ mlist_del_init(head->next);
+ }
+ if (cmd != NULL)
+ cmd->pkt = NULL;
+ mutex_exit(&instance->cmd_pool_mtx);
+
+ return (cmd);
+}
+
+/*
+ * return_mfi_pkt : Return a cmd to free command pool
+ */
+static void
+return_mfi_pkt(struct drsas_instance *instance, struct drsas_cmd *cmd)
+{
+ mutex_enter(&instance->cmd_pool_mtx);
+ ASSERT(mutex_owned(&instance->cmd_pool_mtx));
+
+ mlist_add(&cmd->list, &instance->cmd_pool_list);
+
+ mutex_exit(&instance->cmd_pool_mtx);
+}
+
+/*
+ * destroy_mfi_frame_pool
+ */
+static void
+destroy_mfi_frame_pool(struct drsas_instance *instance)
+{
+ int i;
+ uint32_t max_cmd = instance->max_fw_cmds;
+
+ struct drsas_cmd *cmd;
+
+ /* return all frames to pool */
+ for (i = 0; i < max_cmd+1; i++) {
+
+ cmd = instance->cmd_list[i];
+
+ if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
+ (void) drsas_free_dma_obj(instance, cmd->frame_dma_obj);
+
+ cmd->frame_dma_obj_status = DMA_OBJ_FREED;
+ }
+}
+
+/*
+ * create_mfi_frame_pool
+ */
+static int
+create_mfi_frame_pool(struct drsas_instance *instance)
+{
+ int i = 0;
+ int cookie_cnt;
+ uint16_t max_cmd;
+ uint16_t sge_sz;
+ uint32_t sgl_sz;
+ uint32_t tot_frame_size;
+
+ struct drsas_cmd *cmd;
+
+ max_cmd = instance->max_fw_cmds;
+
+ sge_sz = sizeof (struct drsas_sge64);
+
+	/* calculate the number of 64-byte frames required for the SGL */
+ sgl_sz = sge_sz * instance->max_num_sge;
+ tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;
+
+ con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
+ "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
+
+ while (i < max_cmd+1) {
+ cmd = instance->cmd_list[i];
+
+ cmd->frame_dma_obj.size = tot_frame_size;
+ cmd->frame_dma_obj.dma_attr = drsas_generic_dma_attr;
+ cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
+ cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
+ cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
+ cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
+
+ cookie_cnt = drsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
+ (uchar_t)DDI_STRUCTURE_LE_ACC);
+
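+		/* each frame must fit in one physically contiguous cookie */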
+ if (cookie_cnt == -1 || cookie_cnt > 1) {
+ con_log(CL_ANN, (CE_WARN,
+ "create_mfi_frame_pool: could not alloc."));
+ return (DDI_FAILURE);
+ }
+
+ bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
+
+ cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
+ cmd->frame = (union drsas_frame *)cmd->frame_dma_obj.buffer;
+ cmd->frame_phys_addr =
+ cmd->frame_dma_obj.dma_cookie[0].dmac_address;
+
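+		/*
+		 * The sense buffer occupies the tail SENSE_LENGTH bytes of
+		 * the frame allocation; the 64-byte MFI frame and its SGL
+		 * space come first.
+		 */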
+ cmd->sense = (uint8_t *)(((unsigned long)
+ cmd->frame_dma_obj.buffer) +
+ tot_frame_size - SENSE_LENGTH);
+ cmd->sense_phys_addr =
+ cmd->frame_dma_obj.dma_cookie[0].dmac_address +
+ tot_frame_size - SENSE_LENGTH;
+
+ if (!cmd->frame || !cmd->sense) {
+ con_log(CL_ANN, (CE_NOTE,
+ "dr_sas: pci_pool_alloc failed"));
+
+ return (ENOMEM);
+ }
+
+ ddi_put32(cmd->frame_dma_obj.acc_handle,
+ &cmd->frame->io.context, cmd->index);
+ i++;
+
+ con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
+ cmd->index, cmd->frame_phys_addr));
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * free_additional_dma_buffer
+ */
+static void
+free_additional_dma_buffer(struct drsas_instance *instance)
+{
+ if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
+ (void) drsas_free_dma_obj(instance,
+ instance->mfi_internal_dma_obj);
+ instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
+ }
+
+ if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
+ (void) drsas_free_dma_obj(instance,
+ instance->mfi_evt_detail_obj);
+ instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
+ }
+}
+
+/*
+ * alloc_additional_dma_buffer
+ */
+static int
+alloc_additional_dma_buffer(struct drsas_instance *instance)
+{
+ uint32_t reply_q_sz;
+ uint32_t internal_buf_size = PAGESIZE*2;
+
+	/* one reply slot per cmd plus one, plus producer & consumer indexes */
+ reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
+
+ instance->mfi_internal_dma_obj.size = internal_buf_size;
+ instance->mfi_internal_dma_obj.dma_attr = drsas_generic_dma_attr;
+ instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
+ instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
+ 0xFFFFFFFFU;
+ instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
+
+ if (drsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
+ (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas: could not alloc reply queue"));
+ return (DDI_FAILURE);
+ }
+
+ bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
+
+ instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
+
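+	/*
+	 * Carve up the internal buffer: a 4-byte producer index, a 4-byte
+	 * consumer index, the reply queue itself, and whatever remains as
+	 * a general-purpose internal buffer.
+	 */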
+ instance->producer = (uint32_t *)((unsigned long)
+ instance->mfi_internal_dma_obj.buffer);
+ instance->consumer = (uint32_t *)((unsigned long)
+ instance->mfi_internal_dma_obj.buffer + 4);
+ instance->reply_queue = (uint32_t *)((unsigned long)
+ instance->mfi_internal_dma_obj.buffer + 8);
+ instance->internal_buf = (caddr_t)(((unsigned long)
+ instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
+ instance->internal_buf_dmac_add =
+ instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
+ (reply_q_sz + 8);
+ instance->internal_buf_size = internal_buf_size -
+ (reply_q_sz + 8);
+
+ /* allocate evt_detail */
+ instance->mfi_evt_detail_obj.size = sizeof (struct drsas_evt_detail);
+ instance->mfi_evt_detail_obj.dma_attr = drsas_generic_dma_attr;
+ instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
+ instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
+ instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
+ instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
+
+ if (drsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
+ (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
+ con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
+ "could not allocate data transfer buffer."));
+ return (DDI_FAILURE);
+ }
+
+ bzero(instance->mfi_evt_detail_obj.buffer,
+ sizeof (struct drsas_evt_detail));
+
+ instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * free_space_for_mfi
+ */
+static void
+free_space_for_mfi(struct drsas_instance *instance)
+{
+ int i;
+ uint32_t max_cmd = instance->max_fw_cmds;
+
+ /* already freed */
+ if (instance->cmd_list == NULL) {
+ return;
+ }
+
+ free_additional_dma_buffer(instance);
+
+ /* first free the MFI frame pool */
+ destroy_mfi_frame_pool(instance);
+
+ /* free all the commands in the cmd_list */
+ for (i = 0; i < instance->max_fw_cmds+1; i++) {
+ kmem_free(instance->cmd_list[i],
+ sizeof (struct drsas_cmd));
+
+ instance->cmd_list[i] = NULL;
+ }
+
+ /* free the cmd_list buffer itself */
+ kmem_free(instance->cmd_list,
+ sizeof (struct drsas_cmd *) * (max_cmd+1));
+
+ instance->cmd_list = NULL;
+
+ INIT_LIST_HEAD(&instance->cmd_pool_list);
+}
+
+/*
+ * alloc_space_for_mfi
+ */
+static int
+alloc_space_for_mfi(struct drsas_instance *instance)
+{
+ int i;
+ uint32_t max_cmd;
+ size_t sz;
+
+ struct drsas_cmd *cmd;
+
+ max_cmd = instance->max_fw_cmds;
+
+ /* reserve 1 more slot for flush_cache */
+ sz = sizeof (struct drsas_cmd *) * (max_cmd+1);
+
+ /*
+ * instance->cmd_list is an array of struct drsas_cmd pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
+ */
+ instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
+ ASSERT(instance->cmd_list);
+
+ for (i = 0; i < max_cmd+1; i++) {
+ instance->cmd_list[i] = kmem_zalloc(sizeof (struct drsas_cmd),
+ KM_SLEEP);
+ ASSERT(instance->cmd_list[i]);
+ }
+
+ INIT_LIST_HEAD(&instance->cmd_pool_list);
+
+ /* add all the commands to command pool (instance->cmd_pool) */
+ for (i = 0; i < max_cmd; i++) {
+ cmd = instance->cmd_list[i];
+ cmd->index = i;
+
+ mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
+ }
+
+	/* the single slot reserved for flush_cache is not added to the pool */
+ cmd = instance->cmd_list[max_cmd];
+ cmd->index = i;
+
+ /* create a frame pool and assign one frame to each cmd */
+ if (create_mfi_frame_pool(instance)) {
+ con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool"));
+ return (DDI_FAILURE);
+ }
+
+	/* allocate the additional DMA buffers (reply queue, event detail) */
+	if (alloc_additional_dma_buffer(instance)) {
+		con_log(CL_ANN, (CE_NOTE,
+		    "error allocating additional DMA buffers"));
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * get_ctrl_info
+ */
+static int
+get_ctrl_info(struct drsas_instance *instance,
+ struct drsas_ctrl_info *ctrl_info)
+{
+ int ret = 0;
+
+ struct drsas_cmd *cmd;
+ struct drsas_dcmd_frame *dcmd;
+ struct drsas_ctrl_info *ci;
+
+ cmd = get_mfi_pkt(instance);
+
+ if (!cmd) {
+ con_log(CL_ANN, (CE_WARN,
+ "Failed to get a cmd for ctrl info"));
+ return (DDI_FAILURE);
+ }
+ /* Clear the frame buffer and assign back the context id */
+ (void) memset((char *)&cmd->frame[0], 0, sizeof (union drsas_frame));
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
+ cmd->index);
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = (struct drsas_ctrl_info *)instance->internal_buf;
+
+ if (!ci) {
+ con_log(CL_ANN, (CE_WARN,
+ "Failed to alloc mem for ctrl info"));
+ return_mfi_pkt(instance, cmd);
+ return (DDI_FAILURE);
+ }
+
+ (void) memset(ci, 0, sizeof (struct drsas_ctrl_info));
+
+ (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
+
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
+ MFI_CMD_STATUS_POLL_MODE);
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
+ ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
+ MFI_FRAME_DIR_READ);
+ ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
+ sizeof (struct drsas_ctrl_info));
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
+ DR_DCMD_CTRL_GET_INFO);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
+ instance->internal_buf_dmac_add);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
+ sizeof (struct drsas_ctrl_info));
+
+ cmd->frame_count = 1;
+
+ if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
+ ret = 0;
+ ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
+ (uint8_t *)ctrl_info, (uint8_t *)ci,
+ sizeof (struct drsas_ctrl_info), DDI_DEV_AUTOINCR);
+ } else {
+ con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed"));
+ ret = -1;
+ }
+
+ return_mfi_pkt(instance, cmd);
+ if (drsas_common_check(instance, cmd) != DDI_SUCCESS) {
+ ret = -1;
+ }
+
+ return (ret);
+}
+
+/*
+ * abort_aen_cmd
+ */
+static int
+abort_aen_cmd(struct drsas_instance *instance,
+ struct drsas_cmd *cmd_to_abort)
+{
+ int ret = 0;
+
+ struct drsas_cmd *cmd;
+ struct drsas_abort_frame *abort_fr;
+
+ cmd = get_mfi_pkt(instance);
+
+ if (!cmd) {
+ con_log(CL_ANN, (CE_WARN,
+ "Failed to get a cmd for ctrl info"));
+ return (DDI_FAILURE);
+ }
+ /* Clear the frame buffer and assign back the context id */
+ (void) memset((char *)&cmd->frame[0], 0, sizeof (union drsas_frame));
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
+ cmd->index);
+
+ abort_fr = &cmd->frame->abort;
+
+ /* prepare and issue the abort frame */
+ ddi_put8(cmd->frame_dma_obj.acc_handle,
+ &abort_fr->cmd, MFI_CMD_OP_ABORT);
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
+ MFI_CMD_STATUS_SYNC_MODE);
+ ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
+ cmd_to_abort->index);
+ ddi_put32(cmd->frame_dma_obj.acc_handle,
+ &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
+ ddi_put32(cmd->frame_dma_obj.acc_handle,
+ &abort_fr->abort_mfi_phys_addr_hi, 0);
+
+ instance->aen_cmd->abort_aen = 1;
+
+ cmd->sync_cmd = DRSAS_TRUE;
+ cmd->frame_count = 1;
+
+ if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
+ con_log(CL_ANN, (CE_WARN,
+ "abort_aen_cmd: issue_cmd_in_sync_mode failed"));
+ ret = -1;
+ } else {
+ ret = 0;
+ }
+
+	instance->aen_cmd = NULL;
+
+ return_mfi_pkt(instance, cmd);
+ (void) drsas_common_check(instance, cmd);
+
+ return (ret);
+}
+
+/*
+ * init_mfi
+ */
+static int
+init_mfi(struct drsas_instance *instance)
+{
+ struct drsas_cmd *cmd;
+ struct drsas_ctrl_info ctrl_info;
+ struct drsas_init_frame *init_frame;
+ struct drsas_init_queue_info *initq_info;
+
+ /* we expect the FW state to be READY */
+ if (mfi_state_transition_to_ready(instance)) {
+ con_log(CL_ANN, (CE_WARN, "dr_sas: F/W is not ready"));
+ goto fail_ready_state;
+ }
+
+ /* get various operational parameters from status register */
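+	/* bits 16..23: max SGE count; bits 0..15: max outstanding commands */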
+ instance->max_num_sge =
+ (instance->func_ptr->read_fw_status_reg(instance) &
+ 0xFF0000) >> 0x10;
+ /*
+ * Reduce the max supported cmds by 1. This is to ensure that the
+ * reply_q_sz (1 more than the max cmd that driver may send)
+ * does not exceed max cmds that the FW can support
+ */
+ instance->max_fw_cmds =
+ instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
+ instance->max_fw_cmds = instance->max_fw_cmds - 1;
+
+ instance->max_num_sge =
+ (instance->max_num_sge > DRSAS_MAX_SGE_CNT) ?
+ DRSAS_MAX_SGE_CNT : instance->max_num_sge;
+
+ /* create a pool of commands */
+ if (alloc_space_for_mfi(instance) != DDI_SUCCESS)
+ goto fail_alloc_fw_space;
+
+	/*
+	 * Prepare an init frame.  Note that the init frame points to the
+	 * queue-info structure.  Each frame has SGL space allocated after
+	 * the first 64 bytes; since this frame needs no SGL, we use that
+	 * space for the queue-info structure instead.
+	 */
+ cmd = get_mfi_pkt(instance);
+ /* Clear the frame buffer and assign back the context id */
+ (void) memset((char *)&cmd->frame[0], 0, sizeof (union drsas_frame));
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
+ cmd->index);
+
+ init_frame = (struct drsas_init_frame *)cmd->frame;
+ initq_info = (struct drsas_init_queue_info *)
+ ((unsigned long)init_frame + 64);
+
+ (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
+ (void) memset(initq_info, 0, sizeof (struct drsas_init_queue_info));
+
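+	/*
+	 * Point the queue-info structure at the producer index (offset 0),
+	 * consumer index (offset 4) and reply queue (offset 8) carved out
+	 * of the internal DMA buffer above.
+	 */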
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
+
+ ddi_put32(cmd->frame_dma_obj.acc_handle,
+ &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
+
+ ddi_put32(cmd->frame_dma_obj.acc_handle,
+ &initq_info->producer_index_phys_addr_hi, 0);
+ ddi_put32(cmd->frame_dma_obj.acc_handle,
+ &initq_info->producer_index_phys_addr_lo,
+ instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
+
+ ddi_put32(cmd->frame_dma_obj.acc_handle,
+ &initq_info->consumer_index_phys_addr_hi, 0);
+ ddi_put32(cmd->frame_dma_obj.acc_handle,
+ &initq_info->consumer_index_phys_addr_lo,
+ instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
+
+ ddi_put32(cmd->frame_dma_obj.acc_handle,
+ &initq_info->reply_queue_start_phys_addr_hi, 0);
+ ddi_put32(cmd->frame_dma_obj.acc_handle,
+ &initq_info->reply_queue_start_phys_addr_lo,
+ instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
+
+ ddi_put8(cmd->frame_dma_obj.acc_handle,
+ &init_frame->cmd, MFI_CMD_OP_INIT);
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
+ MFI_CMD_STATUS_POLL_MODE);
+ ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
+ ddi_put32(cmd->frame_dma_obj.acc_handle,
+ &init_frame->queue_info_new_phys_addr_lo,
+ cmd->frame_phys_addr + 64);
+ ddi_put32(cmd->frame_dma_obj.acc_handle,
+ &init_frame->queue_info_new_phys_addr_hi, 0);
+
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
+ sizeof (struct drsas_init_queue_info));
+
+ cmd->frame_count = 1;
+
+ /* issue the init frame in polled mode */
+ if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
+ con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
+ goto fail_fw_init;
+ }
+
+ return_mfi_pkt(instance, cmd);
+ if (drsas_common_check(instance, cmd) != DDI_SUCCESS) {
+ goto fail_fw_init;
+ }
+
+ /* gather misc FW related information */
+ if (!get_ctrl_info(instance, &ctrl_info)) {
+ instance->max_sectors_per_req = ctrl_info.max_request_size;
+ con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d",
+ ctrl_info.product_name, ctrl_info.ld_present_count));
+ } else {
+ instance->max_sectors_per_req = instance->max_num_sge *
+ PAGESIZE / 512;
+ }
+
+ if (drsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
+ goto fail_fw_init;
+ }
+
+ return (DDI_SUCCESS);
+
+fail_fw_init:
+fail_alloc_fw_space:
+
+ free_space_for_mfi(instance);
+
+fail_ready_state:
+ ddi_regs_map_free(&instance->regmap_handle);
+
+fail_mfi_reg_setup:
+ return (DDI_FAILURE);
+}
+
+/*
+ * mfi_state_transition_to_ready : Move the FW to READY state
+ *
+ * @reg_set : MFI register set
+ */
+static int
+mfi_state_transition_to_ready(struct drsas_instance *instance)
+{
+ int i;
+ uint8_t max_wait;
+ uint32_t fw_ctrl;
+ uint32_t fw_state;
+ uint32_t cur_state;
+
+ fw_state =
+ instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK;
+ con_log(CL_ANN1, (CE_NOTE,
+ "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
+
+ while (fw_state != MFI_STATE_READY) {
+ con_log(CL_ANN, (CE_NOTE,
+ "mfi_state_transition_to_ready:FW state%x", fw_state));
+
+ switch (fw_state) {
+ case MFI_STATE_FAULT:
+ con_log(CL_ANN, (CE_NOTE,
+ "dr_sas: FW in FAULT state!!"));
+
+ return (ENODEV);
+ case MFI_STATE_WAIT_HANDSHAKE:
+ /* set the CLR bit in IMR0 */
+ con_log(CL_ANN, (CE_NOTE,
+ "dr_sas: FW waiting for HANDSHAKE"));
+ /*
+ * PCI_Hot Plug: MFI F/W requires
+ * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
+ * to be set
+ */
+ /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
+ WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
+ MFI_INIT_HOTPLUG, instance);
+
+ max_wait = 2;
+ cur_state = MFI_STATE_WAIT_HANDSHAKE;
+ break;
+ case MFI_STATE_BOOT_MESSAGE_PENDING:
+ /* set the CLR bit in IMR0 */
+ con_log(CL_ANN, (CE_NOTE,
+ "dr_sas: FW state boot message pending"));
+ /*
+ * PCI_Hot Plug: MFI F/W requires
+ * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
+ * to be set
+ */
+ WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
+
+ max_wait = 10;
+ cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
+ break;
+ case MFI_STATE_OPERATIONAL:
+ /* bring it to READY state; assuming max wait 2 secs */
+ instance->func_ptr->disable_intr(instance);
+ con_log(CL_ANN1, (CE_NOTE,
+ "dr_sas: FW in OPERATIONAL state"));
+ /*
+ * PCI_Hot Plug: MFI F/W requires
+ * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
+ * to be set
+ */
+ /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
+ WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
+
+ max_wait = 10;
+ cur_state = MFI_STATE_OPERATIONAL;
+ break;
+ case MFI_STATE_UNDEFINED:
+ /* this state should not last for more than 2 seconds */
+ con_log(CL_ANN, (CE_NOTE, "FW state undefined"));
+
+ max_wait = 2;
+ cur_state = MFI_STATE_UNDEFINED;
+ break;
+ case MFI_STATE_BB_INIT:
+ max_wait = 2;
+ cur_state = MFI_STATE_BB_INIT;
+ break;
+ case MFI_STATE_FW_INIT:
+ max_wait = 2;
+ cur_state = MFI_STATE_FW_INIT;
+ break;
+ case MFI_STATE_DEVICE_SCAN:
+ max_wait = 10;
+ cur_state = MFI_STATE_DEVICE_SCAN;
+ break;
+ default:
+ con_log(CL_ANN, (CE_NOTE,
+ "dr_sas: Unknown state 0x%x", fw_state));
+ return (ENODEV);
+ }
+
+ /* the cur_state should not last for more than max_wait secs */
+ for (i = 0; i < (max_wait * MILLISEC); i++) {
+ fw_state =
+ instance->func_ptr->read_fw_status_reg(instance) &
+ MFI_STATE_MASK;
+
+ if (fw_state == cur_state) {
+ delay(1 * drv_usectohz(MILLISEC));
+ } else {
+ break;
+ }
+ }
+
+ /* return error if fw_state hasn't changed after max_wait */
+ if (fw_state == cur_state) {
+ con_log(CL_ANN, (CE_NOTE,
+ "FW state hasn't changed in %d secs", max_wait));
+ return (ENODEV);
+ }
+	}
+
+ fw_ctrl = RD_IB_DOORBELL(instance);
+
+ con_log(CL_ANN1, (CE_NOTE,
+ "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
+
+ /*
+ * Write 0xF to the doorbell register to do the following.
+ * - Abort all outstanding commands (bit 0).
+ * - Transition from OPERATIONAL to READY state (bit 1).
+ * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
+	 * - Release the FW to continue running, i.e. the BIOS handshake
+	 *   (bit 3).
+ */
+ WR_IB_DOORBELL(0xF, instance);
+
+ if (drsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
+ return (ENODEV);
+ }
+ return (DDI_SUCCESS);
+}
+
+/*
+ * get_seq_num
+ */
+static int
+get_seq_num(struct drsas_instance *instance,
+ struct drsas_evt_log_info *eli)
+{
+ int ret = DDI_SUCCESS;
+
+ dma_obj_t dcmd_dma_obj;
+ struct drsas_cmd *cmd;
+ struct drsas_dcmd_frame *dcmd;
+
+ cmd = get_mfi_pkt(instance);
+
+ if (!cmd) {
+ cmn_err(CE_WARN, "dr_sas: failed to get a cmd");
+ return (ENOMEM);
+ }
+ /* Clear the frame buffer and assign back the context id */
+ (void) memset((char *)&cmd->frame[0], 0, sizeof (union drsas_frame));
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
+ cmd->index);
+
+ dcmd = &cmd->frame->dcmd;
+
+ /* allocate the data transfer buffer */
+ dcmd_dma_obj.size = sizeof (struct drsas_evt_log_info);
+ dcmd_dma_obj.dma_attr = drsas_generic_dma_attr;
+ dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
+ dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
+ dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
+ dcmd_dma_obj.dma_attr.dma_attr_align = 1;
+
+	if (drsas_alloc_dma_obj(instance, &dcmd_dma_obj,
+	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
+		con_log(CL_ANN, (CE_WARN,
+		    "get_seq_num: could not allocate data transfer buffer."));
+		/* return the command to the free pool before bailing out */
+		return_mfi_pkt(instance, cmd);
+		return (DDI_FAILURE);
+	}
+
+ (void) memset(dcmd_dma_obj.buffer, 0,
+ sizeof (struct drsas_evt_log_info));
+
+ (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
+
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
+ ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
+ MFI_FRAME_DIR_READ);
+ ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
+ sizeof (struct drsas_evt_log_info));
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
+ DR_DCMD_CTRL_EVENT_GET_INFO);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
+ sizeof (struct drsas_evt_log_info));
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
+ dcmd_dma_obj.dma_cookie[0].dmac_address);
+
+ cmd->sync_cmd = DRSAS_TRUE;
+ cmd->frame_count = 1;
+
+ if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
+ cmn_err(CE_WARN, "get_seq_num: "
+ "failed to issue DRSAS_DCMD_CTRL_EVENT_GET_INFO");
+ ret = DDI_FAILURE;
+ } else {
+ /* copy the data back into callers buffer */
+ ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)eli,
+ (uint8_t *)dcmd_dma_obj.buffer,
+ sizeof (struct drsas_evt_log_info), DDI_DEV_AUTOINCR);
+ ret = DDI_SUCCESS;
+ }
+
+ if (drsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
+ ret = DDI_FAILURE;
+
+ return_mfi_pkt(instance, cmd);
+ if (drsas_common_check(instance, cmd) != DDI_SUCCESS) {
+ ret = DDI_FAILURE;
+ }
+ return (ret);
+}
+
+/*
+ * start_mfi_aen
+ */
+static int
+start_mfi_aen(struct drsas_instance *instance)
+{
+ int ret = 0;
+
+ struct drsas_evt_log_info eli;
+ union drsas_evt_class_locale class_locale;
+
+ /* get the latest sequence number from FW */
+ (void) memset(&eli, 0, sizeof (struct drsas_evt_log_info));
+
+ if (get_seq_num(instance, &eli)) {
+ cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
+ return (-1);
+ }
+
+ /* register AEN with FW for latest sequence number plus 1 */
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = DR_EVT_LOCALE_ALL;
+ class_locale.members.class = DR_EVT_CLASS_INFO;
+ ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
+ class_locale.word);
+
+ if (ret) {
+ cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
+ return (-1);
+ }
+
+ return (ret);
+}
+
+/*
+ * flush_cache
+ */
+static void
+flush_cache(struct drsas_instance *instance)
+{
+ struct drsas_cmd *cmd = NULL;
+ struct drsas_dcmd_frame *dcmd;
+ uint32_t max_cmd = instance->max_fw_cmds;
+
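+	/* the extra slot at cmd_list[max_cmd] is reserved for flush_cache */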
+ cmd = instance->cmd_list[max_cmd];
+
+ if (cmd == NULL)
+ return;
+
+ dcmd = &cmd->frame->dcmd;
+
+ (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
+
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
+ ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
+ MFI_FRAME_DIR_NONE);
+ ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
+ DR_DCMD_CTRL_CACHE_FLUSH);
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
+ DR_FLUSH_CTRL_CACHE | DR_FLUSH_DISK_CACHE);
+
+ cmd->frame_count = 1;
+
+ if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
+ con_log(CL_ANN1, (CE_WARN,
+ "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
+ }
+ con_log(CL_DLEVEL1, (CE_NOTE, "done"));
+}
+
+/*
+ * service_mfi_aen - Completes an AEN command
+ * @instance: Adapter soft state
+ * @cmd: Command to be completed
+ *
+ */
+static void
+service_mfi_aen(struct drsas_instance *instance, struct drsas_cmd *cmd)
+{
+ uint32_t seq_num;
+ struct drsas_evt_detail *evt_detail =
+ (struct drsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
+ int rval = 0;
+ int tgt = 0;
+ ddi_acc_handle_t acc_handle;
+
+ acc_handle = cmd->frame_dma_obj.acc_handle;
+
+ cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
+
+ if (cmd->cmd_status == ENODATA) {
+ cmd->cmd_status = 0;
+ }
+
+	/*
+	 * Log the MFI AEN event to the sysevent queue so that
+	 * applications will get notified.
+	 */
+ if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
+ NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
+ int instance_no = ddi_get_instance(instance->dip);
+ con_log(CL_ANN, (CE_WARN,
+ "dr_sas%d: Failed to log AEN event", instance_no));
+ }
+	/*
+	 * Check for any LD devices that have changed state, i.e. gone
+	 * online or offline.
+	 */
+ con_log(CL_ANN1, (CE_NOTE,
+ "AEN: code = %x class = %x locale = %x args = %x",
+ ddi_get32(acc_handle, &evt_detail->code),
+ evt_detail->cl.members.class,
+ ddi_get16(acc_handle, &evt_detail->cl.members.locale),
+ ddi_get8(acc_handle, &evt_detail->arg_type)));
+
+ switch (ddi_get32(acc_handle, &evt_detail->code)) {
+ case DR_EVT_CFG_CLEARED: {
+ for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
+ if (instance->dr_ld_list[tgt].dip != NULL) {
+ rval = drsas_service_evt(instance, tgt, 0,
+ DRSAS_EVT_UNCONFIG_TGT, NULL);
+ con_log(CL_ANN1, (CE_WARN,
+ "dr_sas: CFG CLEARED AEN rval = %d "
+ "tgt id = %d", rval, tgt));
+ }
+ }
+ break;
+ }
+
+ case DR_EVT_LD_DELETED: {
+ rval = drsas_service_evt(instance,
+ ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
+ DRSAS_EVT_UNCONFIG_TGT, NULL);
+ con_log(CL_ANN1, (CE_WARN, "dr_sas: LD DELETED AEN rval = %d "
+ "tgt id = %d index = %d", rval,
+ ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
+ ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
+ break;
+ } /* End of DR_EVT_LD_DELETED */
+
+ case DR_EVT_LD_CREATED: {
+ rval = drsas_service_evt(instance,
+ ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
+ DRSAS_EVT_CONFIG_TGT, NULL);
+ con_log(CL_ANN1, (CE_WARN, "dr_sas: LD CREATED AEN rval = %d "
+ "tgt id = %d index = %d", rval,
+ ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
+ ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
+ break;
+ } /* End of DR_EVT_LD_CREATED */
+ } /* End of Main Switch */
+
+ /* get copy of seq_num and class/locale for re-registration */
+ seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
+ seq_num++;
+ (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
+ sizeof (struct drsas_evt_detail));
+
+ ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
+ ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);
+
+ instance->aen_seq_num = seq_num;
+
+ cmd->frame_count = 1;
+
+ /* Issue the aen registration frame */
+ instance->func_ptr->issue_cmd(cmd, instance);
+}
+
+/*
+ * complete_cmd_in_sync_mode - Completes an internal command
+ * @instance: Adapter soft state
+ * @cmd: Command to be completed
+ *
+ * The issue_cmd_in_sync_mode() function waits for a command to complete
+ * after issuing it.  This function wakes up that waiting routine by
+ * broadcasting on the int_cmd_cv condition variable.
+ */
+static void
+complete_cmd_in_sync_mode(struct drsas_instance *instance,
+ struct drsas_cmd *cmd)
+{
+ cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
+ &cmd->frame->io.cmd_status);
+
+ cmd->sync_cmd = DRSAS_FALSE;
+
+ if (cmd->cmd_status == ENODATA) {
+ cmd->cmd_status = 0;
+ }
+
+ cv_broadcast(&instance->int_cmd_cv);
+}
+
+/*
+ * drsas_softintr - The Software ISR
+ * @instance : HBA soft state
+ *
+ * Called directly from drsas_isr() when high-level interrupts are not in
+ * use; otherwise triggered as a soft interrupt.
+ */
+static uint_t
+drsas_softintr(struct drsas_instance *instance)
+{
+ struct scsi_pkt *pkt;
+ struct scsa_cmd *acmd;
+ struct drsas_cmd *cmd;
+ struct mlist_head *pos, *next;
+ mlist_t process_list;
+ struct drsas_header *hdr;
+ struct scsi_arq_status *arqstat;
+
+ con_log(CL_ANN1, (CE_CONT, "drsas_softintr called"));
+
+ ASSERT(instance);
+ mutex_enter(&instance->completed_pool_mtx);
+
+ if (mlist_empty(&instance->completed_pool_list)) {
+ mutex_exit(&instance->completed_pool_mtx);
+ return (DDI_INTR_UNCLAIMED);
+ }
+
+ instance->softint_running = 1;
+
+ INIT_LIST_HEAD(&process_list);
+ mlist_splice(&instance->completed_pool_list, &process_list);
+ INIT_LIST_HEAD(&instance->completed_pool_list);
+
+ mutex_exit(&instance->completed_pool_mtx);
+
+ /* perform all callbacks first, before releasing the SCBs */
+ mlist_for_each_safe(pos, next, &process_list) {
+ cmd = mlist_entry(pos, struct drsas_cmd, list);
+
+		/* synchronize the Cmd frame for the controller */
+ (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
+ 0, 0, DDI_DMA_SYNC_FORCPU);
+
+ if (drsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
+ DDI_SUCCESS) {
+ drsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+ return (DDI_INTR_UNCLAIMED);
+ }
+
+ hdr = &cmd->frame->hdr;
+
+ /* remove the internal command from the process list */
+ mlist_del_init(&cmd->list);
+
+ switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
+ case MFI_CMD_OP_PD_SCSI:
+ case MFI_CMD_OP_LD_SCSI:
+ case MFI_CMD_OP_LD_READ:
+ case MFI_CMD_OP_LD_WRITE:
+ /*
+ * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
+ * could have been issued either through an
+ * IO path or an IOCTL path. If it was via IOCTL,
+ * we will send it to internal completion.
+ */
+ if (cmd->sync_cmd == DRSAS_TRUE) {
+ complete_cmd_in_sync_mode(instance, cmd);
+ break;
+ }
+
+ /* regular commands */
+ acmd = cmd->cmd;
+ pkt = CMD2PKT(acmd);
+
+ if (acmd->cmd_flags & CFLAG_DMAVALID) {
+ if (acmd->cmd_flags & CFLAG_CONSISTENT) {
+ (void) ddi_dma_sync(acmd->cmd_dmahandle,
+ acmd->cmd_dma_offset,
+ acmd->cmd_dma_len,
+ DDI_DMA_SYNC_FORCPU);
+ }
+ }
+
+ pkt->pkt_reason = CMD_CMPLT;
+ pkt->pkt_statistics = 0;
+ pkt->pkt_state = STATE_GOT_BUS
+ | STATE_GOT_TARGET | STATE_SENT_CMD
+ | STATE_XFERRED_DATA | STATE_GOT_STATUS;
+
+ con_log(CL_ANN1, (CE_CONT,
+ "CDB[0] = %x completed for %s: size %lx context %x",
+ pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
+ acmd->cmd_dmacount, hdr->context));
+
+ if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
+ struct scsi_inquiry *inq;
+
+ if (acmd->cmd_dmacount != 0) {
+ bp_mapin(acmd->cmd_buf);
+ inq = (struct scsi_inquiry *)
+ acmd->cmd_buf->b_un.b_addr;
+
+ /* don't expose physical drives to OS */
+ if (acmd->islogical &&
+ (hdr->cmd_status == MFI_STAT_OK)) {
+ display_scsi_inquiry(
+ (caddr_t)inq);
+ } else if ((hdr->cmd_status ==
+ MFI_STAT_OK) && inq->inq_dtype ==
+ DTYPE_DIRECT) {
+
+ display_scsi_inquiry(
+ (caddr_t)inq);
+
+ /* for physical disk */
+ hdr->cmd_status =
+ MFI_STAT_DEVICE_NOT_FOUND;
+ }
+ }
+ }
+
+ switch (hdr->cmd_status) {
+ case MFI_STAT_OK:
+ pkt->pkt_scbp[0] = STATUS_GOOD;
+ break;
+ case MFI_STAT_LD_CC_IN_PROGRESS:
+ case MFI_STAT_LD_RECON_IN_PROGRESS:
+ pkt->pkt_scbp[0] = STATUS_GOOD;
+ break;
+ case MFI_STAT_LD_INIT_IN_PROGRESS:
+ con_log(CL_ANN,
+ (CE_WARN, "Initialization in Progress"));
+ pkt->pkt_reason = CMD_TRAN_ERR;
+
+ break;
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+ con_log(CL_ANN1, (CE_CONT, "scsi_done error"));
+
+ pkt->pkt_reason = CMD_CMPLT;
+ ((struct scsi_status *)
+ pkt->pkt_scbp)->sts_chk = 1;
+
+ if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
+
+ con_log(CL_ANN,
+ (CE_WARN, "TEST_UNIT_READY fail"));
+
+ } else {
+ pkt->pkt_state |= STATE_ARQ_DONE;
+ arqstat = (void *)(pkt->pkt_scbp);
+ arqstat->sts_rqpkt_reason = CMD_CMPLT;
+ arqstat->sts_rqpkt_resid = 0;
+ arqstat->sts_rqpkt_state |=
+ STATE_GOT_BUS | STATE_GOT_TARGET
+ | STATE_SENT_CMD
+ | STATE_XFERRED_DATA;
+ *(uint8_t *)&arqstat->sts_rqpkt_status =
+ STATUS_GOOD;
+ ddi_rep_get8(
+ cmd->frame_dma_obj.acc_handle,
+ (uint8_t *)
+ &(arqstat->sts_sensedata),
+ cmd->sense,
+ acmd->cmd_scblen -
+ offsetof(struct scsi_arq_status,
+ sts_sensedata), DDI_DEV_AUTOINCR);
+ }
+ break;
+ case MFI_STAT_LD_OFFLINE:
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ con_log(CL_ANN1, (CE_CONT,
+ "device not found error"));
+ pkt->pkt_reason = CMD_DEV_GONE;
+ pkt->pkt_statistics = STAT_DISCON;
+ break;
+ case MFI_STAT_LD_LBA_OUT_OF_RANGE:
+ pkt->pkt_state |= STATE_ARQ_DONE;
+ pkt->pkt_reason = CMD_CMPLT;
+ ((struct scsi_status *)
+ pkt->pkt_scbp)->sts_chk = 1;
+
+ arqstat = (void *)(pkt->pkt_scbp);
+ arqstat->sts_rqpkt_reason = CMD_CMPLT;
+ arqstat->sts_rqpkt_resid = 0;
+ arqstat->sts_rqpkt_state |= STATE_GOT_BUS
+ | STATE_GOT_TARGET | STATE_SENT_CMD
+ | STATE_XFERRED_DATA;
+ *(uint8_t *)&arqstat->sts_rqpkt_status =
+ STATUS_GOOD;
+
+ arqstat->sts_sensedata.es_valid = 1;
+ arqstat->sts_sensedata.es_key =
+ KEY_ILLEGAL_REQUEST;
+ arqstat->sts_sensedata.es_class =
+ CLASS_EXTENDED_SENSE;
+
+ /*
+ * LOGICAL BLOCK ADDRESS OUT OF RANGE:
+ * ASC: 0x21; ASCQ: 0x00;
+ */
+ arqstat->sts_sensedata.es_add_code = 0x21;
+ arqstat->sts_sensedata.es_qual_code = 0x00;
+
+ break;
+
+ default:
+ con_log(CL_ANN, (CE_CONT, "Unknown status!"));
+ pkt->pkt_reason = CMD_TRAN_ERR;
+
+ break;
+ }
+
+ atomic_add_16(&instance->fw_outstanding, (-1));
+
+ return_mfi_pkt(instance, cmd);
+
+ (void) drsas_common_check(instance, cmd);
+
+ if (acmd->cmd_dmahandle) {
+ if (drsas_check_dma_handle(
+ acmd->cmd_dmahandle) != DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip,
+ DDI_SERVICE_UNAFFECTED);
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ pkt->pkt_statistics = 0;
+ }
+ }
+
+ /* Call the callback routine */
+ if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
+ pkt->pkt_comp) {
+ (*pkt->pkt_comp)(pkt);
+ }
+
+ break;
+ case MFI_CMD_OP_SMP:
+ case MFI_CMD_OP_STP:
+ complete_cmd_in_sync_mode(instance, cmd);
+ break;
+ case MFI_CMD_OP_DCMD:
+ /* see if we got an event notification */
+ if (ddi_get32(cmd->frame_dma_obj.acc_handle,
+ &cmd->frame->dcmd.opcode) ==
+ DR_DCMD_CTRL_EVENT_WAIT) {
+ if ((instance->aen_cmd == cmd) &&
+ (instance->aen_cmd->abort_aen)) {
+ con_log(CL_ANN, (CE_WARN,
+ "drsas_softintr: "
+ "aborted_aen returned"));
+ } else {
+ atomic_add_16(&instance->fw_outstanding,
+ (-1));
+ service_mfi_aen(instance, cmd);
+ }
+ } else {
+ complete_cmd_in_sync_mode(instance, cmd);
+ }
+
+ break;
+ case MFI_CMD_OP_ABORT:
+ con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
+ /*
+ * MFI_CMD_OP_ABORT successfully completed
+ * in the synchronous mode
+ */
+ complete_cmd_in_sync_mode(instance, cmd);
+ break;
+ default:
+ drsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+
+ if (cmd->pkt != NULL) {
+ pkt = cmd->pkt;
+ if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
+ pkt->pkt_comp) {
+ (*pkt->pkt_comp)(pkt);
+ }
+ }
+ con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
+ break;
+ }
+ }
+
+ instance->softint_running = 0;
+
+ return (DDI_INTR_CLAIMED);
+}
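+
+/*
+ * Feeding the soft ISR, sketched (hypothetical field and helper names;
+ * the hardware ISR is defined elsewhere in this driver).  A completed
+ * command is moved onto completed_pool_list under completed_pool_mtx and
+ * the soft interrupt is triggered, which is why drsas_softintr() can
+ * splice the whole list out and drop the mutex before processing:
+ *
+ *     mutex_enter(&instance->completed_pool_mtx);
+ *     mlist_add_tail(&cmd->list, &instance->completed_pool_list);
+ *     mutex_exit(&instance->completed_pool_mtx);
+ *     ddi_trigger_softintr(instance->soft_intr_id);
+ */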
+
+/*
+ * drsas_alloc_dma_obj
+ *
+ * Allocate the memory and other resources for a DMA object.
+ */
+static int
+drsas_alloc_dma_obj(struct drsas_instance *instance, dma_obj_t *obj,
+ uchar_t endian_flags)
+{
+ int i;
+ size_t alen = 0;
+ uint_t cookie_cnt;
+ struct ddi_device_acc_attr tmp_endian_attr;
+
+ tmp_endian_attr = endian_attr;
+ tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
+
+ i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
+ DDI_DMA_SLEEP, NULL, &obj->dma_handle);
+ if (i != DDI_SUCCESS) {
+
+ switch (i) {
+ case DDI_DMA_BADATTR:
+ con_log(CL_ANN, (CE_WARN,
+ "Failed ddi_dma_alloc_handle- Bad attribute"));
+ break;
+ case DDI_DMA_NORESOURCES:
+ con_log(CL_ANN, (CE_WARN,
+ "Failed ddi_dma_alloc_handle- No Resources"));
+ break;
+ default:
+ con_log(CL_ANN, (CE_WARN,
+ "Failed ddi_dma_alloc_handle: "
+ "unknown status %d", i));
+ break;
+ }
+
+ return (-1);
+ }
+
+ if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
+ DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
+ &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
+ alen < obj->size) {
+
+ ddi_dma_free_handle(&obj->dma_handle);
+
+ con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
+
+ return (-1);
+ }
+
+ if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
+ obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
+ NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
+
+ ddi_dma_mem_free(&obj->acc_handle);
+ ddi_dma_free_handle(&obj->dma_handle);
+
+ con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
+
+ return (-1);
+ }
+
+ if (drsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+ return (-1);
+ }
+
+ if (drsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+ return (-1);
+ }
+
+ return (cookie_cnt);
+}
+
+/*
+ * drsas_free_dma_obj(struct drsas_instance *, dma_obj_t)
+ *
+ * De-allocate the memory and other resources for a DMA object, which must
+ * have been allocated by a previous call to drsas_alloc_dma_obj()
+ */
+static int
+drsas_free_dma_obj(struct drsas_instance *instance, dma_obj_t obj)
+{
+
+ if (drsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
+ return (DDI_FAILURE);
+ }
+
+ if (drsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
+ return (DDI_FAILURE);
+ }
+
+ (void) ddi_dma_unbind_handle(obj.dma_handle);
+ ddi_dma_mem_free(&obj.acc_handle);
+ ddi_dma_free_handle(&obj.dma_handle);
+
+ return (DDI_SUCCESS);
+}
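+
+/*
+ * Typical alloc/use/free cycle, sketched from the IOCTL paths below:
+ *
+ *     dma_obj_t obj;
+ *
+ *     obj.size = xferlen;
+ *     obj.dma_attr = drsas_generic_dma_attr;
+ *     obj.dma_attr.dma_attr_sgllen = 1;
+ *     if (drsas_alloc_dma_obj(instance, &obj,
+ *         (uchar_t)DDI_STRUCTURE_LE_ACC) != 1)
+ *             return (DDI_FAILURE);
+ *     ... DMA to/from obj.buffer at obj.dma_cookie[0].dmac_address ...
+ *     (void) drsas_free_dma_obj(instance, obj);
+ *
+ * drsas_alloc_dma_obj() returns the cookie count on success and -1 on
+ * failure, so callers that need exactly one cookie test for != 1.
+ */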
+
+/*
+ * drsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
+ * int, int (*)())
+ *
+ * Allocate DMA resources for a new SCSI command
+ */
+static int
+drsas_dma_alloc(struct drsas_instance *instance, struct scsi_pkt *pkt,
+ struct buf *bp, int flags, int (*callback)())
+{
+ int dma_flags;
+ int (*cb)(caddr_t);
+ int i;
+
+ ddi_dma_attr_t tmp_dma_attr = drsas_generic_dma_attr;
+ struct scsa_cmd *acmd = PKT2CMD(pkt);
+
+ acmd->cmd_buf = bp;
+
+ if (bp->b_flags & B_READ) {
+ acmd->cmd_flags &= ~CFLAG_DMASEND;
+ dma_flags = DDI_DMA_READ;
+ } else {
+ acmd->cmd_flags |= CFLAG_DMASEND;
+ dma_flags = DDI_DMA_WRITE;
+ }
+
+ if (flags & PKT_CONSISTENT) {
+ acmd->cmd_flags |= CFLAG_CONSISTENT;
+ dma_flags |= DDI_DMA_CONSISTENT;
+ }
+
+ if (flags & PKT_DMA_PARTIAL) {
+ dma_flags |= DDI_DMA_PARTIAL;
+ }
+
+ dma_flags |= DDI_DMA_REDZONE;
+
+ cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
+
+ tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
+ tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
+
+ if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
+ cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
+ switch (i) {
+ case DDI_DMA_BADATTR:
+ bioerror(bp, EFAULT);
+ return (DDI_FAILURE);
+
+ case DDI_DMA_NORESOURCES:
+ bioerror(bp, 0);
+ return (DDI_FAILURE);
+
+ default:
+ con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
+ "impossible result (0x%x)", i));
+ bioerror(bp, EFAULT);
+ return (DDI_FAILURE);
+ }
+ }
+
+ i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
+ cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);
+
+ switch (i) {
+ case DDI_DMA_PARTIAL_MAP:
+ if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
+ con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
+ "DDI_DMA_PARTIAL_MAP impossible"));
+ goto no_dma_cookies;
+ }
+
+ if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
+ DDI_FAILURE) {
+ con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
+ goto no_dma_cookies;
+ }
+
+ if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
+ &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
+ &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
+ DDI_FAILURE) {
+
+ con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
+ goto no_dma_cookies;
+ }
+
+ goto get_dma_cookies;
+ case DDI_DMA_MAPPED:
+ acmd->cmd_nwin = 1;
+ acmd->cmd_dma_len = 0;
+ acmd->cmd_dma_offset = 0;
+
+get_dma_cookies:
+ i = 0;
+ acmd->cmd_dmacount = 0;
+ for (;;) {
+ acmd->cmd_dmacount +=
+ acmd->cmd_dmacookies[i++].dmac_size;
+
+ if (i == instance->max_num_sge ||
+ i == acmd->cmd_ncookies)
+ break;
+
+ ddi_dma_nextcookie(acmd->cmd_dmahandle,
+ &acmd->cmd_dmacookies[i]);
+ }
+
+ acmd->cmd_cookie = i;
+ acmd->cmd_cookiecnt = i;
+
+ acmd->cmd_flags |= CFLAG_DMAVALID;
+
+ if (bp->b_bcount >= acmd->cmd_dmacount) {
+ pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
+ } else {
+ pkt->pkt_resid = 0;
+ }
+
+ return (DDI_SUCCESS);
+ case DDI_DMA_NORESOURCES:
+ bioerror(bp, 0);
+ break;
+ case DDI_DMA_NOMAPPING:
+ bioerror(bp, EFAULT);
+ break;
+ case DDI_DMA_TOOBIG:
+ bioerror(bp, EINVAL);
+ break;
+ case DDI_DMA_INUSE:
+ con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
+ " DDI_DMA_INUSE impossible"));
+ break;
+ default:
+ con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
+ "impossible result (0x%x)", i));
+ break;
+ }
+
+no_dma_cookies:
+ ddi_dma_free_handle(&acmd->cmd_dmahandle);
+ acmd->cmd_dmahandle = NULL;
+ acmd->cmd_flags &= ~CFLAG_DMAVALID;
+ return (DDI_FAILURE);
+}
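+
+/*
+ * Partial-DMA flow, sketched: when ddi_dma_buf_bind_handle() returns
+ * DDI_DMA_PARTIAL_MAP the transfer is split across DMA windows.  The
+ * code above loads the cookies of the current window; when the target
+ * layer resubmits the packet for the remaining data, drsas_dma_move()
+ * below advances to the next cookie or window via ddi_dma_nextcookie()
+ * or ddi_dma_getwin().
+ */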
+
+/*
+ * drsas_dma_move(struct drsas_instance *, struct scsi_pkt *, struct buf *)
+ *
+ * Move DMA resources to the next DMA window
+ */
+static int
+drsas_dma_move(struct drsas_instance *instance, struct scsi_pkt *pkt,
+ struct buf *bp)
+{
+ int i = 0;
+
+ struct scsa_cmd *acmd = PKT2CMD(pkt);
+
+ /*
+ * If there are no more cookies remaining in this window,
+ * must move to the next window first.
+ */
+ if (acmd->cmd_cookie == acmd->cmd_ncookies) {
+ if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
+ return (DDI_SUCCESS);
+ }
+
+ /* at last window, cannot move */
+ if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
+ &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
+ &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
+ DDI_FAILURE) {
+ return (DDI_FAILURE);
+ }
+
+ acmd->cmd_cookie = 0;
+ } else {
+ /* still more cookies in this window - get the next one */
+ ddi_dma_nextcookie(acmd->cmd_dmahandle,
+ &acmd->cmd_dmacookies[0]);
+ }
+
+ /* get remaining cookies in this window, up to our maximum */
+ for (;;) {
+ acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
+ acmd->cmd_cookie++;
+
+ if (i == instance->max_num_sge ||
+ acmd->cmd_cookie == acmd->cmd_ncookies) {
+ break;
+ }
+
+ ddi_dma_nextcookie(acmd->cmd_dmahandle,
+ &acmd->cmd_dmacookies[i]);
+ }
+
+ acmd->cmd_cookiecnt = i;
+
+ if (bp->b_bcount >= acmd->cmd_dmacount) {
+ pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
+ } else {
+ pkt->pkt_resid = 0;
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * build_cmd
+ */
+static struct drsas_cmd *
+build_cmd(struct drsas_instance *instance, struct scsi_address *ap,
+ struct scsi_pkt *pkt, uchar_t *cmd_done)
+{
+ uint16_t flags = 0;
+ uint32_t i;
+ uint32_t context;
+ uint32_t sge_bytes;
+ ddi_acc_handle_t acc_handle;
+ struct drsas_cmd *cmd;
+ struct drsas_sge64 *mfi_sgl;
+ struct scsa_cmd *acmd = PKT2CMD(pkt);
+ struct drsas_pthru_frame *pthru;
+ struct drsas_io_frame *ldio;
+
+ /* find out if this is a logical or physical drive command */
+ acmd->islogical = MRDRV_IS_LOGICAL(ap);
+ acmd->device_id = MAP_DEVICE_ID(instance, ap);
+ *cmd_done = 0;
+
+ /* get the command packet */
+ if (!(cmd = get_mfi_pkt(instance))) {
+ return (NULL);
+ }
+
+ acc_handle = cmd->frame_dma_obj.acc_handle;
+
+ /* Clear the frame buffer and assign back the context id */
+ (void) memset((char *)&cmd->frame[0], 0, sizeof (union drsas_frame));
+ ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);
+
+ cmd->pkt = pkt;
+ cmd->cmd = acmd;
+
+ /* determine the command direction */
+ if (acmd->cmd_flags & CFLAG_DMASEND) {
+ flags = MFI_FRAME_DIR_WRITE;
+
+ if (acmd->cmd_flags & CFLAG_CONSISTENT) {
+ (void) ddi_dma_sync(acmd->cmd_dmahandle,
+ acmd->cmd_dma_offset, acmd->cmd_dma_len,
+ DDI_DMA_SYNC_FORDEV);
+ }
+ } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
+ flags = MFI_FRAME_DIR_READ;
+
+ if (acmd->cmd_flags & CFLAG_CONSISTENT) {
+ (void) ddi_dma_sync(acmd->cmd_dmahandle,
+ acmd->cmd_dma_offset, acmd->cmd_dma_len,
+ DDI_DMA_SYNC_FORCPU);
+ }
+ } else {
+ flags = MFI_FRAME_DIR_NONE;
+ }
+
+ flags |= MFI_FRAME_SGL64;
+
+ switch (pkt->pkt_cdbp[0]) {
+
+ /*
+ * case SCMD_SYNCHRONIZE_CACHE:
+ * flush_cache(instance);
+ * return_mfi_pkt(instance, cmd);
+ * *cmd_done = 1;
+ *
+ * return (NULL);
+ */
+
+ case SCMD_READ:
+ case SCMD_WRITE:
+ case SCMD_READ_G1:
+ case SCMD_WRITE_G1:
+ if (acmd->islogical) {
+ ldio = (struct drsas_io_frame *)cmd->frame;
+
+ /*
+ * prepare the Logical IO frame:
+ * bit 1 of the opcode is zero for all read cmds
+ */
+ ddi_put8(acc_handle, &ldio->cmd,
+ (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
+ : MFI_CMD_OP_LD_READ);
+ ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
+ ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
+ ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
+ ddi_put16(acc_handle, &ldio->timeout, 0);
+ ddi_put8(acc_handle, &ldio->reserved_0, 0);
+ ddi_put16(acc_handle, &ldio->pad_0, 0);
+ ddi_put16(acc_handle, &ldio->flags, flags);
+
+ /* Initialize sense information */
+ bzero(cmd->sense, SENSE_LENGTH);
+ ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
+ ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
+ ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
+ cmd->sense_phys_addr);
+ ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
+ ddi_put8(acc_handle, &ldio->access_byte,
+ (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
+ ddi_put8(acc_handle, &ldio->sge_count,
+ acmd->cmd_cookiecnt);
+ mfi_sgl = (struct drsas_sge64 *)&ldio->sgl;
+
+ context = ddi_get32(acc_handle, &ldio->context);
+
+ if (acmd->cmd_cdblen == CDB_GROUP0) {
+ ddi_put32(acc_handle, &ldio->lba_count, (
+ (uint16_t)(pkt->pkt_cdbp[4])));
+
+ ddi_put32(acc_handle, &ldio->start_lba_lo, (
+ ((uint32_t)(pkt->pkt_cdbp[3])) |
+ ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
+ ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
+ << 16)));
+ } else if (acmd->cmd_cdblen == CDB_GROUP1) {
+ ddi_put32(acc_handle, &ldio->lba_count, (
+ ((uint16_t)(pkt->pkt_cdbp[8])) |
+ ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));
+
+ ddi_put32(acc_handle, &ldio->start_lba_lo, (
+ ((uint32_t)(pkt->pkt_cdbp[5])) |
+ ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
+ ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
+ ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
+ } else if (acmd->cmd_cdblen == CDB_GROUP5) {
+ ddi_put32(acc_handle, &ldio->lba_count, (
+ ((uint16_t)(pkt->pkt_cdbp[9])) |
+ ((uint16_t)(pkt->pkt_cdbp[8]) << 8) |
+ ((uint16_t)(pkt->pkt_cdbp[7]) << 16) |
+ ((uint16_t)(pkt->pkt_cdbp[6]) << 24)));
+
+ ddi_put32(acc_handle, &ldio->start_lba_lo, (
+ ((uint32_t)(pkt->pkt_cdbp[5])) |
+ ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
+ ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
+ ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
+ } else if (acmd->cmd_cdblen == CDB_GROUP4) {
+ ddi_put32(acc_handle, &ldio->lba_count, (
+ ((uint16_t)(pkt->pkt_cdbp[13])) |
+ ((uint16_t)(pkt->pkt_cdbp[12]) << 8) |
+ ((uint16_t)(pkt->pkt_cdbp[11]) << 16) |
+ ((uint16_t)(pkt->pkt_cdbp[10]) << 24)));
+
+ ddi_put32(acc_handle, &ldio->start_lba_lo, (
+ ((uint32_t)(pkt->pkt_cdbp[9])) |
+ ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
+ ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
+ ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
+
+ ddi_put32(acc_handle, &ldio->start_lba_hi, (
+ ((uint32_t)(pkt->pkt_cdbp[5])) |
+ ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
+ ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
+ ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
+ }
+
+ break;
+ }
+ /* fall through for all non-read/write cmds */
+ default:
+
+ switch (pkt->pkt_cdbp[0]) {
+ case SCMD_MODE_SENSE:
+ case SCMD_MODE_SENSE_G1: {
+ union scsi_cdb *cdbp;
+ uint16_t page_code;
+
+ cdbp = (void *)pkt->pkt_cdbp;
+ page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
+ switch (page_code) {
+ case 0x3:
+ case 0x4:
+ (void) drsas_mode_sense_build(pkt);
+ return_mfi_pkt(instance, cmd);
+ *cmd_done = 1;
+ return (NULL);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ pthru = (struct drsas_pthru_frame *)cmd->frame;
+
+ /* prepare the DCDB frame */
+ ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
+ MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
+ ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
+ ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
+ ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
+ ddi_put8(acc_handle, &pthru->lun, 0);
+ ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
+ ddi_put16(acc_handle, &pthru->timeout, 0);
+ ddi_put16(acc_handle, &pthru->flags, flags);
+ ddi_put32(acc_handle, &pthru->data_xfer_len,
+ acmd->cmd_dmacount);
+ ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
+ mfi_sgl = (struct drsas_sge64 *)&pthru->sgl;
+
+ bzero(cmd->sense, SENSE_LENGTH);
+ ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
+ ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
+ ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
+ cmd->sense_phys_addr);
+
+ context = ddi_get32(acc_handle, &pthru->context);
+ ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
+ (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
+
+ break;
+ }
+#ifdef lint
+ context = context;
+#endif
+ /* prepare the scatter-gather list for the firmware */
+ for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
+ ddi_put64(acc_handle, &mfi_sgl->phys_addr,
+ acmd->cmd_dmacookies[i].dmac_laddress);
+ ddi_put32(acc_handle, &mfi_sgl->length,
+ acmd->cmd_dmacookies[i].dmac_size);
+ }
+
+ sge_bytes = sizeof (struct drsas_sge64)*acmd->cmd_cookiecnt;
+
+ cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
+ ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;
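+
+ /*
+ * Worked example (assuming sizeof (struct drsas_sge64) == 16 and
+ * MRMFI_FRAME_SIZE == 64): 10 cookies give sge_bytes = 160, so
+ * frame_count = 160/64 + 1 (for the remainder) + 1 (for the main
+ * frame) = 4.
+ */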
+
+ if (cmd->frame_count >= 8) {
+ cmd->frame_count = 8;
+ }
+
+ return (cmd);
+}
+
+/*
+ * issue_mfi_pthru
+ */
+static int
+issue_mfi_pthru(struct drsas_instance *instance, struct drsas_ioctl *ioctl,
+ struct drsas_cmd *cmd, int mode)
+{
+ void *ubuf;
+ uint32_t kphys_addr = 0;
+ uint32_t xferlen = 0;
+ uint_t model;
+ ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
+ dma_obj_t pthru_dma_obj;
+ struct drsas_pthru_frame *kpthru;
+ struct drsas_pthru_frame *pthru;
+ int i;
+ pthru = &cmd->frame->pthru;
+ kpthru = (struct drsas_pthru_frame *)&ioctl->frame[0];
+
+ model = ddi_model_convert_from(mode & FMODELS);
+ if (model == DDI_MODEL_ILP32) {
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_ILP32"));
+
+ xferlen = kpthru->sgl.sge32[0].length;
+
+ ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
+ } else {
+#ifdef _ILP32
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_ILP32"));
+ xferlen = kpthru->sgl.sge32[0].length;
+ ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
+#else
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64"));
+ xferlen = kpthru->sgl.sge64[0].length;
+ ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
+#endif
+ }
+
+ if (xferlen) {
+ /* means IOCTL requires DMA */
+ /* allocate the data transfer buffer */
+ pthru_dma_obj.size = xferlen;
+ pthru_dma_obj.dma_attr = drsas_generic_dma_attr;
+ pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
+ pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
+ pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
+ pthru_dma_obj.dma_attr.dma_attr_align = 1;
+
+ /* allocate kernel buffer for DMA */
+ if (drsas_alloc_dma_obj(instance, &pthru_dma_obj,
+ (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
+ con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
+ "could not allocate data transfer buffer."));
+ return (DDI_FAILURE);
+ }
+
+ /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
+ if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
+ for (i = 0; i < xferlen; i++) {
+ if (ddi_copyin((uint8_t *)ubuf+i,
+ (uint8_t *)pthru_dma_obj.buffer+i,
+ 1, mode)) {
+ con_log(CL_ANN, (CE_WARN,
+ "issue_mfi_pthru : "
+ "copy from user space failed"));
+ return (DDI_FAILURE);
+ }
+ }
+ }
+
+ kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
+ }
+
+ ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
+ ddi_put8(acc_handle, &pthru->sense_len, kpthru->sense_len);
+ ddi_put8(acc_handle, &pthru->cmd_status, 0);
+ ddi_put8(acc_handle, &pthru->scsi_status, 0);
+ ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
+ ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
+ ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
+ ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
+ ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
+ ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
+
+ ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
+ /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */
+ ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0);
+
+ ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
+ pthru->cdb_len, DDI_DEV_AUTOINCR);
+
+ ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
+ ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
+ ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
+
+ cmd->sync_cmd = DRSAS_TRUE;
+ cmd->frame_count = 1;
+
+ if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
+ con_log(CL_ANN, (CE_WARN,
+ "issue_mfi_pthru: fw_ioctl failed"));
+ } else {
+ if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
+ for (i = 0; i < xferlen; i++) {
+ if (ddi_copyout(
+ (uint8_t *)pthru_dma_obj.buffer+i,
+ (uint8_t *)ubuf+i, 1, mode)) {
+ con_log(CL_ANN, (CE_WARN,
+ "issue_mfi_pthru : "
+ "copy to user space failed"));
+ return (DDI_FAILURE);
+ }
+ }
+ }
+ }
+
+ kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
+ kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
+
+ con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, "
+ "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
+
+ if (xferlen) {
+ /* free kernel buffer */
+ if (drsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * issue_mfi_dcmd
+ */
+static int
+issue_mfi_dcmd(struct drsas_instance *instance, struct drsas_ioctl *ioctl,
+ struct drsas_cmd *cmd, int mode)
+{
+ void *ubuf;
+ uint32_t kphys_addr = 0;
+ uint32_t xferlen = 0;
+ uint32_t model;
+ dma_obj_t dcmd_dma_obj;
+ struct drsas_dcmd_frame *kdcmd;
+ struct drsas_dcmd_frame *dcmd;
+ ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
+ int i;
+ dcmd = &cmd->frame->dcmd;
+ kdcmd = (struct drsas_dcmd_frame *)&ioctl->frame[0];
+
+ model = ddi_model_convert_from(mode & FMODELS);
+ if (model == DDI_MODEL_ILP32) {
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
+
+ xferlen = kdcmd->sgl.sge32[0].length;
+
+ ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
+ } else {
+#ifdef _ILP32
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
+ xferlen = kdcmd->sgl.sge32[0].length;
+ ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
+#else
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64"));
+ xferlen = kdcmd->sgl.sge64[0].length;
+ ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
+#endif
+ }
+ if (xferlen) {
+ /* means IOCTL requires DMA */
+ /* allocate the data transfer buffer */
+ dcmd_dma_obj.size = xferlen;
+ dcmd_dma_obj.dma_attr = drsas_generic_dma_attr;
+ dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
+ dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
+ dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
+ dcmd_dma_obj.dma_attr.dma_attr_align = 1;
+
+ /* allocate kernel buffer for DMA */
+ if (drsas_alloc_dma_obj(instance, &dcmd_dma_obj,
+ (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
+ con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
+ "could not allocate data transfer buffer."));
+ return (DDI_FAILURE);
+ }
+
+ /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
+ if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
+ for (i = 0; i < xferlen; i++) {
+ if (ddi_copyin((uint8_t *)ubuf + i,
+ (uint8_t *)dcmd_dma_obj.buffer + i,
+ 1, mode)) {
+ con_log(CL_ANN, (CE_WARN,
+ "issue_mfi_dcmd : "
+ "copy from user space failed"));
+ return (DDI_FAILURE);
+ }
+ }
+ }
+
+ kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
+ }
+
+ ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
+ ddi_put8(acc_handle, &dcmd->cmd_status, 0);
+ ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
+ ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
+ ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
+ ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
+
+ ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
+ (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
+
+ ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
+ ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
+ ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
+
+ cmd->sync_cmd = DRSAS_TRUE;
+ cmd->frame_count = 1;
+
+ if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
+ con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
+ } else {
+ if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
+ for (i = 0; i < xferlen; i++) {
+ if (ddi_copyout(
+ (uint8_t *)dcmd_dma_obj.buffer + i,
+ (uint8_t *)ubuf + i,
+ 1, mode)) {
+ con_log(CL_ANN, (CE_WARN,
+ "issue_mfi_dcmd : "
+ "copy to user space failed"));
+ return (DDI_FAILURE);
+ }
+ }
+ }
+ }
+
+ kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
+
+ if (xferlen) {
+ /* free kernel buffer */
+ if (drsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * issue_mfi_smp
+ */
+static int
+issue_mfi_smp(struct drsas_instance *instance, struct drsas_ioctl *ioctl,
+ struct drsas_cmd *cmd, int mode)
+{
+ void *request_ubuf;
+ void *response_ubuf;
+ uint32_t request_xferlen = 0;
+ uint32_t response_xferlen = 0;
+ uint_t model;
+ dma_obj_t request_dma_obj;
+ dma_obj_t response_dma_obj;
+ ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
+ struct drsas_smp_frame *ksmp;
+ struct drsas_smp_frame *smp;
+ struct drsas_sge32 *sge32;
+#ifndef _ILP32
+ struct drsas_sge64 *sge64;
+#endif
+ int i;
+ uint64_t tmp_sas_addr;
+
+ smp = &cmd->frame->smp;
+ ksmp = (struct drsas_smp_frame *)&ioctl->frame[0];
+
+ model = ddi_model_convert_from(mode & FMODELS);
+ if (model == DDI_MODEL_ILP32) {
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
+
+ sge32 = &ksmp->sgl[0].sge32[0];
+ response_xferlen = sge32[0].length;
+ request_xferlen = sge32[1].length;
+ con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
+ "response_xferlen = %x, request_xferlen = %x",
+ response_xferlen, request_xferlen));
+
+ response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
+ request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
+ "response_ubuf = %p, request_ubuf = %p",
+ response_ubuf, request_ubuf));
+ } else {
+#ifdef _ILP32
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
+
+ sge32 = &ksmp->sgl[0].sge32[0];
+ response_xferlen = sge32[0].length;
+ request_xferlen = sge32[1].length;
+ con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
+ "response_xferlen = %x, request_xferlen = %x",
+ response_xferlen, request_xferlen));
+
+ response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
+ request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
+ "response_ubuf = %p, request_ubuf = %p",
+ response_ubuf, request_ubuf));
+#else
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64"));
+
+ sge64 = &ksmp->sgl[0].sge64[0];
+ response_xferlen = sge64[0].length;
+ request_xferlen = sge64[1].length;
+
+ response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
+ request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
+#endif
+ }
+ if (request_xferlen) {
+ /* means IOCTL requires DMA */
+ /* allocate the data transfer buffer */
+ request_dma_obj.size = request_xferlen;
+ request_dma_obj.dma_attr = drsas_generic_dma_attr;
+ request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
+ request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
+ request_dma_obj.dma_attr.dma_attr_sgllen = 1;
+ request_dma_obj.dma_attr.dma_attr_align = 1;
+
+ /* allocate kernel buffer for DMA */
+ if (drsas_alloc_dma_obj(instance, &request_dma_obj,
+ (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
+ con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
+ "could not allocate data transfer buffer."));
+ return (DDI_FAILURE);
+ }
+
+ /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
+ for (i = 0; i < request_xferlen; i++) {
+ if (ddi_copyin((uint8_t *)request_ubuf + i,
+ (uint8_t *)request_dma_obj.buffer + i,
+ 1, mode)) {
+ con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
+ "copy from user space failed"));
+ return (DDI_FAILURE);
+ }
+ }
+ }
+
+ if (response_xferlen) {
+ /* means IOCTL requires DMA */
+ /* allocate the data transfer buffer */
+ response_dma_obj.size = response_xferlen;
+ response_dma_obj.dma_attr = drsas_generic_dma_attr;
+ response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
+ response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
+ response_dma_obj.dma_attr.dma_attr_sgllen = 1;
+ response_dma_obj.dma_attr.dma_attr_align = 1;
+
+ /* allocate kernel buffer for DMA */
+ if (drsas_alloc_dma_obj(instance, &response_dma_obj,
+ (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
+ con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
+ "could not allocate data transfer buffer."));
+ return (DDI_FAILURE);
+ }
+
+ /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
+ for (i = 0; i < response_xferlen; i++) {
+ if (ddi_copyin((uint8_t *)response_ubuf + i,
+ (uint8_t *)response_dma_obj.buffer + i,
+ 1, mode)) {
+ con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
+ "copy from user space failed"));
+ return (DDI_FAILURE);
+ }
+ }
+ }
+
+ ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
+ ddi_put8(acc_handle, &smp->cmd_status, 0);
+ ddi_put8(acc_handle, &smp->connection_status, 0);
+ ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
+ /* smp->context = ksmp->context; */
+ ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
+ ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
+
+ bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
+ sizeof (uint64_t));
+ ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
+
+ ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
+
+ model = ddi_model_convert_from(mode & FMODELS);
+ if (model == DDI_MODEL_ILP32) {
+ con_log(CL_ANN1, (CE_NOTE,
+ "issue_mfi_smp: DDI_MODEL_ILP32"));
+
+ sge32 = &smp->sgl[0].sge32[0];
+ ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
+ ddi_put32(acc_handle, &sge32[0].phys_addr,
+ response_dma_obj.dma_cookie[0].dmac_address);
+ ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
+ ddi_put32(acc_handle, &sge32[1].phys_addr,
+ request_dma_obj.dma_cookie[0].dmac_address);
+ } else {
+#ifdef _ILP32
+ con_log(CL_ANN1, (CE_NOTE,
+ "issue_mfi_smp: DDI_MODEL_ILP32"));
+ sge32 = &smp->sgl[0].sge32[0];
+ ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
+ ddi_put32(acc_handle, &sge32[0].phys_addr,
+ response_dma_obj.dma_cookie[0].dmac_address);
+ ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
+ ddi_put32(acc_handle, &sge32[1].phys_addr,
+ request_dma_obj.dma_cookie[0].dmac_address);
+#else
+ con_log(CL_ANN1, (CE_NOTE,
+ "issue_mfi_smp: DDI_MODEL_LP64"));
+ sge64 = &smp->sgl[0].sge64[0];
+ ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
+ ddi_put64(acc_handle, &sge64[0].phys_addr,
+ response_dma_obj.dma_cookie[0].dmac_address);
+ ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
+ ddi_put64(acc_handle, &sge64[1].phys_addr,
+ request_dma_obj.dma_cookie[0].dmac_address);
+#endif
+ }
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp : "
+ "smp->response_xferlen = %d, smp->request_xferlen = %d "
+ "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
+ ddi_get32(acc_handle, &sge32[1].length),
+ ddi_get32(acc_handle, &smp->data_xfer_len)));
+
+ cmd->sync_cmd = DRSAS_TRUE;
+ cmd->frame_count = 1;
+
+ if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
+ con_log(CL_ANN, (CE_WARN,
+ "issue_mfi_smp: fw_ioctl failed"));
+ } else {
+ con_log(CL_ANN1, (CE_NOTE,
+ "issue_mfi_smp: copy to user space"));
+
+ if (request_xferlen) {
+ for (i = 0; i < request_xferlen; i++) {
+ if (ddi_copyout(
+ (uint8_t *)request_dma_obj.buffer +
+ i, (uint8_t *)request_ubuf + i,
+ 1, mode)) {
+ con_log(CL_ANN, (CE_WARN,
+ "issue_mfi_smp : copy to user space"
+ " failed"));
+ return (DDI_FAILURE);
+ }
+ }
+ }
+
+ if (response_xferlen) {
+ for (i = 0; i < response_xferlen; i++) {
+ if (ddi_copyout(
+ (uint8_t *)response_dma_obj.buffer
+ + i, (uint8_t *)response_ubuf
+ + i, 1, mode)) {
+ con_log(CL_ANN, (CE_WARN,
+ "issue_mfi_smp : copy to "
+ "user space failed"));
+ return (DDI_FAILURE);
+ }
+ }
+ }
+ }
+
+ ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
+ ddi_get8(acc_handle, &smp->cmd_status)));
+
+
+ if (request_xferlen) {
+ /* free kernel buffer */
+ if (drsas_free_dma_obj(instance, request_dma_obj) !=
+ DDI_SUCCESS)
+ return (DDI_FAILURE);
+ }
+
+ if (response_xferlen) {
+ /* free kernel buffer */
+ if (drsas_free_dma_obj(instance, response_dma_obj) !=
+ DDI_SUCCESS)
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * issue_mfi_stp
+ */
+static int
+issue_mfi_stp(struct drsas_instance *instance, struct drsas_ioctl *ioctl,
+ struct drsas_cmd *cmd, int mode)
+{
+ void *fis_ubuf;
+ void *data_ubuf;
+ uint32_t fis_xferlen = 0;
+ uint32_t data_xferlen = 0;
+ uint_t model;
+ dma_obj_t fis_dma_obj;
+ dma_obj_t data_dma_obj;
+ struct drsas_stp_frame *kstp;
+ struct drsas_stp_frame *stp;
+ ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
+ int i;
+
+ stp = &cmd->frame->stp;
+ kstp = (struct drsas_stp_frame *)&ioctl->frame[0];
+
+ model = ddi_model_convert_from(mode & FMODELS);
+ if (model == DDI_MODEL_ILP32) {
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
+
+ fis_xferlen = kstp->sgl.sge32[0].length;
+ data_xferlen = kstp->sgl.sge32[1].length;
+
+ fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
+ data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
+ } else {
+#ifdef _ILP32
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
+
+ fis_xferlen = kstp->sgl.sge32[0].length;
+ data_xferlen = kstp->sgl.sge32[1].length;
+
+ fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
+ data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
+#else
+ con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64"));
+
+ fis_xferlen = kstp->sgl.sge64[0].length;
+ data_xferlen = kstp->sgl.sge64[1].length;
+
+ fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
+ data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
+#endif
+ }
+
+
+ if (fis_xferlen) {
+ con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: "
+ "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
+
+ /* means IOCTL requires DMA */
+ /* allocate the data transfer buffer */
+ fis_dma_obj.size = fis_xferlen;
+ fis_dma_obj.dma_attr = drsas_generic_dma_attr;
+ fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
+ fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
+ fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
+ fis_dma_obj.dma_attr.dma_attr_align = 1;
+
+ /* allocate kernel buffer for DMA */
+ if (drsas_alloc_dma_obj(instance, &fis_dma_obj,
+ (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
+ con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
+ "could not allocate data transfer buffer."));
+ return (DDI_FAILURE);
+ }
+
+ /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
+ for (i = 0; i < fis_xferlen; i++) {
+ if (ddi_copyin((uint8_t *)fis_ubuf + i,
+ (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
+ con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
+ "copy from user space failed"));
+ return (DDI_FAILURE);
+ }
+ }
+ }
+
+ if (data_xferlen) {
+ con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p "
+ "data_xferlen = %x", data_ubuf, data_xferlen));
+
+ /* means IOCTL requires DMA */
+ /* allocate the data transfer buffer */
+ data_dma_obj.size = data_xferlen;
+ data_dma_obj.dma_attr = drsas_generic_dma_attr;
+ data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
+ data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
+ data_dma_obj.dma_attr.dma_attr_sgllen = 1;
+ data_dma_obj.dma_attr.dma_attr_align = 1;
+
+ /* allocate kernel buffer for DMA */
+ if (drsas_alloc_dma_obj(instance, &data_dma_obj,
+ (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
+ con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
+ "could not allocate data transfer buffer."));
+ return (DDI_FAILURE);
+ }
+
+ /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
+ for (i = 0; i < data_xferlen; i++) {
+ if (ddi_copyin((uint8_t *)data_ubuf + i,
+ (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
+ con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
+ "copy from user space failed"));
+ return (DDI_FAILURE);
+ }
+ }
+ }
+
+ ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
+ ddi_put8(acc_handle, &stp->cmd_status, 0);
+ ddi_put8(acc_handle, &stp->connection_status, 0);
+ ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
+ ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
+
+ ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
+ ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
+
+ ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
+ DDI_DEV_AUTOINCR);
+
+ ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
+ ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
+ ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
+ ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
+ fis_dma_obj.dma_cookie[0].dmac_address);
+ ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
+ ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
+ data_dma_obj.dma_cookie[0].dmac_address);
+
+ cmd->sync_cmd = DRSAS_TRUE;
+ cmd->frame_count = 1;
+
+ if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
+ con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
+ } else {
+
+ if (fis_xferlen) {
+ for (i = 0; i < fis_xferlen; i++) {
+ if (ddi_copyout(
+ (uint8_t *)fis_dma_obj.buffer + i,
+ (uint8_t *)fis_ubuf + i, 1, mode)) {
+ con_log(CL_ANN, (CE_WARN,
+ "issue_mfi_stp : copy to "
+ "user space failed"));
+ return (DDI_FAILURE);
+ }
+ }
+ }
+
+ if (data_xferlen) {
+ for (i = 0; i < data_xferlen; i++) {
+ if (ddi_copyout(
+ (uint8_t *)data_dma_obj.buffer + i,
+ (uint8_t *)data_ubuf + i, 1, mode)) {
+ con_log(CL_ANN, (CE_WARN,
+ "issue_mfi_stp : copy to"
+ " user space failed"));
+ return (DDI_FAILURE);
+ }
+ }
+ }
+ }
+
+ kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
+
+ if (fis_xferlen) {
+ /* free kernel buffer */
+ if (drsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
+ return (DDI_FAILURE);
+ }
+
+ if (data_xferlen) {
+ /* free kernel buffer */
+ if (drsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * fill_up_drv_ver
+ */
+static void
+fill_up_drv_ver(struct drsas_drv_ver *dv)
+{
+ (void) memset(dv, 0, sizeof (struct drsas_drv_ver));
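+
+ /*
+ * The destination fields are fixed-width byte arrays; copying without
+ * the NUL terminator below is safe only because of the memset() above.
+ */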
+
+ (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
+ (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
+ (void) memcpy(dv->drv_name, "dr_sas", strlen("dr_sas"));
+ (void) memcpy(dv->drv_ver, DRSAS_VERSION, strlen(DRSAS_VERSION));
+ (void) memcpy(dv->drv_rel_date, DRSAS_RELDATE,
+ strlen(DRSAS_RELDATE));
+}
+
+/*
+ * handle_drv_ioctl
+ */
+static int
+handle_drv_ioctl(struct drsas_instance *instance, struct drsas_ioctl *ioctl,
+ int mode)
+{
+ int i;
+ int rval = DDI_SUCCESS;
+ int *props = NULL;
+ void *ubuf;
+
+ uint8_t *pci_conf_buf;
+ uint32_t xferlen;
+ uint32_t num_props;
+ uint_t model;
+ struct drsas_dcmd_frame *kdcmd;
+ struct drsas_drv_ver dv;
+ struct drsas_pci_information pi;
+
+ kdcmd = (struct drsas_dcmd_frame *)&ioctl->frame[0];
+
+ model = ddi_model_convert_from(mode & FMODELS);
+ if (model == DDI_MODEL_ILP32) {
+ con_log(CL_ANN1, (CE_NOTE,
+ "handle_drv_ioctl: DDI_MODEL_ILP32"));
+
+ xferlen = kdcmd->sgl.sge32[0].length;
+
+ ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
+ } else {
+#ifdef _ILP32
+ con_log(CL_ANN1, (CE_NOTE,
+ "handle_drv_ioctl: DDI_MODEL_ILP32"));
+ xferlen = kdcmd->sgl.sge32[0].length;
+ ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
+#else
+ con_log(CL_ANN1, (CE_NOTE,
+ "handle_drv_ioctl: DDI_MODEL_LP64"));
+ xferlen = kdcmd->sgl.sge64[0].length;
+ ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
+#endif
+ }
+ con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
+ "dataBuf=%p size=%d bytes", ubuf, xferlen));
+
+ switch (kdcmd->opcode) {
+ case DRSAS_DRIVER_IOCTL_DRIVER_VERSION:
+ con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
+ "DRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
+
+ fill_up_drv_ver(&dv);
+ for (i = 0; i < xferlen; i++) {
+ if (ddi_copyout((uint8_t *)&dv + i, (uint8_t *)ubuf + i,
+ 1, mode)) {
+ con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
+ "DRSAS_DRIVER_IOCTL_DRIVER_VERSION"
+ " : copy to user space failed"));
+ kdcmd->cmd_status = 1;
+ rval = DDI_FAILURE;
+ break;
+ }
+ }
+ if (i == xferlen)
+ kdcmd->cmd_status = 0;
+ break;
+ case DRSAS_DRIVER_IOCTL_PCI_INFORMATION:
+ con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
+ "DRSAS_DRIVER_IOCTL_PCI_INFORMATION"));
+
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
+ 0, "reg", &props, &num_props)) {
+ con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
+ "DRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
+ "ddi_prop_lookup_int_array failed"));
+ rval = DDI_FAILURE;
+ } else {
+
+ pi.busNumber = (props[0] >> 16) & 0xFF;
+ pi.deviceNumber = (props[0] >> 11) & 0x1f;
+ pi.functionNumber = (props[0] >> 8) & 0x7;
+ ddi_prop_free((void *)props);
+ }
+
+ pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
+
+ for (i = 0; i < (sizeof (struct drsas_pci_information) -
+ offsetof(struct drsas_pci_information, pciHeaderInfo));
+ i++) {
+ pci_conf_buf[i] =
+ pci_config_get8(instance->pci_handle, i);
+ }
+ for (i = 0; i < xferlen; i++) {
+ if (ddi_copyout((uint8_t *)&pi + i, (uint8_t *)ubuf + i,
+ 1, mode)) {
+ con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
+ "DRSAS_DRIVER_IOCTL_PCI_INFORMATION"
+ " : copy to user space failed"));
+ kdcmd->cmd_status = 1;
+ rval = DDI_FAILURE;
+ break;
+ }
+ }
+
+ if (i == xferlen)
+ kdcmd->cmd_status = 0;
+
+ break;
+ default:
+ con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
+ "invalid driver specific IOCTL opcode = 0x%x",
+ kdcmd->opcode));
+ kdcmd->cmd_status = 1;
+ rval = DDI_FAILURE;
+ break;
+ }
+
+ return (rval);
+}
+
+/*
+ * handle_mfi_ioctl
+ */
+static int
+handle_mfi_ioctl(struct drsas_instance *instance, struct drsas_ioctl *ioctl,
+ int mode)
+{
+ int rval = DDI_SUCCESS;
+
+ struct drsas_header *hdr;
+ struct drsas_cmd *cmd;
+
+ cmd = get_mfi_pkt(instance);
+
+ if (!cmd) {
+ con_log(CL_ANN, (CE_WARN, "dr_sas: "
+ "failed to get a cmd packet"));
+ return (DDI_FAILURE);
+ }
+
+ /* Clear the frame buffer and assign back the context id */
+ (void) memset((char *)&cmd->frame[0], 0, sizeof (union drsas_frame));
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
+ cmd->index);
+
+ hdr = (struct drsas_header *)&ioctl->frame[0];
+
+ switch (hdr->cmd) {
+ case MFI_CMD_OP_DCMD:
+ rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
+ break;
+ case MFI_CMD_OP_SMP:
+ rval = issue_mfi_smp(instance, ioctl, cmd, mode);
+ break;
+ case MFI_CMD_OP_STP:
+ rval = issue_mfi_stp(instance, ioctl, cmd, mode);
+ break;
+ case MFI_CMD_OP_LD_SCSI:
+ case MFI_CMD_OP_PD_SCSI:
+ rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
+ break;
+ default:
+ con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
+ "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
+ rval = DDI_FAILURE;
+ break;
+ }
+
+
+ return_mfi_pkt(instance, cmd);
+ if (drsas_common_check(instance, cmd) != DDI_SUCCESS)
+ rval = DDI_FAILURE;
+ return (rval);
+}
+
+/*
+ * AEN
+ */
+static int
+handle_mfi_aen(struct drsas_instance *instance, struct drsas_aen *aen)
+{
+ int rval = 0;
+
+ rval = register_mfi_aen(instance, instance->aen_seq_num,
+ aen->class_locale_word);
+
+ aen->cmd_status = (uint8_t)rval;
+
+ return (rval);
+}
+
+static int
+register_mfi_aen(struct drsas_instance *instance, uint32_t seq_num,
+ uint32_t class_locale_word)
+{
+ int ret_val;
+
+ struct drsas_cmd *cmd, *aen_cmd;
+ struct drsas_dcmd_frame *dcmd;
+ union drsas_evt_class_locale curr_aen;
+ union drsas_evt_class_locale prev_aen;
+
+ /*
+ * If there is an AEN pending already (aen_cmd), check if the
+ * class_locale of that pending AEN is inclusive of the new
+ * AEN request we currently have. If it is, then we don't have
+ * to do anything; whichever events the current AEN request is
+ * subscribing to have already been subscribed to.
+ *
+ * If the old cmd is _not_ inclusive, then we have to abort
+ * that command, form a class_locale that is a superset of both
+ * the old and the current one, and re-issue it to the FW
+ */
+
+ curr_aen.word = class_locale_word;
+ aen_cmd = instance->aen_cmd;
+ if (aen_cmd) {
+ prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
+ &aen_cmd->frame->dcmd.mbox.w[1]);
+
+ /*
+ * A class whose enum value is smaller is inclusive of all
+ * higher values. If a PROGRESS (= -1) was previously
+ * registered, then new registration requests for higher
+ * classes need not be sent to the FW. They are automatically
+ * included.
+ *
+ * Locale numbers don't have such hierarchy. They are bitmap
+ * values
+ */
+ if ((prev_aen.members.class <= curr_aen.members.class) &&
+ !((prev_aen.members.locale & curr_aen.members.locale) ^
+ curr_aen.members.locale)) {
+ /*
+ * Previously issued event registration includes
+ * current request. Nothing to do.
+ */
+
+ return (0);
+ } else {
+ curr_aen.members.locale |= prev_aen.members.locale;
+
+ if (prev_aen.members.class < curr_aen.members.class)
+ curr_aen.members.class = prev_aen.members.class;
+
+ ret_val = abort_aen_cmd(instance, aen_cmd);
+
+ if (ret_val) {
+ con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
+ "failed to abort previous AEN command"));
+
+ return (ret_val);
+ }
+ }
+ } else {
+ curr_aen.word = class_locale_word;
+ }
+
+ cmd = get_mfi_pkt(instance);
+
+ if (!cmd)
+ return (ENOMEM);
+ /* Clear the frame buffer and assign back the context id */
+ (void) memset((char *)&cmd->frame[0], 0, sizeof (union drsas_frame));
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
+ cmd->index);
+
+ dcmd = &cmd->frame->dcmd;
+
+ /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
+ (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
+
+ (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
+ sizeof (struct drsas_evt_detail));
+
+ /* Prepare DCMD for aen registration */
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
+ ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
+ MFI_FRAME_DIR_READ);
+ ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
+ sizeof (struct drsas_evt_detail));
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
+ DR_DCMD_CTRL_EVENT_WAIT);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
+ curr_aen.word);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
+ instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
+ ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
+ sizeof (struct drsas_evt_detail));
+
+ instance->aen_seq_num = seq_num;
+
+
+ /*
+ * Store reference to the cmd used to register for AEN. When an
+ * application wants us to register for AEN, we have to abort this
+ * cmd and re-register with a new EVENT LOCALE supplied by that app
+ */
+ instance->aen_cmd = cmd;
+
+ cmd->frame_count = 1;
+
+ /* Issue the aen registration frame */
+ /* atomic_add_16 (&instance->fw_outstanding, 1); */
+ instance->func_ptr->issue_cmd(cmd, instance);
+
+ return (0);
+}
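+
+/*
+ * Worked example of the merge above (hypothetical values): if the
+ * pending AEN was registered with class PROGRESS and locale 0x0001, and
+ * an application now asks for class CRITICAL and locale 0x0002, the
+ * pending registration is not inclusive (locale bit 0x0002 is not
+ * covered).  The old command is aborted and re-issued with the superset:
+ * the numerically smaller (more verbose) class, PROGRESS, and the OR of
+ * the locales, 0x0003.
+ */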
+
+static void
+display_scsi_inquiry(caddr_t scsi_inq)
+{
+#define MAX_SCSI_DEVICE_CODE 14
+ int i;
+ char inquiry_buf[256] = {0};
+ int len;
+ const char *const scsi_device_types[] = {
+ "Direct-Access ",
+ "Sequential-Access",
+ "Printer ",
+ "Processor ",
+ "WORM ",
+ "CD-ROM ",
+ "Scanner ",
+ "Optical Device ",
+ "Medium Changer ",
+ "Communications ",
+ "Unknown ",
+ "Unknown ",
+ "Unknown ",
+ "Enclosure ",
+ };
+
+ len = 0;
+
+ len += snprintf(inquiry_buf + len, 256 - len, " Vendor: ");
+ for (i = 8; i < 16; i++) {
+ len += snprintf(inquiry_buf + len, 256 - len, "%c",
+ scsi_inq[i]);
+ }
+
+ len += snprintf(inquiry_buf + len, 256 - len, " Model: ");
+
+ for (i = 16; i < 32; i++) {
+ len += snprintf(inquiry_buf + len, 256 - len, "%c",
+ scsi_inq[i]);
+ }
+
+ len += snprintf(inquiry_buf + len, 256 - len, " Rev: ");
+
+ for (i = 32; i < 36; i++) {
+ len += snprintf(inquiry_buf + len, 256 - len, "%c",
+ scsi_inq[i]);
+ }
+
+ len += snprintf(inquiry_buf + len, 256 - len, "\n");
+
+ i = scsi_inq[0] & 0x1f;
+
+ len += snprintf(inquiry_buf + len, 256 - len, " Type: %s ",
+ i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
+ "Unknown ");
+
+ len += snprintf(inquiry_buf + len, 256 - len,
+ " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
+
+ if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
+ len += snprintf(inquiry_buf + len, 256 - len, " CCS\n");
+ } else {
+ len += snprintf(inquiry_buf + len, 256 - len, "\n");
+ }
+
+ con_log(CL_ANN1, (CE_CONT, "%s", inquiry_buf));
+}
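+
+/*
+ * Example of the resulting console output (hypothetical device):
+ *
+ *      Vendor: VENDOR A Model: MODEL 1234       Rev: 0123
+ *      Type: Direct-Access      ANSI SCSI revision: 05
+ */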
+
+static int
+read_fw_status_reg_ppc(struct drsas_instance *instance)
+{
+ return ((int)RD_OB_SCRATCH_PAD_0(instance));
+}
+
+static void
+issue_cmd_ppc(struct drsas_cmd *cmd, struct drsas_instance *instance)
+{
+ atomic_add_16(&instance->fw_outstanding, 1);
+
+ /* Issue the command to the FW */
+ WR_IB_QPORT((cmd->frame_phys_addr) |
+ (((cmd->frame_count - 1) << 1) | 1), instance);
+}
+
+/*
+ * issue_cmd_in_sync_mode
+ */
+static int
+issue_cmd_in_sync_mode_ppc(struct drsas_instance *instance,
+ struct drsas_cmd *cmd)
+{
+ int i;
+ uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
+
+ con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));
+
+ cmd->cmd_status = ENODATA;
+
+ WR_IB_QPORT((cmd->frame_phys_addr) |
+ (((cmd->frame_count - 1) << 1) | 1), instance);
+
+ mutex_enter(&instance->int_cmd_mtx);
+
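+ /*
+ * cv_wait() has no timeout here; the loop bound only caps how many
+ * wakeups (broadcasts or spurious) are tolerated while cmd_status
+ * still holds the ENODATA in-flight sentinel.
+ */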
+ for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
+ cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
+ }
+
+ mutex_exit(&instance->int_cmd_mtx);
+
+ con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));
+
+ if (i < (msecs - 1)) {
+ return (DDI_SUCCESS);
+ } else {
+ return (DDI_FAILURE);
+ }
+}
+
+/*
+ * issue_cmd_in_poll_mode
+ */
+static int
+issue_cmd_in_poll_mode_ppc(struct drsas_instance *instance,
+ struct drsas_cmd *cmd)
+{
+ int i;
+ uint16_t flags;
+ uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
+ struct drsas_header *frame_hdr;
+
+ con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
+
+ frame_hdr = (struct drsas_header *)cmd->frame;
+ ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
+ MFI_CMD_STATUS_POLL_MODE);
+ flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
+ flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+ ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
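+
+ /*
+ * The frame's cmd_status was pre-loaded with the POLL_MODE sentinel
+ * above; the FW overwrites it on completion, so the loop below simply
+ * watches for the sentinel to change, in 1ms steps, for up to
+ * MFI_POLL_TIMEOUT_SECS.
+ */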
+
+ /* issue the frame using inbound queue port */
+ WR_IB_QPORT((cmd->frame_phys_addr) |
+ (((cmd->frame_count - 1) << 1) | 1), instance);
+
+ /* wait for cmd_status to change from 0xFF */
+ for (i = 0; i < msecs && (
+ ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
+ == MFI_CMD_STATUS_POLL_MODE); i++) {
+ drv_usecwait(MILLISEC); /* wait for 1000 usecs */
+ }
+
+ if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
+ == MFI_CMD_STATUS_POLL_MODE) {
+ con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
+ "cmd polling timed out"));
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+static void
+enable_intr_ppc(struct drsas_instance *instance)
+{
+ uint32_t mask;
+
+ con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));
+
+ /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
+ WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
+
+ /* WR_OB_INTR_MASK(~0x80000000, instance); */
+ WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
+
+ /* dummy read to force PCI flush */
+ mask = RD_OB_INTR_MASK(instance);
+
+ con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
+ "outbound_intr_mask = 0x%x", mask));
+}
+
+static void
+disable_intr_ppc(struct drsas_instance *instance)
+{
+ uint32_t mask;
+
+ con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));
+
+ con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
+ "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
+
+ /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
+ WR_OB_INTR_MASK(OB_INTR_MASK, instance);
+
+ con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
+ "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
+
+ /* dummy read to force PCI flush */
+ mask = RD_OB_INTR_MASK(instance);
+#ifdef lint
+ mask = mask;
+#endif
+}
+
+static int
+intr_ack_ppc(struct drsas_instance *instance)
+{
+ uint32_t status;
+
+ con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));
+
+ /* check if it is our interrupt */
+ status = RD_OB_INTR_STATUS(instance);
+
+ con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));
+
+ if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
+ return (DDI_INTR_UNCLAIMED);
+ }
+
+ /* clear the interrupt by writing back the same value */
+ WR_OB_DOORBELL_CLEAR(status, instance);
+
+ /* dummy READ */
+ status = RD_OB_INTR_STATUS(instance);
+
+ con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));
+
+ return (DDI_INTR_CLAIMED);
+}
+
+static int
+drsas_common_check(struct drsas_instance *instance,
+ struct drsas_cmd *cmd)
+{
+ int ret = DDI_SUCCESS;
+
+ if (drsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
+ DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
+ if (cmd->pkt != NULL) {
+ cmd->pkt->pkt_reason = CMD_TRAN_ERR;
+ cmd->pkt->pkt_statistics = 0;
+ }
+ ret = DDI_FAILURE;
+ }
+ if (drsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
+ != DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
+ if (cmd->pkt != NULL) {
+ cmd->pkt->pkt_reason = CMD_TRAN_ERR;
+ cmd->pkt->pkt_statistics = 0;
+ }
+ ret = DDI_FAILURE;
+ }
+ if (drsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
+ DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
+ if (cmd->pkt != NULL) {
+ cmd->pkt->pkt_reason = CMD_TRAN_ERR;
+ cmd->pkt->pkt_statistics = 0;
+ }
+ ret = DDI_FAILURE;
+ }
+ if (drsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
+ ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
+
+ ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
+
+ if (cmd->pkt != NULL) {
+ cmd->pkt->pkt_reason = CMD_TRAN_ERR;
+ cmd->pkt->pkt_statistics = 0;
+ }
+ ret = DDI_FAILURE;
+ }
+
+ return (ret);
+}
+
+/*ARGSUSED*/
+static int
+drsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
+{
+ /*
+ * as the driver can always deal with an error in any dma or
+ * access handle, we can just return the fme_status value.
+ */
+ pci_ereport_post(dip, err, NULL);
+ return (err->fme_status);
+}
+
+static void
+drsas_fm_init(struct drsas_instance *instance)
+{
+ /* Need to change iblock to priority for new MSI intr */
+ ddi_iblock_cookie_t fm_ibc;
+
+ /* Only register with IO Fault Services if we have some capability */
+ if (instance->fm_capabilities) {
+ /* Adjust access and dma attributes for FMA */
+ endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
+ drsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
+
+ /*
+ * Register capabilities with IO Fault Services.
+ * fm_capabilities will be updated to indicate
+ * capabilities actually supported (not requested.)
+ */
+
+ ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
+
+ /*
+ * Initialize pci ereport capabilities if ereport
+ * capable (should always be.)
+ */
+
+ if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
+ DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
+ pci_ereport_setup(instance->dip);
+ }
+
+ /*
+ * Register error callback if error callback capable.
+ */
+ if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
+ ddi_fm_handler_register(instance->dip,
+ drsas_fm_error_cb, (void*) instance);
+ }
+ } else {
+ endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
+ drsas_generic_dma_attr.dma_attr_flags = 0;
+ }
+}
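+
+/*
+ * A sketch of where fm_capabilities would come from (the property lookup
+ * below is the standard illumos attach-time idiom, shown here only as an
+ * illustration, not a quote from this driver):
+ *
+ *	instance->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY,
+ *	    instance->dip, DDI_PROP_DONTPASS, "fm-capable",
+ *	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
+ *	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
+ */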
+
+static void
+drsas_fm_fini(struct drsas_instance *instance)
+{
+ /* Only unregister FMA capabilities if registered */
+ if (instance->fm_capabilities) {
+ /*
+ * Un-register error callback if error callback capable.
+ */
+ if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
+ ddi_fm_handler_unregister(instance->dip);
+ }
+
+ /*
+ * Release any resources allocated by pci_ereport_setup()
+ */
+ if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
+ DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
+ pci_ereport_teardown(instance->dip);
+ }
+
+ /* Unregister from IO Fault Services */
+ ddi_fm_fini(instance->dip);
+
+ /* Adjust access and dma attributes for FMA */
+ endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
+ drsas_generic_dma_attr.dma_attr_flags = 0;
+ }
+}
+
+int
+drsas_check_acc_handle(ddi_acc_handle_t handle)
+{
+ ddi_fm_error_t de;
+
+ if (handle == NULL) {
+ return (DDI_FAILURE);
+ }
+
+ ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
+
+ return (de.fme_status);
+}
+
+int
+drsas_check_dma_handle(ddi_dma_handle_t handle)
+{
+ ddi_fm_error_t de;
+
+ if (handle == NULL) {
+ return (DDI_FAILURE);
+ }
+
+ ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
+
+ return (de.fme_status);
+}
+
+void
+drsas_fm_ereport(struct drsas_instance *instance, char *detail)
+{
+ uint64_t ena;
+ char buf[FM_MAX_CLASS];
+
+ (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
+ ena = fm_ena_generate(0, FM_ENA_FMT1);
+ if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
+ ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
+ }
+}
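+
+/*
+ * Illustrative call sequence (a sketch, using the stock DDI_FM_DEVICE_*
+ * detail strings): on a detected device fault one would post an ereport
+ * and then declare the service impact, e.g.
+ *
+ *	drsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
+ *	ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
+ */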
+
+static int
+drsas_add_intrs(struct drsas_instance *instance, int intr_type)
+{
+
+ dev_info_t *dip = instance->dip;
+ int avail, actual, count;
+	int	i, j, flag, ret;
+
+ con_log(CL_DLEVEL1, (CE_WARN, "drsas_add_intrs: intr_type = %x",
+ intr_type));
+
+ /* Get number of interrupts */
+ ret = ddi_intr_get_nintrs(dip, intr_type, &count);
+ if ((ret != DDI_SUCCESS) || (count == 0)) {
+		con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed: "
+		    "ret %d count %d", ret, count));
+
+ return (DDI_FAILURE);
+ }
+
+ con_log(CL_DLEVEL1, (CE_WARN, "drsas_add_intrs: count = %d ", count));
+
+ /* Get number of available interrupts */
+ ret = ddi_intr_get_navail(dip, intr_type, &avail);
+ if ((ret != DDI_SUCCESS) || (avail == 0)) {
+		con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed: "
+		    "ret %d avail %d", ret, avail));
+
+ return (DDI_FAILURE);
+ }
+ con_log(CL_DLEVEL1, (CE_WARN, "drsas_add_intrs: avail = %d ", avail));
+
+	/* We support only one interrupt routine, so limit the count to 1. */
+ if (count > 1) {
+ count = 1;
+ }
+
+ /*
+ * Allocate an array of interrupt handlers. Currently we support
+ * only one interrupt. The framework can be extended later.
+ */
+ instance->intr_size = count * sizeof (ddi_intr_handle_t);
+ instance->intr_htable = kmem_zalloc(instance->intr_size, KM_SLEEP);
+ ASSERT(instance->intr_htable);
+
+	flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
+	    (intr_type == DDI_INTR_TYPE_MSIX)) ?
+	    DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;
+
+ /* Allocate interrupt */
+ ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
+ count, &actual, flag);
+
+ if ((ret != DDI_SUCCESS) || (actual == 0)) {
+		con_log(CL_ANN, (CE_WARN, "drsas_add_intrs: "
+		    "ddi_intr_alloc() failed: ret = %d actual = %d",
+		    ret, actual));
+ kmem_free(instance->intr_htable, instance->intr_size);
+ return (DDI_FAILURE);
+ }
+ if (actual < count) {
+ con_log(CL_ANN, (CE_WARN, "drsas_add_intrs: "
+ "Requested = %d Received = %d", count, actual));
+ }
+ instance->intr_cnt = actual;
+
+ /*
+ * Get the priority of the interrupt allocated.
+ */
+ if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
+ &instance->intr_pri)) != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN, "drsas_add_intrs: "
+ "get priority call failed"));
+
+ for (i = 0; i < actual; i++) {
+ (void) ddi_intr_free(instance->intr_htable[i]);
+ }
+ kmem_free(instance->intr_htable, instance->intr_size);
+ return (DDI_FAILURE);
+ }
+
+ /*
+	 * Test for high-level interrupts; we don't support them.
+ */
+ if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
+ con_log(CL_ANN, (CE_WARN, "drsas_add_intrs: "
+ "High level interrupts not supported."));
+
+ for (i = 0; i < actual; i++) {
+ (void) ddi_intr_free(instance->intr_htable[i]);
+ }
+ kmem_free(instance->intr_htable, instance->intr_size);
+ return (DDI_FAILURE);
+ }
+
+ con_log(CL_DLEVEL1, (CE_NOTE, "drsas_add_intrs: intr_pri = 0x%x ",
+ instance->intr_pri));
+
+ /* Call ddi_intr_add_handler() */
+ for (i = 0; i < actual; i++) {
+ ret = ddi_intr_add_handler(instance->intr_htable[i],
+ (ddi_intr_handler_t *)drsas_isr, (caddr_t)instance,
+ (caddr_t)(uintptr_t)i);
+
+ if (ret != DDI_SUCCESS) {
+			con_log(CL_ANN, (CE_WARN, "drsas_add_intrs: "
+			    "ddi_intr_add_handler() failed %d", ret));
+
+			/*
+			 * Remove the handlers already added, then free every
+			 * allocated handle. A separate index is used so the
+			 * outer loop counter is not clobbered.
+			 */
+			for (j = 0; j < i; j++) {
+				(void) ddi_intr_remove_handler(
+				    instance->intr_htable[j]);
+			}
+			for (j = 0; j < actual; j++) {
+				(void) ddi_intr_free(instance->intr_htable[j]);
+			}
+ kmem_free(instance->intr_htable, instance->intr_size);
+ return (DDI_FAILURE);
+ }
+
+ }
+
+	con_log(CL_DLEVEL1, (CE_WARN, "ddi_intr_add_handler done"));
+
+ if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
+ &instance->intr_cap)) != DDI_SUCCESS) {
+ con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
+ ret));
+
+ /* Free already allocated intr */
+ for (i = 0; i < actual; i++) {
+ (void) ddi_intr_remove_handler(
+ instance->intr_htable[i]);
+ (void) ddi_intr_free(instance->intr_htable[i]);
+ }
+ kmem_free(instance->intr_htable, instance->intr_size);
+ return (DDI_FAILURE);
+ }
+
+ if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
+		con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block_enable"));
+
+ (void) ddi_intr_block_enable(instance->intr_htable,
+ instance->intr_cnt);
+ } else {
+		con_log(CL_ANN, (CE_NOTE, "Calling ddi_intr_enable"));
+
+ for (i = 0; i < instance->intr_cnt; i++) {
+ (void) ddi_intr_enable(instance->intr_htable[i]);
+			con_log(CL_ANN, (CE_NOTE, "ddi_intr_enable: "
+			    "enabled interrupt %d", i));
+ }
+ }
+
+ return (DDI_SUCCESS);
+
+}
+
+
+static void
+drsas_rem_intrs(struct drsas_instance *instance)
+{
+ int i;
+
+ con_log(CL_ANN, (CE_NOTE, "drsas_rem_intrs called"));
+
+ /* Disable all interrupts first */
+ if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
+ (void) ddi_intr_block_disable(instance->intr_htable,
+ instance->intr_cnt);
+ } else {
+ for (i = 0; i < instance->intr_cnt; i++) {
+ (void) ddi_intr_disable(instance->intr_htable[i]);
+ }
+ }
+
+ /* Remove all the handlers */
+
+ for (i = 0; i < instance->intr_cnt; i++) {
+ (void) ddi_intr_remove_handler(instance->intr_htable[i]);
+ (void) ddi_intr_free(instance->intr_htable[i]);
+ }
+
+ kmem_free(instance->intr_htable, instance->intr_size);
+}
+
+static int
+drsas_tran_bus_config(dev_info_t *parent, uint_t flags,
+ ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
+{
+ struct drsas_instance *instance;
+ int config;
+ int rval;
+
+ char *ptr = NULL;
+ int tgt, lun;
+
+ con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));
+
+ if ((instance = ddi_get_soft_state(drsas_state,
+ ddi_get_instance(parent))) == NULL) {
+ return (NDI_FAILURE);
+ }
+
+ /* Hold nexus during bus_config */
+ ndi_devi_enter(parent, &config);
+ switch (op) {
+ case BUS_CONFIG_ONE: {
+
+ /* parse wwid/target name out of name given */
+ if ((ptr = strchr((char *)arg, '@')) == NULL) {
+ rval = NDI_FAILURE;
+ break;
+ }
+ ptr++;
+
+ if (drsas_parse_devname(arg, &tgt, &lun) != 0) {
+ rval = NDI_FAILURE;
+ break;
+ }
+
+ if (lun == 0) {
+ rval = drsas_config_ld(instance, tgt, lun, childp);
+ } else {
+ rval = NDI_FAILURE;
+ }
+
+ break;
+ }
+ case BUS_CONFIG_DRIVER:
+ case BUS_CONFIG_ALL: {
+
+		/*
+		 * drsas_config_all_devices() always returns NDI_SUCCESS,
+		 * so simply report success for these ops.
+		 */
+		(void) drsas_config_all_devices(instance);
+		rval = NDI_SUCCESS;
+		break;
+	}
+	default:
+		rval = NDI_FAILURE;
+		break;
+ }
+
+ if (rval == NDI_SUCCESS) {
+ rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
+
+ }
+ ndi_devi_exit(parent, config);
+
+ con_log(CL_ANN1, (CE_NOTE, "drsas_tran_bus_config: rval = %x",
+ rval));
+ return (rval);
+}
+
+static int
+drsas_config_all_devices(struct drsas_instance *instance)
+{
+ int rval, tgt;
+
+ for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
+ (void) drsas_config_ld(instance, tgt, 0, NULL);
+
+ }
+
+ rval = NDI_SUCCESS;
+ return (rval);
+}
+
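+/*
+ * drsas_parse_devname() consumes the name@address form handed to
+ * BUS_CONFIG_ONE; e.g. "sd@1,0" parses (hexadecimal) to tgt = 1, lun = 0.
+ */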
+static int
+drsas_parse_devname(char *devnm, int *tgt, int *lun)
+{
+ char devbuf[SCSI_MAXNAMELEN];
+ char *addr;
+ char *p, *tp, *lp;
+ long num;
+
+ /* Parse dev name and address */
+ (void) strcpy(devbuf, devnm);
+ addr = "";
+ for (p = devbuf; *p != '\0'; p++) {
+ if (*p == '@') {
+ addr = p + 1;
+ *p = '\0';
+ } else if (*p == ':') {
+ *p = '\0';
+ break;
+ }
+ }
+
+ /* Parse target and lun */
+ for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
+ if (*p == ',') {
+ lp = p + 1;
+ *p = '\0';
+ break;
+ }
+ }
+ if (tgt && tp) {
+ if (ddi_strtol(tp, NULL, 0x10, &num)) {
+			return (DDI_FAILURE);
+ }
+ *tgt = (int)num;
+ }
+ if (lun && lp) {
+ if (ddi_strtol(lp, NULL, 0x10, &num)) {
+ return (DDI_FAILURE);
+ }
+ *lun = (int)num;
+ }
+	return (DDI_SUCCESS);
+}
+
+static int
+drsas_config_ld(struct drsas_instance *instance, uint16_t tgt,
+ uint8_t lun, dev_info_t **ldip)
+{
+ struct scsi_device *sd;
+ dev_info_t *child;
+ int rval;
+
+ con_log(CL_ANN1, (CE_NOTE, "drsas_config_ld: t = %d l = %d",
+ tgt, lun));
+
+ if ((child = drsas_find_child(instance, tgt, lun)) != NULL) {
+ if (ldip) {
+ *ldip = child;
+ }
+ con_log(CL_ANN1, (CE_NOTE,
+ "drsas_config_ld: Child = %p found t = %d l = %d",
+ (void *)child, tgt, lun));
+ return (NDI_SUCCESS);
+ }
+
+ sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
+ sd->sd_address.a_hba_tran = instance->tran;
+ sd->sd_address.a_target = (uint16_t)tgt;
+ sd->sd_address.a_lun = (uint8_t)lun;
+
+ if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
+ rval = drsas_config_scsi_device(instance, sd, ldip);
+ else
+ rval = NDI_FAILURE;
+
+	/* scsi_unprobe() is a no-op here, so free the inquiry buffer manually */
+ if (sd->sd_inq) {
+ kmem_free(sd->sd_inq, SUN_INQSIZE);
+ sd->sd_inq = (struct scsi_inquiry *)NULL;
+ }
+
+ kmem_free(sd, sizeof (struct scsi_device));
+ con_log(CL_ANN1, (CE_NOTE, "drsas_config_ld: return rval = %d",
+ rval));
+ return (rval);
+}
+
+static int
+drsas_config_scsi_device(struct drsas_instance *instance,
+ struct scsi_device *sd, dev_info_t **dipp)
+{
+ char *nodename = NULL;
+ char **compatible = NULL;
+ int ncompatible = 0;
+ char *childname;
+ dev_info_t *ldip = NULL;
+ int tgt = sd->sd_address.a_target;
+ int lun = sd->sd_address.a_lun;
+ int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
+ int rval;
+
+ con_log(CL_ANN1, (CE_WARN, "dr_sas: scsi_device t%dL%d", tgt, lun));
+ scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
+ NULL, &nodename, &compatible, &ncompatible);
+
+ if (nodename == NULL) {
+ con_log(CL_ANN1, (CE_WARN, "dr_sas: Found no compatible driver "
+ "for t%dL%d", tgt, lun));
+ rval = NDI_FAILURE;
+ goto finish;
+ }
+
+ childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
+ con_log(CL_ANN1, (CE_WARN,
+	    "dr_sas: Childname = %s nodename = %s", childname, nodename));
+
+ /* Create a dev node */
+ rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
+ con_log(CL_ANN1, (CE_WARN,
+ "dr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
+ if (rval == NDI_SUCCESS) {
+ if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
+ DDI_PROP_SUCCESS) {
+ con_log(CL_ANN1, (CE_WARN, "dr_sas: unable to create "
+ "property for t%dl%d target", tgt, lun));
+ rval = NDI_FAILURE;
+ goto finish;
+ }
+ if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
+ DDI_PROP_SUCCESS) {
+ con_log(CL_ANN1, (CE_WARN, "dr_sas: unable to create "
+ "property for t%dl%d lun", tgt, lun));
+ rval = NDI_FAILURE;
+ goto finish;
+ }
+
+ if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
+ "compatible", compatible, ncompatible) !=
+ DDI_PROP_SUCCESS) {
+ con_log(CL_ANN1, (CE_WARN, "dr_sas: unable to create "
+ "property for t%dl%d compatible", tgt, lun));
+ rval = NDI_FAILURE;
+ goto finish;
+ }
+
+ rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
+ if (rval != NDI_SUCCESS) {
+ con_log(CL_ANN1, (CE_WARN, "dr_sas: unable to online "
+ "t%dl%d", tgt, lun));
+ ndi_prop_remove_all(ldip);
+ (void) ndi_devi_free(ldip);
+ } else {
+			con_log(CL_ANN1, (CE_WARN, "dr_sas: online done: "
+			    "t%dl%d", tgt, lun));
+ }
+
+ }
+finish:
+ if (dipp) {
+ *dipp = ldip;
+ }
+
+ con_log(CL_DLEVEL1, (CE_WARN,
+ "dr_sas: config_scsi_device rval = %d t%dL%d",
+ rval, tgt, lun));
+ scsi_hba_nodename_compatible_free(nodename, compatible);
+ return (rval);
+}
+
+/*ARGSUSED*/
+static int
+drsas_service_evt(struct drsas_instance *instance, int tgt, int lun, int event,
+ uint64_t wwn)
+{
+ struct drsas_eventinfo *mrevt = NULL;
+
+ con_log(CL_ANN1, (CE_NOTE,
+ "drsas_service_evt called for t%dl%d event = %d",
+ tgt, lun, event));
+
+ if ((instance->taskq == NULL) || (mrevt =
+ kmem_zalloc(sizeof (struct drsas_eventinfo), KM_NOSLEEP)) == NULL) {
+ return (ENOMEM);
+ }
+
+ mrevt->instance = instance;
+ mrevt->tgt = tgt;
+ mrevt->lun = lun;
+ mrevt->event = event;
+
+ if ((ddi_taskq_dispatch(instance->taskq,
+ (void (*)(void *))drsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
+ DDI_SUCCESS) {
+ con_log(CL_ANN1, (CE_NOTE,
+ "dr_sas: Event task failed for t%dl%d event = %d",
+ tgt, lun, event));
+ kmem_free(mrevt, sizeof (struct drsas_eventinfo));
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+}
+
+static void
+drsas_issue_evt_taskq(struct drsas_eventinfo *mrevt)
+{
+ struct drsas_instance *instance = mrevt->instance;
+ dev_info_t *dip, *pdip;
+ int circ1 = 0;
+ char *devname;
+
+ con_log(CL_ANN1, (CE_NOTE, "drsas_issue_evt_taskq: called for"
+ " tgt %d lun %d event %d",
+ mrevt->tgt, mrevt->lun, mrevt->event));
+
+ if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
+ dip = instance->dr_ld_list[mrevt->tgt].dip;
+ } else {
+ return;
+ }
+
+ ndi_devi_enter(instance->dip, &circ1);
+ switch (mrevt->event) {
+ case DRSAS_EVT_CONFIG_TGT:
+ if (dip == NULL) {
+
+ if (mrevt->lun == 0) {
+ (void) drsas_config_ld(instance, mrevt->tgt,
+ 0, NULL);
+ }
+ con_log(CL_ANN1, (CE_NOTE,
+ "dr_sas: EVT_CONFIG_TGT called:"
+ " for tgt %d lun %d event %d",
+ mrevt->tgt, mrevt->lun, mrevt->event));
+
+ } else {
+ con_log(CL_ANN1, (CE_NOTE,
+ "dr_sas: EVT_CONFIG_TGT dip != NULL:"
+ " for tgt %d lun %d event %d",
+ mrevt->tgt, mrevt->lun, mrevt->event));
+ }
+ break;
+ case DRSAS_EVT_UNCONFIG_TGT:
+ if (dip) {
+ if (i_ddi_devi_attached(dip)) {
+
+ pdip = ddi_get_parent(dip);
+
+ devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
+ (void) ddi_deviname(dip, devname);
+
+ (void) devfs_clean(pdip, devname + 1,
+ DV_CLEAN_FORCE);
+ kmem_free(devname, MAXNAMELEN + 1);
+ }
+ (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
+ con_log(CL_ANN1, (CE_NOTE,
+ "dr_sas: EVT_UNCONFIG_TGT called:"
+ " for tgt %d lun %d event %d",
+ mrevt->tgt, mrevt->lun, mrevt->event));
+ } else {
+ con_log(CL_ANN1, (CE_NOTE,
+ "dr_sas: EVT_UNCONFIG_TGT dip == NULL:"
+ " for tgt %d lun %d event %d",
+ mrevt->tgt, mrevt->lun, mrevt->event));
+ }
+ break;
+ }
+ kmem_free(mrevt, sizeof (struct drsas_eventinfo));
+ ndi_devi_exit(instance->dip, circ1);
+}
+
+static int
+drsas_mode_sense_build(struct scsi_pkt *pkt)
+{
+ union scsi_cdb *cdbp;
+ uint16_t page_code;
+ struct scsa_cmd *acmd;
+ struct buf *bp;
+ struct mode_header *modehdrp;
+
+ cdbp = (void *)pkt->pkt_cdbp;
+ page_code = cdbp->cdb_un.sg.scsi[0];
+ acmd = PKT2CMD(pkt);
+ bp = acmd->cmd_buf;
+	/* fail the command when there is no valid buffer to build into */
+	if (bp == NULL || bp->b_un.b_addr == NULL || bp->b_bcount == 0 ||
+	    acmd->cmd_dmacount == 0) {
+		con_log(CL_ANN1, (CE_WARN, "Failing MODE SENSE command"));
+		/* TODO: update pkt statistics for the failed command */
+		return (DDI_FAILURE);
+	}
+
+ bp_mapin(bp);
+ bzero(bp->b_un.b_addr, bp->b_bcount);
+
+ switch (page_code) {
+ case 0x3: {
+ struct mode_format *page3p = NULL;
+ modehdrp = (struct mode_header *)(bp->b_un.b_addr);
+ modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
+
+ page3p = (void *)((caddr_t)modehdrp +
+ MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
+ page3p->mode_page.code = 0x3;
+ page3p->mode_page.length =
+ (uchar_t)(sizeof (struct mode_format));
+ page3p->data_bytes_sect = 512;
+ page3p->sect_track = 63;
+ break;
+ }
+ case 0x4: {
+ struct mode_geometry *page4p = NULL;
+ modehdrp = (struct mode_header *)(bp->b_un.b_addr);
+ modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
+
+ page4p = (void *)((caddr_t)modehdrp +
+ MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
+ page4p->mode_page.code = 0x4;
+ page4p->mode_page.length =
+ (uchar_t)(sizeof (struct mode_geometry));
+ page4p->heads = 255;
+ page4p->rpm = 10000;
+ break;
+ }
+ default:
+ break;
+ }
+	return (DDI_SUCCESS);
+}
diff --git a/usr/src/uts/common/io/dr_sas/dr_sas.conf b/usr/src/uts/common/io/dr_sas/dr_sas.conf
new file mode 100644
index 0000000000..3792f43ca4
--- /dev/null
+++ b/usr/src/uts/common/io/dr_sas/dr_sas.conf
@@ -0,0 +1,15 @@
+#
+# Copyright (c) 2008-2009, LSI Logic Corporation.
+# All rights reserved.
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# dr_sas.conf for sol 10 (and later) for all supported architectures
+#
+# global definitions
+
+# MSI-specific flag. Uncomment the line below and set it to "yes" to enable MSI.
+#drsas-enable-msi="yes";
diff --git a/usr/src/uts/common/io/dr_sas/dr_sas.h b/usr/src/uts/common/io/dr_sas/dr_sas.h
new file mode 100644
index 0000000000..8f78658edf
--- /dev/null
+++ b/usr/src/uts/common/io/dr_sas/dr_sas.h
@@ -0,0 +1,1766 @@
+/*
+ * dr_sas.h: header for dr_sas
+ *
+ * Solaris MegaRAID driver for SAS2.0 controllers
+ * Copyright (c) 2008-2009, LSI Logic Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the author nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _DR_SAS_H_
+#define _DR_SAS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/scsi/scsi.h>
+#include "dr_sas_list.h"
+
+/*
+ * MegaRAID SAS2.0 Driver meta data
+ */
+#define DRSAS_VERSION "LSIv2.0"
+#define DRSAS_RELDATE "Jan 9, 2009"
+
+#define DRSAS_TRUE 1
+#define DRSAS_FALSE 0
+
+/*
+ * MegaRAID SAS2.0 device id conversion definitions.
+ */
+#define INST2LSIRDCTL(x) ((x) << INST_MINOR_SHIFT)
+
+/*
+ * MegaRAID SAS2.0 supported controllers
+ */
+#define PCI_DEVICE_ID_LSI_2108VDE 0x0078
+#define PCI_DEVICE_ID_LSI_2108V 0x0079
+
+/*
+ * Register Index for 2108 Controllers.
+ */
+#define REGISTER_SET_IO_2108 (2)
+
+#define DRSAS_MAX_SGE_CNT 0x50
+
+#define DRSAS_IOCTL_DRIVER 0x12341234
+#define DRSAS_IOCTL_FIRMWARE 0x12345678
+#define DRSAS_IOCTL_AEN 0x87654321
+
+#define DRSAS_1_SECOND 1000000
+
+/* Dynamic Enumeration Flags */
+#define DRSAS_PD_LUN 1
+#define DRSAS_LD_LUN 0
+#define DRSAS_PD_TGT_MAX 255
+#define DRSAS_GET_PD_MAX(s) ((s)->dr_pd_max)
+#define WWN_STRLEN 17
+
+/*
+ * =====================================
+ * MegaRAID SAS2.0 MFI firmware definitions
+ * =====================================
+ */
+/*
+ * MFI stands for MegaRAID SAS2.0 FW Interface. This is just a moniker for
+ * the protocol between the software and the firmware. Commands are issued
+ * using "message frames".
+ */
+
+/*
+ * FW posts its state in upper 4 bits of outbound_msg_0 register
+ */
+#define MFI_STATE_SHIFT 28
+#define MFI_STATE_MASK ((uint32_t)0xF<<MFI_STATE_SHIFT)
+#define MFI_STATE_UNDEFINED ((uint32_t)0x0<<MFI_STATE_SHIFT)
+#define MFI_STATE_BB_INIT ((uint32_t)0x1<<MFI_STATE_SHIFT)
+#define MFI_STATE_FW_INIT ((uint32_t)0x4<<MFI_STATE_SHIFT)
+#define MFI_STATE_WAIT_HANDSHAKE ((uint32_t)0x6<<MFI_STATE_SHIFT)
+#define MFI_STATE_FW_INIT_2 ((uint32_t)0x7<<MFI_STATE_SHIFT)
+#define MFI_STATE_DEVICE_SCAN ((uint32_t)0x8<<MFI_STATE_SHIFT)
+#define MFI_STATE_BOOT_MESSAGE_PENDING ((uint32_t)0x9<<MFI_STATE_SHIFT)
+#define MFI_STATE_FLUSH_CACHE ((uint32_t)0xA<<MFI_STATE_SHIFT)
+#define MFI_STATE_READY ((uint32_t)0xB<<MFI_STATE_SHIFT)
+#define MFI_STATE_OPERATIONAL ((uint32_t)0xC<<MFI_STATE_SHIFT)
+#define MFI_STATE_FAULT ((uint32_t)0xF<<MFI_STATE_SHIFT)
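+
+/*
+ * Minimal sketch of consuming these states (per the note above, the FW
+ * state lives in outbound_msg_0; 2108-class parts may instead expose it
+ * through the outbound scratch pad):
+ *
+ *	uint32_t state = RD_OB_MSG_0(instance) & MFI_STATE_MASK;
+ *
+ *	if (state == MFI_STATE_FAULT)
+ *		return (DDI_FAILURE);
+ */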
+
+#define MRMFI_FRAME_SIZE 64
+
+/*
+ * During FW init, clear pending cmds & reset state using inbound_msg_0
+ *
+ * ABORT : Abort all pending cmds
+ * READY : Move from OPERATIONAL to READY state; discard queue info
+ * MFIMODE	: Discard a (possible) low MFA posted in 64-bit mode
+ * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
+ */
+#define MFI_INIT_ABORT 0x00000001
+#define MFI_INIT_READY 0x00000002
+#define MFI_INIT_MFIMODE 0x00000004
+#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
+#define MFI_INIT_HOTPLUG 0x00000010
+#define MFI_STOP_ADP 0x00000020
+#define	MFI_RESET_FLAGS	\
+	(MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
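+
+/*
+ * Sketch of intended use (WR_IB_DOORBELL() is defined further down in this
+ * header): during a reset the combined flags would be posted to the inbound
+ * doorbell, e.g.
+ *
+ *	WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
+ */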
+
+/*
+ * MFI frame flags
+ */
+#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
+#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001
+#define MFI_FRAME_SGL32 0x0000
+#define MFI_FRAME_SGL64 0x0002
+#define MFI_FRAME_SENSE32 0x0000
+#define MFI_FRAME_SENSE64 0x0004
+#define MFI_FRAME_DIR_NONE 0x0000
+#define MFI_FRAME_DIR_WRITE 0x0008
+#define MFI_FRAME_DIR_READ 0x0010
+#define MFI_FRAME_DIR_BOTH 0x0018
+
+/*
+ * Definition for cmd_status
+ */
+#define MFI_CMD_STATUS_POLL_MODE 0xFF
+#define MFI_CMD_STATUS_SYNC_MODE 0xFF
+
+/*
+ * MFI command opcodes
+ */
+#define MFI_CMD_OP_INIT 0x00
+#define MFI_CMD_OP_LD_READ 0x01
+#define MFI_CMD_OP_LD_WRITE 0x02
+#define MFI_CMD_OP_LD_SCSI 0x03
+#define MFI_CMD_OP_PD_SCSI 0x04
+#define MFI_CMD_OP_DCMD 0x05
+#define MFI_CMD_OP_ABORT 0x06
+#define MFI_CMD_OP_SMP 0x07
+#define MFI_CMD_OP_STP 0x08
+
+#define DR_DCMD_CTRL_GET_INFO 0x01010000
+
+#define DR_DCMD_CTRL_CACHE_FLUSH 0x01101000
+#define DR_FLUSH_CTRL_CACHE 0x01
+#define DR_FLUSH_DISK_CACHE 0x02
+
+#define DR_DCMD_CTRL_SHUTDOWN 0x01050000
+#define DRSAS_ENABLE_DRIVE_SPINDOWN 0x01
+
+#define DR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
+#define DR_DCMD_CTRL_EVENT_GET 0x01040300
+#define DR_DCMD_CTRL_EVENT_WAIT 0x01040500
+#define DR_DCMD_LD_GET_PROPERTIES 0x03030000
+#define DR_DCMD_PD_GET_INFO 0x02020000
+
+/*
+ * Solaris Specific MAX values
+ */
+#define MAX_SGL 24
+/*
+ * MFI command completion codes
+ */
+enum MFI_STAT {
+ MFI_STAT_OK = 0x00,
+ MFI_STAT_INVALID_CMD = 0x01,
+ MFI_STAT_INVALID_DCMD = 0x02,
+ MFI_STAT_INVALID_PARAMETER = 0x03,
+ MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
+ MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
+ MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
+ MFI_STAT_APP_IN_USE = 0x07,
+ MFI_STAT_APP_NOT_INITIALIZED = 0x08,
+ MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
+ MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
+ MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
+ MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
+ MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
+ MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
+ MFI_STAT_FLASH_BUSY = 0x0f,
+ MFI_STAT_FLASH_ERROR = 0x10,
+ MFI_STAT_FLASH_IMAGE_BAD = 0x11,
+ MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
+ MFI_STAT_FLASH_NOT_OPEN = 0x13,
+ MFI_STAT_FLASH_NOT_STARTED = 0x14,
+ MFI_STAT_FLUSH_FAILED = 0x15,
+ MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
+ MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
+ MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
+ MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
+ MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
+ MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
+ MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
+ MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
+ MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
+ MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
+ MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
+ MFI_STAT_MFC_HW_ERROR = 0x21,
+ MFI_STAT_NO_HW_PRESENT = 0x22,
+ MFI_STAT_NOT_FOUND = 0x23,
+ MFI_STAT_NOT_IN_ENCL = 0x24,
+ MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
+ MFI_STAT_PD_TYPE_WRONG = 0x26,
+ MFI_STAT_PR_DISABLED = 0x27,
+ MFI_STAT_ROW_INDEX_INVALID = 0x28,
+ MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
+ MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
+ MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
+ MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
+ MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
+ MFI_STAT_SCSI_IO_FAILED = 0x2e,
+ MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
+ MFI_STAT_SHUTDOWN_FAILED = 0x30,
+ MFI_STAT_TIME_NOT_SET = 0x31,
+ MFI_STAT_WRONG_STATE = 0x32,
+ MFI_STAT_LD_OFFLINE = 0x33,
+ /* UNUSED: 0x34 to 0xfe */
+ MFI_STAT_INVALID_STATUS = 0xFF
+};
+
+enum DR_EVT_CLASS {
+ DR_EVT_CLASS_DEBUG = -2,
+ DR_EVT_CLASS_PROGRESS = -1,
+ DR_EVT_CLASS_INFO = 0,
+ DR_EVT_CLASS_WARNING = 1,
+ DR_EVT_CLASS_CRITICAL = 2,
+ DR_EVT_CLASS_FATAL = 3,
+ DR_EVT_CLASS_DEAD = 4
+};
+
+enum DR_EVT_LOCALE {
+ DR_EVT_LOCALE_LD = 0x0001,
+ DR_EVT_LOCALE_PD = 0x0002,
+ DR_EVT_LOCALE_ENCL = 0x0004,
+ DR_EVT_LOCALE_BBU = 0x0008,
+ DR_EVT_LOCALE_SAS = 0x0010,
+ DR_EVT_LOCALE_CTRL = 0x0020,
+ DR_EVT_LOCALE_CONFIG = 0x0040,
+ DR_EVT_LOCALE_CLUSTER = 0x0080,
+ DR_EVT_LOCALE_ALL = 0xffff
+};
+
+#define DR_EVT_CFG_CLEARED 0x0004
+#define DR_EVT_LD_CREATED 0x008a
+#define DR_EVT_LD_DELETED 0x008b
+#define DR_EVT_PD_REMOVED_EXT 0x00f8
+#define DR_EVT_PD_INSERTED_EXT 0x00f7
+
+enum LD_STATE {
+ LD_OFFLINE = 0,
+ LD_PARTIALLY_DEGRADED = 1,
+ LD_DEGRADED = 2,
+ LD_OPTIMAL = 3,
+ LD_INVALID = 0xFF
+};
+
+enum DRSAS_EVT {
+ DRSAS_EVT_CONFIG_TGT = 0,
+ DRSAS_EVT_UNCONFIG_TGT = 1,
+ DRSAS_EVT_UNCONFIG_SMP = 2
+};
+
+#define DMA_OBJ_ALLOCATED 1
+#define DMA_OBJ_REALLOCATED 2
+#define DMA_OBJ_FREED 3
+
+/*
+ * dma_obj_t - Our DMA object
+ * @param buffer : kernel virtual address
+ * @param size : size of the data to be allocated
+ * @param acc_handle : access handle
+ * @param dma_handle : dma handle
+ * @param dma_cookie : scatter-gather list
+ * @param dma_attr : dma attributes for this buffer
+ * Our DMA object. The caller must initialize the size and dma attributes
+ * (dma_attr) fields before allocating the resources.
+ */
+typedef struct {
+ caddr_t buffer;
+ uint32_t size;
+ ddi_acc_handle_t acc_handle;
+ ddi_dma_handle_t dma_handle;
+ ddi_dma_cookie_t dma_cookie[DRSAS_MAX_SGE_CNT];
+ ddi_dma_attr_t dma_attr;
+ uint8_t status;
+ uint8_t reserved[3];
+} dma_obj_t;
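+
+/*
+ * Illustrative setup per the note above (drsas_alloc_dma_obj() and its
+ * signature are assumed here; only size and dma_attr must be primed by
+ * the caller):
+ *
+ *	dma_obj_t obj;
+ *
+ *	bzero(&obj, sizeof (obj));
+ *	obj.size = sizeof (struct drsas_ctrl_info);
+ *	obj.dma_attr = drsas_generic_dma_attr;
+ *	obj.dma_attr.dma_attr_sgllen = 1;
+ *	if (drsas_alloc_dma_obj(instance, &obj) != DDI_SUCCESS)
+ *		return (DDI_FAILURE);
+ */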
+
+struct drsas_eventinfo {
+ struct drsas_instance *instance;
+ int tgt;
+ int lun;
+ int event;
+};
+
+struct drsas_ld {
+ dev_info_t *dip;
+ uint8_t lun_type;
+ uint8_t reserved[3];
+};
+
+struct drsas_pd {
+ dev_info_t *dip;
+ uint8_t lun_type;
+ uint8_t dev_id;
+ uint8_t flags;
+ uint8_t reserved;
+};
+
+struct drsas_pd_info {
+ uint16_t deviceId;
+ uint16_t seqNum;
+ uint8_t inquiryData[96];
+ uint8_t vpdPage83[64];
+ uint8_t notSupported;
+ uint8_t scsiDevType;
+ uint8_t a;
+ uint8_t device_speed;
+ uint32_t mediaerrcnt;
+ uint32_t other;
+ uint32_t pred;
+ uint32_t lastpred;
+ uint16_t fwState;
+ uint8_t disabled;
+ uint8_t linkspwwd;
+ uint32_t ddfType;
+ struct {
+ uint8_t count;
+ uint8_t isPathBroken;
+ uint8_t connectorIndex[2];
+ uint8_t reserved[4];
+ uint64_t sasAddr[2];
+ uint8_t reserved2[16];
+ } pathInfo;
+};
+
+typedef struct drsas_instance {
+ uint32_t *producer;
+ uint32_t *consumer;
+
+ uint32_t *reply_queue;
+ dma_obj_t mfi_internal_dma_obj;
+
+ uint8_t init_id;
+ uint8_t reserved[3];
+
+ uint16_t max_num_sge;
+ uint16_t max_fw_cmds;
+ uint32_t max_sectors_per_req;
+
+ struct drsas_cmd **cmd_list;
+
+ mlist_t cmd_pool_list;
+ kmutex_t cmd_pool_mtx;
+
+ mlist_t cmd_pend_list;
+ kmutex_t cmd_pend_mtx;
+
+ dma_obj_t mfi_evt_detail_obj;
+ struct drsas_cmd *aen_cmd;
+
+ uint32_t aen_seq_num;
+ uint32_t aen_class_locale_word;
+
+ scsi_hba_tran_t *tran;
+
+ kcondvar_t int_cmd_cv;
+ kmutex_t int_cmd_mtx;
+
+ kcondvar_t aen_cmd_cv;
+ kmutex_t aen_cmd_mtx;
+
+ kcondvar_t abort_cmd_cv;
+ kmutex_t abort_cmd_mtx;
+
+ dev_info_t *dip;
+ ddi_acc_handle_t pci_handle;
+
+ timeout_id_t timeout_id;
+ uint32_t unique_id;
+ uint16_t fw_outstanding;
+ caddr_t regmap;
+ ddi_acc_handle_t regmap_handle;
+ uint8_t isr_level;
+ ddi_iblock_cookie_t iblock_cookie;
+ ddi_iblock_cookie_t soft_iblock_cookie;
+ ddi_softintr_t soft_intr_id;
+ uint8_t softint_running;
+ kmutex_t completed_pool_mtx;
+ mlist_t completed_pool_list;
+
+ caddr_t internal_buf;
+ uint32_t internal_buf_dmac_add;
+ uint32_t internal_buf_size;
+
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint16_t subsysvid;
+ uint16_t subsysid;
+ int instance;
+ int baseaddress;
+ char iocnode[16];
+
+ int fm_capabilities;
+
+ struct drsas_func_ptr *func_ptr;
+ /* MSI interrupts specific */
+ ddi_intr_handle_t *intr_htable;
+ int intr_type;
+ int intr_cnt;
+ size_t intr_size;
+ uint_t intr_pri;
+ int intr_cap;
+
+ ddi_taskq_t *taskq;
+ struct drsas_ld *dr_ld_list;
+} drsas_t;
+
+struct drsas_func_ptr {
+ int (*read_fw_status_reg)(struct drsas_instance *);
+ void (*issue_cmd)(struct drsas_cmd *, struct drsas_instance *);
+ int (*issue_cmd_in_sync_mode)(struct drsas_instance *,
+ struct drsas_cmd *);
+ int (*issue_cmd_in_poll_mode)(struct drsas_instance *,
+ struct drsas_cmd *);
+ void (*enable_intr)(struct drsas_instance *);
+ void (*disable_intr)(struct drsas_instance *);
+ int (*intr_ack)(struct drsas_instance *);
+};
+
+/*
+ * ### Helper routines ###
+ */
+
+/*
+ * con_log() - console log routine
+ * @param level : indicates the severity of the message.
+ * @param fmt : parenthesized cmn_err() argument list
+ *
+ * con_log() displays messages on the console based on the current debug
+ * level, attaching the appropriate kernel severity level to each message.
+ *
+ * console message debug levels
+ */
+#define CL_NONE 0 /* No debug information */
+#define CL_ANN 1 /* print unconditionally, announcements */
+#define	CL_ANN1		2	/* verbose announcements, normally no output */
+#define CL_DLEVEL1 3 /* debug level 1, informative */
+#define CL_DLEVEL2 4 /* debug level 2, verbose */
+#define CL_DLEVEL3 5 /* debug level 3, very verbose */
+
+#ifdef __SUNPRO_C
+#define __func__ ""
+#endif
+
+#define	con_log(level, fmt)	\
+	do { if (debug_level_g >= (level)) cmn_err fmt; } while (0)
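+
+/*
+ * Note the calling convention: fmt is the complete parenthesized cmn_err()
+ * argument list, e.g.
+ *
+ *	con_log(CL_ANN1, (CE_NOTE, "t = %d l = %d", tgt, lun));
+ */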
+
+/*
+ * ### SCSA definitions ###
+ */
+#define PKT2TGT(pkt) ((pkt)->pkt_address.a_target)
+#define PKT2LUN(pkt) ((pkt)->pkt_address.a_lun)
+#define	PKT2TRAN(pkt)		((pkt)->pkt_address.a_hba_tran)
+#define ADDR2TRAN(ap) ((ap)->a_hba_tran)
+
+#define	TRAN2MR(tran)	((struct drsas_instance *)(tran)->tran_hba_private)
+#define	ADDR2MR(ap)	(TRAN2MR(ADDR2TRAN(ap)))
+
+#define PKT2CMD(pkt) ((struct scsa_cmd *)(pkt)->pkt_ha_private)
+#define CMD2PKT(sp) ((sp)->cmd_pkt)
+#define PKT2REQ(pkt) (&(PKT2CMD(pkt)->request))
+
+#define CMD2ADDR(cmd) (&CMD2PKT(cmd)->pkt_address)
+#define CMD2TRAN(cmd) (CMD2PKT(cmd)->pkt_address.a_hba_tran)
+#define CMD2MR(cmd) (TRAN2MR(CMD2TRAN(cmd)))
+
+#define CFLAG_DMAVALID 0x0001 /* requires a dma operation */
+#define	CFLAG_DMASEND		0x0002	/* transfer data to the device */
+#define CFLAG_CONSISTENT 0x0040 /* consistent data transfer */
+
+/*
+ * ### Data structures for ioctl interface and internal commands ###
+ */
+
+/*
+ * Data direction flags
+ */
+#define UIOC_RD 0x00001
+#define UIOC_WR 0x00002
+
+#define SCP2HOST(scp) (scp)->device->host /* to host */
+#define SCP2HOSTDATA(scp) SCP2HOST(scp)->hostdata /* to soft state */
+#define SCP2CHANNEL(scp) (scp)->device->channel /* to channel */
+#define SCP2TARGET(scp) (scp)->device->id /* to target */
+#define SCP2LUN(scp) (scp)->device->lun /* to LUN */
+
+#define SCSIHOST2ADAP(host) (((caddr_t *)(host->hostdata))[0])
+#define SCP2ADAPTER(scp) \
+ (struct drsas_instance *)SCSIHOST2ADAP(SCP2HOST(scp))
+
+#define	MRDRV_IS_LOGICAL_SCSA(instance, acmd)	\
+	(((acmd)->device_id < MRDRV_MAX_LD) ? 1 : 0)
+#define	MRDRV_IS_LOGICAL(ap)	\
+	((((ap)->a_target < MRDRV_MAX_LD) && ((ap)->a_lun == 0)) ? 1 : 0)
+#define MAP_DEVICE_ID(instance, ap) \
+ (ap->a_target)
+
+#define HIGH_LEVEL_INTR 1
+#define NORMAL_LEVEL_INTR 0
+
+/*
+ * scsa_cmd - Per-command mr private data
+ * @param cmd_dmahandle : dma handle
+ * @param cmd_dmacookies : current dma cookies
+ * @param cmd_pkt : scsi_pkt reference
+ * @param cmd_dmacount : dma count
+ * @param cmd_cookie : next cookie
+ * @param cmd_ncookies : cookies per window
+ * @param cmd_cookiecnt : cookies per sub-win
+ * @param cmd_nwin : number of dma windows
+ * @param cmd_curwin : current dma window
+ * @param cmd_dma_offset : current window offset
+ * @param cmd_dma_len : current window length
+ * @param cmd_flags : private flags
+ * @param cmd_cdblen : length of cdb
+ * @param cmd_scblen : length of scb
+ * @param cmd_buf : command buffer
+ * @param channel : channel for scsi sub-system
+ * @param target : target for scsi sub-system
+ * @param lun : LUN for scsi sub-system
+ *
+ * - Allocated at same time as scsi_pkt by scsi_hba_pkt_alloc(9E)
+ * - Pointed to by pkt_ha_private field in scsi_pkt
+ */
+struct scsa_cmd {
+ ddi_dma_handle_t cmd_dmahandle;
+ ddi_dma_cookie_t cmd_dmacookies[DRSAS_MAX_SGE_CNT];
+ struct scsi_pkt *cmd_pkt;
+ ulong_t cmd_dmacount;
+ uint_t cmd_cookie;
+ uint_t cmd_ncookies;
+ uint_t cmd_cookiecnt;
+ uint_t cmd_nwin;
+ uint_t cmd_curwin;
+ off_t cmd_dma_offset;
+ ulong_t cmd_dma_len;
+ ulong_t cmd_flags;
+ uint_t cmd_cdblen;
+ uint_t cmd_scblen;
+ struct buf *cmd_buf;
+ ushort_t device_id;
+ uchar_t islogical;
+ uchar_t lun;
+ struct drsas_device *drsas_dev;
+};
+
+
+struct drsas_cmd {
+ union drsas_frame *frame;
+ uint32_t frame_phys_addr;
+ uint8_t *sense;
+ uint32_t sense_phys_addr;
+ dma_obj_t frame_dma_obj;
+ uint8_t frame_dma_obj_status;
+
+ uint32_t index;
+ uint8_t sync_cmd;
+ uint8_t cmd_status;
+ uint16_t abort_aen;
+ mlist_t list;
+ uint32_t frame_count;
+ struct scsa_cmd *cmd;
+ struct scsi_pkt *pkt;
+};
+
+#define MAX_MGMT_ADAPTERS 1024
+#define IOC_SIGNATURE "MR-SAS"
+
+#define IOC_CMD_FIRMWARE 0x0
+#define DRSAS_DRIVER_IOCTL_COMMON 0xF0010000
+#define DRSAS_DRIVER_IOCTL_DRIVER_VERSION 0xF0010100
+#define DRSAS_DRIVER_IOCTL_PCI_INFORMATION 0xF0010200
+#define DRSAS_DRIVER_IOCTL_MRRAID_STATISTICS 0xF0010300
+
+
+#define DRSAS_MAX_SENSE_LENGTH 32
+
+struct drsas_mgmt_info {
+
+ uint16_t count;
+ struct drsas_instance *instance[MAX_MGMT_ADAPTERS];
+ uint16_t map[MAX_MGMT_ADAPTERS];
+ int max_index;
+};
+
+#pragma pack(1)
+
+/*
+ * SAS controller properties
+ */
+struct drsas_ctrl_prop {
+ uint16_t seq_num;
+ uint16_t pred_fail_poll_interval;
+ uint16_t intr_throttle_count;
+ uint16_t intr_throttle_timeouts;
+
+ uint8_t rebuild_rate;
+ uint8_t patrol_read_rate;
+ uint8_t bgi_rate;
+ uint8_t cc_rate;
+ uint8_t recon_rate;
+
+ uint8_t cache_flush_interval;
+
+ uint8_t spinup_drv_count;
+ uint8_t spinup_delay;
+
+ uint8_t cluster_enable;
+ uint8_t coercion_mode;
+ uint8_t disk_write_cache_disable;
+ uint8_t alarm_enable;
+
+ uint8_t reserved[44];
+};
+
+/*
+ * SAS controller information
+ */
+struct drsas_ctrl_info {
+ /* PCI device information */
+ struct {
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint16_t sub_vendor_id;
+ uint16_t sub_device_id;
+ uint8_t reserved[24];
+ } pci;
+
+ /* Host interface information */
+ struct {
+ uint8_t PCIX : 1;
+ uint8_t PCIE : 1;
+ uint8_t iSCSI : 1;
+ uint8_t SAS_3G : 1;
+ uint8_t reserved_0 : 4;
+ uint8_t reserved_1[6];
+ uint8_t port_count;
+ uint64_t port_addr[8];
+ } host_interface;
+
+ /* Device (backend) interface information */
+ struct {
+ uint8_t SPI : 1;
+ uint8_t SAS_3G : 1;
+ uint8_t SATA_1_5G : 1;
+ uint8_t SATA_3G : 1;
+ uint8_t reserved_0 : 4;
+ uint8_t reserved_1[6];
+ uint8_t port_count;
+ uint64_t port_addr[8];
+ } device_interface;
+
+	/* List of components in flash. All strings are null-terminated */
+ uint32_t image_check_word;
+ uint32_t image_component_count;
+
+ struct {
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char built_time[16];
+ } image_component[8];
+
+ /*
+ * List of flash components that have been flashed on the card, but
+ * are not in use, pending reset of the adapter. This list will be
+	 * empty if a flash operation has not occurred. All strings are
+	 * null-terminated.
+ */
+ uint32_t pending_image_component_count;
+
+ struct {
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char build_time[16];
+ } pending_image_component[8];
+
+ uint8_t max_arms;
+ uint8_t max_spans;
+ uint8_t max_arrays;
+ uint8_t max_lds;
+
+ char product_name[80];
+ char serial_no[32];
+
+ /*
+ * Other physical/controller/operation information. Indicates the
+ * presence of the hardware
+ */
+ struct {
+ uint32_t bbu : 1;
+ uint32_t alarm : 1;
+ uint32_t nvram : 1;
+ uint32_t uart : 1;
+ uint32_t reserved : 28;
+ } hw_present;
+
+ uint32_t current_fw_time;
+
+ /* Maximum data transfer sizes */
+ uint16_t max_concurrent_cmds;
+ uint16_t max_sge_count;
+ uint32_t max_request_size;
+
+ /* Logical and physical device counts */
+ uint16_t ld_present_count;
+ uint16_t ld_degraded_count;
+ uint16_t ld_offline_count;
+
+ uint16_t pd_present_count;
+ uint16_t pd_disk_present_count;
+ uint16_t pd_disk_pred_failure_count;
+ uint16_t pd_disk_failed_count;
+
+ /* Memory size information */
+ uint16_t nvram_size;
+ uint16_t memory_size;
+ uint16_t flash_size;
+
+ /* Error counters */
+ uint16_t mem_correctable_error_count;
+ uint16_t mem_uncorrectable_error_count;
+
+ /* Cluster information */
+ uint8_t cluster_permitted;
+ uint8_t cluster_active;
+ uint8_t reserved_1[2];
+
+ /* Controller capabilities structures */
+ struct {
+ uint32_t raid_level_0 : 1;
+ uint32_t raid_level_1 : 1;
+ uint32_t raid_level_5 : 1;
+ uint32_t raid_level_1E : 1;
+ uint32_t reserved : 28;
+ } raid_levels;
+
+ struct {
+ uint32_t rbld_rate : 1;
+ uint32_t cc_rate : 1;
+ uint32_t bgi_rate : 1;
+ uint32_t recon_rate : 1;
+ uint32_t patrol_rate : 1;
+ uint32_t alarm_control : 1;
+ uint32_t cluster_supported : 1;
+ uint32_t bbu : 1;
+ uint32_t spanning_allowed : 1;
+ uint32_t dedicated_hotspares : 1;
+ uint32_t revertible_hotspares : 1;
+ uint32_t foreign_config_import : 1;
+ uint32_t self_diagnostic : 1;
+ uint32_t reserved : 19;
+ } adapter_operations;
+
+ struct {
+ uint32_t read_policy : 1;
+ uint32_t write_policy : 1;
+ uint32_t io_policy : 1;
+ uint32_t access_policy : 1;
+ uint32_t reserved : 28;
+ } ld_operations;
+
+ struct {
+ uint8_t min;
+ uint8_t max;
+ uint8_t reserved[2];
+ } stripe_size_operations;
+
+ struct {
+ uint32_t force_online : 1;
+ uint32_t force_offline : 1;
+ uint32_t force_rebuild : 1;
+ uint32_t reserved : 29;
+ } pd_operations;
+
+ struct {
+ uint32_t ctrl_supports_sas : 1;
+ uint32_t ctrl_supports_sata : 1;
+ uint32_t allow_mix_in_encl : 1;
+ uint32_t allow_mix_in_ld : 1;
+ uint32_t allow_sata_in_cluster : 1;
+ uint32_t reserved : 27;
+ } pd_mix_support;
+
+ /* Include the controller properties (changeable items) */
+ uint8_t reserved_2[12];
+ struct drsas_ctrl_prop properties;
+
+ uint8_t pad[0x800 - 0x640];
+};
+
+/*
+ * ==================================
+ * MegaRAID SAS2.0 driver definitions
+ * ==================================
+ */
+#define MRDRV_MAX_NUM_CMD 1024
+
+#define MRDRV_MAX_PD_CHANNELS 2
+#define MRDRV_MAX_LD_CHANNELS 2
+#define MRDRV_MAX_CHANNELS (MRDRV_MAX_PD_CHANNELS + \
+ MRDRV_MAX_LD_CHANNELS)
+#define MRDRV_MAX_DEV_PER_CHANNEL 128
+#define MRDRV_DEFAULT_INIT_ID -1
+#define MRDRV_MAX_CMD_PER_LUN 1000
+#define MRDRV_MAX_LUN 1
+#define MRDRV_MAX_LD 64
+
+#define MRDRV_RESET_WAIT_TIME 300
+#define MRDRV_RESET_NOTICE_INTERVAL 5
+
+#define DRSAS_IOCTL_CMD 0
+
+/*
+ * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
+ * SGLs based on the size of dma_addr_t
+ */
+#define IS_DMA64 (sizeof (dma_addr_t) == 8)
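+
+/*
+ * e.g. when a frame is built, the SGL flavor follows this test (a sketch;
+ * "flags" stands for the MFI frame flags defined earlier):
+ *
+ *	flags |= IS_DMA64 ? MFI_FRAME_SGL64 : MFI_FRAME_SGL32;
+ */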
+
+#define IB_MSG_0_OFF 0x10 /* XScale */
+#define OB_MSG_0_OFF 0x18 /* XScale */
+#define IB_DOORBELL_OFF 0x20 /* XScale & ROC */
+#define OB_INTR_STATUS_OFF 0x30 /* XScale & ROC */
+#define OB_INTR_MASK_OFF 0x34 /* XScale & ROC */
+#define IB_QPORT_OFF 0x40 /* XScale & ROC */
+#define OB_DOORBELL_CLEAR_OFF 0xA0 /* ROC */
+#define OB_SCRATCH_PAD_0_OFF 0xB0 /* ROC */
+#define OB_INTR_MASK 0xFFFFFFFF
+#define OB_DOORBELL_CLEAR_MASK 0xFFFFFFFF
+
+/*
+ * All MFI register-set macros below take a drsas_instance pointer and
+ * access the mapped register space through its regmap/regmap_handle.
+ */
+#define WR_IB_MSG_0(v, instance) ddi_put32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + IB_MSG_0_OFF), (v))
+
+#define RD_OB_MSG_0(instance) ddi_get32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + OB_MSG_0_OFF))
+
+#define WR_IB_DOORBELL(v, instance) ddi_put32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + IB_DOORBELL_OFF), (v))
+
+#define RD_IB_DOORBELL(instance) ddi_get32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + IB_DOORBELL_OFF))
+
+#define WR_OB_INTR_STATUS(v, instance) ddi_put32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + OB_INTR_STATUS_OFF), (v))
+
+#define RD_OB_INTR_STATUS(instance) ddi_get32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + OB_INTR_STATUS_OFF))
+
+#define WR_OB_INTR_MASK(v, instance) ddi_put32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + OB_INTR_MASK_OFF), (v))
+
+#define RD_OB_INTR_MASK(instance) ddi_get32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + OB_INTR_MASK_OFF))
+
+#define WR_IB_QPORT(v, instance) ddi_put32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + IB_QPORT_OFF), (v))
+
+#define WR_OB_DOORBELL_CLEAR(v, instance) ddi_put32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + OB_DOORBELL_CLEAR_OFF), \
+ (v))
+
+#define RD_OB_SCRATCH_PAD_0(instance) ddi_get32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + OB_SCRATCH_PAD_0_OFF))
+
+/*
+ * When FW is in MFI_STATE_READY or MFI_STATE_OPERATIONAL, the state data
+ * of Outbound Msg Reg 0 indicates max concurrent cmds supported, max SGEs
+ * supported per cmd and if 64-bit MFAs (M64) is enabled or disabled.
+ */
+#define MFI_OB_INTR_STATUS_MASK 0x00000002
+
+/*
+ * MFI_REPLY_2108_MESSAGE_INTR_MASK is also used in enable_intr_ppc().
+ * It covers bit 0 (0x1, the reply-message interrupt) and bit 2 (0x4),
+ * hence the mask value 0x5.
+ */
+#define MFI_REPLY_2108_MESSAGE_INTR 0x00000001
+#define MFI_REPLY_2108_MESSAGE_INTR_MASK 0x00000005
+
+#define MFI_POLL_TIMEOUT_SECS 60
+
+#define MFI_ENABLE_INTR(instance) ddi_put32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + OB_INTR_MASK_OFF), 1)
+#define MFI_DISABLE_INTR(instance) \
+{ \
+ uint32_t disable = 1; \
+ uint32_t mask = ddi_get32((instance)->regmap_handle, \
+ (uint32_t *)((uintptr_t)(instance)->regmap + OB_INTR_MASK_OFF));\
+ mask &= ~disable; \
+ ddi_put32((instance)->regmap_handle, (uint32_t *) \
+ (uintptr_t)((instance)->regmap + OB_INTR_MASK_OFF), mask); \
+}
+
+/* By default, the firmware is configured for 8 Kbytes of memory */
+#define DEFAULT_MFI_MEM_SZ 8192
+#define MINIMUM_MFI_MEM_SZ 4096
+
+/* DCMD Message Frame MAILBOX0-11 */
+#define DCMD_MBOX_SZ 12
+
+
+struct drsas_register_set {
+ uint32_t reserved_0[4];
+
+ uint32_t inbound_msg_0;
+ uint32_t inbound_msg_1;
+ uint32_t outbound_msg_0;
+ uint32_t outbound_msg_1;
+
+ uint32_t inbound_doorbell;
+ uint32_t inbound_intr_status;
+ uint32_t inbound_intr_mask;
+
+ uint32_t outbound_doorbell;
+ uint32_t outbound_intr_status;
+ uint32_t outbound_intr_mask;
+
+ uint32_t reserved_1[2];
+
+ uint32_t inbound_queue_port;
+ uint32_t outbound_queue_port;
+
+ uint32_t reserved_2[22];
+
+ uint32_t outbound_doorbell_clear;
+
+ uint32_t reserved_3[3];
+
+ uint32_t outbound_scratch_pad;
+
+ uint32_t reserved_4[3];
+
+ uint32_t inbound_low_queue_port;
+
+ uint32_t inbound_high_queue_port;
+
+ uint32_t reserved_5;
+ uint32_t index_registers[820];
+};
+
+struct drsas_sge32 {
+ uint32_t phys_addr;
+ uint32_t length;
+};
+
+struct drsas_sge64 {
+ uint64_t phys_addr;
+ uint32_t length;
+};
+
+union drsas_sgl {
+ struct drsas_sge32 sge32[1];
+ struct drsas_sge64 sge64[1];
+};
+
+struct drsas_header {
+ uint8_t cmd;
+ uint8_t sense_len;
+ uint8_t cmd_status;
+ uint8_t scsi_status;
+
+ uint8_t target_id;
+ uint8_t lun;
+ uint8_t cdb_len;
+ uint8_t sge_count;
+
+ uint32_t context;
+ uint8_t req_id;
+ uint8_t msgvector;
+ uint16_t pad_0;
+
+ uint16_t flags;
+ uint16_t timeout;
+ uint32_t data_xferlen;
+};
+
+union drsas_sgl_frame {
+ struct drsas_sge32 sge32[8];
+ struct drsas_sge64 sge64[5];
+};
+
+struct drsas_init_frame {
+ uint8_t cmd;
+ uint8_t reserved_0;
+ uint8_t cmd_status;
+
+ uint8_t reserved_1;
+ uint32_t reserved_2;
+
+ uint32_t context;
+ uint8_t req_id;
+ uint8_t msgvector;
+ uint16_t pad_0;
+
+ uint16_t flags;
+ uint16_t reserved_3;
+ uint32_t data_xfer_len;
+
+ uint32_t queue_info_new_phys_addr_lo;
+ uint32_t queue_info_new_phys_addr_hi;
+ uint32_t queue_info_old_phys_addr_lo;
+ uint32_t queue_info_old_phys_addr_hi;
+
+ uint32_t reserved_4[6];
+};
+
+struct drsas_init_queue_info {
+ uint32_t init_flags;
+ uint32_t reply_queue_entries;
+
+ uint32_t reply_queue_start_phys_addr_lo;
+ uint32_t reply_queue_start_phys_addr_hi;
+ uint32_t producer_index_phys_addr_lo;
+ uint32_t producer_index_phys_addr_hi;
+ uint32_t consumer_index_phys_addr_lo;
+ uint32_t consumer_index_phys_addr_hi;
+};
+
+struct drsas_io_frame {
+ uint8_t cmd;
+ uint8_t sense_len;
+ uint8_t cmd_status;
+ uint8_t scsi_status;
+
+ uint8_t target_id;
+ uint8_t access_byte;
+ uint8_t reserved_0;
+ uint8_t sge_count;
+
+ uint32_t context;
+ uint8_t req_id;
+ uint8_t msgvector;
+ uint16_t pad_0;
+
+ uint16_t flags;
+ uint16_t timeout;
+ uint32_t lba_count;
+
+ uint32_t sense_buf_phys_addr_lo;
+ uint32_t sense_buf_phys_addr_hi;
+
+ uint32_t start_lba_lo;
+ uint32_t start_lba_hi;
+
+ union drsas_sgl sgl;
+};
+
+struct drsas_pthru_frame {
+ uint8_t cmd;
+ uint8_t sense_len;
+ uint8_t cmd_status;
+ uint8_t scsi_status;
+
+ uint8_t target_id;
+ uint8_t lun;
+ uint8_t cdb_len;
+ uint8_t sge_count;
+
+ uint32_t context;
+ uint8_t req_id;
+ uint8_t msgvector;
+ uint16_t pad_0;
+
+ uint16_t flags;
+ uint16_t timeout;
+ uint32_t data_xfer_len;
+
+ uint32_t sense_buf_phys_addr_lo;
+ uint32_t sense_buf_phys_addr_hi;
+
+ uint8_t cdb[16];
+ union drsas_sgl sgl;
+};
+
+struct drsas_dcmd_frame {
+ uint8_t cmd;
+ uint8_t reserved_0;
+ uint8_t cmd_status;
+ uint8_t reserved_1[4];
+ uint8_t sge_count;
+
+ uint32_t context;
+ uint8_t req_id;
+ uint8_t msgvector;
+ uint16_t pad_0;
+
+ uint16_t flags;
+ uint16_t timeout;
+
+ uint32_t data_xfer_len;
+ uint32_t opcode;
+
+ union {
+ uint8_t b[DCMD_MBOX_SZ];
+ uint16_t s[6];
+ uint32_t w[3];
+ } mbox;
+
+ union drsas_sgl sgl;
+};
+
+struct drsas_abort_frame {
+ uint8_t cmd;
+ uint8_t reserved_0;
+ uint8_t cmd_status;
+
+ uint8_t reserved_1;
+ uint32_t reserved_2;
+
+ uint32_t context;
+ uint8_t req_id;
+ uint8_t msgvector;
+ uint16_t pad_0;
+
+ uint16_t flags;
+ uint16_t reserved_3;
+ uint32_t reserved_4;
+
+ uint32_t abort_context;
+ uint32_t pad_1;
+
+ uint32_t abort_mfi_phys_addr_lo;
+ uint32_t abort_mfi_phys_addr_hi;
+
+ uint32_t reserved_5[6];
+};
+
+struct drsas_smp_frame {
+ uint8_t cmd;
+ uint8_t reserved_1;
+ uint8_t cmd_status;
+ uint8_t connection_status;
+
+ uint8_t reserved_2[3];
+ uint8_t sge_count;
+
+ uint32_t context;
+ uint8_t req_id;
+ uint8_t msgvector;
+ uint16_t pad_0;
+
+ uint16_t flags;
+ uint16_t timeout;
+
+ uint32_t data_xfer_len;
+
+ uint64_t sas_addr;
+
+ union drsas_sgl sgl[2];
+};
+
+struct drsas_stp_frame {
+ uint8_t cmd;
+ uint8_t reserved_1;
+ uint8_t cmd_status;
+ uint8_t connection_status;
+
+ uint8_t target_id;
+ uint8_t reserved_2[2];
+ uint8_t sge_count;
+
+ uint32_t context;
+ uint8_t req_id;
+ uint8_t msgvector;
+ uint16_t pad_0;
+
+ uint16_t flags;
+ uint16_t timeout;
+
+ uint32_t data_xfer_len;
+
+ uint16_t fis[10];
+ uint32_t stp_flags;
+ union drsas_sgl sgl;
+};
+
+union drsas_frame {
+ struct drsas_header hdr;
+ struct drsas_init_frame init;
+ struct drsas_io_frame io;
+ struct drsas_pthru_frame pthru;
+ struct drsas_dcmd_frame dcmd;
+ struct drsas_abort_frame abort;
+ struct drsas_smp_frame smp;
+ struct drsas_stp_frame stp;
+
+ uint8_t raw_bytes[64];
+};
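+
+/*
+ * Every frame flavor above overlays the same 64-byte slot
+ * (MRMFI_FRAME_SIZE); raw_bytes pins the union to that size.
+ */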
+
+typedef struct drsas_pd_address {
+ uint16_t device_id;
+ uint16_t encl_id;
+
+ union {
+ struct {
+ uint8_t encl_index;
+ uint8_t slot_number;
+ } pd_address;
+ struct {
+ uint8_t encl_position;
+ uint8_t encl_connector_index;
+ } encl_address;
+	} address;
+
+ uint8_t scsi_dev_type;
+
+ union {
+ uint8_t port_bitmap;
+ uint8_t port_numbers;
+ } connected;
+
+ uint64_t sas_addr[2];
+} drsas_pd_address_t;
+
+union drsas_evt_class_locale {
+ struct {
+ uint16_t locale;
+ uint8_t reserved;
+ int8_t class;
+ } members;
+
+ uint32_t word;
+};
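+
+/*
+ * Sketch of packing the AEN class/locale word through this union (an
+ * illustration only): an "all locales, INFO and above" filter would be
+ * built as
+ *
+ *	union drsas_evt_class_locale cl;
+ *
+ *	cl.members.locale = DR_EVT_LOCALE_ALL;
+ *	cl.members.reserved = 0;
+ *	cl.members.class = DR_EVT_CLASS_INFO;
+ *	instance->aen_class_locale_word = cl.word;
+ */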
+
+struct drsas_evt_log_info {
+ uint32_t newest_seq_num;
+ uint32_t oldest_seq_num;
+ uint32_t clear_seq_num;
+ uint32_t shutdown_seq_num;
+ uint32_t boot_seq_num;
+};
+
+struct drsas_progress {
+ uint16_t progress;
+ uint16_t elapsed_seconds;
+};
+
+struct drsas_evtarg_ld {
+ uint16_t target_id;
+ uint8_t ld_index;
+ uint8_t reserved;
+};
+
+struct drsas_evtarg_pd {
+ uint16_t device_id;
+ uint8_t encl_index;
+ uint8_t slot_number;
+};
+
+struct drsas_evt_detail {
+ uint32_t seq_num;
+ uint32_t time_stamp;
+ uint32_t code;
+ union drsas_evt_class_locale cl;
+ uint8_t arg_type;
+ uint8_t reserved1[15];
+
+ union {
+ struct {
+ struct drsas_evtarg_pd pd;
+ uint8_t cdb_length;
+ uint8_t sense_length;
+ uint8_t reserved[2];
+ uint8_t cdb[16];
+ uint8_t sense[64];
+ } cdbSense;
+
+ struct drsas_evtarg_ld ld;
+
+ struct {
+ struct drsas_evtarg_ld ld;
+ uint64_t count;
+ } ld_count;
+
+ struct {
+ uint64_t lba;
+ struct drsas_evtarg_ld ld;
+ } ld_lba;
+
+ struct {
+ struct drsas_evtarg_ld ld;
+ uint32_t prevOwner;
+ uint32_t newOwner;
+ } ld_owner;
+
+ struct {
+ uint64_t ld_lba;
+ uint64_t pd_lba;
+ struct drsas_evtarg_ld ld;
+ struct drsas_evtarg_pd pd;
+ } ld_lba_pd_lba;
+
+ struct {
+ struct drsas_evtarg_ld ld;
+ struct drsas_progress prog;
+ } ld_prog;
+
+ struct {
+ struct drsas_evtarg_ld ld;
+ uint32_t prev_state;
+ uint32_t new_state;
+ } ld_state;
+
+ struct {
+ uint64_t strip;
+ struct drsas_evtarg_ld ld;
+ } ld_strip;
+
+ struct drsas_evtarg_pd pd;
+
+ struct {
+ struct drsas_evtarg_pd pd;
+ uint32_t err;
+ } pd_err;
+
+ struct {
+ uint64_t lba;
+ struct drsas_evtarg_pd pd;
+ } pd_lba;
+
+ struct {
+ uint64_t lba;
+ struct drsas_evtarg_pd pd;
+ struct drsas_evtarg_ld ld;
+ } pd_lba_ld;
+
+ struct {
+ struct drsas_evtarg_pd pd;
+ struct drsas_progress prog;
+ } pd_prog;
+
+ struct {
+ struct drsas_evtarg_pd pd;
+ uint32_t prevState;
+ uint32_t newState;
+ } pd_state;
+
+ struct {
+ uint16_t vendorId;
+ uint16_t deviceId;
+ uint16_t subVendorId;
+ uint16_t subDeviceId;
+ } pci;
+
+ uint32_t rate;
+ char str[96];
+
+ struct {
+ uint32_t rtc;
+ uint32_t elapsedSeconds;
+ } time;
+
+ struct {
+ uint32_t ecar;
+ uint32_t elog;
+ char str[64];
+ } ecc;
+
+ drsas_pd_address_t pd_addr;
+
+ uint8_t b[96];
+ uint16_t s[48];
+ uint32_t w[24];
+ uint64_t d[12];
+ } args;
+
+ char description[128];
+
+};
+
+/* only 63 are usable by the application */
+#define MAX_LOGICAL_DRIVES 64
+/* only 255 physical devices may be used */
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_PD_PER_ENCLOSURE 64
+/* maximum disks per array */
+#define MAX_ROW_SIZE 32
+/* maximum spans per logical drive */
+#define MAX_SPAN_DEPTH 8
+/* maximum number of arrays a hot spare may be dedicated to */
+#define MAX_ARRAYS_DEDICATED 16
+/* maximum number of arrays which may exist */
+#define MAX_ARRAYS 128
+/* maximum number of foreign configs that may be managed at once */
+#define MAX_FOREIGN_CONFIGS 8
+/* maximum spares (global and dedicated combined) */
+#define MAX_SPARES_FOR_THE_CONTROLLER MAX_PHYSICAL_DEVICES
+/* maximum possible Target IDs (i.e. 0 to 63) */
+#define MAX_TARGET_ID 63
+/* maximum number of supported enclosures */
+#define MAX_ENCLOSURES 32
+/* maximum number of PHYs per controller */
+#define MAX_PHYS_PER_CONTROLLER 16
+/* maximum number of LDs per array (due to DDF limitations) */
+#define MAX_LDS_PER_ARRAY 16
+
+/*
+ * -----------------------------------------------------------------------------
+ * -----------------------------------------------------------------------------
+ *
+ * Logical Drive commands
+ *
+ * -----------------------------------------------------------------------------
+ * -----------------------------------------------------------------------------
+ */
+#define	DR_DCMD_LD	0x03000000	/* Logical Device (LD) opcodes */
+
+/*
+ * Input: dcmd.opcode - DR_DCMD_LD_GET_LIST
+ * dcmd.mbox - reserved
+ * dcmd.sge IN - ptr to returned DR_LD_LIST structure
+ * Desc: Return the logical drive list structure
+ * Status: No error
+ */
+
+/*
+ * defines the logical drive reference structure
+ */
+typedef union _DR_LD_REF { /* LD reference structure */
+ struct {
+ uint8_t targetId; /* LD target id (0 to MAX_TARGET_ID) */
+		uint8_t	reserved; /* reserved to stay in line with DR_PD_REF */
+ uint16_t seqNum; /* Sequence Number */
+ } ld_ref;
+ uint32_t ref; /* shorthand reference to full 32-bits */
+} DR_LD_REF; /* 4 bytes */
+
+/*
+ * defines the logical drive list structure
+ */
+typedef struct _DR_LD_LIST {
+ uint32_t ldCount; /* number of LDs */
+ uint32_t reserved; /* pad to 8-byte boundary */
+ struct {
+ DR_LD_REF ref; /* LD reference */
+ uint8_t state; /* current LD state (DR_LD_STATE) */
+ uint8_t reserved[3]; /* pad to 8-byte boundary */
+ uint64_t size; /* LD size */
+ } ldList[MAX_LOGICAL_DRIVES];
+} DR_LD_LIST;
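+
+/*
+ * Hedged sketch of the DCMD described above. DR_DCMD_LD_GET_LIST is not
+ * defined in this header, so the opcode value below (0x03010000) and the
+ * list_dma_obj buffer are assumptions for illustration:
+ *
+ *	struct drsas_dcmd_frame *dcmd = &cmd->frame->dcmd;
+ *
+ *	dcmd->cmd = MFI_CMD_OP_DCMD;
+ *	dcmd->cmd_status = MFI_CMD_STATUS_POLL_MODE;
+ *	dcmd->flags = MFI_FRAME_DIR_READ;
+ *	dcmd->data_xfer_len = sizeof (DR_LD_LIST);
+ *	dcmd->opcode = 0x03010000;	/* DR_DCMD_LD_GET_LIST (assumed) */
+ *	dcmd->sge_count = 1;
+ *	dcmd->sgl.sge32[0].phys_addr =
+ *	    list_dma_obj.dma_cookie[0].dmac_address;
+ *	dcmd->sgl.sge32[0].length = sizeof (DR_LD_LIST);
+ */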
+
+struct drsas_drv_ver {
+ uint8_t signature[12];
+ uint8_t os_name[16];
+ uint8_t os_ver[12];
+ uint8_t drv_name[20];
+ uint8_t drv_ver[32];
+ uint8_t drv_rel_date[20];
+};
+
+#define PCI_TYPE0_ADDRESSES 6
+#define PCI_TYPE1_ADDRESSES 2
+#define PCI_TYPE2_ADDRESSES 5
+
+struct drsas_pci_common_header {
+ uint16_t vendorID; /* (ro) */
+ uint16_t deviceID; /* (ro) */
+ uint16_t command; /* Device control */
+ uint16_t status;
+ uint8_t revisionID; /* (ro) */
+ uint8_t progIf; /* (ro) */
+ uint8_t subClass; /* (ro) */
+ uint8_t baseClass; /* (ro) */
+ uint8_t cacheLineSize; /* (ro+) */
+ uint8_t latencyTimer; /* (ro+) */
+ uint8_t headerType; /* (ro) */
+ uint8_t bist; /* Built in self test */
+
+ union {
+ struct {
+ uint32_t baseAddresses[PCI_TYPE0_ADDRESSES];
+ uint32_t cis;
+ uint16_t subVendorID;
+ uint16_t subSystemID;
+ uint32_t romBaseAddress;
+ uint8_t capabilitiesPtr;
+ uint8_t reserved1[3];
+ uint32_t reserved2;
+ uint8_t interruptLine;
+ uint8_t interruptPin; /* (ro) */
+ uint8_t minimumGrant; /* (ro) */
+ uint8_t maximumLatency; /* (ro) */
+ } type_0;
+
+ struct {
+ uint32_t baseAddresses[PCI_TYPE1_ADDRESSES];
+ uint8_t primaryBus;
+ uint8_t secondaryBus;
+ uint8_t subordinateBus;
+ uint8_t secondaryLatency;
+ uint8_t ioBase;
+ uint8_t ioLimit;
+ uint16_t secondaryStatus;
+ uint16_t memoryBase;
+ uint16_t memoryLimit;
+ uint16_t prefetchBase;
+ uint16_t prefetchLimit;
+ uint32_t prefetchBaseUpper32;
+ uint32_t prefetchLimitUpper32;
+ uint16_t ioBaseUpper16;
+ uint16_t ioLimitUpper16;
+ uint8_t capabilitiesPtr;
+ uint8_t reserved1[3];
+ uint32_t romBaseAddress;
+ uint8_t interruptLine;
+ uint8_t interruptPin;
+ uint16_t bridgeControl;
+ } type_1;
+
+ struct {
+ uint32_t socketRegistersBaseAddress;
+ uint8_t capabilitiesPtr;
+ uint8_t reserved;
+ uint16_t secondaryStatus;
+ uint8_t primaryBus;
+ uint8_t secondaryBus;
+ uint8_t subordinateBus;
+ uint8_t secondaryLatency;
+ struct {
+ uint32_t base;
+ uint32_t limit;
+ } range[PCI_TYPE2_ADDRESSES-1];
+ uint8_t interruptLine;
+ uint8_t interruptPin;
+ uint16_t bridgeControl;
+ } type_2;
+ } header;
+};
+
+struct drsas_pci_link_capability {
+ union {
+ struct {
+ uint32_t linkSpeed :4;
+ uint32_t linkWidth :6;
+ uint32_t aspmSupport :2;
+ uint32_t losExitLatency :3;
+ uint32_t l1ExitLatency :3;
+ uint32_t rsvdp :6;
+ uint32_t portNumber :8;
+ } bits;
+
+ uint32_t asUlong;
+ } cap;
+
+};
+
+struct drsas_pci_link_status_capability {
+ union {
+ struct {
+ uint16_t linkSpeed :4;
+ uint16_t negotiatedLinkWidth :6;
+ uint16_t linkTrainingError :1;
+ uint16_t linkTraning :1;
+ uint16_t slotClockConfig :1;
+ uint16_t rsvdZ :3;
+ } bits;
+
+ uint16_t asUshort;
+ } stat_cap;
+
+ uint16_t reserved;
+
+};
+
+struct drsas_pci_capabilities {
+ struct drsas_pci_link_capability linkCapability;
+ struct drsas_pci_link_status_capability linkStatusCapability;
+};
+
+struct drsas_pci_information {
+ uint32_t busNumber;
+ uint8_t deviceNumber;
+ uint8_t functionNumber;
+ uint8_t interruptVector;
+ uint8_t reserved;
+ struct drsas_pci_common_header pciHeaderInfo;
+ struct drsas_pci_capabilities capability;
+ uint8_t reserved2[32];
+};
+
+struct drsas_ioctl {
+ uint16_t version;
+ uint16_t controller_id;
+ uint8_t signature[8];
+ uint32_t reserved_1;
+ uint32_t control_code;
+ uint32_t reserved_2[2];
+ uint8_t frame[64];
+ union drsas_sgl_frame sgl_frame;
+ uint8_t sense_buff[DRSAS_MAX_SENSE_LENGTH];
+ uint8_t data[1];
+};
+
+struct drsas_aen {
+ uint16_t host_no;
+ uint16_t cmd_status;
+ uint32_t seq_num;
+ uint32_t class_locale_word;
+};
+#pragma pack()
+
+#ifndef DDI_VENDOR_LSI
+#define DDI_VENDOR_LSI "LSI"
+#endif /* DDI_VENDOR_LSI */
+
+static int drsas_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
+static int drsas_attach(dev_info_t *, ddi_attach_cmd_t);
+static int drsas_reset(dev_info_t *, ddi_reset_cmd_t);
+static int drsas_detach(dev_info_t *, ddi_detach_cmd_t);
+static int drsas_open(dev_t *, int, int, cred_t *);
+static int drsas_close(dev_t, int, int, cred_t *);
+static int drsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
+
+static int drsas_tran_tgt_init(dev_info_t *, dev_info_t *,
+ scsi_hba_tran_t *, struct scsi_device *);
+static struct scsi_pkt *drsas_tran_init_pkt(struct scsi_address *, register
+ struct scsi_pkt *, struct buf *, int, int, int, int,
+ int (*)(), caddr_t);
+static int drsas_tran_start(struct scsi_address *,
+ register struct scsi_pkt *);
+static int drsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
+static int drsas_tran_reset(struct scsi_address *, int);
+static int drsas_tran_getcap(struct scsi_address *, char *, int);
+static int drsas_tran_setcap(struct scsi_address *, char *, int, int);
+static void drsas_tran_destroy_pkt(struct scsi_address *,
+ struct scsi_pkt *);
+static void drsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
+static void drsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
+static uint_t drsas_isr();
+static uint_t drsas_softintr();
+
+static int init_mfi(struct drsas_instance *);
+static int drsas_free_dma_obj(struct drsas_instance *, dma_obj_t);
+static int drsas_alloc_dma_obj(struct drsas_instance *, dma_obj_t *,
+ uchar_t);
+static struct drsas_cmd *get_mfi_pkt(struct drsas_instance *);
+static void return_mfi_pkt(struct drsas_instance *,
+ struct drsas_cmd *);
+
+static void free_space_for_mfi(struct drsas_instance *);
+static void free_additional_dma_buffer(struct drsas_instance *);
+static int alloc_additional_dma_buffer(struct drsas_instance *);
+static int read_fw_status_reg_ppc(struct drsas_instance *);
+static void issue_cmd_ppc(struct drsas_cmd *, struct drsas_instance *);
+static int issue_cmd_in_poll_mode_ppc(struct drsas_instance *,
+ struct drsas_cmd *);
+static int issue_cmd_in_sync_mode_ppc(struct drsas_instance *,
+ struct drsas_cmd *);
+static void enable_intr_ppc(struct drsas_instance *);
+static void disable_intr_ppc(struct drsas_instance *);
+static int intr_ack_ppc(struct drsas_instance *);
+static int mfi_state_transition_to_ready(struct drsas_instance *);
+static void destroy_mfi_frame_pool(struct drsas_instance *);
+static int create_mfi_frame_pool(struct drsas_instance *);
+static int drsas_dma_alloc(struct drsas_instance *, struct scsi_pkt *,
+ struct buf *, int, int (*)());
+static int drsas_dma_move(struct drsas_instance *,
+ struct scsi_pkt *, struct buf *);
+static void flush_cache(struct drsas_instance *instance);
+static void display_scsi_inquiry(caddr_t);
+static int start_mfi_aen(struct drsas_instance *instance);
+static int handle_drv_ioctl(struct drsas_instance *instance,
+ struct drsas_ioctl *ioctl, int mode);
+static int handle_mfi_ioctl(struct drsas_instance *instance,
+ struct drsas_ioctl *ioctl, int mode);
+static int handle_mfi_aen(struct drsas_instance *instance,
+ struct drsas_aen *aen);
+static void fill_up_drv_ver(struct drsas_drv_ver *dv);
+static struct drsas_cmd *build_cmd(struct drsas_instance *instance,
+ struct scsi_address *ap, struct scsi_pkt *pkt,
+ uchar_t *cmd_done);
+static int register_mfi_aen(struct drsas_instance *instance,
+ uint32_t seq_num, uint32_t class_locale_word);
+static int issue_mfi_pthru(struct drsas_instance *instance, struct
+ drsas_ioctl *ioctl, struct drsas_cmd *cmd, int mode);
+static int issue_mfi_dcmd(struct drsas_instance *instance, struct
+ drsas_ioctl *ioctl, struct drsas_cmd *cmd, int mode);
+static int issue_mfi_smp(struct drsas_instance *instance, struct
+ drsas_ioctl *ioctl, struct drsas_cmd *cmd, int mode);
+static int issue_mfi_stp(struct drsas_instance *instance, struct
+ drsas_ioctl *ioctl, struct drsas_cmd *cmd, int mode);
+static int abort_aen_cmd(struct drsas_instance *instance,
+ struct drsas_cmd *cmd_to_abort);
+
+static int drsas_common_check(struct drsas_instance *instance,
+ struct drsas_cmd *cmd);
+static void drsas_fm_init(struct drsas_instance *instance);
+static void drsas_fm_fini(struct drsas_instance *instance);
+static int drsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *,
+ const void *);
+static void drsas_fm_ereport(struct drsas_instance *instance,
+ char *detail);
+static int drsas_check_dma_handle(ddi_dma_handle_t handle);
+static int drsas_check_acc_handle(ddi_acc_handle_t handle);
+
+static void drsas_rem_intrs(struct drsas_instance *instance);
+static int drsas_add_intrs(struct drsas_instance *instance, int intr_type);
+
+static void drsas_tran_tgt_free(dev_info_t *, dev_info_t *,
+ scsi_hba_tran_t *, struct scsi_device *);
+static int drsas_tran_bus_config(dev_info_t *, uint_t,
+ ddi_bus_config_op_t, void *, dev_info_t **);
+static int drsas_parse_devname(char *, int *, int *);
+static int drsas_config_all_devices(struct drsas_instance *);
+static int drsas_config_scsi_device(struct drsas_instance *,
+ struct scsi_device *, dev_info_t **);
+static int drsas_config_ld(struct drsas_instance *, uint16_t,
+ uint8_t, dev_info_t **);
+static dev_info_t *drsas_find_child(struct drsas_instance *, uint16_t,
+ uint8_t);
+static int drsas_name_node(dev_info_t *, char *, int);
+static void drsas_issue_evt_taskq(struct drsas_eventinfo *);
+static int drsas_service_evt(struct drsas_instance *, int, int, int,
+ uint64_t);
+static int drsas_mode_sense_build(struct scsi_pkt *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DR_SAS_H_ */
diff --git a/usr/src/uts/common/io/dr_sas/dr_sas_list.h b/usr/src/uts/common/io/dr_sas/dr_sas_list.h
new file mode 100644
index 0000000000..4154a77796
--- /dev/null
+++ b/usr/src/uts/common/io/dr_sas/dr_sas_list.h
@@ -0,0 +1,212 @@
+/*
+ * dr_sas_list.h: header for dr_sas
+ *
+ * Solaris MegaRAID driver for SAS2.0 controllers
+ * Copyright (c) 2008-2009, LSI Logic Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the author nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _DR_SAS_LIST_H_
+#define _DR_SAS_LIST_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct mlist_head {
+ struct mlist_head *next, *prev;
+};
+
+typedef struct mlist_head mlist_t;
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct mlist_head name = LIST_HEAD_INIT(name)
+
+#define INIT_LIST_HEAD(ptr) { \
+ (ptr)->next = (ptr); (ptr)->prev = (ptr); \
+}
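+
+
+/*
+ * Example (illustrative only): a list head may be declared and
+ * initialized statically or at runtime:
+ *
+ *	LIST_HEAD(cmd_list);		(static initialization)
+ *
+ *	struct mlist_head head;
+ *	INIT_LIST_HEAD(&head);		(runtime initialization)
+ */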
+
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static void __list_add(struct mlist_head *new,
+ struct mlist_head *prev,
+ struct mlist_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+
+/*
+ * mlist_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static void mlist_add(struct mlist_head *new, struct mlist_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+
+/*
+ * mlist_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static void mlist_add_tail(struct mlist_head *new, struct mlist_head *head)
+{
+ __list_add(new, head->prev, head);
+}
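+
+
+/*
+ * Example (illustrative only; a, b and c are hypothetical mlist_head
+ * nodes): with a head initialized as above, mlist_add() yields LIFO
+ * (stack) order while mlist_add_tail() yields FIFO (queue) order:
+ *
+ *	mlist_add(&a, &head);		(list is: a)
+ *	mlist_add(&b, &head);		(list is: b, a)
+ *	mlist_add_tail(&c, &head);	(list is: b, a, c)
+ */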
+
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static void __list_del(struct mlist_head *prev,
+ struct mlist_head *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+
+/*
+ * mlist_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static void mlist_del_init(struct mlist_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ INIT_LIST_HEAD(entry);
+}
+
+
+/*
+ * mlist_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static int mlist_empty(struct mlist_head *head)
+{
+ return (head->next == head);
+}
+
+
+/*
+ * mlist_splice - join two lists
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static void mlist_splice(struct mlist_head *list, struct mlist_head *head)
+{
+ struct mlist_head *first = list->next;
+
+ if (first != list) {
+ struct mlist_head *last = list->prev;
+ struct mlist_head *at = head->next;
+
+ first->prev = head;
+ head->next = first;
+
+ last->next = at;
+ at->prev = last;
+ }
+}
+
+
+/*
+ * mlist_entry - get the struct for this entry
+ * @ptr: the &struct mlist_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define mlist_entry(ptr, type, member) \
+ ((type *)((size_t)(ptr) - offsetof(type, member)))
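+
+
+/*
+ * Example (illustrative only; drsas_foo is a hypothetical structure):
+ * given a structure that embeds an mlist_head, mlist_entry() recovers
+ * a pointer to the containing structure from a pointer to the member:
+ *
+ *	struct drsas_foo {
+ *		int			foo_data;
+ *		struct mlist_head	foo_node;
+ *	};
+ *
+ *	struct mlist_head *pos = head.next;
+ *	struct drsas_foo *fp = mlist_entry(pos, struct drsas_foo, foo_node);
+ */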
+
+
+/*
+ * mlist_for_each - iterate over a list
+ * @pos: the &struct mlist_head to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define mlist_for_each(pos, head) \
+ for (pos = (head)->next, prefetch(pos->next); pos != (head); \
+ pos = pos->next, prefetch(pos->next))
+
+
+/*
+ * mlist_for_each_safe - iterate over a list safe against removal of list entry
+ * @pos: the &struct mlist_head to use as a loop counter.
+ * @n: another &struct mlist_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define mlist_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DR_SAS_LIST_H_ */
diff --git a/usr/src/uts/common/io/fibre-channel/impl/fctl.c b/usr/src/uts/common/io/fibre-channel/impl/fctl.c
index 634de6c6dd..87105e779d 100644
--- a/usr/src/uts/common/io/fibre-channel/impl/fctl.c
+++ b/usr/src/uts/common/io/fibre-channel/impl/fctl.c
@@ -24,6 +24,7 @@
*/
/*
* Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
+ * Copyright (c) 2015 Joyent, Inc. All rights reserved.
*/
/*
* Fibre channel Transport Library (fctl)
@@ -5500,6 +5501,11 @@ fc_ulp_get_adapter_paths(char *pathList, int count)
maxPorts ++;
}
+ if (maxPorts == 0) {
+ mutex_exit(&fctl_port_lock);
+ return (0);
+ }
+
/* Now allocate a buffer to store all the pointers for comparisons */
portList = kmem_zalloc(sizeof (fc_local_port_t *) * maxPorts, KM_SLEEP);
diff --git a/usr/src/uts/common/io/gsqueue/gsqueue.c b/usr/src/uts/common/io/gsqueue/gsqueue.c
new file mode 100644
index 0000000000..b484b16142
--- /dev/null
+++ b/usr/src/uts/common/io/gsqueue/gsqueue.c
@@ -0,0 +1,612 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright (c) 2014 Joyent, Inc. All rights reserved.
+ */
+
+/*
+ * Serialization queues are a technique used in illumos to provide what's
+ * commonly known as a 'vertical' perimeter. The idea (described a bit in
+ * uts/common/inet/squeue.c) is to provide a means to make sure that message
+ * blocks (mblk_t) are processed in a specific order. Subsystems like ip and vnd
+ * consume these on different policies, ip on a conn_t basis, vnd on a per
+ * device basis, and use this to ensure that only one packet is being processed
+ * at a given time.
+ *
+ * Serialization queues were originally used by ip. As part of that
+ * implementation, many of the details of ip were baked into it. That includes
+ * things like conn_t, ip receive attributes, and the notion of sets. While an
+ * individual serialization queue, or gsqueue_t, is a useful level of
+ * abstraction, it isn't the basis on which most consumers want to manage them.
+ * Instead, we have the notion of a set of serialization queues. These sets are
+ * DR (CPU Dynamic reconfiguration) aware, and allow consumers to have a
+ * gsqueue_t per CPU to fanout on without managing them all itself. In the
+ * original implementation, this existed, but they were heavily tied into the
+ * infrastructure of IP, and its notion of polling on the underlying MAC
+ * devices.
+ *
+ * The result of that past is a new interface to serialization queues and a
+ * similar, but slightly different, abstraction to sets of these
+ * (gsqueue_set_t). When designing this there are two different approaches that
+ * one could consider. The first is that the system has one gsqueue_set_t that
+ * the entire world shares, whether IP or some other consumer. The other is that
+ * every consumer has their own set.
+ *
+ * The trade offs between these two approaches are their pathological failure
+ * modes. There is no guarantee that any two consumers here are equivalent. In
+ * fact, they very likely have very different latency profiles. If they are
+ * being processed in the same queue, that can lead to very odd behaviors. More
+ * generally, if we have a series of processing functions from one consumer
+ * which are generally short, and another which are generally long, that'll
+ * cause undue latency that's harder to observe. If we instead take the approach
+ * that each consumer should have its own set that it fans out over then we
+ * won't end up with the problem that a given serialization queue will have
+ * multiple latency profiles, but instead we'll see cpu contention for the bound
+ * gsqueue_t worker thread. Keep in mind though, that only the gsqueue_t worker
+ * thread is bound and it is in fact possible for it to be processed by other
+ * threads on other CPUs.
+ *
+ * We've opted to go down the second path, so each consumer has its own
+ * independent set of serialization queues that it is bound over.
+ *
+ * Structure Hierarchies
+ * ---------------------
+ *
+ * At the top level, we have a single list of gsqueue_set_t. The gsqueue_set_t
+ * encapsulates all the per-CPU gsqueue_t that exist in the form of
+ * gsqueue_cpu_t. The gsqueue_cpu_t has been designed such that it could
+ * accommodate more than one gsqueue_t, but today there is a one to one mapping.
+ *
+ * We maintain two different lists of gsqueue_cpu_t, the active and defunct
+ * sets. The active set is maintained in the array `gs_cpus`. There are NCPU
+ * entries available in `gs_cpus` with the total number of currently active cpus
+ * described in `gs_ncpus`. The ordering of `gs_cpus` is unimportant. When
+ * there is no longer a need for a given binding (see the following section for
+ * more explanation on when this is the case) then we move the entry to the
+ * `gs_defunct` list which is just a singly linked list of gsqueue_cpu_t.
+ *
+ * In addition, each gsqueue_set_t can have a series of callbacks registered
+ * with it. These are described in the following section. Graphically, a given
+ * gsqueue_set_t looks roughly like the following:
+ *
+ * +---------------+
+ * | gsqueue_set_t |
+ * +---------------+
+ * | | |
+ * | | * . . . gs_cpus
+ * | | |
+ * | | | +-------------------------------------------------+
+ * | | +----->| gsqueue_cpu_t || gsqueue_cpu_t || gsqueue_cpu_t |...
+ * | | +-------------------------------------------------+
+ * | |
+ * | * . . . gs_defunct
+ * | |
+ * | | +---------------+ +---------------+ +---------------+
+ * | +--->| gsqueue_cpu_t |-->| gsqueue_cpu_t |-->| gsqueue_cpu_t |...
+ * | +---------------+ +---------------+ +---------------+
+ * * . . . gs_cbs
+ * |
+ * | +--------------+ +--------------+ +--------------+
+ * +--->| gsqueue_cb_t |-->| gsqueue_cb_t |->| gsqueue_cb_t |...
+ * +--------------+ +--------------+ +--------------+
+ *
+ * CPU DR, gsqueue_t, and gsqueue_cpu_t
+ * ------------------------------------
+ *
+ * Recall, that every serialization queue (gsqueue_t or squeue_t) has a worker
+ * thread that may end up doing work. As part of supporting fanout, we have one
+ * gsqueue_t per CPU, and its worker thread is bound to that CPU. Because of
+ * this binding, we need to deal with CPU DR changes.
+ *
+ * The gsqueue driver maintains a single CPU DR callback that is used for the
+ * entire sub-system. We break down CPU DR events into three groups. Offline
+ * events, online events, and events we can ignore. When the first group occurs,
+ * we need to go through every gsqueue_t, find the gsqueue_cpu_t that
+ * corresponds to that processor id, and unbind all of its gsqueue_t's. It's
+ * rather important that we only unbind the gsqueue_t's and not actually destroy
+ * them. When this happens, they could very easily have data queued inside of
+ * them and it's unreasonable to just throw out everything in them at this
+ * point. The data remains intact and service continues uninterrupted.
+ *
+ * When we receive an online event, we do the opposite. We try to find a
+ * gsqueue_cpu_t that previously was bound to this CPU (by leaving its gqc_cpuid
+ * field intact) in the defunct list. If we find one, we remove it from the
+ * defunct list and add it to the active list as well as binding the gsqueue_t
+ * to the CPU in question. If we don't find one, then we create a new one.
+ *
+ * To deal with these kinds of situations, we allow a consumer to register
+ * callbacks for the gsqueue_t that they are interested in. These callbacks will
+ * fire whenever we are handling a topology change. The design of the callbacks
+ * is not that the user can take any administrative action during them, but
+ * rather set something for them to do asynchronously. It is illegal to make any
+ * calls into the gsqueue system while you are in a callback.
+ *
+ * Locking
+ * -------
+ *
+ * The lock ordering here is fairly straightforward. Due to our use of CPU
+ * binding and the CPU DR callbacks, we have an additional lock to consider
+ * cpu_lock. Because of that, the following are the rules for locking:
+ *
+ *
+ * also at the top of the order.
+ *
+ * o cpu_lock > gsqueue_lock > gsqueue_t`gs_lock > squeue_t`sq_lock
+ * If you need to take multiple locks, you must take the greatest
+ * (left-most) one first.
+ */
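+
+/*
+ * As a sketch of the consumer pattern (a hypothetical consumer; the
+ * names and argument values below are illustrative, not taken from ip
+ * or vnd), a subsystem creates a set once, fans out over it, and tears
+ * it down when it is finished:
+ *
+ *	gsqueue_set_t *set = gsqueue_set_create(wwait, wpri);
+ *	gsqueue_t *gsp = gsqueue_set_get(set, fanout_index);
+ *	gsqueue_enter_one(gsp, mp, my_proc, my_arg, flags, tag);
+ *	...
+ *	gsqueue_set_destroy(set);
+ */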
+
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/stat.h>
+#include <sys/kmem.h>
+#include <sys/stream.h>
+#include <sys/modctl.h>
+#include <sys/cpuvar.h>
+#include <sys/list.h>
+#include <sys/sysmacros.h>
+
+#include <sys/gsqueue.h>
+#include <sys/squeue_impl.h>
+
+typedef struct gsqueue_cb {
+ struct gsqueue_cb *gcb_next;
+ gsqueue_cb_f gcb_func;
+ void *gcb_arg;
+} gsqueue_cb_t;
+
+typedef struct gsqueue_cpu {
+ struct gsqueue_cpu *gqc_next;
+ squeue_t *gqc_head;
+ processorid_t gqc_cpuid;
+} gsqueue_cpu_t;
+
+struct gsqueue_set {
+ list_node_t gs_next;
+ uint_t gs_wwait;
+ pri_t gs_wpri;
+ kmutex_t gs_lock;
+ int gs_ncpus;
+ gsqueue_cpu_t **gs_cpus;
+ gsqueue_cpu_t *gs_defunct;
+ gsqueue_cb_t *gs_cbs;
+};
+
+static kmutex_t gsqueue_lock;
+static list_t gsqueue_list;
+static kmem_cache_t *gsqueue_cb_cache;
+static kmem_cache_t *gsqueue_cpu_cache;
+static kmem_cache_t *gsqueue_set_cache;
+
+static gsqueue_cpu_t *
+gsqueue_cpu_create(uint_t wwait, pri_t wpri, processorid_t cpuid)
+{
+ gsqueue_cpu_t *scp;
+
+ scp = kmem_cache_alloc(gsqueue_cpu_cache, KM_SLEEP);
+
+ scp->gqc_next = NULL;
+ scp->gqc_cpuid = cpuid;
+ scp->gqc_head = squeue_create(wwait, wpri, B_FALSE);
+ scp->gqc_head->sq_state = SQS_DEFAULT;
+ squeue_bind(scp->gqc_head, cpuid);
+
+ return (scp);
+}
+
+static void
+gsqueue_cpu_destroy(gsqueue_cpu_t *scp)
+{
+ squeue_destroy(scp->gqc_head);
+ kmem_cache_free(gsqueue_cpu_cache, scp);
+}
+
+gsqueue_set_t *
+gsqueue_set_create(uint_t wwait, pri_t wpri)
+{
+ int i;
+ gsqueue_set_t *gssp;
+
+ gssp = kmem_cache_alloc(gsqueue_set_cache, KM_SLEEP);
+ gssp->gs_wwait = wwait;
+ gssp->gs_wpri = wpri;
+ gssp->gs_ncpus = 0;
+
+ /*
+ * We're grabbing cpu_lock. Before we let go of it, all set up of the
+ * gsqueue_set_t must be complete, as the set will then be visible to
+ * the various CPU DR callbacks.
+ */
+ mutex_enter(&cpu_lock);
+
+ for (i = 0; i < NCPU; i++) {
+ gsqueue_cpu_t *scp;
+ cpu_t *cp = cpu_get(i);
+ if (cp != NULL && CPU_ACTIVE(cp) &&
+ cp->cpu_flags & CPU_EXISTS) {
+ scp = gsqueue_cpu_create(wwait, wpri, cp->cpu_id);
+ gssp->gs_cpus[gssp->gs_ncpus] = scp;
+ gssp->gs_ncpus++;
+ }
+ }
+
+ /* Finally we can add it to our global list and be done */
+ mutex_enter(&gsqueue_lock);
+ list_insert_tail(&gsqueue_list, gssp);
+ mutex_exit(&gsqueue_lock);
+ mutex_exit(&cpu_lock);
+
+ return (gssp);
+}
+
+void
+gsqueue_set_destroy(gsqueue_set_t *gssp)
+{
+ int i;
+ gsqueue_cpu_t *scp;
+
+ /*
+ * Go through and unbind all of the squeues while cpu_lock is held and
+ * move them to the defunct list. Once that's done, we don't need to do
+ * anything else with cpu_lock.
+ */
+ mutex_enter(&cpu_lock);
+ mutex_enter(&gsqueue_lock);
+ list_remove(&gsqueue_list, gssp);
+ mutex_exit(&gsqueue_lock);
+
+ mutex_enter(&gssp->gs_lock);
+
+ for (i = 0; i < gssp->gs_ncpus; i++) {
+ scp = gssp->gs_cpus[i];
+ squeue_unbind(scp->gqc_head);
+ scp->gqc_next = gssp->gs_defunct;
+ gssp->gs_defunct = scp;
+ gssp->gs_cpus[i] = NULL;
+ }
+ gssp->gs_ncpus = 0;
+
+ mutex_exit(&gssp->gs_lock);
+ mutex_exit(&cpu_lock);
+
+ while (gssp->gs_defunct != NULL) {
+ gsqueue_cpu_t *scp;
+
+ scp = gssp->gs_defunct;
+ gssp->gs_defunct = scp->gqc_next;
+ gsqueue_cpu_destroy(scp);
+ }
+
+ while (gssp->gs_cbs != NULL) {
+ gsqueue_cb_t *cbp;
+
+ cbp = gssp->gs_cbs;
+ gssp->gs_cbs = cbp->gcb_next;
+ kmem_cache_free(gsqueue_cb_cache, cbp);
+ }
+
+ ASSERT(gssp->gs_ncpus == 0);
+ ASSERT(gssp->gs_defunct == NULL);
+ ASSERT(gssp->gs_cbs == NULL);
+ kmem_cache_free(gsqueue_set_cache, gssp);
+}
+
+gsqueue_t *
+gsqueue_set_get(gsqueue_set_t *gssp, uint_t index)
+{
+ squeue_t *sqp;
+ gsqueue_cpu_t *scp;
+
+ mutex_enter(&gssp->gs_lock);
+ scp = gssp->gs_cpus[index % gssp->gs_ncpus];
+ sqp = scp->gqc_head;
+ mutex_exit(&gssp->gs_lock);
+ return ((gsqueue_t *)sqp);
+}
+
+uintptr_t
+gsqueue_set_cb_add(gsqueue_set_t *gssp, gsqueue_cb_f cb, void *arg)
+{
+ gsqueue_cb_t *cbp;
+
+ cbp = kmem_cache_alloc(gsqueue_cb_cache, KM_SLEEP);
+ cbp->gcb_func = cb;
+ cbp->gcb_arg = arg;
+
+ mutex_enter(&gssp->gs_lock);
+ cbp->gcb_next = gssp->gs_cbs;
+ gssp->gs_cbs = cbp;
+ mutex_exit(&gssp->gs_lock);
+ return ((uintptr_t)cbp);
+}
+
+int
+gsqueue_set_cb_remove(gsqueue_set_t *gssp, uintptr_t id)
+{
+ gsqueue_cb_t *cbp, *prev;
+ mutex_enter(&gssp->gs_lock);
+ cbp = gssp->gs_cbs;
+ prev = NULL;
+ while (cbp != NULL) {
+ if ((uintptr_t)cbp != id) {
+ prev = cbp;
+ cbp = cbp->gcb_next;
+ continue;
+ }
+
+ if (prev == NULL) {
+ gssp->gs_cbs = cbp->gcb_next;
+ } else {
+ prev->gcb_next = cbp->gcb_next;
+ }
+
+ mutex_exit(&gssp->gs_lock);
+ kmem_cache_free(gsqueue_cb_cache, cbp);
+ return (0);
+ }
+ mutex_exit(&gssp->gs_lock);
+ return (-1);
+}
+
+void
+gsqueue_enter_one(gsqueue_t *gsp, mblk_t *mp, gsqueue_proc_f func, void *arg,
+ int flags, uint8_t tag)
+{
+ squeue_t *sqp = (squeue_t *)gsp;
+
+ ASSERT(mp->b_next == NULL);
+ ASSERT(mp->b_prev == NULL);
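+
+	/*
+	 * Stash the processing function and its argument in the mblk
+	 * itself; the underlying squeue drain logic retrieves them from
+	 * b_queue and b_prev when it processes the message.
+	 */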
+ mp->b_queue = (queue_t *)func;
+ mp->b_prev = arg;
+ sqp->sq_enter(sqp, mp, mp, 1, NULL, flags, tag);
+}
+
+static void
+gsqueue_notify(gsqueue_set_t *gssp, squeue_t *sqp, boolean_t online)
+{
+ gsqueue_cb_t *cbp;
+
+ ASSERT(MUTEX_HELD(&gssp->gs_lock));
+ cbp = gssp->gs_cbs;
+ while (cbp != NULL) {
+ cbp->gcb_func(gssp, (gsqueue_t *)sqp, cbp->gcb_arg, online);
+ cbp = cbp->gcb_next;
+ }
+}
+
+/*
+ * When we online a processor we need to go through and either bind a defunct
+ * squeue or create a new one. We'll try to reuse a gsqueue_cpu_t from the
+ * defunct list that used to be on that processor. If no such gsqueue_cpu_t
+ * exists, then we'll create a new one. We'd rather avoid taking over an
+ * existing defunct one that used to be on another CPU, as it's not unreasonable
+ * to believe that its CPU will come back. More CPUs are offlined and onlined by
+ * the administrator or by creating cpu sets than actually get offlined by FMA.
+ */
+static void
+gsqueue_handle_online(processorid_t id)
+{
+ gsqueue_set_t *gssp;
+
+ ASSERT(MUTEX_HELD(&cpu_lock));
+ mutex_enter(&gsqueue_lock);
+ for (gssp = list_head(&gsqueue_list); gssp != NULL;
+ gssp = list_next(&gsqueue_list, gssp)) {
+ gsqueue_cpu_t *scp;
+
+ mutex_enter(&gssp->gs_lock);
+ scp = gssp->gs_defunct;
+ while (scp != NULL) {
+ if (scp->gqc_cpuid == id)
+ break;
+ scp = scp->gqc_next;
+ }
+
+ if (scp == NULL) {
+ scp = gsqueue_cpu_create(gssp->gs_wwait,
+ gssp->gs_wpri, id);
+ } else {
+ squeue_bind(scp->gqc_head, id);
+ }
+ ASSERT(gssp->gs_ncpus < NCPU);
+ gssp->gs_cpus[gssp->gs_ncpus] = scp;
+ gssp->gs_ncpus++;
+ gsqueue_notify(gssp, scp->gqc_head, B_TRUE);
+ mutex_exit(&gssp->gs_lock);
+ }
+ mutex_exit(&gsqueue_lock);
+}
+
+static void
+gsqueue_handle_offline(processorid_t id)
+{
+ gsqueue_set_t *gssp;
+
+ ASSERT(MUTEX_HELD(&cpu_lock));
+ mutex_enter(&gsqueue_lock);
+ for (gssp = list_head(&gsqueue_list); gssp != NULL;
+ gssp = list_next(&gsqueue_list, gssp)) {
+ int i;
+ gsqueue_cpu_t *scp = NULL;
+
+ mutex_enter(&gssp->gs_lock);
+ for (i = 0; i < gssp->gs_ncpus; i++) {
+ if (gssp->gs_cpus[i]->gqc_cpuid == id) {
+ scp = gssp->gs_cpus[i];
+ break;
+ }
+ }
+
+ if (scp != NULL) {
+ squeue_unbind(scp->gqc_head);
+ scp->gqc_next = gssp->gs_defunct;
+ gssp->gs_defunct = scp;
+ gssp->gs_cpus[i] = gssp->gs_cpus[gssp->gs_ncpus-1];
+ gssp->gs_ncpus--;
+ gsqueue_notify(gssp, scp->gqc_head, B_FALSE);
+ }
+ mutex_exit(&gssp->gs_lock);
+ }
+ mutex_exit(&gsqueue_lock);
+}
+
+/* ARGSUSED */
+static int
+gsqueue_cpu_setup(cpu_setup_t what, int id, void *unused)
+{
+ cpu_t *cp;
+
+ ASSERT(MUTEX_HELD(&cpu_lock));
+ cp = cpu_get(id);
+ switch (what) {
+ case CPU_CONFIG:
+ case CPU_ON:
+ case CPU_INIT:
+ case CPU_CPUPART_IN:
+ if (cp != NULL && CPU_ACTIVE(cp) && cp->cpu_flags & CPU_EXISTS)
+ gsqueue_handle_online(cp->cpu_id);
+ break;
+ case CPU_UNCONFIG:
+ case CPU_OFF:
+ case CPU_CPUPART_OUT:
+ gsqueue_handle_offline(cp->cpu_id);
+ break;
+ default:
+ break;
+ }
+
+ return (0);
+}
+
+
+/* ARGSUSED */
+static int
+gsqueue_set_cache_construct(void *buf, void *arg, int kmflags)
+{
+ gsqueue_set_t *gssp = buf;
+
+ gssp->gs_cpus = kmem_alloc(sizeof (gsqueue_cpu_t *) * NCPU, kmflags);
+ if (gssp->gs_cpus == NULL)
+ return (-1);
+
+ mutex_init(&gssp->gs_lock, NULL, MUTEX_DRIVER, NULL);
+ gssp->gs_ncpus = 0;
+ gssp->gs_defunct = NULL;
+ gssp->gs_cbs = NULL;
+
+ return (0);
+}
+
+static void
+gsqueue_set_cache_destruct(void *buf, void *arg)
+{
+ gsqueue_set_t *gssp = buf;
+
+ kmem_free(gssp->gs_cpus, sizeof (gsqueue_cpu_t *) * NCPU);
+ gssp->gs_cpus = NULL;
+ mutex_destroy(&gssp->gs_lock);
+}
+
+static void
+gsqueue_ddiinit(void)
+{
+ list_create(&gsqueue_list, sizeof (gsqueue_set_t),
+ offsetof(gsqueue_set_t, gs_next));
+ mutex_init(&gsqueue_lock, NULL, MUTEX_DRIVER, NULL);
+
+ gsqueue_cb_cache = kmem_cache_create("gsqueue_cb_cache",
+ sizeof (gsqueue_cb_t),
+ 0, NULL, NULL, NULL, NULL, NULL, 0);
+ gsqueue_cpu_cache = kmem_cache_create("gsqueue_cpu_cache",
+ sizeof (gsqueue_cpu_t),
+ 0, NULL, NULL, NULL, NULL, NULL, 0);
+ gsqueue_set_cache = kmem_cache_create("gsqueue_set_cache",
+ sizeof (gsqueue_set_t),
+ 0, gsqueue_set_cache_construct, gsqueue_set_cache_destruct,
+ NULL, NULL, NULL, 0);
+
+ mutex_enter(&cpu_lock);
+ register_cpu_setup_func(gsqueue_cpu_setup, NULL);
+ mutex_exit(&cpu_lock);
+}
+
+static int
+gsqueue_ddifini(void)
+{
+ mutex_enter(&gsqueue_lock);
+ if (list_is_empty(&gsqueue_list) == 0) {
+ mutex_exit(&gsqueue_lock);
+ return (EBUSY);
+ }
+ list_destroy(&gsqueue_list);
+ mutex_exit(&gsqueue_lock);
+
+ mutex_enter(&cpu_lock);
+ unregister_cpu_setup_func(gsqueue_cpu_setup, NULL);
+ mutex_exit(&cpu_lock);
+
+ kmem_cache_destroy(gsqueue_set_cache);
+ kmem_cache_destroy(gsqueue_cpu_cache);
+ kmem_cache_destroy(gsqueue_cb_cache);
+
+ mutex_destroy(&gsqueue_lock);
+
+ return (0);
+}
+
+static struct modlmisc gsqueue_modmisc = {
+ &mod_miscops,
+ "gsqueue"
+};
+
+static struct modlinkage gsqueue_modlinkage = {
+ MODREV_1,
+ &gsqueue_modmisc,
+ NULL
+};
+
+int
+_init(void)
+{
+ int ret;
+
+ gsqueue_ddiinit();
+ if ((ret = mod_install(&gsqueue_modlinkage)) != 0) {
+ VERIFY(gsqueue_ddifini() == 0);
+ return (ret);
+ }
+
+ return (ret);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&gsqueue_modlinkage, modinfop));
+}
+
+int
+_fini(void)
+{
+ int ret;
+
+ if ((ret = gsqueue_ddifini()) != 0)
+ return (ret);
+
+ if ((ret = mod_remove(&gsqueue_modlinkage)) != 0)
+ return (ret);
+
+ return (0);
+}
diff --git a/usr/src/uts/common/io/i40e/core/README b/usr/src/uts/common/io/i40e/core/README
new file mode 100644
index 0000000000..dc0149ce62
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/README
@@ -0,0 +1,410 @@
+ ixl FreeBSD* Base Driver and ixlv VF Driver for the
+ Intel XL710 Ethernet Controller Family
+
+/*$FreeBSD$*/
+================================================================
+
+August 26, 2014
+
+
+Contents
+========
+
+- Overview
+- Supported Adapters
+- The VF Driver
+- Building and Installation
+- Additional Configurations
+- Known Limitations
+
+
+Overview
+========
+
+This file describes the IXL FreeBSD* Base driver and the IXLV VF Driver
+for the XL710 Ethernet Family of Adapters. The Driver has been developed
+for use with FreeBSD 10.0 or later, but should be compatible with any
+supported release.
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel XL710 adapter. All hardware requirements listed
+apply for use with FreeBSD.
+
+
+Supported Adapters
+==================
+
+The drivers in this release are compatible with XL710 and X710-based
+Intel Ethernet Network Connections.
+
+
+SFP+ Devices with Pluggable Optics
+----------------------------------
+
+SR Modules
+----------
+ Intel DUAL RATE 1G/10G SFP+ SR (bailed) FTLX8571D3BCV-IT
+ Intel DUAL RATE 1G/10G SFP+ SR (bailed) AFBR-703SDZ-IN2
+
+LR Modules
+----------
+ Intel DUAL RATE 1G/10G SFP+ LR (bailed) FTLX1471D3BCV-IT
+ Intel DUAL RATE 1G/10G SFP+ LR (bailed) AFCT-701SDZ-IN2
+
+QSFP+ Modules
+-------------
+ Intel TRIPLE RATE 1G/10G/40G QSFP+ SR (bailed) E40GQSFPSR
+ Intel TRIPLE RATE 1G/10G/40G QSFP+ LR (bailed) E40GQSFPLR
+ QSFP+ 1G speed is not supported on XL710 based devices.
+
+X710/XL710 Based SFP+ adapters support all passive and active limiting direct
+attach cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
+
+The VF Driver
+=============
+The VF driver is normally used in a virtualized environment where a host
+driver manages SRIOV, and provides a VF device to the guest. With this
+first release the only host environment tested was using Linux QEMU/KVM.
+Support is planned for Xen and VMWare hosts at a later time.
+
+In the FreeBSD guest the IXLV driver is loaded and functions
+using the VF device assigned to it.
+
+The VF driver provides most of the same functionality as the CORE driver,
+but is subordinate to the Host; access to many controls is actually
+accomplished by a request to the Host via what is called the "Admin queue".
+These are startup and initialization events, however; once in operation
+the device is self-contained and should achieve near native performance.
+
+Some notable limitations of the VF environment: first, for security
+reasons the driver is never permitted to be promiscuous, so tcpdump
+will not behave the same way on the interface. Second, media info is not
+available from the PF, so it will always appear as auto.
+
+Tarball Building and Installation
+=================================
+
+NOTE: You must have kernel sources installed to compile the driver tarball.
+
+These instructions assume a standalone driver tarball; building the driver
+already in the kernel source is simply a matter of adding the device entry
+to the kernel config file, or building in the ixl or ixlv module directory.
+
+In the instructions below, x.x.x is the driver version
+as indicated in the name of the driver tarball. The example is
+for ixl; the same procedure applies for ixlv.
+
+1. Move the base driver tar file to the directory of your choice.
+ For example, use /home/username/ixl or /usr/local/src/ixl.
+
+2. Untar/unzip the archive:
+ tar xfz ixl-x.x.x.tar.gz
+
+3. To install man page:
+ cd ixl-x.x.x
+ gzip -c ixl.4 > /usr/share/man/man4/ixl.4.gz
+
+4. To load the driver onto a running system:
+ cd ixl-x.x.x/src
+ make load
+
+5. To assign an IP address to the interface, enter the following:
+ ifconfig ixl<interface_num> <IP_address>
+
+6. Verify that the interface works. Enter the following, where <IP_address>
+ is the IP address for another machine on the same subnet as the interface
+ that is being tested:
+
+ ping <IP_address>
+
+7. If you want the driver to load automatically when the system is booted:
+
+ cd ixl-x.x.x/src
+ make
+ make install
+
+ Edit /boot/loader.conf, and add the following line:
+ if_ixl_load="YES"
+
+ Edit /etc/rc.conf, and create the appropriate
+ ifconfig_ixl<interface_num> entry:
+
+ ifconfig_ixl<interface_num>="<ifconfig_settings>"
+
+ Example usage:
+
+ ifconfig_ixl0="inet 192.168.10.1 netmask 255.255.255.0"
+
+ NOTE: For assistance, see the ifconfig man page.
+
+
+
+Configuration and Tuning
+=========================
+
+Both drivers support Transmit/Receive Checksum Offload for IPv4 and IPv6,
+TSO for IPv4 and IPv6, LRO, and Jumbo Frames on all 40 Gigabit adapters.
+
+ Jumbo Frames
+ ------------
+ To enable Jumbo Frames, use the ifconfig utility to increase
+ the MTU beyond 1500 bytes.
+
+ - The Jumbo Frames setting on the switch must be set to at least
+ 22 bytes larger than that of the adapter.
+
+ - The maximum MTU setting for Jumbo Frames is 9706. This value
+ coincides with the maximum jumbo frames size of 9728.
+ To modify the setting, enter the following:
+
+ ifconfig ixl<interface_num> <hostname or IP address> mtu 9000
+
+ - To confirm an interface's MTU value, use the ifconfig command.
+ To confirm the MTU used between two specific devices, use:
+
+ route get <destination_IP_address>
+
+ VLANs
+ -----
+ To create a new VLAN pseudo-interface:
+
+ ifconfig <vlan_name> create
+
+ To associate the VLAN pseudo-interface with a physical interface
+ and assign a VLAN ID, IP address, and netmask:
+
+ ifconfig <vlan_name> <ip_address> netmask <subnet_mask> vlan
+ <vlan_id> vlandev <physical_interface>
+
+ Example:
+
+ ifconfig vlan10 10.0.0.1 netmask 255.255.255.0 vlan 10 vlandev ixl0
+
+ In this example, all packets will be marked on egress with
+ 802.1Q VLAN tags, specifying a VLAN ID of 10.
+
+ To remove a VLAN pseudo-interface:
+
+ ifconfig <vlan_name> destroy
+
+
+ Checksum Offload
+ ----------------
+
+ Checksum offloading supports IPv4 and IPv6 with TCP and UDP packets
+ and is supported for both transmit and receive. Checksum offloading
+ for transmit and receive is enabled by default for both IPv4 and IPv6.
+
+ Checksum offloading can be enabled or disabled using ifconfig.
+ Transmit and receive offloading for IPv4 and IPv6 are enabled
+ and disabled separately.
+
+ NOTE: TSO requires Tx checksum, so when Tx checksum
+ is disabled, TSO will also be disabled.
+
+ To enable Tx checksum offloading for IPv4:
+
+ ifconfig ixl<interface_num> txcsum4
+
+ To disable Tx checksum offloading for IPv4:
+
+ ifconfig ixl<interface_num> -txcsum4
+ (NOTE: This will disable TSO4)
+
+ To enable Rx checksum offloading for IPv6:
+
+ ifconfig ixl<interface_num> rxcsum6
+
+ To disable Rx checksum offloading for IPv6:
+
+ ifconfig ixl<interface_num> -rxcsum6
+ (NOTE: This will disable TSO6)
+
+
+ To confirm the current settings:
+
+ ifconfig ixl<interface_num>
+
+
+ TSO
+ ---
+
+ TSO supports both IPv4 and IPv6 and is enabled by default. TSO can
+ be disabled and enabled using the ifconfig utility.
+
+ NOTE: TSO requires Tx checksum, so when Tx checksum is
+ disabled, TSO will also be disabled.
+
+ To disable TSO IPv4:
+
+ ifconfig ixl<interface_num> -tso4
+
+ To enable TSO IPv4:
+
+ ifconfig ixl<interface_num> tso4
+
+ To disable TSO IPv6:
+
+ ifconfig ixl<interface_num> -tso6
+
+ To enable TSO IPv6:
+
+ ifconfig ixl<interface_num> tso6
+
+ To disable BOTH TSO IPv4 and IPv6:
+
+ ifconfig ixl<interface_num> -tso
+
+ To enable BOTH TSO IPv4 and IPv6:
+
+ ifconfig ixl<interface_num> tso
+
+
+ LRO
+ ---
+
+ Large Receive Offload is enabled by default. It can be enabled
+ or disabled by using the ifconfig utility.
+
+ NOTE: LRO should be disabled when forwarding packets.
+
+ To disable LRO:
+
+ ifconfig ixl<interface_num> -lro
+
+ To enable LRO:
+
+ ifconfig ixl<interface_num> lro
+
+
+Flow Control (IXL only)
+-----------------------
+Flow control is disabled by default. To change flow control settings use sysctl.
+
+To enable flow control to Rx pause frames:
+
+ sysctl dev.ixl.<interface_num>.fc=1
+
+To enable flow control to Tx pause frames:
+
+ sysctl dev.ixl.<interface_num>.fc=2
+
+To enable flow control to Rx and Tx pause frames:
+
+ sysctl dev.ixl.<interface_num>.fc=3
+
+To disable flow control:
+
+ sysctl dev.ixl.<interface_num>.fc=0
+
+
+NOTE: You must have a flow control capable link partner.
+
+NOTE: The VF driver does not have access to flow control; it must be
+ managed from the host side.
+
+
+ Important system configuration changes:
+ =======================================
+
+-Change the file /etc/sysctl.conf, and add the line:
+
+ hw.intr_storm_threshold=0 (the default is 1000)
+
+-Best throughput results are seen with a large MTU; use 9706 if possible.
+
+-The default number of descriptors per ring is 1024; increasing this may
+improve performance depending on the use case.
+
+-The VF driver uses a relatively large buf ring; this was found to eliminate
+ UDP transmit errors. It is a tunable, and if no UDP traffic is used it can
+ be reduced, as the memory is used per queue.
+
+
+Known Limitations
+=================
+
+Network Memory Buffer allocation
+--------------------------------
+ FreeBSD may have a low number of network memory buffers (mbufs) by default.
+If your mbuf value is too low, it may cause the driver to fail to initialize
+and/or cause the system to become unresponsive. You can check to see if the
+system is mbuf-starved by running 'netstat -m'. Increase the number of mbufs
+by editing the lines below in /etc/sysctl.conf:
+
+ kern.ipc.nmbclusters
+ kern.ipc.nmbjumbop
+ kern.ipc.nmbjumbo9
+ kern.ipc.nmbjumbo16
+ kern.ipc.nmbufs
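+
+For example (the values below are illustrative only and should be scaled
+to the system's memory), the entries might look like:
+
+ kern.ipc.nmbclusters=262144
+ kern.ipc.nmbjumbop=131072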
+
+The amount of memory that you allocate is system specific, and may
+require some trial and error.
+
+Also, increasing the following in /etc/sysctl.conf could help increase
+network performance:
+
+ kern.ipc.maxsockbuf
+ net.inet.tcp.sendspace
+ net.inet.tcp.recvspace
+ net.inet.udp.maxdgram
+ net.inet.udp.recvspace
+
+
+UDP Stress Test Dropped Packet Issue
+------------------------------------
+Under a small packet UDP stress test with the ixl driver, the FreeBSD system
+may drop UDP packets because the socket buffers fill. You may want to
+change the driver's Flow Control variables to the minimum value for
+controlling packet reception.
+
+
+Disable LRO when routing/bridging
+---------------------------------
+LRO must be turned off when forwarding traffic.
+
+
+Lower than expected performance
+-------------------------------
+Some PCIe x8 slots are actually configured as x4 slots. These slots have
+insufficient bandwidth for full line rate with dual port and quad port
+devices.
+
+In addition, if you put a PCIe Generation 3-capable adapter into a PCIe
+Generation 2 slot, you cannot get full bandwidth. The driver detects this
+situation and writes the following message in the system log:
+
+ "PCI-Express bandwidth available for this card is not sufficient for
+ optimal performance. For optimal performance a x8 PCI-Express slot
+ is required."
+
+If this error occurs, moving your adapter to a true PCIe Generation 3 x8
+slot will resolve the issue.
+
+
+Support
+=======
+
+For general information and support, go to the Intel support website at:
+
+ http://support.intel.com
+
+If an issue is identified with the released source code on the supported kernel
+with a supported adapter, email the specific information related to the issue
+to freebsdnic@mailbox.intel.com.
+
+
+License
+=======
+
+This software program is released under the terms of a license agreement
+between you ('Licensee') and Intel. Do not use or load this software or any
+associated materials (collectively, the 'Software') until you have carefully
+read the full terms and conditions of the LICENSE located in this software
+package. By loading or using the Software, you agree to the terms of this
+Agreement. If you do not agree with the terms of this Agreement, do not
+install or use the Software.
+
+* Other names and brands may be claimed as the property of others.
+
+
diff --git a/usr/src/uts/common/io/i40e/core/THIRDPARTYLICENSE b/usr/src/uts/common/io/i40e/core/THIRDPARTYLICENSE
new file mode 100644
index 0000000000..04c551f1b2
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/THIRDPARTYLICENSE
@@ -0,0 +1,29 @@
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/usr/src/uts/common/io/i40e/core/THIRDPARTYLICENSE.descrip b/usr/src/uts/common/io/i40e/core/THIRDPARTYLICENSE.descrip
new file mode 100644
index 0000000000..7a9537b10e
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/THIRDPARTYLICENSE.descrip
@@ -0,0 +1 @@
+i40e DRIVER
diff --git a/usr/src/uts/common/io/i40e/core/i40e_adminq.c b/usr/src/uts/common/io/i40e/core/i40e_adminq.c
new file mode 100644
index 0000000000..67b72fd9f2
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_adminq.c
@@ -0,0 +1,1101 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_adminq.c 284049 2015-06-05 22:52:42Z jfv $*/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
+ desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
+}
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (i40e_is_vf(hw)) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
+ hw->aq.asq.bal = I40E_VF_ATQBAL1;
+ hw->aq.asq.bah = I40E_VF_ATQBAH1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
+ hw->aq.arq.bal = I40E_VF_ARQBAL1;
+ hw->aq.arq.bah = I40E_VF_ARQBAH1;
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
+ hw->aq.asq.bal = I40E_PF_ATQBAL;
+ hw->aq.asq.bah = I40E_PF_ATQBAH;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
+ hw->aq.arq.bal = I40E_PF_ARQBAL;
+ hw->aq.arq.bah = I40E_PF_ARQBAH;
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with Admin queue design, there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
+ desc->params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+
+ /* set starting point */
+ if (!i40e_is_vf(hw))
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ else
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
+ wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.asq.bal);
+ if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event) queue
+ **/
+static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+
+ /* set starting point */
+ if (!i40e_is_vf(hw))
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ else
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
+ wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.arq.bal);
+ if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this, as the memory allocation
+ * routines called are not safe in an atomic context.
+ **/
+enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this, as the memory allocation
+ * routines called are not safe in an atomic context.
+ **/
+enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.asq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* take the lock before touching the hardware */
+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+ wr32(hw, hw->aq.asq.bal, 0);
+ wr32(hw, hw->aq.asq.bah, 0);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+
+ i40e_release_spinlock(&hw->aq.asq_spinlock);
+
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.arq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* take the lock before touching the hardware */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+ wr32(hw, hw->aq.arq.bal, 0);
+ wr32(hw, hw->aq.arq.bah, 0);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ u16 eetrack_lo, eetrack_hi;
+ u16 cfg_ptr, oem_hi, oem_lo;
+ int retry = 0;
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* initialize spin locks */
+ i40e_init_spinlock(&hw->aq.asq_spinlock);
+ i40e_init_spinlock(&hw->aq.arq_spinlock);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* setup ASQ command write back timeout */
+ hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_destroy_spinlocks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_asq;
+
+ /* a VF has no need of the firmware version info gathered below */
+ if (i40e_is_vf(hw))
+ goto init_adminq_exit;
+ /* There are some cases where the firmware may not be quite ready
+ * for AdminQ operations, so we retry the AdminQ setup a few times
+ * if we see timeouts in this first AQ call.
+ */
+ do {
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver,
+ &hw->aq.fw_min_ver,
+ &hw->aq.fw_build,
+ &hw->aq.api_maj_ver,
+ &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ break;
+ retry++;
+ i40e_msec_delay(100);
+ i40e_resume_aq(hw);
+ } while (retry < 10);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_arq;
+
+ /* get the NVM version info */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
+ &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
+ hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+ i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+ &oem_hi);
+ i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+ &oem_lo);
+ hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+
+ if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+
+ /* pre-emptive resource lock release */
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ hw->aq.nvm_release_on_done = FALSE;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
+ /* request the default HMC resource profile; a failure here is
+ * considered non-fatal, so the return code is deliberately overwritten
+ */
+ ret_code = i40e_aq_set_hmc_resource_profile(hw,
+ I40E_HMC_PROFILE_DEFAULT,
+ 0,
+ NULL);
+ ret_code = I40E_SUCCESS;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_arq:
+ i40e_shutdown_arq(hw);
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_spinlocks:
+ i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+ i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+init_adminq_exit:
+ return ret_code;
+}
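+
+/*
+ * Illustrative sketch (hypothetical caller, not part of the shared code):
+ * the minimal bring-up a driver performs before using the admin queues.
+ * The entry counts and buffer sizes are example values; only the rule
+ * that all four fields be non-zero comes from i40e_init_adminq() above.
+ */
+#if 0
+static enum i40e_status_code example_adminq_bringup(struct i40e_hw *hw)
+{
+ /* these four fields *MUST* be set before calling i40e_init_adminq */
+ hw->aq.num_asq_entries = 128;
+ hw->aq.num_arq_entries = 128;
+ hw->aq.asq_buf_size = 4096;
+ hw->aq.arq_buf_size = 4096;
+
+ /* on failure, i40e_init_adminq has already torn down what it built */
+ return i40e_init_adminq(hw);
+}
+#endif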
+
+/**
+ * i40e_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_queue_shutdown(hw, TRUE);
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+ /* destroy the spinlocks */
+ i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+ i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->nvm_buff.va)
+ i40e_free_virt_mem(hw, &hw->nvm_buff);
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - clean the Admin Send Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Returns the number of free descriptors
+ **/
+u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_DMA);
+ cb_func(hw, &desc_cb);
+ }
+ i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
+ i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ * i40e_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns TRUE if the firmware has processed all descriptors on the
+ * admin send queue. Returns FALSE if there are still requests pending.
+ **/
+bool i40e_asq_done(struct i40e_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ * i40e_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command routine for the Admin Send Queue. It
+ * posts the descriptor to the queue, reclaims descriptors completed by
+ * the firmware, and, unless async or postponed handling was requested,
+ * waits for the descriptor write back.
+ **/
+enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = FALSE;
+ u16 retval = 0;
+ u32 val = 0;
+
+ hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ i40e_memcpy(details,
+ cmd_details,
+ sizeof(struct i40e_asq_cmd_details),
+ I40E_NONDMA_TO_NONDMA);
+
+ /* If the cmd_details are defined, copy the cookie. The
+ * CPU_TO_LE32 is not strictly needed here because the data is
+ * ignored by the FW and only used by the driver.
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
+ desc->cookie_low =
+ CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
+ }
+ } else {
+ i40e_memset(details, 0,
+ sizeof(struct i40e_asq_cmd_details),
+ I40E_NONDMA_MEM);
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+ desc->flags |= CPU_TO_LE16(details->flags_ena);
+
+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag.\n");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* Call the clean function to reclaim the descriptors that the FW has
+ * already processed; it returns the number of descriptors available.
+ * With asynchronous completions, the clean function could instead be
+ * called from a separate thread.
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
+ I40E_NONDMA_TO_DMA);
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ i40e_memcpy(dma_buff->va, buff, buff_size,
+ I40E_NONDMA_TO_DMA);
+ desc_on_ring->datalen = CPU_TO_LE16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40e_asq_done(hw))
+ break;
+ /* ugh! delay while spin_lock */
+ i40e_msec_delay(1);
+ total_delay++;
+ } while (total_delay < hw->aq.asq_cmd_timeout);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40e_asq_done(hw)) {
+ i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_NONDMA);
+ if (buff != NULL)
+ i40e_memcpy(buff, dma_buff->va, buff_size,
+ I40E_DMA_TO_NONDMA);
+ retval = LE16_TO_CPU(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = TRUE;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = I40E_SUCCESS;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: desc and buffer writeback:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ i40e_memcpy(details->wb_desc, desc_on_ring,
+ sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
+
+ /* update the error if a timeout occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+
+asq_send_command_error:
+ i40e_release_spinlock(&hw->aq.asq_spinlock);
+asq_send_command_exit:
+ return status;
+}
+
+/**
+ * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
+ I40E_NONDMA_MEM);
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
+}
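+
+/*
+ * Illustrative sketch of a direct (16-byte, bufferless) command built on
+ * the helper above; this approximates the shape of i40e_aq_queue_shutdown()
+ * as used by i40e_shutdown_adminq(). The command-specific struct overlays
+ * the descriptor's params.raw area (see i40e_adminq_cmd.h).
+ */
+#if 0
+enum i40e_status_code example_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
+
+ /* direct command: no buffer, so buff is NULL and buff_size is 0 */
+ return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+}
+#endif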
+
+/**
+ * i40e_clean_arq_element - extract one event from the Admin Receive Queue
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* take the lock before we start messing with the ring */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ /* set next_to_use to head */
+ if (!i40e_is_vf(hw))
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ else
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+
+ flags = LE16_TO_CPU(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ }
+
+ i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_NONDMA);
+ datalen = LE16_TO_CPU(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
+ i40e_memcpy(e->msg_buf,
+ hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_len, I40E_DMA_TO_NONDMA);
+
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
+
+ /* Restore the original datalen and buffer address in the desc;
+ * the FW overwrites datalen to indicate the event message size.
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
+
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
+ desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
+
+ if (i40e_is_nvm_update_op(&e->desc)) {
+ if (hw->aq.nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->aq.nvm_release_on_done = FALSE;
+ }
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return ret_code;
+}
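+
+/*
+ * Illustrative sketch (hypothetical caller): draining the ARQ, e.g. from
+ * an admin queue interrupt handler. The scratch buffer is assumed to be
+ * caller-allocated and at least hw->aq.arq_buf_size bytes long.
+ */
+#if 0
+void example_drain_arq(struct i40e_hw *hw, u8 *scratch)
+{
+ struct i40e_arq_event_info event;
+ u16 pending = 0;
+
+ event.buf_len = hw->aq.arq_buf_size;
+ event.msg_buf = scratch;
+
+ do {
+ /* returns I40E_ERR_ADMIN_QUEUE_NO_WORK once the queue is empty */
+ if (i40e_clean_arq_element(hw, &event, &pending) != I40E_SUCCESS)
+ break;
+ /* dispatch on LE16_TO_CPU(event.desc.opcode) here */
+ } while (pending != 0);
+}
+#endif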
+
+/**
+ * i40e_resume_aq - reinitialize the admin queue registers after a reset
+ * @hw: pointer to the hardware structure
+ **/
+void i40e_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
diff --git a/usr/src/uts/common/io/i40e/core/i40e_adminq.h b/usr/src/uts/common/io/i40e/core/i40e_adminq.h
new file mode 100644
index 0000000000..e20d6893ed
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_adminq.h
@@ -0,0 +1,125 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_adminq.h 284049 2015-06-05 22:52:42Z jfv $*/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_status.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+ struct i40e_aq_desc *wb_desc;
+};
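+
+/* Illustrative use (hypothetical caller): cmd_details is optional, but a
+ * caller that wants the firmware's writeback descriptor preserved can ask
+ * for a copy through wb_desc:
+ *
+ *	struct i40e_asq_cmd_details details = { 0 };
+ *	struct i40e_aq_desc wb;
+ *
+ *	details.wb_desc = &wb;
+ *	(void) i40e_asq_send_command(hw, &desc, NULL, 0, &details);
+ */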
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+ u32 asq_cmd_timeout; /* send queue cmd write back timeout */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+ bool nvm_release_on_done;
+
+ struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
+ struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/usr/src/uts/common/io/i40e/core/i40e_adminq_cmd.h b/usr/src/uts/common/io/i40e/core/i40e_adminq_cmd.h
new file mode 100644
index 0000000000..af9f107597
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_adminq_cmd.h
@@ -0,0 +1,2424 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_adminq_cmd.h 284049 2015-06-05 22:52:42Z jfv $*/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#ifdef X722_SUPPORT
+#define I40E_FW_API_VERSION_MINOR 0x0003
+#else
+#define I40E_FW_API_VERSION_MINOR 0x0004
+#endif
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
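+
+/* Illustrative use (assumed pattern, matching how callers in this shared
+ * code build indirect commands): BUF attaches a DMA buffer, LB marks the
+ * buffer as "large" (> I40E_AQ_LARGE_BUF, see i40e_adminq.h), and RD marks
+ * the buffer as carrying data for the firmware to read rather than space
+ * for a response:
+ *
+ *	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ *	if (buff_size > I40E_AQ_LARGE_BUF)
+ *		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ */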
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
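+
+/* Illustrative check (hypothetical caller): when i40e_asq_send_command()
+ * returns I40E_ERR_ADMIN_QUEUE_ERROR, the firmware's own return code is
+ * cached in hw->aq.asq_last_status:
+ *
+ *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ *	if (status == I40E_ERR_ADMIN_QUEUE_ERROR &&
+ *	    hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN)
+ *		... the firmware asked us to retry the command later ...
+ */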
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ /* LAA */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
+
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_oem_post_update = 0x0720,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+ i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
+ i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+#ifdef X722_SUPPORT
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
+#endif
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide-by-zero error if
+ * the structure is not of the correct size; otherwise it creates an enum
+ * that is never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
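+
+/* For illustration (hypothetical struct): a command struct of the wrong
+ * size makes (sizeof(struct X) == 16) evaluate to 0, so the enum value
+ * becomes 16/0 and the compiler rejects the file:
+ *
+ *	struct i40e_bad_cmd { u8 bytes[15]; };
+ *	I40E_CHECK_CMD_LENGTH(i40e_bad_cmd);	(fails to compile)
+ */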
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_ISCSI 0x0022
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+ u8 reserved[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_MC_MAG_EN_VALID 0x100
+#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
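+
+/* Illustrative sketch (approximating the shared code's MAC address read
+ * helper; local names here are hypothetical): an indirect command whose
+ * 24-byte response lands in the attached buffer, while the valid bits
+ * come back in the descriptor writeback:
+ *
+ *	struct i40e_aq_desc desc;
+ *	struct i40e_aqc_mac_address_read *cmd =
+ *		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ *	struct i40e_aqc_mac_address_read_data addrs;
+ *
+ *	i40e_fill_default_direct_cmd_desc(&desc,
+ *		i40e_aqc_opc_mac_address_read);
+ *	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ *	status = i40e_asq_send_command(hw, &desc, &addrs, sizeof(addrs),
+ *		NULL);
+ *	if (status == I40E_SUCCESS &&
+ *	    (LE16_TO_CPU(cmd->command_flags) & I40E_AQC_LAN_ADDR_VALID))
+ *		... addrs.pf_lan_mac[] now holds the LAN MAC ...
+ */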
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define I40E_AQC_WRITE_TYPE_MASK 0xC000
+
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an seid and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements is returned in the response buffer;
+ * the first in the array is the header, the remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12-bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANs include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#ifdef X722_SUPPORT
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#endif
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#ifdef X722_SUPPORT
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+#endif
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress table */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
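+
+/* Illustrative sketch (approximating the shared code's add-VSI helper;
+ * uplink_seid here is a hypothetical input): an indirect command that
+ * sends the 128-byte properties buffer to the firmware, so both BUF and
+ * RD must be set on the descriptor:
+ *
+ *	struct i40e_aq_desc desc;
+ *	struct i40e_aqc_add_get_update_vsi *cmd =
+ *		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ *	struct i40e_aqc_vsi_properties_data info = { 0 };
+ *
+ *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vsi);
+ *	cmd->uplink_seid = CPU_TO_LE16(uplink_seid);
+ *	cmd->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+ *	info.valid_sections = CPU_TO_LE16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ *	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ *	status = i40e_asq_send_command(hw, &desc, &info, sizeof(info), NULL);
+ */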
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+	__le16	veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK	(0x7FF << \
+					I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
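+/*
+ * Illustrative sketch (not part of the vendor header): filling one
+ * element for Add MAC-VLAN (0x0250); the element array is the indirect
+ * buffer referenced by addr_high/addr_low in i40e_aqc_macvlan.
+ * "example_fill_macvlan" is a hypothetical name.
+ */
+#if 0
+static void example_fill_macvlan(
+    struct i40e_aqc_add_macvlan_element_data *e, const u8 *mac)
+{
+	memset(e, 0, sizeof (*e));
+	memcpy(e->mac_addr, mac, sizeof (e->mac_addr));
+	/* perfect match on the MAC, accept any VLAN */
+	e->flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+	    I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+}
+#endif
+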
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
+
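+/*
+ * Illustrative sketch (not part of the vendor header): toggling unicast
+ * promiscuous mode. Only the bits named in valid_flags are changed by
+ * the firmware, so the other modes are left as-is.
+ */
+#if 0
+static void example_set_ucast_promisc(
+    struct i40e_aqc_set_vsi_promiscuous_modes *cmd, u16 seid, bool enable)
+{
+	cmd->promiscuous_flags =
+	    CPU_TO_LE16(enable ? I40E_AQC_SET_VSI_PROMISC_UNICAST : 0);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+	cmd->seid = CPU_TO_LE16(seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);
+}
+#endif
+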
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_oveb_cloud,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
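+/*
+ * Illustrative sketch (not part of the vendor header): an inner-MAC/
+ * inner-VLAN/tenant-id VXLAN filter that steers matches to a queue,
+ * encoded with the FILTER/TNL_TYPE/QUEUE defines above.
+ */
+#if 0
+static void example_fill_cloud_filter(
+    struct i40e_aqc_add_remove_cloud_filters_element_data *e,
+    const u8 *inner_mac, u16 vlan, u32 tenant, u16 queue)
+{
+	memcpy(e->inner_mac, inner_mac, sizeof (e->inner_mac));
+	e->inner_vlan = CPU_TO_LE16(vlan);
+	e->flags = CPU_TO_LE16(
+	    I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID |
+	    I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
+	    (I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN <<
+	    I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+	e->tenant_id = CPU_TO_LE32(tenant);
+	e->queue_number = CPU_TO_LE16(queue & I40E_AQC_ADD_CLOUD_QUEUE_MASK);
+}
+#endif
+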
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ * take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
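+/*
+ * Illustrative sketch (not part of the vendor header): a VLAN mirror
+ * rule takes an indirect buffer of num_entries little-endian VLAN ids;
+ * rule types 4 and 5 (ALL_INGRESS/ALL_EGRESS) need no buffer at all.
+ */
+#if 0
+static void example_fill_vlan_mirror(
+    struct i40e_aqc_add_delete_mirror_rule *cmd, u16 sw_seid,
+    u16 dest_vsi_seid, u16 nvlans)
+{
+	cmd->seid = CPU_TO_LE16(sw_seid);
+	cmd->rule_type = CPU_TO_LE16(I40E_AQC_MIRROR_RULE_TYPE_VLAN <<
+	    I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
+	cmd->num_entries = CPU_TO_LE16(nvlans);
+	cmd->destination = CPU_TO_LE16(dest_vsi_seid); /* VSI for add */
+	/* addr_high/addr_low must point at the __le16 VLAN id array */
+}
+#endif
+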
+/* DCB 0x03xx */
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+	__le16	tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
+};
+
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
+
+/* Query Physical Port ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
+I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
+
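+/*
+ * Illustrative sketch (not part of the vendor header): the link speed
+ * is reported as a bitmap using the shifts above, so converting a
+ * single reported speed to Mb/s is a straightforward switch.
+ */
+#if 0
+static u32 example_speed_to_mbps(enum i40e_aq_link_speed speed)
+{
+	switch (speed) {
+	case I40E_LINK_SPEED_100MB:
+		return 100;
+	case I40E_LINK_SPEED_1GB:
+		return 1000;
+	case I40E_LINK_SPEED_10GB:
+		return 10000;
+	case I40E_LINK_SPEED_20GB:
+		return 20000;
+	case I40E_LINK_SPEED_40GB:
+		return 40000;
+	default:
+		return 0;	/* I40E_LINK_SPEED_UNKNOWN */
+	}
+}
+#endif
+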
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
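+/*
+ * Illustrative sketch (not part of the vendor header): the pacing rate
+ * is a 4-bit field inside params, so it must be masked and shifted into
+ * place without disturbing the CRC enable bit.
+ */
+#if 0
+static void example_set_pacing(struct i40e_aq_set_mac_config *cmd, u8 pacing)
+{
+	cmd->params &= ~I40E_AQ_SET_MAC_CONFIG_PACING_MASK;
+	cmd->params |= (pacing << I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT) &
+	    I40E_AQ_SET_MAC_CONFIG_PACING_MASK;
+}
+#endif
+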
+/* Restart Auto-Negotiation (direct 0x605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01 /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_LINK_UP_PORT 0x20
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
+
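+/*
+ * Illustrative sketch (not part of the vendor header): one common way
+ * to read the response is to treat the port as usable only when the
+ * function-level link bit and the media bit are both set.
+ */
+#if 0
+static bool example_link_is_up(const struct i40e_aqc_get_link_status *resp)
+{
+	return (resp->link_info & I40E_AQ_LINK_UP_FUNCTION) &&
+	    (resp->link_info & I40E_AQ_MEDIA_AVAILABLE);
+}
+#endif
+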
+/* Set event mask command (direct 0x613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+
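+/*
+ * Illustrative sketch (not part of the vendor header): NVM updates are
+ * issued in chunks, with I40E_AQ_NVM_LAST_CMD set only on the final
+ * chunk so the firmware knows when the transaction ends.
+ */
+#if 0
+static void example_fill_nvm_chunk(struct i40e_aqc_nvm_update *cmd,
+    u8 module, u32 offset, u16 length, bool last)
+{
+	cmd->command_flags = last ? I40E_AQ_NVM_LAST_CMD : 0;
+	cmd->module_pointer = module;
+	cmd->offset = CPU_TO_LE32(offset);
+	cmd->length = CPU_TO_LE16(length);
+	/* addr_high/addr_low describe the indirect data buffer */
+}
+#endif
+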
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
+#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ __le16 element_id_msw; /* MSWord of field ID */
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE 0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD	(1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
+#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
+#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+ __le32 field_id;
+ __le32 field_value;
+ __le16 field_options;
+ __le16 reserved;
+};
+
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+struct i40e_aqc_nvm_oem_post_update {
+#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
+ u8 sel_data;
+ u8 reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
+
+struct i40e_aqc_nvm_oem_post_update_buffer {
+ u8 str_len;
+ u8 dev_addr;
+ __le16 eeprom_addr;
+ u8 data[36];
+};
+
+I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
+ */
+
+#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
+#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
+#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
+struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3[2];
+ __le16 oper_app_prio;
+ u8 reserved4[2];
+ __le16 tlv_status;
+};
+
+I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct i40e_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent, e.g. DCBx
+ */
+struct i40e_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+ u8 type;
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting a specific LLDP agent, e.g. DCBx
+ */
+struct i40e_aqc_lldp_stop_start_specific_agent {
+#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
+#define I40E_AQC_START_SPECIFIC_AGENT_MASK (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+ u8 command;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+ u8 reserved1[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
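+/*
+ * Illustrative sketch (not part of the vendor header): registering a
+ * VXLAN port. Note udp_port is little-endian, i.e. the host-order port
+ * passed through CPU_TO_LE16, not network byte order.
+ */
+#if 0
+static void example_fill_vxlan_port(struct i40e_aqc_add_udp_tunnel *cmd,
+    u16 port)
+{
+	cmd->udp_port = CPU_TO_LE16(port);
+	cmd->protocol_type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+}
+#endif
+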
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+#ifdef X722_SUPPORT
+
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
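+
+/*
+ * Illustrative sketch (not part of the vendor header): vsi_id carries
+ * both the VSI number and a valid bit, and flags selects the PF or
+ * per-VSI table, so a per-VSI request is encoded like this.
+ */
+#if 0	/* example sketch */
+static void example_fill_rss_lut_cmd(struct i40e_aqc_get_set_rss_lut *cmd,
+    u16 vsi_id)
+{
+	cmd->vsi_id = CPU_TO_LE16((vsi_id &
+	    I40E_AQC_SET_RSS_LUT_VSI_ID_MASK) |
+	    I40E_AQC_SET_RSS_LUT_VSI_VALID);
+	cmd->flags = CPU_TO_LE16(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+	    I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT);
+}
+#endif	/* example sketch */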
+#endif
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ __le16 param_value2;
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocsd_memory_block_addr_high;
+ __le32 ocsd_memory_block_addr_low;
+ __le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocbb_memory_block_addr_high;
+ __le32 ocbb_memory_block_addr_low;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/usr/src/uts/common/io/i40e/core/i40e_alloc.h b/usr/src/uts/common/io/i40e/core/i40e_alloc.h
new file mode 100644
index 0000000000..4428287f83
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_alloc.h
@@ -0,0 +1,66 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_alloc.h 283119 2015-05-19 18:35:18Z jhb $*/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+enum i40e_status_code i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+enum i40e_status_code i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+enum i40e_status_code i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+enum i40e_status_code i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
+
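+/*
+ * Illustrative sketch (not part of the vendor header): typical call
+ * pattern for the DMA hooks, which each OS port supplies (for illumos,
+ * in i40e_osdep.c). The alignment argument is in bytes; 4096 is just an
+ * assumed example value, and "example_alloc_ring" is a hypothetical name.
+ */
+#if 0	/* example sketch */
+static enum i40e_status_code example_alloc_ring(struct i40e_hw *hw,
+    struct i40e_dma_mem *mem, u64 size)
+{
+	enum i40e_status_code status;
+
+	status = i40e_allocate_dma_mem(hw, mem, i40e_mem_arq_ring,
+	    size, 4096);
+	if (status != I40E_SUCCESS)
+		return status;
+	/* ... use mem->va / mem->pa ... */
+	return i40e_free_dma_mem(hw, mem);
+}
+#endif	/* example sketch */
+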
+#endif /* _I40E_ALLOC_H_ */
diff --git a/usr/src/uts/common/io/i40e/core/i40e_common.c b/usr/src/uts/common/io/i40e/core/i40e_common.c
new file mode 100644
index 0000000000..c58eb9de1e
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_common.c
@@ -0,0 +1,5708 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_common.c 284049 2015-06-05 22:52:42Z jfv $*/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ DEBUGFUNC("i40e_set_mac_type\n");
+
+ if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_A:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+#ifdef X722_SUPPORT
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+#endif
+#ifdef X722_SUPPORT
+ case I40E_DEV_ID_X722_VF:
+ case I40E_DEV_ID_X722_VF_HV:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
+#endif
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ DEBUGOUT2("i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
+{
+ switch (stat_err) {
+ case I40E_SUCCESS:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
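+/*
+ * Illustrative sketch (not part of the vendor source): the two helpers
+ * above are intended to be combined when logging an admin queue
+ * failure. "example_log_aq_error" is a hypothetical name.
+ */
+#if 0
+static void example_log_aq_error(struct i40e_hw *hw,
+    enum i40e_status_code status)
+{
+	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+	    "AQ command failed: %s, fw reports %s\n",
+	    i40e_stat_str(hw, status),
+	    i40e_aq_str(hw, hw->aq.asq_last_status));
+}
+#endif
+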
+/**
+ * i40e_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer, u16 buf_len)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u16 len = LE16_TO_CPU(aq_desc->datalen);
+ u8 *buf = (u8 *)buffer;
+ u16 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ LE16_TO_CPU(aq_desc->opcode),
+ LE16_TO_CPU(aq_desc->flags),
+ LE16_TO_CPU(aq_desc->datalen),
+ LE16_TO_CPU(aq_desc->retval));
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->cookie_high),
+ LE32_TO_CPU(aq_desc->cookie_low));
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.internal.param0),
+ LE32_TO_CPU(aq_desc->params.internal.param1));
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.external.addr_high),
+ LE32_TO_CPU(aq_desc->params.external.addr_low));
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+ /* write the full 16-byte chunks */
+ for (i = 0; i < (len - 16); i += 16)
+ i40e_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, buf[i], buf[i+1], buf[i+2], buf[i+3],
+ buf[i+4], buf[i+5], buf[i+6], buf[i+7],
+ buf[i+8], buf[i+9], buf[i+10], buf[i+11],
+ buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
+ /* write whatever's left over without overrunning the buffer */
+ if (i < len) {
+ char d_buf[80];
+ int j = 0;
+
+ memset(d_buf, 0, sizeof(d_buf));
+#ifdef I40E_ILLUMOS
+ /*
+ * Sigh.
+ *
+ * The illumos DDI (inherited from OpenSolaris) says
+ * sprintf() returns the pointer to its first
+ * argument, NOT the length of bytes printed. A better
+ * solution would be to have the kernel provide
+ * something like real_sprintf() but for now, we
+ * hack around it.
+ */
+ (void) sprintf(d_buf, "\t0x%04X ", i);
+ j += strlen(d_buf);
+			/* Bounds-check at 77: " %02X" emits 3 chars plus a NUL. */
+ while (i < len && j < 77) {
+ (void) sprintf(&d_buf[j], " %02X", buf[i++]);
+ j += strlen(&d_buf[j]);
+ }
+#else
+ j += sprintf(d_buf, "\t0x%04X ", i);
+ while (i < len)
+ j += sprintf(&d_buf[j], " %02X", buf[i++]);
+#endif
+ i40e_debug(hw, mask, "%s\n", d_buf);
+ }
+ }
+}
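+
+/*
+ * Illustrative sketch (compiled out): the strlen()-based append pattern
+ * from the leftover-bytes branch above, as a stand-alone function.
+ * hex_append() is hypothetical; it relies only on ISO C sprintf() and
+ * strlen(), which is exactly why the same shape also survives the
+ * illumos DDI sprintf() return-value semantics described above.
+ */
+#if 0	/* example only */
+static void
+hex_append(char *dst, size_t dstlen, const unsigned char *src, size_t n)
+{
+	size_t j = strlen(dst);	/* current end of the string */
+	size_t i;
+
+	/* " %02X" emits 3 chars; stop while 3 chars plus a NUL still fit */
+	for (i = 0; i < n && j + 4 <= dstlen; i++) {
+		(void) sprintf(&dst[j], " %02X", src[i]);
+		j += strlen(&dst[j]);	/* never trust sprintf's return */
+	}
+}
+#endif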
+
+/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns TRUE if the queue is enabled, else FALSE.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+	if (hw->aq.asq.len) {
+		if (!i40e_is_vf(hw))
+			return !!(rd32(hw, hw->aq.asq.len) &
+			    I40E_PF_ATQLEN_ATQENABLE_MASK);
+		return !!(rd32(hw, hw->aq.asq.len) &
+		    I40E_VF_ATQLEN1_ATQENABLE_MASK);
+	}
+ return FALSE;
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+#ifdef X722_SUPPORT
+
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set TRUE to set the table, FALSE to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+	cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)lut));
+ cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)lut));
+
+ status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ FALSE);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, TRUE);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set TRUE to set the key, FALSE to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+static enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+	cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)key));
+ cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)key));
+
+ status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, FALSE);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, TRUE);
+}
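+
+/*
+ * Illustrative sketch (compiled out): programming a VSI RSS key with the
+ * wrapper above. example_set_rss_key() is hypothetical; the
+ * standard_rss_key field name is per i40e_adminq_cmd.h.
+ */
+#if 0	/* example only */
+static enum i40e_status_code
+example_set_rss_key(struct i40e_hw *hw, u16 vsi_id, const u8 *seed)
+{
+	struct i40e_aqc_get_set_rss_key_data key;
+
+	memset(&key, 0, sizeof(key));
+	memcpy(key.standard_rss_key, seed, sizeof(key.standard_rss_key));
+	return (i40e_aq_set_rss_key(hw, vsi_id, &key));
+}
+#endif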
+#endif /* X722_SUPPORT */
+
+/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40e_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
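+ *
+ * (An illustrative decode sketch following this flow appears after the
+ * table below.)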
+ */
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ I40E_RX_PTYPE_##OUTER_FRAG, \
+ I40E_RX_PTYPE_TUNNEL_##T, \
+ I40E_RX_PTYPE_TUNNEL_END_##TE, \
+ I40E_RX_PTYPE_##TEF, \
+ I40E_RX_PTYPE_INNER_PROT_##I, \
+ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros make the table fit but are terse */
+#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+ /* L2 Packet types */
+ I40E_PTT_UNUSED_ENTRY(0),
+ I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(4),
+ I40E_PTT_UNUSED_ENTRY(5),
+ I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(8),
+ I40E_PTT_UNUSED_ENTRY(9),
+ I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(25),
+ I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(32),
+ I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(39),
+ I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(47),
+ I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(54),
+ I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(62),
+ I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv4 --> GRE/NAT --> MAC --> IPv6 */
+ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(69),
+ I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+	/* IPv4 --> GRE/NAT --> MAC/VLAN --> IPv4 */
+ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(77),
+ I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv4 --> GRE/NAT --> MAC/VLAN --> IPv6 */
+ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(84),
+ I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(91),
+ I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(98),
+ I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(105),
+ I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+	/* IPv6 --> GRE/NAT --> IPv4 */
+ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(113),
+ I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT --> IPv6 */
+ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(120),
+ I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT --> MAC */
+ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+	/* IPv6 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(128),
+ I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT --> MAC --> IPv6 */
+ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(135),
+ I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+	/* IPv6 --> GRE/NAT --> MAC/VLAN --> IPv4 */
+ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(143),
+ I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT --> MAC/VLAN --> IPv6 */
+ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(150),
+ I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ I40E_PTT_UNUSED_ENTRY(154),
+ I40E_PTT_UNUSED_ENTRY(155),
+ I40E_PTT_UNUSED_ENTRY(156),
+ I40E_PTT_UNUSED_ENTRY(157),
+ I40E_PTT_UNUSED_ENTRY(158),
+ I40E_PTT_UNUSED_ENTRY(159),
+
+ I40E_PTT_UNUSED_ENTRY(160),
+ I40E_PTT_UNUSED_ENTRY(161),
+ I40E_PTT_UNUSED_ENTRY(162),
+ I40E_PTT_UNUSED_ENTRY(163),
+ I40E_PTT_UNUSED_ENTRY(164),
+ I40E_PTT_UNUSED_ENTRY(165),
+ I40E_PTT_UNUSED_ENTRY(166),
+ I40E_PTT_UNUSED_ENTRY(167),
+ I40E_PTT_UNUSED_ENTRY(168),
+ I40E_PTT_UNUSED_ENTRY(169),
+
+ I40E_PTT_UNUSED_ENTRY(170),
+ I40E_PTT_UNUSED_ENTRY(171),
+ I40E_PTT_UNUSED_ENTRY(172),
+ I40E_PTT_UNUSED_ENTRY(173),
+ I40E_PTT_UNUSED_ENTRY(174),
+ I40E_PTT_UNUSED_ENTRY(175),
+ I40E_PTT_UNUSED_ENTRY(176),
+ I40E_PTT_UNUSED_ENTRY(177),
+ I40E_PTT_UNUSED_ENTRY(178),
+ I40E_PTT_UNUSED_ENTRY(179),
+
+ I40E_PTT_UNUSED_ENTRY(180),
+ I40E_PTT_UNUSED_ENTRY(181),
+ I40E_PTT_UNUSED_ENTRY(182),
+ I40E_PTT_UNUSED_ENTRY(183),
+ I40E_PTT_UNUSED_ENTRY(184),
+ I40E_PTT_UNUSED_ENTRY(185),
+ I40E_PTT_UNUSED_ENTRY(186),
+ I40E_PTT_UNUSED_ENTRY(187),
+ I40E_PTT_UNUSED_ENTRY(188),
+ I40E_PTT_UNUSED_ENTRY(189),
+
+ I40E_PTT_UNUSED_ENTRY(190),
+ I40E_PTT_UNUSED_ENTRY(191),
+ I40E_PTT_UNUSED_ENTRY(192),
+ I40E_PTT_UNUSED_ENTRY(193),
+ I40E_PTT_UNUSED_ENTRY(194),
+ I40E_PTT_UNUSED_ENTRY(195),
+ I40E_PTT_UNUSED_ENTRY(196),
+ I40E_PTT_UNUSED_ENTRY(197),
+ I40E_PTT_UNUSED_ENTRY(198),
+ I40E_PTT_UNUSED_ENTRY(199),
+
+ I40E_PTT_UNUSED_ENTRY(200),
+ I40E_PTT_UNUSED_ENTRY(201),
+ I40E_PTT_UNUSED_ENTRY(202),
+ I40E_PTT_UNUSED_ENTRY(203),
+ I40E_PTT_UNUSED_ENTRY(204),
+ I40E_PTT_UNUSED_ENTRY(205),
+ I40E_PTT_UNUSED_ENTRY(206),
+ I40E_PTT_UNUSED_ENTRY(207),
+ I40E_PTT_UNUSED_ENTRY(208),
+ I40E_PTT_UNUSED_ENTRY(209),
+
+ I40E_PTT_UNUSED_ENTRY(210),
+ I40E_PTT_UNUSED_ENTRY(211),
+ I40E_PTT_UNUSED_ENTRY(212),
+ I40E_PTT_UNUSED_ENTRY(213),
+ I40E_PTT_UNUSED_ENTRY(214),
+ I40E_PTT_UNUSED_ENTRY(215),
+ I40E_PTT_UNUSED_ENTRY(216),
+ I40E_PTT_UNUSED_ENTRY(217),
+ I40E_PTT_UNUSED_ENTRY(218),
+ I40E_PTT_UNUSED_ENTRY(219),
+
+ I40E_PTT_UNUSED_ENTRY(220),
+ I40E_PTT_UNUSED_ENTRY(221),
+ I40E_PTT_UNUSED_ENTRY(222),
+ I40E_PTT_UNUSED_ENTRY(223),
+ I40E_PTT_UNUSED_ENTRY(224),
+ I40E_PTT_UNUSED_ENTRY(225),
+ I40E_PTT_UNUSED_ENTRY(226),
+ I40E_PTT_UNUSED_ENTRY(227),
+ I40E_PTT_UNUSED_ENTRY(228),
+ I40E_PTT_UNUSED_ENTRY(229),
+
+ I40E_PTT_UNUSED_ENTRY(230),
+ I40E_PTT_UNUSED_ENTRY(231),
+ I40E_PTT_UNUSED_ENTRY(232),
+ I40E_PTT_UNUSED_ENTRY(233),
+ I40E_PTT_UNUSED_ENTRY(234),
+ I40E_PTT_UNUSED_ENTRY(235),
+ I40E_PTT_UNUSED_ENTRY(236),
+ I40E_PTT_UNUSED_ENTRY(237),
+ I40E_PTT_UNUSED_ENTRY(238),
+ I40E_PTT_UNUSED_ENTRY(239),
+
+ I40E_PTT_UNUSED_ENTRY(240),
+ I40E_PTT_UNUSED_ENTRY(241),
+ I40E_PTT_UNUSED_ENTRY(242),
+ I40E_PTT_UNUSED_ENTRY(243),
+ I40E_PTT_UNUSED_ENTRY(244),
+ I40E_PTT_UNUSED_ENTRY(245),
+ I40E_PTT_UNUSED_ENTRY(246),
+ I40E_PTT_UNUSED_ENTRY(247),
+ I40E_PTT_UNUSED_ENTRY(248),
+ I40E_PTT_UNUSED_ENTRY(249),
+
+ I40E_PTT_UNUSED_ENTRY(250),
+ I40E_PTT_UNUSED_ENTRY(251),
+ I40E_PTT_UNUSED_ENTRY(252),
+ I40E_PTT_UNUSED_ENTRY(253),
+ I40E_PTT_UNUSED_ENTRY(254),
+ I40E_PTT_UNUSED_ENTRY(255)
+};
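+
+/*
+ * Illustrative sketch (compiled out): decoding a hardware ptype with the
+ * table above, following the work flow described before the table.
+ * example_decode_ptype() is hypothetical; the decoded field names are
+ * those of struct i40e_rx_ptype_decoded in i40e_type.h.
+ */
+#if 0	/* example only */
+static void
+example_decode_ptype(u8 ptype)
+{
+	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
+
+	if (!decoded.known)
+		return;		/* unknown packet type, nothing to decode */
+
+	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
+		/* IP packet: tunnel and inner protocol fields are valid */
+		if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP) {
+			/* e.g. a candidate for TCP checksum offload */
+		}
+	} else {
+		/* non-IP: decode via enum i40e_rx_l2_ptype */
+	}
+}
+#endif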
+
+
+/**
+ * i40e_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ DEBUGFUNC("i40e_validate_mac_addr");
+
+ /* Broadcast addresses ARE multicast addresses
+ * Make sure it is not a multicast address
+ * Reject the zero address
+ */
+ if (I40E_IS_MULTICAST(mac_addr) ||
+ (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
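+
+/*
+ * Illustrative sketch (compiled out): what the check above accepts and
+ * rejects. Broadcast (all 0xff) is caught by the multicast test since
+ * its first octet has the group bit set; all-zero is refused explicitly.
+ * example_mac_checks() is hypothetical.
+ */
+#if 0	/* example only */
+static void
+example_mac_checks(void)
+{
+	u8 bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+	u8 zero[6] = { 0, 0, 0, 0, 0, 0 };
+	u8 ucast[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
+
+	ASSERT(i40e_validate_mac_addr(bcast) == I40E_ERR_INVALID_MAC_ADDR);
+	ASSERT(i40e_validate_mac_addr(zero) == I40E_ERR_INVALID_MAC_ADDR);
+	ASSERT(i40e_validate_mac_addr(ucast) == I40E_SUCCESS);
+}
+#endif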
+
+/**
+ * i40e_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the MAC type and PHY code and inits the NVM.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The i40e_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u32 port, ari, func_rid;
+
+ DEBUGFUNC("i40e_init_shared_code");
+
+ i40e_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+#ifdef X722_SUPPORT
+ case I40E_MAC_X722:
+#endif
+ break;
+ default:
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw->phy.get_link_info = TRUE;
+
+	/* Determine port number and PF number */
+ port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
+ >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
+ hw->port = (u8)port;
+ ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
+ I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
+ func_rid = rd32(hw, I40E_PF_FUNC_RID);
+ if (ari)
+ hw->pf_id = (u8)(func_rid & 0xff);
+ else
+ hw->pf_id = (u8)(func_rid & 0x7);
+
+ status = i40e_init_nvm(hw);
+ return status;
+}
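+
+/*
+ * Illustrative sketch (compiled out): how the PF id above falls out of
+ * the function RID. With ARI the full 8-bit function number applies;
+ * without it only the low 3 bits do. example_pf_id() is hypothetical.
+ */
+#if 0	/* example only */
+static u8
+example_pf_id(u32 func_rid, u32 ari)
+{
+	/* func_rid = 0x2a: ARI yields PF 42, non-ARI yields PF 2 */
+	return (ari ? (u8)(func_rid & 0xff) : (u8)(func_rid & 0x7));
+}
+#endif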
+
+/**
+ * i40e_aq_mac_address_read - Retrieve the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: a return indicator of what addresses were added to the addr store
+ * @addrs: the requestor's mac addr store
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+static enum i40e_status_code i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_read *cmd_data =
+ (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, addrs,
+ sizeof(*addrs), cmd_details);
+ *flags = LE16_TO_CPU(cmd_data->command_flags);
+
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_write - Change the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: indicates which MAC to be written
+ * @mac_addr: address to write
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_write *cmd_data =
+ (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_mac_address_write);
+ cmd_data->command_flags = CPU_TO_LE16(flags);
+ cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]);
+ cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) |
+ ((u32)mac_addr[3] << 16) |
+ ((u32)mac_addr[4] << 8) |
+ mac_addr[5]);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
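+
+/*
+ * Illustrative sketch (compiled out): the inverse of the mac_sah/mac_sal
+ * packing above. aa:bb:cc:dd:ee:ff packs as sah = 0xaabb and
+ * sal = 0xccddeeff; example_unpack_mac() is hypothetical.
+ */
+#if 0	/* example only */
+static void
+example_unpack_mac(u16 sah, u32 sal, u8 *mac)
+{
+	mac[0] = (u8)(sah >> 8);
+	mac[1] = (u8)(sah & 0xff);
+	mac[2] = (u8)(sal >> 24);
+	mac[3] = (u8)((sal >> 16) & 0xff);
+	mac[4] = (u8)((sal >> 8) & 0xff);
+	mac[5] = (u8)(sal & 0xff);
+}
+#endif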
+
+/**
+ * i40e_get_mac_addr - get MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to MAC address
+ *
+ * Reads the adapter's MAC address from register
+ **/
+enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+
+ if (flags & I40E_AQC_LAN_ADDR_VALID)
+ memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+
+ return status;
+}
+
+/**
+ * i40e_get_port_mac_addr - get Port MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to Port MAC address
+ *
+ * Reads the adapter's Port MAC address
+ **/
+enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ if (status)
+ return status;
+
+ if (flags & I40E_AQC_PORT_ADDR_VALID)
+ memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+ else
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * i40e_pre_tx_queue_cfg - pre tx queue configure
+ * @hw: pointer to the HW structure
+ * @queue: target pf queue index
+ * @enable: state change request
+ *
+ * Handles the hw requirement to signal the intention to enable
+ * or disable the target queue.
+ **/
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+{
+ u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+ u32 reg_block = 0;
+ u32 reg_val;
+
+ if (abs_queue_idx >= 128) {
+ reg_block = abs_queue_idx / 128;
+ abs_queue_idx %= 128;
+ }
+
+ reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+ if (enable)
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+ else
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
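+
+/*
+ * Illustrative sketch (compiled out): the 128-queues-per-register split
+ * used above. Absolute queue 300 lands in register block 2, index 44.
+ * example_qdis_index() is hypothetical.
+ */
+#if 0	/* example only */
+static void
+example_qdis_index(u32 abs_queue_idx, u32 *reg_block, u32 *idx)
+{
+	*reg_block = abs_queue_idx / 128;
+	*idx = abs_queue_idx % 128;
+}
+#endif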
+
+/**
+ * i40e_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u16 pba_word = 0;
+ u16 pba_size = 0;
+ u16 pba_ptr = 0;
+ u16 i = 0;
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+ if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) {
+ DEBUGOUT("Failed to read PBA flags or flag is invalid.\n");
+ return status;
+ }
+
+ status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
+ if (status != I40E_SUCCESS) {
+ DEBUGOUT("Failed to read PBA Block pointer.\n");
+ return status;
+ }
+
+ status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
+ if (status != I40E_SUCCESS) {
+ DEBUGOUT("Failed to read PBA Block size.\n");
+ return status;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ DEBUGOUT("Buffer to small for PBA data.\n");
+ return I40E_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
+ if (status != I40E_SUCCESS) {
+ DEBUGOUT1("Failed to read PBA Block word %d.\n", i);
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
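+
+/*
+ * Illustrative sketch (compiled out): each NVM word read above carries
+ * two PBA characters, high byte first, so 0x4142 decodes to "AB".
+ * example_pba_word_to_chars() is hypothetical.
+ */
+#if 0	/* example only */
+static void
+example_pba_word_to_chars(u16 word, u8 *out)
+{
+	out[0] = (word >> 8) & 0xFF;	/* 0x4142 -> 'A' */
+	out[1] = word & 0xFF;		/* 0x4142 -> 'B' */
+}
+#endif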
+
+/**
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
+ **/
+static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+{
+ enum i40e_media_type media;
+
+ switch (hw->phy.link_info.phy_type) {
+ case I40E_PHY_TYPE_10GBASE_SR:
+ case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_1000BASE_SX:
+ case I40E_PHY_TYPE_1000BASE_LX:
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ media = I40E_MEDIA_TYPE_FIBER;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_10GBASE_T:
+ media = I40E_MEDIA_TYPE_BASET;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ case I40E_PHY_TYPE_10GBASE_AOC:
+ media = I40E_MEDIA_TYPE_DA;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ media = I40E_MEDIA_TYPE_BACKPLANE;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ default:
+ media = I40E_MEDIA_TYPE_UNKNOWN;
+ break;
+ }
+
+ return media;
+}
+
+#define I40E_PF_RESET_WAIT_COUNT 200
+/**
+ * i40e_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * Assuming someone else has triggered a global reset,
+ * ensure the global reset is complete and then reset the PF.
+ **/
+enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
+{
+ u32 cnt = 0;
+ u32 cnt1 = 0;
+ u32 reg = 0;
+ u32 grst_del;
+
+ /* Poll for Global Reset steady state in case of recent GRST.
+	 * The grst delay value is in 100ms units, and we'll poll ten
+	 * extra counts to be sure we don't just miss the end.
+ */
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ for (cnt = 0; cnt < grst_del + 10; cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ break;
+ i40e_msec_delay(100);
+ }
+ if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ DEBUGOUT("Global reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* Now Wait for the FW to be ready */
+ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+ reg = rd32(hw, I40E_GLNVM_ULD);
+ reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+ if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+ DEBUGOUT1("Core and Global modules ready %d\n", cnt1);
+ break;
+ }
+ i40e_msec_delay(10);
+ }
+ if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+ DEBUGOUT("wait for FW Reset complete timedout\n");
+ DEBUGOUT1("I40E_GLNVM_ULD = 0x%x\n", reg);
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* If there was a Global Reset in progress when we got here,
+ * we don't need to do the PF Reset
+ */
+ if (!cnt) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ wr32(hw, I40E_PFGEN_CTRL,
+ (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
+ break;
+ i40e_msec_delay(1);
+ }
+ if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+ DEBUGOUT("PF reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+ }
+
+ i40e_clear_pxe_mode(hw);
+
+ return I40E_SUCCESS;
+}
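+
+/*
+ * Illustrative sketch (compiled out): the worst-case GRST wait above.
+ * grst_del is in 100 ms units and ten extra polls are added, so
+ * grst_del = 5 bounds the wait at (5 + 10) * 100 ms = 1.5 s.
+ * example_grst_wait_ms() is hypothetical.
+ */
+#if 0	/* example only */
+static u32
+example_grst_wait_ms(u32 grst_del)
+{
+	return ((grst_del + 10) * 100);
+}
+#endif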
+
+/**
+ * i40e_clear_hw - clear out any left over hw state
+ * @hw: pointer to the hw struct
+ *
+ * Clear queues and interrupts, typically called at init time,
+ * but after the capabilities have been found so we know how many
+ * queues and msix vectors have been allocated.
+ **/
+void i40e_clear_hw(struct i40e_hw *hw)
+{
+ u32 num_queues, base_queue;
+ u32 num_pf_int;
+ u32 num_vf_int;
+ u32 num_vfs;
+ u32 i, j;
+ u32 val;
+ u32 eol = 0x7ff;
+
+ /* get number of interrupts, queues, and vfs */
+ val = rd32(hw, I40E_GLPCI_CNF2);
+ num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
+ I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
+ num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
+ I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+
+ val = rd32(hw, I40E_PFLAN_QALLOC);
+ base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
+ I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+ j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+ I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+ if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+ num_queues = (j - base_queue) + 1;
+ else
+ num_queues = 0;
+
+ val = rd32(hw, I40E_PF_VT_PFALLOC);
+ i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
+ I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+ j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+ I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+ if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+ num_vfs = (j - i) + 1;
+ else
+ num_vfs = 0;
+
+ /* stop all the interrupts */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+ val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+ for (i = 0; i < num_pf_int - 2; i++)
+ wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
+
+ /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
+ val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLST0, val);
+ for (i = 0; i < num_pf_int - 2; i++)
+ wr32(hw, I40E_PFINT_LNKLSTN(i), val);
+ val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ for (i = 0; i < num_vfs; i++)
+ wr32(hw, I40E_VPINT_LNKLST0(i), val);
+ for (i = 0; i < num_vf_int - 2; i++)
+ wr32(hw, I40E_VPINT_LNKLSTN(i), val);
+
+ /* warn the HW of the coming Tx disables */
+ for (i = 0; i < num_queues; i++) {
+ u32 abs_queue_idx = base_queue + i;
+ u32 reg_block = 0;
+
+ if (abs_queue_idx >= 128) {
+ reg_block = abs_queue_idx / 128;
+ abs_queue_idx %= 128;
+ }
+
+ val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+ val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
+ }
+ i40e_usec_delay(400);
+
+ /* stop all the queues */
+ for (i = 0; i < num_queues; i++) {
+ wr32(hw, I40E_QINT_TQCTL(i), 0);
+ wr32(hw, I40E_QTX_ENA(i), 0);
+ wr32(hw, I40E_QINT_RQCTL(i), 0);
+ wr32(hw, I40E_QRX_ENA(i), 0);
+ }
+
+ /* short wait for all queue disables to settle */
+ i40e_usec_delay(50);
+}
+
+/**
+ * i40e_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ **/
+void i40e_clear_pxe_mode(struct i40e_hw *hw)
+{
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_clear_pxe_mode(hw, NULL);
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ */
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+ u32 gpio_val = 0;
+ u32 port;
+
+ if (!hw->func_caps.led[idx])
+ return 0;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+ I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+	/* if PRT_NUM_NA is 1 then this LED is not port specific; in
+	 * that case, or if it is not our port, ignore it
+	 */
+ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+ (port != hw->port))
+ return 0;
+
+ return gpio_val;
+}
+
+#define I40E_COMBINED_ACTIVITY 0xA
+#define I40E_FILTER_ACTIVITY 0xE
+#define I40E_LINK_ACTIVITY 0xC
+#define I40E_MAC_ACTIVITY 0xD
+#define I40E_LED0 22
+
+/**
+ * i40e_led_get - return current on/off mode
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the 'mode' field as defined in the
+ * GPIO register definitions: 0x0 = off, 0xf = on, and other
+ * values are variations of possible behaviors relating to
+ * blink, link, and wire.
+ **/
+u32 i40e_led_get(struct i40e_hw *hw)
+{
+ u32 current_mode = 0;
+ u32 mode = 0;
+ int i;
+
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
+
+ if (!gpio_val)
+ continue;
+
+ /* ignore gpio LED src mode entries related to the activity
+ * LEDs
+ */
+ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+ switch (current_mode) {
+ case I40E_COMBINED_ACTIVITY:
+ case I40E_FILTER_ACTIVITY:
+ case I40E_MAC_ACTIVITY:
+ continue;
+ default:
+ break;
+ }
+
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+ break;
+ }
+
+ return mode;
+}
+
+/**
+ * i40e_led_set - set new on/off mode
+ * @hw: pointer to the hw struct
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: TRUE if the LED should blink when on, FALSE if steady
+ *
+ * If this function is used to turn on the blink, it should also
+ * be used to disable the blink when restoring the original state.
+ **/
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
+{
+ u32 current_mode = 0;
+ int i;
+
+ if (mode & 0xfffffff0) {
+ DEBUGOUT1("invalid mode passed in %X\n", mode);
+ }
+
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
+
+ if (!gpio_val)
+ continue;
+
+ /* ignore gpio LED src mode entries related to the activity
+ * LEDs
+ */
+ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+ switch (current_mode) {
+ case I40E_COMBINED_ACTIVITY:
+ case I40E_FILTER_ACTIVITY:
+ case I40E_MAC_ACTIVITY:
+ continue;
+ default:
+ break;
+ }
+
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ /* this & is a bit of paranoia, but serves as a range check */
+ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+ if (mode == I40E_LINK_ACTIVITY)
+ blink = FALSE;
+
+ if (blink)
+ gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ else
+ gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+
+ wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ break;
+ }
+}
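+
+/*
+ * Illustrative sketch (compiled out): an identify-port style blink,
+ * saving the original mode and restoring it as the comment above
+ * requires. example_identify_port() is hypothetical.
+ */
+#if 0	/* example only */
+static void
+example_identify_port(struct i40e_hw *hw)
+{
+	u32 orig = i40e_led_get(hw);
+
+	i40e_led_set(hw, 0xf, TRUE);	/* solid-on mode, blinking */
+	/* ... identification period elapses ... */
+	i40e_led_set(hw, orig, FALSE);	/* restore mode, blink off */
+}
+#endif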
+
+/* Admin command wrappers */
+
+/**
+ * i40e_aq_get_phy_capabilities
+ * @hw: pointer to the hw struct
+ * @abilities: structure for PHY capabilities to be filled
+ * @qualified_modules: report Qualified Modules
+ * @report_init: report init capabilities (active are default)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the various PHY abilities supported on the Port.
+ **/
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+ u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+
+ if (!abilities)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_abilities);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (abilities_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ if (qualified_modules)
+ desc.params.external.param0 |=
+ CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
+
+ if (report_init)
+ desc.params.external.param0 |=
+ CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
+
+ status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
+ cmd_details);
+
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
+ status = I40E_ERR_UNKNOWN_PHY;
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_config
+ * @hw: pointer to the hw struct
+ * @config: structure with PHY configuration to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters
+ * supported on the Port. One or more of the Set PHY config parameters may be
+ * ignored in an MFP mode as the PF may not have the privilege to set some
+ * of the PHY Config parameters. This status will be indicated by the
+ * command response.
+ **/
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_set_phy_config *cmd =
+ (struct i40e_aq_set_phy_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (!config)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_config);
+
+ *cmd = *config;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to automatically restart the link on change
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
+{
+ enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config;
+ enum i40e_status_code status;
+ u8 pause_mask = 0x0;
+
+ *aq_failures = 0x0;
+
+ switch (fc_mode) {
+ case I40E_FC_FULL:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+ break;
+ case I40E_FC_RX_PAUSE:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+ break;
+ case I40E_FC_TX_PAUSE:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+ break;
+ default:
+ break;
+ }
+
+ /* Get the current phy config */
+	status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
+ NULL);
+ if (status) {
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
+ return status;
+ }
+
+ memset(&config, 0, sizeof(config));
+ /* clear the old pause settings */
+ config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+ ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+ /* set the new abilities */
+ config.abilities |= pause_mask;
+ /* If the abilities have changed, then set the new config */
+ if (config.abilities != abilities.abilities) {
+ /* Auto restart link so settings take effect */
+ if (atomic_restart)
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* Copy over all the old settings */
+ config.phy_type = abilities.phy_type;
+ config.link_speed = abilities.link_speed;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
+ }
+ /* Update the link info */
+ status = i40e_update_link_info(hw);
+ if (status) {
+ /* Wait a little bit (on 40G cards it sometimes takes a really
+ * long time for link to come back from the atomic reset)
+ * and try once more
+ */
+ i40e_msec_delay(1000);
+ status = i40e_update_link_info(hw);
+ }
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_mac_config
+ * @hw: pointer to the hw struct
+ * @max_frame_size: Maximum Frame Size to be supported by the port
+ * @crc_en: Tell HW to append a CRC to outgoing frames
+ * @pacing: Pacing configurations
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Configure MAC settings for frame size, jumbo frame support and the
+ * addition of a CRC by the hardware.
+ **/
+enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
+ u16 max_frame_size,
+ bool crc_en, u16 pacing,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_set_mac_config *cmd =
+ (struct i40e_aq_set_mac_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (max_frame_size == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_mac_config);
+
+ cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
+ cmd->params = ((u8)pacing & 0x0F) << 3;
+ if (crc_en)
+ cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_clear_pxe *cmd =
+ (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_pxe_mode);
+
+ cmd->rx_cnt = 0x2;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_link_restart_an
+ * @hw: pointer to the hw struct
+ * @enable_link: if TRUE: enable link, if FALSE: disable link
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ **/
+enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_link_restart_an *cmd =
+ (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_link_restart_an);
+
+ cmd->command = I40E_AQ_PHY_RESTART_AN;
+ if (enable_link)
+ cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
+ else
+ cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the link status of the adapter.
+ **/
+enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_link_status *resp =
+ (struct i40e_aqc_get_link_status *)&desc.params.raw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ enum i40e_status_code status;
+ bool tx_pause, rx_pause;
+ u16 command_flags;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+
+ if (enable_lse)
+ command_flags = I40E_AQ_LSE_ENABLE;
+ else
+ command_flags = I40E_AQ_LSE_DISABLE;
+ resp->command_flags = CPU_TO_LE16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_link_info_exit;
+
+ /* save off old link status information */
+ i40e_memcpy(&hw->phy.link_info_old, hw_link_info,
+ sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA);
+
+ /* update link status */
+ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw->phy.media_type = i40e_get_media_type(hw);
+ hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
+ hw_link_info->link_info = resp->link_info;
+ hw_link_info->an_info = resp->an_info;
+ hw_link_info->ext_info = resp->ext_info;
+ hw_link_info->loopback = resp->loopback;
+ hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
+ hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+ /* update fc info */
+ tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
+ rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
+	if (tx_pause && rx_pause)
+ hw->fc.current_mode = I40E_FC_FULL;
+ else if (tx_pause)
+ hw->fc.current_mode = I40E_FC_TX_PAUSE;
+ else if (rx_pause)
+ hw->fc.current_mode = I40E_FC_RX_PAUSE;
+ else
+ hw->fc.current_mode = I40E_FC_NONE;
+
+ if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+ hw_link_info->crc_enable = TRUE;
+ else
+ hw_link_info->crc_enable = FALSE;
+
+ if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
+ hw_link_info->lse_enable = TRUE;
+ else
+ hw_link_info->lse_enable = FALSE;
+
+ if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+ hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+
+ /* save link status information */
+ if (link)
+ i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
+ I40E_NONDMA_TO_NONDMA);
+
+ /* flag cleared so helper functions don't call AQ again */
+ hw->phy.get_link_info = FALSE;
+
+aq_get_link_info_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_int_mask
+ * @hw: pointer to the hw struct
+ * @mask: interrupt mask to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set link interrupt mask.
+ **/
+enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+ u16 mask,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_int_mask *cmd =
+ (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_int_mask);
+
+ cmd->event_mask = CPU_TO_LE16(mask);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the Local AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *resp =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_local_advt_reg);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_local_advt_reg_exit;
+
+ *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+ *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_local_advt_reg_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the Local AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *cmd =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_set_local_advt_reg);
+
+ cmd->local_an_reg0 = CPU_TO_LE32(I40E_LO_DWORD(advt_reg));
+ cmd->local_an_reg1 = CPU_TO_LE16(I40E_HI_DWORD(advt_reg));
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_partner_advt
+ * @hw: pointer to the hw struct
+ * @advt_reg: AN partner advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the link partner AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *resp =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_partner_advt);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_partner_advt_exit;
+
+ *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+ *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_partner_advt_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_lb_modes
+ * @hw: pointer to the hw struct
+ * @lb_modes: loopback mode to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets loopback modes.
+ **/
+enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
+ u16 lb_modes,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_lb_mode *cmd =
+ (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_lb_modes);
+
+ cmd->lb_mode = CPU_TO_LE16(lb_modes);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_debug
+ * @hw: pointer to the hw struct
+ * @cmd_flags: debug command flags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Reset the external PHY.
+ **/
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_debug *cmd =
+ (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_debug);
+
+ cmd->command_flags = cmd_flags;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware.
+ **/
+enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_vsi);
+
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid);
+ cmd->connection_type = vsi_ctx->connection_type;
+ cmd->vf_id = vsi_ctx->vf_num;
+ cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_add_vsi_exit;
+
+ vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+ vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_add_vsi_exit:
+ return status;
+}
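+
+/*
+ * Usage sketch (illustrative only, not part of the shared code): a
+ * caller typically fills in the uplink SEID, connection type and VSI
+ * type, then reads back the SEID and VSI number the firmware assigned.
+ * Variable names here are hypothetical.
+ *
+ *	struct i40e_vsi_context ctx = { 0 };
+ *
+ *	ctx.uplink_seid = uplink_seid;
+ *	ctx.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+ *	ctx.flags = I40E_AQ_VSI_TYPE_PF;
+ *	// ... fill ctx.info (queue map, VLAN handling, etc.) ...
+ *	if (i40e_aq_add_vsi(hw, &ctx, NULL) == I40E_SUCCESS)
+ *		new_vsi_seid = ctx.seid;
+ */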
+
+/**
+ * i40e_aq_set_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_unicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set unicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_multicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set multicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: TRUE to set filter, FALSE to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set_filter)
+ cmd->promiscuous_flags
+ |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ else
+ cmd->promiscuous_flags
+ &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_vsi_params - get VSI configuration info
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_vsi_parameters);
+
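+ /* The SEID of the VSI to query travels in the uplink_seid field of
+ * the shared add/get/update command layout.
+ */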
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_vsi_params_exit;
+
+ vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+ vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_get_vsi_params_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_update_vsi_params
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update a VSI context.
+ **/
+enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_update_vsi_parameters);
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_switch_config
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of input buffer
+ * @start_seid: seid to start for the report, 0 == beginning
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Fill the buf with switch configuration returned from AdminQ command
+ **/
+enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *scfg =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_config);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ scfg->seid = CPU_TO_LE16(*start_seid);
+
+ status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
+ *start_seid = LE16_TO_CPU(scfg->seid);
+
+ return status;
+}
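+
+/*
+ * Usage sketch (illustrative only, not part of the shared code): the
+ * firmware returns the switch configuration in chunks and writes the
+ * SEID to continue from back into start_seid, so a caller pages until
+ * it comes back as 0. "cfg_buf" is a hypothetical caller-owned buffer.
+ *
+ *	u16 next_seid = 0;
+ *
+ *	do {
+ *		status = i40e_aq_get_switch_config(hw, cfg_buf,
+ *		    I40E_AQ_LARGE_BUF, &next_seid, NULL);
+ *		if (status != I40E_SUCCESS)
+ *			break;
+ *		// consume cfg_buf->header.num_reported elements
+ *	} while (next_seid != 0);
+ */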
+
+/**
+ * i40e_aq_get_firmware_version
+ * @hw: pointer to the hw struct
+ * @fw_major_version: firmware major version
+ * @fw_minor_version: firmware minor version
+ * @fw_build: firmware build number
+ * @api_major_version: admin queue API major version
+ * @api_minor_version: admin queue API minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the firmware version from the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_version *resp =
+ (struct i40e_aqc_get_version *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS) {
+ if (fw_major_version != NULL)
+ *fw_major_version = LE16_TO_CPU(resp->fw_major);
+ if (fw_minor_version != NULL)
+ *fw_minor_version = LE16_TO_CPU(resp->fw_minor);
+ if (fw_build != NULL)
+ *fw_build = LE32_TO_CPU(resp->fw_build);
+ if (api_major_version != NULL)
+ *api_major_version = LE16_TO_CPU(resp->api_major);
+ if (api_minor_version != NULL)
+ *api_minor_version = LE16_TO_CPU(resp->api_minor);
+
+ /* Workaround: firmware 4.2 and newer reports AQ API version
+ * 1.1 even though it implements the 1.2 interface, so fix up
+ * the minor version in software.
+ */
+ if (api_major_version && api_minor_version &&
+ fw_major_version && fw_minor_version &&
+ ((*api_major_version == 1) && (*api_minor_version == 1)) &&
+ (((*fw_major_version == 4) && (*fw_minor_version >= 2)) ||
+ (*fw_major_version > 4)))
+ *api_minor_version = 2;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_driver_version
+ * @hw: pointer to the hw struct
+ * @dv: driver's major, minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Send the driver version to the firmware
+ **/
+enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_driver_version *cmd =
+ (struct i40e_aqc_driver_version *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 len;
+
+ if (dv == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ cmd->driver_major_ver = dv->major_version;
+ cmd->driver_minor_ver = dv->minor_version;
+ cmd->driver_build_ver = dv->build_version;
+ cmd->driver_subbuild_ver = dv->subbuild_version;
+
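+ /* Length of the driver string to send: stop at the buffer limit,
+ * at the terminating NUL, or at the first non-7-bit-ASCII byte.
+ */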
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ (dv->driver_string[len] < 0x80) &&
+ dv->driver_string[len])
+ len++;
+ status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+ len, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_link_status - get status of the HW network link
+ * @hw: pointer to the hw struct
+ * @link_up: pointer to bool (TRUE/FALSE = linkup/linkdown)
+ *
+ * Sets link_up to TRUE if the link is up and FALSE if it is down.
+ * The value of link_up is invalid if the returned status != I40E_SUCCESS.
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_update_link_info(hw);
+
+ if (status != I40E_SUCCESS)
+ i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+ status);
+ }
+
+ *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+ return status;
+}
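+
+/*
+ * Usage sketch (illustrative only, not part of the shared code): poll
+ * the link state; link_up is only meaningful when the call succeeds.
+ *
+ *	bool link_up = FALSE;
+ *
+ *	if (i40e_get_link_status(hw, &link_up) != I40E_SUCCESS)
+ *		return;
+ *	// hw->phy.link_info now also holds the negotiated speed and flags
+ */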
+
+/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ if (status)
+ return status;
+
+ status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
+ NULL);
+ if (status)
+ return status;
+
+ memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type));
+
+ return status;
+}
+
+/**
+ * i40e_get_link_speed
+ * @hw: pointer to the hw struct
+ *
+ * Returns the link speed of the adapter.
+ **/
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw)
+{
+ enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+
+ if (status != I40E_SUCCESS)
+ goto i40e_link_speed_exit;
+ }
+
+ speed = hw->phy.link_info.link_speed;
+
+i40e_link_speed_exit:
+ return speed;
+}
+
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: TRUE for default port VSI, FALSE for control port
+ * @enable_l2_filtering: TRUE to add L2 filter table rules to regular
+ * forwarding rules for cloud support
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements. If the uplink SEID is 0, this will be a floating VEB.
+ **/
+enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, bool enable_l2_filtering,
+ u16 *veb_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_veb *cmd =
+ (struct i40e_aqc_add_veb *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp =
+ (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 veb_flags = 0;
+
+ /* SEIDs need to either both be set or both be 0 for floating VEB */
+ if (!!uplink_seid != !!downlink_seid)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
+
+ cmd->uplink_seid = CPU_TO_LE16(uplink_seid);
+ cmd->downlink_seid = CPU_TO_LE16(downlink_seid);
+ cmd->enable_tcs = enabled_tc;
+ if (!uplink_seid)
+ veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
+ if (default_port)
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
+ else
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+ if (enable_l2_filtering)
+ veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+
+ cmd->veb_flags = CPU_TO_LE16(veb_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && veb_seid)
+ *veb_seid = LE16_TO_CPU(resp->veb_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_veb_parameters - Retrieve VEB parameters
+ * @hw: pointer to the hw struct
+ * @veb_seid: the SEID of the VEB to query
+ * @switch_id: the uplink switch id
+ * @floating: set to TRUE if the VEB is floating
+ * @statistic_index: index of the stats counter block for this VEB
+ * @vebs_used: number of VEBs used by the function
+ * @vebs_free: total VEBs not reserved by any function
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This retrieves the parameters for a particular VEB, specified by
+ * veb_seid, and returns them to the caller.
+ **/
+enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+ (struct i40e_aqc_get_veb_parameters_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ if (veb_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_veb_parameters);
+ cmd_resp->seid = CPU_TO_LE16(veb_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status)
+ goto get_veb_exit;
+
+ if (switch_id)
+ *switch_id = LE16_TO_CPU(cmd_resp->switch_id);
+ if (statistic_index)
+ *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index);
+ if (vebs_used)
+ *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used);
+ if (vebs_free)
+ *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free);
+ if (floating) {
+ u16 flags = LE16_TO_CPU(cmd_resp->veb_flags);
+
+ if (flags & I40E_AQC_ADD_VEB_FLOATING)
+ *floating = TRUE;
+ else
+ *floating = FALSE;
+ }
+
+get_veb_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
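+
+/*
+ * Usage sketch (illustrative only, not part of the shared code): add a
+ * single perfect-match unicast filter on a VSI. "mac" and "vsi_seid"
+ * are hypothetical caller-supplied values.
+ *
+ *	struct i40e_aqc_add_macvlan_element_data elem = { 0 };
+ *
+ *	(void) memcpy(elem.mac_addr, mac, sizeof(elem.mac_addr));
+ *	elem.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ *	    I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+ *	status = i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
+ */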
+
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*v_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*v_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: id of the VF to send the message to
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send a message to the specified VF.
+ **/
+enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pf_vf_message *cmd =
+ (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd->id = CPU_TO_LE32(vfid);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_debug_read_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the register using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd_resp =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reg_val == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
+
+ cmd_resp->address = CPU_TO_LE32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS) {
+ *reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) |
+ (u64)LE32_TO_CPU(cmd_resp->value_low);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_debug_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write to a register using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+
+ cmd->address = CPU_TO_LE32(reg_addr);
+ cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32));
+ cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF));
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: pointer to where to store the device's current HMC profile
+ * @pe_vf_enabled_count: pointer to where to store the number of PE enabled VFs
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * query the HMC profile of the device.
+ **/
+enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile *profile,
+ u8 *pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_get_set_hmc_resource_profile *resp =
+ (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_query_hmc_resource_profile);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ *profile = (enum i40e_aq_hmc_profile)(resp->pm_profile &
+ I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK);
+ *pe_vf_enabled_count = resp->pe_vf_enabled &
+ I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK;
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: type of profile the HMC is to be set as
+ * @pe_vf_enabled_count: the number of PE enabled VFs the system has
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * set the HMC profile of the device.
+ **/
+enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile profile,
+ u8 pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_get_set_hmc_resource_profile *cmd =
+ (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_hmc_resource_profile);
+
+ cmd->pm_profile = (u8)profile;
+ cmd->pe_vf_enabled = pe_vf_enabled_count;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_request_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * requests common resource using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd_resp =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_request_resource");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+
+ cmd_resp->resource_id = CPU_TO_LE16(resource);
+ cmd_resp->access_type = CPU_TO_LE16(access);
+ cmd_resp->resource_number = CPU_TO_LE32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by someone else, the command completes with
+ * busy return value and the timeout field indicates the maximum time
+ * the current owner of the resource has to free it.
+ */
+ if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ *timeout = LE32_TO_CPU(cmd_resp->timeout);
+
+ return status;
+}
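+
+/*
+ * Usage sketch (illustrative only, not part of the shared code): NVM
+ * access brackets the actual reads with an acquire/release pair on the
+ * shared NVM resource.
+ *
+ *	u64 timeout = 0;
+ *
+ *	status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
+ *	    I40E_RESOURCE_READ, 0, &timeout, NULL);
+ *	if (status == I40E_SUCCESS) {
+ *		// ... i40e_aq_read_nvm() calls ...
+ *		(void) i40e_aq_release_resource(hw,
+ *		    I40E_NVM_RESOURCE_ID, 0, NULL);
+ *	}
+ */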
+
+/**
+ * i40e_aq_release_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @sdp_number: resource number
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * release common resource using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_release_resource");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+
+ cmd->resource_id = CPU_TO_LE16(resource);
+ cmd->resource_number = CPU_TO_LE32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_read_nvm");
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_read_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_read_nvm_exit:
+ return status;
+}
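+
+/*
+ * Usage sketch (illustrative only, not part of the shared code): read a
+ * region larger than one AQ buffer in chunks, setting last_command only
+ * on the final chunk so the firmware can close out the series. "buf",
+ * "offset" and "total" are hypothetical caller-supplied values.
+ *
+ *	u16 chunk;
+ *	u32 done;
+ *
+ *	for (done = 0; done < total; done += chunk) {
+ *		chunk = (u16)MIN(total - done, I40E_AQ_LARGE_BUF);
+ *		status = i40e_aq_read_nvm(hw, 0, offset + done, chunk,
+ *		    buf + done, (done + chunk == total), NULL);
+ *		if (status != I40E_SUCCESS)
+ *			break;
+ *	}
+ */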
+
+/**
+ * i40e_aq_read_nvm_config - read an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature id
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: pointer to count of elements read by FW
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, u32 field_id, void *data,
+ u16 buf_size, u16 *element_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_config_read *cmd =
+ (struct i40e_aqc_nvm_config_read *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+ cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id));
+ if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK)
+ cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16));
+ else
+ cmd->element_id_msw = 0;
+
+ status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+ if (!status && element_count)
+ *element_count = LE16_TO_CPU(cmd->element_count);
+
+ return status;
+}
+
+/**
+ * i40e_aq_write_nvm_config - write an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: count of elements to be written
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, void *data, u16 buf_size,
+ u16 element_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_config_write *cmd =
+ (struct i40e_aqc_nvm_config_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->element_count = CPU_TO_LE16(element_count);
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+ status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_oem_post_update - triggers an OEM specific flow after update
+ * @hw: pointer to the hw struct
+ * @buff: pointer to an OEM data buffer (not used by this command)
+ * @buff_size: size of the buffer (not used by this command)
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH)
+ status = I40E_ERR_NOT_IMPLEMENTED;
+
+ return status;
+}
+
+/**
+ * i40e_aq_erase_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in the module (expressed in 4 KB from module's beginning)
+ * @length: length of the section to be erased (expressed in 4 KB)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_erase_nvm");
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_erase_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_erase_nvm_exit:
+ return status;
+}
+
+#define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01
+#define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02
+#define I40E_DEV_FUNC_CAP_NPAR 0x03
+#define I40E_DEV_FUNC_CAP_OS2BMC 0x04
+#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05
+#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12
+#define I40E_DEV_FUNC_CAP_VF 0x13
+#define I40E_DEV_FUNC_CAP_VMDQ 0x14
+#define I40E_DEV_FUNC_CAP_802_1_QBG 0x15
+#define I40E_DEV_FUNC_CAP_802_1_QBH 0x16
+#define I40E_DEV_FUNC_CAP_VSI 0x17
+#define I40E_DEV_FUNC_CAP_DCB 0x18
+#define I40E_DEV_FUNC_CAP_FCOE 0x21
+#define I40E_DEV_FUNC_CAP_ISCSI 0x22
+#define I40E_DEV_FUNC_CAP_RSS 0x40
+#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41
+#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42
+#define I40E_DEV_FUNC_CAP_MSIX 0x43
+#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
+#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
+#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
+#define I40E_DEV_FUNC_CAP_FLEX10 0xF1
+#define I40E_DEV_FUNC_CAP_CEM 0xF2
+#define I40E_DEV_FUNC_CAP_IWARP 0x51
+#define I40E_DEV_FUNC_CAP_LED 0x61
+#define I40E_DEV_FUNC_CAP_SDP 0x62
+#define I40E_DEV_FUNC_CAP_MDIO 0x63
+#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64
+
+/**
+ * i40e_parse_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: pointer to a buffer containing device/function capability records
+ * @cap_count: number of capability records in the list
+ * @list_type_opc: type of capabilities list to parse
+ *
+ * Parse the device/function capabilities list.
+ **/
+static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+ u32 cap_count,
+ enum i40e_admin_queue_opc list_type_opc)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap;
+ u32 valid_functions, num_functions;
+ u32 number, logical_id, phys_id;
+ struct i40e_hw_capabilities *p;
+ u8 major_rev;
+ u32 i = 0;
+ u16 id;
+
+ cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+
+ if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+ else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->func_caps;
+ else
+ return;
+
+ for (i = 0; i < cap_count; i++, cap++) {
+ id = LE16_TO_CPU(cap->id);
+ number = LE32_TO_CPU(cap->number);
+ logical_id = LE32_TO_CPU(cap->logical_id);
+ phys_id = LE32_TO_CPU(cap->phys_id);
+ major_rev = cap->major_rev;
+
+ switch (id) {
+ case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+ p->switch_mode = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MGMT_MODE:
+ p->management_mode = number;
+ break;
+ case I40E_DEV_FUNC_CAP_NPAR:
+ p->npar_enable = number;
+ break;
+ case I40E_DEV_FUNC_CAP_OS2BMC:
+ p->os2bmc = number;
+ break;
+ case I40E_DEV_FUNC_CAP_VALID_FUNC:
+ p->valid_functions = number;
+ break;
+ case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+ if (number == 1)
+ p->sr_iov_1_1 = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_VF:
+ p->num_vfs = number;
+ p->vf_base_id = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_VMDQ:
+ if (number == 1)
+ p->vmdq = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_802_1_QBG:
+ if (number == 1)
+ p->evb_802_1_qbg = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_802_1_QBH:
+ if (number == 1)
+ p->evb_802_1_qbh = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_VSI:
+ p->num_vsis = number;
+ break;
+ case I40E_DEV_FUNC_CAP_DCB:
+ if (number == 1) {
+ p->dcb = TRUE;
+ p->enabled_tcmap = logical_id;
+ p->maxtc = phys_id;
+ }
+ break;
+ case I40E_DEV_FUNC_CAP_FCOE:
+ if (number == 1)
+ p->fcoe = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_ISCSI:
+ if (number == 1)
+ p->iscsi = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_RSS:
+ p->rss = TRUE;
+ p->rss_table_size = number;
+ p->rss_table_entry_width = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_RX_QUEUES:
+ p->num_rx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_TX_QUEUES:
+ p->num_tx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_MSIX:
+ p->num_msix_vectors = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MSIX_VF:
+ p->num_msix_vectors_vf = number;
+ break;
+ case I40E_DEV_FUNC_CAP_FLEX10:
+ if (major_rev == 1) {
+ if (number == 1) {
+ p->flex10_enable = TRUE;
+ p->flex10_capable = TRUE;
+ }
+ } else {
+ /* Capability revision >= 2 */
+ if (number & 1)
+ p->flex10_enable = TRUE;
+ if (number & 2)
+ p->flex10_capable = TRUE;
+ }
+ p->flex10_mode = logical_id;
+ p->flex10_status = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_CEM:
+ if (number == 1)
+ p->mgmt_cem = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_IWARP:
+ if (number == 1)
+ p->iwarp = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_LED:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->led[phys_id] = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_SDP:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->sdp[phys_id] = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_MDIO:
+ if (number == 1) {
+ p->mdio_port_num = phys_id;
+ p->mdio_port_mode = logical_id;
+ }
+ break;
+ case I40E_DEV_FUNC_CAP_IEEE_1588:
+ if (number == 1)
+ p->ieee_1588 = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+ p->fd = TRUE;
+ p->fd_filters_guaranteed = number;
+ p->fd_filters_best_effort = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+ p->wr_csr_prot = (u64)number;
+ p->wr_csr_prot |= (u64)logical_id << 32;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (p->fcoe)
+ i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
+ /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
+ p->fcoe = FALSE;
+
+ /* count the enabled ports (aka the "not disabled" ports) */
+ hw->num_ports = 0;
+ for (i = 0; i < 4; i++) {
+ u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
+ u64 port_cfg = 0;
+
+ /* use AQ read to get the physical register offset instead
+ * of the port relative offset
+ */
+ i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
+ if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
+ hw->num_ports++;
+ }
+
+ valid_functions = p->valid_functions;
+ num_functions = 0;
+ while (valid_functions) {
+ if (valid_functions & 1)
+ num_functions++;
+ valid_functions >>= 1;
+ }
+
+ /* partition id is 1-based, and functions are evenly spread
+ * across the ports as partitions
+ */
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+
+ /* additional HW specific goodies that might
+ * someday be HW version specific
+ */
+ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+}
+
+/**
+ * i40e_aq_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: a virtual buffer to hold the capabilities
+ * @buff_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
+ * @list_type_opc: capabilities type to discover - pass in the command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the device capabilities descriptions from the firmware
+ **/
+enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_list_capabilites *cmd;
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+
+ if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
+ list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
+ status = I40E_ERR_PARAM;
+ goto exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ *data_size = LE16_TO_CPU(desc.datalen);
+
+ if (status)
+ goto exit;
+
+ i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count),
+ list_type_opc);
+
+exit:
+ return status;
+}
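+
+/*
+ * Usage sketch (illustrative only, not part of the shared code): when
+ * the supplied buffer is too small the firmware fails with ENOMEM and
+ * data_size reports the size required, so callers typically reallocate
+ * and retry. "buff" and "buff_size" are hypothetical.
+ *
+ *	u16 needed = 0;
+ *
+ *	status = i40e_aq_discover_capabilities(hw, buff, buff_size,
+ *	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
+ *	if (status != I40E_SUCCESS &&
+ *	    hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+ *		// reallocate buff with at least "needed" bytes, retry
+ *	}
+ */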
+
+/**
+ * i40e_aq_update_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_update_nvm");
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_update_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_update_nvm_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_get_mib *cmd =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+ cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (local_len != NULL)
+ *local_len = LE16_TO_CPU(resp->local_len);
+ if (remote_len != NULL)
+ *remote_len = LE16_TO_CPU(resp->remote_len);
+ }
+
+ return status;
+}
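+
+/*
+ * Usage sketch (illustrative only, not part of the shared code): fetch
+ * the locally administered MIB from the nearest bridge. "mib_buf" and
+ * its size are hypothetical.
+ *
+ *	u16 local_len = 0;
+ *
+ *	status = i40e_aq_get_lldp_mib(hw,
+ *	    I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ *	    I40E_AQ_LLDP_MIB_LOCAL, mib_buf, mib_buf_size,
+ *	    &local_len, NULL, NULL);
+ */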
+
+/**
+ * i40e_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the hw struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB.
+ **/
+enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_set_local_mib *cmd =
+ (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_lldp_set_local_mib);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = mib_type;
+ cmd->length = CPU_TO_LE16(buff_size);
+ cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((uintptr_t)buff));
+ cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((uintptr_t)buff));
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_aq_cfg_lldp_mib_change_event
+ * @hw: pointer to the hw struct
+ * @enable_update: Enable or Disable event posting
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes
+ **/
+enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_mib *cmd =
+ (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+
+ if (!enable_update)
+ cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to add
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be added
+ * @mib_len: length of the LLDP MIB returned in response
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add the specified TLV to the LLDP Local MIB for the given bridge type;
+ * it is the responsibility of the caller to make sure that the TLV is not
+ * already present in the LLDPDU.
+ * In return firmware will write the complete LLDP MIB with the newly
+ * added TLV in the response buffer.
+ **/
+enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+ void *buff, u16 buff_size, u16 tlv_len,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_add_tlv *cmd =
+ (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff || tlv_len == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->len = CPU_TO_LE16(tlv_len);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_update_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to update
+ * @buff_size: size of the buffer holding original and updated TLVs
+ * @old_len: Length of the Original TLV
+ * @new_len: Length of the Updated TLV
+ * @offset: offset of the updated TLV in the buff
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the specified TLV to the LLDP Local MIB for the given bridge type.
+ * Firmware will place the complete LLDP MIB in response buffer with the
+ * updated TLV.
+ **/
+enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_tlv *cmd =
+ (struct i40e_aqc_lldp_update_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff || offset == 0 ||
+ old_len == 0 || new_len == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->old_len = CPU_TO_LE16(old_len);
+ cmd->new_offset = CPU_TO_LE16(offset);
+ cmd->new_len = CPU_TO_LE16(new_len);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_delete_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: pointer to a user supplied buffer that has the TLV
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be deleted
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Delete the specified TLV from LLDP Local MIB for the given bridge type.
+ * The firmware places the entire LLDP MIB in the response buffer.
+ **/
+enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 tlv_len, u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_add_tlv *cmd =
+ (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+ cmd->len = CPU_TO_LE16(tlv_len);
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_stop_lldp
+ * @hw: pointer to the hw struct
+ * @shutdown_agent: TRUE if the LLDP Agent needs to be shut down
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent
+ **/
+enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop *cmd =
+ (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+
+ if (shutdown_agent)
+ cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_lldp
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports.
+ **/
+enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_start *cmd =
+ (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+
+ cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_cee_dcb_config
+ * @hw: pointer to the hw struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @buff_size: size of the buffer passed
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware
+ **/
+enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW
+ * @hw: pointer to the hw struct
+ * @start_agent: TRUE if the DCBx Agent needs to be started,
+ *               FALSE if the DCBx Agent needs to be stopped
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start/stop the embedded DCBx Agent.
+ **/
+enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+ bool start_agent,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop_start_specific_agent *cmd =
+ (struct i40e_aqc_lldp_stop_start_specific_agent *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_lldp_stop_start_spec_agent);
+
+ if (start_agent)
+ cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_udp_tunnel *cmd =
+ (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp =
+ (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+ cmd->udp_port = CPU_TO_LE16(udp_port);
+ cmd->protocol_type = protocol_index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && filter_index)
+ *filter_index = resp->index;
+
+ return status;
+}
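+
+/*
+ * Illustrative note (not part of the original change): a sketch of pairing
+ * the add/del tunnel calls for the IANA-assigned VXLAN port. Assumes an
+ * initialized 'hw'; the tunnel type index comes from i40e_adminq_cmd.h:
+ *
+ *	u8 filt_idx;
+ *
+ *	if (i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
+ *	    &filt_idx, NULL) == I40E_SUCCESS) {
+ *		... tunnel traffic flows ...
+ *		(void) i40e_aq_del_udp_tunnel(hw, filt_idx, NULL);
+ *	}
+ */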
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_udp_tunnel *cmd =
+ (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+ cmd->index = index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_switch_resource_alloc (0x0204)
+ * @hw: pointer to the hw struct
+ * @num_entries: pointer to u8 to store the number of resource entries returned
+ * @buf: pointer to a user supplied buffer. This buffer must be large enough
+ * to store the resource information for all resource types. Each
+ * resource type is an i40e_aqc_switch_resource_alloc_element_resp
+ * structure.
+ * @count: size, in elements, of the buffer provided
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Query the resources allocated to a function.
+ **/
+enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+ u8 *num_entries,
+ struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+ u16 count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_switch_resource_alloc *cmd_resp =
+ (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 length = count * sizeof(*buf);
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_resource_alloc);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+ if (!status && num_entries)
+ *num_entries = cmd_resp->num_entries;
+
+ return status;
+}
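+
+/*
+ * Illustrative note (not part of the original change): the response buffer
+ * is an array of per-resource-type elements, so a caller sizes it for the
+ * worst case and walks the first 'num_entries' slots. A minimal sketch
+ * (field names per i40e_adminq_cmd.h):
+ *
+ *	struct i40e_aqc_switch_resource_alloc_element_resp buf[32];
+ *	u8 n, i;
+ *
+ *	if (i40e_aq_get_switch_resource_alloc(hw, &n, buf,
+ *	    sizeof (buf) / sizeof (buf[0]), NULL) == I40E_SUCCESS) {
+ *		for (i = 0; i < n; i++)
+ *			... inspect buf[i].resource_type, buf[i].used ...
+ *	}
+ */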
+
+/**
+ * i40e_aq_delete_element - Delete switch element
+ * @hw: pointer to the hw struct
+ * @seid: the SEID to delete from the switch
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes a switch element from the switch.
+ **/
+enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
+ * @hw: pointer to the hw struct
+ * @flags: component flags
+ * @mac_seid: uplink seid (MAC SEID)
+ * @vsi_seid: connected vsi seid
+ * @ret_seid: seid of the created PV component
+ *
+ * This instantiates an i40e port virtualizer with the specified flags.
+ * Depending on the specified flags the port virtualizer can act as an
+ * 802.1Qbr port virtualizer or an 802.1Qbg S-component.
+ **/
+enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+ u16 mac_seid, u16 vsi_seid,
+ u16 *ret_seid)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_update_pv *cmd =
+ (struct i40e_aqc_add_update_pv *)&desc.params.raw;
+ struct i40e_aqc_add_update_pv_completion *resp =
+ (struct i40e_aqc_add_update_pv_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv);
+ cmd->command_flags = CPU_TO_LE16(flags);
+ cmd->uplink_seid = CPU_TO_LE16(mac_seid);
+ cmd->connected_seid = CPU_TO_LE16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ if (!status && ret_seid)
+ *ret_seid = LE16_TO_CPU(resp->pv_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_tag - Add an S/E-tag
+ * @hw: pointer to the hw struct
+ * @direct_to_queue: whether the S-tag should direct the flow to a specific queue
+ * @vsi_seid: VSI SEID to use this tag
+ * @tag: value of the tag
+ * @queue_num: queue number, only valid if direct_to_queue is TRUE
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates an S- or E-tag to a VSI in the switch complex. It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+ u16 vsi_seid, u16 tag, u16 queue_num,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_tag *cmd =
+ (struct i40e_aqc_add_tag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_tag_completion *resp =
+ (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->tag = CPU_TO_LE16(tag);
+ if (direct_to_queue) {
+ cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE);
+ cmd->queue_number = CPU_TO_LE16(queue_num);
+ }
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
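+
+/*
+ * Illustrative note (not part of the original change): a minimal sketch of
+ * attaching a tag to a VSI and steering matching traffic to queue 0,
+ * assuming valid 'hw' and 'vsi_seid' values and an arbitrary tag of 0x123:
+ *
+ *	u16 used, free;
+ *
+ *	if (i40e_aq_add_tag(hw, TRUE, vsi_seid, 0x123, 0,
+ *	    &used, &free, NULL) == I40E_SUCCESS) {
+ *		... 'used' and 'free' now reflect the PF's tag pool ...
+ *	}
+ */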
+
+/**
+ * i40e_aq_remove_tag - Remove an S- or E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID this tag is associated with
+ * @tag: value of the S-tag to delete
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an S- or E-tag from a VSI in the switch complex. It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 tag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_tag *cmd =
+ (struct i40e_aqc_remove_tag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_tag_completion *resp =
+ (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->tag = CPU_TO_LE16(tag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_mcast_etag - Add a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer SEID to associate the E-tag with
+ * @etag: value of E-tag to add
+ * @num_tags_in_buf: number of unicast E-tags in indirect buffer
+ * @buf: address of indirect buffer
+ * @tags_used: return value, number of E-tags in use by this port
+ * @tags_free: return value, number of unallocated E-tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates a multicast E-tag with a port virtualizer. It will return
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ *
+ * The indirect buffer pointed to by buf is a list of 2-byte E-tags,
+ * num_tags_in_buf long.
+ **/
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+ u16 etag, u8 num_tags_in_buf, void *buf,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_mcast_etag *cmd =
+ (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+ (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 length = sizeof(u16) * num_tags_in_buf;
+
+ if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0))
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_multicast_etag);
+
+ cmd->pv_seid = CPU_TO_LE16(pv_seid);
+ cmd->etag = CPU_TO_LE16(etag);
+ cmd->num_unicast_etags = num_tags_in_buf;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+ }
+
+ return status;
+}
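+
+/*
+ * Illustrative note (not part of the original change): the indirect buffer
+ * is simply a packed array of little-endian 16-bit unicast E-tags. A sketch
+ * with two arbitrary tag values, assuming valid 'hw' and 'pv_seid':
+ *
+ *	__le16 uc_etags[2];
+ *	u16 used, free;
+ *
+ *	uc_etags[0] = CPU_TO_LE16(0x100);
+ *	uc_etags[1] = CPU_TO_LE16(0x101);
+ *	(void) i40e_aq_add_mcast_etag(hw, pv_seid, 0x200, 2, uc_etags,
+ *	    &used, &free, NULL);
+ */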
+
+/**
+ * i40e_aq_remove_mcast_etag - Remove a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer SEID this E-tag is associated with
+ * @etag: value of the E-tag to remove
+ * @tags_used: return value, number of tags in use by this port
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an E-tag from the port virtualizer. It will return
+ * the number of tags allocated by the port, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+ u16 etag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_mcast_etag *cmd =
+ (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+ (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (pv_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_multicast_etag);
+
+ cmd->pv_seid = CPU_TO_LE16(pv_seid);
+ cmd->etag = CPU_TO_LE16(etag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_update_tag - Update an S/E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID using this S-tag
+ * @old_tag: old tag value
+ * @new_tag: new tag value
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This updates the value of the tag currently attached to this VSI
+ * in the switch complex. It will return the number of tags allocated
+ * by the PF, and the number of unallocated tags available.
+ **/
+enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 old_tag, u16 new_tag, u16 *tags_used,
+ u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_update_tag *cmd =
+ (struct i40e_aqc_update_tag *)&desc.params.raw;
+ struct i40e_aqc_update_tag_completion *resp =
+ (struct i40e_aqc_update_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->old_tag = CPU_TO_LE16(old_tag);
+ cmd->new_tag = CPU_TO_LE16(new_tag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs
+ * @hw: pointer to the hw struct
+ * @tcmap: TC map for requesting/releasing the ignore-PFC condition
+ * @request: request or release ignore PFC condition
+ * @tcmap_ret: return TCs for which PFC is currently ignored
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This sends a request to set or release the ignore-PFC condition for
+ * the given TCs. It returns the TCs for which PFC is currently ignored.
+ **/
+enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap,
+ bool request, u8 *tcmap_ret,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pfc_ignore *cmd_resp =
+ (struct i40e_aqc_pfc_ignore *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc);
+
+ if (request)
+ cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET;
+
+ cmd_resp->tc_bitmap = tcmap;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tcmap_ret != NULL)
+ *tcmap_ret = cmd_resp->tc_bitmap;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * When LLDP is handled in PF this command is used by the PF
+ * to notify EMP that a DCB setting is modified.
+ * When LLDP is handled in EMP this command is used by the PF
+ * to notify EMP whenever one of the following parameters gets
+ * modified:
+ * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA
+ * - PCIRTT in PRTDCB_GENC.PCIRTT
+ * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME.
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters. This command is used to
+ * allocate a set of smonVlanStats counters to a specific VLAN in a specific
+ * switch.
+ **/
+enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 *stat_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_statistics *cmd_resp =
+ (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if ((seid == 0) || (stat_index == NULL))
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics);
+
+ cmd_resp->seid = CPU_TO_LE16(seid);
+ cmd_resp->vlan = CPU_TO_LE16(vlan_id);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stat_index)
+ *stat_index = LE16_TO_CPU(cmd_resp->stat_index);
+
+ return status;
+}
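+
+/*
+ * Illustrative note (not part of the original change): a sketch of the
+ * allocate/release pairing for a VLAN statistics block, assuming valid
+ * 'hw' and switch 'seid' values and VLAN 100:
+ *
+ *	u16 stat_idx;
+ *
+ *	if (i40e_aq_add_statistics(hw, seid, 100, &stat_idx, NULL) ==
+ *	    I40E_SUCCESS) {
+ *		... read the smonVlanStats block at 'stat_idx' ...
+ *		(void) i40e_aq_remove_statistics(hw, seid, 100, stat_idx,
+ *		    NULL);
+ *	}
+ */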
+
+/**
+ * i40e_aq_remove_statistics - Remove a statistics block from a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters. This command is used to
+ * deallocate a set of smonVlanStats counters from a specific VLAN in a specific
+ * switch.
+ **/
+enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 stat_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_statistics *cmd =
+ (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_statistics);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan = CPU_TO_LE16(vlan_id);
+ cmd->stat_index = CPU_TO_LE16(stat_index);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_port_parameters - set physical port parameters.
+ * @hw: pointer to the hw struct
+ * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
+ * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
+ * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
+ * @double_vlan: if set, double VLAN is enabled
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
+ u16 bad_frame_vsi, bool save_bad_pac,
+ bool pad_short_pac, bool double_vlan,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_set_port_parameters *cmd;
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u16 command_flags = 0;
+
+ cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_port_parameters);
+
+ cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
+ if (save_bad_pac)
+ command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS;
+ if (pad_short_pac)
+ command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS;
+ if (double_vlan)
+ command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+ cmd->command_flags = CPU_TO_LE16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
+ * @hw: pointer to the hw struct
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Generic command handler for Tx scheduler AQ commands
+ **/
+static enum i40e_status_code i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+ void *buff, u16 buff_size,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_tx_sched_ind *cmd =
+ (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ enum i40e_status_code status;
+ bool cmd_param_flag = FALSE;
+
+ switch (opcode) {
+ case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+ case i40e_aqc_opc_configure_vsi_tc_bw:
+ case i40e_aqc_opc_enable_switching_comp_ets:
+ case i40e_aqc_opc_modify_switching_comp_ets:
+ case i40e_aqc_opc_disable_switching_comp_ets:
+ case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+ case i40e_aqc_opc_configure_switching_comp_bw_config:
+ cmd_param_flag = TRUE;
+ break;
+ case i40e_aqc_opc_query_vsi_bw_config:
+ case i40e_aqc_opc_query_vsi_ets_sla_config:
+ case i40e_aqc_opc_query_switching_comp_ets_config:
+ case i40e_aqc_opc_query_port_ets_config:
+ case i40e_aqc_opc_query_switching_comp_bw_config:
+ cmd_param_flag = FALSE;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (cmd_param_flag)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->vsi_seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
+
+ cmd->vsi_seid = CPU_TO_LE16(seid);
+ cmd->credit = CPU_TO_LE16(credit);
+ cmd->max_credit = max_credit;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: switching component seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_bw: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_switching_comp_bw_limit *cmd =
+ (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_switching_comp_bw_limit);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->credit = CPU_TO_LE16(credit);
+ cmd->max_bw = max_bw;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_tc_bw,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration per TC
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_ets_sla_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's per TC BW config
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI or switching component connected to Physical Port
+ * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_port_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_bw_config,
+ cmd_details);
+}
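+
+/*
+ * Illustrative note (not part of the original change): each wrapper above
+ * pairs one buffer layout with one opcode and lets i40e_aq_tx_sched_cmd()
+ * set the RD flag for configure commands while leaving it clear for
+ * queries. A minimal query sketch, assuming valid 'hw' and 'seid':
+ *
+ *	struct i40e_aqc_query_vsi_bw_config_resp bw_cfg;
+ *
+ *	if (i40e_aq_query_vsi_bw_config(hw, seid, &bw_cfg, NULL) ==
+ *	    I40E_SUCCESS) {
+ *		... bw_cfg now describes the VSI's per-TC bandwidth ...
+ *	}
+ */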
+
+/**
+ * i40e_validate_filter_settings
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Check and validate the filter control settings passed.
+ * The function checks for the valid filter/context sizes being
+ * passed for FCoE and PE.
+ *
+ * Returns I40E_SUCCESS if the values passed are valid and within
+ * range else returns an error.
+ **/
+static enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ u32 fcoe_cntx_size, fcoe_filt_size;
+ u32 pe_cntx_size, pe_filt_size;
+ u32 fcoe_fmax;
+ u32 val;
+
+ /* Validate FCoE settings passed */
+ switch (settings->fcoe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->fcoe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* Validate PE settings passed */
+ switch (settings->pe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ case I40E_HASH_FILTER_SIZE_64K:
+ case I40E_HASH_FILTER_SIZE_128K:
+ case I40E_HASH_FILTER_SIZE_256K:
+ case I40E_HASH_FILTER_SIZE_512K:
+ case I40E_HASH_FILTER_SIZE_1M:
+ pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ pe_filt_size <<= (u32)settings->pe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->pe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ case I40E_DMA_CNTX_SIZE_8K:
+ case I40E_DMA_CNTX_SIZE_16K:
+ case I40E_DMA_CNTX_SIZE_32K:
+ case I40E_DMA_CNTX_SIZE_64K:
+ case I40E_DMA_CNTX_SIZE_128K:
+ case I40E_DMA_CNTX_SIZE_256K:
+ pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ pe_cntx_size <<= (u32)settings->pe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
+ val = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
+ >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+ if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
+ return I40E_ERR_INVALID_SIZE;
+
+ return I40E_SUCCESS;
+}
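+
+/*
+ * Illustrative note (not part of the original change): the *_filt_num and
+ * *_cntx_num enums above are shift exponents applied to the base sizes in
+ * i40e_type.h. For example, with a hypothetical hash filter base of 512,
+ * a filt_num selecting "<< 3" yields 512 << 3 = 4096 filters, and the
+ * FCoE check above then requires filters + contexts to fit in PMFCOEFMAX.
+ */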
+
+/**
+ * i40e_set_filter_control
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Set the Queue Filters for PE/FCoE and enable filters required
+ * for a single PF. It is expected that these settings are programmed
+ * at driver initialization time.
+ **/
+enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ u32 hash_lut_size = 0;
+ u32 val;
+
+ if (!settings)
+ return I40E_ERR_PARAM;
+
+ /* Validate the input settings */
+ ret = i40e_validate_filter_settings(hw, settings);
+ if (ret)
+ return ret;
+
+ /* Read the PF Queue Filter control register */
+ val = rd32(hw, I40E_PFQF_CTL_0);
+
+ /* Program required PE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ /* Program required PE contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
+ val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEDSIZE_MASK;
+
+ /* Program required FCoE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ val |= ((u32)settings->fcoe_filt_num <<
+ I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ /* Program required FCoE DDP contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+ val |= ((u32)settings->fcoe_cntx_num <<
+ I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+
+ /* Program Hash LUT size for the PF */
+ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+ if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
+ hash_lut_size = 1;
+ val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+
+ /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
+ if (settings->enable_fdir)
+ val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+ if (settings->enable_ethtype)
+ val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
+ if (settings->enable_macvlan)
+ val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
+
+ wr32(hw, I40E_PFQF_CTL_0, val);
+
+ return I40E_SUCCESS;
+}
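+
+/*
+ * Illustrative note (not part of the original change): a minimal sketch of
+ * programming the filter control at attach time. The sizes chosen here are
+ * placeholders, not recommended values:
+ *
+ *	struct i40e_filter_control_settings fc;
+ *
+ *	(void) memset(&fc, 0, sizeof (fc));
+ *	fc.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
+ *	fc.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
+ *	fc.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
+ *	fc.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
+ *	fc.hash_lut_size = I40E_HASH_LUT_SIZE_512;
+ *	fc.enable_fdir = TRUE;
+ *	fc.enable_ethtype = TRUE;
+ *	fc.enable_macvlan = TRUE;
+ *	if (i40e_set_filter_control(hw, &fc) != I40E_SUCCESS)
+ *		... fail or fall back ...
+ */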
+
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: Flags that need to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: TRUE to add the control packet filter, FALSE to remove it
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command will add or remove a control packet filter for a control VSI.
+ * On return it updates the perfect-filter usage counts in the stats member.
+ **/
+enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd =
+ (struct i40e_aqc_add_remove_control_packet_filter *)
+ &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+ (struct i40e_aqc_add_remove_control_packet_filter_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ if (is_add) {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_control_packet_filter);
+ cmd->queue = CPU_TO_LE16(queue);
+ } else {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_control_packet_filter);
+ }
+
+ if (mac_addr)
+ i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS,
+ I40E_NONDMA_TO_NONDMA);
+
+ cmd->etype = CPU_TO_LE16(ethtype);
+ cmd->flags = CPU_TO_LE16(flags);
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stats) {
+ stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used);
+ stats->etype_used = LE16_TO_CPU(resp->etype_used);
+ stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free);
+ stats->etype_free = LE16_TO_CPU(resp->etype_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters to
+ * @filters: Buffer which contains the filters to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ u16 buff_len;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written under 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written under 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
+ u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write);
+ cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+ cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+ cmd_resp->data0 = CPU_TO_LE32(reg_val0);
+ cmd_resp->data1 = CPU_TO_LE32(reg_val1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of a first register to be modified
+ * @dw_count: number of alternate structure fields to write
+ * @buffer: pointer to the command buffer
+ *
+ * Write 'dw_count' dwords from 'buffer' to alternate structure
+ * starting at 'addr'.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_ind_write *cmd_resp =
+ (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buffer == NULL)
+ return I40E_ERR_PARAM;
+
+ /* Indirect command */
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_write_indirect);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (dw_count > (I40E_AQ_LARGE_BUF/4))
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd_resp->address = CPU_TO_LE32(addr);
+ cmd_resp->length = CPU_TO_LE32(dw_count);
+ cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_DWORD((uintptr_t)buffer));
+ cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((uintptr_t)buffer));
+
+ status = i40e_asq_send_command(hw, &desc, buffer,
+ I40E_LO_DWORD(4*dw_count), NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
+ * pointer is not passed then only the register at 'reg_addr0' is read.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reg_val0 == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+ cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+ cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ if (status == I40E_SUCCESS) {
+ *reg_val0 = LE32_TO_CPU(cmd_resp->data0);
+
+ if (reg_val1 != NULL)
+ *reg_val1 = LE32_TO_CPU(cmd_resp->data1);
+ }
+
+ return status;
+}
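+
+/*
+ * Illustrative note (not part of the original change): a sketch of reading
+ * two alternate structure dwords at hypothetical offsets; passing NULL for
+ * the second pointer reads only 'reg_addr0':
+ *
+ *	u32 lo, hi;
+ *
+ *	if (i40e_aq_alternate_read(hw, 0x10, &lo, 0x11, &hi) ==
+ *	    I40E_SUCCESS) {
+ *		... 'lo' and 'hi' hold the two dwords ...
+ *	}
+ */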
+
+/**
+ * i40e_aq_alternate_read_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of the alternate structure field
+ * @dw_count: number of alternate structure fields to read
+ * @buffer: pointer to the command buffer
+ *
+ * Read 'dw_count' dwords from alternate structure starting at 'addr' and
+ * place them in 'buffer'. The buffer should be allocated by the caller.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_ind_write *cmd_resp =
+ (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buffer == NULL)
+ return I40E_ERR_PARAM;
+
+ /* Indirect command */
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_read_indirect);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (dw_count > (I40E_AQ_LARGE_BUF/4))
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd_resp->address = CPU_TO_LE32(addr);
+ cmd_resp->length = CPU_TO_LE32(dw_count);
+ cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_DWORD((uintptr_t)buffer));
+ cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((uintptr_t)buffer));
+
+ status = i40e_asq_send_command(hw, &desc, buffer,
+ I40E_LO_DWORD(4*dw_count), NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_clear
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_clear_port);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write_done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: set on return if the SW should trigger a GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
+ u8 bios_mode, bool *reset_needed)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write_done *cmd =
+ (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reset_needed == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_write_done);
+
+ cmd->cmd_flags = CPU_TO_LE16(bios_mode);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ if (!status && reset_needed)
+ *reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) &
+ I40E_AQ_ALTERNATE_RESET_NEEDED) != 0);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_oem_mode
+ * @hw: pointer to the HW structure.
+ * @oem_mode: the OEM mode to be used
+ *
+ * Sets the device to a specific operating mode. Currently the only supported
+ * mode is no_clp, which causes FW to refrain from using Alternate RAM.
+ *
+ **/
+enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
+ u8 oem_mode)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write_done *cmd =
+ (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_set_mode);
+
+ cmd->cmd_flags = CPU_TO_LE16(oem_mode);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_resume_port_tx
+ * @hw: pointer to the hardware structure
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Resume port's Tx traffic
+ **/
+enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+ hw->bus.type = i40e_bus_type_pci_express;
+
+ switch (link_status & I40E_PCI_LINK_WIDTH) {
+ case I40E_PCI_LINK_WIDTH_1:
+ hw->bus.width = i40e_bus_width_pcie_x1;
+ break;
+ case I40E_PCI_LINK_WIDTH_2:
+ hw->bus.width = i40e_bus_width_pcie_x2;
+ break;
+ case I40E_PCI_LINK_WIDTH_4:
+ hw->bus.width = i40e_bus_width_pcie_x4;
+ break;
+ case I40E_PCI_LINK_WIDTH_8:
+ hw->bus.width = i40e_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = i40e_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & I40E_PCI_LINK_SPEED) {
+ case I40E_PCI_LINK_SPEED_2500:
+ hw->bus.speed = i40e_bus_speed_2500;
+ break;
+ case I40E_PCI_LINK_SPEED_5000:
+ hw->bus.speed = i40e_bus_speed_5000;
+ break;
+ case I40E_PCI_LINK_SPEED_8000:
+ hw->bus.speed = i40e_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = i40e_bus_speed_unknown;
+ break;
+ }
+}
+
+/**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_dump_internals *cmd =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ struct i40e_aqc_debug_dump_internals *resp =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_debug_dump_internals);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->cluster_id = cluster_id;
+ cmd->table_id = table_id;
+ cmd->idx = CPU_TO_LE32(start_index);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (ret_buff_size != NULL)
+ *ret_buff_size = LE16_TO_CPU(desc.datalen);
+ if (ret_next_table != NULL)
+ *ret_next_table = resp->table_id;
+ if (ret_next_index != NULL)
+ *ret_next_index = LE32_TO_CPU(resp->idx);
+ }
+
+ return status;
+}
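+
+/*
+ * Illustrative note (not part of the original change): the ret_next_*
+ * outputs let a caller walk a cluster iteratively. A sketch with the
+ * cluster/table ids and buffer size chosen arbitrarily:
+ *
+ *	u8 dump[512];
+ *	u8 table = 0, next_table;
+ *	u32 idx = 0, next_idx;
+ *	u16 ret_len;
+ *
+ *	while (i40e_aq_debug_dump(hw, 1, table, idx, sizeof (dump), dump,
+ *	    &ret_len, &next_table, &next_idx, NULL) == I40E_SUCCESS) {
+ *		... consume 'ret_len' bytes of 'dump' ...
+ *		table = next_table;
+ *		idx = next_idx;
+ *	}
+ */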
+
+/**
+ * i40e_read_bw_from_alt_ram
+ * @hw: pointer to the hardware structure
+ * @max_bw: pointer for max_bw read
+ * @min_bw: pointer for min_bw read
+ * @min_valid: pointer for bool that is TRUE if min_bw is a valid value
+ * @max_valid: pointer for bool that is TRUE if max_bw is a valid value
+ *
+ * Read bandwidth from the alternate RAM for the given PF.
+ **/
+enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw,
+ bool *min_valid, bool *max_valid)
+{
+ enum i40e_status_code status;
+ u32 max_bw_addr, min_bw_addr;
+
+ /* Calculate the address of the min/max bw registers */
+ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MAX_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+ min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MIN_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+
+ /* Read the bandwidths from alt ram */
+ status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
+ min_bw_addr, min_bw);
+
+ /* Only trust the bandwidth words if the alternate read succeeded */
+ if (status == I40E_SUCCESS) {
+ *min_valid = (*min_bw & I40E_ALT_BW_VALID_MASK) ? TRUE : FALSE;
+ *max_valid = (*max_bw & I40E_ALT_BW_VALID_MASK) ? TRUE : FALSE;
+ } else {
+ *min_valid = FALSE;
+ *max_valid = FALSE;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_configure_partition_bw
+ * @hw: pointer to the hardware structure
+ * @bw_data: Buffer holding valid PFs and BW limits
+ * @cmd_details: pointer to command details
+ *
+ * Configure partitions guaranteed/max bw
+ **/
+enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u16 bwd_size = sizeof(*bw_data);
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_partition_bw);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ if (bwd_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(bwd_size);
+
+ status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcode for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40e_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ enum i40e_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_asq_cmd_details details;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF
+ | I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ if (!cmd_details) {
+ i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM);
+ details.async = TRUE;
+ cmd_details = &details;
+ }
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+ return status;
+}
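+
+/*
+ * Illustrative note (not part of the original change): VF drivers use this
+ * path for all virtchnl traffic; i40e_vf_reset() below is the simplest
+ * caller. A sketch of an explicit request, assuming an initialized VF 'hw':
+ *
+ *	(void) i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ *	    I40E_SUCCESS, NULL, 0, NULL);
+ */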
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg)
+{
+ struct i40e_virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ hw->dev_caps.iwarp = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ i40e_memcpy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr,
+ I40E_ETH_LENGTH_OF_ADDRESS,
+ I40E_NONDMA_TO_NONDMA);
+ i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ I40E_ETH_LENGTH_OF_ADDRESS,
+ I40E_NONDMA_TO_NONDMA);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
+{
+ return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_SUCCESS, NULL, 0, NULL);
+}
diff --git a/usr/src/uts/common/io/i40e/core/i40e_devids.h b/usr/src/uts/common/io/i40e/core/i40e_devids.h
new file mode 100644
index 0000000000..5b927bed9f
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_devids.h
@@ -0,0 +1,68 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_devids.h 284049 2015-06-05 22:52:42Z jfv $*/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Vendor ID */
+#define I40E_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_20G_KR2_A 0x1588
+#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+#ifdef X722_SUPPORT
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
+#endif /* X722_SUPPORT */
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
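
For context, these constants exist so the attach path can match PCI IDs. A hedged sketch of such a check follows; the vendor/device values would come from whatever bus glue the consuming driver uses, and the set of accepted IDs is illustrative only.

/* Illustrative only: accept a couple of the IDs defined above. */
static bool
i40e_example_supported(u16 vendor, u16 device)
{
	if (vendor != I40E_INTEL_VENDOR_ID)
		return (FALSE);

	/* All three QSFP variants are 40G parts. */
	if (i40e_is_40G_device(device))
		return (TRUE);

	return (device == I40E_DEV_ID_SFP_XL710 ||
	    device == I40E_DEV_ID_10G_BASE_T);
}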
diff --git a/usr/src/uts/common/io/i40e/core/i40e_hmc.c b/usr/src/uts/common/io/i40e/core/i40e_hmc.c
new file mode 100644
index 0000000000..3f0e6e8d5b
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_hmc.c
@@ -0,0 +1,373 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_hmc.c 284049 2015-06-05 22:52:42Z jfv $*/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_status.h"
+#include "i40e_alloc.h"
+#include "i40e_hmc.h"
+#ifndef I40E_NO_TYPE_HEADER
+#include "i40e_type.h"
+#endif
+
+/**
+ * i40e_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ **/
+enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+ enum i40e_memory_type mem_type;
+ bool dma_mem_alloc_done = FALSE;
+ struct i40e_dma_mem mem;
+ u64 alloc_len;
+
+ if (NULL == hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (sd_index >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
+ goto exit;
+ }
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (I40E_SD_TYPE_PAGED == type) {
+ mem_type = i40e_mem_pd;
+ alloc_len = I40E_HMC_PAGED_BP_SIZE;
+ } else {
+ mem_type = i40e_mem_bp_jumbo;
+ alloc_len = direct_mode_sz;
+ }
+
+ /* allocate a 4K pd page or 2M backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ dma_mem_alloc_done = TRUE;
+ if (I40E_SD_TYPE_PAGED == type) {
+ ret_code = i40e_allocate_virt_mem(hw,
+ &sd_entry->u.pd_table.pd_entry_virt_mem,
+ sizeof(struct i40e_hmc_pd_entry) * 512);
+ if (ret_code)
+ goto exit;
+ sd_entry->u.pd_table.pd_entry =
+ (struct i40e_hmc_pd_entry *)
+ sd_entry->u.pd_table.pd_entry_virt_mem.va;
+ i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
+ &mem, sizeof(struct i40e_dma_mem),
+ I40E_NONDMA_TO_NONDMA);
+ } else {
+ i40e_memcpy(&sd_entry->u.bp.addr,
+ &mem, sizeof(struct i40e_dma_mem),
+ I40E_NONDMA_TO_NONDMA);
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+ /* initialize the sd entry */
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+ /* increment the ref count */
+ I40E_INC_SD_REFCNT(&hmc_info->sd_table);
+ }
+ /* Increment backing page reference count */
+ if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
+ I40E_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+ if (I40E_SUCCESS != ret_code)
+ if (dma_mem_alloc_done)
+ i40e_free_dma_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use this preallocated page instead of allocating a new one
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds the pd_entry to the pd_table
+ * 3. Marks the entry valid in the i40e_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for the pd should be pinned down, physically contiguous,
+ * aligned on a 4K boundary, and zeroed.
+ * 2. It should be 4K in size.
+ **/
+enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_dma_mem mem;
+ struct i40e_dma_mem *page = &mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
+ goto exit;
+ }
+
+ /* find corresponding sd */
+ sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
+ if (I40E_SD_TYPE_PAGED !=
+ hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ goto exit;
+
+ rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = TRUE;
+ page = rsrc_pg;
+ } else {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ pd_entry->rsrc_pg = FALSE;
+ }
+
+ i40e_memcpy(&pd_entry->bp.addr, page,
+ sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
+ /* Set page address and valid bit */
+ page_desc = page->pa | 0x1;
+
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+
+ /* Add the backing page physical address in the pd entry */
+ i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
+ I40E_NONDMA_TO_DMA);
+
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = TRUE;
+ I40E_INC_PD_REFCNT(pd_table);
+ }
+ I40E_INC_BP_REFCNT(&pd_entry->bp);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) or in the
+ * sd table (for direct address mode) invalid.
+ * 2. Writes to register PMPDINV to invalidate the backing page in the FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+
+ /* calculate index */
+ sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
+ goto exit;
+ }
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
+ goto exit;
+ }
+ /* get the entry and decrease its ref counter */
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ I40E_DEC_BP_REFCNT(&pd_entry->bp);
+ if (pd_entry->bp.ref_cnt)
+ goto exit;
+
+ /* mark the entry invalid */
+ pd_entry->valid = FALSE;
+ I40E_DEC_PD_REFCNT(pd_table);
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+
+ /* free memory here */
+ if (!pd_entry->rsrc_pg)
+ ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ if (!pd_table->ref_cnt)
+ i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_sd_bp - Prepares to remove a backing page from an sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ **/
+enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
+ if (sd_entry->u.bp.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+ /* mark the entry invalid */
+ sd_entry->valid = FALSE;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: used to distinguish between VF and PF
+ **/
+enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+ return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
+}
+
+/**
+ * i40e_prep_remove_pd_page - Prepares to remove a PD page from an sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ **/
+enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+
+ /* mark the entry invalid */
+ sd_entry->valid = FALSE;
+
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page_new - Removes a PD page from an sd entry.
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ * @is_pf: used to distinguish between VF and PF
+ **/
+enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+ return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
+}
diff --git a/usr/src/uts/common/io/i40e/core/i40e_hmc.h b/usr/src/uts/common/io/i40e/core/i40e_hmc.h
new file mode 100644
index 0000000000..d6e1f93421
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_hmc.h
@@ -0,0 +1,246 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_hmc.h 284049 2015-06-05 22:52:42Z jfv $*/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool rsrc_pg;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw bookkeeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+ /* equals the pci func num for the PF; dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
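
Taken together, these structures form a two-level translation: each segment descriptor covers 2M of FPM space either directly (one 2M backing page) or via a PD page of 512 entries, each backing 4K. A compact summary of the index arithmetic used throughout i40e_hmc.c, using only the constants defined above:

/*
 * For a global page index idx (see i40e_remove_pd_bp() and friends):
 *
 *   sd_idx     = idx / I40E_HMC_PD_CNT_IN_SD;   which 2M segment
 *   rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;   which 4K page within it
 *
 * direct SD:  sd_entry.u.bp        -> one I40E_HMC_DIRECT_BP_SIZE page
 * paged SD:   sd_entry.u.pd_table  -> 512 x I40E_HMC_PAGED_BP_SIZE = 2M
 */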
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: whether the sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(I40E_HI_DWORD(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: whether the sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
+
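A worked instance of the SD macro, with illustrative numbers only: a hypothetical 128-byte object type based at FPM offset 0, creating 512 objects starting at index 16000.

/*
 *   fpm_addr  = 0 + 128 * 16000              = 2048000
 *   fpm_limit = 2048000 + 128 * 512          = 2113536
 *   sd_idx    = 2048000 / 0x200000           = 0
 *   sd_limit  = (2113536 - 1) / 0x200000 + 1 = 2
 *
 * so the request spans segment descriptors 0 and 1, and the +1 on the
 * limit makes the usual "for (j = sd_idx; j < sd_limit; j++)" loop work.
 */
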
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
+enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
+enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.c b/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.c
new file mode 100644
index 0000000000..2b2fa4f8f9
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.c
@@ -0,0 +1,1412 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_lan_hmc.c 284049 2015-06-05 22:52:42Z jfv $*/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_type.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_prototype.h"
+
+/* lan specific interface functions */
+
+/**
+ * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
+ * @offset: base address offset needing alignment
+ *
+ * Aligns the layer 2 function private memory so it's 512-byte aligned.
+ **/
+static u64 i40e_align_l2obj_base(u64 offset)
+{
+ u64 aligned_offset = offset;
+
+ if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
+ aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
+ (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
+
+ return aligned_offset;
+}
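
A quick numeric check of the alignment helper, with illustrative values:

/*
 *   i40e_align_l2obj_base(1300):  1300 % 512 == 276,
 *       so 1300 + (512 - 276) == 1536 == 3 * 512
 *   i40e_align_l2obj_base(1024):  already aligned, returned unchanged
 */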
+
+/**
+ * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * Calculates the maximum amount of memory for the function required, based
+ * on the number of resources it must provide context for.
+ **/
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num)
+{
+ u64 fpm_size = 0;
+
+ fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ return fpm_size;
+}
+
+/**
+ * i40e_init_lan_hmc - initialize i40e_hmc_info struct
+ * @hw: pointer to the HW structure
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * This function will be called once per physical function initialization.
+ * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
+ * the driver's provided input, as well as information from the HMC itself
+ * loaded from NVRAM.
+ *
+ * Assumptions:
+ * - HMC Resource Profile has been selected before calling this function.
+ **/
+enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
+{
+ struct i40e_hmc_obj_info *obj, *full_obj;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 l2fpm_size;
+ u32 size_exp;
+
+ hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
+ hw->hmc.hmc_fn_id = hw->pf_id;
+
+ /* allocate memory for hmc_obj */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
+ sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
+ hw->hmc.hmc_obj_virt_mem.va;
+
+ /* The full object will be used to create the LAN HMC SD */
+ full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
+ full_obj->max_cnt = 0;
+ full_obj->cnt = 0;
+ full_obj->base = 0;
+ full_obj->size = 0;
+
+ /* Tx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = txq_num;
+ obj->base = 0;
+ size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (txq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ txq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* Rx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = rxq_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (rxq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ rxq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
+ obj->cnt = fcoe_cntx_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_cntx_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_cntx_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE filter information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ obj->cnt = fcoe_filt_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
+ obj->size = BIT_ULL(size_exp);
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_filt_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_filt_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ hw->hmc.first_sd_index = 0;
+ hw->hmc.sd_table.ref_cnt = 0;
+ l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
+ fcoe_filt_num);
+ if (NULL == hw->hmc.sd_table.sd_entry) {
+ hw->hmc.sd_table.sd_cnt = (u32)
+ (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
+ I40E_HMC_DIRECT_BP_SIZE;
+
+ /* allocate the sd_entry members in the sd_table */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
+ (sizeof(struct i40e_hmc_sd_entry) *
+ hw->hmc.sd_table.sd_cnt));
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.sd_table.sd_entry =
+ (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
+ }
+ /* store in the LAN full object for later */
+ full_obj->size = l2fpm_size;
+
+init_lan_hmc_out:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page - Remove a page from the page descriptor table
+ * @hw: pointer to the HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) invalid
+ * 2. Writes to register PMPDINV to invalidate the backing page in the FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. The caller can deallocate the memory used by the pd after this
+ * function returns.
+ **/
+static enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
+ ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);
+
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp - remove a backing page from a segment descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in the sd table (for direct address mode) invalid
+ * 2. Writes to registers PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
+ * set to 0) and PMSDDATAHIGH to invalidate the sd page
+ * 3. Decrements the ref count for the sd_entry
+ * assumptions:
+ * 1. The caller can deallocate the memory used by the backing storage
+ * after this function returns.
+ **/
+static enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
+ ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);
+
+ return ret_code;
+}
+
+/**
+ * i40e_create_lan_hmc_object - allocate backing store for hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_lan_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ **/
+enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 pd_idx = 0, pd_lmt = 0;
+ bool pd_error = FALSE;
+ u32 sd_idx, sd_lmt;
+ u64 sd_size;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
+ goto exit;
+ }
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+ /* find pd index */
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ /* This covers cases where you may not want an SD with the full 2M of
+ * memory but something smaller. If no size is filled out, the
+ * function defaults the SD size to 2M.
+ */
+ if (info->direct_mode_sz == 0)
+ sd_size = I40E_HMC_DIRECT_BP_SIZE;
+ else
+ sd_size = info->direct_mode_sz;
+
+ /* check if all the sds are valid. If not, allocate a page and
+ * initialize it.
+ */
+ for (j = sd_idx; j < sd_lmt; j++) {
+ /* update the sd table entry */
+ ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
+ info->entry_type,
+ sd_size);
+ if (I40E_SUCCESS != ret_code)
+ goto exit_sd_error;
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ /* check if all the pds in this sd are valid. If not,
+ * allocate a page and initialize it.
+ */
+
+ /* find pd_idx and pd_lmt in this sd */
+ pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt,
+ ((j + 1) * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = i40e_add_pd_table_entry(hw,
+ info->hmc_info,
+ i, NULL);
+ if (I40E_SUCCESS != ret_code) {
+ pd_error = TRUE;
+ break;
+ }
+ }
+ if (pd_error) {
+ /* remove the backing pages from pd_idx1 to i */
+ while (i && (i > pd_idx1)) {
+ i40e_remove_pd_bp(hw, info->hmc_info,
+ (i - 1));
+ i--;
+ }
+ }
+ }
+ if (!sd_entry->valid) {
+ sd_entry->valid = TRUE;
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ I40E_SET_PF_SD_ENTRY(hw,
+ sd_entry->u.pd_table.pd_page_addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ goto exit;
+ }
+ }
+ }
+ goto exit;
+
+exit_sd_error:
+ /* cleanup for sd entries from j to sd_idx */
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx,
+ ((j - 1) * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++)
+ i40e_remove_pd_bp(hw, info->hmc_info, i);
+ i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ break;
+ }
+ j--;
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_configure_lan_hmc - prepare the HMC backing store
+ * @hw: pointer to the hw structure
+ * @model: the model for the layout of the SD/PD tables
+ *
+ * - This function will be called once per physical function initialization.
+ * - This function will be called after i40e_init_lan_hmc() and before
+ * any LAN/FCoE HMC objects can be created.
+ **/
+enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
+{
+ struct i40e_hmc_lan_create_obj_info info;
+ u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+ struct i40e_hmc_obj_info *obj;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ /* Initialize part of the create object info struct */
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
+
+ /* Build the SD entry for the LAN objects */
+ switch (model) {
+ case I40E_HMC_MODEL_DIRECT_PREFERRED:
+ case I40E_HMC_MODEL_DIRECT_ONLY:
+ info.entry_type = I40E_SD_TYPE_DIRECT;
+ /* Make one big object, a single SD */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if ((ret_code != I40E_SUCCESS) &&
+     (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ goto try_type_paged;
+ else if (ret_code != I40E_SUCCESS)
+ goto configure_lan_hmc_out;
+ /* else clause falls through the break */
+ break;
+ case I40E_HMC_MODEL_PAGED_ONLY:
+try_type_paged:
+ info.entry_type = I40E_SD_TYPE_PAGED;
+ /* Make one big object in the PD table */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if (ret_code != I40E_SUCCESS)
+ goto configure_lan_hmc_out;
+ break;
+ default:
+ /* unsupported type */
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
+ ret_code);
+ goto configure_lan_hmc_out;
+ }
+
+ /* Configure and program the FPM registers so objects can be created */
+
+ /* Tx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
+
+ /* Rx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE filters */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
+
+configure_lan_hmc_out:
+ return ret_code;
+}
+
+/**
+ * i40e_delete_lan_hmc_object - remove hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_lan_delete_obj_info struct
+ *
+ * This will de-populate the SDs and PDs. It frees
+ * the memory for PDs and backing storage. After this function returns,
+ * the caller should deallocate the memory previously allocated for
+ * bookkeeping information about PDs and backing storage.
+ **/
+enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_table *pd_table;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 sd_idx, sd_lmt;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
+ goto exit;
+ }
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
+
+ if (I40E_SD_TYPE_PAGED !=
+ info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ continue;
+
+ rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
+
+ pd_table =
+ &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ }
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case I40E_SD_TYPE_DIRECT:
+ ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ break;
+ case I40E_SD_TYPE_PAGED:
+ ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ break;
+ default:
+ break;
+ }
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
+ * @hw: pointer to the hw structure
+ *
+ * This must be called by drivers as they are shutting down and being
+ * removed from the OS.
+ **/
+enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+{
+ struct i40e_hmc_lan_delete_obj_info info;
+ enum i40e_status_code ret_code;
+
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.count = 1;
+
+ /* delete the object */
+ ret_code = i40e_delete_lan_hmc_object(hw, &info);
+
+ /* free the SD table entry for LAN */
+ i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
+ hw->hmc.sd_table.sd_cnt = 0;
+ hw->hmc.sd_table.sd_entry = NULL;
+
+ /* free memory used for hmc_obj */
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+ hw->hmc.hmc_obj = NULL;
+
+ return ret_code;
+}
+
+#define I40E_HMC_STORE(_struct, _ele) \
+ offsetof(struct _struct, _ele), \
+ FIELD_SIZEOF(struct _struct, _ele)
+
+struct i40e_context_ele {
+ u16 offset;
+ u16 size_of;
+ u16 width;
+ u16 lsb;
+};
+
+/* LAN Tx Queue Context */
+static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
+ /* Field Width LSB */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 },
+/* line 1 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 },
+/* line 7 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) },
+ { 0 }
+};
+
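To make the table above concrete, here is how one row drives the packed context write. The numbers are simply read off the qlen entry; size_of comes from FIELD_SIZEOF, which is 2 here on the assumption that qlen is a u16 field, and the helpers referenced appear below in this file.

/*
 * {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128}:
 *
 *   lsb   = 161  ->  byte offset 161 / 8 = 20, bit shift 161 % 8 = 1
 *   width = 13   ->  mask = BIT(13) - 1 = 0x1fff
 *
 * i40e_set_hmc_context() dispatches on size_of (2, so i40e_write_word()),
 * which masks the source value, shifts value and mask left by 1, and
 * read-modify-writes bytes 20-21 of the context image.
 */
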
+/* LAN Rx Queue Context */
+static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
+ /* Field Width LSB */
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
+ { 0 }
+};
+
+/**
+ * i40e_write_byte - replace HMC context byte
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_byte(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u8 src_byte, dest_byte, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ src_byte = *from;
+ src_byte &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_byte <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+ dest_byte &= ~mask; /* get the bits not changing */
+ dest_byte |= src_byte; /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_word - replace HMC context word
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_word(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u16 src_word, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le16 dest_word;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_word = *(u16 *)from;
+ src_word &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_word <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
+
+ dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
+ dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_dword - replace HMC context dword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_dword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u32 src_dword, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le32 dest_dword;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 32 on an x86 machine, then the shift
+ * operation will not work because the SHL instruction's count is masked
+ * to 5 bits, so the shift would do nothing
+ */
+ if (ce_info->width < 32)
+ mask = BIT(ce_info->width) - 1;
+ else
+ mask = ~(u32)0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_dword = *(u32 *)from;
+ src_dword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_dword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
+
+ dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
+ dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
+}
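
The width check above is worth a second look, since the failure mode is silent. A small illustration of plain C semantics, not specific to this driver:

/*
 * On x86 the scalar shift count is taken modulo the operand width, so
 * for a 32-bit field:
 *
 *   mask = BIT(32) - 1;     1U << 32 acts like 1U << 0, mask becomes 0
 *   mask = ~(u32)0;         the all-ones mask actually intended
 *
 * The C standard makes the oversized shift undefined outright, so the
 * explicit branch is needed on every architecture, not just x86.
 */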
+
+/**
+ * i40e_write_qword - replace HMC context qword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_qword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u64 src_qword, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le64 dest_qword;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 64 on an x86 machine, then the shift
+ * operation will not work because the SHL instruction's count is masked
+ * to 6 bits, so the shift would do nothing
+ */
+ if (ce_info->width < 64)
+ mask = BIT_ULL(ce_info->width) - 1;
+ else
+ mask = ~(u64)0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_qword = *(u64 *)from;
+ src_qword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_qword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
+
+ dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
+ dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_byte - read HMC context byte into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_byte(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u8 dest_byte, mask;
+ u8 *src, *target;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+ dest_byte &= ~(mask);
+
+ dest_byte >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_word - read HMC context word into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_word(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u16 dest_word, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le16 src_word;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_word &= ~(CPU_TO_LE16(mask));
+
+ /* get the data back into host order before shifting */
+ dest_word = LE16_TO_CPU(src_word);
+
+ dest_word >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_dword - read HMC context dword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_dword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u32 dest_dword, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le32 src_dword;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 32 on an x86 machine, then the shift
+ * operation will not work because the SHL instruction's count is masked
+ * to 5 bits, so the shift would do nothing
+ */
+ if (ce_info->width < 32)
+ mask = BIT(ce_info->width) - 1;
+ else
+ mask = ~(u32)0;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_dword &= ~(CPU_TO_LE32(mask));
+
+ /* get the data back into host order before shifting */
+ dest_dword = LE32_TO_CPU(src_dword);
+
+ dest_dword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
+ I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_qword - read HMC context qword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_qword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u64 dest_qword, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le64 src_qword;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 64 on an x86 machine, then the shift
+ * operation will not work because the SHL instruction's count is masked
+ * to 6 bits, so the shift would do nothing
+ */
+ if (ce_info->width < 64)
+ mask = BIT_ULL(ce_info->width) - 1;
+ else
+ mask = ~(u64)0;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_qword &= CPU_TO_LE64(mask);
+
+ /* get the data back into host order before shifting */
+ dest_qword = LE64_TO_CPU(src_qword);
+
+ dest_qword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
+ I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_get_hmc_context - extract HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+ switch (ce_info[f].size_of) {
+ case 1:
+ i40e_read_byte(context_bytes, &ce_info[f], dest);
+ break;
+ case 2:
+ i40e_read_word(context_bytes, &ce_info[f], dest);
+ break;
+ case 4:
+ i40e_read_dword(context_bytes, &ce_info[f], dest);
+ break;
+ case 8:
+ i40e_read_qword(context_bytes, &ce_info[f], dest);
+ break;
+ default:
+ /* nothing to do, just keep going */
+ break;
+ }
+ }
+
+ return I40E_SUCCESS;
+}
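+
+/*
+ * For reference, each table driving the walk above is a zero-terminated
+ * array of i40e_context_ele entries of the rough shape (illustrative
+ * values; see the real i40e_hmc_txq_ce_info defined earlier in this file):
+ *
+ *   { offsetof(struct i40e_hmc_obj_txq, head), sizeof (u16), 13, 0 }
+ *
+ * i.e. { offset in struct, size_of in bytes, width in bits, lsb in the
+ * packed context }; a width of 0 terminates the loop.
+ */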
+
+/**
+ * i40e_clear_hmc_context - zero out the HMC context bits
+ * @hw: the hardware struct
+ * @context_bytes: pointer to the context bit array (DMA memory)
+ * @hmc_type: the type of HMC resource
+ **/
+static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
+{
+ /* clean the bit array */
+ i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
+ I40E_DMA_MEM);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_hmc_context - replace HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+
+ /* we have to deal with each element of the HMC using the
+ * correct size so that we are correct regardless of the
+ * endianness of the machine
+ */
+ switch (ce_info[f].size_of) {
+ case 1:
+ i40e_write_byte(context_bytes, &ce_info[f], dest);
+ break;
+ case 2:
+ i40e_write_word(context_bytes, &ce_info[f], dest);
+ break;
+ case 4:
+ i40e_write_dword(context_bytes, &ce_info[f], dest);
+ break;
+ case 8:
+ i40e_write_qword(context_bytes, &ce_info[f], dest);
+ break;
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_hmc_get_object_va - retrieves an object's virtual address
+ * @hw: pointer to the hw structure
+ * @object_base: pointer to u64 to get the va
+ * @rsrc_type: the hmc resource type
+ * @obj_idx: hmc object index
+ *
+ * This function retrieves the object's virtual address from the object
+ * base pointer. This function is used for LAN Queue contexts.
+ **/
+static
+enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
+ u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
+{
+ u32 obj_offset_in_sd, obj_offset_in_pd;
+ struct i40e_hmc_info *hmc_info = &hw->hmc;
+ struct i40e_hmc_sd_entry *sd_entry;
+ struct i40e_hmc_pd_entry *pd_entry;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 obj_offset_in_fpm;
+ u32 sd_idx, sd_lmt;
+
+ if (NULL == hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (NULL == hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
+ goto exit;
+ }
+ if (NULL == object_base) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
+ goto exit;
+ }
+ if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
+ DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
+ ret_code);
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ goto exit;
+ }
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &sd_idx, &sd_lmt);
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
+ hmc_info->hmc_obj[rsrc_type].size * obj_idx;
+
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &pd_idx, &pd_lmt);
+ rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
+ obj_offset_in_pd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_PAGED_BP_SIZE);
+ *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
+ } else {
+ obj_offset_in_sd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_DIRECT_BP_SIZE);
+ *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
+ }
+exit:
+ return ret_code;
+}
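+
+/*
+ * Sketch of the direct-mode arithmetic above with hypothetical numbers:
+ * if the Tx queue objects have FPM base 0x1000 and each context takes
+ * I40E_HMC_OBJ_SIZE_TXQ (128) bytes, then for obj_idx = 5
+ *
+ *   obj_offset_in_fpm = 0x1000 + 128 * 5              = 0x1280
+ *   obj_offset_in_sd  = 0x1280 % 2MB direct BP size   = 0x1280
+ *
+ * and the returned VA is sd_entry->u.bp.addr.va + 0x1280.
+ */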
+
+/**
+ * i40e_get_lan_tx_queue_context - return the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_get_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
+}
+
+/**
+ * i40e_set_lan_tx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_get_lan_rx_queue_context - return the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_get_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
+}
+
+/**
+ * i40e_set_lan_rx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
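+
+/*
+ * A minimal sketch of how a consumer is expected to use the pairs above at
+ * queue setup time (error handling and most fields elided; tx_ring_dma and
+ * tx_ring_size are placeholders, and the base field is in 128-byte units):
+ *
+ *   struct i40e_hmc_obj_txq tctx;
+ *
+ *   (void) i40e_clear_lan_tx_queue_context(hw, queue);
+ *   i40e_memset(&tctx, 0, sizeof(tctx), I40E_NONDMA_MEM);
+ *   tctx.base = tx_ring_dma / 128;
+ *   tctx.qlen = tx_ring_size;
+ *   (void) i40e_set_lan_tx_queue_context(hw, queue, &tctx);
+ */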
diff --git a/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.h b/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.h
new file mode 100644
index 0000000000..2a575264ab
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_lan_hmc.h
@@ -0,0 +1,201 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_lan_hmc.h 283119 2015-05-19 18:35:18Z jhb $*/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u16 cpuid; /* bigger than needed, see above for reason */
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u16 dbuff; /* bigger than needed, see above for reason */
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u16 hbuff; /* bigger than needed, see above for reason */
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u32 rxmax; /* bigger than needed, see above for reason */
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
+};
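+
+/*
+ * The dbuff/hbuff fields are stored in the units implied by the shifts
+ * above; e.g. a hypothetical 2048-byte data buffer and 256-byte header
+ * buffer would be programmed as
+ *
+ *   rctx.dbuff = 2048 >> I40E_RXQ_CTX_DBUFF_SHIFT;   (2048 / 128 = 16)
+ *   rctx.hbuff = 256 >> I40E_RXQ_CTX_HBUFF_SHIFT;    ( 256 /  64 =  4)
+ */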
+
+/* Tx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u8 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purposes only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
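+
+/*
+ * The enum values above are log2 of the context size in bytes, so they
+ * pair up with the byte-size constants below, e.g.
+ *
+ *   1 << I40E_HMC_LAN_OBJ_SZ_128 == 0x80 == I40E_HMC_OBJ_SIZE_TXQ
+ */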
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num);
+enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info);
+enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info);
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/usr/src/uts/common/io/i40e/core/i40e_nvm.c b/usr/src/uts/common/io/i40e/core/i40e_nvm.c
new file mode 100644
index 0000000000..04d61bb969
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_nvm.c
@@ -0,0 +1,712 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_nvm.c 284049 2015-06-05 22:52:42Z jfv $*/
+
+#include "i40e_prototype.h"
+
+enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command);
+
+/**
+ * i40e_init_nvm - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers and the NVM info structure. Should be called
+ * once per NVM initialization, e.g. inside i40e_init_shared_code().
+ * Note that the term NVM is used here (and in all methods covered in this
+ * file) as an equivalent of the FLASH part mapped into the SR; the FLASH is
+ * always accessed through the Shadow RAM.
+ **/
+enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
+{
+ struct i40e_nvm_info *nvm = &hw->nvm;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 fla, gens;
+ u8 sr_size;
+
+ DEBUGFUNC("i40e_init_nvm");
+
+ /* The SR size is stored regardless of the nvm programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens = rd32(hw, I40E_GLNVM_GENS);
+ sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
+ I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+ /* Convert to words (the field encodes the SR size as 2^sr_size KB) */
+ nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = rd32(hw, I40E_GLNVM_FLA);
+ if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
+ /* Max NVM timeout */
+ nvm->timeout = I40E_MAX_NVM_TIMEOUT;
+ nvm->blank_nvm_mode = FALSE;
+ } else { /* Blank programming mode */
+ nvm->blank_nvm_mode = TRUE;
+ ret_code = I40E_ERR_NVM_BLANK_MODE;
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * This function will request NVM ownership with the requested access type
+ * (read or write) via the proper Admin Command.
+ **/
+enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 gtime, timeout;
+ u64 time_left = 0;
+
+ DEBUGFUNC("i40e_acquire_nvm");
+
+ if (hw->nvm.blank_nvm_mode)
+ goto i40e_acquire_nvm_exit;
+
+ ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
+ 0, &time_left, NULL);
+ /* Reading the Global Device Timer */
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+
+ /* Store the timeout */
+ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
+
+ if (ret_code)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
+ access, time_left, ret_code, hw->aq.asq_last_status);
+
+ if (ret_code && time_left) {
+ /* Poll until the current NVM owner times out */
+ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
+ while ((gtime < timeout) && time_left) {
+ i40e_msec_delay(10);
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ ret_code = i40e_aq_request_resource(hw,
+ I40E_NVM_RESOURCE_ID,
+ access, 0, &time_left,
+ NULL);
+ if (ret_code == I40E_SUCCESS) {
+ hw->nvm.hw_semaphore_timeout =
+ I40E_MS_TO_GTIME(time_left) + gtime;
+ break;
+ }
+ }
+ if (ret_code != I40E_SUCCESS) {
+ hw->nvm.hw_semaphore_timeout = 0;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
+ time_left, ret_code, hw->aq.asq_last_status);
+ }
+ }
+
+i40e_acquire_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * This function will release NVM resource via the proper Admin Command.
+ **/
+void i40e_release_nvm(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 total_delay = 0;
+
+ DEBUGFUNC("i40e_release_nvm");
+
+ if (hw->nvm.blank_nvm_mode)
+ return;
+
+ ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
+ /* there are some rare cases when trying to release the resource
+ * results in an admin Q timeout, so handle them correctly
+ */
+ while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
+ (total_delay < hw->aq.asq_cmd_timeout)) {
+ i40e_msec_delay(1);
+ ret_code = i40e_aq_release_resource(hw,
+ I40E_NVM_RESOURCE_ID, 0, NULL);
+ total_delay++;
+ }
+}
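+
+/*
+ * A minimal sketch of the intended acquire/read/release pattern (error
+ * handling abbreviated):
+ *
+ *   u16 word;
+ *
+ *   if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) {
+ *           (void) i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &word);
+ *           i40e_release_nvm(hw);
+ *   }
+ */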
+
+/**
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
+ * @hw: pointer to the HW structure
+ *
+ * Polls the SRCTL Shadow RAM register done bit.
+ **/
+static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+ u32 srctl, wait_cnt;
+
+ DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
+
+ /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
+ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
+ srctl = rd32(hw, I40E_GLNVM_SRCTL);
+ if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
+ ret_code = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(5);
+ }
+ if (ret_code == I40E_ERR_TIMEOUT)
+ i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+#ifdef X722_SUPPORT
+ if (hw->mac.type == I40E_MAC_X722)
+ return i40e_read_nvm_word_aq(hw, offset, data);
+#endif
+ return i40e_read_nvm_word_srctl(hw, offset, data);
+}
+
+/**
+ * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+ u32 sr_reg;
+
+ DEBUGFUNC("i40e_read_nvm_word_srctl");
+
+ if (offset >= hw->nvm.sr_size) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
+ offset, hw->nvm.sr_size);
+ ret_code = I40E_ERR_PARAM;
+ goto read_nvm_exit;
+ }
+
+ /* Poll the done bit first */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (ret_code == I40E_SUCCESS) {
+ /* Write the address and start reading */
+ sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ BIT(I40E_GLNVM_SRCTL_START_SHIFT);
+ wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+ /* Poll I40E_GLNVM_SRCTL until the done bit is set */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (ret_code == I40E_SUCCESS) {
+ sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+ *data = (u16)((sr_reg &
+ I40E_GLNVM_SRDATA_RDDATA_MASK)
+ >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ }
+ }
+ if (ret_code != I40E_SUCCESS)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+ offset);
+
+read_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM via the AdminQ read command.
+ **/
+enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+
+ DEBUGFUNC("i40e_read_nvm_word_aq");
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
+ *data = LE16_TO_CPU(*(__le16 *)data);
+
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the appropriate
+ * i40e_read_nvm_buffer_*() method. The buffer read must be preceded by the
+ * NVM ownership acquisition and followed by its release.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+#ifdef X722_SUPPORT
+ if (hw->mac.type == I40E_MAC_X722)
+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
+#endif
+ return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
+
+/**
+ * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the
+ * i40e_read_nvm_word_srctl() method. The buffer read must be preceded by
+ * the NVM ownership acquisition and followed by its release.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 index, word;
+
+ DEBUGFUNC("i40e_read_nvm_buffer_srctl");
+
+ /* Loop through the selected region */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
+ if (ret_code != I40E_SUCCESS)
+ break;
+ }
+
+ /* Update the number of words read from the Shadow RAM */
+ *words = word;
+
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code;
+ u16 read_size = *words;
+ bool last_cmd = FALSE;
+ u16 words_read = 0;
+ u16 i = 0;
+
+ DEBUGFUNC("i40e_read_nvm_buffer_aq");
+
+ do {
+ /* Calculate the number of words we should read in this step.
+ * The FVL AQ does not allow reading more than one page at a time or
+ * crossing page boundaries.
+ */
+ if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+ read_size = min(*words,
+ (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+ (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+ else
+ read_size = min((*words - words_read),
+ I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+ /* Check if this is last command, if so set proper flag */
+ if ((words_read + read_size) >= *words)
+ last_cmd = TRUE;
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+ data + words_read, last_cmd);
+ if (ret_code != I40E_SUCCESS)
+ goto read_nvm_buffer_aq_exit;
+
+ /* Increment counter for words already read and move offset to
+ * new read location
+ */
+ words_read += read_size;
+ offset += read_size;
+ } while (words_read < *words);
+
+ for (i = 0; i < *words; i++)
+ data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+ *words = words_read;
+ return ret_code;
+}
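+
+/*
+ * Worked example of the chunking above (hypothetical request): reading
+ * *words = 5000 starting at offset = 100, with one 4KB sector being
+ * I40E_SR_SECTOR_SIZE_IN_WORDS (2048) words:
+ *
+ *   step 1: offset 100 is mid-sector, read 2048 - 100 = 1948 words
+ *   step 2: offset 2048 is aligned,   read min(5000 - 1948, 2048) = 2048
+ *   step 3: offset 4096,              read the remaining 1004 words
+ */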
+
+/**
+ * i40e_read_nvm_aq - Read Shadow RAM via the AdminQ.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer for the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit word buffer from the Shadow RAM using the admin command.
+ **/
+enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
+{
+ enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ DEBUGFUNC("i40e_read_nvm_aq");
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can write only up to 4KB (one sector), in one AQ write */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write fail error: tried to write %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single write cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
+
+/**
+ * i40e_write_nvm_aq - Writes Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to write
+ * @data: buffer with words to write to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Writes a 16 bit word buffer to the Shadow RAM using the admin command.
+ **/
+enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
+{
+ enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ DEBUGFUNC("i40e_write_nvm_aq");
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can write only up to 4KB (one sector), in one AQ write */
+ DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single write cannot spread over two sectors */
+ DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
+ else
+ ret_code = i40e_aq_update_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
+
+/**
+ * i40e_write_nvm_word - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
+ * NVM ownership has to be acquired and released (on ARQ completion event
+ * reception) by the caller. To commit the SR to NVM, the update checksum
+ * function should be called.
+ **/
+enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+ void *data)
+{
+ DEBUGFUNC("i40e_write_nvm_word");
+
+ *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
+
+ /* Value 0x00 below means that we treat SR as a flat mem */
+ return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
+}
+
+/**
+ * i40e_write_nvm_buffer - Writes Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes a 16 bit word buffer to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller on ARQ completion event reception. To commit the SR to NVM,
+ * the update checksum function should be called.
+ **/
+enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data)
+{
+ __le16 *le_word_ptr = (__le16 *)data;
+ u16 *word_ptr = (u16 *)data;
+ u32 i = 0;
+
+ DEBUGFUNC("i40e_write_nvm_buffer");
+
+ for (i = 0; i < words; i++)
+ le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
+
+ /* Here we will only write one buffer as the size of the modules
+ * mirrored in the Shadow RAM is always less than 4K.
+ */
+ return i40e_write_nvm_aq(hw, module_pointer, offset, words,
+ data, FALSE);
+}
+
+/**
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
+ *
+ * This function calculates a SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of the
+ * VPD module are customer specific and unknown, so this function skips the
+ * maximum possible size of the VPD module (1kB).
+ **/
+enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_virt_mem vmem;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 vpd_module = 0;
+ u16 *data;
+ u16 i = 0;
+
+ DEBUGFUNC("i40e_calc_nvm_checksum");
+
+ ret_code = i40e_allocate_virt_mem(hw, &vmem,
+ I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
+ if (ret_code)
+ goto i40e_calc_nvm_checksum_exit;
+ data = (u16 *)vmem.va;
+
+ /* read pointer to VPD area */
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* read pointer to PCIe Alt Auto-load module */
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->nvm.sr_size; i++) {
+ /* Read SR page */
+ if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
+ ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+ }
+
+ /* Skip Checksum word */
+ if (i == I40E_SR_SW_CHECKSUM_WORD)
+ continue;
+ /* Skip VPD module (convert byte size to word count) */
+ if ((i >= (u32)vpd_module) &&
+ (i < ((u32)vpd_module +
+ (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
+ continue;
+ }
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if ((i >= (u32)pcie_alt_module) &&
+ (i < ((u32)pcie_alt_module +
+ (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
+ continue;
+ }
+
+ checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
+ }
+
+ *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
+
+i40e_calc_nvm_checksum_exit:
+ i40e_free_virt_mem(hw, &vmem);
+ return ret_code;
+}
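+
+/*
+ * The convention above: the stored checksum word is chosen so that adding
+ * it back to the computed sum hits a fixed base, e.g. (hypothetical sum)
+ *
+ *   checksum_local = 0x1234
+ *   stored word    = I40E_SR_SW_CHECKSUM_BASE - 0x1234 = 0xBABA - 0x1234
+ *                  = 0xA886
+ *
+ * which is what i40e_validate_nvm_checksum() below recomputes and compares.
+ */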
+
+/**
+ * i40e_update_nvm_checksum - Updates the NVM checksum
+ * @hw: pointer to hardware structure
+ *
+ * NVM ownership must be acquired before calling this function and released
+ * on ARQ completion event reception by caller.
+ * This function will commit SR to NVM.
+ **/
+enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 checksum;
+ __le16 le_sum;
+
+ DEBUGFUNC("i40e_update_nvm_checksum");
+
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+ le_sum = CPU_TO_LE16(checksum);
+ if (ret_code == I40E_SUCCESS)
+ ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
+ 1, &le_sum, TRUE);
+
+ return ret_code;
+}
+
+/**
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
+ *
+ * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need the checksum, the checksum pointer can be NULL.
+ **/
+enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 checksum_sr = 0;
+ u16 checksum_local = 0;
+
+ DEBUGFUNC("i40e_validate_nvm_checksum");
+
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ if (ret_code != I40E_SUCCESS)
+ goto i40e_validate_nvm_checksum_exit;
+
+ /* Read the checksum word directly from the SR, without retaking the
+ * synchronization semaphores that the caller may already hold.
+ */
+ i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (checksum_local != checksum_sr)
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum)
+ *checksum = checksum_local;
+
+i40e_validate_nvm_checksum_exit:
+ return ret_code;
+}
diff --git a/usr/src/uts/common/io/i40e/core/i40e_prototype.h b/usr/src/uts/common/io/i40e/core/i40e_prototype.h
new file mode 100644
index 0000000000..6f1cfc3afe
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_prototype.h
@@ -0,0 +1,478 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_prototype.h 284049 2015-06-05 22:52:42Z jfv $*/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These exist
+ * mostly because they are needed even before init has
+ * happened, and they assist in the early SW and FW setup.
+ */
+
+/* adminq functions */
+enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_asq(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_arq(struct i40e_hw *hw);
+enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw);
+enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw);
+u16 i40e_clean_asq(struct i40e_hw *hw);
+void i40e_free_adminq_asq(struct i40e_hw *hw);
+void i40e_free_adminq_arq(struct i40e_hw *hw);
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+bool i40e_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40e_resume_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+#ifdef X722_SUPPORT
+
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+#endif
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
+
+
+u32 i40e_led_get(struct i40e_hw *hw);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+
+/* admin send queue commands */
+
+enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_reset);
+enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
+ u16 max_frame_size, bool crc_en, u16 pacing,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, bool enable_l2_filtering,
+ u16 *pveb_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, u32 field_id, void *data,
+ u16 buf_size, u16 *element_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
+ u8 cmd_flags, void *data, u16 buf_size,
+ u16 element_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+ void *buff, u16 buff_size, u16 tlv_len,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 tlv_len, u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+ bool start_agent,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+ u8 *num_entries,
+ struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+ u16 count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+ u16 mac_seid, u16 vsi_seid,
+ u16 *ret_seid);
+enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+ u16 vsi_seid, u16 tag, u16 queue_num,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 tag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+ u16 etag, u8 num_tags_in_buf, void *buf,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+ u16 etag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 old_tag, u16 new_tag, u16 *tags_used,
+ u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 *stat_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 stat_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
+ u16 bad_frame_vsi, bool save_bad_pac,
+ bool pad_short_pac, bool double_vlan,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
+ u8 tcmap, bool request, u8 *tcmap_ret,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile *profile,
+ u8 *pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile profile,
+ u8 pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg);
+enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+ u16 vsi,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count);
+
+enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+ u16 vsi,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count);
+
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1);
+enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer);
+enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
+ u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1);
+enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer);
+enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
+ u8 bios_mode, bool *reset_needed);
+enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
+ u8 oem_mode);
+
+/* i40e_common */
+enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw);
+enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_hw(struct i40e_hw *hw);
+void i40e_clear_pxe_mode(struct i40e_hw *hw);
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw);
+enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
+enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
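+/*
+ * Usage sketch (non-normative), assembled only from the entry points
+ * declared above; error handling and admin queue setup are elided:
+ *
+ *    u8 mac[6];
+ *    bool link_up;
+ *
+ *    (void) i40e_init_shared_code(hw);
+ *    (void) i40e_pf_reset(hw);
+ *    i40e_clear_pxe_mode(hw);
+ *    (void) i40e_get_mac_addr(hw, mac);
+ *    (void) i40e_get_link_status(hw, &link_up);
+ */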
+/* prototypes for functions used for NVM access */
+enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw);
+enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access);
+void i40e_release_nvm(struct i40e_hw *hw);
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
+ u32 offset, u16 words, void *data,
+ bool last_command);
+enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+ void *data);
+enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module,
+ u32 offset, u16 words, void *data);
+enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
+enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw);
+enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum);
+enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *);
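+/*
+ * Sketch (non-normative): the NVM helpers follow an acquire/operate/
+ * release discipline, so a word read is bracketed by ownership of the
+ * NVM resource:
+ *
+ *    u16 word;
+ *
+ *    if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) {
+ *            (void) i40e_read_nvm_word(hw, offset, &word);
+ *            i40e_release_nvm(hw);
+ *    }
+ */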
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+
+enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
+
+extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+
+static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return i40e_ptype_lookup[ptype];
+}
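+
+/*
+ * Sketch (non-normative), assuming the decode fields from i40e_type.h:
+ * the 8-bit Rx descriptor packet type expands to a description of the
+ * packet's framing, e.g. to test for a recognized L4 payload:
+ *
+ *    struct i40e_rx_ptype_decoded d = decode_rx_desc_ptype(ptype);
+ *
+ *    if (d.known && d.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+ *            ...
+ */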
+
+/* prototypes for functions used for SW spinlocks */
+void i40e_init_spinlock(struct i40e_spinlock *sp);
+void i40e_acquire_spinlock(struct i40e_spinlock *sp);
+void i40e_release_spinlock(struct i40e_spinlock *sp);
+void i40e_destroy_spinlock(struct i40e_spinlock *sp);
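+/*
+ * Sketch (non-normative): the wrappers map onto a host OS lock
+ * primitive in the OS-dependent layer and follow the usual lifecycle:
+ *
+ *    struct i40e_spinlock lock;
+ *
+ *    i40e_init_spinlock(&lock);
+ *    i40e_acquire_spinlock(&lock);
+ *    ... critical section ...
+ *    i40e_release_spinlock(&lock);
+ *    i40e_destroy_spinlock(&lock);
+ */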
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg);
+enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ enum i40e_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/usr/src/uts/common/io/i40e/core/i40e_register.h b/usr/src/uts/common/io/i40e/core/i40e_register.h
new file mode 100644
index 0000000000..ff4b8a54f2
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_register.h
@@ -0,0 +1,5317 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_register.h 284049 2015-06-05 22:52:42Z jfv $*/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+
+#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
+#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
+#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
+#define I40E_GL_ARQH_ARQH_SHIFT 0
+#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
+#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
+#define I40E_GL_ARQT_ARQT_SHIFT 0
+#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
+#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
+#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
+#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
+#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
+#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
+#define I40E_GL_ATQH_ATQH_SHIFT 0
+#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
+#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
+#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
+#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
+#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
+#define I40E_GL_ATQT_ATQT_SHIFT 0
+#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
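+/*
+ * Sketch (non-normative): each register field below is described by a
+ * _SHIFT/_MASK pair, and I40E_MASK() yields a mask already shifted into
+ * position, so field extraction is mask-then-shift.  Assuming the
+ * rd32() read helper from the OS-dependent layer:
+ *
+ *    u32 v = rd32(hw, I40E_GL_ATQLEN);
+ *    u32 len = (v & I40E_GL_ATQLEN_ATQLEN_MASK) >>
+ *        I40E_GL_ATQLEN_ATQLEN_SHIFT;
+ */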
+#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
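+/*
+ * Sketch (non-normative): the per-VF admin queue registers are strided
+ * arrays, and the macros fold the VF index into the address, e.g. VF
+ * 3's admin receive queue tail:
+ *
+ *    u32 reg = I40E_VF_ARQT(3);    0x00082C00 + (3 * 4) = 0x00082C0C
+ */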
+#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
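+/*
+ * Sketch (non-normative): RUP2TC packs eight 3-bit user-priority-to-TC
+ * mappings into one register, with UP n in bits [3n+2:3n], so the TC
+ * for a given priority can be read generically (assuming rd32()):
+ *
+ *    u32 v = rd32(hw, I40E_PRTDCB_RUP2TC);
+ *    u8 tc = (v >> (up * 3)) & 0x7;
+ */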
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
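+/*
+ * Sketch (non-normative): updating one field is the converse
+ * read-modify-write, e.g. selecting an LED mode on GPIO pin i
+ * (assuming the rd32()/wr32() helpers from the OS-dependent layer):
+ *
+ *    u32 v = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
+ *    v &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ *    v |= (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ *        I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ *    wr32(hw, I40E_GLGEN_GPIO_CTL(i), v);
+ */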
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
+#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
+#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: CORER */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
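+/*
+ * Illustrative sketch (an assumption, not from the original header): the
+ * _ENA register arms individual ICR0 causes, so masks are OR-ed together
+ * and written in one go.  Assuming the wr32() accessor from i40e_osdep.h,
+ * taking admin-queue and link-change interrupts on vector 0 would be:
+ *
+ *	wr32(hw, I40E_PFINT_ICR0_ENA,
+ *	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
+ *	    I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
+ */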
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
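+/*
+ * Illustrative sketch: an ITR register holds a 12-bit throttle interval in
+ * its low bits, so programming it is a single masked write.  The time unit
+ * of the interval is not stated here; consult the datasheet for the actual
+ * granularity.  "itr_index", "vector", and "interval" are assumed locals:
+ *
+ *	wr32(hw, I40E_PFINT_ITRN(itr_index, vector),
+ *	    interval & I40E_PFINT_ITRN_INTERVAL_MASK);
+ */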
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
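+/*
+ * Illustrative sketch: a queue cause is chained onto a vector by filling
+ * in the MSI-X index, ITR index, and next-queue link, then setting
+ * CAUSE_ENA.  A minimal RX mapping ("vector", "itr", "next_q", and
+ * "queue" are assumed locals) might look like:
+ *
+ *	uint32_t val = (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ *	    (itr << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ *	    (next_q << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ *	    I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ *	wr32(hw, I40E_QINT_RQCTL(queue), val);
+ */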
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: CORER */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: PFR */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
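+/*
+ * Illustrative sketch: the TAIL registers act as queue doorbells.  After
+ * posting new descriptors a driver advances the tail with a plain write;
+ * "next_to_use" is an assumed local ring index, not a name from this file:
+ *
+ *	wr32(hw, I40E_QRX_TAIL(queue), next_to_use);
+ */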
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
+#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
+#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
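+/*
+ * Illustrative sketch: firmware state is multiplexed into GL_MNG_FWSM, so
+ * a health check extracts the individual fields.  For example, reading the
+ * firmware reset counter ("fwsm" and "resets" are assumed locals):
+ *
+ *	uint32_t fwsm = rd32(hw, I40E_GL_MNG_FWSM);
+ *	uint32_t resets = (fwsm & I40E_GL_MNG_FWSM_RESET_CNT_MASK) >>
+ *	    I40E_GL_MNG_FWSM_RESET_CNT_SHIFT;
+ */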
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
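+/*
+ * Illustrative usage sketch, not part of the register map proper: reading
+ * one 16-bit word from the NVM shadow RAM through GLNVM_SRCTL/GLNVM_SRDATA.
+ * The caller writes the word offset plus the START bit, polls DONE, then
+ * extracts SRDATA.RDDATA.  struct i40e_hw and the rd32()/wr32() accessors
+ * are assumed from the driver's osdep layer; the SRBUSY pre-check and real
+ * timeout handling are elided.
+ */
+#if 0	/* example only */
+static int
+example_read_sr_word(struct i40e_hw *hw, u16 offset, u16 *data)
+{
+	u32 sr_reg;
+	int retry;
+
+	/* Request the read: word address in ADDR, then kick START. */
+	sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+	    (1u << I40E_GLNVM_SRCTL_START_SHIFT);
+	wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+	/* Poll until the hardware reports DONE, then pull the data word. */
+	for (retry = 0; retry < 100000; retry++) {
+		sr_reg = rd32(hw, I40E_GLNVM_SRCTL);
+		if (sr_reg & I40E_GLNVM_SRCTL_DONE_MASK) {
+			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+			*data = (u16)((sr_reg &
+			    I40E_GLNVM_SRDATA_RDDATA_MASK) >>
+			    I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+			return (0);
+		}
+	}
+	return (-1);	/* timed out */
+}
+#endif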
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
+#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
+#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
+#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
+#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
+#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
+#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
+#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
+#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
+#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_GLQF_HKEY_MAX_INDEX 12
+#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
+#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
+#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
+#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
+#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
+#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
+#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
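+/*
+ * Illustrative usage sketch, not part of the register map proper:
+ * programming the PF RSS hash key and lookup table.  The 13 PFQF_HKEY
+ * registers hold the 52-byte key four bytes at a time, and each of the
+ * 128 PFQF_HLUT registers packs four 6-bit queue indices (LUT0..LUT3).
+ * struct i40e_hw and wr32() are assumed from the driver's osdep layer;
+ * seed and lut are caller-supplied arrays.
+ */
+#if 0	/* example only */
+static void
+example_config_rss(struct i40e_hw *hw, const u32 *seed, const u8 *lut)
+{
+	u32 reg, i;
+
+	/* 13 x 32-bit registers carry the 52-byte hash key. */
+	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+		wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
+
+	/* Pack four queue indices (each 0..63) into every LUT register. */
+	for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+		reg = (lut[4 * i] << I40E_PFQF_HLUT_LUT0_SHIFT) |
+		    (lut[4 * i + 1] << I40E_PFQF_HLUT_LUT1_SHIFT) |
+		    (lut[4 * i + 2] << I40E_PFQF_HLUT_LUT2_SHIFT) |
+		    (lut[4 * i + 3] << I40E_PFQF_HLUT_LUT3_SHIFT);
+		wr32(hw, I40E_PFQF_HLUT(i), reg);
+	}
+}
+#endif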
+#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1_L_MAX_INDEX 143
+#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
+#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR2_L_MAX_INDEX 143
+#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
+#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
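+/*
+ * Editor's sketch, not part of the upstream register map: the GLPRT
+ * statistics above are 48-bit counters split across a 32-bit low
+ * register (...L) and a 16-bit high register (...H).  A minimal way to
+ * combine such a pair, assuming fixed-width types from <sys/types.h>
+ * and that `hw_base` points at the mapped BAR0 register space (the
+ * accessor style here is illustrative, not a driver API).  A production
+ * driver must also cope with the counter rolling over between the two
+ * reads.
+ */
+static inline uint64_t
+i40e_sketch_read_stat48(volatile uint8_t *hw_base, uint32_t lo_reg,
+    uint32_t hi_reg)
+{
+	uint64_t lo = *(volatile uint32_t *)(hw_base + lo_reg);
+	uint64_t hi = *(volatile uint32_t *)(hw_base + hi_reg) & 0xFFFF;
+
+	return ((hi << 32) | lo);
+}
+/*
+ * Usage: i40e_sketch_read_stat48(hw_base, I40E_GLPRT_GORCL(port),
+ * I40E_GLPRT_GORCH(port)) yields the good-octets-received count.
+ */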
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
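+/*
+ * Editor's note: the GLVEBTC_* counters above are doubly indexed,
+ * apparently by traffic class (_i = 0...7) and switch element
+ * (_j = 0...15), and the macros linearize the pair as
+ * base + _i * 8 + _j * 64.  For example,
+ * I40E_GLVEBTC_RPCL(3, 2) = 0x00368000 + 24 + 128 = 0x00368098.
+ */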
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
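+/*
+ * Editor's sketch, illustrative only: PRTTSYN_TIME_H/L above hold the
+ * 64-bit IEEE 1588 port clock.  One plausible way to assemble it,
+ * assuming `hw_base` maps BAR0 and that the low half is read before the
+ * high half (consult the datasheet for the exact latching rules):
+ */
+static inline uint64_t
+i40e_sketch_read_tsyn_time(volatile uint8_t *hw_base)
+{
+	uint64_t lo = *(volatile uint32_t *)(hw_base + I40E_PRTTSYN_TIME_L);
+	uint64_t hi = *(volatile uint32_t *)(hw_base + I40E_PRTTSYN_TIME_H);
+
+	return ((hi << 32) | lo);
+}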
+#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
+#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
+#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
+#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 25
+#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
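+/*
+ * Editor's sketch, illustrative only: when the hardware latches a
+ * malicious-driver event in GL_MDET_TX, the VALID bit gates the other
+ * fields.  A straightforward decode of a value `reg` read from
+ * I40E_GL_MDET_TX (the helper name and out-parameters are assumptions):
+ */
+static inline int
+i40e_sketch_decode_mdet_tx(uint32_t reg, uint32_t *queue, uint32_t *event,
+    uint32_t *pf, uint32_t *vf)
+{
+	if ((reg & I40E_GL_MDET_TX_VALID_MASK) == 0)
+		return (0);	/* no event pending */
+	*queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+	    I40E_GL_MDET_TX_QUEUE_SHIFT;
+	*event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+	    I40E_GL_MDET_TX_EVENT_SHIFT;
+	*pf = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+	    I40E_GL_MDET_TX_PF_NUM_SHIFT;
+	*vf = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+	    I40E_GL_MDET_TX_VF_NUM_SHIFT;
+	return (1);
+}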
+#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
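+/*
+ * Editor's sketch, illustrative only: enabling magic-packet wake-up via
+ * the PFPM_WUFC filter bits above.  `hw_base` is an assumption; a real
+ * driver would also program the station address into PRTPM_SAL/SAH and
+ * set the AV bit so the hardware can match the magic packet.
+ */
+static inline void
+i40e_sketch_enable_magic_wake(volatile uint8_t *hw_base)
+{
+	volatile uint32_t *wufc =
+	    (volatile uint32_t *)(hw_base + I40E_PFPM_WUFC);
+
+	*wufc |= I40E_PFPM_WUFC_MAG_MASK;
+}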
+#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
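+/*
+ * Editor's sketch, illustrative only: one plausible bring-up sequence
+ * for the VF admin receive queue using the registers above, following
+ * the usual descriptor-ring pattern (base, head, tail, then length with
+ * the enable bit).  `hw_base`, `dma_hi`, `dma_lo`, and `nent` are
+ * assumptions, not driver API; the ordering is a sketch, not a
+ * datasheet-verified sequence.
+ */
+static inline void
+i40e_sketch_vf_arq_init(volatile uint8_t *hw_base, uint32_t dma_hi,
+    uint32_t dma_lo, uint32_t nent)
+{
+	*(volatile uint32_t *)(hw_base + I40E_VF_ARQBAH1) = dma_hi;
+	*(volatile uint32_t *)(hw_base + I40E_VF_ARQBAL1) = dma_lo;
+	*(volatile uint32_t *)(hw_base + I40E_VF_ARQH1) = 0;
+	*(volatile uint32_t *)(hw_base + I40E_VF_ARQT1) = nent - 1;
+	*(volatile uint32_t *)(hw_base + I40E_VF_ARQLEN1) =
+	    (nent & I40E_VF_ARQLEN1_ARQLEN_MASK) |
+	    I40E_VF_ARQLEN1_ARQENABLE_MASK;
+}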
+#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _INTVF=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _Q=0...15 */ /* Reset: CORER */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _Q=0...15 */ /* Reset: PFR */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
+#ifdef X722_SUPPORT
+
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
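+/*
+ * Editor's illustration (not part of the original change): every register
+ * field above pairs a _SHIFT with a _MASK built by I40E_MASK(), so a field
+ * is read the same way throughout. A minimal sketch, assuming a caller has
+ * already read I40E_MNGSB_RHDR0 into a local uint32_t named "val":
+ *
+ *	uint32_t resp = (val & I40E_MNGSB_RHDR0_RESPONSE_MASK) >>
+ *	    I40E_MNGSB_RHDR0_RESPONSE_SHIFT;
+ *
+ * "resp" then holds the 3-bit RESPONSE field from bits 29:27.
+ */
+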
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
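+
+/*
+ * Field access convention: every field in this header is described by a
+ * _SHIFT and a _MASK built with I40E_MASK(), so each GLPCI_CUR_* register
+ * packs DATA_LINES in bits 15:0 and OSR in bits 31:16. A minimal
+ * extraction sketch, assuming rd32() stands in for the platform's 32-bit
+ * register-read routine (an illustrative name, not defined here):
+ *
+ *	u32 val = rd32(hw, I40E_GLPCI_CUR_WATMK_CLNT_COMMON);
+ *	u32 osr = (val & I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK) >>
+ *	    I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT;
+ */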
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
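+
+/*
+ * Registers taking a (_i) argument are arrays: the macro computes
+ * base + (_i) * stride, valid for _i up to the matching _MAX_INDEX. A
+ * minimal sketch that walks the per-PF CQE drop counters, again assuming
+ * an illustrative rd32() register-read routine:
+ *
+ *	int i;
+ *	for (i = 0; i <= I40E_GLPE_PFCQEDROPCNT_MAX_INDEX; i++) {
+ *		u32 drops = rd32(hw, I40E_GLPE_PFCQEDROPCNT(i)) &
+ *		    I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK;
+ *	}
+ */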
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
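+
+/*
+ * Multi-field registers are composed by shifting each field into place
+ * and clamping it with its mask; a minimal doorbell-write sketch for
+ * WQEALLOC, assuming wr32() stands in for the platform's 32-bit
+ * register-write routine (illustrative, not defined in this header):
+ *
+ *	u32 val = ((qpid << I40E_PFPE_WQEALLOC_PEQPID_SHIFT) &
+ *	    I40E_PFPE_WQEALLOC_PEQPID_MASK) |
+ *	    ((desc << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) &
+ *	    I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK);
+ *	wr32(hw, I40E_PFPE_WQEALLOC, val);
+ */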
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
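+
+/*
+ * The VFPE registers above mirror the PFPE set, replicated per VF: each
+ * macro takes a VF index (0...127) and returns that VF's copy, 4 bytes
+ * apart. For example, VF 3's CQP doorbell is I40E_VFPE_CQPDB(3), i.e.
+ * 0x00000000 + 3 * 4 = 0xC, with the same WQHEAD field layout as
+ * I40E_PFPE_CQPDB.
+ */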
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
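+
+/*
+ * Many of the GLPES statistics are wide counters split across register
+ * pairs: a 32-bit *LO register and a 16-bit *HI register, giving 48
+ * usable bits. A minimal read sketch, assuming an illustrative rd32()
+ * routine; the LO-then-HI order shown is arbitrary, as this header
+ * specifies no snapshot or ordering rule:
+ *
+ *	u64 frags = rd32(hw, I40E_GLPES_PFIP4RXFRAGSLO(pf));
+ *	frags |= (u64)(rd32(hw, I40E_GLPES_PFIP4RXFRAGSHI(pf)) &
+ *	    I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK) << 32;
+ */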
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+
+#endif /* X722_SUPPORT */
+#endif /* _I40E_REGISTER_H_ */
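Before moving on, a note on how the statistics definitions above are consumed: each GLPES counter is split across a LO/HI register pair (LO carries the low 32 bits, HI the remaining upper bits behind a 16- or 24-bit mask), and every field ships with a generated _SHIFT/_MASK pair built from I40E_MASK. The following is a minimal sketch of assembling one such counter; i40e_read_reg() is a hypothetical stand-in for the driver's actual register-read routine, and the sketch omits the read-ordering and rollover handling a production path would need.

/* Hypothetical register accessor; the real driver supplies its own. */
extern uint32_t i40e_read_reg(struct i40e_hw *hw, uint32_t reg);

/*
 * Assemble the per-PF TCP TX segment counter for PF index 'pf'
 * (valid indices run 0..I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX).
 */
static uint64_t
i40e_pf_tcp_tx_segs(struct i40e_hw *hw, int pf)
{
	uint32_t lo = i40e_read_reg(hw, I40E_GLPES_PFTCPTXSEGLO(pf));
	uint32_t hi = i40e_read_reg(hw, I40E_GLPES_PFTCPTXSEGHI(pf)) &
	    I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK;

	/* HI supplies the upper 16 bits of the 48-bit hardware counter. */
	return (((uint64_t)hi << 32) | lo);
}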
diff --git a/usr/src/uts/common/io/i40e/core/i40e_status.h b/usr/src/uts/common/io/i40e/core/i40e_status.h
new file mode 100644
index 0000000000..1f27507970
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_status.h
@@ -0,0 +1,108 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_status.h 283119 2015-05-19 18:35:18Z jhb $*/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
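Every code above other than I40E_SUCCESS is a distinct negative integer, so shared code compares returns against I40E_SUCCESS explicitly rather than treating them as booleans. A minimal sketch of the calling convention, assuming a routine that returns enum i40e_status_code and an illumos-style cmn_err() for logging; the error path shown is purely illustrative:

enum i40e_status_code status;

status = i40e_init_adminq(hw);
if (status != I40E_SUCCESS) {
	/* Each failure is a distinct negative value, handy for logging. */
	cmn_err(CE_WARN, "i40e: admin queue init failed: %d", status);
	return (status);
}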
diff --git a/usr/src/uts/common/io/i40e/core/i40e_type.h b/usr/src/uts/common/io/i40e/core/i40e_type.h
new file mode 100644
index 0000000000..b4a84993e9
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_type.h
@@ -0,0 +1,1581 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_type.h 284049 2015-06-05 22:52:42Z jfv $*/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_devids.h"
+
+#define UNREFERENCED_XPARAMETER
+
+#define BIT(a) (1UL << (a))
+#define BIT_ULL(a) (1ULL << (a))
+
+#ifndef I40E_MASK
+/* I40E_MASK is a macro used on 32-bit registers */
+#define I40E_MASK(mask, shift) (((uint32_t)(mask)) << ((uint32_t)(shift)))
+#endif
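+/*
+ * For example, I40E_MASK(0x1F, 5) evaluates to 0x3E0: a five-bit-wide
+ * field placed at bit offset 5 of a 32-bit register.
+ */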
+
+#define I40E_MAX_PF 16
+#define I40E_MAX_PF_VSI 64
+#define I40E_MAX_PF_QP 128
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* something less than 1 minute */
+#define I40E_HEARTBEAT_TIMEOUT (HZ * 50)
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Check whether address is multicast. */
+#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define I40E_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
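+/*
+ * Note that only the first two octets are compared; this is a cheap
+ * screen for the ff:ff:ff:ff:ff:ff broadcast address rather than a
+ * full six-byte comparison.
+ */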
+
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define I40E_ETH_LENGTH_OF_ADDRESS 6
+/* Data type manipulation macros. */
+#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+
+#define I40E_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define I40E_LO_WORD(x) ((u16)((x) & 0xFFFF))
+
+#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF))
+
+/* Number of Transmit Descriptors must be a multiple of 8. */
+#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Receive Descriptors must be a multiple of 32 if
+ * the number of descriptors is greater than 32.
+ */
+#define I40E_REQ_RX_DESCRIPTOR_MULTIPLE 32
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
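+/*
+ * I40E_DESC_UNUSED() reports the free slots in a descriptor ring while
+ * holding one slot in reserve so a full ring can be told apart from an
+ * empty one. For example, with count = 512, next_to_use = 10 and
+ * next_to_clean = 4, it yields 512 + 4 - 10 - 1 = 505 free descriptors.
+ */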
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
+ I40E_DEBUG_AQ = 0x0F000000,
+
+	/*
+	 * Ugggh, we have to cast these because enums are "int" and these
+	 * values overflow int.
+	 */
+ I40E_DEBUG_USER = (int)0xF0000000,
+
+ I40E_DEBUG_ALL = (int)0xFFFFFFFF
+};
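+/*
+ * These values are bit flags meant to be OR-ed into hw->debug_mask;
+ * e.g. (I40E_DEBUG_INIT | I40E_DEBUG_AQ) enables both init-time and
+ * AdminQ-related output.
+ */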
+
+/* PCI Bus Info */
+#define I40E_PCI_LINK_STATUS 0xB2
+#define I40E_PCI_LINK_WIDTH 0x3F0
+#define I40E_PCI_LINK_WIDTH_1 0x10
+#define I40E_PCI_LINK_WIDTH_2 0x20
+#define I40E_PCI_LINK_WIDTH_4 0x40
+#define I40E_PCI_LINK_WIDTH_8 0x80
+#define I40E_PCI_LINK_SPEED 0xF
+#define I40E_PCI_LINK_SPEED_2500 0x1
+#define I40E_PCI_LINK_SPEED_5000 0x2
+#define I40E_PCI_LINK_SPEED_8000 0x3
+
+/* Memory types */
+enum i40e_memset_type {
+ I40E_NONDMA_MEM = 0,
+ I40E_DMA_MEM
+};
+
+/* Memcpy types */
+enum i40e_memcpy_type {
+ I40E_NONDMA_TO_NONDMA = 0,
+ I40E_NONDMA_TO_DMA,
+ I40E_DMA_TO_DMA,
+ I40E_DMA_TO_NONDMA
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_X710,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+#ifdef X722_SUPPORT
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
+#endif
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_set_fc_aq_failures {
+ I40E_SET_FC_AQ_FAIL_NONE = 0,
+ I40E_SET_FC_AQ_FAIL_GET = 1,
+ I40E_SET_FC_AQ_FAIL_SET = 2,
+ I40E_SET_FC_AQ_FAIL_UPDATE = 4,
+ I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1 = 1,
+ I40E_VSI_VMDQ2 = 2,
+ I40E_VSI_CTRL = 3,
+ I40E_VSI_FCOE = 4,
+ I40E_VSI_MIRROR = 5,
+ I40E_VSI_SRIOV = 6,
+ I40E_VSI_FDIR = 7,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 loopback;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
+ u8 requested_speeds;
+ u8 module_type[3];
+ /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP 0x03
+#define I40E_MODULE_TYPE_QSFP 0x0D
+ /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE 0x01
+#define I40E_MODULE_TYPE_40G_LR4 0x02
+#define I40E_MODULE_TYPE_40G_SR4 0x04
+#define I40E_MODULE_TYPE_40G_CR4 0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR 0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR 0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER 0x80
+ /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX 0x01
+#define I40E_MODULE_TYPE_1000BASE_LX 0x02
+#define I40E_MODULE_TYPE_1000BASE_CX 0x04
+#define I40E_MODULE_TYPE_1000BASE_T 0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+ I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
+ I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
+ I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+ I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
+ I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+ I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
+ I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
+ I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
+ I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
+ I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+ I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+ I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
+ I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
+ I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
+ I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+ I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+ I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+ I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
+ I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
+ I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+ I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+ /* all the phy types the NVM is capable of */
+ enum i40e_aq_capabilities_phy_type phy_types;
+};
+
+#define I40E_HW_CAP_MAX_GPIO 30
+#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
+#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
+
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool iscsi; /* Indicates iSCSI enabled */
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN 0x0
+#define I40E_FLEX10_MODE_DCC 0x1
+#define I40E_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
+#define I40E_FLEX10_STATUS_VC_MODE 0x2
+
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+ u64 wr_csr_prot;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+	bool blank_nvm_mode; /* is NVM empty (no FW present) */
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
+};
+
+/* definitions used in NVM update support */
+
+enum i40e_nvmupd_cmd {
+ I40E_NVMUPD_INVALID,
+ I40E_NVMUPD_READ_CON,
+ I40E_NVMUPD_READ_SNT,
+ I40E_NVMUPD_READ_LCB,
+ I40E_NVMUPD_READ_SA,
+ I40E_NVMUPD_WRITE_ERA,
+ I40E_NVMUPD_WRITE_CON,
+ I40E_NVMUPD_WRITE_SNT,
+ I40E_NVMUPD_WRITE_LCB,
+ I40E_NVMUPD_WRITE_SA,
+ I40E_NVMUPD_CSUM_CON,
+ I40E_NVMUPD_CSUM_SA,
+ I40E_NVMUPD_CSUM_LCB,
+ I40E_NVMUPD_STATUS,
+ I40E_NVMUPD_EXEC_AQ,
+ I40E_NVMUPD_GET_AQ_RESULT,
+};
+
+enum i40e_nvmupd_state {
+ I40E_NVMUPD_STATE_INIT,
+ I40E_NVMUPD_STATE_READING,
+ I40E_NVMUPD_STATE_WRITING,
+ I40E_NVMUPD_STATE_INIT_WAIT,
+ I40E_NVMUPD_STATE_WRITE_WAIT,
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code. Where is the right file?
+ */
+#define I40E_NVM_READ 0xB
+#define I40E_NVM_WRITE 0xC
+
+#define I40E_NVM_MOD_PNT_MASK 0xFF
+
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_EXEC 0xf
+
+#define I40E_NVM_ADAPT_SHIFT 16
+#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
+
+#define I40E_NVMUPD_MAX_DATA 4096
+#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
+struct i40e_nvm_access {
+ u32 command;
+ u32 config;
+ u32 offset; /* in bytes */
+ u32 data_size; /* in bytes */
+ u8 data[1];
+};
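+
+/*
+ * Illustrative sketch (not part of this driver): packing the 'config'
+ * word of struct i40e_nvm_access from the mask/shift definitions
+ * above. The helper name is hypothetical.
+ */
+static INLINE u32
+i40e_nvm_access_pack_config(u32 module_pnt, u32 trans, u32 adapter)
+{
+	return ((module_pnt & I40E_NVM_MOD_PNT_MASK) |
+	    ((trans << I40E_NVM_TRANS_SHIFT) & I40E_NVM_TRANS_MASK) |
+	    (u32)(((u64)adapter << I40E_NVM_ADAPT_SHIFT) &
+	    I40E_NVM_ADAPT_MASK));
+}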
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+#define I40E_TLV_STATUS_OPER 0x1
+#define I40E_TLV_STATUS_SYNC 0x2
+#define I40E_TLV_STATUS_ERR 0x4
+#define I40E_CEE_OPER_MAX_APPS 3
+#define I40E_APP_PROTOID_FCOE 0x8906
+#define I40E_APP_PROTOID_ISCSI 0x0cbc
+#define I40E_APP_PROTOID_FIP 0x8914
+#define I40E_APP_SEL_ETHTYPE 0x1
+#define I40E_APP_SEL_TCPIP 0x2
+#define I40E_CEE_APP_SEL_ETHTYPE 0x0
+#define I40E_CEE_APP_SEL_TCPIP 0x1
+
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct i40e_dcb_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct i40e_dcb_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct i40e_dcb_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u8 dcbx_mode;
+#define I40E_DCBX_MODE_CEE 0x1
+#define I40E_DCBX_MODE_IEEE 0x2
+ u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
+ struct i40e_dcb_ets_config etscfg;
+ struct i40e_dcb_ets_config etsrec;
+ struct i40e_dcb_pfc_config pfc;
+ struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 *hw_addr;
+ void *back;
+
+ /* subsystem structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* for multi-function MACs */
+ u16 partition_id;
+ u16 num_partitions;
+ u16 num_ports;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* state of nvm update process */
+ enum i40e_nvmupd_state nvmupd_state;
+ struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_virt_mem nvm_buff;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+
+ /* debug mask */
+ u32 debug_mask;
+ char err_str[16];
+};
+
+static INLINE bool i40e_is_vf(struct i40e_hw *hw)
+{
+#ifdef X722_SUPPORT
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
+#else
+ return hw->mac.type == I40E_MAC_VF;
+#endif
+}
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_addr is the DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define I40E_RXD_QW0_MIRROR_STATUS_SHIFT 8
+#define I40E_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \
+ I40E_RXD_QW0_MIRROR_STATUS_SHIFT)
+#define I40E_RXD_QW0_FCOEINDX_SHIFT 0
+#define I40E_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \
+ I40E_RXD_QW0_FCOEINDX_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+#ifdef X722_SUPPORT
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
+#else
+ I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8,
+#endif
+
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
+#ifdef X722_SUPPORT
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
+#else
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+#endif
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \
+ I40E_RXD_QW1_STATUS_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST_SHIFT
+#define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_UMBCAST_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_PACKET_TYPE_UNICAST 0
+#define I40E_RXD_PACKET_TYPE_MULTICAST 1
+#define I40E_RXD_PACKET_TYPE_BROADCAST 2
+#define I40E_RXD_PACKET_TYPE_MIRRORED 3
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RX_PTYPE_BIT_MASK 0x0FFFFFFF
+#define I40E_RX_PTYPE_SHIFT 56
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
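+
+/*
+ * Illustrative sketch (not part of this driver): extracting the
+ * status, error, packet type, and packet-buffer length fields from a
+ * completed RX descriptor's qword1, using the masks defined above.
+ */
+static INLINE void
+i40e_rx_desc_decode_qw1(u64 qw1, u32 *status, u32 *error, u32 *ptype,
+    u32 *plen)
+{
+	*status = (u32)((qw1 & I40E_RXD_QW1_STATUS_MASK) >>
+	    I40E_RXD_QW1_STATUS_SHIFT);
+	*error = (u32)((qw1 & I40E_RXD_QW1_ERROR_MASK) >>
+	    I40E_RXD_QW1_ERROR_SHIFT);
+	*ptype = (u32)((qw1 & I40E_RXD_QW1_PTYPE_MASK) >>
+	    I40E_RXD_QW1_PTYPE_SHIFT);
+	*plen = (u32)((qw1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+	    I40E_RXD_QW1_LENGTH_PBUF_SHIFT);
+}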
+
+#define I40E_RXD_QW1_NEXTP_SHIFT 38
+#define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT)
+
+#define I40E_RXD_QW2_EXT_STATUS_SHIFT 0
+#define I40E_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \
+ I40E_RXD_QW2_EXT_STATUS_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+#define I40E_RXD_QW2_L2TAG2_SHIFT 0
+#define I40E_RXD_QW2_L2TAG2_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT)
+
+#define I40E_RXD_QW2_L2TAG3_SHIFT 16
+#define I40E_RXD_QW2_L2TAG3_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT)
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+#define I40E_TWO_BIT_MASK 0x3
+#define I40E_THREE_BIT_MASK 0x7
+#define I40E_FOUR_BIT_MASK 0xF
+#define I40E_EIGHTEEN_BIT_MASK 0x3FFFF
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define I40E_TXD_QW1_IPLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define I40E_TXD_QW1_L4LEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define I40E_TXD_QW1_FCLEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
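+
+/*
+ * Illustrative sketch (not part of this driver): assembling the
+ * cmd_type_offset_bsz qword of a TX data descriptor from the shifts
+ * above. A consumer would also convert the result to little-endian
+ * before writing it to the ring.
+ */
+static INLINE u64
+i40e_build_tx_qw1(u32 td_cmd, u32 td_offset, u32 size, u32 td_tag)
+{
+	return (I40E_TX_DESC_DTYPE_DATA |
+	    ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+	    ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+	    ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+	    ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}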
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK (0x7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+#ifdef X722_SUPPORT
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
+#endif
+struct i40e_nop_desc {
+ __le64 rsvd;
+ __le64 dtype_cmd;
+};
+
+#define I40E_TXD_NOP_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_NOP_QW1_DTYPE_MASK (0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_NOP_QW1_CMD_SHIFT 4
+#define I40E_TXD_NOP_QW1_CMD_MASK (0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT)
+
+enum i40e_tx_nop_desc_cmd_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_NOP_DESC_EOP_SHIFT = 0,
+ I40E_TX_NOP_DESC_RS_SHIFT = 1,
+ I40E_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */
+};
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+#ifdef X722_SUPPORT
+ /* Note: Values 0-28 are reserved for future use.
+ * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+#else
+ /* Note: Values 0-30 are reserved for future use */
+#endif
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+#ifdef X722_SUPPORT
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+#else
+ /* Note: Value 32 is reserved for future use */
+#endif
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+#ifdef X722_SUPPORT
+ /* Note: Values 37-38 are reserved for future use.
+ * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+#else
+ /* Note: Values 37-40 are reserved for future use */
+#endif
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+#ifdef X722_SUPPORT
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+#endif
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+#ifdef X722_SUPPORT
+
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+#endif
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+ u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* rxon2offcnt[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
+ /* EEE LPI */
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_PCIE_ANALOG_CONFIG_PTR 0x03
+#define I40E_SR_PHY_ANALOG_CONFIG_PTR 0x04
+#define I40E_SR_OPTION_ROM_PTR 0x05
+#define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
+#define I40E_SR_AUTO_GENERATED_POINTERS_PTR 0x07
+#define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
+#define I40E_SR_EMP_GLOBAL_MODULE_PTR 0x09
+#define I40E_SR_RO_PCIE_LCB_PTR 0x0A
+#define I40E_SR_EMP_IMAGE_PTR 0x0B
+#define I40E_SR_PE_IMAGE_PTR 0x0C
+#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
+#define I40E_SR_MNG_CONFIG_PTR 0x0E
+#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_PBA_FLAGS 0x15
+#define I40E_SR_PBA_BLOCK_PTR 0x16
+#define I40E_SR_BOOT_CONFIG_PTR 0x17
+#define I40E_NVM_OEM_VER_OFF 0x83
+#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
+#define I40E_SR_NVM_MAP_VERSION 0x29
+#define I40E_SR_NVM_IMAGE_VERSION 0x2A
+#define I40E_SR_NVM_STRUCTURE_VERSION 0x2B
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PXE_SETUP_PTR 0x30
+#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
+#define I40E_SR_NVM_ORIGINAL_EETRACK_LO 0x34
+#define I40E_SR_NVM_ORIGINAL_EETRACK_HI 0x35
+#define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
+#define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38
+#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
+#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
+#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
+#define I40E_SR_4TH_FREE_PROVISION_AREA_PTR 0x42
+#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
+#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
+#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
+#define I40E_SR_FEATURE_CONFIGURATION_PTR 0x49
+#define I40E_SR_CONFIGURATION_METADATA_PTR 0x4D
+#define I40E_SR_IMMEDIATE_VALUES_PTR 0x4E
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_BUF_ALIGNMENT 4096
+#define I40E_SR_WORDS_IN_1KB 512
+/* The checksum is calculated such that after adding all the words,
+ * including the checksum word itself, the sum equals 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
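+
+/*
+ * Illustrative sketch (not part of this driver): computing the value
+ * for the checksum word per the rule above. This is simplified; the
+ * shared code's i40e_calc_nvm_checksum() also excludes the VPD and
+ * PCIe ALT auto-load modules from the sum. i40e_read_nvm_word() is
+ * declared in i40e_prototype.h.
+ */
+static INLINE u16
+i40e_sr_checksum_sketch(struct i40e_hw *hw)
+{
+	u16 sum = 0, word, i;
+
+	for (i = 0; i < hw->nvm.sr_size; i++) {
+		if (i == I40E_SR_SW_CHECKSUM_WORD)
+			continue; /* the checksum word is not summed */
+		if (i40e_read_nvm_word(hw, i, &word) != I40E_SUCCESS)
+			break;
+		sum += word;
+	}
+	return ((u16)(I40E_SR_SW_CHECKSUM_BASE - sum));
+}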
+
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold per-PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR 0xD
+struct i40e_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
+/* Offsets into Alternate RAM */
+#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
+#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
+#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate RAM Bandwidth Masks */
+#define I40E_ALT_BW_VALUE_MASK 0xFF
+#define I40E_ALT_BW_RELATIVE_MASK 0x40000000
+#define I40E_ALT_BW_VALID_MASK 0x80000000
+
+/* RSS Hash Table Size */
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* PBA length (and one with additional zero-padding byte), see Table 6-2. */
+#define I40E_PBANUM_LENGTH 12
+#define I40E_PBANUM_STRLEN 13
+
+#endif /* _I40E_TYPE_H_ */
diff --git a/usr/src/uts/common/io/i40e/core/i40e_virtchnl.h b/usr/src/uts/common/io/i40e/core/i40e_virtchnl.h
new file mode 100644
index 0000000000..17b090f454
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/core/i40e_virtchnl.h
@@ -0,0 +1,378 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: head/sys/dev/ixl/i40e_virtchnl.h 284049 2015-06-05 22:52:42Z jfv $*/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is of
+ * i40e_status_code type, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the I40E_VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ */
+ I40E_VIRTCHNL_OP_UNKNOWN = 0,
+ I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ I40E_VIRTCHNL_OP_RESET_VF = 2,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
+ I40E_VIRTCHNL_OP_ADD_VLAN = 12,
+ I40E_VIRTCHNL_OP_DEL_VLAN = 13,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ I40E_VIRTCHNL_OP_GET_STATS = 15,
+ I40E_VIRTCHNL_OP_FCOE = 16,
+ I40E_VIRTCHNL_OP_EVENT = 17,
+};
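+
+/*
+ * Illustrative sketch (not part of this header): the initialization
+ * order described above, as a VF driver might issue it. The softc
+ * type and vf_send_msg() helper are hypothetical.
+ */
+struct vf_softc;
+extern void vf_send_msg(struct vf_softc *, enum i40e_virtchnl_ops);
+
+static INLINE void
+vf_init_order_sketch(struct vf_softc *sc)
+{
+	vf_send_msg(sc, I40E_VIRTCHNL_OP_VERSION);	/* validate API */
+	vf_send_msg(sc, I40E_VIRTCHNL_OP_RESET_VF);	/* reset, then poll */
+	vf_send_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES);
+	vf_send_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
+	vf_send_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
+	vf_send_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
+	/* MAC/VLAN filters may be added and traffic processed after this. */
+}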
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum i40e_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures. */
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct i40e_virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
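+
+/*
+ * Illustrative sketch (not part of this header): applying the version
+ * rule above. A major mismatch is fatal; a minor mismatch means the
+ * VF can operate but should log a warning (not shown).
+ */
+static INLINE bool
+i40e_vf_api_compatible(const struct i40e_virtchnl_version_info *pf_ver)
+{
+	return (pf_ver->major == I40E_VIRTCHNL_VERSION_MAJOR);
+}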
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters.
+ * PF does NOT respond! The VF driver must delay, then poll the VFGEN_RSTAT
+ * register until reset completion is indicated. The admin queue must be
+ * reinitialized after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum i40e_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+
+struct i40e_virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 max_fcoe_contexts;
+ u32 max_fcoe_filters;
+
+ struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled;
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled;
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u64 dma_ring_addr;
+ enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct i40e_virtchnl_vector_map vecmap[1];
+};
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
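+
+/*
+ * Illustrative sketch (not part of this header): selecting the first
+ * 'nq' TX/RX queue pairs of a VSI via the bitmap fields above. The
+ * helper name is hypothetical.
+ */
+static INLINE void
+i40e_fill_queue_select(struct i40e_virtchnl_queue_select *qsel,
+    u16 vsi_id, u32 nq)
+{
+	qsel->vsi_id = vsi_id;
+	qsel->pad = 0;
+	qsel->rx_queues = qsel->tx_queues =
+	    (nq >= 32) ? 0xFFFFFFFFu : ((1u << nq) - 1);
+}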
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+ I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+ I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+ I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+ I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO 0
+#define I40E_PF_EVENT_SEVERITY_ATTENTION 1
+#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct i40e_virtchnl_pf_event {
+ enum i40e_virtchnl_event_codes event;
+ union {
+ struct {
+ enum i40e_aq_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return 0xDEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum i40e_vfr_states {
+ I40E_VFR_INPROGRESS = 0,
+ I40E_VFR_COMPLETED,
+ I40E_VFR_VFACTIVE,
+ I40E_VFR_UNKNOWN,
+};
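+
+/*
+ * Illustrative sketch (not part of this header): interpreting a raw
+ * RSTAT read per the comment above. The 0x3 state mask is an
+ * assumption; a mid-reset read of 0xDEADBEEF masks to I40E_VFR_UNKNOWN.
+ */
+static INLINE enum i40e_vfr_states
+i40e_vfr_state_sketch(u32 rstat)
+{
+	return ((enum i40e_vfr_states)(rstat & 0x3));
+}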
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/usr/src/uts/common/io/i40e/i40e.conf b/usr/src/uts/common/io/i40e/i40e.conf
new file mode 100644
index 0000000000..b4c3459931
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/i40e.conf
@@ -0,0 +1,19 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2016 Joyent, Inc.
+#
+
+#
+# Driver.conf file for Intel XL710 PCIe NIC Driver (i40e)
+# See i40e(7D) for valid options.
+#
diff --git a/usr/src/uts/common/io/i40e/i40e_gld.c b/usr/src/uts/common/io/i40e/i40e_gld.c
new file mode 100644
index 0000000000..0af4c4c71f
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/i40e_gld.c
@@ -0,0 +1,1097 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
+ * Copyright 2016 Joyent, Inc.
+ */
+
+/*
+ * For more information, please see the big theory statement in i40e_main.c.
+ */
+
+#include "i40e_sw.h"
+
+#define I40E_PROP_RX_DMA_THRESH "_rx_dma_threshold"
+#define I40E_PROP_TX_DMA_THRESH "_tx_dma_threshold"
+#define I40E_PROP_RX_ITR "_rx_intr_throttle"
+#define I40E_PROP_TX_ITR "_tx_intr_throttle"
+#define I40E_PROP_OTHER_ITR "_other_intr_throttle"
+
+char *i40e_priv_props[] = {
+ I40E_PROP_RX_DMA_THRESH,
+ I40E_PROP_TX_DMA_THRESH,
+ I40E_PROP_RX_ITR,
+ I40E_PROP_TX_ITR,
+ I40E_PROP_OTHER_ITR,
+ NULL
+};
+
+static int
+i40e_group_remove_mac(void *arg, const uint8_t *mac_addr)
+{
+ i40e_t *i40e = arg;
+ struct i40e_aqc_remove_macvlan_element_data filt;
+ struct i40e_hw *hw = &i40e->i40e_hw_space;
+ int ret, i, last;
+ i40e_uaddr_t *iua;
+
+ if (I40E_IS_MULTICAST(mac_addr))
+ return (EINVAL);
+
+ mutex_enter(&i40e->i40e_general_lock);
+
+ if (i40e->i40e_state & I40E_SUSPENDED) {
+ ret = ECANCELED;
+ goto done;
+ }
+
+ for (i = 0; i < i40e->i40e_resources.ifr_nmacfilt_used; i++) {
+ if (bcmp(mac_addr, i40e->i40e_uaddrs[i].iua_mac,
+ ETHERADDRL) == 0)
+ break;
+ }
+
+ if (i == i40e->i40e_resources.ifr_nmacfilt_used) {
+ ret = ENOENT;
+ goto done;
+ }
+
+ iua = &i40e->i40e_uaddrs[i];
+ ASSERT(i40e->i40e_resources.ifr_nmacfilt_used > 0);
+
+ bzero(&filt, sizeof (filt));
+ bcopy(mac_addr, filt.mac_addr, ETHERADDRL);
+ filt.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+
+ if (i40e_aq_remove_macvlan(hw, iua->iua_vsi, &filt, 1, NULL) !=
+ I40E_SUCCESS) {
+ i40e_error(i40e, "failed to remove mac address "
+ "%2x:%2x:%2x:%2x:%2x:%2x from unicast filter: %d",
+ mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5], filt.error_code);
+ ret = EIO;
+ goto done;
+ }
+
+ last = i40e->i40e_resources.ifr_nmacfilt_used - 1;
+ if (i != last) {
+ i40e_uaddr_t *src = &i40e->i40e_uaddrs[last];
+ bcopy(src, iua, sizeof (i40e_uaddr_t));
+ }
+
+ /*
+ * Set the multicast bit in the last one to indicate to ourselves that
+ * it's invalid.
+ */
+ bzero(&i40e->i40e_uaddrs[last], sizeof (i40e_uaddr_t));
+ i40e->i40e_uaddrs[last].iua_mac[0] = 0x01;
+ i40e->i40e_resources.ifr_nmacfilt_used--;
+ ret = 0;
+done:
+ mutex_exit(&i40e->i40e_general_lock);
+
+ return (ret);
+}
+
+static int
+i40e_group_add_mac(void *arg, const uint8_t *mac_addr)
+{
+ i40e_t *i40e = arg;
+ struct i40e_hw *hw = &i40e->i40e_hw_space;
+ int i, ret;
+ i40e_uaddr_t *iua;
+ struct i40e_aqc_add_macvlan_element_data filt;
+
+ if (I40E_IS_MULTICAST(mac_addr))
+ return (EINVAL);
+
+ mutex_enter(&i40e->i40e_general_lock);
+ if (i40e->i40e_state & I40E_SUSPENDED) {
+ ret = ECANCELED;
+ goto done;
+ }
+
+ if (i40e->i40e_resources.ifr_nmacfilt ==
+ i40e->i40e_resources.ifr_nmacfilt_used) {
+ ret = ENOSPC;
+ goto done;
+ }
+
+ for (i = 0; i < i40e->i40e_resources.ifr_nmacfilt_used; i++) {
+ if (bcmp(mac_addr, i40e->i40e_uaddrs[i].iua_mac,
+ ETHERADDRL) == 0) {
+ ret = EEXIST;
+ goto done;
+ }
+ }
+
+ /*
+ * Note, the general use of the i40e_vsi_id will have to be refactored
+ * when we have proper group support.
+ */
+ bzero(&filt, sizeof (filt));
+ bcopy(mac_addr, filt.mac_addr, ETHERADDRL);
+ filt.flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+
+ if ((ret = i40e_aq_add_macvlan(hw, i40e->i40e_vsi_id, &filt, 1,
+ NULL)) != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to add mac address "
+ "%2x:%2x:%2x:%2x:%2x:%2x to unicast filter: %d",
+ mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5], ret);
+ ret = EIO;
+ goto done;
+ }
+
+ iua = &i40e->i40e_uaddrs[i40e->i40e_resources.ifr_nmacfilt_used];
+ bcopy(mac_addr, iua->iua_mac, ETHERADDRL);
+ iua->iua_vsi = i40e->i40e_vsi_id;
+ i40e->i40e_resources.ifr_nmacfilt_used++;
+ ASSERT(i40e->i40e_resources.ifr_nmacfilt_used <=
+ i40e->i40e_resources.ifr_nmacfilt);
+ ret = 0;
+done:
+ mutex_exit(&i40e->i40e_general_lock);
+ return (ret);
+}
+
+static int
+i40e_m_start(void *arg)
+{
+ i40e_t *i40e = arg;
+ int rc = 0;
+
+ mutex_enter(&i40e->i40e_general_lock);
+ if (i40e->i40e_state & I40E_SUSPENDED) {
+ rc = ECANCELED;
+ goto done;
+ }
+
+ if (!i40e_start(i40e, B_TRUE)) {
+ rc = EIO;
+ goto done;
+ }
+
+ atomic_or_32(&i40e->i40e_state, I40E_STARTED);
+done:
+ mutex_exit(&i40e->i40e_general_lock);
+
+ return (rc);
+}
+
+static void
+i40e_m_stop(void *arg)
+{
+ i40e_t *i40e = arg;
+
+ mutex_enter(&i40e->i40e_general_lock);
+
+ if (i40e->i40e_state & I40E_SUSPENDED)
+ goto done;
+
+ atomic_and_32(&i40e->i40e_state, ~I40E_STARTED);
+ i40e_stop(i40e, B_TRUE);
+done:
+ mutex_exit(&i40e->i40e_general_lock);
+}
+
+/*
+ * Enable and disable promiscuous mode as requested. We have to toggle both
+ * unicast and multicast. Note that multicast may already be enabled, because
+ * i40e_m_multicast may toggle it itself. See i40e_main.c for more information
+ * on this.
+ */
+static int
+i40e_m_promisc(void *arg, boolean_t on)
+{
+ i40e_t *i40e = arg;
+ struct i40e_hw *hw = &i40e->i40e_hw_space;
+ int ret = 0, err = 0;
+
+ mutex_enter(&i40e->i40e_general_lock);
+ if (i40e->i40e_state & I40E_SUSPENDED) {
+ ret = ECANCELED;
+ goto done;
+ }
+
+
+ ret = i40e_aq_set_vsi_unicast_promiscuous(hw, i40e->i40e_vsi_id,
+ on, NULL);
+ if (ret != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to %s unicast promiscuity on "
+ "the default VSI: %d", on == B_TRUE ? "enable" : "disable",
+ ret);
+ err = EIO;
+ goto done;
+ }
+
+ /*
+ * If we have a non-zero mcast_promisc_count, then multicast
+ * promiscuity is already enabled and we must leave it untouched.
+ */
+ if (i40e->i40e_mcast_promisc_count > 0) {
+ i40e->i40e_promisc_on = on;
+ goto done;
+ }
+
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, i40e->i40e_vsi_id,
+ on, NULL);
+ if (ret != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to %s multicast promiscuity on "
+ "the default VSI: %d", on == B_TRUE ? "enable" : "disable",
+ ret);
+
+ /*
+ * Try our best to put us back into a state that MAC expects us
+ * to be in.
+ */
+ ret = i40e_aq_set_vsi_unicast_promiscuous(hw, i40e->i40e_vsi_id,
+ !on, NULL);
+ if (ret != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to %s unicast promiscuity on "
+ "the default VSI after toggling multicast failed: "
+ "%d", on == B_TRUE ? "disable" : "enable", ret);
+ }
+
+ err = EIO;
+ goto done;
+ } else {
+ i40e->i40e_promisc_on = on;
+ }
+
+done:
+ mutex_exit(&i40e->i40e_general_lock);
+ return (err);
+}
+
+/*
+ * See the big theory statement in i40e_main.c for multicast address management.
+ */
+static int
+i40e_multicast_add(i40e_t *i40e, const uint8_t *multicast_address)
+{
+ struct i40e_hw *hw = &i40e->i40e_hw_space;
+ struct i40e_aqc_add_macvlan_element_data filt;
+ i40e_maddr_t *mc;
+ int ret;
+
+ ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
+
+ if (i40e->i40e_resources.ifr_nmcastfilt_used ==
+ i40e->i40e_resources.ifr_nmcastfilt) {
+ if (i40e->i40e_mcast_promisc_count == 0 &&
+ i40e->i40e_promisc_on == B_FALSE) {
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
+ i40e->i40e_vsi_id, B_TRUE, NULL);
+ if (ret != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to enable promiscuous "
+ "mode on VSI %d: %d", i40e->i40e_vsi_id,
+ ret);
+ return (EIO);
+ }
+ }
+ i40e->i40e_mcast_promisc_count++;
+ return (0);
+ }
+
+ mc = &i40e->i40e_maddrs[i40e->i40e_resources.ifr_nmcastfilt_used];
+ bzero(&filt, sizeof (filt));
+ bcopy(multicast_address, filt.mac_addr, ETHERADDRL);
+ filt.flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
+ I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+
+ if ((ret = i40e_aq_add_macvlan(hw, i40e->i40e_vsi_id, &filt, 1,
+ NULL)) != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to add mac address "
+ "%2x:%2x:%2x:%2x:%2x:%2x to multicast filter: %d",
+ multicast_address[0], multicast_address[1],
+ multicast_address[2], multicast_address[3],
+ multicast_address[4], multicast_address[5],
+ ret);
+ return (EIO);
+ }
+
+ bcopy(multicast_address, mc->ima_mac, ETHERADDRL);
+ i40e->i40e_resources.ifr_nmcastfilt_used++;
+ return (0);
+}
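+
+/*
+ * To make the counting above concrete (a sketch, assuming ifr_nmcastfilt is
+ * 2): adding three multicast addresses consumes both hash filters and then
+ * bumps i40e_mcast_promisc_count to 1, enabling multicast promiscuity.
+ * Removing that third address in i40e_multicast_remove() drops the count back
+ * to zero and disables multicast promiscuity again, provided i40e_promisc_on
+ * hasn't separately been set via i40e_m_promisc().
+ */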
+
+/*
+ * See the big theory statement in i40e_main.c for multicast address management.
+ */
+static int
+i40e_multicast_remove(i40e_t *i40e, const uint8_t *multicast_address)
+{
+ int i, ret;
+ struct i40e_hw *hw = &i40e->i40e_hw_space;
+
+ ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
+
+ for (i = 0; i < i40e->i40e_resources.ifr_nmcastfilt_used; i++) {
+ struct i40e_aqc_remove_macvlan_element_data filt;
+ int last;
+
+ if (bcmp(multicast_address, i40e->i40e_maddrs[i].ima_mac,
+ ETHERADDRL) != 0) {
+ continue;
+ }
+
+ bzero(&filt, sizeof (filt));
+ bcopy(multicast_address, filt.mac_addr, ETHERADDRL);
+ filt.flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+
+ if (i40e_aq_remove_macvlan(hw, i40e->i40e_vsi_id,
+ &filt, 1, NULL) != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to remove mac address "
+ "%2x:%2x:%2x:%2x:%2x:%2x from multicast "
+ "filter: %d",
+ multicast_address[0], multicast_address[1],
+ multicast_address[2], multicast_address[3],
+ multicast_address[4], multicast_address[5],
+ filt.error_code);
+ return (EIO);
+ }
+
+ last = i40e->i40e_resources.ifr_nmcastfilt_used - 1;
+ if (i != last) {
+ bcopy(&i40e->i40e_maddrs[last], &i40e->i40e_maddrs[i],
+ sizeof (i40e_maddr_t));
+ bzero(&i40e->i40e_maddrs[last], sizeof (i40e_maddr_t));
+ }
+
+ ASSERT(i40e->i40e_resources.ifr_nmcastfilt_used > 0);
+ i40e->i40e_resources.ifr_nmcastfilt_used--;
+ return (0);
+ }
+
+ if (i40e->i40e_mcast_promisc_count > 0) {
+ if (i40e->i40e_mcast_promisc_count == 1 &&
+ i40e->i40e_promisc_on == B_FALSE) {
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
+ i40e->i40e_vsi_id, B_FALSE, NULL);
+ if (ret != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to disable "
+ "promiscuous mode on VSI %d: %d",
+ i40e->i40e_vsi_id, ret);
+ return (EIO);
+ }
+ }
+ i40e->i40e_mcast_promisc_count--;
+
+ return (0);
+ }
+
+ return (ENOENT);
+}
+
+static int
+i40e_m_multicast(void *arg, boolean_t add, const uint8_t *multicast_address)
+{
+ i40e_t *i40e = arg;
+ int rc;
+
+ mutex_enter(&i40e->i40e_general_lock);
+
+ if (i40e->i40e_state & I40E_SUSPENDED) {
+ mutex_exit(&i40e->i40e_general_lock);
+ return (ECANCELED);
+ }
+
+ if (add == B_TRUE) {
+ rc = i40e_multicast_add(i40e, multicast_address);
+ } else {
+ rc = i40e_multicast_remove(i40e, multicast_address);
+ }
+
+ mutex_exit(&i40e->i40e_general_lock);
+ return (rc);
+}
+
+/* ARGSUSED */
+static void
+i40e_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
+{
+ /*
+ * At this time, we don't support toggling i40e into loopback mode.
+ * It's questionable how much value this has when there's no clear,
+ * supported way to toggle this behavior from userland.
+ */
+ miocnak(q, mp, 0, EINVAL);
+}
+
+static int
+i40e_ring_start(mac_ring_driver_t rh, uint64_t gen_num)
+{
+ i40e_trqpair_t *itrq = (i40e_trqpair_t *)rh;
+
+ /*
+ * GLDv3 requires we keep track of a generation number, which it uses
+ * to determine whether or not a ring is active.
+ */
+ mutex_enter(&itrq->itrq_rx_lock);
+ itrq->itrq_rxgen = gen_num;
+ mutex_exit(&itrq->itrq_rx_lock);
+ return (0);
+}
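+
+/*
+ * Illustrative flow (a sketch): MAC hands us a new generation number through
+ * mri_start (i40e_ring_start() above); every mblk chain we later deliver via
+ * mac_rx_ring(..., itrq->itrq_rxgen) in the interrupt path carries that
+ * number, which lets MAC discard packets belonging to a ring generation it
+ * has already quiesced.
+ */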
+
+/* ARGSUSED */
+static int
+i40e_rx_ring_intr_enable(mac_intr_handle_t intrh)
+{
+ i40e_trqpair_t *itrq = (i40e_trqpair_t *)intrh;
+ i40e_t *i40e = itrq->itrq_i40e;
+
+ mutex_enter(&i40e->i40e_general_lock);
+ ASSERT(i40e->i40e_intr_poll == B_TRUE);
+ i40e_intr_rx_queue_enable(i40e, itrq->itrq_index);
+ i40e->i40e_intr_poll = B_FALSE;
+ mutex_exit(&i40e->i40e_general_lock);
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+i40e_rx_ring_intr_disable(mac_intr_handle_t intrh)
+{
+ i40e_trqpair_t *itrq = (i40e_trqpair_t *)intrh;
+ i40e_t *i40e = itrq->itrq_i40e;
+
+ mutex_enter(&i40e->i40e_general_lock);
+ i40e_intr_rx_queue_disable(i40e, itrq->itrq_index);
+ i40e->i40e_intr_poll = B_TRUE;
+ mutex_exit(&i40e->i40e_general_lock);
+
+ return (0);
+}
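+
+/*
+ * The interrupt-blanking handshake, roughly (a sketch): when MAC wants to
+ * poll, it calls mi_disable (i40e_rx_ring_intr_disable() above), which masks
+ * the queue and sets i40e_intr_poll; MAC then drives the ring through
+ * mri_poll (i40e_ring_rx_poll) until it re-arms us via mi_enable.
+ * i40e_intr_msix() consults i40e_intr_poll so that the interrupt path doesn't
+ * also sweep the ring while MAC is polling it.
+ */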
+
+/* ARGSUSED */
+static void
+i40e_fill_tx_ring(void *arg, mac_ring_type_t rtype, const int group_index,
+ const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
+{
+ i40e_t *i40e = arg;
+ mac_intr_t *mintr = &infop->mri_intr;
+ i40e_trqpair_t *itrq = &(i40e->i40e_trqpairs[ring_index]);
+
+ /*
+ * Note the group index here is expected to be -1 due to the fact that
+ * we're not actually grouping things tx-wise at this time.
+ */
+ ASSERT(group_index == -1);
+ ASSERT(ring_index < i40e->i40e_num_trqpairs);
+
+ itrq->itrq_mactxring = rh;
+ infop->mri_driver = (mac_ring_driver_t)itrq;
+ infop->mri_start = NULL;
+ infop->mri_stop = NULL;
+ infop->mri_tx = i40e_ring_tx;
+ infop->mri_stat = i40e_tx_ring_stat;
+
+ /*
+ * We only provide the handle in cases where we have MSI-X interrupts,
+ * to indicate that we'd actually support retargeting.
+ */
+ if (i40e->i40e_intr_type & DDI_INTR_TYPE_MSIX) {
+ mintr->mi_ddi_handle =
+ i40e->i40e_intr_handles[itrq->itrq_tx_intrvec];
+ }
+}
+
+/* ARGSUSED */
+static void
+i40e_fill_rx_ring(void *arg, mac_ring_type_t rtype, const int group_index,
+ const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
+{
+ i40e_t *i40e = arg;
+ mac_intr_t *mintr = &infop->mri_intr;
+ i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[ring_index];
+
+ /*
+ * We assert the group number and ring index to help sanity check
+ * ourselves and mark that we'll need to rework this when we have
+ * multiple groups.
+ */
+ ASSERT3S(group_index, ==, 0);
+ ASSERT3S(ring_index, <, i40e->i40e_num_trqpairs);
+
+ itrq->itrq_macrxring = rh;
+ infop->mri_driver = (mac_ring_driver_t)itrq;
+ infop->mri_start = i40e_ring_start;
+ infop->mri_stop = NULL;
+ infop->mri_poll = i40e_ring_rx_poll;
+ infop->mri_stat = i40e_rx_ring_stat;
+ mintr->mi_handle = (mac_intr_handle_t)itrq;
+ mintr->mi_enable = i40e_rx_ring_intr_enable;
+ mintr->mi_disable = i40e_rx_ring_intr_disable;
+
+ /*
+ * We only provide the handle in cases where we have MSI-X interrupts,
+ * to indicate that we'd actually support retargeting.
+ */
+ if (i40e->i40e_intr_type & DDI_INTR_TYPE_MSIX) {
+ mintr->mi_ddi_handle =
+ i40e->i40e_intr_handles[itrq->itrq_rx_intrvec];
+ }
+}
+
+/* ARGSUSED */
+static void
+i40e_fill_rx_group(void *arg, mac_ring_type_t rtype, const int index,
+ mac_group_info_t *infop, mac_group_handle_t gh)
+{
+ i40e_t *i40e = arg;
+
+ if (rtype != MAC_RING_TYPE_RX)
+ return;
+
+ /*
+ * Note, this is a simplified view of a group, given that we only have a
+ * single group and a single ring at the moment. We'll want to expand
+ * upon this as we leverage more hardware functionality.
+ */
+ i40e->i40e_rx_group_handle = gh;
+ infop->mgi_driver = (mac_group_driver_t)i40e;
+ infop->mgi_start = NULL;
+ infop->mgi_stop = NULL;
+ infop->mgi_addmac = i40e_group_add_mac;
+ infop->mgi_remmac = i40e_group_remove_mac;
+
+ ASSERT(i40e->i40e_num_rx_groups == I40E_GROUP_MAX);
+ infop->mgi_count = i40e->i40e_num_trqpairs;
+}
+
+static boolean_t
+i40e_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
+{
+ i40e_t *i40e = arg;
+ mac_capab_rings_t *cap_rings;
+
+ switch (cap) {
+ case MAC_CAPAB_HCKSUM: {
+ uint32_t *txflags = cap_data;
+
+ *txflags = 0;
+ if (i40e->i40e_tx_hcksum_enable == B_TRUE)
+ *txflags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM;
+ break;
+ }
+
+ case MAC_CAPAB_RINGS:
+ cap_rings = cap_data;
+ cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
+ switch (cap_rings->mr_type) {
+ case MAC_RING_TYPE_TX:
+ /*
+ * Note, saying we have no groups, but some number of
+ * rings, indicates to MAC that it should create
+ * pseudo-groups with one for each TX ring. This may not
+ * be the long term behavior we want, but it'll work for
+ * now.
+ */
+ cap_rings->mr_gnum = 0;
+ cap_rings->mr_rnum = i40e->i40e_num_trqpairs;
+ cap_rings->mr_rget = i40e_fill_tx_ring;
+ cap_rings->mr_gget = NULL;
+ cap_rings->mr_gaddring = NULL;
+ cap_rings->mr_gremring = NULL;
+ break;
+ case MAC_RING_TYPE_RX:
+ cap_rings->mr_rnum = i40e->i40e_num_trqpairs;
+ cap_rings->mr_rget = i40e_fill_rx_ring;
+ cap_rings->mr_gnum = I40E_GROUP_MAX;
+ cap_rings->mr_gget = i40e_fill_rx_group;
+ cap_rings->mr_gaddring = NULL;
+ cap_rings->mr_gremring = NULL;
+ break;
+ default:
+ return (B_FALSE);
+ }
+ break;
+ default:
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
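+
+/*
+ * The topology MAC ends up with (a sketch, assuming a single queue pair and
+ * I40E_GROUP_MAX == 1): one static RX group containing one RX ring, and,
+ * because we report mr_gnum = 0 on the TX side, one pseudo TX group per TX
+ * ring.
+ */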
+
+/* ARGSUSED */
+static int
+i40e_m_setprop_private(i40e_t *i40e, const char *pr_name, uint_t pr_valsize,
+ const void *pr_val)
+{
+ int ret;
+ long val;
+ char *eptr;
+
+ ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
+
+ if ((ret = ddi_strtol(pr_val, &eptr, 10, &val)) != 0)
+ return (ret);
+ if (*eptr != '\0')
+ return (EINVAL);
+
+ if (strcmp(pr_name, I40E_PROP_RX_DMA_THRESH) == 0) {
+ if (val < I40E_MIN_RX_DMA_THRESH ||
+ val > I40E_MAX_RX_DMA_THRESH) {
+ return (EINVAL);
+ }
+ i40e->i40e_rx_dma_min = (uint32_t)val;
+ return (0);
+ }
+
+ if (strcmp(pr_name, I40E_PROP_TX_DMA_THRESH) == 0) {
+ if (val < I40E_MIN_TX_DMA_THRESH ||
+ val > I40E_MAX_TX_DMA_THRESH) {
+ return (EINVAL);
+ }
+ i40e->i40e_tx_dma_min = (uint32_t)val;
+ return (0);
+ }
+
+ if (strcmp(pr_name, I40E_PROP_RX_ITR) == 0) {
+ if (val < I40E_MIN_ITR ||
+ val > I40E_MAX_ITR) {
+ return (EINVAL);
+ }
+ i40e->i40e_rx_itr = (uint32_t)val;
+ i40e_intr_set_itr(i40e, I40E_ITR_INDEX_RX, i40e->i40e_rx_itr);
+ return (0);
+ }
+
+ if (strcmp(pr_name, I40E_PROP_TX_ITR) == 0) {
+ if (val < I40E_MIN_ITR ||
+ val > I40E_MAX_ITR) {
+ return (EINVAL);
+ }
+ i40e->i40e_tx_itr = (uint32_t)val;
+ i40e_intr_set_itr(i40e, I40E_ITR_INDEX_TX, i40e->i40e_tx_itr);
+ return (0);
+ }
+
+ if (strcmp(pr_name, I40E_PROP_OTHER_ITR) == 0) {
+ if (val < I40E_MIN_ITR ||
+ val > I40E_MAX_ITR) {
+ return (EINVAL);
+ }
+ i40e->i40e_other_itr = (uint32_t)val;
+ i40e_intr_set_itr(i40e, I40E_ITR_INDEX_OTHER,
+ i40e->i40e_other_itr);
+ return (0);
+ }
+
+ return (ENOTSUP);
+}
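+
+/*
+ * For illustration (a sketch, not a transcript): these private properties
+ * arrive as strings, e.g. from dladm(1M) with something of the form
+ *
+ *	# dladm set-linkprop -p <I40E_PROP_RX_ITR>=25 i40e0
+ *
+ * which reaches i40e_m_setprop_private() with pr_val = "25", is bounds
+ * checked against I40E_MIN_ITR/I40E_MAX_ITR, and is then programmed through
+ * i40e_intr_set_itr().
+ */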
+
+static int
+i40e_m_getprop_private(i40e_t *i40e, const char *pr_name, uint_t pr_valsize,
+ void *pr_val)
+{
+ uint32_t val;
+
+ ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
+
+ if (strcmp(pr_name, I40E_PROP_RX_DMA_THRESH) == 0) {
+ val = i40e->i40e_rx_dma_min;
+ } else if (strcmp(pr_name, I40E_PROP_TX_DMA_THRESH) == 0) {
+ val = i40e->i40e_tx_dma_min;
+ } else if (strcmp(pr_name, I40E_PROP_RX_ITR) == 0) {
+ val = i40e->i40e_rx_itr;
+ } else if (strcmp(pr_name, I40E_PROP_TX_ITR) == 0) {
+ val = i40e->i40e_tx_itr;
+ } else if (strcmp(pr_name, I40E_PROP_OTHER_ITR) == 0) {
+ val = i40e->i40e_other_itr;
+ } else {
+ return (ENOTSUP);
+ }
+
+ if (snprintf(pr_val, pr_valsize, "%d", val) >= pr_valsize)
+ return (ERANGE);
+ return (0);
+}
+
+/*
+ * Annoyingly for private properties MAC seems to ignore default values that
+ * aren't strings. That means that we have to translate all of these into
+ * uint32_t's and instead we size the buffer to be large enough to hold a
+ * uint32_t.
+ */
+/* ARGSUSED */
+static void
+i40e_m_propinfo_private(i40e_t *i40e, const char *pr_name,
+ mac_prop_info_handle_t prh)
+{
+ char buf[64];
+ uint32_t def;
+
+ if (strcmp(pr_name, I40E_PROP_RX_DMA_THRESH) == 0) {
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
+ def = I40E_DEF_RX_DMA_THRESH;
+ mac_prop_info_set_range_uint32(prh,
+ I40E_MIN_RX_DMA_THRESH,
+ I40E_MAX_RX_DMA_THRESH);
+ } else if (strcmp(pr_name, I40E_PROP_TX_DMA_THRESH) == 0) {
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
+ def = I40E_DEF_TX_DMA_THRESH;
+ mac_prop_info_set_range_uint32(prh,
+ I40E_MIN_TX_DMA_THRESH,
+ I40E_MAX_TX_DMA_THRESH);
+ } else if (strcmp(pr_name, I40E_PROP_RX_ITR) == 0) {
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
+ def = I40E_DEF_RX_ITR;
+ mac_prop_info_set_range_uint32(prh, I40E_MIN_ITR, I40E_MAX_ITR);
+ } else if (strcmp(pr_name, I40E_PROP_TX_ITR) == 0) {
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
+ def = I40E_DEF_TX_ITR;
+ mac_prop_info_set_range_uint32(prh, I40E_MIN_ITR, I40E_MAX_ITR);
+ } else if (strcmp(pr_name, I40E_PROP_OTHER_ITR) == 0) {
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
+ def = I40E_DEF_OTHER_ITR;
+ mac_prop_info_set_range_uint32(prh, I40E_MIN_ITR, I40E_MAX_ITR);
+ } else {
+ return;
+ }
+
+ (void) snprintf(buf, sizeof (buf), "%d", def);
+ mac_prop_info_set_default_str(prh, buf);
+}
+
+static int
+i40e_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_valsize, const void *pr_val)
+{
+ uint32_t new_mtu;
+ i40e_t *i40e = arg;
+ int ret = 0;
+
+ mutex_enter(&i40e->i40e_general_lock);
+ if (i40e->i40e_state & I40E_SUSPENDED) {
+ mutex_exit(&i40e->i40e_general_lock);
+ return (ECANCELED);
+ }
+
+ switch (pr_num) {
+ /*
+ * These properties are always read-only across every device.
+ */
+ case MAC_PROP_DUPLEX:
+ case MAC_PROP_SPEED:
+ case MAC_PROP_STATUS:
+ case MAC_PROP_ADV_100FDX_CAP:
+ case MAC_PROP_ADV_1000FDX_CAP:
+ case MAC_PROP_ADV_10GFDX_CAP:
+ case MAC_PROP_ADV_40GFDX_CAP:
+ ret = ENOTSUP;
+ break;
+ /*
+ * These are read-only at this time as we don't support configuring
+ * auto-negotiation. See the theory statement in i40e_main.c.
+ */
+ case MAC_PROP_EN_100FDX_CAP:
+ case MAC_PROP_EN_1000FDX_CAP:
+ case MAC_PROP_EN_10GFDX_CAP:
+ case MAC_PROP_EN_40GFDX_CAP:
+ case MAC_PROP_AUTONEG:
+ case MAC_PROP_FLOWCTRL:
+ ret = ENOTSUP;
+ break;
+
+ case MAC_PROP_MTU:
+ bcopy(pr_val, &new_mtu, sizeof (new_mtu));
+ if (new_mtu == i40e->i40e_sdu)
+ break;
+
+ if (new_mtu < I40E_MIN_MTU ||
+ new_mtu > I40E_MAX_MTU) {
+ ret = EINVAL;
+ break;
+ }
+
+ if (i40e->i40e_state & I40E_STARTED) {
+ ret = EBUSY;
+ break;
+ }
+
+ ret = mac_maxsdu_update(i40e->i40e_mac_hdl, new_mtu);
+ if (ret == 0) {
+ i40e->i40e_sdu = new_mtu;
+ i40e_update_mtu(i40e);
+ }
+ break;
+
+ case MAC_PROP_PRIVATE:
+ ret = i40e_m_setprop_private(i40e, pr_name, pr_valsize, pr_val);
+ break;
+ default:
+ ret = ENOTSUP;
+ break;
+ }
+
+ mutex_exit(&i40e->i40e_general_lock);
+ return (ret);
+}
+
+static int
+i40e_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_valsize, void *pr_val)
+{
+ i40e_t *i40e = arg;
+ uint64_t speed;
+ int ret = 0;
+ uint8_t *u8;
+ link_flowctrl_t fctl;
+
+ mutex_enter(&i40e->i40e_general_lock);
+
+ switch (pr_num) {
+ case MAC_PROP_DUPLEX:
+ if (pr_valsize < sizeof (link_duplex_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ bcopy(&i40e->i40e_link_duplex, pr_val, sizeof (link_duplex_t));
+ break;
+ case MAC_PROP_SPEED:
+ if (pr_valsize < sizeof (uint64_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ speed = i40e->i40e_link_speed * 1000000ULL;
+ bcopy(&speed, pr_val, sizeof (speed));
+ break;
+ case MAC_PROP_STATUS:
+ if (pr_valsize < sizeof (link_state_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ bcopy(&i40e->i40e_link_state, pr_val, sizeof (link_state_t));
+ break;
+ case MAC_PROP_AUTONEG:
+ if (pr_valsize < sizeof (uint8_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ u8 = pr_val;
+ *u8 = 1;
+ break;
+ case MAC_PROP_FLOWCTRL:
+ /*
+ * Because we don't currently support hardware flow control, we
+ * just hardcode this to be none.
+ */
+ if (pr_valsize < sizeof (link_flowctrl_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ fctl = LINK_FLOWCTRL_NONE;
+ bcopy(&fctl, pr_val, sizeof (link_flowctrl_t));
+ break;
+ case MAC_PROP_MTU:
+ if (pr_valsize < sizeof (uint32_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ bcopy(&i40e->i40e_sdu, pr_val, sizeof (uint32_t));
+ break;
+
+ /*
+ * Because we don't let users control the speeds we may auto-negotiate
+ * to, the values of the ADV_ and EN_ will always be the same.
+ */
+ case MAC_PROP_ADV_100FDX_CAP:
+ case MAC_PROP_EN_100FDX_CAP:
+ if (pr_valsize < sizeof (uint8_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ u8 = pr_val;
+ *u8 = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_100MB) != 0;
+ break;
+ case MAC_PROP_ADV_1000FDX_CAP:
+ case MAC_PROP_EN_1000FDX_CAP:
+ if (pr_valsize < sizeof (uint8_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ u8 = pr_val;
+ *u8 = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_1GB) != 0;
+ break;
+ case MAC_PROP_ADV_10GFDX_CAP:
+ case MAC_PROP_EN_10GFDX_CAP:
+ if (pr_valsize < sizeof (uint8_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ u8 = pr_val;
+ *u8 = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_10GB) != 0;
+ break;
+ case MAC_PROP_ADV_40GFDX_CAP:
+ case MAC_PROP_EN_40GFDX_CAP:
+ if (pr_valsize < sizeof (uint8_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ u8 = pr_val;
+ *u8 = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_40GB) != 0;
+ break;
+ case MAC_PROP_PRIVATE:
+ ret = i40e_m_getprop_private(i40e, pr_name, pr_valsize, pr_val);
+ break;
+ default:
+ ret = ENOTSUP;
+ break;
+ }
+
+ mutex_exit(&i40e->i40e_general_lock);
+
+ return (ret);
+}
+
+static void
+i40e_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ mac_prop_info_handle_t prh)
+{
+ i40e_t *i40e = arg;
+
+ mutex_enter(&i40e->i40e_general_lock);
+
+ switch (pr_num) {
+ case MAC_PROP_DUPLEX:
+ case MAC_PROP_SPEED:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ break;
+ case MAC_PROP_FLOWCTRL:
+ /*
+ * At the moment, the driver doesn't support flow control, hence
+ * why this is set to read-only and none.
+ */
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ mac_prop_info_set_default_link_flowctrl(prh,
+ LINK_FLOWCTRL_NONE);
+ break;
+ case MAC_PROP_MTU:
+ mac_prop_info_set_range_uint32(prh, I40E_MIN_MTU, I40E_MAX_MTU);
+ break;
+
+ /*
+ * We set the defaults for these based upon the phy's ability to
+ * support the speeds. Note, auto-negotiation is required for fiber,
+ * hence it is read-only and always enabled. When we have access to
+ * copper phys we can revisit this.
+ */
+ case MAC_PROP_AUTONEG:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ mac_prop_info_set_default_uint8(prh, 1);
+ break;
+ case MAC_PROP_ADV_100FDX_CAP:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ mac_prop_info_set_default_uint8(prh,
+ (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_100MB) != 0);
+ break;
+ case MAC_PROP_EN_100FDX_CAP:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ mac_prop_info_set_default_uint8(prh,
+ (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_100MB) != 0);
+ break;
+ case MAC_PROP_ADV_1000FDX_CAP:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ mac_prop_info_set_default_uint8(prh,
+ (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_1GB) != 0);
+ break;
+ case MAC_PROP_EN_1000FDX_CAP:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ mac_prop_info_set_default_uint8(prh,
+ (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_1GB) != 0);
+ break;
+ case MAC_PROP_ADV_10GFDX_CAP:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ mac_prop_info_set_default_uint8(prh,
+ (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_10GB) != 0);
+ break;
+ case MAC_PROP_EN_10GFDX_CAP:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ mac_prop_info_set_default_uint8(prh,
+ (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_10GB) != 0);
+ break;
+ case MAC_PROP_ADV_40GFDX_CAP:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ mac_prop_info_set_default_uint8(prh,
+ (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_40GB) != 0);
+ break;
+ case MAC_PROP_EN_40GFDX_CAP:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ mac_prop_info_set_default_uint8(prh,
+ (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_40GB) != 0);
+ break;
+ case MAC_PROP_PRIVATE:
+ i40e_m_propinfo_private(i40e, pr_name, prh);
+ break;
+ default:
+ break;
+ }
+
+ mutex_exit(&i40e->i40e_general_lock);
+}
+
+#define I40E_M_CALLBACK_FLAGS \
+ (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
+
+static mac_callbacks_t i40e_m_callbacks = {
+ I40E_M_CALLBACK_FLAGS,
+ i40e_m_stat,
+ i40e_m_start,
+ i40e_m_stop,
+ i40e_m_promisc,
+ i40e_m_multicast,
+ NULL,
+ NULL,
+ NULL,
+ i40e_m_ioctl,
+ i40e_m_getcapab,
+ NULL,
+ NULL,
+ i40e_m_setprop,
+ i40e_m_getprop,
+ i40e_m_propinfo
+};
+
+boolean_t
+i40e_register_mac(i40e_t *i40e)
+{
+ struct i40e_hw *hw = &i40e->i40e_hw_space;
+ int status;
+ mac_register_t *mac = mac_alloc(MAC_VERSION);
+
+ if (mac == NULL)
+ return (B_FALSE);
+
+ mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
+ mac->m_driver = i40e;
+ mac->m_dip = i40e->i40e_dip;
+ mac->m_src_addr = hw->mac.addr;
+ mac->m_callbacks = &i40e_m_callbacks;
+ mac->m_min_sdu = 0;
+ mac->m_max_sdu = i40e->i40e_sdu;
+ mac->m_margin = VLAN_TAGSZ;
+ mac->m_priv_props = i40e_priv_props;
+ mac->m_v12n = MAC_VIRT_LEVEL1;
+
+ status = mac_register(mac, &i40e->i40e_mac_hdl);
+ if (status != 0)
+ i40e_error(i40e, "mac_register() returned %d", status);
+ mac_free(mac);
+
+ return (status == 0);
+}
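+
+/*
+ * A note on attach-time usage (a sketch, not a contract): i40e_register_mac()
+ * should only run once the MAC address and SDU have been settled, since MAC
+ * latches m_src_addr and m_max_sdu at mac_register() time; later MTU changes
+ * must instead flow through mac_maxsdu_update(), as i40e_m_setprop() does
+ * above.
+ */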
diff --git a/usr/src/uts/common/io/i40e/i40e_intr.c b/usr/src/uts/common/io/i40e/i40e_intr.c
new file mode 100644
index 0000000000..ba9bea7b20
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/i40e_intr.c
@@ -0,0 +1,757 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2016 Joyent, Inc.
+ */
+
+/*
+ * -------------------------
+ * Interrupt Handling Theory
+ * -------------------------
+ *
+ * There are a couple different sets of interrupts that we need to worry about:
+ *
+ * - Interrupts from receive queues
+ * - Interrupts from transmit queues
+ * - 'Other Interrupts', such as the administrative queue
+ *
+ * 'Other Interrupts' are asynchronous events such as a link status change event
+ * being posted to the administrative queue, unrecoverable ECC errors, and more.
+ * If we have something being posted to the administrative queue, then we go
+ * through and process it, because it's generally enabled as a separate logical
+ * interrupt. Note, we may need to do more here eventually. To re-enable the
+ * interrupts from the 'Other Interrupts' section, we need to clear the PBA and
+ * write ENA to PFINT_ICR0.
+ *
+ * Interrupts from the transmit and receive queues indicate that our requests
+ * have been processed. In the rx case, it means that we have data that we
+ * should take a look at and send up the stack. In the tx case, it means that
+ * data which we got from MAC has now been sent out on the wire and we can free
+ * the associated data. Most of the logic for acting upon the presence of this
+ * data can be found in i40e_transceiver.c which handles all of the DMA, rx, and
+ * tx operations. This file is dedicated to handling and dealing with interrupt
+ * processing.
+ *
+ * All devices supported by this driver support three kinds of interrupts:
+ *
+ * o Extended Message Signaled Interrupts (MSI-X)
+ * o Message Signaled Interrupts (MSI)
+ * o Legacy PCI interrupts (INTx)
+ *
+ * Generally speaking the hardware logically handles MSI and INTx the same and
+ * restricts us to only using a single interrupt, which isn't the interesting
+ * case. With MSI-X available, each physical function of the device provides the
+ * opportunity for multiple interrupts which is what we'll focus on.
+ *
+ * --------------------
+ * Interrupt Management
+ * --------------------
+ *
+ * By default, the admin queue, which consists of the asynchronous other
+ * interrupts, is always bound to MSI-X vector zero. Next, we spread out all of
+ * the other interrupts that we have available to us over the remaining
+ * interrupt vectors.
+ *
+ * This means that there may be multiple queues, both tx and rx, which are
+ * mapped to the same interrupt. When the interrupt fires, we'll have to check
+ * all of them for servicing, before we go through and indicate that the
+ * interrupt is claimed.
+ *
+ * The hardware provides the means of mapping various queues to MSI-X interrupts
+ * by programming the I40E_QINT_RQCTL() and I40E_QINT_TQCTL() registers. These
+ * registers can also be used to enable and disable whether or not the queue is
+ * a source of interrupts. As part of this, the hardware requires that we
+ * maintain a linked list of queues for each interrupt vector. While it may seem
+ * like this is only there for the purposes of ITRs, that's not the case. The
+ * first queue must be programmed in I40E_QINT_LNKLSTN(%vector) register. Each
+ * queue defines the next one in either the I40E_QINT_RQCTL or I40E_QINT_TQCTL
+ * register.
+ *
+ * Because we only have a single queue enabled at the moment and we always have
+ * two interrupts, we do something pretty simple and just know that there's one
+ * data queue in the interrupt handler. Longer term, we'll need to think harder
+ * about this, but for the moment it'll have to suffice.
+ *
+ * Finally, the individual interrupt vector itself has the ability to be enabled
+ * and disabled. The overall interrupt is controlled through the
+ * I40E_PFINT_DYN_CTLN() register. This is used to turn on and off the interrupt
+ * as a whole.
+ *
+ * Note that this means that both the individual queue and the interrupt as a
+ * whole can be toggled and re-enabled.
+ *
+ * -------------------
+ * Non-MSIX Management
+ * -------------------
+ *
+ * We may have a case where the Operating System is unable to actually allocate
+ * any MSI-X interrupts. In such a world, there is only one transmit/receive
+ * queue pair and it is bound to the same interrupt with index zero. The
+ * hardware doesn't allow us access to additional interrupt vectors in these
+ * modes. Note that technically we could support more transmit/receive queues if
+ * we wanted.
+ *
+ * In this world, because the interrupts for the admin queue and traffic are
+ * mixed together, we have to consult ICR0 to determine what has occurred. The
+ * QINT_TQCTL and QINT_RQCTL registers have a field, 'MSI-X 0 index' which
+ * allows us to set a specific bit in ICR0. There are up to seven such bits;
+ * however, we only use bits 0 and 1 for the rx and tx queues respectively.
+ * These are captured by the I40E_INTR_NOTX_{R|T}X_QUEUE and
+ * I40E_INTR_NOTX_{R|T}X_MASK definitions respectively.
+ *
+ * Unfortunately, these corresponding queue bits have no corresponding entry in
+ * the ICR0_ENA register. So instead, when enabling interrupts on the queues, we
+ * end up enabling it on the queue registers rather than on the MSI-X registers.
+ * The MSI-X world is different: there the queues are always left enabled as
+ * interrupt causes, and it's the interrupt vectors themselves that we toggle
+ * (ignoring the question of interrupt blanking for polling on rings).
+ *
+ * Finally, we still have to set up the interrupt linked list, but the list is
+ * instead rooted at the register I40E_PFINT_LNKLST0, rather than being tied to
+ * one of the other MSI-X registers.
+ *
+ * --------------------
+ * Interrupt Moderation
+ * --------------------
+ *
+ * The XL710 hardware has three different interrupt moderation registers per
+ * interrupt. Unsurprisingly, we use these for:
+ *
+ * o RX interrupts
+ * o TX interrupts
+ * o 'Other interrupts' (link status change, admin queue, etc.)
+ *
+ * By default, we throttle 'other interrupts' the most, then TX interrupts, and
+ * then RX interrupts. The default values for these were based on trying to
+ * reason about both the importance and frequency of events. Generally speaking
+ * 'other interrupts' are not very frequent and they're not important for the
+ * I/O data path in and of itself (though they may indicate issues with the I/O
+ * data path).
+ *
+ * On the flip side, when we're not polling, RX interrupts are very important.
+ * The longer we wait for them, the more latency that we inject into the system.
+ * However, if we allow interrupts to occur too frequently, we risk a few
+ * problems:
+ *
+ * 1) Abusing system resources. Without proper interrupt blanking and polling,
+ * we can see upwards of 200k-300k interrupts per second on the system.
+ *
+ * 2) Not enough data coalescing to enable polling. In other words, the more
+ * data that we allow to build up, the more likely we'll be able to enable
+ * polling mode, allowing us to better handle bulk data.
+ *
+ * In-between the 'other interrupts' and the TX interrupts we have the
+ * reclamation of TX buffers. This operation is not quite as important as we
+ * generally size the ring large enough that we should be able to reclaim a
+ * substantial amount of the descriptors that we have used per interrupt. So
+ * while it's important that this interrupt occur, we don't necessarily need it
+ * firing as frequently as RX; it doesn't, on its own, induce additional latency
+ * into the system.
+ *
+ * Based on all this, we currently assign static ITR values for the system.
+ * While we could move to a dynamic scheme (the hardware supports that), we'd
+ * first want evidence that the static values are causing problems that the
+ * added complexity would meaningfully help.
+ *
+ * Based on this, the default values that we have allow for the following
+ * interrupt thresholds:
+ *
+ * o 20k interrupts/s for RX
+ * o 5k interrupts/s for TX
+ * o 2k interrupts/s for 'Other Interrupts'
+ */
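+
+/*
+ * To make that arithmetic concrete (a sketch; this assumes the hardware ITR
+ * granularity is 2 microseconds, which is what the default ITR values were
+ * derived from):
+ *
+ *	interrupts/sec ~= 1 / (ITR * 2us)
+ *
+ *	ITR 25  ->  50us between interrupts -> ~20k interrupts/s (RX)
+ *	ITR 100 -> 200us between interrupts ->  ~5k interrupts/s (TX)
+ *	ITR 250 -> 500us between interrupts ->  ~2k interrupts/s (Other)
+ */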
+
+#include "i40e_sw.h"
+
+#define I40E_INTR_NOTX_QUEUE 0
+#define I40E_INTR_NOTX_INTR 0
+#define I40E_INTR_NOTX_RX_QUEUE 0
+#define I40E_INTR_NOTX_RX_MASK (1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_INTR_NOTX_TX_QUEUE 1
+#define I40E_INTR_NOTX_TX_MASK (1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+
+void
+i40e_intr_set_itr(i40e_t *i40e, i40e_itr_index_t itr, uint_t val)
+{
+ int i;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ VERIFY3U(val, <=, I40E_MAX_ITR);
+ VERIFY3U(itr, <, I40E_ITR_INDEX_NONE);
+
+ /*
+ * The ITR for 'other' interrupts is always programmed on interrupt
+ * zero; the same is true of every ITR when we're not using MSI-X.
+ */
+ if (itr == I40E_ITR_INDEX_OTHER ||
+ i40e->i40e_intr_type != DDI_INTR_TYPE_MSIX) {
+ I40E_WRITE_REG(hw, I40E_PFINT_ITR0(itr), val);
+ return;
+ }
+
+ for (i = 1; i < i40e->i40e_intr_count; i++) {
+ I40E_WRITE_REG(hw, I40E_PFINT_ITRN(itr, i - 1), val);
+ }
+}
+
+/*
+ * Re-enable the adminq. Note that the adminq doesn't have a traditional queue
+ * associated with it from an interrupt perspective and just lives on ICR0.
+ * However when MSI-X interrupts are not being used, then this also enables and
+ * disables those interrupts.
+ */
+static void
+i40e_intr_adminq_enable(i40e_t *i40e)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ uint32_t reg;
+
+ reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (I40E_ITR_INDEX_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, reg);
+ i40e_flush(hw);
+}
+
+static void
+i40e_intr_adminq_disable(i40e_t *i40e)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ uint32_t reg;
+
+ reg = I40E_ITR_INDEX_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, reg);
+}
+
+static void
+i40e_intr_io_enable(i40e_t *i40e, int vector)
+{
+ uint32_t reg;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_INDEX_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vector - 1), reg);
+}
+
+static void
+i40e_intr_io_disable(i40e_t *i40e, int vector)
+{
+ uint32_t reg;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ reg = I40E_ITR_INDEX_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vector - 1), reg);
+}
+
+/*
+ * When MSI-X interrupts are being used, then we can enable the actual
+ * interrupts themselves. However, when they are not, we instead have to turn
+ * towards the queue's CAUSE_ENA bit and enable that.
+ */
+void
+i40e_intr_io_enable_all(i40e_t *i40e)
+{
+ if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) {
+ int i;
+
+ for (i = 1; i < i40e->i40e_intr_count; i++) {
+ i40e_intr_io_enable(i40e, i);
+ }
+ } else {
+ uint32_t reg;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ reg = I40E_READ_REG(hw, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
+ reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
+
+ reg = I40E_READ_REG(hw, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
+ reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
+ }
+}
+
+/*
+ * When MSI-X interrupts are being used, then we can disable the actual
+ * interrupts themselves. However, when they are not, we instead have to turn
+ * towards the queue's CAUSE_ENA bit and disable that.
+ */
+void
+i40e_intr_io_disable_all(i40e_t *i40e)
+{
+ if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) {
+ int i;
+
+ for (i = 1; i < i40e->i40e_intr_count; i++) {
+ i40e_intr_io_disable(i40e, i);
+ }
+ } else {
+ uint32_t reg;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ reg = I40E_READ_REG(hw, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
+ reg &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
+
+ reg = I40E_READ_REG(hw, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
+ reg &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
+ }
+}
+
+/*
+ * As part of disabling the tx and rx queues, we're technically supposed to
+ * remove the linked list entries. The simplest way is to clear the LNKLSTN
+ * register by setting it to I40E_QUEUE_TYPE_EOL (0x7FF).
+ *
+ * Note all of the FM register access checks are performed by the caller.
+ */
+void
+i40e_intr_io_clear_cause(i40e_t *i40e)
+{
+ int i;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ if (i40e->i40e_intr_type != DDI_INTR_TYPE_MSIX) {
+ uint32_t reg;
+ reg = I40E_QUEUE_TYPE_EOL;
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, reg);
+ return;
+ }
+
+ for (i = 1; i < i40e->i40e_intr_count; i++) {
+ uint32_t reg;
+#ifdef DEBUG
+ /*
+ * Verify that the interrupt in question is disabled. This is a
+ * prerequisite of modifying the data in question.
+ */
+ reg = I40E_READ_REG(hw, I40E_PFINT_DYN_CTLN(i - 1));
+ VERIFY0(reg & I40E_PFINT_DYN_CTLN_INTENA_MASK);
+#endif
+ reg = I40E_QUEUE_TYPE_EOL;
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(i - 1), reg);
+ }
+
+ i40e_flush(hw);
+}
+
+/*
+ * Finalize interrupt handling. Mostly this disables the admin queue.
+ */
+void
+i40e_intr_chip_fini(i40e_t *i40e)
+{
+#ifdef DEBUG
+ int i;
+ uint32_t reg;
+
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ /*
+ * Take a look and verify that all other interrupts have been disabled
+ * and the interrupt linked lists have been zeroed.
+ */
+ if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) {
+ for (i = 1; i < i40e->i40e_intr_count; i++) {
+ reg = I40E_READ_REG(hw, I40E_PFINT_DYN_CTLN(i - 1));
+ VERIFY0(reg & I40E_PFINT_DYN_CTLN_INTENA_MASK);
+
+ reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1));
+ VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
+ }
+ }
+#endif
+
+ i40e_intr_adminq_disable(i40e);
+}
+
+/*
+ * Enable all of the queues and set the corresponding LNKLSTN registers. Note
+ * that we always enable queues as interrupt sources, even though we don't
+ * enable the MSI-X interrupt vectors.
+ */
+static void
+i40e_intr_init_queue_msix(i40e_t *i40e)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ uint32_t reg;
+
+ /*
+ * Because we only have a single queue, just do something simple now.
+ * How this all works will need to really be properly redone based on
+ * the bit maps, etc. Note that we skip the ITR logic for the moment,
+ * just to make our lives as explicit and simple as possible.
+ */
+ reg = (0 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(0), reg);
+
+ reg = (1 << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (0 << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(0), reg);
+
+ reg = (1 << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
+ I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+
+ I40E_WRITE_REG(hw, I40E_QINT_TQCTL(0), reg);
+}
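+
+/*
+ * The single-entry list programmed above ends up looking like (a sketch):
+ *
+ *	PFINT_LNKLSTN(0) -> RX queue 0 -> TX queue 0 -> EOL (0x7FF)
+ *
+ * with both queue causes mapped to MSI-X vector 1, vector 0 being reserved
+ * for the adminq.
+ */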
+
+/*
+ * Set up a single queue to share the admin queue interrupt in the non-MSI-X
+ * world. Note we do not enable the queue as an interrupt cause at this time. We
+ * don't have any other vector of control here, unlike with the MSI-X interrupt
+ * case.
+ */
+static void
+i40e_intr_init_queue_shared(i40e_t *i40e)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ uint32_t reg;
+
+ VERIFY(i40e->i40e_intr_type == DDI_INTR_TYPE_FIXED ||
+ i40e->i40e_intr_type == DDI_INTR_TYPE_MSI);
+
+ reg = (I40E_INTR_NOTX_QUEUE << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT);
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, reg);
+
+ reg = (I40E_INTR_NOTX_INTR << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (I40E_INTR_NOTX_RX_QUEUE << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
+ (I40E_INTR_NOTX_QUEUE << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
+
+ reg = (I40E_INTR_NOTX_INTR << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (I40E_INTR_NOTX_TX_QUEUE << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ I40E_WRITE_REG(hw, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
+}
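+
+/*
+ * The resulting shared-interrupt layout (a sketch):
+ *
+ *	PFINT_LNKLST0 -> RX queue 0 -> TX queue 0 -> EOL
+ *
+ * with the RX and TX causes raising ICR0 queue bits 0 and 1 respectively (the
+ * I40E_INTR_NOTX_{RX,TX}_MASK definitions above), which i40e_intr_notx()
+ * later inspects to decide what work there is to do.
+ */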
+
+/*
+ * Enable the specified queue as a valid source of interrupts. Note, this should
+ * only be used as part of the GLDv3's interrupt blanking routines. The debug
+ * build assertions are specific to that.
+ */
+void
+i40e_intr_rx_queue_enable(i40e_t *i40e, uint_t queue)
+{
+ uint32_t reg;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
+ ASSERT(queue < i40e->i40e_num_trqpairs);
+
+ reg = I40E_READ_REG(hw, I40E_QINT_RQCTL(queue));
+ ASSERT0(reg & I40E_QINT_RQCTL_CAUSE_ENA_MASK);
+ reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(queue), reg);
+}
+
+/*
+ * Disable the specified queue as a valid source of interrupts. Note, this
+ * should only be used as part of the GLDv3's interrupt blanking routines. The
+ * debug build assertions are specific to that.
+ */
+void
+i40e_intr_rx_queue_disable(i40e_t *i40e, uint_t queue)
+{
+ uint32_t reg;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
+ ASSERT(queue < i40e->i40e_num_trqpairs);
+
+ reg = I40E_READ_REG(hw, I40E_QINT_RQCTL(queue));
+ ASSERT3U(reg & I40E_QINT_RQCTL_CAUSE_ENA_MASK, ==,
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK);
+ reg &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(queue), reg);
+}
+
+/*
+ * Start up the chip's interrupt handling. We not only configure the adminq
+ * here, but we also go through and configure all of the actual queues, the
+ * interrupt linked lists, and more.
+ */
+void
+i40e_intr_chip_init(i40e_t *i40e)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ uint32_t reg;
+
+ /*
+ * Ensure that all non adminq interrupts are disabled at the chip level.
+ */
+ i40e_intr_io_disable_all(i40e);
+
+ I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, 0);
+ (void) I40E_READ_REG(hw, I40E_PFINT_ICR0);
+
+ /*
+ * Always enable all of the other-class interrupts to be on their own
+ * ITR. This only needs to be set on interrupt zero, which has its own
+ * special setting.
+ */
+ reg = I40E_ITR_INDEX_OTHER << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT;
+ I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0, reg);
+
+ /*
+ * Enable interrupt types we expect to receive. At the moment, this
+ * is limited to the adminq; however, we'll want to review 11.2.2.9.22
+ * for more types here as we add support for detecting them, handling
+ * them, and resetting the device as appropriate.
+ */
+ reg = I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, reg);
+
+ /*
+ * Always set the interrupt linked list to empty. We'll come back and
+ * change this if MSI-X are actually on the scene.
+ */
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_TYPE_EOL);
+
+ i40e_intr_adminq_enable(i40e);
+
+ /*
+ * Set up all of the queues and map them to interrupts based on the bit
+ * assignments.
+ */
+ if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) {
+ i40e_intr_init_queue_msix(i40e);
+ } else {
+ i40e_intr_init_queue_shared(i40e);
+ }
+
+ /*
+ * Finally set all of the default ITRs for the interrupts. Note that the
+ * queues will have been set up above.
+ */
+ i40e_intr_set_itr(i40e, I40E_ITR_INDEX_RX, i40e->i40e_rx_itr);
+ i40e_intr_set_itr(i40e, I40E_ITR_INDEX_TX, i40e->i40e_tx_itr);
+ i40e_intr_set_itr(i40e, I40E_ITR_INDEX_OTHER, i40e->i40e_other_itr);
+}
+
+static void
+i40e_intr_adminq_work(i40e_t *i40e)
+{
+ struct i40e_hw *hw = &i40e->i40e_hw_space;
+ struct i40e_arq_event_info evt;
+ uint16_t remain = 1;
+
+ bzero(&evt, sizeof (struct i40e_arq_event_info));
+ evt.buf_len = I40E_ADMINQ_BUFSZ;
+ evt.msg_buf = i40e->i40e_aqbuf;
+
+ while (remain != 0) {
+ enum i40e_status_code ret;
+ uint16_t opcode;
+
+ /*
+ * At the moment, the only error code that seems to be returned
+ * is one saying that there's no work. In such a case we leave
+ * this be.
+ */
+ ret = i40e_clean_arq_element(hw, &evt, &remain);
+ if (ret != I40E_SUCCESS)
+ break;
+
+ opcode = LE_16(evt.desc.opcode);
+ switch (opcode) {
+ case i40e_aqc_opc_get_link_status:
+ mutex_enter(&i40e->i40e_general_lock);
+ i40e_link_check(i40e);
+ mutex_exit(&i40e->i40e_general_lock);
+ break;
+ default:
+ /*
+ * Longer term we'll want to enable other causes here
+ * and get these cleaned up and doing something.
+ */
+ break;
+ }
+ }
+}
+
+static void
+i40e_intr_rx_work(i40e_t *i40e, int queue)
+{
+ mblk_t *mp;
+ i40e_trqpair_t *itrq;
+
+ ASSERT(queue < i40e->i40e_num_trqpairs);
+ itrq = &i40e->i40e_trqpairs[queue];
+
+ mutex_enter(&itrq->itrq_rx_lock);
+ mp = i40e_ring_rx(itrq, I40E_POLL_NULL);
+ mutex_exit(&itrq->itrq_rx_lock);
+
+ if (mp != NULL) {
+ mac_rx_ring(i40e->i40e_mac_hdl, itrq->itrq_macrxring, mp,
+ itrq->itrq_rxgen);
+ }
+}
+
+static void
+i40e_intr_tx_work(i40e_t *i40e, int queue)
+{
+ i40e_trqpair_t *itrq;
+
+ itrq = &i40e->i40e_trqpairs[queue];
+ i40e_tx_recycle_ring(itrq);
+}
+
+/*
+ * At the moment, the only 'other' interrupt on ICR0 that we handle is the
+ * adminq. We should go through and support the other notifications at some
+ * point.
+ */
+static void
+i40e_intr_other_work(i40e_t *i40e)
+{
+ struct i40e_hw *hw = &i40e->i40e_hw_space;
+ uint32_t reg;
+
+ reg = I40E_READ_REG(hw, I40E_PFINT_ICR0);
+ if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
+ DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_DEGRADED);
+ atomic_or_32(&i40e->i40e_state, I40E_ERROR);
+ return;
+ }
+
+ if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
+ i40e_intr_adminq_work(i40e);
+
+ /*
+ * Make sure that the adminq interrupt is not masked and then explicitly
+ * enable the adminq and thus the other interrupt.
+ */
+ reg = I40E_READ_REG(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, reg);
+
+ i40e_intr_adminq_enable(i40e);
+}
+
+uint_t
+i40e_intr_msix(void *arg1, void *arg2)
+{
+ i40e_t *i40e = (i40e_t *)arg1;
+ int vector_idx = (int)(uintptr_t)arg2;
+
+ /*
+ * When using MSI-X interrupts, vector 0 is always reserved for the
+ * adminq at this time. Though longer term, we'll want to also bridge
+ * some I/O to them.
+ */
+ if (vector_idx == 0) {
+ i40e_intr_other_work(i40e);
+ return (DDI_INTR_CLAIMED);
+ }
+
+ VERIFY(vector_idx == 1);
+
+ /*
+ * Note that we explicitly do not check this value under the lock even
+ * though assignments to it are made while holding it. In this case, the cost of
+ * getting this wrong is at worst a bit of additional contention and
+ * even more rarely, a duplicated packet. However, the cost on the other
+ * hand is a lot more. This is something that as we more generally
+ * implement ring support we should revisit.
+ */
+ if (i40e->i40e_intr_poll != B_TRUE)
+ i40e_intr_rx_work(i40e, 0);
+ i40e_intr_tx_work(i40e, 0);
+ i40e_intr_io_enable(i40e, 1);
+
+ return (DDI_INTR_CLAIMED);
+}
+
+static uint_t
+i40e_intr_notx(i40e_t *i40e, boolean_t shared)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ uint32_t reg;
+ int ret = DDI_INTR_CLAIMED;
+
+ if (shared == B_TRUE) {
+ mutex_enter(&i40e->i40e_general_lock);
+ if (i40e->i40e_state & I40E_SUSPENDED) {
+ mutex_exit(&i40e->i40e_general_lock);
+ return (DDI_INTR_UNCLAIMED);
+ }
+ mutex_exit(&i40e->i40e_general_lock);
+ }
+
+ reg = I40E_READ_REG(hw, I40E_PFINT_ICR0);
+ if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
+ DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_DEGRADED);
+ atomic_or_32(&i40e->i40e_state, I40E_ERROR);
+ return (DDI_INTR_CLAIMED);
+ }
+
+ if (reg == 0) {
+ if (shared == B_TRUE)
+ ret = DDI_INTR_UNCLAIMED;
+ goto done;
+ }
+
+ if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
+ i40e_intr_adminq_work(i40e);
+
+ if (reg & I40E_INTR_NOTX_RX_MASK)
+ i40e_intr_rx_work(i40e, 0);
+
+ if (reg & I40E_INTR_NOTX_TX_MASK)
+ i40e_intr_tx_work(i40e, 0);
+
+done:
+ i40e_intr_adminq_enable(i40e);
+ return (ret);
+}
+
+/* ARGSUSED */
+uint_t
+i40e_intr_msi(void *arg1, void *arg2)
+{
+ i40e_t *i40e = (i40e_t *)arg1;
+
+ return (i40e_intr_notx(i40e, B_FALSE));
+}
+
+/* ARGSUSED */
+uint_t
+i40e_intr_legacy(void *arg1, void *arg2)
+{
+ i40e_t *i40e = (i40e_t *)arg1;
+
+ return (i40e_intr_notx(i40e, B_TRUE));
+}
diff --git a/usr/src/uts/common/io/i40e/i40e_main.c b/usr/src/uts/common/io/i40e/i40e_main.c
new file mode 100644
index 0000000000..91164abf87
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/i40e_main.c
@@ -0,0 +1,2883 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
+ * Copyright 2016 Joyent, Inc.
+ */
+
+/*
+ * i40e - Intel 10/40 Gb Ethernet driver
+ *
+ * The i40e driver is the main software device driver for the Intel 40 Gb family
+ * of devices. Note that these devices come in many flavors with both 40 GbE
+ * ports and 10 GbE ports. This device is the successor to the 82599 family of
+ * devices (ixgbe).
+ *
+ * Unlike previous generations of Intel 1 GbE and 10 GbE devices, the 40 GbE
+ * devices defined in the XL710 controller (previously known as Fortville) are a
+ * rather different beast and have a small switch embedded inside of them. In
+ * addition, the way that most of the programming is done has been overhauled.
+ * As opposed to just using PCIe memory mapped registers, it also has an
+ * administrative queue which is used to communicate with firmware running on
+ * the chip.
+ *
+ * Each physical function in the hardware shows up as a device that this driver
+ * will bind to. The hardware splits many resources evenly across all of the
+ * physical functions present on the device, while other resources are instead
+ * shared across the entire card, and it's up to the device driver to
+ * intelligently partition them.
+ *
+ * ------------
+ * Organization
+ * ------------
+ *
+ * This driver is made up of several files which have their own theory
+ * statements spread across them. We'll touch on the high level purpose of each
+ * file here, and then we'll get into more discussion on how the device is
+ * generally modelled with respect to the interfaces in illumos.
+ *
+ * i40e_gld.c: This file contains all of the bindings to MAC and the networking
+ * stack.
+ *
+ * i40e_intr.c: This file contains all of the interrupt service routines and
+ * contains logic to enable and disable interrupts on the hardware.
+ * It also contains the logic to map hardware resources such as the
+ * rings to and from interrupts and controls their ability to fire.
+ *
+ * There is a big theory statement on interrupts present there.
+ *
+ * i40e_main.c: The file that you're currently in. It interfaces with the
+ * traditional OS DDI interfaces and is in charge of configuring
+ * the device.
+ *
+ * i40e_osdep.[ch]: These files contain interfaces and definitions needed to
+ * work with Intel's common code for the device.
+ *
+ * i40e_stats.c: This file contains the general work and logic around our
+ * kstats. A theory statement on their organization and use of the
+ * hardware exists there.
+ *
+ * i40e_sw.h: This header file contains all of the primary structure definitions
+ * and constants that are used across the entire driver.
+ *
+ * i40e_transceiver.c: This file contains all of the logic for sending and
+ * receiving data. It contains all of the ring and DMA
+ * allocation logic, as well as, the actual interfaces to
+ * send and receive data.
+ *
+ * A big theory statement on ring management, descriptors,
+ * and how it ties into the OS is present there.
+ *
+ * --------------
+ * General Design
+ * --------------
+ *
+ * Before we go too far into the general way we've laid out data structures and
+ * the like, it's worth taking some time to explain how the hardware is
+ * organized. This organization informs a lot of how we do things at this time
+ * in the driver.
+ *
+ * Each physical device consists of one or more ports, which are considered
+ * physical functions in the PCI sense and thus each get enumerated by the
+ * system, resulting in an instance being created and attached to. While there
+ * are many resources that are unique to each physical function, e.g. each
+ * instance of the device, there are many that are shared across all of them.
+ * Several resources have an amount reserved for each VSI and then a static pool
+ * of resources, available for all functions on the card.
+ *
+ * The most important resources in hardware are its transmit and receive queue
+ * pairs (i40e_trqpair_t). These should be thought of as rings in GLDv3
+ * parlance. There are a set number of these on each device; however, they are
+ * statically partitioned among all of the different physical functions.
+ *
+ * 'Fortville' (the code name for this device family) is basically a switch. To
+ * map MAC addresses and other things to queues, we end up having to create
+ * Virtual Station Interfaces (VSIs) and establish forwarding rules that direct
+ * traffic to a queue. A VSI owns a collection of queues and has a series of
+ * forwarding rules that point to it. One way to think of this is to treat it
+ * like MAC does a VNIC. When MAC refers to a group, a collection of rings and
+ * classification resources, that is a VSI in i40e.
+ *
+ * The set of VSIs is shared across the entire device, though there may be some
+ * amount that are reserved to each PF. Because the GLDv3 does not let us change
+ * the number of groups dynamically, we instead statically divide this amount
+ * evenly between all the functions that exist. In addition, we have the same
+ * problem with the mac address forwarding rules. There is a static number of
+ * them, shared across all the functions.
+ *
+ * To handle both of these resources, what we end up doing is going through and
+ * determining which functions belong to the same device. Nominally one might do
+ * this by having a nexus driver; however, a prime requirement for a nexus
+ * driver is identifying the various children and activating them. While it is
+ * possible to get this information from NVRAM, we would end up duplicating a
+ * lot of the PCI enumeration logic. Really, at the end of the day, the device
+ * doesn't give us the traditional identification properties we want from a
+ * nexus driver.
+ *
+ * Instead, we rely on some properties that are guaranteed to be unique. While
+ * it might be tempting to leverage the PBA or serial number of the device from
+ * NVRAM, there is nothing that says that two devices can't be mis-programmed to
+ * have the same values in NVRAM. Instead, we uniquely identify a group of
+ * functions based on their parent in the /devices tree, their PCI bus and PCI
+ * function identifiers. Using any one of these alone may not be sufficient.
+ *
+ * For each unique PCI device that we encounter, we'll create a i40e_device_t.
+ * From there, because we don't have a good way to tell the GLDv3 about sharing
+ * resources between everything, we'll end up just dividing the resources
+ * evenly between all of the functions. Longer term, if we don't have to declare
+ * to the GLDv3 that these resources are shared, then we'll maintain a pool and
+ * have each PF allocate from the pool in the device; thus, if only two of four
+ * ports are being used, for example, then all of the resources can still be
+ * used.
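+ *
+ * For example (a sketch): a dual-port adapter enumerates as two PFs whose
+ * /devices parent and PCI bus/device numbers match; both i40e_t instances
+ * then point at the same i40e_device_t and are handed an even share of the
+ * card's filter and VSI budget.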
+ *
+ * -------------------------------------------
+ * Transmit and Receive Queue Pair Allocations
+ * -------------------------------------------
+ *
+ * NVRAM ends up assigning each PF its own share of the transmit and receive LAN
+ * queue pairs; we have no way of modifying it, only observing it. From there,
+ * it's up to us to map these queues to VSIs and VFs. Since we don't support any
+ * VFs at this time, we only focus on assignments to VSIs.
+ *
+ * At the moment, we use a static mapping of transmit/receive queue pairs to a
+ * given VSI (e.g. rings to a group). Though in the fullness of time, we want to
+ * make this something which is fully dynamic and take advantage of documented,
+ * but not yet available functionality for adding filters based on VXLAN and
+ * other encapsulation technologies.
+ *
+ * -------------------------------------
+ * Broadcast, Multicast, and Promiscuous
+ * -------------------------------------
+ *
+ * As part of the GLDv3, we need to make sure that we can handle receiving
+ * broadcast and multicast traffic, as well as enabling promiscuous mode when
+ * requested. GLDv3 requires that all broadcast and multicast traffic be
+ * retrieved by the default group, e.g. the first one. This is the same thing as
+ * the default VSI.
+ *
+ * To receive broadcast traffic, we enable it through the admin queue, rather
+ * than use one of our filters for it. For multicast traffic, we reserve a
+ * certain number of the hash filters and assign them to a given PF. When we
+ * exceed those, we then switch to using promiscuous mode for multicast traffic.
+ *
+ * More specifically, once we exceed the number of filters (indicated because
+ * the i40e_t`i40e_resources.ifr_nmcastfilt ==
+ * i40e_t`i40e_resources.ifr_nmcastfilt_used), we then instead need to toggle
+ * promiscuous mode. If promiscuous mode is toggled then we keep track of the
+ * number of MACs added to it by incrementing i40e_t`i40e_mcast_promisc_count.
+ * That will stay enabled until that count reaches zero indicating that we have
+ * only added multicast addresses that we have a corresponding entry for.
+ *
+ * Because MAC itself wants to toggle promiscuous mode, which includes both
+ * unicast and multicast traffic, we go through and keep track of that
+ * ourselves. That is maintained through the use of the i40e_t`i40e_promisc_on
+ * member.
+ *
+ * --------------
+ * VSI Management
+ * --------------
+ *
+ * At this time, we currently only support a single MAC group, and thus a single
+ * VSI. This VSI is considered the default VSI and should be the only one that
+ * exists after a reset. Currently it is stored as the member
+ * i40e_t`i40e_vsi_id. While this works for the moment and for an initial
+ * driver, it's not sufficient for the longer-term path of the driver. Instead,
+ * we'll want to actually have a unique i40e_vsi_t structure which is used
+ * everywhere. Note that this means that every place that uses the
+ * i40e_t`i40e_vsi_id will need to be refactored.
+ *
+ * ----------------
+ * Structure Layout
+ * ----------------
+ *
+ * The following image relates the core data structures together. The primary
+ * structure in the system is the i40e_t. It itself contains multiple rings,
+ * i40e_trqpair_t's which contain the various transmit and receive data. The
+ * receive data is stored outside of the i40e_trqpair_t and instead in the
+ * i40e_rx_data_t. The i40e_t has a corresponding i40e_device_t which keeps
+ * track of per-physical device state. Finally, for every active descriptor,
+ * there is a corresponding control block, which is where the
+ * i40e_rx_control_block_t and the i40e_tx_control_block_t come from.
+ *
+ * +-----------------------+ +-----------------------+
+ * | Global i40e_t list | | Global Device list |
+ * | | +--| |
+ * | i40e_glist | | | i40e_dlist |
+ * +-----------------------+ | +-----------------------+
+ * | v
+ * | +------------------------+ +-----------------------+
+ * | | Device-wide Structure |----->| Device-wide Structure |--> ...
+ * | | i40e_device_t | | i40e_device_t |
+ * | | | +-----------------------+
+ * | | dev_info_t * ------+--> Parent in devices tree.
+ * | | uint_t ------+--> PCI bus number
+ * | | uint_t ------+--> PCI device number
+ * | | uint_t ------+--> Number of functions
+ * | | i40e_switch_rsrcs_t ---+--> Captured total switch resources
+ * | | list_t ------+-------------+
+ * | +------------------------+ |
+ * | ^ |
+ * | +--------+ |
+ * | | v
+ * | +---------------------------+ | +-------------------+
+ * +->| GLDv3 Device, per PF |-----|-->| GLDv3 Device (PF) |--> ...
+ * | i40e_t | | | i40e_t |
+ * | **Primary Structure** | | +-------------------+
+ * | | |
+ * | i40e_device_t * --+-----+
+ * | i40e_state_t --+---> Device State
+ * | i40e_hw_t --+---> Intel common code structure
+ * | mac_handle_t --+---> GLDv3 handle to MAC
+ * | ddi_periodic_t --+---> Link activity timer
+ * | int (vsi_id) --+---> VSI ID, main identifier
+ * | i40e_func_rsrc_t --+---> Available hardware resources
+ * | i40e_switch_rsrc_t * --+---> Switch resource snapshot
+ * | i40e_sdu --+---> Current MTU
+ * | i40e_frame_max --+---> Current HW frame size
+ * | i40e_uaddr_t * --+---> Array of assigned unicast MACs
+ * | i40e_maddr_t * --+---> Array of assigned multicast MACs
+ * | i40e_mcast_promisc_count --+---> Active multicast state
+ * | i40e_promisc_on --+---> Current promiscuous mode state
+ * | int --+---> Number of transmit/receive pairs
+ * | kstat_t * --+---> PF kstats
+ * | kstat_t * --+---> VSI kstats
+ * | i40e_pf_stats_t --+---> PF kstat backing data
+ * | i40e_vsi_stats_t --+---> VSI kstat backing data
+ * | i40e_trqpair_t * --+---------+
+ * +---------------------------+ |
+ * |
+ * v
+ * +-------------------------------+ +-----------------------------+
+ * | Transmit/Receive Queue Pair |-------| Transmit/Receive Queue Pair |->...
+ * | i40e_trqpair_t | | i40e_trqpair_t |
+ * + Ring Data Structure | +-----------------------------+
+ * | |
+ * | mac_ring_handle_t +--> MAC RX ring handle
+ * | mac_ring_handle_t +--> MAC TX ring handle
+ * | i40e_rxq_stat_t --+--> RX Queue stats
+ * | i40e_txq_stat_t --+--> TX Queue stats
+ * | uint32_t (tx ring size) +--> TX Ring Size
+ * | uint32_t (tx free list size) +--> TX Free List Size
+ * | i40e_dma_buffer_t --------+--> TX Descriptor ring DMA
+ * | i40e_tx_desc_t * --------+--> TX descriptor ring
+ * | volatile uint32_t * +--> TX Write back head
+ * | uint32_t -------+--> TX ring head
+ * | uint32_t -------+--> TX ring tail
+ * | uint32_t -------+--> Num TX desc free
+ * | i40e_tx_control_block_t * --+--> TX control block array ---+
+ * | i40e_tx_control_block_t ** --+--> TCB work list ----+
+ * | i40e_tx_control_block_t ** --+--> TCB free list ---+
+ * | uint32_t -------+--> Free TCB count |
+ * | i40e_rx_data_t * -------+--+ v
+ * +-------------------------------+ | +---------------------------+
+ * | | Per-TX Frame Metadata |
+ * | | i40e_tx_control_block_t |
+ * +--------------------+ | |
+ * | mblk to transmit <--+--- mblk_t * |
+ * | type of transmit <--+--- i40e_tx_type_t |
+ * | TX DMA handle <--+--- ddi_dma_handle_t |
+ * v TX DMA buffer <--+--- i40e_dma_buffer_t |
+ * +------------------------------+ +---------------------------+
+ * | Core Receive Data |
+ * | i40e_rx_data_t |
+ * | |
+ * | i40e_dma_buffer_t --+--> RX descriptor DMA Data
+ * | i40e_rx_desc_t --+--> RX descriptor ring
+ * | uint32_t --+--> Next free desc.
+ * | i40e_rx_control_block_t * --+--> RX Control Block Array ---+
+ * | i40e_rx_control_block_t ** --+--> RCB work list ---+
+ * | i40e_rx_control_block_t ** --+--> RCB free list ---+
+ * +------------------------------+ |
+ * ^ |
+ * | +---------------------------+ |
+ * | | Per-RX Frame Metadata |<---------------+
+ * | | i40e_rx_control_block_t |
+ * | | |
+ * | | mblk_t * ----+--> Received mblk_t data
+ * | | uint32_t ----+--> Reference count
+ * | | i40e_dma_buffer_t ----+--> Receive data DMA info
+ * | | frtn_t ----+--> mblk free function info
+ * +-----+-- i40e_rx_data_t * |
+ * +---------------------------+
+ *
+ * -------------
+ * Lock Ordering
+ * -------------
+ *
+ * In order to ensure that we don't deadlock, the following represents the
+ * lock order being used. When grabbing locks, follow this order. Lower
+ * numbers are more important. Thus, the i40e_glock, which is number 0, must be
+ * taken before any other locks in the driver. On the other hand, the
+ * i40e_t`i40e_stat_lock has the highest number because it's the least
+ * important lock. Note that just because one lock is higher than another does
+ * not mean that all intermediary locks are required.
+ *
+ * 0) i40e_glock
+ * 1) i40e_t`i40e_general_lock
+ *
+ * 2) i40e_trqpair_t`itrq_rx_lock
+ * 3) i40e_trqpair_t`itrq_tx_lock
+ * 4) i40e_t`i40e_rx_pending_lock
+ * 5) i40e_trqpair_t`itrq_tcb_lock
+ *
+ * 6) i40e_t`i40e_stat_lock
+ *
+ * Rules and expectations:
+ *
+ * 1) A thread holding locks belonging to one PF should not hold locks
+ * belonging to a second. If for some reason this becomes necessary, locks
+ * should be grabbed based on the list order in the i40e_device_t, which
+ * implies that the i40e_glock is held.
+ *
+ * 2) When grabbing locks between multiple transmit and receive queues, the
+ * locks for the lowest number transmit/receive queue should be grabbed first.
+ *
+ * 3) When grabbing both the transmit and receive lock for a given queue, always
+ * grab i40e_trqpair_t`itrq_rx_lock before the i40e_trqpair_t`itrq_tx_lock.
+ *
+ * 4) The following pairs of locks are not expected to be held at the same time:
+ *
+ * o i40e_t`i40e_rx_pending_lock and i40e_trqpair_t`itrq_tcb_lock
+ *
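+ * As an illustrative sketch, a thread that needs a ring's receive and
+ * transmit locks as well as the general lock would acquire them as:
+ *
+ *     mutex_enter(&i40e->i40e_general_lock);     (order 1)
+ *     mutex_enter(&itrq->itrq_rx_lock);          (order 2)
+ *     mutex_enter(&itrq->itrq_tx_lock);          (order 3)
+ *
+ * and release them in the opposite order. Skipping a level (for example,
+ * taking only 1 and 3) is fine; what must never happen is acquiring a
+ * lower-numbered lock while holding a higher-numbered one.
+ *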
+ * -----------
+ * Future Work
+ * -----------
+ *
+ * At the moment the i40e driver is rather bare bones, allowing us to start
+ * getting data flowing and folks using it while we develop additional features.
+ * While bugs have been filed to cover this future work, the following gives an
+ * overview of expected work:
+ *
+ * o TSO support
+ * o RSS / multiple ring support
+ * o Multiple group support
+ * o DMA binding and breaking up the locking in ring recycling.
+ * o Enhanced detection of device errors
+ * o Participation in IRM
+ * o FMA device reset
+ * o Stall detection, temperature error detection, etc.
+ * o More dynamic resource pools
+ */
+
+#include "i40e_sw.h"
+
+static char i40e_ident[] = "Intel 10/40Gb Ethernet v1.0.0";
+
+/*
+ * The i40e_glock primarily protects the lists below and the i40e_device_t
+ * structures.
+ */
+static kmutex_t i40e_glock;
+static list_t i40e_glist;
+static list_t i40e_dlist;
+
+/*
+ * Access attributes for register mapping.
+ */
+static ddi_device_acc_attr_t i40e_regs_acc_attr = {
+ DDI_DEVICE_ATTR_V1,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC,
+ DDI_FLAGERR_ACC
+};
+
+/*
+ * Logging function for this driver.
+ */
+static void
+i40e_dev_err(i40e_t *i40e, int level, boolean_t console, const char *fmt,
+ va_list ap)
+{
+ char buf[1024];
+
+ (void) vsnprintf(buf, sizeof (buf), fmt, ap);
+
+ if (i40e == NULL) {
+ cmn_err(level, (console) ? "%s: %s" : "!%s: %s",
+ I40E_MODULE_NAME, buf);
+ } else {
+ dev_err(i40e->i40e_dip, level, (console) ? "%s" : "!%s",
+ buf);
+ }
+}
+
+/*
+ * Because there's the stupid trailing-comma problem with the C preprocessor
+ * and variable arguments, I need to instantiate these. Pardon the redundant
+ * code.
+ */
+void
+i40e_error(i40e_t *i40e, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ i40e_dev_err(i40e, CE_WARN, B_FALSE, fmt, ap);
+ va_end(ap);
+}
+
+void
+i40e_log(i40e_t *i40e, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ i40e_dev_err(i40e, CE_NOTE, B_FALSE, fmt, ap);
+ va_end(ap);
+}
+
+void
+i40e_notice(i40e_t *i40e, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ i40e_dev_err(i40e, CE_NOTE, B_TRUE, fmt, ap);
+ va_end(ap);
+}
+
+static void
+i40e_device_rele(i40e_t *i40e)
+{
+ i40e_device_t *idp = i40e->i40e_device;
+
+ if (idp == NULL)
+ return;
+
+ mutex_enter(&i40e_glock);
+ VERIFY(idp->id_nreg > 0);
+ list_remove(&idp->id_i40e_list, i40e);
+ idp->id_nreg--;
+ if (idp->id_nreg == 0) {
+ list_remove(&i40e_dlist, idp);
+ list_destroy(&idp->id_i40e_list);
+ kmem_free(idp->id_rsrcs, sizeof (i40e_switch_rsrc_t) *
+ idp->id_rsrcs_alloc);
+ kmem_free(idp, sizeof (i40e_device_t));
+ }
+ i40e->i40e_device = NULL;
+ mutex_exit(&i40e_glock);
+}
+
+static i40e_device_t *
+i40e_device_find(i40e_t *i40e, dev_info_t *parent, uint_t bus, uint_t device)
+{
+ i40e_device_t *idp;
+ mutex_enter(&i40e_glock);
+ for (idp = list_head(&i40e_dlist); idp != NULL;
+ idp = list_next(&i40e_dlist, idp)) {
+ if (idp->id_parent == parent && idp->id_pci_bus == bus &&
+ idp->id_pci_device == device) {
+ break;
+ }
+ }
+
+ if (idp != NULL) {
+ VERIFY(idp->id_nreg < idp->id_nfuncs);
+ idp->id_nreg++;
+ } else {
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ ASSERT(hw->num_ports > 0);
+ ASSERT(hw->num_partitions > 0);
+
+ /*
+ * The Intel common code doesn't keep track of the number of PCI
+ * functions, but it calculates it during the discovery of
+ * partitions and ports. So we undo the calculation that it does
+ * originally, as functions are evenly spread across ports in the
+ * rare case of partitions; for example, a device with four ports
+ * and a single partition has four PCI functions.
+ */
+ idp = kmem_alloc(sizeof (i40e_device_t), KM_SLEEP);
+ idp->id_parent = parent;
+ idp->id_pci_bus = bus;
+ idp->id_pci_device = device;
+ idp->id_nfuncs = hw->num_ports * hw->num_partitions;
+ idp->id_nreg = 1;
+ idp->id_rsrcs_alloc = i40e->i40e_switch_rsrc_alloc;
+ idp->id_rsrcs_act = i40e->i40e_switch_rsrc_actual;
+ idp->id_rsrcs = kmem_alloc(sizeof (i40e_switch_rsrc_t) *
+ idp->id_rsrcs_alloc, KM_SLEEP);
+ bcopy(i40e->i40e_switch_rsrcs, idp->id_rsrcs,
+ sizeof (i40e_switch_rsrc_t) * idp->id_rsrcs_alloc);
+ list_create(&idp->id_i40e_list, sizeof (i40e_t),
+ offsetof(i40e_t, i40e_dlink));
+
+ list_insert_tail(&i40e_dlist, idp);
+ }
+
+ list_insert_tail(&idp->id_i40e_list, i40e);
+ mutex_exit(&i40e_glock);
+
+ return (idp);
+}
+
+static void
+i40e_link_state_set(i40e_t *i40e, link_state_t state)
+{
+ if (i40e->i40e_link_state == state)
+ return;
+
+ i40e->i40e_link_state = state;
+ mac_link_update(i40e->i40e_mac_hdl, i40e->i40e_link_state);
+}
+
+/*
+ * This is a basic link check routine. Mostly we're using this just to see
+ * if we can get any accurate information about the state of the link being
+ * up or down, as well as updating the link state, speed, etc. information.
+ */
+void
+i40e_link_check(i40e_t *i40e)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ boolean_t ls;
+ int ret;
+
+ ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
+
+ hw->phy.get_link_info = B_TRUE;
+ if ((ret = i40e_get_link_status(hw, &ls)) != I40E_SUCCESS) {
+ i40e->i40e_s_link_status_errs++;
+ i40e->i40e_s_link_status_lasterr = ret;
+ return;
+ }
+
+ /*
+ * Firmware abstracts all of the mac and phy information for us, so we
+ * can use i40e_get_link_status to determine the current state.
+ */
+ if (ls == B_TRUE) {
+ enum i40e_aq_link_speed speed;
+
+ speed = i40e_get_link_speed(hw);
+
+ /*
+ * Translate from an i40e value to a value in Mbits/s.
+ */
+ switch (speed) {
+ case I40E_LINK_SPEED_100MB:
+ i40e->i40e_link_speed = 100;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ i40e->i40e_link_speed = 1000;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ i40e->i40e_link_speed = 10000;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ i40e->i40e_link_speed = 20000;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ i40e->i40e_link_speed = 40000;
+ break;
+ default:
+ i40e->i40e_link_speed = 0;
+ break;
+ }
+
+ /*
+ * At this time, hardware does not support half-duplex
+ * operation, hence why we don't ask the hardware about our
+ * current duplex and simply assume full duplex.
+ */
+ i40e->i40e_link_duplex = LINK_DUPLEX_FULL;
+ i40e_link_state_set(i40e, LINK_STATE_UP);
+ } else {
+ i40e->i40e_link_speed = 0;
+ i40e->i40e_link_duplex = 0;
+ i40e_link_state_set(i40e, LINK_STATE_DOWN);
+ }
+}
+
+static void
+i40e_rem_intrs(i40e_t *i40e)
+{
+ int i, rc;
+
+ for (i = 0; i < i40e->i40e_intr_count; i++) {
+ rc = ddi_intr_free(i40e->i40e_intr_handles[i]);
+ if (rc != DDI_SUCCESS) {
+ i40e_log(i40e, "failed to free interrupt %d: %d",
+ i, rc);
+ }
+ }
+
+ kmem_free(i40e->i40e_intr_handles, i40e->i40e_intr_size);
+ i40e->i40e_intr_handles = NULL;
+}
+
+static void
+i40e_rem_intr_handlers(i40e_t *i40e)
+{
+ int i, rc;
+
+ for (i = 0; i < i40e->i40e_intr_count; i++) {
+ rc = ddi_intr_remove_handler(i40e->i40e_intr_handles[i]);
+ if (rc != DDI_SUCCESS) {
+ i40e_log(i40e, "failed to remove interrupt %d: %d",
+ i, rc);
+ }
+ }
+}
+
+/*
+ * illumos Fault Management Architecture (FMA) support.
+ */
+
+int
+i40e_check_acc_handle(ddi_acc_handle_t handle)
+{
+ ddi_fm_error_t de;
+
+ ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
+ ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
+ return (de.fme_status);
+}
+
+int
+i40e_check_dma_handle(ddi_dma_handle_t handle)
+{
+ ddi_fm_error_t de;
+
+ ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
+ return (de.fme_status);
+}
+
+/*
+ * Fault service error handling callback function.
+ */
+/* ARGSUSED */
+static int
+i40e_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
+{
+ pci_ereport_post(dip, err, NULL);
+ return (err->fme_status);
+}
+
+static void
+i40e_fm_init(i40e_t *i40e)
+{
+ ddi_iblock_cookie_t iblk;
+
+ i40e->i40e_fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY,
+ i40e->i40e_dip, DDI_PROP_DONTPASS, "fm_capable",
+ DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
+ DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
+
+ if (i40e->i40e_fm_capabilities < 0) {
+ i40e->i40e_fm_capabilities = 0;
+ } else if (i40e->i40e_fm_capabilities > 0xf) {
+ i40e->i40e_fm_capabilities = DDI_FM_EREPORT_CAPABLE |
+ DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
+ DDI_FM_ERRCB_CAPABLE;
+ }
+
+ /*
+ * Only register with IO Fault Services if we have some capability
+ */
+ if (i40e->i40e_fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
+ i40e_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
+ } else {
+ i40e_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
+ }
+
+ if (i40e->i40e_fm_capabilities) {
+ ddi_fm_init(i40e->i40e_dip, &i40e->i40e_fm_capabilities, &iblk);
+
+ if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
+ DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
+ pci_ereport_setup(i40e->i40e_dip);
+ }
+
+ if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
+ ddi_fm_handler_register(i40e->i40e_dip,
+ i40e_fm_error_cb, (void*)i40e);
+ }
+ }
+
+ if (i40e->i40e_fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
+ i40e_init_dma_attrs(i40e, B_TRUE);
+ } else {
+ i40e_init_dma_attrs(i40e, B_FALSE);
+ }
+}
+
+static void
+i40e_fm_fini(i40e_t *i40e)
+{
+ if (i40e->i40e_fm_capabilities) {
+
+ if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
+ DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities))
+ pci_ereport_teardown(i40e->i40e_dip);
+
+ if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities))
+ ddi_fm_handler_unregister(i40e->i40e_dip);
+
+ ddi_fm_fini(i40e->i40e_dip);
+ }
+}
+
+void
+i40e_fm_ereport(i40e_t *i40e, char *detail)
+{
+ uint64_t ena;
+ char buf[FM_MAX_CLASS];
+
+ (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
+ ena = fm_ena_generate(0, FM_ENA_FMT1);
+ if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities)) {
+ ddi_fm_ereport_post(i40e->i40e_dip, buf, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
+ }
+}
+
+/*
+ * Here we're trying to get the ID of the default VSI. In general, when we come
+ * through and look at this shortly after attach, we expect there to only be a
+ * single element present, which is the default VSI. Importantly, each PF seems
+ * to not see any other devices, in part because of the simple switch mode that
+ * we're using. If for some reason we see more artifacts, we'll need to revisit
+ * what we're doing here.
+ */
+static int
+i40e_get_vsi_id(i40e_t *i40e)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ struct i40e_aqc_get_switch_config_resp *sw_config;
+ uint8_t aq_buf[I40E_AQ_LARGE_BUF];
+ uint16_t next = 0;
+ int rc;
+
+ /* LINTED: E_BAD_PTR_CAST_ALIGN */
+ sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
+ rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next,
+ NULL);
+ if (rc != I40E_SUCCESS) {
+ i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d\n",
+ rc, hw->aq.asq_last_status);
+ return (-1);
+ }
+
+ if (LE_16(sw_config->header.num_reported) != 1) {
+ i40e_error(i40e, "encountered multiple (%d) switching units "
+ "during attach, not proceeding",
+ LE_16(sw_config->header.num_reported));
+ return (-1);
+ }
+
+ return (sw_config->element[0].seid);
+}
+
+/*
+ * We need to fill the i40e_hw_t structure with the capabilities of this PF. We
+ * must also provide the memory for it; however, we don't need to keep it
+ * around after the call to the common code, which parses it into an internal
+ * structure.
+ */
+static boolean_t
+i40e_get_hw_capabilities(i40e_t *i40e, i40e_hw_t *hw)
+{
+ struct i40e_aqc_list_capabilities_element_resp *buf;
+ int rc;
+ size_t len;
+ uint16_t needed;
+ int nelems = I40E_HW_CAP_DEFAULT;
+
+ for (;;) {
+ len = nelems * sizeof (*buf);
+ ASSERT(len > 0);
+ buf = kmem_alloc(len, KM_SLEEP);
+ rc = i40e_aq_discover_capabilities(hw, buf, len,
+ &needed, i40e_aqc_opc_list_func_capabilities, NULL);
+ kmem_free(buf, len);
+
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM &&
+ nelems == I40E_HW_CAP_DEFAULT) {
+ if (nelems == needed) {
+ i40e_error(i40e, "Capability discovery failed "
+ "due to byzantine common code");
+ return (B_FALSE);
+ }
+ nelems = needed;
+ continue;
+ } else if (hw->aq.asq_last_status != I40E_AQ_RC_OK) {
+ i40e_error(i40e, "Capability discovery failed: %d", rc);
+ return (B_FALSE);
+ }
+
+ break;
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Obtain the switch's capabilities as seen by this PF and keep it around for
+ * our later use.
+ */
+static boolean_t
+i40e_get_switch_resources(i40e_t *i40e)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ uint8_t cnt = 2;
+ uint8_t act;
+ size_t size;
+ i40e_switch_rsrc_t *buf;
+
+ for (;;) {
+ enum i40e_status_code ret;
+ size = cnt * sizeof (i40e_switch_rsrc_t);
+ ASSERT(size > 0);
+ if (size > UINT16_MAX)
+ return (B_FALSE);
+ buf = kmem_alloc(size, KM_SLEEP);
+
+ ret = i40e_aq_get_switch_resource_alloc(hw, &act, buf,
+ cnt, NULL);
+ if (ret == I40E_ERR_ADMIN_QUEUE_ERROR &&
+ hw->aq.asq_last_status == I40E_AQ_RC_EINVAL) {
+ kmem_free(buf, size);
+ cnt += I40E_SWITCH_CAP_DEFAULT;
+ continue;
+ } else if (ret != I40E_SUCCESS) {
+ kmem_free(buf, size);
+ i40e_error(i40e,
+ "failed to retrieve switch resources: %d", ret);
+ return (B_FALSE);
+ }
+
+ break;
+ }
+
+ i40e->i40e_switch_rsrc_alloc = cnt;
+ i40e->i40e_switch_rsrc_actual = act;
+ i40e->i40e_switch_rsrcs = buf;
+
+ return (B_TRUE);
+}
+
+static void
+i40e_cleanup_resources(i40e_t *i40e)
+{
+ if (i40e->i40e_uaddrs != NULL) {
+ kmem_free(i40e->i40e_uaddrs, sizeof (i40e_uaddr_t) *
+ i40e->i40e_resources.ifr_nmacfilt);
+ i40e->i40e_uaddrs = NULL;
+ }
+
+ if (i40e->i40e_maddrs != NULL) {
+ kmem_free(i40e->i40e_maddrs, sizeof (i40e_maddr_t) *
+ i40e->i40e_resources.ifr_nmcastfilt);
+ i40e->i40e_maddrs = NULL;
+ }
+
+ if (i40e->i40e_switch_rsrcs != NULL) {
+ size_t sz = sizeof (i40e_switch_rsrc_t) *
+ i40e->i40e_switch_rsrc_alloc;
+ ASSERT(sz > 0);
+ kmem_free(i40e->i40e_switch_rsrcs, sz);
+ i40e->i40e_switch_rsrcs = NULL;
+ }
+
+ if (i40e->i40e_device != NULL)
+ i40e_device_rele(i40e);
+}
+
+static boolean_t
+i40e_get_available_resources(i40e_t *i40e)
+{
+ dev_info_t *parent;
+ uint16_t bus, device, func;
+ uint_t nregs;
+ int *regs, i;
+ i40e_device_t *idp;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ parent = ddi_get_parent(i40e->i40e_dip);
+
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, i40e->i40e_dip, 0, "reg",
+ &regs, &nregs) != DDI_PROP_SUCCESS) {
+ return (B_FALSE);
+ }
+
+ if (nregs < 1) {
+ ddi_prop_free(regs);
+ return (B_FALSE);
+ }
+
+ bus = PCI_REG_BUS_G(regs[0]);
+ device = PCI_REG_DEV_G(regs[0]);
+ func = PCI_REG_FUNC_G(regs[0]);
+ ddi_prop_free(regs);
+
+ i40e->i40e_hw_space.bus.func = func;
+ i40e->i40e_hw_space.bus.device = device;
+
+ if (i40e_get_switch_resources(i40e) == B_FALSE) {
+ return (B_FALSE);
+ }
+
+ /*
+ * To calculate the total amount of a resource we have available, we
+ * need to add how many our i40e_t thinks it has guaranteed, if any, and
+ * then we need to go through and divide the number available on the
+ * device, which was snapshotted before anyone should have allocated
+ * anything, and use that to derive how many are available from the
+ * pool. Longer term, we may want to turn this into something that's
+ * more of a pool-like resource that everything can share (though that
+ * may require some more assistance from MAC).
+ *
+ * Though for transmit and receive queue pairs, we just have to ask
+ * firmware instead.
+ */
+ idp = i40e_device_find(i40e, parent, bus, device);
+ i40e->i40e_device = idp;
+ i40e->i40e_resources.ifr_nvsis = 0;
+ i40e->i40e_resources.ifr_nvsis_used = 0;
+ i40e->i40e_resources.ifr_nmacfilt = 0;
+ i40e->i40e_resources.ifr_nmacfilt_used = 0;
+ i40e->i40e_resources.ifr_nmcastfilt = 0;
+ i40e->i40e_resources.ifr_nmcastfilt_used = 0;
+
+ for (i = 0; i < i40e->i40e_switch_rsrc_actual; i++) {
+ i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];
+
+ switch (srp->resource_type) {
+ case I40E_AQ_RESOURCE_TYPE_VSI:
+ i40e->i40e_resources.ifr_nvsis +=
+ LE_16(srp->guaranteed);
+ i40e->i40e_resources.ifr_nvsis_used = LE_16(srp->used);
+ break;
+ case I40E_AQ_RESOURCE_TYPE_MACADDR:
+ i40e->i40e_resources.ifr_nmacfilt +=
+ LE_16(srp->guaranteed);
+ i40e->i40e_resources.ifr_nmacfilt_used =
+ LE_16(srp->used);
+ break;
+ case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
+ i40e->i40e_resources.ifr_nmcastfilt +=
+ LE_16(srp->guaranteed);
+ i40e->i40e_resources.ifr_nmcastfilt_used =
+ LE_16(srp->used);
+ break;
+ default:
+ break;
+ }
+ }
+
+ for (i = 0; i < idp->id_rsrcs_act; i++) {
+ i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];
+ switch (srp->resource_type) {
+ case I40E_AQ_RESOURCE_TYPE_VSI:
+ i40e->i40e_resources.ifr_nvsis +=
+ LE_16(srp->total_unalloced) / idp->id_nfuncs;
+ break;
+ case I40E_AQ_RESOURCE_TYPE_MACADDR:
+ i40e->i40e_resources.ifr_nmacfilt +=
+ LE_16(srp->total_unalloced) / idp->id_nfuncs;
+ break;
+ case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
+ i40e->i40e_resources.ifr_nmcastfilt +=
+ LE_16(srp->total_unalloced) / idp->id_nfuncs;
+ break;
+ default:
+ break;
+ }
+ }
+
+ i40e->i40e_resources.ifr_nrx_queue = hw->func_caps.num_rx_qp;
+ i40e->i40e_resources.ifr_ntx_queue = hw->func_caps.num_tx_qp;
+
+ i40e->i40e_uaddrs = kmem_zalloc(sizeof (i40e_uaddr_t) *
+ i40e->i40e_resources.ifr_nmacfilt, KM_SLEEP);
+ i40e->i40e_maddrs = kmem_zalloc(sizeof (i40e_maddr_t) *
+ i40e->i40e_resources.ifr_nmcastfilt, KM_SLEEP);
+
+ /*
+ * Initialize each unicast slot to a multicast address to indicate that
+ * it's invalid for sanity purposes, since a valid unicast MAC never has
+ * the multicast bit (0x01 in the first octet) set. Think of it like
+ * 0xdeadbeef.
+ */
+ for (i = 0; i < i40e->i40e_resources.ifr_nmacfilt; i++)
+ i40e->i40e_uaddrs[i].iua_mac[0] = 0x01;
+
+ return (B_TRUE);
+}
+
+static boolean_t
+i40e_enable_interrupts(i40e_t *i40e)
+{
+ int i, rc;
+
+ if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
+ rc = ddi_intr_block_enable(i40e->i40e_intr_handles,
+ i40e->i40e_intr_count);
+ if (rc != DDI_SUCCESS) {
+ i40e_error(i40e, "Interrupt block-enable failed: %d",
+ rc);
+ return (B_FALSE);
+ }
+ } else {
+ for (i = 0; i < i40e->i40e_intr_count; i++) {
+ rc = ddi_intr_enable(i40e->i40e_intr_handles[i]);
+ if (rc != DDI_SUCCESS) {
+ i40e_error(i40e,
+ "Failed to enable interrupt %d: %d", i, rc);
+ while (--i >= 0) {
+ (void) ddi_intr_disable(
+ i40e->i40e_intr_handles[i]);
+ }
+ return (B_FALSE);
+ }
+ }
+ }
+
+ return (B_TRUE);
+}
+
+static boolean_t
+i40e_disable_interrupts(i40e_t *i40e)
+{
+ int i, rc;
+
+ if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
+ rc = ddi_intr_block_disable(i40e->i40e_intr_handles,
+ i40e->i40e_intr_count);
+ if (rc != DDI_SUCCESS) {
+ i40e_error(i40e,
+ "Interrupt block-disable failed: %d", rc);
+ return (B_FALSE);
+ }
+ } else {
+ for (i = 0; i < i40e->i40e_intr_count; i++) {
+ rc = ddi_intr_disable(i40e->i40e_intr_handles[i]);
+ if (rc != DDI_SUCCESS) {
+ i40e_error(i40e,
+ "Failed to disable interrupt %d: %d",
+ i, rc);
+ return (B_FALSE);
+ }
+ }
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Free receive & transmit rings.
+ */
+static void
+i40e_free_trqpairs(i40e_t *i40e)
+{
+ int i;
+ i40e_trqpair_t *itrq;
+
+ if (i40e->i40e_trqpairs != NULL) {
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ itrq = &i40e->i40e_trqpairs[i];
+ mutex_destroy(&itrq->itrq_rx_lock);
+ mutex_destroy(&itrq->itrq_tx_lock);
+ mutex_destroy(&itrq->itrq_tcb_lock);
+
+ /*
+ * Should have already been cleaned up by start/stop,
+ * etc.
+ */
+ ASSERT(itrq->itrq_txkstat == NULL);
+ ASSERT(itrq->itrq_rxkstat == NULL);
+ }
+
+ kmem_free(i40e->i40e_trqpairs,
+ sizeof (i40e_trqpair_t) * i40e->i40e_num_trqpairs);
+ i40e->i40e_trqpairs = NULL;
+ }
+
+ cv_destroy(&i40e->i40e_rx_pending_cv);
+ mutex_destroy(&i40e->i40e_rx_pending_lock);
+ mutex_destroy(&i40e->i40e_general_lock);
+}
+
+/*
+ * Allocate transmit and receive rings, as well as other data structures that we
+ * need.
+ */
+static boolean_t
+i40e_alloc_trqpairs(i40e_t *i40e)
+{
+ int i;
+ void *mutexpri = DDI_INTR_PRI(i40e->i40e_intr_pri);
+
+ /*
+ * Now that we have the priority for the interrupts, initialize
+ * all relevant locks.
+ */
+ mutex_init(&i40e->i40e_general_lock, NULL, MUTEX_DRIVER, mutexpri);
+ mutex_init(&i40e->i40e_rx_pending_lock, NULL, MUTEX_DRIVER, mutexpri);
+ cv_init(&i40e->i40e_rx_pending_cv, NULL, CV_DRIVER, NULL);
+
+ i40e->i40e_trqpairs = kmem_zalloc(sizeof (i40e_trqpair_t) *
+ i40e->i40e_num_trqpairs, KM_SLEEP);
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];
+
+ itrq->itrq_i40e = i40e;
+ mutex_init(&itrq->itrq_rx_lock, NULL, MUTEX_DRIVER, mutexpri);
+ mutex_init(&itrq->itrq_tx_lock, NULL, MUTEX_DRIVER, mutexpri);
+ mutex_init(&itrq->itrq_tcb_lock, NULL, MUTEX_DRIVER, mutexpri);
+ itrq->itrq_index = i;
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Unless a .conf file already overrode i40e_t structure values, they will
+ * be 0, and need to be set in conjunction with the now-available HW report.
+ *
+ * However, at the moment, we cap all of these resources as we only support a
+ * single receive ring and a single group.
+ */
+/* ARGSUSED */
+static void
+i40e_hw_to_instance(i40e_t *i40e, i40e_hw_t *hw)
+{
+ if (i40e->i40e_num_trqpairs == 0) {
+ i40e->i40e_num_trqpairs = I40E_TRQPAIR_MAX;
+ }
+
+ if (i40e->i40e_num_rx_groups == 0) {
+ i40e->i40e_num_rx_groups = I40E_GROUP_MAX;
+ }
+}
+
+/*
+ * Free any resources required by, or setup by, the Intel common code.
+ */
+static void
+i40e_common_code_fini(i40e_t *i40e)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ int rc;
+
+ rc = i40e_shutdown_lan_hmc(hw);
+ if (rc != I40E_SUCCESS)
+ i40e_error(i40e, "failed to shutdown LAN hmc: %d", rc);
+
+ rc = i40e_shutdown_adminq(hw);
+ if (rc != I40E_SUCCESS)
+ i40e_error(i40e, "failed to shutdown admin queue: %d", rc);
+}
+
+/*
+ * Initialize and call Intel common-code routines, includes some setup
+ * the common code expects from the driver. Also prints on failure, so
+ * the caller doesn't have to.
+ */
+static boolean_t
+i40e_common_code_init(i40e_t *i40e, i40e_hw_t *hw)
+{
+ int rc;
+
+ i40e_clear_hw(hw);
+ rc = i40e_pf_reset(hw);
+ if (rc != 0) {
+ i40e_error(i40e, "failed to reset hardware: %d", rc);
+ i40e_fm_ereport(i40e, DDI_FM_DEVICE_NO_RESPONSE);
+ return (B_FALSE);
+ }
+
+ rc = i40e_init_shared_code(hw);
+ if (rc != 0) {
+ i40e_error(i40e, "failed to initialize i40e core: %d", rc);
+ return (B_FALSE);
+ }
+
+ hw->aq.num_arq_entries = I40E_DEF_ADMINQ_SIZE;
+ hw->aq.num_asq_entries = I40E_DEF_ADMINQ_SIZE;
+ hw->aq.arq_buf_size = I40E_ADMINQ_BUFSZ;
+ hw->aq.asq_buf_size = I40E_ADMINQ_BUFSZ;
+
+ rc = i40e_init_adminq(hw);
+ if (rc != 0) {
+ i40e_error(i40e, "failed to initialize firmware admin queue: "
+ "%d, potential firmware version mismatch", rc);
+ i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
+ return (B_FALSE);
+ }
+
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
+ i40e_notice(i40e, "The driver for the device detected a newer "
+ "version of the NVM image (%d.%d) than expected (%d.%d).\n"
+ "Please install the most recent version of the network "
+ "driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
+ } else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
+ hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) {
+ i40e_notice(i40e, "The driver for the device detected an older"
+ " version of the NVM image (%d.%d) than expected (%d.%d)."
+ "\nPlease update the NVM image.\n",
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR - 1);
+ }
+
+ i40e_clear_pxe_mode(hw);
+
+ /*
+ * We need to call this so that the common code can discover
+ * capabilities of the hardware, which it uses throughout the rest.
+ */
+ if (!i40e_get_hw_capabilities(i40e, hw)) {
+ i40e_error(i40e, "failed to obtain hardware capabilities");
+ return (B_FALSE);
+ }
+
+ if (i40e_get_available_resources(i40e) == B_FALSE) {
+ i40e_error(i40e, "failed to obtain hardware resources");
+ return (B_FALSE);
+ }
+
+ i40e_hw_to_instance(i40e, hw);
+
+ rc = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (rc != 0) {
+ i40e_error(i40e, "failed to initialize hardware memory cache: "
+ "%d\n", rc);
+ return (B_FALSE);
+ }
+
+ rc = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (rc != 0) {
+ i40e_error(i40e, "failed to configure hardware memory cache: "
+ "%d\n", rc);
+ return (B_FALSE);
+ }
+
+ (void) i40e_aq_stop_lldp(hw, TRUE, NULL);
+
+ rc = i40e_get_mac_addr(hw, hw->mac.addr);
+ if (rc != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to retrieve hardware mac address: %d",
+ rc);
+ return (B_FALSE);
+ }
+
+ rc = i40e_validate_mac_addr(hw->mac.addr);
+ if (rc != 0) {
+ i40e_error(i40e, "failed to validate internal mac address: "
+ "%d\n", rc);
+ return (B_FALSE);
+ }
+ bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
+ if ((rc = i40e_get_port_mac_addr(hw, hw->mac.port_addr)) !=
+ I40E_SUCCESS) {
+ i40e_error(i40e, "failed to retrieve port mac address: %d",
+ rc);
+ return (B_FALSE);
+ }
+
+ /*
+ * We need to obtain the ID of the Virtual Station Interface (VSI)
+ * before we can perform other operations on the device.
+ */
+ i40e->i40e_vsi_id = i40e_get_vsi_id(i40e);
+ if (i40e->i40e_vsi_id == -1) {
+ i40e_error(i40e, "failed to obtain VSI ID");
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+static void
+i40e_unconfigure(dev_info_t *devinfo, i40e_t *i40e)
+{
+ int rc;
+
+ if (i40e->i40e_attach_progress & I40E_ATTACH_ENABLE_INTR)
+ (void) i40e_disable_interrupts(i40e);
+
+ if ((i40e->i40e_attach_progress & I40E_ATTACH_LINK_TIMER) &&
+ i40e->i40e_periodic_id != 0) {
+ ddi_periodic_delete(i40e->i40e_periodic_id);
+ i40e->i40e_periodic_id = 0;
+ }
+
+ if (i40e->i40e_attach_progress & I40E_ATTACH_MAC) {
+ rc = mac_unregister(i40e->i40e_mac_hdl);
+ if (rc != 0) {
+ i40e_error(i40e, "failed to unregister from mac: %d",
+ rc);
+ }
+ }
+
+ if (i40e->i40e_attach_progress & I40E_ATTACH_STATS) {
+ i40e_stats_fini(i40e);
+ }
+
+ if (i40e->i40e_attach_progress & I40E_ATTACH_ADD_INTR)
+ i40e_rem_intr_handlers(i40e);
+
+ if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_RINGSLOCKS)
+ i40e_free_trqpairs(i40e);
+
+ if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_INTR)
+ i40e_rem_intrs(i40e);
+
+ if (i40e->i40e_attach_progress & I40E_ATTACH_COMMON_CODE)
+ i40e_common_code_fini(i40e);
+
+ i40e_cleanup_resources(i40e);
+
+ if (i40e->i40e_attach_progress & I40E_ATTACH_PROPS)
+ (void) ddi_prop_remove_all(devinfo);
+
+ if (i40e->i40e_attach_progress & I40E_ATTACH_REGS_MAP &&
+ i40e->i40e_osdep_space.ios_reg_handle != NULL) {
+ ddi_regs_map_free(&i40e->i40e_osdep_space.ios_reg_handle);
+ i40e->i40e_osdep_space.ios_reg_handle = NULL;
+ }
+
+ if ((i40e->i40e_attach_progress & I40E_ATTACH_PCI_CONFIG) &&
+ i40e->i40e_osdep_space.ios_cfg_handle != NULL) {
+ pci_config_teardown(&i40e->i40e_osdep_space.ios_cfg_handle);
+ i40e->i40e_osdep_space.ios_cfg_handle = NULL;
+ }
+
+ if (i40e->i40e_attach_progress & I40E_ATTACH_FM_INIT)
+ i40e_fm_fini(i40e);
+
+ kmem_free(i40e->i40e_aqbuf, I40E_ADMINQ_BUFSZ);
+ kmem_free(i40e, sizeof (i40e_t));
+
+ ddi_set_driver_private(devinfo, NULL);
+}
+
+static boolean_t
+i40e_final_init(i40e_t *i40e)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ struct i40e_osdep *osdep = OS_DEP(hw);
+ uint8_t pbanum[I40E_PBANUM_STRLEN];
+ enum i40e_status_code irc;
+ char buf[I40E_DDI_PROP_LEN];
+
+ pbanum[0] = '\0';
+ irc = i40e_read_pba_string(hw, pbanum, sizeof (pbanum));
+ if (irc != I40E_SUCCESS) {
+ i40e_log(i40e, "failed to read PBA string: %d", irc);
+ } else {
+ (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
+ "printed-board-assembly", (char *)pbanum);
+ }
+
+#ifdef DEBUG
+ ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.fw_maj_ver,
+ hw->aq.fw_min_ver) < sizeof (buf));
+ ASSERT(snprintf(NULL, 0, "%x", hw->aq.fw_build) < sizeof (buf));
+ ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.api_maj_ver,
+ hw->aq.api_min_ver) < sizeof (buf));
+#endif
+
+ (void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.fw_maj_ver,
+ hw->aq.fw_min_ver);
+ (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
+ "firmware-version", buf);
+ (void) snprintf(buf, sizeof (buf), "%x", hw->aq.fw_build);
+ (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
+ "firmware-build", buf);
+ (void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.api_maj_ver,
+ hw->aq.api_min_ver);
+ (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
+ "api-version", buf);
+
+ if (!i40e_set_hw_bus_info(hw))
+ return (B_FALSE);
+
+ if (i40e_check_acc_handle(osdep->ios_reg_handle) != DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+static boolean_t
+i40e_identify_hardware(i40e_t *i40e)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ struct i40e_osdep *osdep = &i40e->i40e_osdep_space;
+
+ hw->vendor_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_VENID);
+ hw->device_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_DEVID);
+ hw->revision_id = pci_config_get8(osdep->ios_cfg_handle,
+ PCI_CONF_REVID);
+ hw->subsystem_device_id =
+ pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBSYSID);
+ hw->subsystem_vendor_id =
+ pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBVENID);
+
+ /*
+ * Note that we set the hardware's bus information later on, in
+ * i40e_get_available_resources(). The common code doesn't seem to
+ * require that it be set in any way; it seems to be mostly for
+ * book-keeping.
+ */
+
+ /* Call common code to set the MAC type for this adapter. */
+ if (i40e_set_mac_type(hw) != I40E_SUCCESS)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+static boolean_t
+i40e_regs_map(i40e_t *i40e)
+{
+ dev_info_t *devinfo = i40e->i40e_dip;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ struct i40e_osdep *osdep = &i40e->i40e_osdep_space;
+ off_t memsize;
+ int ret;
+
+ if (ddi_dev_regsize(devinfo, I40E_ADAPTER_REGSET, &memsize) !=
+ DDI_SUCCESS) {
+ i40e_error(i40e, "Used invalid register set to map PCIe regs");
+ return (B_FALSE);
+ }
+
+ if ((ret = ddi_regs_map_setup(devinfo, I40E_ADAPTER_REGSET,
+ (caddr_t *)&hw->hw_addr, 0, memsize, &i40e_regs_acc_attr,
+ &osdep->ios_reg_handle)) != DDI_SUCCESS) {
+ i40e_error(i40e, "failed to map device registers: %d", ret);
+ return (B_FALSE);
+ }
+
+ osdep->ios_reg_size = memsize;
+ return (B_TRUE);
+}
+
+/*
+ * Update parameters required when a new MTU has been configured. Calculate the
+ * maximum frame size, as well as the size of our DMA buffers, which we size in
+ * increments of 1K.
+ */
+void
+i40e_update_mtu(i40e_t *i40e)
+{
+ uint32_t rx, tx;
+
+ i40e->i40e_frame_max = i40e->i40e_sdu +
+ sizeof (struct ether_vlan_header) + ETHERFCSL;
+
+ rx = i40e->i40e_frame_max + I40E_BUF_IPHDR_ALIGNMENT;
+ i40e->i40e_rx_buf_size = ((rx >> 10) +
+ ((rx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10;
+
+ tx = i40e->i40e_frame_max;
+ i40e->i40e_tx_buf_size = ((tx >> 10) +
+ ((tx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10;
+}
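+
+/*
+ * As a worked example of the sizing above (assuming the usual 18-byte
+ * ether_vlan_header and 4-byte FCS, and assuming I40E_BUF_IPHDR_ALIGNMENT is
+ * only a few bytes): an MTU of 1500 yields an i40e_frame_max of 1522 bytes,
+ * and both the rx and tx buffer sizes then round up to the next 1K boundary,
+ * 2048 bytes. The expression ((x >> 10) + ((x & 1023) > 0 ? 1 : 0)) << 10 is
+ * simply x rounded up to the next multiple of 1024.
+ */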
+
+static int
+i40e_get_prop(i40e_t *i40e, char *prop, int min, int max, int def)
+{
+ int val;
+
+ val = ddi_prop_get_int(DDI_DEV_T_ANY, i40e->i40e_dip, DDI_PROP_DONTPASS,
+ prop, def);
+ if (val > max)
+ val = max;
+ if (val < min)
+ val = min;
+ return (val);
+}
+
+static void
+i40e_init_properties(i40e_t *i40e)
+{
+ i40e->i40e_sdu = i40e_get_prop(i40e, "default_mtu",
+ I40E_MIN_MTU, I40E_MAX_MTU, I40E_DEF_MTU);
+
+ i40e->i40e_intr_force = i40e_get_prop(i40e, "intr_force",
+ I40E_INTR_NONE, I40E_INTR_LEGACY, I40E_INTR_NONE);
+
+ i40e->i40e_mr_enable = i40e_get_prop(i40e, "mr_enable",
+ B_FALSE, B_TRUE, B_TRUE);
+
+ i40e->i40e_tx_ring_size = i40e_get_prop(i40e, "tx_ring_size",
+ I40E_MIN_TX_RING_SIZE, I40E_MAX_TX_RING_SIZE,
+ I40E_DEF_TX_RING_SIZE);
+ if ((i40e->i40e_tx_ring_size % I40E_DESC_ALIGN) != 0) {
+ i40e->i40e_tx_ring_size = P2ROUNDUP(i40e->i40e_tx_ring_size,
+ I40E_DESC_ALIGN);
+ }
+
+ i40e->i40e_tx_block_thresh = i40e_get_prop(i40e, "tx_resched_threshold",
+ I40E_MIN_TX_BLOCK_THRESH,
+ i40e->i40e_tx_ring_size - I40E_TX_MAX_COOKIE,
+ I40E_DEF_TX_BLOCK_THRESH);
+
+ i40e->i40e_rx_ring_size = i40e_get_prop(i40e, "rx_ring_size",
+ I40E_MIN_RX_RING_SIZE, I40E_MAX_RX_RING_SIZE,
+ I40E_DEF_RX_RING_SIZE);
+ if ((i40e->i40e_rx_ring_size % I40E_DESC_ALIGN) != 0) {
+ i40e->i40e_rx_ring_size = P2ROUNDUP(i40e->i40e_rx_ring_size,
+ I40E_DESC_ALIGN);
+ }
+
+ i40e->i40e_rx_limit_per_intr = i40e_get_prop(i40e, "rx_limit_per_intr",
+ I40E_MIN_RX_LIMIT_PER_INTR, I40E_MAX_RX_LIMIT_PER_INTR,
+ I40E_DEF_RX_LIMIT_PER_INTR);
+
+ i40e->i40e_tx_hcksum_enable = i40e_get_prop(i40e, "tx_hcksum_enable",
+ B_FALSE, B_TRUE, B_TRUE);
+
+ i40e->i40e_rx_hcksum_enable = i40e_get_prop(i40e, "rx_hcksum_enable",
+ B_FALSE, B_TRUE, B_TRUE);
+
+ i40e->i40e_rx_dma_min = i40e_get_prop(i40e, "rx_dma_threshold",
+ I40E_MIN_RX_DMA_THRESH, I40E_MAX_RX_DMA_THRESH,
+ I40E_DEF_RX_DMA_THRESH);
+
+ i40e->i40e_tx_dma_min = i40e_get_prop(i40e, "tx_dma_threshold",
+ I40E_MIN_TX_DMA_THRESH, I40E_MAX_TX_DMA_THRESH,
+ I40E_DEF_TX_DMA_THRESH);
+
+ i40e->i40e_tx_itr = i40e_get_prop(i40e, "tx_intr_throttle",
+ I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_TX_ITR);
+
+ i40e->i40e_rx_itr = i40e_get_prop(i40e, "rx_intr_throttle",
+ I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_RX_ITR);
+
+ i40e->i40e_other_itr = i40e_get_prop(i40e, "other_intr_throttle",
+ I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_OTHER_ITR);
+
+ if (!i40e->i40e_mr_enable) {
+ i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
+ i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;
+ }
+
+ i40e_update_mtu(i40e);
+}
+
+/*
+ * There are a few constraints on interrupts that we're currently imposing, some
+ * of which are restrictions from hardware. For a fuller treatment, see
+ * i40e_intr.c.
+ *
+ * Currently, to use MSI-X we require two interrupts be available, though in
+ * theory we should participate in IRM and happily use more interrupts.
+ *
+ * Hardware only supports a single MSI being programmed and therefore if we
+ * don't have MSI-X interrupts available at this time, then we ratchet down the
+ * number of rings and groups available. Obviously, we only bother with a single
+ * fixed interrupt.
+ */
+static boolean_t
+i40e_alloc_intr_handles(i40e_t *i40e, dev_info_t *devinfo, int intr_type)
+{
+ int request, count, actual, rc, min;
+
+ switch (intr_type) {
+ case DDI_INTR_TYPE_FIXED:
+ case DDI_INTR_TYPE_MSI:
+ request = 1;
+ min = 1;
+ break;
+ case DDI_INTR_TYPE_MSIX:
+ /*
+ * At the moment, we always request two MSI-X while we still
+ * only support a single interrupt. The upper bound on what's
+ * supported by a given device is defined by MSI_X_PF_N in
+ * GLPCI_CNF2. When we evolve, we should read it to determine
+ * what the real max is.
+ */
+ ASSERT(i40e->i40e_num_trqpairs == 1);
+ request = 2;
+ min = 2;
+ break;
+ default:
+ panic("bad interrupt type passed to i40e_alloc_intr_handles: "
+ "%d", intr_type);
+ return (B_FALSE);
+ }
+
+ rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
+ if (rc != DDI_SUCCESS || count < min) {
+ i40e_log(i40e, "Get interrupt number failed, "
+ "returned %d, count %d\n", rc, count);
+ return (B_FALSE);
+ }
+
+ rc = ddi_intr_get_navail(devinfo, intr_type, &count);
+ if (rc != DDI_SUCCESS || count < min) {
+ i40e_log(i40e, "Get AVAILABLE interrupt number failed, "
+ "returned %d, count %d\n", rc, count);
+ return (B_FALSE);
+ }
+
+ actual = 0;
+ i40e->i40e_intr_count = 0;
+ i40e->i40e_intr_count_max = 0;
+ i40e->i40e_intr_count_min = 0;
+
+ i40e->i40e_intr_size = request * sizeof (ddi_intr_handle_t);
+ ASSERT(i40e->i40e_intr_size != 0);
+ i40e->i40e_intr_handles = kmem_alloc(i40e->i40e_intr_size, KM_SLEEP);
+
+ rc = ddi_intr_alloc(devinfo, i40e->i40e_intr_handles, intr_type, 0,
+ min(request, count), &actual, DDI_INTR_ALLOC_NORMAL);
+ if (rc != DDI_SUCCESS) {
+ i40e_log(i40e, "Interrupt allocation failed with %d.", rc);
+ goto alloc_handle_fail;
+ }
+
+ i40e->i40e_intr_count = actual;
+ i40e->i40e_intr_count_max = request;
+ i40e->i40e_intr_count_min = min;
+
+ if (actual < min) {
+ i40e_log(i40e, "actual (%d) is less than minimum (%d).",
+ actual, min);
+ goto alloc_handle_fail;
+ }
+
+ /*
+ * Record the priority and capabilities for our first vector. Once
+ * we have it, that's our priority until detach time. Even if we
+ * eventually participate in IRM, our priority shouldn't change.
+ */
+ rc = ddi_intr_get_pri(i40e->i40e_intr_handles[0], &i40e->i40e_intr_pri);
+ if (rc != DDI_SUCCESS) {
+ i40e_log(i40e,
+ "Getting interrupt priority failed with %d.", rc);
+ goto alloc_handle_fail;
+ }
+
+ rc = ddi_intr_get_cap(i40e->i40e_intr_handles[0], &i40e->i40e_intr_cap);
+ if (rc != DDI_SUCCESS) {
+ i40e_log(i40e,
+ "Getting interrupt capabilities failed with %d.", rc);
+ goto alloc_handle_fail;
+ }
+
+ i40e->i40e_intr_type = intr_type;
+ return (B_TRUE);
+
+alloc_handle_fail:
+
+ i40e_rem_intrs(i40e);
+ return (B_FALSE);
+}
+
+static boolean_t
+i40e_alloc_intrs(i40e_t *i40e, dev_info_t *devinfo)
+{
+ int intr_types, rc;
+
+ rc = ddi_intr_get_supported_types(devinfo, &intr_types);
+ if (rc != DDI_SUCCESS) {
+ i40e_error(i40e, "failed to get supported interrupt types: %d",
+ rc);
+ return (B_FALSE);
+ }
+
+ i40e->i40e_intr_type = 0;
+
+ if ((intr_types & DDI_INTR_TYPE_MSIX) &&
+ i40e->i40e_intr_force <= I40E_INTR_MSIX) {
+ if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSIX))
+ return (B_TRUE);
+ }
+
+ /*
+ * We only use multiple transmit/receive pairs when MSI-X interrupts are
+ * available due to the fact that the device basically only supports a
+ * single MSI interrupt.
+ */
+ i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
+ i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;
+
+ if ((intr_types & DDI_INTR_TYPE_MSI) &&
+ (i40e->i40e_intr_force <= I40E_INTR_MSI)) {
+ if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSI))
+ return (B_TRUE);
+ }
+
+ if (intr_types & DDI_INTR_TYPE_FIXED) {
+ if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_FIXED))
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+/*
+ * Map different interrupts to MSI-X vectors.
+ */
+static boolean_t
+i40e_map_intrs_to_vectors(i40e_t *i40e)
+{
+ if (i40e->i40e_intr_type != DDI_INTR_TYPE_MSIX) {
+ return (B_TRUE);
+ }
+
+ /*
+ * At the moment, we only have one queue and one interrupt, thus both are
+ * on that one interrupt. However, longer term we need to go back to
+ * using the ixgbe-style map of queues to vectors, or walk the linked
+ * list from the device to know what to go handle. Therefore, for the
+ * moment, we simply map our single set of rings to the one
+ * I/O interrupt that exists for MSI-X.
+ */
+ ASSERT(i40e->i40e_intr_count == 2);
+ ASSERT(i40e->i40e_num_trqpairs == 1);
+
+ i40e->i40e_trqpairs[0].itrq_rx_intrvec = 1;
+ i40e->i40e_trqpairs[0].itrq_tx_intrvec = 1;
+
+ return (B_TRUE);
+}
+
+static boolean_t
+i40e_add_intr_handlers(i40e_t *i40e)
+{
+ int rc, vector;
+
+ switch (i40e->i40e_intr_type) {
+ case DDI_INTR_TYPE_MSIX:
+ for (vector = 0; vector < i40e->i40e_intr_count; vector++) {
+ rc = ddi_intr_add_handler(
+ i40e->i40e_intr_handles[vector],
+ (ddi_intr_handler_t *)i40e_intr_msix, i40e,
+ (void *)(uintptr_t)vector);
+ if (rc != DDI_SUCCESS) {
+ i40e_log(i40e, "Add interrupt handler (MSI-X) "
+ "failed: return %d, vector %d", rc, vector);
+ for (vector--; vector >= 0; vector--) {
+ (void) ddi_intr_remove_handler(
+ i40e->i40e_intr_handles[vector]);
+ }
+ return (B_FALSE);
+ }
+ }
+ break;
+ case DDI_INTR_TYPE_MSI:
+ rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
+ (ddi_intr_handler_t *)i40e_intr_msi, i40e, NULL);
+ if (rc != DDI_SUCCESS) {
+ i40e_log(i40e, "Add interrupt handler (MSI) failed: "
+ "return %d", rc);
+ return (B_FALSE);
+ }
+ break;
+ case DDI_INTR_TYPE_FIXED:
+ rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
+ (ddi_intr_handler_t *)i40e_intr_legacy, i40e, NULL);
+ if (rc != DDI_SUCCESS) {
+ i40e_log(i40e, "Add interrupt handler (legacy) failed:"
+ " return %d", rc);
+ return (B_FALSE);
+ }
+ break;
+ default:
+ /* Cast to pacify lint */
+ panic("i40e_intr_type %p contains an unknown type: %d",
+ (void *)i40e, i40e->i40e_intr_type);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Perform periodic checks. Longer term, we should be thinking about additional
+ * things here:
+ *
+ * o Stall Detection
+ * o Temperature sensor detection
+ * o Device resetting
+ * o Statistics updating to avoid wraparound
+ */
+static void
+i40e_timer(void *arg)
+{
+ i40e_t *i40e = arg;
+
+ mutex_enter(&i40e->i40e_general_lock);
+ i40e_link_check(i40e);
+ mutex_exit(&i40e->i40e_general_lock);
+}
+
+/*
+ * Get the hardware state, and scribble away anything that needs scribbling.
+ */
+static void
+i40e_get_hw_state(i40e_t *i40e, i40e_hw_t *hw)
+{
+ int rc;
+
+ ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
+
+ (void) i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ i40e_link_check(i40e);
+
+ /*
+ * Try and determine our PHY. Note that we may have to retry and
+ * delay to detect fiber correctly.
+ */
+ rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE, &i40e->i40e_phy,
+ NULL);
+ if (rc == I40E_ERR_UNKNOWN_PHY) {
+ i40e_msec_delay(200);
+ rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE,
+ &i40e->i40e_phy, NULL);
+ }
+
+ if (rc != I40E_SUCCESS) {
+ if (rc == I40E_ERR_UNKNOWN_PHY) {
+ i40e_error(i40e, "encountered unknown PHY type, "
+ "not attaching.");
+ } else {
+ i40e_error(i40e, "error getting physical capabilities: "
+ "%d, %d", rc, hw->aq.asq_last_status);
+ }
+ }
+
+ rc = i40e_update_link_info(hw);
+ if (rc != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to update link information: %d", rc);
+ }
+
+ /*
+ * In general, we don't want to mask off (as in stop from being a cause)
+ * any of the interrupts that the phy might be able to generate.
+ */
+ rc = i40e_aq_set_phy_int_mask(hw, 0, NULL);
+ if (rc != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to update phy link mask: %d\n", rc);
+ }
+}
+
+/*
+ * Go through and re-initialize any existing filters that we may have set up for
+ * this device. Note that we would only expect them to exist if hardware had
+ * already been initialized and we had just reset it. While we're not
+ * implementing this yet, we're keeping this around for when we add reset
+ * capabilities, so this isn't forgotten.
+ */
+/* ARGSUSED */
+static void
+i40e_init_macaddrs(i40e_t *i40e, i40e_hw_t *hw)
+{
+}
+
+/*
+ * Configure the hardware for the Virtual Station Interface (VSI). Currently
+ * we only support one, but in the future we could instantiate more than one
+ * per attach-point.
+ */
+static boolean_t
+i40e_config_vsi(i40e_t *i40e, i40e_hw_t *hw)
+{
+ struct i40e_vsi_context context;
+ int err;
+
+ bzero(&context, sizeof (struct i40e_vsi_context));
+ context.seid = i40e->i40e_vsi_id;
+ context.pf_num = hw->pf_id;
+ err = i40e_aq_get_vsi_params(hw, &context, NULL);
+ if (err != I40E_SUCCESS) {
+ i40e_error(i40e, "get VSI params failed with %d\n", err);
+ return (B_FALSE);
+ }
+
+ /*
+ * Set the queue and traffic class bits. Keep it simple for now.
+ */
+ context.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ context.info.mapping_flags = I40E_AQ_VSI_QUE_MAP_CONTIG;
+ context.info.queue_mapping[0] = I40E_ASSIGN_ALL_QUEUES;
+ context.info.tc_mapping[0] = I40E_TRAFFIC_CLASS_NO_QUEUES;
+
+ context.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
+ context.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+ context.flags = LE16_TO_CPU(I40E_AQ_VSI_TYPE_PF);
+
+ i40e->i40e_vsi_stat_id = LE16_TO_CPU(context.info.stat_counter_idx);
+ if (i40e_stat_vsi_init(i40e) == B_FALSE)
+ return (B_FALSE);
+
+ err = i40e_aq_update_vsi_params(hw, &context, NULL);
+ if (err != I40E_SUCCESS) {
+ i40e_error(i40e, "Update VSI params failed with %d", err);
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Wrapper to kick the chipset on.
+ */
+static boolean_t
+i40e_chip_start(i40e_t *i40e)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ struct i40e_filter_control_settings filter;
+ int rc;
+
+ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)) {
+ i40e_msec_delay(75);
+ if (i40e_aq_set_link_restart_an(hw, TRUE, NULL) !=
+ I40E_SUCCESS) {
+ i40e_error(i40e, "failed to restart link: admin queue "
+ "error: %d\n", hw->aq.asq_last_status);
+ return (B_FALSE);
+ }
+ }
+
+ /* Determine hardware state */
+ i40e_get_hw_state(i40e, hw);
+
+ /* Initialize mac addresses. */
+ i40e_init_macaddrs(i40e, hw);
+
+ /*
+ * Set up the filter control.
+ */
+ bzero(&filter, sizeof (filter));
+ filter.enable_ethtype = TRUE;
+ filter.enable_macvlan = TRUE;
+
+ rc = i40e_set_filter_control(hw, &filter);
+ if (rc != I40E_SUCCESS) {
+ i40e_error(i40e, "i40e_set_filter_control() returned %d", rc);
+ return (B_FALSE);
+ }
+
+ i40e_intr_chip_init(i40e);
+
+ if (!i40e_config_vsi(i40e, hw))
+ return (B_FALSE);
+
+ i40e_flush(hw);
+
+ return (B_TRUE);
+}
+
+/*
+ * Take care of tearing down the rx ring. See 8.3.3.1.2 for more information.
+ */
+static void
+i40e_shutdown_rx_rings(i40e_t *i40e)
+{
+ int i;
+ uint32_t reg;
+
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ /*
+ * The interrupt linked list (see i40e_intr.c for more
+ * information) should have already been cleared before calling this
+ * function.
+ */
+#ifdef DEBUG
+ if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) {
+ for (i = 1; i < i40e->i40e_intr_count; i++) {
+ reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1));
+ VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
+ }
+ } else {
+ reg = I40E_READ_REG(hw, I40E_PFINT_LNKLST0);
+ VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
+ }
+
+#endif /* DEBUG */
+
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ /*
+ * Step 1. Request the queue by clearing QENA_REQ. It may not be
+ * set due to unwinding from failures and a partially enabled
+ * ring set.
+ */
+ reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
+ if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK))
+ continue;
+ VERIFY((reg & I40E_QRX_ENA_QENA_REQ_MASK) ==
+ I40E_QRX_ENA_QENA_REQ_MASK);
+ reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+ I40E_WRITE_REG(hw, I40E_QRX_ENA(i), reg);
+ }
+
+ /*
+ * Step 2. Wait for the disable to take, by having QENA_STAT in the FPM
+ * be cleared. Note that we could still receive data in the queue during
+ * this time. We don't actually wait for this now and instead defer this
+ * to i40e_shutdown_rings_wait(), after we've interleaved disabling the
+ * TX queues as well.
+ */
+}
+
+static void
+i40e_shutdown_tx_rings(i40e_t *i40e)
+{
+ int i;
+ uint32_t reg;
+
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ /*
+ * Step 1. The interrupt linked list should already have been cleared.
+ */
+#ifdef DEBUG
+ if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) {
+ for (i = 1; i < i40e->i40e_intr_count; i++) {
+ reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1));
+ VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
+ }
+ } else {
+ reg = I40E_READ_REG(hw, I40E_PFINT_LNKLST0);
+ VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
+
+ }
+#endif /* DEBUG */
+
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ /*
+ * Step 2. Set the SET_QDIS flag for every queue.
+ */
+ i40e_pre_tx_queue_cfg(hw, i, B_FALSE);
+ }
+
+ /*
+ * Step 3. Wait at least 400 usec (can be done once for all queues).
+ */
+ drv_usecwait(500);
+
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ /*
+ * Step 4. Clear the QENA_REQ flag which tells hardware to
+ * quiesce. If QENA_REQ is not already set then that means that
+ * we likely already tried to disable this queue.
+ */
+ reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
+ if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK))
+ continue;
+ reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ I40E_WRITE_REG(hw, I40E_QTX_ENA(i), reg);
+ }
+
+ /*
+ * Step 5. Wait for all drains to finish. This will be done by the
+ * hardware removing the QENA_STAT flag from the queue. Rather than
+ * waiting here, we interleave it with all the others in
+ * i40e_shutdown_rings_wait().
+ */
+}
+
+/*
+ * Wait for all the rings to be shut down, i.e. steps 2 and 5 from the above
+ * functions.
+ */
+static boolean_t
+i40e_shutdown_rings_wait(i40e_t *i40e)
+{
+ int i, try;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ uint32_t reg;
+
+ for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
+ reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
+ if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
+ break;
+ i40e_msec_delay(I40E_RING_WAIT_PAUSE);
+ }
+
+ if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) != 0) {
+ i40e_error(i40e, "timed out disabling rx queue %d\n",
+ i);
+ return (B_FALSE);
+ }
+
+ for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
+ reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
+ if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
+ break;
+ i40e_msec_delay(I40E_RING_WAIT_PAUSE);
+ }
+
+ if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) != 0) {
+ i40e_error(i40e, "timed out disabling tx queue %d\n",
+ i);
+ return (B_FALSE);
+ }
+ }
+
+ return (B_TRUE);
+}
+
+static boolean_t
+i40e_shutdown_rings(i40e_t *i40e)
+{
+ i40e_shutdown_rx_rings(i40e);
+ i40e_shutdown_tx_rings(i40e);
+ return (i40e_shutdown_rings_wait(i40e));
+}
+
+static void
+i40e_setup_rx_descs(i40e_trqpair_t *itrq)
+{
+ int i;
+ i40e_rx_data_t *rxd = itrq->itrq_rxdata;
+
+ for (i = 0; i < rxd->rxd_ring_size; i++) {
+ i40e_rx_control_block_t *rcb;
+ i40e_rx_desc_t *rdesc;
+
+ rcb = rxd->rxd_work_list[i];
+ rdesc = &rxd->rxd_desc_ring[i];
+
+ rdesc->read.pkt_addr =
+ CPU_TO_LE64((uintptr_t)rcb->rcb_dma.dmab_dma_address);
+ rdesc->read.hdr_addr = 0;
+ }
+}
+
+static boolean_t
+i40e_setup_rx_hmc(i40e_trqpair_t *itrq)
+{
+ i40e_rx_data_t *rxd = itrq->itrq_rxdata;
+ i40e_t *i40e = itrq->itrq_i40e;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ struct i40e_hmc_obj_rxq rctx;
+ int err;
+
+ bzero(&rctx, sizeof (struct i40e_hmc_obj_rxq));
+ rctx.base = rxd->rxd_desc_area.dmab_dma_address /
+ I40E_HMC_RX_CTX_UNIT;
+ rctx.qlen = rxd->rxd_ring_size;
+ VERIFY(i40e->i40e_rx_buf_size >= I40E_HMC_RX_DBUFF_MIN);
+ VERIFY(i40e->i40e_rx_buf_size <= I40E_HMC_RX_DBUFF_MAX);
+ rctx.dbuff = i40e->i40e_rx_buf_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
+ rctx.dtype = I40E_HMC_RX_DTYPE_NOSPLIT;
+ rctx.dsize = I40E_HMC_RX_DSIZE_32BYTE;
+ rctx.crcstrip = I40E_HMC_RX_CRCSTRIP_ENABLE;
+ rctx.fc_ena = I40E_HMC_RX_FC_DISABLE;
+ rctx.l2tsel = I40E_HMC_RX_L2TAGORDER;
+ rctx.hsplit_0 = I40E_HMC_RX_HDRSPLIT_DISABLE;
+ rctx.hsplit_1 = I40E_HMC_RX_HDRSPLIT_DISABLE;
+ rctx.showiv = I40E_HMC_RX_INVLAN_DONTSTRIP;
+ rctx.rxmax = i40e->i40e_frame_max;
+ rctx.tphrdesc_ena = I40E_HMC_RX_TPH_DISABLE;
+ rctx.tphwdesc_ena = I40E_HMC_RX_TPH_DISABLE;
+ rctx.tphdata_ena = I40E_HMC_RX_TPH_DISABLE;
+ rctx.tphhead_ena = I40E_HMC_RX_TPH_DISABLE;
+ rctx.lrxqthresh = I40E_HMC_RX_LOWRXQ_NOINTR;
+
+ /*
+	 * This must be set to 0x1; see Table 8-12 in section 8.3.3.2.2.
+ */
+ rctx.prefena = I40E_HMC_RX_PREFENA;
+
+ err = i40e_clear_lan_rx_queue_context(hw, itrq->itrq_index);
+ if (err != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to clear rx queue %d context: %d\n",
+ itrq->itrq_index, err);
+ return (B_FALSE);
+ }
+
+ err = i40e_set_lan_rx_queue_context(hw, itrq->itrq_index, &rctx);
+ if (err != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to set rx queue %d context: %d\n",
+ itrq->itrq_index, err);
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Take care of setting up the descriptor rings and actually programming the
+ * device. See 8.3.3.1.1 for the full list of steps we need to do to enable the
+ * rx rings.
+ */
+static boolean_t
+i40e_setup_rx_rings(i40e_t *i40e)
+{
+ int i;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];
+ i40e_rx_data_t *rxd = itrq->itrq_rxdata;
+ uint32_t reg;
+
+ /*
+ * Step 1. Program all receive ring descriptors.
+ */
+ i40e_setup_rx_descs(itrq);
+
+ /*
+ * Step 2. Program the queue's FPM/HMC context.
+ */
+ if (i40e_setup_rx_hmc(itrq) == B_FALSE)
+ return (B_FALSE);
+
+ /*
+ * Step 3. Clear the queue's tail pointer and set it to the end
+ * of the space.
+ */
+ I40E_WRITE_REG(hw, I40E_QRX_TAIL(i), 0);
+ I40E_WRITE_REG(hw, I40E_QRX_TAIL(i), rxd->rxd_ring_size - 1);
+
+ /*
+ * Step 4. Enable the queue via the QENA_REQ.
+ */
+ reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
+ VERIFY0(reg & (I40E_QRX_ENA_QENA_REQ_MASK |
+ I40E_QRX_ENA_QENA_STAT_MASK));
+ reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+ I40E_WRITE_REG(hw, I40E_QRX_ENA(i), reg);
+ }
+
+ /*
+	 * Note that we wait for every queue to be enabled before we start
+	 * checking, so that by the time we check each one, most of them will
+	 * hopefully already have come up.
+ */
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ uint32_t j, reg;
+
+ /*
+ * Step 5. Verify that QENA_STAT has been set. It's promised
+ * that this should occur within about 10 us, but like other
+ * systems, we give the card a bit more time.
+ */
+ for (j = 0; j < I40E_RING_WAIT_NTRIES; j++) {
+ reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
+
+ if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
+ break;
+ i40e_msec_delay(I40E_RING_WAIT_PAUSE);
+ }
+
+ if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
+			i40e_error(i40e, "failed to enable rx queue %d, timed "
+			    "out", i);
+ return (B_FALSE);
+ }
+ }
+
+ return (B_TRUE);
+}
+
+static boolean_t
+i40e_setup_tx_hmc(i40e_trqpair_t *itrq)
+{
+ i40e_t *i40e = itrq->itrq_i40e;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ struct i40e_hmc_obj_txq tctx;
+ struct i40e_vsi_context context;
+ int err;
+
+ bzero(&tctx, sizeof (struct i40e_hmc_obj_txq));
+ tctx.new_context = I40E_HMC_TX_NEW_CONTEXT;
+ tctx.base = itrq->itrq_desc_area.dmab_dma_address /
+ I40E_HMC_TX_CTX_UNIT;
+ tctx.fc_ena = I40E_HMC_TX_FC_DISABLE;
+ tctx.timesync_ena = I40E_HMC_TX_TS_DISABLE;
+ tctx.fd_ena = I40E_HMC_TX_FD_DISABLE;
+ tctx.alt_vlan_ena = I40E_HMC_TX_ALT_VLAN_DISABLE;
+ tctx.head_wb_ena = I40E_HMC_TX_WB_ENABLE;
+ tctx.qlen = itrq->itrq_tx_ring_size;
+ tctx.tphrdesc_ena = I40E_HMC_TX_TPH_DISABLE;
+ tctx.tphrpacket_ena = I40E_HMC_TX_TPH_DISABLE;
+ tctx.tphwdesc_ena = I40E_HMC_TX_TPH_DISABLE;
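+
+	/*
+	 * The head writeback area shares the descriptor ring's DMA
+	 * allocation, sitting immediately after the final descriptor; hence
+	 * the ring-sized offset below.
+	 */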
+ tctx.head_wb_addr = itrq->itrq_desc_area.dmab_dma_address +
+ sizeof (i40e_tx_desc_t) * itrq->itrq_tx_ring_size;
+
+ /*
+	 * Neither the crc nor the rdylist_act field is actually documented,
+	 * but what documentation exists suggests both should be zeroed. We
+	 * zero them here for now; we should check with Intel on why they
+	 * exist at all.
+ */
+ tctx.crc = 0;
+ tctx.rdylist_act = 0;
+
+ /*
+	 * We're supposed to assign the rdylist field the queue set handle of
+	 * the traffic class that this queue belongs to. We query the VSI
+	 * parameters again to obtain that handle. Note that every queue is
+	 * always assigned to traffic class zero, because we don't actually
+	 * use the traffic classes.
+ */
+ bzero(&context, sizeof (struct i40e_vsi_context));
+ context.seid = i40e->i40e_vsi_id;
+ context.pf_num = hw->pf_id;
+ err = i40e_aq_get_vsi_params(hw, &context, NULL);
+ if (err != I40E_SUCCESS) {
+ i40e_error(i40e, "get VSI params failed with %d\n", err);
+ return (B_FALSE);
+ }
+ tctx.rdylist = LE_16(context.info.qs_handle[0]);
+
+ err = i40e_clear_lan_tx_queue_context(hw, itrq->itrq_index);
+ if (err != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to clear tx queue %d context: %d\n",
+ itrq->itrq_index, err);
+ return (B_FALSE);
+ }
+
+ err = i40e_set_lan_tx_queue_context(hw, itrq->itrq_index, &tctx);
+ if (err != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to set tx queue %d context: %d\n",
+ itrq->itrq_index, err);
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Take care of setting up the descriptor rings and actually programming the
+ * device. See 8.4.3.1.1 for what we need to do here.
+ */
+static boolean_t
+i40e_setup_tx_rings(i40e_t *i40e)
+{
+ int i;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];
+ uint32_t reg;
+
+ /*
+ * Step 1. Clear the queue disable flag and verify that the
+ * index is set correctly.
+ */
+ i40e_pre_tx_queue_cfg(hw, i, B_TRUE);
+
+ /*
+ * Step 2. Prepare the queue's FPM/HMC context.
+ */
+ if (i40e_setup_tx_hmc(itrq) == B_FALSE)
+ return (B_FALSE);
+
+ /*
+ * Step 3. Verify that it's clear that this PF owns this queue.
+ */
+ reg = I40E_QTX_CTL_PF_QUEUE;
+ reg |= (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK;
+ I40E_WRITE_REG(hw, I40E_QTX_CTL(itrq->itrq_index), reg);
+ i40e_flush(hw);
+
+ /*
+ * Step 4. Set the QENA_REQ flag.
+ */
+ reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
+ VERIFY0(reg & (I40E_QTX_ENA_QENA_REQ_MASK |
+ I40E_QTX_ENA_QENA_STAT_MASK));
+ reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ I40E_WRITE_REG(hw, I40E_QTX_ENA(i), reg);
+ }
+
+ /*
+	 * Note that we wait for every queue to be enabled before we start
+	 * checking, so that by the time we check each one, most of them will
+	 * hopefully already have come up.
+ */
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ uint32_t j, reg;
+
+ /*
+ * Step 5. Verify that QENA_STAT has been set. It's promised
+ * that this should occur within about 10 us, but like BSD,
+ * we'll try for up to 100 ms for this queue.
+ */
+ for (j = 0; j < I40E_RING_WAIT_NTRIES; j++) {
+ reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
+
+ if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
+ break;
+ i40e_msec_delay(I40E_RING_WAIT_PAUSE);
+ }
+
+ if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
+			i40e_error(i40e, "failed to enable tx queue %d, timed "
+			    "out", i);
+ return (B_FALSE);
+ }
+ }
+
+ return (B_TRUE);
+}
+
+void
+i40e_stop(i40e_t *i40e, boolean_t free_allocations)
+{
+ int i;
+
+ ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
+
+ /*
+ * Shutdown and drain the tx and rx pipeline. We do this using the
+ * following steps.
+ *
+ * 1) Shutdown interrupts to all the queues (trying to keep the admin
+ * queue alive).
+ *
+ * 2) Remove all of the interrupt tx and rx causes by setting the
+ * interrupt linked lists to zero.
+ *
+	 * 3) Shutdown the tx and rx rings. Because i40e_shutdown_rings() should
+ * wait for all the queues to be disabled, once we reach that point
+ * it should be safe to free associated data.
+ *
+ * 4) Wait 50ms after all that is done. This ensures that the rings are
+ * ready for programming again and we don't have to think about this
+ * in other parts of the driver.
+ *
+	 * 5) Disable remaining chip interrupts (admin queue, etc.).
+ *
+ * 6) Verify that FM is happy with all the register accesses we
+ * performed.
+ */
+ i40e_intr_io_disable_all(i40e);
+ i40e_intr_io_clear_cause(i40e);
+
+ if (i40e_shutdown_rings(i40e) == B_FALSE) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
+ }
+
+ delay(50 * drv_usectohz(1000));
+
+ i40e_intr_chip_fini(i40e);
+
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ mutex_enter(&i40e->i40e_trqpairs[i].itrq_rx_lock);
+ mutex_enter(&i40e->i40e_trqpairs[i].itrq_tx_lock);
+ }
+
+ /*
+ * We should consider refactoring this to be part of the ring start /
+ * stop routines at some point.
+ */
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ i40e_stats_trqpair_fini(&i40e->i40e_trqpairs[i]);
+ }
+
+ if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
+ DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
+ }
+
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ i40e_tx_cleanup_ring(&i40e->i40e_trqpairs[i]);
+ }
+
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ mutex_exit(&i40e->i40e_trqpairs[i].itrq_rx_lock);
+ mutex_exit(&i40e->i40e_trqpairs[i].itrq_tx_lock);
+ }
+
+ i40e_stat_vsi_fini(i40e);
+
+ i40e->i40e_link_speed = 0;
+ i40e->i40e_link_duplex = 0;
+ i40e_link_state_set(i40e, LINK_STATE_UNKNOWN);
+
+ if (free_allocations) {
+ i40e_free_ring_mem(i40e, B_FALSE);
+ }
+}
+
+boolean_t
+i40e_start(i40e_t *i40e, boolean_t alloc)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ boolean_t rc = B_TRUE;
+ int i, err;
+
+ ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
+
+ if (alloc) {
+ if (i40e_alloc_ring_mem(i40e) == B_FALSE) {
+ i40e_error(i40e,
+ "Failed to allocate ring memory");
+ return (B_FALSE);
+ }
+ }
+
+ /*
+ * This should get refactored to be part of ring start and stop at
+ * some point, along with most of the logic here.
+ */
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ if (i40e_stats_trqpair_init(&i40e->i40e_trqpairs[i]) ==
+ B_FALSE) {
+ int j;
+
+ for (j = 0; j < i; j++) {
+ i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[j];
+ i40e_stats_trqpair_fini(itrq);
+ }
+ return (B_FALSE);
+ }
+ }
+
+ if (!i40e_chip_start(i40e)) {
+ i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
+ rc = B_FALSE;
+ goto done;
+ }
+
+ if (i40e_setup_rx_rings(i40e) == B_FALSE) {
+ rc = B_FALSE;
+ goto done;
+ }
+
+ if (i40e_setup_tx_rings(i40e) == B_FALSE) {
+ rc = B_FALSE;
+ goto done;
+ }
+
+ /*
+ * Enable broadcast traffic; however, do not enable multicast traffic.
+	 * That's handled exclusively through MAC's mc_multicst routines.
+ */
+ err = i40e_aq_set_vsi_broadcast(hw, i40e->i40e_vsi_id, B_TRUE, NULL);
+ if (err != I40E_SUCCESS) {
+		i40e_error(i40e, "failed to enable VSI broadcast: %d\n", err);
+ rc = B_FALSE;
+ goto done;
+ }
+
+ err = i40e_aq_set_mac_config(hw, i40e->i40e_frame_max, B_TRUE, 0, NULL);
+ if (err != I40E_SUCCESS) {
+ i40e_error(i40e, "failed to set MAC config: %d\n", err);
+ rc = B_FALSE;
+ goto done;
+ }
+
+ /*
+ * Finally, make sure that we're happy from an FM perspective.
+ */
+ if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
+ DDI_FM_OK) {
+ rc = B_FALSE;
+ goto done;
+ }
+
+ /* Clear state bits prior to final interrupt enabling. */
+ atomic_and_32(&i40e->i40e_state,
+ ~(I40E_ERROR | I40E_STALL | I40E_OVERTEMP));
+
+ i40e_intr_io_enable_all(i40e);
+
+done:
+ if (rc == B_FALSE) {
+ i40e_stop(i40e, B_FALSE);
+ if (alloc == B_TRUE) {
+ i40e_free_ring_mem(i40e, B_TRUE);
+ }
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
+ }
+
+ return (rc);
+}
+
+/*
+ * We may have loaned up descriptors to the stack. As such, if we still have
+ * them outstanding, then we will not continue with detach.
+ */
+static boolean_t
+i40e_drain_rx(i40e_t *i40e)
+{
+ mutex_enter(&i40e->i40e_rx_pending_lock);
+ while (i40e->i40e_rx_pending > 0) {
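+		/*
+		 * cv_reltimedwait() returns -1 when the timeout expires
+		 * without the cv being signalled; in that case we give up
+		 * rather than block detach indefinitely.
+		 */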
+ if (cv_reltimedwait(&i40e->i40e_rx_pending_cv,
+ &i40e->i40e_rx_pending_lock,
+ drv_usectohz(I40E_DRAIN_RX_WAIT), TR_CLOCK_TICK) == -1) {
+ mutex_exit(&i40e->i40e_rx_pending_lock);
+ return (B_FALSE);
+ }
+ }
+ mutex_exit(&i40e->i40e_rx_pending_lock);
+
+ return (B_TRUE);
+}
+
+static int
+i40e_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
+{
+ i40e_t *i40e;
+ struct i40e_osdep *osdep;
+ i40e_hw_t *hw;
+ int instance;
+
+ if (cmd != DDI_ATTACH)
+ return (DDI_FAILURE);
+
+ instance = ddi_get_instance(devinfo);
+ i40e = kmem_zalloc(sizeof (i40e_t), KM_SLEEP);
+
+ i40e->i40e_aqbuf = kmem_zalloc(I40E_ADMINQ_BUFSZ, KM_SLEEP);
+ i40e->i40e_instance = instance;
+ i40e->i40e_dip = devinfo;
+
+ hw = &i40e->i40e_hw_space;
+ osdep = &i40e->i40e_osdep_space;
+ hw->back = osdep;
+ osdep->ios_i40e = i40e;
+
+ ddi_set_driver_private(devinfo, i40e);
+
+ i40e_fm_init(i40e);
+ i40e->i40e_attach_progress |= I40E_ATTACH_FM_INIT;
+
+ if (pci_config_setup(devinfo, &osdep->ios_cfg_handle) != DDI_SUCCESS) {
+ i40e_error(i40e, "Failed to map PCI configurations.");
+ goto attach_fail;
+ }
+ i40e->i40e_attach_progress |= I40E_ATTACH_PCI_CONFIG;
+
+ if (!i40e_identify_hardware(i40e)) {
+ i40e_error(i40e, "Failed to identify hardware");
+ goto attach_fail;
+ }
+
+ if (!i40e_regs_map(i40e)) {
+ i40e_error(i40e, "Failed to map device registers.");
+ goto attach_fail;
+ }
+ i40e->i40e_attach_progress |= I40E_ATTACH_REGS_MAP;
+
+ i40e_init_properties(i40e);
+ i40e->i40e_attach_progress |= I40E_ATTACH_PROPS;
+
+ if (!i40e_common_code_init(i40e, hw))
+ goto attach_fail;
+ i40e->i40e_attach_progress |= I40E_ATTACH_COMMON_CODE;
+
+ /*
+ * When we participate in IRM, we should make sure that we register
+ * ourselves with it before callbacks.
+ */
+ if (!i40e_alloc_intrs(i40e, devinfo)) {
+ i40e_error(i40e, "Failed to allocate interrupts.");
+ goto attach_fail;
+ }
+ i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_INTR;
+
+ if (!i40e_alloc_trqpairs(i40e)) {
+ i40e_error(i40e,
+ "Failed to allocate receive & transmit rings.");
+ goto attach_fail;
+ }
+ i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_RINGSLOCKS;
+
+ if (!i40e_map_intrs_to_vectors(i40e)) {
+ i40e_error(i40e, "Failed to map interrupts to vectors.");
+ goto attach_fail;
+ }
+
+ if (!i40e_add_intr_handlers(i40e)) {
+ i40e_error(i40e, "Failed to add the interrupt handlers.");
+ goto attach_fail;
+ }
+ i40e->i40e_attach_progress |= I40E_ATTACH_ADD_INTR;
+
+ if (!i40e_final_init(i40e)) {
+ i40e_error(i40e, "Final initialization failed.");
+ goto attach_fail;
+ }
+ i40e->i40e_attach_progress |= I40E_ATTACH_INIT;
+
+ if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
+ DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
+ goto attach_fail;
+ }
+
+ if (!i40e_stats_init(i40e)) {
+ i40e_error(i40e, "Stats initialization failed.");
+ goto attach_fail;
+ }
+ i40e->i40e_attach_progress |= I40E_ATTACH_STATS;
+
+ if (!i40e_register_mac(i40e)) {
+ i40e_error(i40e, "Failed to register to MAC/GLDv3");
+ goto attach_fail;
+ }
+ i40e->i40e_attach_progress |= I40E_ATTACH_MAC;
+
+ i40e->i40e_periodic_id = ddi_periodic_add(i40e_timer, i40e,
+ I40E_CYCLIC_PERIOD, DDI_IPL_0);
+ if (i40e->i40e_periodic_id == 0) {
+ i40e_error(i40e, "Failed to add the link-check timer");
+ goto attach_fail;
+ }
+ i40e->i40e_attach_progress |= I40E_ATTACH_LINK_TIMER;
+
+ if (!i40e_enable_interrupts(i40e)) {
+ i40e_error(i40e, "Failed to enable DDI interrupts");
+ goto attach_fail;
+ }
+ i40e->i40e_attach_progress |= I40E_ATTACH_ENABLE_INTR;
+
+ atomic_or_32(&i40e->i40e_state, I40E_INITIALIZED);
+
+ mutex_enter(&i40e_glock);
+ list_insert_tail(&i40e_glist, i40e);
+ mutex_exit(&i40e_glock);
+
+ return (DDI_SUCCESS);
+
+attach_fail:
+ i40e_unconfigure(devinfo, i40e);
+ return (DDI_FAILURE);
+}
+
+static int
+i40e_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
+{
+ i40e_t *i40e;
+
+ if (cmd != DDI_DETACH)
+ return (DDI_FAILURE);
+
+ i40e = (i40e_t *)ddi_get_driver_private(devinfo);
+ if (i40e == NULL) {
+ i40e_log(NULL, "i40e_detach() called with no i40e pointer!");
+ return (DDI_FAILURE);
+ }
+
+ if (i40e_drain_rx(i40e) == B_FALSE) {
+ i40e_log(i40e, "timed out draining DMA resources, %d buffers "
+ "remain", i40e->i40e_rx_pending);
+ return (DDI_FAILURE);
+ }
+
+ mutex_enter(&i40e_glock);
+ list_remove(&i40e_glist, i40e);
+ mutex_exit(&i40e_glock);
+
+ i40e_unconfigure(devinfo, i40e);
+
+ return (DDI_SUCCESS);
+}
+
+static struct cb_ops i40e_cb_ops = {
+ nulldev, /* cb_open */
+ nulldev, /* cb_close */
+ nodev, /* cb_strategy */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ nodev, /* cb_read */
+ nodev, /* cb_write */
+ nodev, /* cb_ioctl */
+ nodev, /* cb_devmap */
+ nodev, /* cb_mmap */
+ nodev, /* cb_segmap */
+ nochpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ NULL, /* cb_stream */
+ D_MP | D_HOTPLUG, /* cb_flag */
+ CB_REV, /* cb_rev */
+ nodev, /* cb_aread */
+ nodev /* cb_awrite */
+};
+
+static struct dev_ops i40e_dev_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ NULL, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ i40e_attach, /* devo_attach */
+ i40e_detach, /* devo_detach */
+ nodev, /* devo_reset */
+ &i40e_cb_ops, /* devo_cb_ops */
+ NULL, /* devo_bus_ops */
+ ddi_power, /* devo_power */
+ ddi_quiesce_not_supported /* devo_quiesce */
+};
+
+static struct modldrv i40e_modldrv = {
+ &mod_driverops,
+ i40e_ident,
+ &i40e_dev_ops
+};
+
+static struct modlinkage i40e_modlinkage = {
+ MODREV_1,
+ &i40e_modldrv,
+ NULL
+};
+
+/*
+ * Module Initialization Functions.
+ */
+int
+_init(void)
+{
+ int status;
+
+ list_create(&i40e_glist, sizeof (i40e_t), offsetof(i40e_t, i40e_glink));
+ list_create(&i40e_dlist, sizeof (i40e_device_t),
+ offsetof(i40e_device_t, id_link));
+ mutex_init(&i40e_glock, NULL, MUTEX_DRIVER, NULL);
+ mac_init_ops(&i40e_dev_ops, I40E_MODULE_NAME);
+
+ status = mod_install(&i40e_modlinkage);
+ if (status != DDI_SUCCESS) {
+ mac_fini_ops(&i40e_dev_ops);
+ mutex_destroy(&i40e_glock);
+ list_destroy(&i40e_dlist);
+ list_destroy(&i40e_glist);
+ }
+
+ return (status);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&i40e_modlinkage, modinfop));
+}
+
+int
+_fini(void)
+{
+ int status;
+
+ status = mod_remove(&i40e_modlinkage);
+ if (status == DDI_SUCCESS) {
+ mac_fini_ops(&i40e_dev_ops);
+ mutex_destroy(&i40e_glock);
+ list_destroy(&i40e_dlist);
+ list_destroy(&i40e_glist);
+ }
+
+ return (status);
+}
diff --git a/usr/src/uts/common/io/i40e/i40e_osdep.c b/usr/src/uts/common/io/i40e/i40e_osdep.c
new file mode 100644
index 0000000000..41a13ee4ec
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/i40e_osdep.c
@@ -0,0 +1,236 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
+ * Copyright 2016 Joyent, Inc.
+ */
+
+#include "i40e_sw.h"
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_osdep.h"
+
+#include <sys/dtrace.h>
+
+/* ARGSUSED */
+i40e_status
+i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size)
+{
+ mem->va = kmem_zalloc(size, KM_SLEEP);
+ mem->size = size;
+ return (I40E_SUCCESS);
+}
+
+/* ARGSUSED */
+i40e_status
+i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+{
+ if (mem->va != NULL)
+ kmem_free(mem->va, mem->size);
+ return (I40E_SUCCESS);
+}
+
+/* ARGSUSED */
+i40e_status
+i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+ enum i40e_memory_type type, u64 size, u32 alignment)
+{
+ int rc;
+ i40e_t *i40e = OS_DEP(hw)->ios_i40e;
+ dev_info_t *dip = i40e->i40e_dip;
+ size_t len;
+ ddi_dma_cookie_t cookie;
+ uint_t cookie_num;
+ ddi_dma_attr_t attr;
+
+ /*
+ * Because we need to honor the specified alignment, we need to
+ * dynamically construct the attributes. We save the alignment for
+ * debugging purposes.
+ */
+ bcopy(&i40e->i40e_static_dma_attr, &attr, sizeof (ddi_dma_attr_t));
+ attr.dma_attr_align = alignment;
+ mem->idm_alignment = alignment;
+	rc = ddi_dma_alloc_handle(dip, &attr, DDI_DMA_DONTWAIT, NULL,
+	    &mem->idm_dma_handle);
+ if (rc != DDI_SUCCESS) {
+ mem->idm_dma_handle = NULL;
+ i40e_error(i40e, "failed to allocate DMA handle for common "
+ "code: %d", rc);
+
+ /*
+ * Swallow unknown errors and treat them like we do
+ * DDI_DMA_NORESOURCES, in other words, a memory error.
+ */
+ if (rc == DDI_DMA_BADATTR)
+ return (I40E_ERR_PARAM);
+ return (I40E_ERR_NO_MEMORY);
+ }
+
+ rc = ddi_dma_mem_alloc(mem->idm_dma_handle, size,
+ &i40e->i40e_buf_acc_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
+ NULL, (caddr_t *)&mem->va, &len, &mem->idm_acc_handle);
+ if (rc != DDI_SUCCESS) {
+ mem->idm_acc_handle = NULL;
+ mem->va = NULL;
+ ASSERT(mem->idm_dma_handle != NULL);
+ ddi_dma_free_handle(&mem->idm_dma_handle);
+ mem->idm_dma_handle = NULL;
+
+		i40e_error(i40e, "failed to allocate %llu bytes of DMA memory "
+		    "for common code", (unsigned long long)size);
+ return (I40E_ERR_NO_MEMORY);
+ }
+
+ bzero(mem->va, len);
+
+ rc = ddi_dma_addr_bind_handle(mem->idm_dma_handle, NULL, mem->va, len,
+ DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
+ &cookie, &cookie_num);
+ if (rc != DDI_DMA_MAPPED) {
+		mem->pa = 0;
+ ASSERT(mem->idm_acc_handle != NULL);
+ ddi_dma_mem_free(&mem->idm_acc_handle);
+ mem->idm_acc_handle = NULL;
+ mem->va = NULL;
+ ASSERT(mem->idm_dma_handle != NULL);
+ ddi_dma_free_handle(&mem->idm_dma_handle);
+ mem->idm_dma_handle = NULL;
+
+		i40e_error(i40e, "failed to bind %lu byte sized dma region: "
+		    "%d", (ulong_t)len, rc);
+ switch (rc) {
+ case DDI_DMA_INUSE:
+ return (I40E_ERR_NOT_READY);
+ case DDI_DMA_TOOBIG:
+ return (I40E_ERR_INVALID_SIZE);
+ case DDI_DMA_NOMAPPING:
+ case DDI_DMA_NORESOURCES:
+ default:
+ return (I40E_ERR_NO_MEMORY);
+ }
+ }
+
+ ASSERT(cookie_num == 1);
+ mem->pa = cookie.dmac_laddress;
+ /*
+	 * Lint doesn't like this because the common code gives us a uint64_t
+	 * as input, but then asks us to assign it to a size_t. So lint's
+	 * right, but in this case there isn't much we can do.
+ */
+ mem->size = (size_t)size;
+
+ return (I40E_SUCCESS);
+}
+
+/* ARGSUSED */
+i40e_status
+i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+ if (mem->pa != 0) {
+ VERIFY(mem->idm_dma_handle != NULL);
+ (void) ddi_dma_unbind_handle(mem->idm_dma_handle);
+ mem->pa = 0;
+ mem->size = 0;
+ }
+
+ if (mem->idm_acc_handle != NULL) {
+ ddi_dma_mem_free(&mem->idm_acc_handle);
+ mem->idm_acc_handle = NULL;
+ mem->va = NULL;
+ }
+
+ if (mem->idm_dma_handle != NULL) {
+ ddi_dma_free_handle(&mem->idm_dma_handle);
+ mem->idm_dma_handle = NULL;
+ }
+
+ /*
+ * Watch out for sloppiness.
+ */
+ ASSERT(mem->pa == 0);
+ ASSERT(mem->va == NULL);
+ ASSERT(mem->size == 0);
+ mem->idm_alignment = UINT32_MAX;
+
+ return (I40E_SUCCESS);
+}
+
+/*
+ * The common code wants to initialize its 'spinlocks' here, which we
+ * implement as adaptive mutexes. At this time they are only used to protect
+ * the adminq's data, so they are only ever taken outside of interrupt
+ * context; even then, nothing here operates above lock level or gets
+ * involved with high-level interrupts.
+ */
+void
+i40e_init_spinlock(struct i40e_spinlock *lock)
+{
+ mutex_init(&lock->ispl_mutex, NULL, MUTEX_DRIVER, NULL);
+}
+
+void
+i40e_acquire_spinlock(struct i40e_spinlock *lock)
+{
+ mutex_enter(&lock->ispl_mutex);
+}
+
+void
+i40e_release_spinlock(struct i40e_spinlock *lock)
+{
+ mutex_exit(&lock->ispl_mutex);
+}
+
+void
+i40e_destroy_spinlock(struct i40e_spinlock *lock)
+{
+ mutex_destroy(&lock->ispl_mutex);
+}
+
+boolean_t
+i40e_set_hw_bus_info(struct i40e_hw *hw)
+{
+ uint8_t pcie_id = PCI_CAP_ID_PCI_E;
+ uint16_t pcie_cap, value;
+ int status;
+
+ /* locate the pci-e capability block */
+ status = pci_lcap_locate((OS_DEP(hw))->ios_cfg_handle, pcie_id,
+ &pcie_cap);
+ if (status != DDI_SUCCESS) {
+ i40e_error(OS_DEP(hw)->ios_i40e, "failed to locate PCIe "
+		    "capability block: %d", status);
+ return (B_FALSE);
+ }
+
+ value = pci_config_get16(OS_DEP(hw)->ios_cfg_handle,
+ pcie_cap + PCIE_LINKSTS);
+
+ i40e_set_pci_config_data(hw, value);
+
+ return (B_TRUE);
+}
+
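+/*
+ * Common code debug output is funneled into a DTrace SDT probe rather than
+ * being sent to the console. As an illustrative (untested) sketch, the
+ * messages could be observed with something like:
+ *
+ *	# dtrace -n 'sdt:::i40e-debug { printf("%s", stringof(arg1)); }'
+ */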
+/* ARGSUSED */
+void
+i40e_debug(void *hw, u32 mask, char *fmt, ...)
+{
+ char buf[1024];
+ va_list args;
+
+ va_start(args, fmt);
+ (void) vsnprintf(buf, sizeof (buf), fmt, args);
+ va_end(args);
+
+ DTRACE_PROBE2(i40e__debug, uint32_t, mask, char *, buf);
+}
diff --git a/usr/src/uts/common/io/i40e/i40e_osdep.h b/usr/src/uts/common/io/i40e/i40e_osdep.h
new file mode 100644
index 0000000000..12f498bc72
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/i40e_osdep.h
@@ -0,0 +1,201 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
+ * Copyright 2016 Joyent, Inc.
+ */
+
+#ifndef _I40E_OSDEP_H
+#define _I40E_OSDEP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/pci_cap.h>
+#include <sys/sysmacros.h>
+
+/*
+ * For the moment, we use this to basically deal with a few custom changes
+ * particularly around mutex initialization. This is used to indicate that we
+ * should take illumos variants.
+ */
+#define I40E_ILLUMOS 1
+
+#define DEBUGOUT(S) i40e_debug(NULL, 0, S)
+#define DEBUGOUT1(S, A) i40e_debug(NULL, 0, S, A)
+#define DEBUGOUT2(S, A, B) i40e_debug(NULL, 0, S, A, B)
+#define DEBUGOUT3(S, A, B, C) i40e_debug(NULL, 0, S, A, B, C)
+#define DEBUGOUT4(S, A, B, C, D) \
+ i40e_debug(NULL, 0, S, A, B, C, D)
+#define DEBUGOUT5(S, A, B, C, D, E) \
+ i40e_debug(NULL, 0, S, A, B, C, D, E)
+#define DEBUGOUT6(S, A, B, C, D, E, F) \
+ i40e_debug(NULL, 0, S, A, B, C, D, E, F)
+#define DEBUGOUT7(S, A, B, C, D, E, F, G) \
+ i40e_debug(NULL, 0, S, A, B, C, D, E, F, G)
+#define DEBUGFUNC(F) DEBUGOUT(F);
+
+
+#define UNREFERENCED_PARAMETER(x) _NOTE(ARGUNUSED(x))
+#define UNREFERENCED_1PARAMETER(_p) UNREFERENCED_PARAMETER(_p)
+#define UNREFERENCED_2PARAMETER(_p, _q) _NOTE(ARGUNUSED(_p, _q))
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) _NOTE(ARGUNUSED(_p, _q, _r))
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) _NOTE(ARGUNUSED(_p, _q,_r, _s))
+
+#define INLINE inline
+
+/*
+ * The mdb dmod needs to use this code as well, but mdb already defines TRUE and
+ * FALSE in the module API. Thus we don't define these if we're building the
+ * dmod, as indicated by _I40E_MDB_DMOD. However, if we don't define these, then
+ * the shared code will be upset.
+ */
+#ifndef _I40E_MDB_DMOD
+#define FALSE B_FALSE
+#define false B_FALSE
+#define TRUE B_TRUE
+#define true B_TRUE
+#endif /* _I40E_MDB_DMOD */
+
+
+#define CPU_TO_LE16(o) LE_16(o)
+#define CPU_TO_LE32(s) LE_32(s)
+#define CPU_TO_LE64(h) LE_64(h)
+#define LE16_TO_CPU(a) LE_16(a)
+#define LE32_TO_CPU(c) LE_32(c)
+#define LE64_TO_CPU(k) LE_64(k)
+
+#define I40E_NTOHS(a) ntohs(a)
+#define I40E_NTOHL(a) ntohl(a)
+#define I40E_HTONS(a) htons(a)
+#define I40E_HTONL(a) htonl(a)
+
+#define i40e_memset(a, b, c, d) memset((a), (b), (c))
+#define i40e_memcpy(a, b, c, d) bcopy((b), (a), (c))
+
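+/*
+ * Note that drv_usecwait() busy-waits rather than sleeping, so the delay
+ * macros below should only be used for short waits or in contexts where
+ * blocking is not possible.
+ */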
+#define i40e_usec_delay(x) drv_usecwait(x)
+#define i40e_msec_delay(x) drv_usecwait(1000 * (x))
+
+#define FIELD_SIZEOF(x, y) (sizeof (((x*)0)->y))
+
+#define BIT(a) (1UL << (a))
+#define BIT_ULL(a) (1ULL << (a))
+
+typedef boolean_t bool;
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+
+/* long string relief */
+typedef enum i40e_status_code i40e_status;
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+
+/*
+ * Most other systems use spin locks for interrupts. However, illumos always
+ * uses a single kmutex_t for both and we decide what to do based on IPL (hint:
+ * it's not going to be a true spin lock, we'll use an adaptive mutex).
+ */
+struct i40e_spinlock {
+ kmutex_t ispl_mutex;
+};
+
+/*
+ * Note, while prefetch is strictly not present on all architectures, (it's an
+ * SSE extension on i386), it is expected that the platforms provide it.
+ */
+#define prefetch(x) prefetch_read_many(x)
+
+struct i40e_osdep {
+ off_t ios_reg_size;
+ ddi_acc_handle_t ios_reg_handle;
+ ddi_acc_handle_t ios_cfg_handle;
+ struct i40e *ios_i40e;
+};
+
+/*
+ * This structure and its members are defined by the common code. This means we
+ * cannot structure prefix it, even if we want to.
+ */
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+};
+
+/*
+ * The first three members of this structure are defined by the common code.
+ * This means we cannot structure prefix them, even if we wanted to.
+ */
+struct i40e_dma_mem {
+ void *va; /* Virtual address. */
+ u64 pa; /* Physical (DMA/Hardware) address. */
+ size_t size; /* Buffer size. */
+
+ /* illumos-private members */
+ ddi_acc_handle_t idm_acc_handle; /* Data access handle */
+ ddi_dma_handle_t idm_dma_handle; /* DMA handle */
+ uint32_t idm_alignment; /* Requested alignment */
+};
+
+struct i40e_hw; /* forward decl */
+
+#define OS_DEP(hw) ((struct i40e_osdep *)((hw)->back))
+#define i40e_read_pci_cfg(hw, reg) \
+ (pci_config_get16(OS_DEP(hw)->ios_cfg_handle, (reg)))
+#define i40e_write_pci_cfg(hw, reg, value) \
+ (pci_config_put16(OS_DEP(hw)->ios_cfg_handle, (reg), (value)))
+
+/*
+ * Intel expects that the symbol wr32 and r32 be defined to something which can
+ * read and write the 32-bit register in PCI space.
+ *
+ * To make it easier for readers and satisfy the general agreement that macros
+ * should be in all capitals, we use our own versions of these macros.
+ */
+#define wr32(hw, reg, value) \
+ ddi_put32(OS_DEP(hw)->ios_reg_handle, \
+ (uint32_t *)((uintptr_t)(hw)->hw_addr + (reg)), (value))
+#define rd32(hw, reg) \
+ ddi_get32(OS_DEP(hw)->ios_reg_handle, \
+ (uint32_t *)((uintptr_t)(hw)->hw_addr + (reg)))
+#define I40E_WRITE_REG wr32
+#define I40E_READ_REG rd32
+
+/*
+ * The use of GLEN_STAT presumes that we're only using this file for a PF
+ * driver. If we end up doing a VF driver, then we'll want to logically change
+ * this.
+ */
+#define i40e_flush(hw) (void) rd32(hw, I40E_GLGEN_STAT)
+
+extern void i40e_debug(void *, u32, char *, ...);
+extern boolean_t i40e_set_hw_bus_info(struct i40e_hw *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _I40E_OSDEP_H */
diff --git a/usr/src/uts/common/io/i40e/i40e_stats.c b/usr/src/uts/common/io/i40e/i40e_stats.c
new file mode 100644
index 0000000000..c7dd403fc8
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/i40e_stats.c
@@ -0,0 +1,1310 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
+ * Copyright 2016 Joyent, Inc.
+ */
+
+#include "i40e_sw.h"
+
+/*
+ * -------------------
+ * Statistics Overview
+ * -------------------
+ *
+ * As part of managing the driver and understanding what's going on, we keep
+ * track of statistics from two different sources:
+ *
+ * - Statistics from the device
+ * - Statistics maintained by the driver
+ *
+ * Generally, the hardware provides us traditional IETF and MIB Ethernet
+ * statistics, for example, the total packets in and out, various errors in
+ * packets, the negotiated status etc. The driver, on the other hand, primarily
+ * contains statistics around driver-specific issues, such as information about
+ * checksumming on receive and transmit and the data in and out of a specific
+ * ring.
+ *
+ * We export statistics in two different forms. The first form is the required
+ * GLDv3 endpoints, specifically:
+ *
+ * - The general GLDv3 mc_getstat interface
+ * - The GLDv3 ring mri_stat interface
+ *
+ * The second form that we export statistics is through kstats. kstats are
+ * exported in different ways. Particularly we arrange the kstats to monitor the
+ * layout of the device. Currently we have kstats which capture both the IEEE
+ * and driver-implementation specific stats. There are kstats for each of the
+ * following structures:
+ *
+ * - Each physical function
+ * - Each VSI
+ * - Each Queue
+ *
+ * The PF's kstat is called 'pfstats' so as not to collide with other system
+ * provided kstats. Thus, for instance 0, usually the first PF, the full kstat
+ * would be: i40e:0:pfstats:.
+ *
+ * The kstat for each VSI is called vsi_%instance. So for the first PF, which is
+ * instance zero and the first vsi, which has id 0, it will be named vsi_0 and
+ * the full kstat would be i40e:0:vsi_0:.
+ *
+ * The kstat for each queue is trqpair_tx_%queue and trqpair_rx_%queue. Note
+ * that these are labeled based on their local index, which may mean that
+ * different instances have overlapping sets of queues. This isn't a problem as
+ * the kstats will always use the instance number of the pf to distinguish it in
+ * the kstat tuple.
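+ *
+ * As an illustration (instance numbers are hypothetical), the PF kstat for
+ * instance 0 can be read from userland with:
+ *
+ *	# kstat -m i40e -i 0 -n pfstats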
+ *
+ * ---------------------
+ * Hardware Arrangements
+ * ---------------------
+ *
+ * The hardware keeps statistics at each physical function/MAC (PF) and it keeps
+ * statistics on each virtual station interface (VSI). Currently we only use one
+ * VSI per PF (see the i40e_main.c theory statement). The hardware has a limited
+ * number of statistics units available. While every PF is guaranteed to have a
+ * statistics unit, it is possible that we will run out for a given VSI. We'll
+ * have to figure out an appropriate strategy here when we end up supporting
+ * multiple VSIs.
+ *
+ * The hardware keeps these statistics as 32-bit and 48-bit counters. We are
+ * required to read them and then compute the differences between them. The
+ * 48-bit counters span more than one 32-bit register in the BAR. The hardware
+ * suggests that to read them, we perform 64-bit reads of the lower of the two
+ * registers that make up a 48-bit stat. The hardware guarantees that the reads
+ * of those two registers will be atomic and we'll get a consistent value,
+ * which is not a property the hardware guarantees for arbitrary pairs of
+ * register reads.
+ *
+ * For every kstat we have based on this, we have a corresponding uint64_t that
+ * we keep around as a base value in a separate structure. Whenever we read a
+ * value, we end up grabbing the current value, calculating a difference between
+ * the previously stored value and the current one, and updating the kstat with
+ * that difference. After which, we go through and update the base value that we
+ * stored. This is all encapsulated in i40e_stat_get_uint32() and
+ * i40e_stat_get_uint48().
+ *
+ * The only unfortunate thing here is that the hardware doesn't give us any kind
+ * of overflow counter. It just tries to make sure that the uint32_t and
+ * uint48_t counters are large enough to hopefully not overflow right away. This
+ * isn't the most reassuring statement and we should investigate ways of
+ * ensuring that if a system is active, but not actively measured, we don't lose
+ * data.
+ *
+ * The pf kstats data is stored in the i40e_t`i40e_pf_kstat. It is backed by the
+ * i40e_t`i40e_pf_stat structure. Similarly the VSI related kstat is in
+ * i40e_t`i40e_vsi_kstat and the data is backed in the i40e_t`i40e_vsi_stat. All
+ * of this data is protected by the i40e_stat_lock, which should be taken last,
+ * when acquiring locks.
+ */
+
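+/*
+ * Illustrative example with made-up values: if the stored 48-bit base is
+ * 0xFFFFFFFFFF00 and the current raw read is 0x10, then raw < base, so the
+ * wraparound path below computes
+ * delta = 0x1000000000000 - 0xFFFFFFFFFF00 + 0x10 = 0x110 events.
+ */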
+static void
+i40e_stat_get_uint48(i40e_t *i40e, uintptr_t reg, kstat_named_t *kstat,
+ uint64_t *base, boolean_t init)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ uint64_t raw, delta;
+
+ ASSERT(MUTEX_HELD(&i40e->i40e_stat_lock));
+
+ raw = ddi_get64(i40e->i40e_osdep_space.ios_reg_handle,
+ (uint64_t *)((uintptr_t)hw->hw_addr + reg));
+
+ if (init == B_TRUE) {
+ *base = raw;
+ return;
+ }
+
+ /*
+ * Check for wraparound, note that the counter is actually only 48-bits,
+ * even though it has two uint32_t regs present.
+ */
+ if (raw >= *base) {
+ delta = raw - *base;
+ } else {
+ delta = 0x1000000000000ULL - *base + raw;
+ }
+
+ kstat->value.ui64 += delta;
+ *base = raw;
+}
+
+static void
+i40e_stat_get_uint32(i40e_t *i40e, uintptr_t reg, kstat_named_t *kstat,
+ uint64_t *base, boolean_t init)
+{
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ uint64_t raw, delta;
+
+ ASSERT(MUTEX_HELD(&i40e->i40e_stat_lock));
+
+ raw = ddi_get32(i40e->i40e_osdep_space.ios_reg_handle,
+ (uint32_t *)((uintptr_t)hw->hw_addr + reg));
+
+ if (init == B_TRUE) {
+ *base = raw;
+ return;
+ }
+
+ /*
+ * Watch out for wraparound as we only have a 32-bit counter.
+ */
+ if (raw >= *base) {
+ delta = raw - *base;
+ } else {
+ delta = 0x100000000ULL - *base + raw;
+ }
+
+ kstat->value.ui64 += delta;
+ *base = raw;
+}
+
+static void
+i40e_stat_vsi_update(i40e_t *i40e, boolean_t init)
+{
+ i40e_vsi_stats_t *ivs;
+ i40e_vsi_kstats_t *ivk;
+ int id = i40e->i40e_vsi_stat_id;
+
+ ASSERT(i40e->i40e_vsi_kstat != NULL);
+ ivs = &i40e->i40e_vsi_stat;
+ ivk = i40e->i40e_vsi_kstat->ks_data;
+
+ mutex_enter(&i40e->i40e_stat_lock);
+
+ i40e_stat_get_uint48(i40e, I40E_GLV_GORCL(id), &ivk->ivk_rx_bytes,
+ &ivs->ivs_rx_bytes, init);
+ i40e_stat_get_uint48(i40e, I40E_GLV_UPRCL(id), &ivk->ivk_rx_unicast,
+ &ivs->ivs_rx_unicast, init);
+ i40e_stat_get_uint48(i40e, I40E_GLV_MPRCL(id), &ivk->ivk_rx_multicast,
+ &ivs->ivs_rx_multicast, init);
+ i40e_stat_get_uint48(i40e, I40E_GLV_BPRCL(id), &ivk->ivk_rx_broadcast,
+ &ivs->ivs_rx_broadcast, init);
+
+ i40e_stat_get_uint32(i40e, I40E_GLV_RDPC(id), &ivk->ivk_rx_discards,
+ &ivs->ivs_rx_discards, init);
+ i40e_stat_get_uint32(i40e, I40E_GLV_RUPP(id),
+ &ivk->ivk_rx_unknown_protocol,
+ &ivs->ivs_rx_unknown_protocol,
+ init);
+
+ i40e_stat_get_uint48(i40e, I40E_GLV_GOTCL(id), &ivk->ivk_tx_bytes,
+ &ivs->ivs_tx_bytes, init);
+ i40e_stat_get_uint48(i40e, I40E_GLV_UPTCL(id), &ivk->ivk_tx_unicast,
+ &ivs->ivs_tx_unicast, init);
+ i40e_stat_get_uint48(i40e, I40E_GLV_MPTCL(id), &ivk->ivk_tx_multicast,
+ &ivs->ivs_tx_multicast, init);
+ i40e_stat_get_uint48(i40e, I40E_GLV_BPTCL(id), &ivk->ivk_tx_broadcast,
+ &ivs->ivs_tx_broadcast, init);
+
+ i40e_stat_get_uint32(i40e, I40E_GLV_TEPC(id), &ivk->ivk_tx_errors,
+ &ivs->ivs_tx_errors, init);
+
+ mutex_exit(&i40e->i40e_stat_lock);
+
+ /*
+	 * We follow ixgbe's lead here: if a kstat update didn't work 100%,
+	 * we mark the service as unaffected, as opposed to what we do when
+	 * fetching things for MAC directly.
+ */
+ if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
+ DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_UNAFFECTED);
+ }
+}
+
+static int
+i40e_stat_vsi_kstat_update(kstat_t *ksp, int rw)
+{
+ i40e_t *i40e;
+
+ if (rw == KSTAT_WRITE)
+ return (EACCES);
+
+ i40e = ksp->ks_private;
+ i40e_stat_vsi_update(i40e, B_FALSE);
+ return (0);
+}
+
+void
+i40e_stat_vsi_fini(i40e_t *i40e)
+{
+ if (i40e->i40e_vsi_kstat != NULL) {
+ kstat_delete(i40e->i40e_vsi_kstat);
+ i40e->i40e_vsi_kstat = NULL;
+ }
+}
+
+boolean_t
+i40e_stat_vsi_init(i40e_t *i40e)
+{
+ kstat_t *ksp;
+ i40e_vsi_kstats_t *ivk;
+ char buf[64];
+
+ (void) snprintf(buf, sizeof (buf), "vsi_%d", i40e->i40e_vsi_id);
+
+ ksp = kstat_create(I40E_MODULE_NAME, ddi_get_instance(i40e->i40e_dip),
+ buf, "net", KSTAT_TYPE_NAMED,
+ sizeof (i40e_vsi_kstats_t) / sizeof (kstat_named_t), 0);
+
+ if (ksp == NULL) {
+ i40e_error(i40e, "Failed to create kstats for VSI %d",
+ i40e->i40e_vsi_id);
+ return (B_FALSE);
+ }
+
+ i40e->i40e_vsi_kstat = ksp;
+ ivk = ksp->ks_data;
+ ksp->ks_update = i40e_stat_vsi_kstat_update;
+ ksp->ks_private = i40e;
+
+ kstat_named_init(&ivk->ivk_rx_bytes, "rx_bytes",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ivk->ivk_rx_unicast, "rx_unicast",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ivk->ivk_rx_multicast, "rx_multicast",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ivk->ivk_rx_broadcast, "rx_broadcast",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ivk->ivk_rx_discards, "rx_discards",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ivk->ivk_rx_unknown_protocol, "rx_unknown_protocol",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ivk->ivk_tx_bytes, "tx_bytes",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ivk->ivk_tx_unicast, "tx_unicast",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ivk->ivk_tx_multicast, "tx_multicast",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ivk->ivk_tx_broadcast, "tx_broadcast",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ivk->ivk_tx_errors, "tx_errors",
+ KSTAT_DATA_UINT64);
+
+ bzero(&i40e->i40e_vsi_stat, sizeof (i40e_vsi_stats_t));
+ i40e_stat_vsi_update(i40e, B_TRUE);
+ kstat_install(i40e->i40e_vsi_kstat);
+
+ return (B_TRUE);
+}
+
+static void
+i40e_stat_pf_update(i40e_t *i40e, boolean_t init)
+{
+ i40e_pf_stats_t *ips;
+ i40e_pf_kstats_t *ipk;
+ int port = i40e->i40e_hw_space.port;
+ int i;
+
+ ASSERT(i40e->i40e_pf_kstat != NULL);
+ ips = &i40e->i40e_pf_stat;
+ ipk = i40e->i40e_pf_kstat->ks_data;
+
+ mutex_enter(&i40e->i40e_stat_lock);
+
+ /* 64-bit PCIe regs */
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_GORCL(port),
+ &ipk->ipk_rx_bytes, &ips->ips_rx_bytes, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_UPRCL(port),
+ &ipk->ipk_rx_unicast, &ips->ips_rx_unicast, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_MPRCL(port),
+ &ipk->ipk_rx_multicast, &ips->ips_rx_multicast, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_BPRCL(port),
+ &ipk->ipk_rx_broadcast, &ips->ips_rx_broadcast, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_GOTCL(port),
+ &ipk->ipk_tx_bytes, &ips->ips_tx_bytes, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_UPTCL(port),
+ &ipk->ipk_tx_unicast, &ips->ips_tx_unicast, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_MPTCL(port),
+ &ipk->ipk_tx_multicast, &ips->ips_tx_multicast, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_BPTCL(port),
+ &ipk->ipk_tx_broadcast, &ips->ips_tx_broadcast, init);
+
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC64L(port),
+ &ipk->ipk_rx_size_64, &ips->ips_rx_size_64, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC127L(port),
+ &ipk->ipk_rx_size_127, &ips->ips_rx_size_127, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC255L(port),
+ &ipk->ipk_rx_size_255, &ips->ips_rx_size_255, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC511L(port),
+ &ipk->ipk_rx_size_511, &ips->ips_rx_size_511, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC1023L(port),
+ &ipk->ipk_rx_size_1023, &ips->ips_rx_size_1023, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC1522L(port),
+ &ipk->ipk_rx_size_1522, &ips->ips_rx_size_1522, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC9522L(port),
+ &ipk->ipk_rx_size_9522, &ips->ips_rx_size_9522, init);
+
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC64L(port),
+ &ipk->ipk_tx_size_64, &ips->ips_tx_size_64, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC127L(port),
+ &ipk->ipk_tx_size_127, &ips->ips_tx_size_127, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC255L(port),
+ &ipk->ipk_tx_size_255, &ips->ips_tx_size_255, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC511L(port),
+ &ipk->ipk_tx_size_511, &ips->ips_tx_size_511, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC1023L(port),
+ &ipk->ipk_tx_size_1023, &ips->ips_tx_size_1023, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC1522L(port),
+ &ipk->ipk_tx_size_1522, &ips->ips_tx_size_1522, init);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC9522L(port),
+ &ipk->ipk_tx_size_9522, &ips->ips_tx_size_9522, init);
+
+ /* 32-bit PCIe regs */
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_LXONRXC(port),
+ &ipk->ipk_link_xon_rx, &ips->ips_link_xon_rx, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_LXOFFRXC(port),
+ &ipk->ipk_link_xoff_rx, &ips->ips_link_xoff_rx, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_LXONTXC(port),
+ &ipk->ipk_link_xon_tx, &ips->ips_link_xon_tx, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_LXOFFTXC(port),
+ &ipk->ipk_link_xoff_tx, &ips->ips_link_xoff_tx, init);
+
+ for (i = 0; i < 8; i++) {
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_PXONRXC(port, i),
+ &ipk->ipk_priority_xon_rx[i], &ips->ips_priority_xon_rx[i],
+ init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_PXOFFRXC(port, i),
+ &ipk->ipk_priority_xoff_rx[i],
+ &ips->ips_priority_xoff_rx[i],
+ init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_PXONTXC(port, i),
+ &ipk->ipk_priority_xon_tx[i], &ips->ips_priority_xon_tx[i],
+ init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_PXOFFTXC(port, i),
+ &ipk->ipk_priority_xoff_tx[i],
+ &ips->ips_priority_xoff_tx[i],
+ init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RXON2OFFCNT(port, i),
+ &ipk->ipk_priority_xon_2_xoff[i],
+ &ips->ips_priority_xon_2_xoff[i],
+ init);
+ }
+
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_CRCERRS(port),
+ &ipk->ipk_crc_errors, &ips->ips_crc_errors, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_ILLERRC(port),
+ &ipk->ipk_illegal_bytes, &ips->ips_illegal_bytes, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_MLFC(port),
+ &ipk->ipk_mac_local_faults, &ips->ips_mac_local_faults, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_MRFC(port),
+ &ipk->ipk_mac_remote_faults, &ips->ips_mac_remote_faults, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RLEC(port),
+ &ipk->ipk_rx_length_errors, &ips->ips_rx_length_errors, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RUC(port),
+ &ipk->ipk_rx_undersize, &ips->ips_rx_undersize, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RFC(port),
+ &ipk->ipk_rx_fragments, &ips->ips_rx_fragments, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_ROC(port),
+ &ipk->ipk_rx_oversize, &ips->ips_rx_oversize, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RJC(port),
+ &ipk->ipk_rx_jabber, &ips->ips_rx_jabber, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RDPC(port),
+ &ipk->ipk_rx_discards, &ips->ips_rx_discards, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_LDPC(port),
+ &ipk->ipk_rx_vm_discards, &ips->ips_rx_vm_discards, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_MSPDC(port),
+ &ipk->ipk_rx_short_discards, &ips->ips_rx_short_discards, init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_TDOLD(port),
+ &ipk->ipk_tx_dropped_link_down, &ips->ips_tx_dropped_link_down,
+ init);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RUPP(port),
+ &ipk->ipk_rx_unknown_protocol, &ips->ips_rx_unknown_protocol, init);
+
+ /* 64-bit */
+ i40e_stat_get_uint48(i40e, I40E_GL_RXERR1_L(port), &ipk->ipk_rx_err1,
+ &ips->ips_rx_err1, init);
+ i40e_stat_get_uint48(i40e, I40E_GL_RXERR2_L(port), &ipk->ipk_rx_err2,
+ &ips->ips_rx_err2, init);
+
+ mutex_exit(&i40e->i40e_stat_lock);
+
+ /*
+	 * We follow ixgbe's lead here: if a kstat update didn't work 100%,
+	 * we mark the service as unaffected, as opposed to what we do when
+	 * fetching things for MAC directly.
+ */
+ if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
+ DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_UNAFFECTED);
+ }
+}
+
+static int
+i40e_stat_pf_kstat_update(kstat_t *ksp, int rw)
+{
+ i40e_t *i40e;
+
+ if (rw == KSTAT_WRITE)
+ return (EACCES);
+
+ i40e = ksp->ks_private;
+ i40e_stat_pf_update(i40e, B_FALSE);
+ return (0);
+}
+
+static boolean_t
+i40e_stat_pf_init(i40e_t *i40e)
+{
+ kstat_t *ksp;
+ i40e_pf_kstats_t *ipk;
+
+ ksp = kstat_create(I40E_MODULE_NAME, ddi_get_instance(i40e->i40e_dip),
+ "pfstats", "net", KSTAT_TYPE_NAMED,
+ sizeof (i40e_pf_kstats_t) / sizeof (kstat_named_t), 0);
+ if (ksp == NULL) {
+ i40e_error(i40e, "Could not create kernel statistics.");
+ return (B_FALSE);
+ }
+
+ i40e->i40e_pf_kstat = ksp;
+ ipk = ksp->ks_data;
+ ksp->ks_update = i40e_stat_pf_kstat_update;
+ ksp->ks_private = i40e;
+
+ kstat_named_init(&ipk->ipk_rx_bytes, "rx_bytes",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_unicast, "rx_unicast",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_multicast, "rx_multicast",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_broadcast, "rx_broadcast",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_tx_bytes, "tx_bytes",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_tx_unicast, "tx_unicast",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_tx_multicast, "tx_multicast",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_tx_broadcast, "tx_broadcast",
+ KSTAT_DATA_UINT64);
+
+ kstat_named_init(&ipk->ipk_rx_size_64, "rx_size_64",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_size_127, "rx_size_127",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_size_255, "rx_size_255",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_size_511, "rx_size_511",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_size_1023, "rx_size_1023",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_size_1522, "rx_size_1522",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_size_9522, "rx_size_9522",
+ KSTAT_DATA_UINT64);
+
+ kstat_named_init(&ipk->ipk_tx_size_64, "tx_size_64",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_tx_size_127, "tx_size_127",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_tx_size_255, "tx_size_255",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_tx_size_511, "tx_size_511",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_tx_size_1023, "tx_size_1023",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_tx_size_1522, "tx_size_1522",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_tx_size_9522, "tx_size_9522",
+ KSTAT_DATA_UINT64);
+
+ kstat_named_init(&ipk->ipk_link_xon_rx, "link_xon_rx",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_link_xoff_rx, "link_xoff_rx",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_link_xon_tx, "link_xon_tx",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_link_xoff_tx, "link_xoff_tx",
+ KSTAT_DATA_UINT64);
+
+ kstat_named_init(&ipk->ipk_priority_xon_rx[0], "priority_xon_rx[0]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_rx[0], "priority_xoff_rx[0]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_tx[0], "priority_xon_tx[0]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_tx[0], "priority_xoff_tx[0]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_2_xoff[0],
+ "priority_xon_2_xoff[0]",
+ KSTAT_DATA_UINT64);
+
+ kstat_named_init(&ipk->ipk_priority_xon_rx[1], "priority_xon_rx[1]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_rx[1], "priority_xoff_rx[1]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_tx[1], "priority_xon_tx[1]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_tx[1], "priority_xoff_tx[1]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_2_xoff[1],
+ "priority_xon_2_xoff[1]",
+ KSTAT_DATA_UINT64);
+
+ kstat_named_init(&ipk->ipk_priority_xon_rx[2], "priority_xon_rx[2]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_rx[2], "priority_xoff_rx[2]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_tx[2], "priority_xon_tx[2]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_tx[2], "priority_xoff_tx[2]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_2_xoff[2],
+ "priority_xon_2_xoff[2]",
+ KSTAT_DATA_UINT64);
+
+ kstat_named_init(&ipk->ipk_priority_xon_rx[3], "priority_xon_rx[3]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_rx[3], "priority_xoff_rx[3]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_tx[3], "priority_xon_tx[3]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_tx[3], "priority_xoff_tx[3]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_2_xoff[3],
+ "priority_xon_2_xoff[3]",
+ KSTAT_DATA_UINT64);
+
+ kstat_named_init(&ipk->ipk_priority_xon_rx[4], "priority_xon_rx[4]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_rx[4], "priority_xoff_rx[4]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_tx[4], "priority_xon_tx[4]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_tx[4], "priority_xoff_tx[4]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_2_xoff[4],
+ "priority_xon_2_xoff[4]",
+ KSTAT_DATA_UINT64);
+
+ kstat_named_init(&ipk->ipk_priority_xon_rx[5], "priority_xon_rx[5]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_rx[5], "priority_xoff_rx[5]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_tx[5], "priority_xon_tx[5]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_tx[5], "priority_xoff_tx[5]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_2_xoff[5],
+ "priority_xon_2_xoff[5]",
+ KSTAT_DATA_UINT64);
+
+ kstat_named_init(&ipk->ipk_priority_xon_rx[6], "priority_xon_rx[6]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_rx[6], "priority_xoff_rx[6]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_tx[6], "priority_xon_tx[6]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_tx[6], "priority_xoff_tx[6]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_2_xoff[6],
+ "priority_xon_2_xoff[6]",
+ KSTAT_DATA_UINT64);
+
+ kstat_named_init(&ipk->ipk_priority_xon_rx[7], "priority_xon_rx[7]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_rx[7], "priority_xoff_rx[7]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_tx[7], "priority_xon_tx[7]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xoff_tx[7], "priority_xoff_tx[7]",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_priority_xon_2_xoff[7],
+ "priority_xon_2_xoff[7]",
+ KSTAT_DATA_UINT64);
+
+ kstat_named_init(&ipk->ipk_crc_errors, "crc_errors",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_illegal_bytes, "illegal_bytes",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_mac_local_faults, "mac_local_faults",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_mac_remote_faults, "mac_remote_faults",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_length_errors, "rx_length_errors",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_undersize, "rx_undersize",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_fragments, "rx_fragments",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_oversize, "rx_oversize",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_jabber, "rx_jabber",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_discards, "rx_discards",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_vm_discards, "rx_vm_discards",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_short_discards, "rx_short_discards",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_tx_dropped_link_down, "tx_dropped_link_down",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_unknown_protocol, "rx_unknown_protocol",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_err1, "rx_err1",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&ipk->ipk_rx_err2, "rx_err2",
+ KSTAT_DATA_UINT64);
+
+ bzero(&i40e->i40e_pf_stat, sizeof (i40e_pf_stats_t));
+ i40e_stat_pf_update(i40e, B_TRUE);
+
+ kstat_install(i40e->i40e_pf_kstat);
+
+ return (B_TRUE);
+}
+
+void
+i40e_stats_fini(i40e_t *i40e)
+{
+ ASSERT(i40e->i40e_vsi_kstat == NULL);
+ if (i40e->i40e_pf_kstat != NULL) {
+ kstat_delete(i40e->i40e_pf_kstat);
+ i40e->i40e_pf_kstat = NULL;
+ }
+
+ mutex_destroy(&i40e->i40e_stat_lock);
+}
+
+boolean_t
+i40e_stats_init(i40e_t *i40e)
+{
+ mutex_init(&i40e->i40e_stat_lock, NULL, MUTEX_DRIVER, NULL);
+ if (i40e_stat_pf_init(i40e) == B_FALSE) {
+ mutex_destroy(&i40e->i40e_stat_lock);
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * For Nemo/GLDv3.
+ */
+int
+i40e_m_stat(void *arg, uint_t stat, uint64_t *val)
+{
+ i40e_t *i40e = (i40e_t *)arg;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ int port = i40e->i40e_hw_space.port;
+ i40e_pf_stats_t *ips;
+ i40e_pf_kstats_t *ipk;
+
+ ASSERT(i40e->i40e_pf_kstat != NULL);
+ ips = &i40e->i40e_pf_stat;
+ ipk = i40e->i40e_pf_kstat->ks_data;
+
+ /*
+ * We need both locks, as various stats are protected by different
+ * things here.
+ */
+ mutex_enter(&i40e->i40e_general_lock);
+
+ if (i40e->i40e_state & I40E_SUSPENDED) {
+ mutex_exit(&i40e->i40e_general_lock);
+ return (ECANCELED);
+ }
+
+ mutex_enter(&i40e->i40e_stat_lock);
+
+ /*
+ * Unfortunately the GLDv3 conflates two rather different things here.
+ * We're combining statistics about the physical port represented by
+ * this instance with statistics that describe the properties of the
+ * logical interface. As such, we're going to use the various aspects of
+	 * instance is doing, even though that means some tools may be
+	 * confused and that seeing the logical traffic on the interface
+	 * itself, sans VNICs and the like, will require more work.
+ * sans VNICs and the like will require more work.
+ *
+ * Stats which are not listed in this switch statement are unimplemented
+ * at this time in hardware or don't currently apply to the device.
+ */
+ switch (stat) {
+ /* MIB-II stats (RFC 1213 and RFC 1573) */
+ case MAC_STAT_IFSPEED:
+ *val = i40e->i40e_link_speed * 1000000ull;
+ break;
+ case MAC_STAT_MULTIRCV:
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_MPRCL(port),
+ &ipk->ipk_rx_multicast, &ips->ips_rx_multicast, B_FALSE);
+ *val = ipk->ipk_rx_multicast.value.ui64;
+ break;
+ case MAC_STAT_BRDCSTRCV:
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_BPRCL(port),
+ &ipk->ipk_rx_broadcast, &ips->ips_rx_broadcast, B_FALSE);
+ *val = ipk->ipk_rx_broadcast.value.ui64;
+ break;
+ case MAC_STAT_MULTIXMT:
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_MPTCL(port),
+ &ipk->ipk_tx_multicast, &ips->ips_tx_multicast, B_FALSE);
+ *val = ipk->ipk_tx_multicast.value.ui64;
+ break;
+ case MAC_STAT_BRDCSTXMT:
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_BPTCL(port),
+ &ipk->ipk_tx_broadcast, &ips->ips_tx_broadcast, B_FALSE);
+ *val = ipk->ipk_tx_broadcast.value.ui64;
+ break;
+ case MAC_STAT_NORCVBUF:
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RDPC(port),
+ &ipk->ipk_rx_discards, &ips->ips_rx_discards, B_FALSE);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_LDPC(port),
+ &ipk->ipk_rx_vm_discards, &ips->ips_rx_vm_discards,
+ B_FALSE);
+ *val = ipk->ipk_rx_discards.value.ui64 +
+ ipk->ipk_rx_vm_discards.value.ui64;
+ break;
+ /*
+	 * Note that some RXERR2 stats are also duplicated by the switch filter
+ * stats; however, since we're not using those at this time, it seems
+ * reasonable to include them.
+ */
+ case MAC_STAT_IERRORS:
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_CRCERRS(port),
+ &ipk->ipk_crc_errors, &ips->ips_crc_errors, B_FALSE);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_ILLERRC(port),
+ &ipk->ipk_illegal_bytes, &ips->ips_illegal_bytes, B_FALSE);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RLEC(port),
+ &ipk->ipk_rx_length_errors, &ips->ips_rx_length_errors,
+ B_FALSE);
+ i40e_stat_get_uint48(i40e, I40E_GL_RXERR1_L(port),
+ &ipk->ipk_rx_err1, &ips->ips_rx_err1, B_FALSE);
+ i40e_stat_get_uint48(i40e, I40E_GL_RXERR2_L(port),
+ &ipk->ipk_rx_err2, &ips->ips_rx_err2, B_FALSE);
+
+ *val = ipk->ipk_crc_errors.value.ui64 +
+ ipk->ipk_illegal_bytes.value.ui64 +
+ ipk->ipk_rx_length_errors.value.ui64 +
+ ipk->ipk_rx_err1.value.ui64 +
+ ipk->ipk_rx_err2.value.ui64;
+ break;
+ case MAC_STAT_UNKNOWNS:
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RUPP(port),
+ &ipk->ipk_rx_unknown_protocol,
+ &ips->ips_rx_unknown_protocol,
+ B_FALSE);
+ *val = ipk->ipk_rx_unknown_protocol.value.ui64;
+ break;
+ case MAC_STAT_RBYTES:
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_GORCL(port),
+ &ipk->ipk_rx_bytes, &ips->ips_rx_bytes, B_FALSE);
+ *val = ipk->ipk_rx_bytes.value.ui64;
+ break;
+ case MAC_STAT_IPACKETS:
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_UPRCL(port),
+ &ipk->ipk_rx_unicast, &ips->ips_rx_unicast, B_FALSE);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_MPRCL(port),
+ &ipk->ipk_rx_multicast, &ips->ips_rx_multicast, B_FALSE);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_BPRCL(port),
+ &ipk->ipk_rx_broadcast, &ips->ips_rx_broadcast, B_FALSE);
+ *val = ipk->ipk_rx_unicast.value.ui64 +
+ ipk->ipk_rx_multicast.value.ui64 +
+ ipk->ipk_rx_broadcast.value.ui64;
+ break;
+ case MAC_STAT_OBYTES:
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_GOTCL(port),
+ &ipk->ipk_tx_bytes, &ips->ips_tx_bytes, B_FALSE);
+ *val = ipk->ipk_tx_bytes.value.ui64;
+ break;
+ case MAC_STAT_OPACKETS:
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_UPTCL(port),
+ &ipk->ipk_tx_unicast, &ips->ips_tx_unicast, B_FALSE);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_MPTCL(port),
+ &ipk->ipk_tx_multicast, &ips->ips_tx_multicast, B_FALSE);
+ i40e_stat_get_uint48(i40e, I40E_GLPRT_BPTCL(port),
+ &ipk->ipk_tx_broadcast, &ips->ips_tx_broadcast, B_FALSE);
+ *val = ipk->ipk_tx_unicast.value.ui64 +
+ ipk->ipk_tx_multicast.value.ui64 +
+ ipk->ipk_tx_broadcast.value.ui64;
+ break;
+ case MAC_STAT_UNDERFLOWS:
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RUC(port),
+ &ipk->ipk_rx_undersize, &ips->ips_rx_undersize, B_FALSE);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RFC(port),
+ &ipk->ipk_rx_fragments, &ips->ips_rx_fragments, B_FALSE);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_MSPDC(port),
+ &ipk->ipk_rx_short_discards, &ips->ips_rx_short_discards,
+ B_FALSE);
+ *val = ipk->ipk_rx_undersize.value.ui64 +
+ ipk->ipk_rx_fragments.value.ui64 +
+ ipk->ipk_rx_short_discards.value.ui64;
+ break;
+ case MAC_STAT_OVERFLOWS:
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_ROC(port),
+ &ipk->ipk_rx_oversize, &ips->ips_rx_oversize, B_FALSE);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RJC(port),
+ &ipk->ipk_rx_jabber, &ips->ips_rx_jabber, B_FALSE);
+ *val = ipk->ipk_rx_oversize.value.ui64 +
+		    ipk->ipk_rx_jabber.value.ui64;
+ break;
+
+ /* RFC 1643 stats */
+ case ETHER_STAT_FCS_ERRORS:
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_CRCERRS(port),
+ &ipk->ipk_crc_errors, &ips->ips_crc_errors, B_FALSE);
+ *val = ipk->ipk_crc_errors.value.ui64;
+ break;
+ case ETHER_STAT_TOOLONG_ERRORS:
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_ROC(port),
+ &ipk->ipk_rx_oversize, &ips->ips_rx_oversize, B_FALSE);
+ *val = ipk->ipk_rx_oversize.value.ui64;
+ break;
+ case ETHER_STAT_MACRCV_ERRORS:
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_ILLERRC(port),
+ &ipk->ipk_illegal_bytes, &ips->ips_illegal_bytes, B_FALSE);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RLEC(port),
+ &ipk->ipk_rx_length_errors, &ips->ips_rx_length_errors,
+ B_FALSE);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RFC(port),
+ &ipk->ipk_rx_fragments, &ips->ips_rx_fragments, B_FALSE);
+ *val = ipk->ipk_illegal_bytes.value.ui64 +
+ ipk->ipk_rx_length_errors.value.ui64 +
+ ipk->ipk_rx_fragments.value.ui64;
+ break;
+ /* MII/GMII stats */
+
+	/*
+	 * The transceiver address is apparently the same as the port number.
+	 */
+	case ETHER_STAT_XCVR_ADDR:
+ *val = i40e->i40e_hw_space.port;
+ break;
+ case ETHER_STAT_XCVR_ID:
+ switch (hw->phy.media_type) {
+ case I40E_MEDIA_TYPE_BASET:
+ /*
+ * Transform the data here into the ID. Note, generally
+ * the revision is left out.
+ */
+ *val = i40e->i40e_phy.phy_id[3] << 24 |
+ i40e->i40e_phy.phy_id[2] << 16 |
+ i40e->i40e_phy.phy_id[1] << 8;
+ break;
+ case I40E_MEDIA_TYPE_FIBER:
+ case I40E_MEDIA_TYPE_BACKPLANE:
+ case I40E_MEDIA_TYPE_CX4:
+ case I40E_MEDIA_TYPE_DA:
+ case I40E_MEDIA_TYPE_VIRTUAL:
+ *val = i40e->i40e_phy.phy_id[0] |
+ i40e->i40e_phy.phy_id[1] << 8 |
+ i40e->i40e_phy.phy_id[2] << 16;
+ break;
+ case I40E_MEDIA_TYPE_UNKNOWN:
+ default:
+ goto unimpl;
+ }
+ break;
+ case ETHER_STAT_XCVR_INUSE:
+ switch (hw->phy.link_info.phy_type) {
+ case I40E_PHY_TYPE_100BASE_TX:
+ *val = XCVR_100T2;
+ break;
+ case I40E_PHY_TYPE_1000BASE_T:
+ *val = XCVR_1000T;
+ break;
+ default:
+ *val = XCVR_UNDEFINED;
+ break;
+ }
+ break;
+
+ /*
+ * This group answers the question of do we support a given speed in
+ * theory.
+ */
+ case ETHER_STAT_CAP_100FDX:
+ *val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_100MB) != 0;
+ break;
+ case ETHER_STAT_CAP_1000FDX:
+ *val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_1GB) != 0;
+ break;
+ case ETHER_STAT_CAP_10GFDX:
+ *val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_10GB) != 0;
+ break;
+ case ETHER_STAT_CAP_40GFDX:
+ *val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_40GB) != 0;
+ break;
+
+ /*
+	 * These ask whether we are currently advertising these speeds and
+	 * abilities. Until we support setting these (we are currently working
+	 * with a copper PHY), the only things we advertise are based on the
+	 * PHY's link speeds. In other words, we advertise everything we support.
+ */
+ case ETHER_STAT_ADV_CAP_100FDX:
+ *val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_100MB) != 0;
+ break;
+ case ETHER_STAT_ADV_CAP_1000FDX:
+ *val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_1GB) != 0;
+ break;
+ case ETHER_STAT_ADV_CAP_10GFDX:
+ *val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_10GB) != 0;
+ break;
+ case ETHER_STAT_ADV_CAP_40GFDX:
+ *val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_40GB) != 0;
+ break;
+
+ /*
+	 * These ask if the peer supports these speeds, i.e. what it told us
+	 * during auto-negotiation. Unfortunately, hardware doesn't appear to
+	 * give us a way to determine what the peer actually supports, only
+	 * what it has enabled. This means that all we can tell the user is
+	 * the speed that we're currently at.
+ */
+ case ETHER_STAT_LP_CAP_100FDX:
+ *val = i40e->i40e_link_speed == 100;
+ break;
+ case ETHER_STAT_LP_CAP_1000FDX:
+ *val = i40e->i40e_link_speed == 1000;
+ break;
+ case ETHER_STAT_LP_CAP_10GFDX:
+ *val = i40e->i40e_link_speed == 10000;
+ break;
+ case ETHER_STAT_LP_CAP_40GFDX:
+ *val = i40e->i40e_link_speed == 40000;
+ break;
+
+ /*
+ * Statistics for unsupported speeds. Note that these often have the
+ * same constraints as the other ones. For example, we can't answer the
+ * question of the ETHER_STAT_LP_CAP family because hardware doesn't
+ * give us any way of knowing whether or not it does.
+ */
+ case ETHER_STAT_CAP_100HDX:
+ case ETHER_STAT_CAP_1000HDX:
+ case ETHER_STAT_CAP_10FDX:
+ case ETHER_STAT_CAP_10HDX:
+ case ETHER_STAT_CAP_100T4:
+ case ETHER_STAT_CAP_100GFDX:
+ case ETHER_STAT_CAP_2500FDX:
+ case ETHER_STAT_CAP_5000FDX:
+ case ETHER_STAT_ADV_CAP_1000HDX:
+ case ETHER_STAT_ADV_CAP_100HDX:
+ case ETHER_STAT_ADV_CAP_10FDX:
+ case ETHER_STAT_ADV_CAP_10HDX:
+ case ETHER_STAT_ADV_CAP_100T4:
+ case ETHER_STAT_ADV_CAP_100GFDX:
+ case ETHER_STAT_ADV_CAP_2500FDX:
+ case ETHER_STAT_ADV_CAP_5000FDX:
+ case ETHER_STAT_LP_CAP_1000HDX:
+ case ETHER_STAT_LP_CAP_100HDX:
+ case ETHER_STAT_LP_CAP_10FDX:
+ case ETHER_STAT_LP_CAP_10HDX:
+ case ETHER_STAT_LP_CAP_100T4:
+ case ETHER_STAT_LP_CAP_100GFDX:
+ case ETHER_STAT_LP_CAP_2500FDX:
+ case ETHER_STAT_LP_CAP_5000FDX:
+ *val = 0;
+ break;
+
+ case ETHER_STAT_LINK_DUPLEX:
+ *val = i40e->i40e_link_duplex;
+ break;
+ case ETHER_STAT_TOOSHORT_ERRORS:
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RUC(port),
+ &ipk->ipk_rx_undersize, &ips->ips_rx_undersize, B_FALSE);
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_MSPDC(port),
+ &ipk->ipk_rx_short_discards, &ips->ips_rx_short_discards,
+ B_FALSE);
+ *val = ipk->ipk_rx_undersize.value.ui64 +
+ ipk->ipk_rx_short_discards.value.ui64;
+ break;
+ case ETHER_STAT_JABBER_ERRORS:
+ i40e_stat_get_uint32(i40e, I40E_GLPRT_RJC(port),
+ &ipk->ipk_rx_jabber, &ips->ips_rx_jabber, B_FALSE);
+ *val = ipk->ipk_rx_jabber.value.ui64;
+ break;
+
+ /*
+ * Non-Link speed related capabilities.
+ */
+ case ETHER_STAT_CAP_AUTONEG:
+ *val = 1;
+ break;
+
+ case ETHER_STAT_ADV_CAP_AUTONEG:
+ *val = 1;
+ break;
+
+ case ETHER_STAT_LP_CAP_AUTONEG:
+ *val = (hw->phy.link_info.an_info & I40E_AQ_LP_AN_ABILITY) != 0;
+ break;
+
+ case ETHER_STAT_LINK_AUTONEG:
+ *val = 1;
+ break;
+
+ /*
+ * Note that while the hardware does support the pause functionality, at
+ * this time we do not use it at all and effectively disable it.
+ */
+ case ETHER_STAT_CAP_ASMPAUSE:
+ *val = (i40e->i40e_phy.abilities &
+ I40E_AQ_PHY_FLAG_PAUSE_RX) != 0;
+ break;
+ case ETHER_STAT_CAP_PAUSE:
+ *val = (i40e->i40e_phy.abilities &
+ I40E_AQ_PHY_FLAG_PAUSE_TX) != 0;
+ break;
+
+ /*
+ * Because we don't support these at this time, they are always
+ * hard-coded to zero.
+ */
+ case ETHER_STAT_ADV_CAP_ASMPAUSE:
+ case ETHER_STAT_ADV_CAP_PAUSE:
+ *val = 0;
+ break;
+
+ /*
+ * Like the other LP fields, we can only answer the question have we
+ * enabled it, not whether the other end actually supports it.
+ */
+ case ETHER_STAT_LP_CAP_ASMPAUSE:
+ case ETHER_STAT_LINK_ASMPAUSE:
+ *val = (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) != 0;
+ break;
+ case ETHER_STAT_LP_CAP_PAUSE:
+ case ETHER_STAT_LINK_PAUSE:
+ *val = (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) != 0;
+ break;
+
+ default:
+ unimpl:
+ mutex_exit(&i40e->i40e_stat_lock);
+ mutex_exit(&i40e->i40e_general_lock);
+ return (ENOTSUP);
+ }
+
+ mutex_exit(&i40e->i40e_stat_lock);
+ mutex_exit(&i40e->i40e_general_lock);
+
+ if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
+ DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_DEGRADED);
+ return (EIO);
+ }
+
+ return (0);
+}
+
+int
+i40e_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
+{
+ i40e_trqpair_t *itrq = (i40e_trqpair_t *)rh;
+ i40e_t *i40e = itrq->itrq_i40e;
+
+ if (i40e->i40e_state & I40E_SUSPENDED) {
+ return (ECANCELED);
+ }
+
+ switch (stat) {
+ case MAC_STAT_RBYTES:
+ *val = itrq->itrq_rxstat.irxs_bytes.value.ui64;
+ break;
+ case MAC_STAT_IPACKETS:
+ *val = itrq->itrq_rxstat.irxs_packets.value.ui64;
+ break;
+ default:
+ *val = 0;
+ return (ENOTSUP);
+ }
+
+ return (0);
+}
+
+int
+i40e_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
+{
+ i40e_trqpair_t *itrq = (i40e_trqpair_t *)rh;
+ i40e_t *i40e = itrq->itrq_i40e;
+
+ if (i40e->i40e_state & I40E_SUSPENDED) {
+ return (ECANCELED);
+ }
+
+ switch (stat) {
+ case MAC_STAT_OBYTES:
+ *val = itrq->itrq_txstat.itxs_bytes.value.ui64;
+ break;
+ case MAC_STAT_OPACKETS:
+ *val = itrq->itrq_txstat.itxs_packets.value.ui64;
+ break;
+ default:
+ *val = 0;
+ return (ENOTSUP);
+ }
+
+ return (0);
+}
+
+/*
+ * When we end up refactoring all of the queue assignments and have non-static
+ * queue to VSI mappings, then we may need to revisit the general locking
+ * strategy that we employ and have the kstat creation / deletion be part of the
+ * ring start and stop routines.
+ */
+void
+i40e_stats_trqpair_fini(i40e_trqpair_t *itrq)
+{
+ if (itrq->itrq_txkstat != NULL) {
+ kstat_delete(itrq->itrq_txkstat);
+ itrq->itrq_txkstat = NULL;
+ }
+
+ if (itrq->itrq_rxkstat != NULL) {
+ kstat_delete(itrq->itrq_rxkstat);
+ itrq->itrq_rxkstat = NULL;
+ }
+}
+
+boolean_t
+i40e_stats_trqpair_init(i40e_trqpair_t *itrq)
+{
+ char buf[128];
+ i40e_t *i40e = itrq->itrq_i40e;
+ i40e_txq_stat_t *tsp = &itrq->itrq_txstat;
+ i40e_rxq_stat_t *rsp = &itrq->itrq_rxstat;
+
+ (void) snprintf(buf, sizeof (buf), "trqpair_tx_%d", itrq->itrq_index);
+ itrq->itrq_txkstat = kstat_create(I40E_MODULE_NAME,
+ ddi_get_instance(i40e->i40e_dip), buf, "net", KSTAT_TYPE_NAMED,
+ sizeof (i40e_txq_stat_t) / sizeof (kstat_named_t),
+ KSTAT_FLAG_VIRTUAL);
+
+ if (itrq->itrq_txkstat == NULL)
+ return (B_FALSE);
+
+ (void) snprintf(buf, sizeof (buf), "trqpair_rx_%d", itrq->itrq_index);
+ itrq->itrq_rxkstat = kstat_create(I40E_MODULE_NAME,
+ ddi_get_instance(i40e->i40e_dip), buf, "net", KSTAT_TYPE_NAMED,
+ sizeof (i40e_rxq_stat_t) / sizeof (kstat_named_t),
+ KSTAT_FLAG_VIRTUAL);
+
+ if (itrq->itrq_rxkstat == NULL) {
+ kstat_delete(itrq->itrq_txkstat);
+ itrq->itrq_txkstat = NULL;
+ return (B_FALSE);
+ }
+
+ itrq->itrq_txkstat->ks_data = &itrq->itrq_txstat;
+ itrq->itrq_rxkstat->ks_data = &itrq->itrq_rxstat;
+
+ kstat_named_init(&tsp->itxs_bytes, "tx_bytes",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_bytes.value.ui64 = 0;
+ kstat_named_init(&tsp->itxs_packets, "tx_packets",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_packets.value.ui64 = 0;
+ kstat_named_init(&tsp->itxs_descriptors, "tx_descriptors",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_descriptors.value.ui64 = 0;
+ kstat_named_init(&tsp->itxs_recycled, "tx_recycled",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_recycled.value.ui64 = 0;
+
+ kstat_named_init(&tsp->itxs_hck_meoifail, "tx_hck_meoifail",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_hck_meoifail.value.ui64 = 0;
+ kstat_named_init(&tsp->itxs_hck_nol2info, "tx_hck_nol2info",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_hck_nol2info.value.ui64 = 0;
+ kstat_named_init(&tsp->itxs_hck_nol3info, "tx_hck_nol3info",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_hck_nol3info.value.ui64 = 0;
+ kstat_named_init(&tsp->itxs_hck_nol4info, "tx_hck_nol4info",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_hck_nol4info.value.ui64 = 0;
+ kstat_named_init(&tsp->itxs_hck_badl3, "tx_hck_badl3",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_hck_badl3.value.ui64 = 0;
+ kstat_named_init(&tsp->itxs_hck_badl4, "tx_hck_badl4",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_hck_badl4.value.ui64 = 0;
+ kstat_named_init(&tsp->itxs_err_notcb, "tx_err_notcb",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_err_notcb.value.ui64 = 0;
+ kstat_named_init(&tsp->itxs_err_nodescs, "tx_err_nodescs",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_err_nodescs.value.ui64 = 0;
+ kstat_named_init(&tsp->itxs_err_context, "tx_err_context",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_err_context.value.ui64 = 0;
+ kstat_named_init(&tsp->itxs_num_unblocked, "tx_num_unblocked",
+ KSTAT_DATA_UINT64);
+ tsp->itxs_num_unblocked.value.ui64 = 0;
+
+ kstat_named_init(&rsp->irxs_bytes, "rx_bytes",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_bytes.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_packets, "rx_packets",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_packets.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_rx_desc_error, "rx_desc_error",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_rx_desc_error.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_rx_intr_limit, "rx_intr_limit",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_rx_intr_limit.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_rx_bind_norcb, "rx_bind_norcb",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_rx_bind_norcb.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_rx_bind_nomp, "rx_bind_nomp",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_rx_bind_nomp.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_rx_copy_nomem, "rx_copy_nomem",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_rx_copy_nomem.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_hck_v4hdrok, "rx_hck_v4hdrok",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_hck_v4hdrok.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_hck_l4hdrok, "rx_hck_l4hdrok",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_hck_l4hdrok.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_hck_unknown, "rx_hck_unknown",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_hck_unknown.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_hck_nol3l4p, "rx_hck_nol3l4p",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_hck_nol3l4p.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_hck_iperr, "rx_hck_iperr",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_hck_iperr.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_hck_eiperr, "rx_hck_eiperr",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_hck_eiperr.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_hck_l4err, "rx_hck_l4err",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_hck_l4err.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_hck_v6skip, "rx_hck_v6skip",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_hck_v6skip.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_hck_set, "rx_hck_set",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_hck_set.value.ui64 = 0;
+ kstat_named_init(&rsp->irxs_hck_miss, "rx_hck_miss",
+ KSTAT_DATA_UINT64);
+ rsp->irxs_hck_miss.value.ui64 = 0;
+
+ kstat_install(itrq->itrq_txkstat);
+ kstat_install(itrq->itrq_rxkstat);
+
+ return (B_TRUE);
+}
diff --git a/usr/src/uts/common/io/i40e/i40e_sw.h b/usr/src/uts/common/io/i40e/i40e_sw.h
new file mode 100644
index 0000000000..04959b1590
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/i40e_sw.h
@@ -0,0 +1,974 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
+ * Copyright 2016 Joyent, Inc.
+ */
+
+/*
+ * Please see i40e_main.c for an introduction to the device driver, its layout,
+ * and more.
+ */
+
+#ifndef _I40E_SW_H
+#define _I40E_SW_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/debug.h>
+#include <sys/stropts.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/strlog.h>
+#include <sys/kmem.h>
+#include <sys/stat.h>
+#include <sys/kstat.h>
+#include <sys/modctl.h>
+#include <sys/errno.h>
+#include <sys/dlpi.h>
+#include <sys/mac_provider.h>
+#include <sys/mac_ether.h>
+#include <sys/vlan.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/pci.h>
+#include <sys/pcie.h>
+#include <sys/sdt.h>
+#include <sys/ethernet.h>
+#include <sys/pattr.h>
+#include <sys/strsubr.h>
+#include <sys/netlb.h>
+#include <sys/random.h>
+#include <inet/common.h>
+#include <inet/tcp.h>
+#include <inet/ip.h>
+#include <inet/mi.h>
+#include <inet/nd.h>
+#include <netinet/udp.h>
+#include <netinet/sctp.h>
+#include <sys/bitmap.h>
+#include <sys/cpuvar.h>
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/disp.h>
+#include <sys/fm/io/ddi.h>
+#include <sys/list.h>
+#include <sys/debug.h>
+#include <sys/sdt.h>
+#include "i40e_type.h"
+#include "i40e_osdep.h"
+#include "i40e_prototype.h"
+#include "i40e_xregs.h"
+
+#define I40E_MODULE_NAME "i40e"
+
+#define I40E_ADAPTER_REGSET 1
+
+/*
+ * Configuration constants. Note that the hardware defines a minimum bound of 32
+ * descriptors and requires that the programming of the descriptor lengths be
+ * aligned in units of 32 descriptors.
+ */
+#define I40E_MIN_TX_RING_SIZE 64
+#define I40E_MAX_TX_RING_SIZE 4096
+#define I40E_DEF_TX_RING_SIZE 1024
+
+#define I40E_MIN_RX_RING_SIZE 64
+#define I40E_MAX_RX_RING_SIZE 4096
+#define I40E_DEF_RX_RING_SIZE 1024
+
+#define I40E_DESC_ALIGN 32
+
+/*
+ * Sizes used for asynchronous processing of the adminq. We allocate a fixed
+ * size buffer for each instance of the device during attach time, rather than
+ * allocating and freeing one during interrupt processing.
+ *
+ * We also define the descriptor size of the admin queue here.
+ */
+#define I40E_ADMINQ_BUFSZ 4096
+#define I40E_MAX_ADMINQ_SIZE 1024
+#define I40E_DEF_ADMINQ_SIZE 256
+
+/*
+ * Note that while the minimum and maximum values are based upon the sizing of
+ * the ring itself, the default is taken from ixgbe without much thought. It's
+ * basically been cargo-culted. See i40e_transceiver.c for a bit more
+ * information.
+ */
+#define I40E_MIN_RX_LIMIT_PER_INTR 16
+#define I40E_MAX_RX_LIMIT_PER_INTR 4096
+#define I40E_DEF_RX_LIMIT_PER_INTR 256
+
+/*
+ * Valid MTU ranges. Note that the XL710's maximum payload is actually 9728.
+ * However, we need to adjust for the ETHERFCSL (4 bytes) and the Ethernet VLAN
+ * header size (18 bytes) to get the actual maximum frame we can use. If
+ * different adapters end up with different sizes, we should make this value a
+ * bit more dynamic.
+ */
+#define I40E_MAX_MTU 9706
+#define I40E_MIN_MTU ETHERMIN
+#define I40E_DEF_MTU ETHERMTU
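+
+/*
+ * To make the arithmetic above concrete: 9728 bytes of maximum payload, less
+ * ETHERFCSL (4 bytes) and sizeof (struct ether_vlan_header) (18 bytes), yields
+ * the 9706 byte I40E_MAX_MTU. A sketch of how the maximum frame size follows
+ * from the MTU (the actual derivation lives elsewhere in the driver):
+ *
+ *	i40e->i40e_frame_max = i40e->i40e_sdu +
+ *	    sizeof (struct ether_vlan_header) + ETHERFCSL;
+ */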
+
+/*
+ * Interrupt throttling related values. Interrupt throttling values are defined
+ * in two microsecond increments. Note that a value of zero basically says do no
+ * ITR activity. A helpful way to think about these is that setting the ITR to a
+ * value will allow a certain number of interrupts per second.
+ *
+ * Our default values for RX allow 20k interrupts per second while our default
+ * values for TX allow for 5k interrupts per second. For other class interrupts,
+ * we limit ourselves to a rate of 2k/s.
+ */
+#define I40E_MIN_ITR 0x0000
+#define I40E_MAX_ITR 0x0FF0
+#define I40E_DEF_RX_ITR 0x0019
+#define I40E_DEF_TX_ITR 0x0064
+#define I40E_DEF_OTHER_ITR 0x00FA
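+
+/*
+ * To check the arithmetic above: the default rx ITR of 0x19 (25) in two
+ * microsecond units enforces a 50 us minimum gap between interrupts, i.e. at
+ * most 1 s / 50 us = 20,000 interrupts per second. Likewise, the default tx
+ * ITR of 0x64 (100) gives 200 us and 5,000/s, and the other-class default of
+ * 0xFA (250) gives 500 us and 2,000/s.
+ */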
+
+/*
+ * Indexes into the three ITR registers that we have.
+ */
+typedef enum i40e_itr_index {
+ I40E_ITR_INDEX_RX = 0x0,
+ I40E_ITR_INDEX_TX = 0x1,
+ I40E_ITR_INDEX_OTHER = 0x2,
+ I40E_ITR_INDEX_NONE = 0x3
+} i40e_itr_index_t;
+
+/*
+ * Table 1-5 of the PRM notes that LSO supports up to 256 KB.
+ */
+#define I40E_LSO_MAXLEN (256 * 1024)
+
+#define I40E_CYCLIC_PERIOD NANOSEC /* 1 second */
+#define I40E_DRAIN_RX_WAIT (500 * MILLISEC) /* In us */
+
+/*
+ * All the other queue types are defined by the common code. However, this
+ * is the constant used to indicate that a queue list is terminated.
+ */
+#define I40E_QUEUE_TYPE_EOL 0x7FF
+
+/*
+ * See the comments in i40e_buf.c as to the purpose of this value and how it's
+ * used to ensure that the IP header is eventually aligned when it's received by
+ * the OS.
+ */
+#define I40E_BUF_IPHDR_ALIGNMENT 2
+
+/*
+ * The XL710 controller has a limit of eight buffers being allowed to be used
+ * for the transmission of a single frame. This is defined in 8.4.1 - Transmit
+ * Packet in System Memory.
+ */
+#define I40E_TX_MAX_COOKIE 8
+
+/*
+ * Sizing to determine the amount of available descriptors at which we'll
+ * consider ourselves blocked. Also, when we have these available, we'll then
+ * consider ourselves available to transmit to MAC again. Strictly speaking, the
+ * MAX is based on the ring size. The default sizing is based on ixgbe.
+ */
+#define I40E_MIN_TX_BLOCK_THRESH I40E_TX_MAX_COOKIE
+#define I40E_DEF_TX_BLOCK_THRESH I40E_MIN_TX_BLOCK_THRESH
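+
+/*
+ * A sketch of how this threshold is consulted on the tx path; the real logic
+ * lives in i40e_transceiver.c and this elides locking:
+ *
+ *	if (itrq->itrq_desc_free < i40e->i40e_tx_block_thresh) {
+ *		itrq->itrq_tx_blocked = B_TRUE;
+ *		... return the mblk_t to MAC to indicate blocking ...
+ *	}
+ */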
+
+/*
+ * Sizing for DMA thresholds. These are used to indicate whether or not we
+ * should perform a bcopy or a DMA binding of a given message block. The range
+ * allows for setting things such that we'll always do a bcopy (a high value) or
+ * always perform a DMA binding (a low value).
+ */
+#define I40E_MIN_RX_DMA_THRESH 0
+#define I40E_DEF_RX_DMA_THRESH 256
+#define I40E_MAX_RX_DMA_THRESH INT32_MAX
+
+#define I40E_MIN_TX_DMA_THRESH 0
+#define I40E_DEF_TX_DMA_THRESH 256
+#define I40E_MAX_TX_DMA_THRESH INT32_MAX
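+
+/*
+ * A sketch of how the rx threshold is consulted (the real logic lives in
+ * i40e_transceiver.c; the tx side uses i40e_t`i40e_tx_dma_min analogously):
+ *
+ *	if (len > i40e->i40e_rx_dma_min)
+ *		... loan the DMA buffer up to MAC via desballoc(9F) ...
+ *	else
+ *		... allocb(9F) a new mblk_t and bcopy(9F) the frame ...
+ */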
+
+/*
+ * Resource sizing counts. There are various aspects of the hardware where we
+ * may have some variable number of elements that we need to handle, such as
+ * the hardware capabilities and switch capacities. We cannot know a priori how
+ * many elements there will be, so instead we take a starting guess and then
+ * grow it up to an upper bound on the number of elements, to limit memory
+ * consumption in case of a hardware bug.
+ */
+#define I40E_HW_CAP_DEFAULT 40
+#define I40E_SWITCH_CAP_DEFAULT 25
+
+/*
+ * Host Memory Context related constants.
+ */
+#define I40E_HMC_RX_CTX_UNIT 128
+#define I40E_HMC_RX_DBUFF_MIN 1024
+#define I40E_HMC_RX_DBUFF_MAX (16 * 1024 - 128)
+#define I40E_HMC_RX_DTYPE_NOSPLIT 0
+#define I40E_HMC_RX_DSIZE_32BYTE 1
+#define I40E_HMC_RX_CRCSTRIP_ENABLE 1
+#define I40E_HMC_RX_FC_DISABLE 0
+#define I40E_HMC_RX_L2TAGORDER 1
+#define I40E_HMC_RX_HDRSPLIT_DISABLE 0
+#define I40E_HMC_RX_INVLAN_DONTSTRIP 0
+#define I40E_HMC_RX_TPH_DISABLE 0
+#define I40E_HMC_RX_LOWRXQ_NOINTR 0
+#define I40E_HMC_RX_PREFENA 1
+
+#define I40E_HMC_TX_CTX_UNIT 128
+#define I40E_HMC_TX_NEW_CONTEXT 1
+#define I40E_HMC_TX_FC_DISABLE 0
+#define I40E_HMC_TX_TS_DISABLE 0
+#define I40E_HMC_TX_FD_DISABLE 0
+#define I40E_HMC_TX_ALT_VLAN_DISABLE 0
+#define I40E_HMC_TX_WB_ENABLE 1
+#define I40E_HMC_TX_TPH_DISABLE 0
+
+/*
+ * Whenever we establish and create a VSI, we need to assign some number of
+ * queues that it's allowed to access from the PF. Because we only have a single
+ * VSI per PF at this time, we assign it all the queues.
+ *
+ * Many of the devices support what's called Data Center Bridging, a feature
+ * that we don't have much use for at this time. However, we still need
+ * to fill in this information. We follow the guidance of the note in Table 7-80
+ * which talks about bytes 62-77. It says that if we don't want to assign
+ * anything to traffic classes, we should set the field to zero. Effectively
+ * this means that everything in the system is assigned to traffic class zero.
+ */
+#define I40E_ASSIGN_ALL_QUEUES 0
+#define I40E_TRAFFIC_CLASS_NO_QUEUES 0
+
+/*
+ * This defines the error mask that we care about from rx descriptors. Currently
+ * we're only concerned with the general errors and oversize errors.
+ */
+#define I40E_RX_ERR_BITS ((1 << I40E_RX_DESC_ERROR_RXE_SHIFT) | \
+ (1 << I40E_RX_DESC_ERROR_OVERSIZE_SHIFT))
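+
+/*
+ * A sketch of its intended use on the rx path (the real check lives in
+ * i40e_ring_rx() in i40e_transceiver.c):
+ *
+ *	if (rxerr & I40E_RX_ERR_BITS) {
+ *		itrq->itrq_rxstat.irxs_rx_desc_error.value.ui64++;
+ *		... drop the frame and reuse the descriptor ...
+ *	}
+ */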
+
+/*
+ * Property sizing macros for firmware versions, etc. They need to be large
+ * enough to hold 32-bit quantities transformed to strings as %d.%d or %x.
+ */
+#define I40E_DDI_PROP_LEN 64
+
+/*
+ * We currently consolidate some overrides that we use in the code here. These
+ * will be gone in the fullness of time, but as we're bringing up the device,
+ * this is what we use.
+ */
+#define I40E_GROUP_MAX 1
+#define I40E_TRQPAIR_MAX 1
+
+#define I40E_GROUP_NOMSIX 1
+#define I40E_TRQPAIR_NOMSIX 1
+
+/*
+ * It seems reasonable to cast this to void because the only reason that we
+ * should get a DDI_FAILURE is if we specify addresses out of range. Because we
+ * specify no offset or length (i.e. we sync the entire object), that shouldn't
+ * happen.
+ */
+#ifdef DEBUG
+#define I40E_DMA_SYNC(handle, flag) ASSERT0(ddi_dma_sync( \
+ (handle)->dmab_dma_handle, 0, 0, \
+ (flag)))
+#else /* !DEBUG */
+#define I40E_DMA_SYNC(handle, flag) ((void) ddi_dma_sync( \
+ (handle)->dmab_dma_handle, 0, 0, \
+ (flag)))
+#endif /* DEBUG */
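+
+/*
+ * Typical usage, e.g. syncing an rx descriptor ring so the kernel sees
+ * hardware's updates (rxd_desc_area is that ring's i40e_dma_buffer_t):
+ *
+ *	I40E_DMA_SYNC(&rxd->rxd_desc_area, DDI_DMA_SYNC_FORKERNEL);
+ */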
+
+/*
+ * Constants related to ring startup and teardown. These refer to the amount of
+ * time that we're willing to wait for a ring to spin up and spin down.
+ */
+#define I40E_RING_WAIT_NTRIES 10
+#define I40E_RING_WAIT_PAUSE 10 /* ms */
+
+/*
+ * Bit flags for attach_progress
+ */
+typedef enum i40e_attach_state {
+ I40E_ATTACH_PCI_CONFIG = 0x0001, /* PCI config setup */
+ I40E_ATTACH_REGS_MAP = 0x0002, /* Registers mapped */
+ I40E_ATTACH_PROPS = 0x0004, /* Properties initialized */
+ I40E_ATTACH_ALLOC_INTR = 0x0008, /* Interrupts allocated */
+ I40E_ATTACH_ALLOC_RINGSLOCKS = 0x0010, /* Rings & locks allocated */
+ I40E_ATTACH_ADD_INTR = 0x0020, /* Intr handlers added */
+ I40E_ATTACH_COMMON_CODE = 0x0040, /* Intel code initialized */
+ I40E_ATTACH_INIT = 0x0080, /* Device initialized */
+ I40E_ATTACH_STATS = 0x0200, /* Kstats created */
+ I40E_ATTACH_MAC = 0x0800, /* MAC registered */
+ I40E_ATTACH_ENABLE_INTR = 0x1000, /* DDI interrupts enabled */
+ I40E_ATTACH_FM_INIT = 0x2000, /* FMA initialized */
+ I40E_ATTACH_LINK_TIMER = 0x4000, /* link check timer */
+} i40e_attach_state_t;
+
+/*
+ * State flags that indicate what's going on in the device. Some of these
+ * state flags indicate some aspirational work that needs to happen in the
+ * driver.
+ *
+ * I40E_UNKNOWN: The device has yet to be started.
+ * I40E_INITIALIZED: The device has been fully attached.
+ * I40E_STARTED:	The device has come out of the GLDv3 start routine.
+ * I40E_SUSPENDED: The device is suspended and I/O among other things
+ * should not occur. This happens because of an actual
+ * DDI_SUSPEND or interrupt adjustments.
+ * I40E_STALL: The tx stall detection logic has found a stall.
+ * I40E_OVERTEMP: The device has encountered a temperature alarm.
+ * I40E_INTR_ADJUST: Our interrupts are being manipulated and therefore we
+ * shouldn't be manipulating their state.
+ * I40E_ERROR: We've detected an FM error and degraded the device.
+ */
+typedef enum i40e_state {
+ I40E_UNKNOWN = 0x00,
+ I40E_INITIALIZED = 0x01,
+ I40E_STARTED = 0x02,
+ I40E_SUSPENDED = 0x04,
+ I40E_STALL = 0x08,
+ I40E_OVERTEMP = 0x20,
+ I40E_INTR_ADJUST = 0x40,
+ I40E_ERROR = 0x80
+} i40e_state_t;
+
+/*
+ * Definitions for common Intel things that we use and some slightly more usable
+ * names.
+ */
+typedef struct i40e_hw i40e_hw_t;
+typedef struct i40e_aqc_switch_resource_alloc_element_resp i40e_switch_rsrc_t;
+
+/*
+ * Handles and addresses of DMA buffers.
+ */
+typedef struct i40e_dma_buffer {
+ caddr_t dmab_address; /* Virtual address */
+ uint64_t dmab_dma_address; /* DMA (Hardware) address */
+ ddi_acc_handle_t dmab_acc_handle; /* Data access handle */
+ ddi_dma_handle_t dmab_dma_handle; /* DMA handle */
+ size_t dmab_size; /* Buffer size */
+ size_t dmab_len; /* Data length in the buffer */
+} i40e_dma_buffer_t;
+
+/*
+ * RX Control Block
+ */
+typedef struct i40e_rx_control_block {
+ mblk_t *rcb_mp;
+ uint32_t rcb_ref;
+ i40e_dma_buffer_t rcb_dma;
+ frtn_t rcb_free_rtn;
+ struct i40e_rx_data *rcb_rxd;
+} i40e_rx_control_block_t;
+
+typedef enum {
+ I40E_TX_NONE,
+ I40E_TX_COPY,
+ I40E_TX_DMA
+} i40e_tx_type_t;
+
+typedef struct i40e_tx_desc i40e_tx_desc_t;
+typedef union i40e_32byte_rx_desc i40e_rx_desc_t;
+
+typedef struct i40e_tx_control_block {
+ struct i40e_tx_control_block *tcb_next;
+ mblk_t *tcb_mp;
+ i40e_tx_type_t tcb_type;
+ ddi_dma_handle_t tcb_dma_handle;
+ i40e_dma_buffer_t tcb_dma;
+} i40e_tx_control_block_t;
+
+/*
+ * Receive ring data (used below).
+ */
+typedef struct i40e_rx_data {
+ struct i40e *rxd_i40e;
+
+ /*
+ * RX descriptor ring definitions
+ */
+ i40e_dma_buffer_t rxd_desc_area; /* DMA buffer of rx desc ring */
+ i40e_rx_desc_t *rxd_desc_ring; /* Rx desc ring */
+ uint32_t rxd_desc_next; /* Index of next rx desc */
+
+ /*
+ * RX control block list definitions
+ */
+ kmutex_t rxd_free_lock; /* Lock to protect free data */
+ i40e_rx_control_block_t *rxd_rcb_area; /* Array of control blocks */
+ i40e_rx_control_block_t **rxd_work_list; /* Work list of rcbs */
+ i40e_rx_control_block_t **rxd_free_list; /* Free list of rcbs */
+ uint32_t rxd_rcb_free; /* Number of free rcbs */
+
+ /*
+ * RX software ring settings
+ */
+ uint32_t rxd_ring_size; /* Rx descriptor ring size */
+ uint32_t rxd_free_list_size; /* Rx free list size */
+
+ /*
+	 * RX outstanding data. This is used to keep track of outstanding loaned
+	 * control blocks after we've shut down receiving. Note these
+ * are protected by the i40e_t`i40e_rx_pending_lock.
+ */
+ uint32_t rxd_rcb_pending;
+ boolean_t rxd_shutdown;
+} i40e_rx_data_t;
+
+/*
+ * Structures for unicast and multicast addresses. Note that we keep the VSI id
+ * around for unicast addresses, since they may belong to different VSIs.
+ * However, since all multicast addresses belong to the default VSI, we don't
+ * duplicate that information.
+ */
+typedef struct i40e_uaddr {
+ uint8_t iua_mac[ETHERADDRL];
+ int iua_vsi;
+} i40e_uaddr_t;
+
+typedef struct i40e_maddr {
+ uint8_t ima_mac[ETHERADDRL];
+} i40e_maddr_t;
+
+/*
+ * Collection of RX statistics on a given queue.
+ */
+typedef struct i40e_rxq_stat {
+ /*
+ * The i40e hardware does not maintain statistics on a per-ring basis,
+ * only on a per-PF and per-VSI level. As such, to satisfy the GLDv3, we
+ * need to maintain our own stats for packets and bytes.
+ */
+ kstat_named_t irxs_bytes; /* Bytes in on queue */
+ kstat_named_t irxs_packets; /* Packets in on queue */
+
+ /*
+ * The following set of stats cover non-checksum data path issues.
+ */
+ kstat_named_t irxs_rx_desc_error; /* Error bit set on desc */
+ kstat_named_t irxs_rx_copy_nomem; /* allocb failure for copy */
+ kstat_named_t irxs_rx_intr_limit; /* Hit i40e_rx_limit_per_intr */
+ kstat_named_t irxs_rx_bind_norcb; /* No replacement rcb free */
+ kstat_named_t irxs_rx_bind_nomp; /* No mblk_t in bind rcb */
+
+ /*
+ * The following set of statistics covers rx checksum related activity.
+ * These are all primarily set in i40e_rx_hcksum. If rx checksum
+ * activity is disabled, then these should all be zero.
+ */
+ kstat_named_t irxs_hck_v4hdrok; /* Valid IPv4 Header */
+ kstat_named_t irxs_hck_l4hdrok; /* Valid L4 Header */
+ kstat_named_t irxs_hck_unknown; /* !pinfo.known */
+ kstat_named_t irxs_hck_nol3l4p; /* Missing L3L4P bit in desc */
+ kstat_named_t irxs_hck_iperr; /* IPE error bit set */
+ kstat_named_t irxs_hck_eiperr; /* EIPE error bit set */
+ kstat_named_t irxs_hck_l4err; /* L4E error bit set */
+ kstat_named_t irxs_hck_v6skip; /* IPv6 case hw fails on */
+ kstat_named_t irxs_hck_set; /* Total times we set cksum */
+ kstat_named_t irxs_hck_miss; /* Times with zero cksum bits */
+} i40e_rxq_stat_t;
+
+/*
+ * Collection of TX Statistics on a given queue
+ */
+typedef struct i40e_txq_stat {
+ kstat_named_t itxs_bytes; /* Bytes out on queue */
+ kstat_named_t itxs_packets; /* Packets out on queue */
+ kstat_named_t itxs_descriptors; /* Descriptors issued */
+ kstat_named_t itxs_recycled; /* Descriptors reclaimed */
+ /*
+ * Various failure conditions.
+ */
+ kstat_named_t itxs_hck_meoifail; /* ether offload failures */
+ kstat_named_t itxs_hck_nol2info; /* Missing l2 info */
+ kstat_named_t itxs_hck_nol3info; /* Missing l3 info */
+ kstat_named_t itxs_hck_nol4info; /* Missing l4 info */
+ kstat_named_t itxs_hck_badl3; /* Not IPv4/IPv6 */
+	kstat_named_t	itxs_hck_badl4;	/* Bad L4 payload */
+
+ kstat_named_t itxs_err_notcb; /* No tcb's available */
+	kstat_named_t	itxs_err_nodescs;	/* No tx descriptors available */
+ kstat_named_t itxs_err_context; /* Total context failures */
+
+ kstat_named_t itxs_num_unblocked; /* Number of MAC unblocks */
+} i40e_txq_stat_t;
+
+/*
+ * An instance of an XL710 transmit/receive queue pair. This currently
+ * represents a combination of both a transmit and receive ring, though they
+ * should really be split apart into separate logical structures. Unfortunately,
+ * during initial work we mistakenly joined them together.
+ */
+typedef struct i40e_trqpair {
+ struct i40e *itrq_i40e;
+
+ /* Receive-side structures. */
+ kmutex_t itrq_rx_lock;
+ mac_ring_handle_t itrq_macrxring; /* Receive ring handle. */
+ i40e_rx_data_t *itrq_rxdata; /* Receive ring rx data. */
+ uint64_t itrq_rxgen; /* Generation number for mac/GLDv3. */
+ uint32_t itrq_index; /* Queue index in the PF */
+ uint32_t itrq_rx_intrvec; /* Receive interrupt vector. */
+
+ /* Receive-side stats. */
+ i40e_rxq_stat_t itrq_rxstat;
+ kstat_t *itrq_rxkstat;
+
+ /* Transmit-side structures. */
+ kmutex_t itrq_tx_lock;
+ mac_ring_handle_t itrq_mactxring; /* Transmit ring handle. */
+ uint32_t itrq_tx_intrvec; /* Transmit interrupt vector. */
+ boolean_t itrq_tx_blocked; /* Does MAC think we're blocked? */
+
+ /*
+ * TX data sizing
+ */
+ uint32_t itrq_tx_ring_size;
+ uint32_t itrq_tx_free_list_size;
+
+ /*
+ * TX descriptor ring data
+ */
+ i40e_dma_buffer_t itrq_desc_area; /* DMA buffer of tx desc ring */
+ i40e_tx_desc_t *itrq_desc_ring; /* TX Desc ring */
+ volatile uint32_t *itrq_desc_wbhead; /* TX write-back index */
+ uint32_t itrq_desc_head; /* Last index hw freed */
+ uint32_t itrq_desc_tail; /* Index of next free desc */
+ uint32_t itrq_desc_free; /* Number of free descriptors */
+
+ /*
+ * TX control block (tcb) data
+ */
+ kmutex_t itrq_tcb_lock;
+ i40e_tx_control_block_t *itrq_tcb_area; /* Array of control blocks */
+ i40e_tx_control_block_t **itrq_tcb_work_list; /* In use tcb */
+ i40e_tx_control_block_t **itrq_tcb_free_list; /* Available tcb */
+ uint32_t itrq_tcb_free; /* Count of free tcb */
+
+ /* Transmit-side stats. */
+ i40e_txq_stat_t itrq_txstat;
+ kstat_t *itrq_txkstat;
+
+} i40e_trqpair_t;
+
+/*
+ * VSI statistics.
+ *
+ * This mirrors the i40e_eth_stats structure but transforms it into a kstat.
+ * Note that the stock statistic structure also includes entries for tx
+ * discards. However, this is not actually implemented for the VSI (see Table
+ * 7-221), which is why we don't include the member; it would always have a
+ * value of zero. This choice was made to minimize confusion for someone
+ * looking at these stats, as a value of zero does not necessarily mean that
+ * a statistic is unimplemented.
+ */
+typedef struct i40e_vsi_stats {
+ uint64_t ivs_rx_bytes; /* gorc */
+ uint64_t ivs_rx_unicast; /* uprc */
+ uint64_t ivs_rx_multicast; /* mprc */
+ uint64_t ivs_rx_broadcast; /* bprc */
+ uint64_t ivs_rx_discards; /* rdpc */
+ uint64_t ivs_rx_unknown_protocol; /* rupp */
+ uint64_t ivs_tx_bytes; /* gotc */
+ uint64_t ivs_tx_unicast; /* uptc */
+ uint64_t ivs_tx_multicast; /* mptc */
+ uint64_t ivs_tx_broadcast; /* bptc */
+ uint64_t ivs_tx_errors; /* tepc */
+} i40e_vsi_stats_t;
+
+typedef struct i40e_vsi_kstats {
+ kstat_named_t ivk_rx_bytes;
+ kstat_named_t ivk_rx_unicast;
+ kstat_named_t ivk_rx_multicast;
+ kstat_named_t ivk_rx_broadcast;
+ kstat_named_t ivk_rx_discards;
+ kstat_named_t ivk_rx_unknown_protocol;
+ kstat_named_t ivk_tx_bytes;
+ kstat_named_t ivk_tx_unicast;
+ kstat_named_t ivk_tx_multicast;
+ kstat_named_t ivk_tx_broadcast;
+ kstat_named_t ivk_tx_errors;
+} i40e_vsi_kstats_t;
+
+/*
+ * For pf statistics, we opt not to use the standard statistics as defined by
+ * the Intel common code. This also currently combines statistics that are
+ * global across the entire device.
+ */
+typedef struct i40e_pf_stats {
+ uint64_t ips_rx_bytes; /* gorc */
+ uint64_t ips_rx_unicast; /* uprc */
+ uint64_t ips_rx_multicast; /* mprc */
+ uint64_t ips_rx_broadcast; /* bprc */
+ uint64_t ips_tx_bytes; /* gotc */
+ uint64_t ips_tx_unicast; /* uptc */
+ uint64_t ips_tx_multicast; /* mptc */
+ uint64_t ips_tx_broadcast; /* bptc */
+
+ uint64_t ips_rx_size_64; /* prc64 */
+ uint64_t ips_rx_size_127; /* prc127 */
+ uint64_t ips_rx_size_255; /* prc255 */
+ uint64_t ips_rx_size_511; /* prc511 */
+ uint64_t ips_rx_size_1023; /* prc1023 */
+ uint64_t ips_rx_size_1522; /* prc1522 */
+ uint64_t ips_rx_size_9522; /* prc9522 */
+
+ uint64_t ips_tx_size_64; /* ptc64 */
+ uint64_t ips_tx_size_127; /* ptc127 */
+ uint64_t ips_tx_size_255; /* ptc255 */
+ uint64_t ips_tx_size_511; /* ptc511 */
+ uint64_t ips_tx_size_1023; /* ptc1023 */
+ uint64_t ips_tx_size_1522; /* ptc1522 */
+ uint64_t ips_tx_size_9522; /* ptc9522 */
+
+ uint64_t ips_link_xon_rx; /* lxonrxc */
+ uint64_t ips_link_xoff_rx; /* lxoffrxc */
+ uint64_t ips_link_xon_tx; /* lxontxc */
+ uint64_t ips_link_xoff_tx; /* lxofftxc */
+ uint64_t ips_priority_xon_rx[8]; /* pxonrxc[8] */
+ uint64_t ips_priority_xoff_rx[8]; /* pxoffrxc[8] */
+ uint64_t ips_priority_xon_tx[8]; /* pxontxc[8] */
+ uint64_t ips_priority_xoff_tx[8]; /* pxofftxc[8] */
+ uint64_t ips_priority_xon_2_xoff[8]; /* rxon2offcnt[8] */
+
+ uint64_t ips_crc_errors; /* crcerrs */
+ uint64_t ips_illegal_bytes; /* illerrc */
+ uint64_t ips_mac_local_faults; /* mlfc */
+ uint64_t ips_mac_remote_faults; /* mrfc */
+ uint64_t ips_rx_length_errors; /* rlec */
+ uint64_t ips_rx_undersize; /* ruc */
+ uint64_t ips_rx_fragments; /* rfc */
+ uint64_t ips_rx_oversize; /* roc */
+ uint64_t ips_rx_jabber; /* rjc */
+ uint64_t ips_rx_discards; /* rdpc */
+ uint64_t ips_rx_vm_discards; /* ldpc */
+ uint64_t ips_rx_short_discards; /* mspdc */
+ uint64_t ips_tx_dropped_link_down; /* tdold */
+ uint64_t ips_rx_unknown_protocol; /* rupp */
+ uint64_t ips_rx_err1; /* rxerr1 */
+ uint64_t ips_rx_err2; /* rxerr2 */
+} i40e_pf_stats_t;
+
+typedef struct i40e_pf_kstats {
+ kstat_named_t ipk_rx_bytes; /* gorc */
+ kstat_named_t ipk_rx_unicast; /* uprc */
+ kstat_named_t ipk_rx_multicast; /* mprc */
+ kstat_named_t ipk_rx_broadcast; /* bprc */
+ kstat_named_t ipk_tx_bytes; /* gotc */
+ kstat_named_t ipk_tx_unicast; /* uptc */
+ kstat_named_t ipk_tx_multicast; /* mptc */
+ kstat_named_t ipk_tx_broadcast; /* bptc */
+
+ kstat_named_t ipk_rx_size_64; /* prc64 */
+ kstat_named_t ipk_rx_size_127; /* prc127 */
+ kstat_named_t ipk_rx_size_255; /* prc255 */
+ kstat_named_t ipk_rx_size_511; /* prc511 */
+ kstat_named_t ipk_rx_size_1023; /* prc1023 */
+ kstat_named_t ipk_rx_size_1522; /* prc1522 */
+ kstat_named_t ipk_rx_size_9522; /* prc9522 */
+
+ kstat_named_t ipk_tx_size_64; /* ptc64 */
+ kstat_named_t ipk_tx_size_127; /* ptc127 */
+ kstat_named_t ipk_tx_size_255; /* ptc255 */
+ kstat_named_t ipk_tx_size_511; /* ptc511 */
+ kstat_named_t ipk_tx_size_1023; /* ptc1023 */
+ kstat_named_t ipk_tx_size_1522; /* ptc1522 */
+ kstat_named_t ipk_tx_size_9522; /* ptc9522 */
+
+ kstat_named_t ipk_link_xon_rx; /* lxonrxc */
+ kstat_named_t ipk_link_xoff_rx; /* lxoffrxc */
+ kstat_named_t ipk_link_xon_tx; /* lxontxc */
+ kstat_named_t ipk_link_xoff_tx; /* lxofftxc */
+ kstat_named_t ipk_priority_xon_rx[8]; /* pxonrxc[8] */
+ kstat_named_t ipk_priority_xoff_rx[8]; /* pxoffrxc[8] */
+ kstat_named_t ipk_priority_xon_tx[8]; /* pxontxc[8] */
+ kstat_named_t ipk_priority_xoff_tx[8]; /* pxofftxc[8] */
+ kstat_named_t ipk_priority_xon_2_xoff[8]; /* rxon2offcnt[8] */
+
+ kstat_named_t ipk_crc_errors; /* crcerrs */
+ kstat_named_t ipk_illegal_bytes; /* illerrc */
+ kstat_named_t ipk_mac_local_faults; /* mlfc */
+ kstat_named_t ipk_mac_remote_faults; /* mrfc */
+ kstat_named_t ipk_rx_length_errors; /* rlec */
+ kstat_named_t ipk_rx_undersize; /* ruc */
+ kstat_named_t ipk_rx_fragments; /* rfc */
+ kstat_named_t ipk_rx_oversize; /* roc */
+ kstat_named_t ipk_rx_jabber; /* rjc */
+ kstat_named_t ipk_rx_discards; /* rdpc */
+ kstat_named_t ipk_rx_vm_discards; /* ldpc */
+ kstat_named_t ipk_rx_short_discards; /* mspdc */
+ kstat_named_t ipk_tx_dropped_link_down; /* tdold */
+ kstat_named_t ipk_rx_unknown_protocol; /* rupp */
+ kstat_named_t ipk_rx_err1; /* rxerr1 */
+ kstat_named_t ipk_rx_err2; /* rxerr2 */
+} i40e_pf_kstats_t;
+
+/*
+ * Resources that are pooled and specific to a given i40e_t.
+ */
+typedef struct i40e_func_rsrc {
+ uint_t ifr_nrx_queue;
+ uint_t ifr_nrx_queue_used;
+ uint_t ifr_ntx_queue;
+ uint_t ifr_trx_queue_used;
+ uint_t ifr_nvsis;
+ uint_t ifr_nvsis_used;
+ uint_t ifr_nmacfilt;
+ uint_t ifr_nmacfilt_used;
+ uint_t ifr_nmcastfilt;
+ uint_t ifr_nmcastfilt_used;
+} i40e_func_rsrc_t;
+
+/*
+ * Main i40e per-instance state.
+ */
+typedef struct i40e {
+ list_node_t i40e_glink; /* Global list link */
+ list_node_t i40e_dlink; /* Device list link */
+ kmutex_t i40e_general_lock; /* General device lock */
+
+ /*
+ * General Data and management
+ */
+ dev_info_t *i40e_dip;
+ int i40e_instance;
+ int i40e_fm_capabilities;
+ uint_t i40e_state;
+ i40e_attach_state_t i40e_attach_progress;
+ mac_handle_t i40e_mac_hdl;
+ ddi_periodic_t i40e_periodic_id;
+
+ /*
+ * Pointers to common code data structures and memory for the common
+ * code.
+ */
+ struct i40e_hw i40e_hw_space;
+ struct i40e_osdep i40e_osdep_space;
+ struct i40e_aq_get_phy_abilities_resp i40e_phy;
+ void *i40e_aqbuf;
+
+ /*
+ * Device state, switch information, and resources.
+ */
+ int i40e_vsi_id;
+ struct i40e_device *i40e_device;
+ i40e_func_rsrc_t i40e_resources;
+ uint16_t i40e_switch_rsrc_alloc;
+ uint16_t i40e_switch_rsrc_actual;
+ i40e_switch_rsrc_t *i40e_switch_rsrcs;
+ i40e_uaddr_t *i40e_uaddrs;
+ i40e_maddr_t *i40e_maddrs;
+ int i40e_mcast_promisc_count;
+ boolean_t i40e_promisc_on;
+ link_state_t i40e_link_state;
+ uint32_t i40e_link_speed; /* In Mbps */
+ link_duplex_t i40e_link_duplex;
+ uint_t i40e_sdu;
+ uint_t i40e_frame_max;
+
+ /*
+ * Transmit and receive information, tunables, and MAC info.
+ */
+ i40e_trqpair_t *i40e_trqpairs;
+ boolean_t i40e_mr_enable;
+ int i40e_num_trqpairs;
+ uint_t i40e_other_itr;
+
+ int i40e_num_rx_groups;
+ int i40e_num_rx_descs;
+ mac_group_handle_t i40e_rx_group_handle;
+ uint32_t i40e_rx_ring_size;
+ uint32_t i40e_rx_buf_size;
+ boolean_t i40e_rx_hcksum_enable;
+ uint32_t i40e_rx_dma_min;
+ uint32_t i40e_rx_limit_per_intr;
+ uint_t i40e_rx_itr;
+
+ int i40e_num_tx_descs;
+ uint32_t i40e_tx_ring_size;
+ uint32_t i40e_tx_buf_size;
+ uint32_t i40e_tx_block_thresh;
+ boolean_t i40e_tx_hcksum_enable;
+ uint32_t i40e_tx_dma_min;
+ uint_t i40e_tx_itr;
+
+ /*
+ * Interrupt state
+ *
+ * Note that the use of a single boolean_t for i40e_intr_poll isn't
+ * really the best design. When we have more than a single ring on the
+ * device working, we'll transition to using something more
+ * sophisticated.
+ */
+ uint_t i40e_intr_pri;
+ uint_t i40e_intr_force;
+ uint_t i40e_intr_type;
+ int i40e_intr_cap;
+ uint32_t i40e_intr_count;
+ uint32_t i40e_intr_count_max;
+ uint32_t i40e_intr_count_min;
+ size_t i40e_intr_size;
+ ddi_intr_handle_t *i40e_intr_handles;
+ ddi_cb_handle_t i40e_callback_handle;
+ boolean_t i40e_intr_poll;
+
+ /*
+ * DMA attributes. See i40e_buf.c for why we have copies of them in the
+ * i40e_t.
+ */
+ ddi_dma_attr_t i40e_static_dma_attr;
+ ddi_dma_attr_t i40e_txbind_dma_attr;
+ ddi_device_acc_attr_t i40e_desc_acc_attr;
+ ddi_device_acc_attr_t i40e_buf_acc_attr;
+
+ /*
+ * The following two fields are used to protect and keep track of
+ * outstanding, loaned buffers to MAC. If we have these, we can't
+ * detach as we have active DMA memory outstanding.
+ */
+ kmutex_t i40e_rx_pending_lock;
+ kcondvar_t i40e_rx_pending_cv;
+ uint32_t i40e_rx_pending;
+
+ /*
+ * PF statistics and VSI statistics.
+ */
+ kmutex_t i40e_stat_lock;
+ kstat_t *i40e_pf_kstat;
+ kstat_t *i40e_vsi_kstat;
+ i40e_pf_stats_t i40e_pf_stat;
+ i40e_vsi_stats_t i40e_vsi_stat;
+ uint16_t i40e_vsi_stat_id;
+
+ /*
+ * Misc. stats and counters that should maybe one day be kstats.
+ */
+ uint64_t i40e_s_link_status_errs;
+ uint32_t i40e_s_link_status_lasterr;
+} i40e_t;
+
+/*
+ * The i40e_device represents a PCI device which encapsulates multiple physical
+ * functions which are represented as an i40e_t. This is used to track the use
+ * of pooled resources throughout all of the various devices.
+ */
+typedef struct i40e_device {
+ list_node_t id_link;
+ dev_info_t *id_parent;
+ uint_t id_pci_bus;
+ uint_t id_pci_device;
+ uint_t id_nfuncs; /* Total number of functions */
+ uint_t id_nreg; /* Total number present */
+ list_t id_i40e_list; /* List of i40e_t's registered */
+ i40e_switch_rsrc_t *id_rsrcs; /* Switch resources for this PF */
+ uint_t id_rsrcs_alloc; /* Total allocated resources */
+ uint_t id_rsrcs_act; /* Actual number of resources */
+} i40e_device_t;
+
+/* Values for the interrupt forcing on the NIC. */
+#define I40E_INTR_NONE 0
+#define I40E_INTR_MSIX 1
+#define I40E_INTR_MSI 2
+#define I40E_INTR_LEGACY 3
+
+/* Hint that we don't want to do any polling... */
+#define I40E_POLL_NULL -1
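+
+/*
+ * A sketch of the assumed use: the interrupt path can pass I40E_POLL_NULL to
+ * i40e_ring_rx() to indicate that there is no byte budget, while MAC's polling
+ * path supplies a real byte budget through i40e_ring_rx_poll().
+ */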
+
+/*
+ * Logging functions.
+ */
+extern void i40e_error(i40e_t *, const char *, ...);
+extern void i40e_notice(i40e_t *, const char *, ...);
+extern void i40e_log(i40e_t *, const char *, ...);
+
+/*
+ * General link handling functions.
+ */
+extern void i40e_link_check(i40e_t *);
+extern void i40e_update_mtu(i40e_t *);
+
+/*
+ * FMA functions.
+ */
+extern int i40e_check_acc_handle(ddi_acc_handle_t);
+extern int i40e_check_dma_handle(ddi_dma_handle_t);
+extern void i40e_fm_ereport(i40e_t *, char *);
+
+/*
+ * Interrupt handlers and interrupt handler setup.
+ */
+extern void i40e_intr_chip_init(i40e_t *);
+extern void i40e_intr_chip_fini(i40e_t *);
+extern uint_t i40e_intr_msix(void *, void *);
+extern uint_t i40e_intr_msi(void *, void *);
+extern uint_t i40e_intr_legacy(void *, void *);
+extern void i40e_intr_io_enable_all(i40e_t *);
+extern void i40e_intr_io_disable_all(i40e_t *);
+extern void i40e_intr_io_clear_cause(i40e_t *);
+extern void i40e_intr_rx_queue_disable(i40e_t *, uint_t);
+extern void i40e_intr_rx_queue_enable(i40e_t *, uint_t);
+extern void i40e_intr_set_itr(i40e_t *, i40e_itr_index_t, uint_t);
+
+/*
+ * Receive-side functions
+ */
+extern mblk_t *i40e_ring_rx(i40e_trqpair_t *, int);
+extern mblk_t *i40e_ring_rx_poll(void *, int);
+extern void i40e_rx_recycle(caddr_t);
+
+/*
+ * Transmit-side functions
+ */
+mblk_t *i40e_ring_tx(void *, mblk_t *);
+extern void i40e_tx_recycle_ring(i40e_trqpair_t *);
+extern void i40e_tx_cleanup_ring(i40e_trqpair_t *);
+
+/*
+ * Statistics functions.
+ */
+extern boolean_t i40e_stats_init(i40e_t *);
+extern void i40e_stats_fini(i40e_t *);
+extern boolean_t i40e_stat_vsi_init(i40e_t *);
+extern void i40e_stat_vsi_fini(i40e_t *);
+extern boolean_t i40e_stats_trqpair_init(i40e_trqpair_t *);
+extern void i40e_stats_trqpair_fini(i40e_trqpair_t *);
+extern int i40e_m_stat(void *, uint_t, uint64_t *);
+extern int i40e_rx_ring_stat(mac_ring_driver_t, uint_t, uint64_t *);
+extern int i40e_tx_ring_stat(mac_ring_driver_t, uint_t, uint64_t *);
+
+/*
+ * MAC/GLDv3 functions, and functions called by MAC/GLDv3 support code.
+ */
+extern boolean_t i40e_register_mac(i40e_t *);
+extern boolean_t i40e_start(i40e_t *, boolean_t);
+extern void i40e_stop(i40e_t *, boolean_t);
+
+/*
+ * DMA & buffer functions and attributes
+ */
+extern void i40e_init_dma_attrs(i40e_t *, boolean_t);
+extern boolean_t i40e_alloc_ring_mem(i40e_t *);
+extern void i40e_free_ring_mem(i40e_t *, boolean_t);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _I40E_SW_H */
diff --git a/usr/src/uts/common/io/i40e/i40e_transceiver.c b/usr/src/uts/common/io/i40e/i40e_transceiver.c
new file mode 100644
index 0000000000..06f82f856e
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/i40e_transceiver.c
@@ -0,0 +1,2266 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
+ * Copyright 2016 Joyent, Inc.
+ */
+
+#include "i40e_sw.h"
+
+/*
+ * ---------------------------------------------------------
+ * Buffer and Memory Management, Receiving, and Transmitting
+ * ---------------------------------------------------------
+ *
+ * Each physical function (PF), which is what we think of as an instance of the
+ * device driver, has a series of associated transmit and receive queue pairs.
+ * Effectively, what we think of in MAC as rings. Each of these has their own
+ * ring of descriptors which is used as part of doing DMA activity.
+ *
+ * The transmit ring of descriptors are 16-byte entries which are used to send
+ * packets, program filters, etc. The receive ring of descriptors are either
+ * 16-byte or 32-bytes each. At the moment, we opt to use the larger descriptor
+ * format so that we're in a better position if we ever want to leverage that
+ * information later on.
+ *
+ * However, these rings are just for descriptors; they don't deal with
+ * how we actually store the memory that we need for DMA or the associated
+ * information that we need for keeping track of message blocks. To correspond
+ * to the hardware descriptor ring which is how we communicate with hardware, we
+ * introduce a control block which keeps track of our required metadata like DMA
+ * mappings.
+ *
+ * There are two main considerations that dictate how much memory and buffers
+ * we end up allocating. Those are:
+ *
+ * o The size of the ring (controlled through the driver.conf file)
+ *
+ * o The maximum size frame we can receive.
+ *
+ * The size of the rings currently defaults to 1024 descriptors and is stored in
+ * the i40e_t`i40e_rx_ring_size and i40e_t`i40e_tx_ring_size.
+ *
+ * While the size of the rings is controlled by the driver.conf, the maximum
+ * size frame is informed primarily through the use of dladm and the setting of
+ * the MTU property on the device. From the MTU, we then go and do some
+ * machinations. The first thing we do is we then have to add in space for the
+ * Ethernet header, potentially a VLAN header, and the FCS check. This value is
+ * what's stored as i40e_t`i40e_frame_max and is derived any time
+ * i40e_t`i40e_sdu changes.
+ *
+ * This size is then rounded up to the nearest 1k chunk, which represents the
+ * actual amount of memory that we'll allocate for a single frame.
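+ *
+ * For example, with the default MTU of 1500, the maximum frame works out to
+ * 1500 + 18 + 4 = 1522 bytes, which is then rounded up to a 2048 byte (2k)
+ * per-frame buffer allocation.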
+ *
+ * Note that for rx, we do something that might be unexpected. We always add
+ * an extra two bytes to the frame size that we allocate. We then offset the DMA
+ * address that we receive a packet into by two bytes. This ensures that the IP
+ * header will always be 4 byte aligned because the MAC header is either 14 or
+ * 18 bytes in length, depending on the use of 802.1Q tagging, which makes IP's
+ * and MAC's lives easier.
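+ *
+ * For example, with the two byte offset, an untagged frame's IP header lands
+ * at offset 2 + 14 = 16 and an 802.1Q tagged frame's at 2 + 18 = 20; both are
+ * 4-byte aligned.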
+ *
+ * Both the rx and tx descriptor rings (which are what we use to communicate
+ * with hardware) are allocated as a single region of DMA memory which is the
+ * size of the descriptor (32 bytes and 16 bytes respectively) times the total
+ * number of descriptors for an rx and tx ring.
+ *
+ * While the rx and tx descriptors are allocated using DMA-based memory, the
+ * control blocks for each of them are allocated using normal kernel memory.
+ * They aren't special from a DMA perspective. We'll go over the design of both
+ * receiving and transmitting separately, as they have slightly different
+ * control blocks and different ways that we manage the relationship between
+ * control blocks and descriptors.
+ *
+ * ---------------------------------
+ * RX Descriptors and Control Blocks
+ * ---------------------------------
+ *
+ * For every descriptor in the ring that the driver has, we need some associated
+ * memory, which means that we need to have the receive specific control block.
+ * We have a couple different, but related goals:
+ *
+ * o Once we've completed the mc_start GLDv3 endpoint, we do not want to do
+ * any additional memory allocations or DMA allocations if we don't have to.
+ *
+ * o We'd like to try and do as much zero-copy as possible, while taking into
+ * account the cost of mapping in DMA resources.
+ *
+ * o We'd like to have every receive descriptor available.
+ *
+ * Now, these rules are a bit in tension with one another. The act of mapping in
+ * is an exercise in trying to find the break-even point between page table
+ * updates and bcopy. We currently start by using the same metrics that ixgbe
+ * used; however, it should be known that this value has effectively been
+ * cargo-culted across to yet another driver, sorry.
+ *
+ * If we receive a packet which is larger than our copy threshold, we'll create
+ * a message block out of the DMA memory via desballoc(9F) and send that up to
+ * MAC that way. This will cause us to be notified when the message block is
+ * then freed because it has been consumed, dropped, or otherwise. Otherwise, if
+ * it's less than the threshold, we'll try to use allocb and bcopy it into the
+ * block, thus allowing us to immediately reuse the DMA resource. Note, on debug
+ * builds, we allow someone to whack the variable i40e_debug_rx_mode to override
+ * the behavior and always do a bcopy or a DMA bind.
+ *
+ * To try and ensure that the device always has blocks that it can receive data
+ * into, we maintain two lists of control blocks, a working list and a free
+ * list. Each list is sized equal to the number of descriptors in the rx ring.
+ * During the GLDv3 mc_start routine, we allocate a number of rx control blocks
+ * equal to twice the number of descriptors in the ring and we assign them
+ * equally to the free list and to the working list. Each control block also
+ * has DMA memory allocated and associated with it, which is used to receive
+ * the actual packet data. All of a received frame's data will end up in a
+ * single DMA buffer.
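+ *
+ * For example, a 1024 descriptor rx ring gets 2048 rx control blocks in
+ * total: 1024 initially assigned to the working list and 1024 to the free
+ * list, each with its own receive buffer.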
+ *
+ * During operation, we always maintain the invariant that each rx descriptor
+ * has an associated rx control block which lives in the working list. If we
+ * feel that we should loan up DMA memory to MAC in the form of a message block,
+ * we can only do so if we can maintain this invariant. To do that, we swap in
+ * one of the buffers from the free list. If none are available, then we resort
+ * to using allocb(9F) and bcopy(9F) on the packet instead, regardless of the
+ * size.
+ *
+ * Loaned message blocks come back to us when freemsg(9F) or freeb(9F) is
+ * called on the block, at which point we restore the rx control block to the
+ * free list and are able to reuse the DMA memory again. While the scheme may
+ * seem odd, it importantly keeps us out of trying to do any DMA allocations in
+ * the normal path of operation, even though we may still have to allocate
+ * message blocks and copy.
+ *
+ * The following state machine describes the life time of a rx control block. In
+ * the diagram we abbreviate the rx ring descriptor entry as rxd and the rx
+ * control block entry as rcb.
+ *
+ * | |
+ * * ... 1/2 of all initial rcb's ... *
+ * | |
+ * v v
+ * +------------------+ +------------------+
+ * | rcb on free list |---*---------->| rcb on work list |
+ * +------------------+ . +------------------+
+ * ^ . moved to |
+ * | replace rcb * . . Frame received,
+ * | loaned to | entry on free list
+ * | MAC + co. | available. rcb's
+ * | | memory made into mblk_t
+ * * . freemsg(9F) | and sent up to MAC.
+ * | called on |
+ * | loaned rcb |
+ * | and it is v
+ * | recycled. +-------------------+
+ * +--------------------<-----| rcb loaned to MAC |
+ * +-------------------+
+ *
+ * Finally, note that every rx control block has a reference count on it. One
+ * reference is added as long as the driver has had the GLDv3 mc_start endpoint
+ * called. If the GLDv3 mc_stop entry point is called, IP has been unplumbed and
+ * no other DLPI consumers remain, then we'll decrement the reference count by
+ * one. Whenever we loan up the rx control block and associated buffer to MAC,
+ * then we bump the reference count again. Even though the device is stopped,
+ * there may still be loaned frames in upper levels that we'll want to account
+ * for. Our callback from freemsg(9F)/freeb(9F) will take care of making sure
+ * that it is cleaned up.
+ *
+ * --------------------
+ * Managing the RX Ring
+ * --------------------
+ *
+ * The receive ring descriptors are arranged in a circular buffer with a head
+ * and tail pointer. These conventional head and tail pointers partition the
+ * ring into two portions: a portion that we, the operating system, manage and
+ * a portion that is managed by hardware. When
+ * hardware owns a descriptor in the ring, it means that it is waiting for data
+ * to be filled in. However, when a portion of the ring is owned by the driver,
+ * then that means that the descriptor has been consumed and we need to go take
+ * a look at it.
+ *
+ * The initial head is configured to be zero by writing it as such in the
+ * receive queue context in the FPM (function private memory from the host). The
+ * initial tail is written to be the last descriptor. This is written to via the
+ * PCIe register I40E_QRX_TAIL(). Technically, hardware owns everything between
+ * the HEAD and TAIL, inclusive. Note that while we initially program the HEAD,
+ * the only values we ever consult ourselves are the TAIL register and our own
+ * state tracking. Effectively, we cache the HEAD register and then update it
+ * ourselves based on our work.
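+ *
+ * For example, with a 1024 entry ring we initially program HEAD to 0 in the
+ * receive queue context and write 1023 to I40E_QRX_TAIL(), which leaves every
+ * descriptor owned by hardware and available to receive frames.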
+ *
+ * When we iterate over the rx descriptors and thus the received frames, we are
+ * either in an interrupt context or we've been asked by MAC to poll on the
+ * ring. If we've been asked to poll on the ring, we have a maximum number of
+ * bytes of mblk_t's to return. If processing an rx descriptor would cause us to
+ * exceed that count, then we do not process it. When in interrupt context, we
+ * don't have a strict byte count. However, to ensure liveness, we limit the
+ * amount of data based on a configuration value
+ * (i40e_t`i40e_rx_limit_per_intr). The number that we've started with for this
+ * is based on similar numbers that are used for ixgbe. After some additional
+ * time in the field, we'll have a sense as to whether or not it should be
+ * changed.
+ *
+ * When processing, we start at our own HEAD pointer
+ * (i40e_rx_data_t`rxd_desc_next), which indicates the descriptor to start
+ * processing. Every RX descriptor has what's described as the DD bit. This bit
+ * (the LSB of the second 8-byte word) indicates whether or not the descriptor
+ * is done. When we give descriptors to the hardware, this value is always
+ * zero. When the hardware has finished a descriptor, it will always be one.
+ *
+ * The first thing that we check is whether the DD bit indicates that the
+ * current HEAD is ready. If it isn't, then we're done. That's the primary
+ * invariant of processing a frame. If it's done, then there are a few other
+ * things that we want to look at. In the same status word as the DD bit, there
+ * are two other important bits:
+ *
+ * o End of Packet (EOP)
+ * o Error bits
+ *
+ * The end of packet bit indicates that we have reached the last descriptor
+ * for a given frame. Now, you might ask when would there be more than one
+ * descriptor. The reason for that might be due to large receive offload (lro)
+ * or header splitting functionality, which presently isn't supported in the
+ * driver. The error bits in the frame are only valid when EOP is set.
+ *
+ * If error bits are set on the frame, then we still consume it; however, we
+ * will not generate an mblk_t to send up to MAC. If there are no error bits
+ * set, then we'll consume the descriptor either using bcopy or DMA binding. See
+ * the earlier section 'RX Descriptors and Control Blocks' for more information
+ * on how that selection is made.
+ *
+ * Regardless of whether we construct an mblk_t or encounter an error, we end up
+ * resetting the descriptor. This re-arms the descriptor for hardware and in the
+ * process, we may end up assigning it a new receive control block. After we do
+ * this, we always update our HEAD pointer, no matter what.
+ *
+ * Finally, once we've consumed as much as we will in a given window, we go and
+ * update the TAIL register to indicate all the frames we've consumed. We only
+ * do a single bulk write for the ring.
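+ *
+ * In skeletal form (see i40e_ring_rx() for the real logic), that processing
+ * looks like:
+ *
+ *	while (descriptor at HEAD has the DD bit set) {
+ *		check EOP and the error bits;
+ *		bind or copy the frame into an mblk_t, or discard on error;
+ *		re-arm the descriptor and advance HEAD;
+ *	}
+ *	write HEAD - 1 to the TAIL register;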
+ *
+ * ---------------------------------
+ * TX Descriptors and Control Blocks
+ * ---------------------------------
+ *
+ * While the transmit path is similar in spirit to the receive path, it works
+ * differently due to the fact that all data is originated by the operating
+ * system and not by the device.
+ *
+ * Like rx, there is both a descriptor ring that we use to communicate to the
+ * driver and which points to the memory used to transmit a frame. Similarly,
+ * there is a corresponding transmit control block. Each transmit control block
+ * has a region of DMA memory allocated to it; however, the way we use it
+ * varies.
+ *
+ * The driver is asked to process a single frame at a time. That message block
+ * may be made up of multiple fragments linked together by the mblk_t`b_cont
+ * member. The device has a hard limit of eight buffers for a single logical
+ * frame. For each fragment, we'll try and use an entry
+ * from the tx descriptor ring and then we'll allocate a corresponding tx
+ * control block. Depending on the size of the fragment, we may copy it around
+ * or we might instead try to do DMA binding of the fragment.
+ *
+ * If we exceed the number of blocks that fit, we'll try to pull up the block
+ * and then we'll do a DMA bind and send it out.
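+ *
+ * A sketch of that fallback (simplified; nfrags is an illustrative count of
+ * the b_cont fragments and the real code must also handle allocation failure
+ * and statistics) might look like:
+ *
+ *	if (nfrags > I40E_TX_MAX_COOKIE) {
+ *		mblk_t *pulled = msgpullup(mp, -1);
+ *		if (pulled != NULL) {
+ *			freemsg(mp);
+ *			mp = pulled;
+ *		}
+ *	}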
+ *
+ * If we don't have enough space in the ring or tx control blocks available,
+ * then we'll return the unprocessed message block to MAC. This will induce flow
+ * control and once we recycle enough entries, we'll once again enable sending
+ * on the ring.
+ *
+ * We size the working list as equal to the number of descriptors in the ring.
+ * We size the free list as equal to 1.5 times the number of descriptors in the
+ * ring. We'll allocate a number of tx control block entries equal to the number
+ * of entries in the free list. By default, all entries are placed in the free
+ * list. As we come along and try to send something, we'll allocate entries from
+ * the free list and add them to the working list, where they'll stay until the
+ * hardware indicates that all of the data has been written back to us. The
+ * reason that we start with 1.5x is to help facilitate having more than one TX
+ * buffer associated with the DMA activity.
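+ *
+ * For example, with the default ring size of 1024 descriptors, the work list
+ * holds 1024 entries while the free list (and therefore the number of tx
+ * control blocks that we allocate) holds 1024 + 512 = 1536 entries.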
+ *
+ * --------------------
+ * Managing the TX Ring
+ * --------------------
+ *
+ * The transmit descriptor ring is driven by us. We maintain our own notion of a
+ * HEAD and TAIL register and we update the hardware with updates to the TAIL
+ * register. When the hardware is done writing out data, it updates us by
+ * writing back to a specific address, not by updating the individual
+ * descriptors. That address is a 4-byte region after the main transmit
+ * descriptor ring. This is why the descriptor ring has an extra descriptor's
+ * worth allocated to it.
+ *
+ * We maintain our notion of the HEAD in the i40e_trqpair_t`itrq_desc_head and
+ * the TAIL in the i40e_trqpair_t`itrq_desc_tail. When we write out frames,
+ * we'll update the tail there and in the I40E_QTX_TAIL() register. At various
+ * points in time, through both interrupts, and our own internal checks, we'll
+ * sync the write-back head portion of the DMA space. Based on the index it
+ * reports back, we'll free everything between our current HEAD and the
+ * indicated index and update HEAD to the new index.
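+ *
+ * A minimal sketch of that recycling (ignoring locking, DMA syncs, and FM
+ * checks, all of which the real routine must perform) looks something like:
+ *
+ *	wbhead = *itrq->itrq_desc_wbhead;
+ *	toclean = itrq->itrq_desc_head;
+ *	while (toclean != wbhead) {
+ *		<reset and free the tcb at toclean>;
+ *		toclean = i40e_next_desc(toclean, 1, itrq->itrq_tx_ring_size);
+ *		itrq->itrq_desc_free++;
+ *	}
+ *	itrq->itrq_desc_head = wbhead;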
+ *
+ * When a frame comes in, we try to use a number of transmit control blocks and
+ * we'll transition them from the free list to the work list. Each one is
+ * placed at the entry on the work list that corresponds to the transmit
+ * descriptor it is associated with. Once hardware indicates that the
+ * descriptor has been written back, we'll reset the control block and return
+ * it to the free list.
+ *
+ * The transmit control block free list is managed by keeping track of the number
+ * of entries in it, i40e_trqpair_t`itrq_tcb_free. We use it as a way to index
+ * into the free list and add things to it. In effect, we always push and pop
+ * from the tail and protect it with a single lock,
+ * i40e_trqpair_t`itrq_tcb_lock. This scheme is somewhat simplistic and may not
+ * stand up to further performance testing; however, it does allow us to get off
+ * the ground with the device driver.
+ *
+ * The following image describes where a given transmit control block lives in
+ * its lifetime:
+ *
+ * |
+ * * ... Initial placement for all tcb's
+ * |
+ * v
+ * +------------------+ +------------------+
+ * | tcb on free list |---*------------------>| tcb on work list |
+ * +------------------+ . +------------------+
+ * ^ . tcb allocated |
+ * | to send frame v
+ * | or fragment on |
+ * | wire, mblk from |
+ * | MAC associated. |
+ * | |
+ * +------*-------------------------------<----+
+ * .
+ * . Hardware indicates
+ * entry transmitted.
+ * tcb recycled, mblk
+ * from MAC freed.
+ *
+ * ------------
+ * Blocking MAC
+ * ------------
+ *
+ * When performing transmit, we can run out of descriptors and ring entries.
+ * When such a case happens, we return the mblk_t to MAC to indicate that we've
+ * been blocked. At that point in time, MAC becomes blocked and will not
+ * transmit anything out that specific ring until we notify MAC. To indicate
+ * that we're in such a situation we set the i40e_trqpair_t`itrq_tx_blocked
+ * member to B_TRUE.
+ *
+ * When we recycle tx descriptors then we'll end up signaling MAC by calling
+ * mac_tx_ring_update() if we were blocked, letting it know that it's safe to
+ * start sending frames out to us again.
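+ *
+ * In rough pseudo-code (the resume threshold and the MAC handles are written
+ * as placeholders here, not as the literal field names), that unblock check
+ * in the recycle path looks like:
+ *
+ *	if (itrq->itrq_tx_blocked == B_TRUE &&
+ *	    itrq->itrq_desc_free > <resume threshold>) {
+ *		itrq->itrq_tx_blocked = B_FALSE;
+ *		mac_tx_ring_update(<mac handle>, <tx ring handle>);
+ *	}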
+ */
+
+/*
+ * We set our DMA alignment requests based on the smallest supported page size
+ * of the corresponding platform.
+ */
+#if defined(__sparc)
+#define I40E_DMA_ALIGNMENT 0x2000ull
+#elif defined(__x86)
+#define I40E_DMA_ALIGNMENT 0x1000ull
+#else
+#error "unknown architecture for i40e"
+#endif
+
+/*
+ * This structure is used to maintain information and flags related to
+ * transmitting a frame. The first member is the set of flags we need to or into
+ * the command word (generally checksumming related). The second member
+ * controls the word offsets which are required for IP and L4 checksumming.
+ */
+typedef struct i40e_tx_context {
+ enum i40e_tx_desc_cmd_bits itc_cmdflags;
+ uint32_t itc_offsets;
+} i40e_tx_context_t;
+
+/*
+ * Toggles on debug builds which can be used to override our RX behavior based
+ * on thresholds.
+ */
+#ifdef DEBUG
+typedef enum {
+ I40E_DEBUG_RX_DEFAULT = 0,
+ I40E_DEBUG_RX_BCOPY = 1,
+ I40E_DEBUG_RX_DMABIND = 2
+} i40e_debug_rx_t;
+
+i40e_debug_rx_t i40e_debug_rx_mode = I40E_DEBUG_RX_DEFAULT;
+#endif /* DEBUG */
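+
+/*
+ * For example (illustrative only, and only on a DEBUG kernel), one could
+ * force the DMA bind path from mdb with:
+ *
+ *	# echo 'i40e_debug_rx_mode/W 2' | mdb -kw
+ */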
+
+/*
+ * Notes on the following pair of DMA attributes. The first attribute,
+ * i40e_static_dma_attr, is designed to be used for both the descriptor rings
+ * and the static buffers that we associate with control blocks. For this
+ * reason, we force an SGL length of one. While technically the driver supports
+ * a larger SGL (5 on rx and 8 on tx), we opt to only use one to simplify our
+ * management here. In addition, when the Intel common code wants to allocate
+ * memory via the i40e_allocate_virt_mem osdep function, we have it leverage
+ * the static dma attr.
+ *
+ * The second set of attributes, i40e_txbind_dma_attr, is what we use when we're
+ * binding a bunch of mblk_t fragments to go out the door. Note that the main
+ * difference here is that we're allowed a larger SGL length -- eight.
+ *
+ * Note, we default to setting DDI_DMA_FLAGERR (FMA capable) here. However,
+ * because we could have multiple instances which have different FMA error
+ * checking capabilities, or end up on different buses, we make these static
+ * and const and copy them into the i40e_t for the given device with the actual
+ * values that reflect the actual capabilities.
+ */
+static const ddi_dma_attr_t i40e_g_static_dma_attr = {
+ DMA_ATTR_V0, /* version number */
+ 0x0000000000000000ull, /* low address */
+ 0xFFFFFFFFFFFFFFFFull, /* high address */
+ 0x00000000FFFFFFFFull, /* dma counter max */
+ I40E_DMA_ALIGNMENT, /* alignment */
+ 0x00000FFF, /* burst sizes */
+ 0x00000001, /* minimum transfer size */
+ 0x00000000FFFFFFFFull, /* maximum transfer size */
+ 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
+ 1, /* scatter/gather list length */
+ 0x00000001, /* granularity */
+ DDI_DMA_FLAGERR /* DMA flags */
+};
+
+static const ddi_dma_attr_t i40e_g_txbind_dma_attr = {
+ DMA_ATTR_V0, /* version number */
+ 0x0000000000000000ull, /* low address */
+ 0xFFFFFFFFFFFFFFFFull, /* high address */
+ 0x00000000FFFFFFFFull, /* dma counter max */
+ I40E_DMA_ALIGNMENT, /* alignment */
+ 0x00000FFF, /* burst sizes */
+ 0x00000001, /* minimum transfer size */
+ 0x00000000FFFFFFFFull, /* maximum transfer size */
+ 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
+ I40E_TX_MAX_COOKIE, /* scatter/gather list length */
+ 0x00000001, /* granularity */
+ DDI_DMA_FLAGERR /* DMA flags */
+};
+
+/*
+ * Next, we have the attributes for these structures. The descriptor rings are
+ * all strictly little endian, while the data buffers are just arrays of bytes
+ * representing frames. Because of this, we purposefully simplify the driver
+ * programming life by programming the descriptor ring as little endian, while
+ * for the buffer data we keep it as unstructured.
+ *
+ * Note, that to keep the Intel common code operating in a reasonable way, when
+ * we allocate DMA memory for it, we do not use byte swapping and thus use the
+ * standard i40e_buf_acc_attr.
+ */
+static const ddi_device_acc_attr_t i40e_g_desc_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+static const ddi_device_acc_attr_t i40e_g_buf_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+/*
+ * The next two functions are designed to be type-safe versions of macros that
+ * are used to increment and decrement a descriptor index in the loop. Note,
+ * these are marked inline to try and keep the data path hot and they were
+ * effectively inlined in their previous life as macros.
+ */
+static inline int
+i40e_next_desc(int base, int count, int size)
+{
+ int out;
+
+ ASSERT(base >= 0);
+ ASSERT(count > 0);
+ ASSERT(size > 0);
+
+ if (base + count < size) {
+ out = base + count;
+ } else {
+ out = base + count - size;
+ }
+
+ ASSERT(out >= 0 && out < size);
+ return (out);
+}
+
+static inline int
+i40e_prev_desc(int base, int count, int size)
+{
+ int out;
+
+ ASSERT(base >= 0);
+ ASSERT(count > 0);
+ ASSERT(size > 0);
+
+ if (base >= count) {
+ out = base - count;
+ } else {
+ out = base - count + size;
+ }
+
+ ASSERT(out >= 0 && out < size);
+ return (out);
+}
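+
+/*
+ * For example, with a 1024 entry ring, i40e_next_desc(1023, 1, 1024) wraps
+ * around to 0 and i40e_prev_desc(0, 1, 1024) wraps back to 1023.
+ */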
+
+/*
+ * Free DMA memory that is represented by a i40e_dma_buffer_t.
+ */
+static void
+i40e_free_dma_buffer(i40e_dma_buffer_t *dmap)
+{
+ if (dmap->dmab_dma_address != NULL) {
+ VERIFY(dmap->dmab_dma_handle != NULL);
+ (void) ddi_dma_unbind_handle(dmap->dmab_dma_handle);
+ dmap->dmab_dma_address = NULL;
+ dmap->dmab_size = 0;
+ }
+
+ if (dmap->dmab_acc_handle != NULL) {
+ ddi_dma_mem_free(&dmap->dmab_acc_handle);
+ dmap->dmab_acc_handle = NULL;
+ dmap->dmab_address = NULL;
+ }
+
+ if (dmap->dmab_dma_handle != NULL) {
+ ddi_dma_free_handle(&dmap->dmab_dma_handle);
+ dmap->dmab_dma_handle = NULL;
+ }
+
+ /*
+ * These should only be set if we have valid handles allocated and
+ * therefore should always be NULLed out due to the above code. This
+ * is here to catch us acting sloppy.
+ */
+ ASSERT(dmap->dmab_dma_address == NULL);
+ ASSERT(dmap->dmab_address == NULL);
+ ASSERT(dmap->dmab_size == 0);
+ dmap->dmab_len = 0;
+}
+
+/*
+ * Allocate size bytes of DMA memory based on the passed in attributes. This
+ * fills in the information in dmap and is designed for all of our single cookie
+ * allocations.
+ */
+static boolean_t
+i40e_alloc_dma_buffer(i40e_t *i40e, i40e_dma_buffer_t *dmap,
+ ddi_dma_attr_t *attrsp, ddi_device_acc_attr_t *accp, boolean_t stream,
+ boolean_t zero, size_t size)
+{
+ int ret;
+ uint_t flags;
+ size_t len;
+ ddi_dma_cookie_t cookie;
+ uint_t ncookies;
+
+ if (stream == B_TRUE)
+ flags = DDI_DMA_STREAMING;
+ else
+ flags = DDI_DMA_CONSISTENT;
+
+ /*
+ * Step one: Allocate the DMA handle
+ */
+ ret = ddi_dma_alloc_handle(i40e->i40e_dip, attrsp, DDI_DMA_DONTWAIT,
+ NULL, &dmap->dmab_dma_handle);
+ if (ret != DDI_SUCCESS) {
+ i40e_error(i40e, "failed to allocate dma handle for I/O "
+ "buffers: %d", ret);
+ dmap->dmab_dma_handle = NULL;
+ return (B_FALSE);
+ }
+
+ /*
+ * Step two: Allocate the DMA memory
+ */
+ ret = ddi_dma_mem_alloc(dmap->dmab_dma_handle, size, accp, flags,
+ DDI_DMA_DONTWAIT, NULL, &dmap->dmab_address, &len,
+ &dmap->dmab_acc_handle);
+ if (ret != DDI_SUCCESS) {
+ i40e_error(i40e, "failed to allocate %d bytes of DMA for I/O "
+ "buffers", size);
+ dmap->dmab_address = NULL;
+ dmap->dmab_acc_handle = NULL;
+ i40e_free_dma_buffer(dmap);
+ return (B_FALSE);
+ }
+
+ /*
+ * Step three: Optionally zero
+ */
+ if (zero == B_TRUE)
+ bzero(dmap->dmab_address, len);
+
+ /*
+ * Step four: Bind the memory
+ */
+ ret = ddi_dma_addr_bind_handle(dmap->dmab_dma_handle, NULL,
+ dmap->dmab_address, len, DDI_DMA_RDWR | flags, DDI_DMA_DONTWAIT,
+ NULL, &cookie, &ncookies);
+ if (ret != DDI_DMA_MAPPED) {
+ i40e_error(i40e, "failed to allocate %d bytes of DMA for I/O "
+ "buffers: %d", size, ret);
+ i40e_free_dma_buffer(dmap);
+ return (B_FALSE);
+ }
+
+ VERIFY(ncookies == 1);
+ dmap->dmab_dma_address = cookie.dmac_laddress;
+ dmap->dmab_size = len;
+ dmap->dmab_len = 0;
+ return (B_TRUE);
+}
+
+/*
+ * This function is called once the last pending rcb has been freed by the upper
+ * levels of the system.
+ */
+static void
+i40e_free_rx_data(i40e_rx_data_t *rxd)
+{
+ VERIFY(rxd->rxd_rcb_pending == 0);
+
+ if (rxd->rxd_rcb_area != NULL) {
+ kmem_free(rxd->rxd_rcb_area,
+ sizeof (i40e_rx_control_block_t) *
+ (rxd->rxd_free_list_size + rxd->rxd_ring_size));
+ rxd->rxd_rcb_area = NULL;
+ }
+
+ if (rxd->rxd_free_list != NULL) {
+ kmem_free(rxd->rxd_free_list,
+ sizeof (i40e_rx_control_block_t *) *
+ rxd->rxd_free_list_size);
+ rxd->rxd_free_list = NULL;
+ }
+
+ if (rxd->rxd_work_list != NULL) {
+ kmem_free(rxd->rxd_work_list,
+ sizeof (i40e_rx_control_block_t *) *
+ rxd->rxd_ring_size);
+ }
+
+ kmem_free(rxd, sizeof (i40e_rx_data_t));
+}
+
+static boolean_t
+i40e_alloc_rx_data(i40e_t *i40e, i40e_trqpair_t *itrq)
+{
+ i40e_rx_data_t *rxd;
+
+ rxd = kmem_zalloc(sizeof (i40e_rx_data_t), KM_NOSLEEP);
+ if (rxd == NULL)
+ return (B_FALSE);
+ itrq->itrq_rxdata = rxd;
+ rxd->rxd_i40e = i40e;
+
+ rxd->rxd_ring_size = i40e->i40e_rx_ring_size;
+ rxd->rxd_free_list_size = i40e->i40e_rx_ring_size;
+
+ rxd->rxd_rcb_free = rxd->rxd_free_list_size;
+
+ rxd->rxd_work_list = kmem_zalloc(sizeof (i40e_rx_control_block_t *) *
+ rxd->rxd_ring_size, KM_NOSLEEP);
+ if (rxd->rxd_work_list == NULL) {
+ i40e_error(i40e, "failed to allocate rx work list for a ring "
+ "of %d entries for ring %d", rxd->rxd_ring_size,
+ itrq->itrq_index);
+ goto cleanup;
+ }
+
+ rxd->rxd_free_list = kmem_zalloc(sizeof (i40e_rx_control_block_t *) *
+ rxd->rxd_free_list_size, KM_NOSLEEP);
+ if (rxd->rxd_free_list == NULL) {
+ i40e_error(i40e, "failed to allocate a %d entry rx free list "
+ "for ring %d", rxd->rxd_free_list_size, itrq->itrq_index);
+ goto cleanup;
+ }
+
+ rxd->rxd_rcb_area = kmem_zalloc(sizeof (i40e_rx_control_block_t) *
+ (rxd->rxd_free_list_size + rxd->rxd_ring_size), KM_NOSLEEP);
+ if (rxd->rxd_rcb_area == NULL) {
+ i40e_error(i40e, "failed to allocate a %d entry rcb area for "
+ "ring %d", rxd->rxd_ring_size + rxd->rxd_free_list_size,
+ itrq->itrq_index);
+ goto cleanup;
+ }
+
+ return (B_TRUE);
+
+cleanup:
+ i40e_free_rx_data(rxd);
+ itrq->itrq_rxdata = NULL;
+ return (B_FALSE);
+}
+
+/*
+ * Free all of the memory that we've allocated for DMA. Note that we may have
+ * buffers that we've loaned up to the OS which are still outstanding. We'll
+ * always free up the descriptor ring, because we no longer need that. For each
+ * rcb, we'll iterate over it and if we drop the reference count to zero, then
+ * we'll free the message block and DMA related resources. However, if we don't
+ * take the last reference, then we'll note that data is still pending and
+ * clean it up when the last loaned buffer is returned.
+ */
+static void
+i40e_free_rx_dma(i40e_rx_data_t *rxd, boolean_t failed_init)
+{
+ uint32_t i, count, ref;
+
+ i40e_rx_control_block_t *rcb;
+ i40e_t *i40e = rxd->rxd_i40e;
+
+ i40e_free_dma_buffer(&rxd->rxd_desc_area);
+ rxd->rxd_desc_ring = NULL;
+ rxd->rxd_desc_next = 0;
+
+ mutex_enter(&i40e->i40e_rx_pending_lock);
+
+ rcb = rxd->rxd_rcb_area;
+ count = rxd->rxd_ring_size + rxd->rxd_free_list_size;
+
+ for (i = 0; i < count; i++, rcb++) {
+ VERIFY(rcb != NULL);
+
+ /*
+ * If we're cleaning up from a failed creation attempt, then an
+ * entry may never have been assembled which would mean that
+		 * its reference count is zero. If we find that, we leave it
+ * be, because nothing else should be modifying it at this
+ * point. We're not at the point that any more references can be
+ * added, just removed.
+ */
+ if (failed_init == B_TRUE && rcb->rcb_ref == 0)
+ continue;
+
+ ref = atomic_dec_32_nv(&rcb->rcb_ref);
+ if (ref == 0) {
+ freemsg(rcb->rcb_mp);
+ rcb->rcb_mp = NULL;
+ i40e_free_dma_buffer(&rcb->rcb_dma);
+ } else {
+ atomic_inc_32(&rxd->rxd_rcb_pending);
+ atomic_inc_32(&i40e->i40e_rx_pending);
+ }
+ }
+ mutex_exit(&i40e->i40e_rx_pending_lock);
+}
+
+/*
+ * Initialize the DMA memory for the descriptor ring and for each frame in the
+ * control block list.
+ */
+static boolean_t
+i40e_alloc_rx_dma(i40e_rx_data_t *rxd)
+{
+ int i, count;
+ size_t dmasz;
+ i40e_rx_control_block_t *rcb;
+ i40e_t *i40e = rxd->rxd_i40e;
+
+ /*
+ * First allocate the rx descriptor ring.
+ */
+ dmasz = sizeof (i40e_rx_desc_t) * rxd->rxd_ring_size;
+ VERIFY(dmasz > 0);
+ if (i40e_alloc_dma_buffer(i40e, &rxd->rxd_desc_area,
+ &i40e->i40e_static_dma_attr, &i40e->i40e_desc_acc_attr, B_FALSE,
+ B_TRUE, dmasz) == B_FALSE) {
+ i40e_error(i40e, "failed to allocate DMA resources "
+ "for rx descriptor ring");
+ return (B_FALSE);
+ }
+ rxd->rxd_desc_ring =
+ (i40e_rx_desc_t *)(uintptr_t)rxd->rxd_desc_area.dmab_address;
+ rxd->rxd_desc_next = 0;
+
+ count = rxd->rxd_ring_size + rxd->rxd_free_list_size;
+ rcb = rxd->rxd_rcb_area;
+
+ dmasz = i40e->i40e_rx_buf_size;
+ VERIFY(dmasz > 0);
+ for (i = 0; i < count; i++, rcb++) {
+ i40e_dma_buffer_t *dmap;
+ VERIFY(rcb != NULL);
+
+ if (i < rxd->rxd_ring_size) {
+ rxd->rxd_work_list[i] = rcb;
+ } else {
+ rxd->rxd_free_list[i - rxd->rxd_ring_size] = rcb;
+ }
+
+ dmap = &rcb->rcb_dma;
+ if (i40e_alloc_dma_buffer(i40e, dmap,
+ &i40e->i40e_static_dma_attr, &i40e->i40e_buf_acc_attr,
+ B_TRUE, B_FALSE, dmasz) == B_FALSE) {
+ i40e_error(i40e, "failed to allocate rx dma buffer");
+ return (B_FALSE);
+ }
+
+ /*
+ * Initialize the control block and offset the DMA address. See
+ * the note in the big theory statement that explains how this
+ * helps IP deal with alignment. Note, we don't worry about
+ * whether or not we successfully get an mblk_t from desballoc,
+ * it's a common case that we have to handle later on in the
+ * system.
+ */
+ dmap->dmab_size -= I40E_BUF_IPHDR_ALIGNMENT;
+ dmap->dmab_address += I40E_BUF_IPHDR_ALIGNMENT;
+ dmap->dmab_dma_address += I40E_BUF_IPHDR_ALIGNMENT;
+
+ rcb->rcb_ref = 1;
+ rcb->rcb_rxd = rxd;
+ rcb->rcb_free_rtn.free_func = i40e_rx_recycle;
+ rcb->rcb_free_rtn.free_arg = (caddr_t)rcb;
+ rcb->rcb_mp = desballoc((unsigned char *)dmap->dmab_address,
+ dmap->dmab_size, 0, &rcb->rcb_free_rtn);
+ }
+
+ return (B_TRUE);
+}
+
+static void
+i40e_free_tx_dma(i40e_trqpair_t *itrq)
+{
+ size_t fsz;
+
+ if (itrq->itrq_tcb_area != NULL) {
+ uint32_t i;
+ i40e_tx_control_block_t *tcb = itrq->itrq_tcb_area;
+
+ for (i = 0; i < itrq->itrq_tx_free_list_size; i++, tcb++) {
+ i40e_free_dma_buffer(&tcb->tcb_dma);
+ if (tcb->tcb_dma_handle != NULL) {
+ ddi_dma_free_handle(&tcb->tcb_dma_handle);
+ tcb->tcb_dma_handle = NULL;
+ }
+ }
+
+ fsz = sizeof (i40e_tx_control_block_t) *
+ itrq->itrq_tx_free_list_size;
+ kmem_free(itrq->itrq_tcb_area, fsz);
+ itrq->itrq_tcb_area = NULL;
+ }
+
+ if (itrq->itrq_tcb_free_list != NULL) {
+ fsz = sizeof (i40e_tx_control_block_t *) *
+ itrq->itrq_tx_free_list_size;
+ kmem_free(itrq->itrq_tcb_free_list, fsz);
+ itrq->itrq_tcb_free_list = NULL;
+ }
+
+ if (itrq->itrq_tcb_work_list != NULL) {
+ fsz = sizeof (i40e_tx_control_block_t *) *
+ itrq->itrq_tx_ring_size;
+ kmem_free(itrq->itrq_tcb_work_list, fsz);
+ itrq->itrq_tcb_work_list = NULL;
+ }
+
+ i40e_free_dma_buffer(&itrq->itrq_desc_area);
+ itrq->itrq_desc_ring = NULL;
+}
+
+static boolean_t
+i40e_alloc_tx_dma(i40e_trqpair_t *itrq)
+{
+ int i, ret;
+ size_t dmasz;
+ i40e_tx_control_block_t *tcb;
+ i40e_t *i40e = itrq->itrq_i40e;
+
+ itrq->itrq_tx_ring_size = i40e->i40e_tx_ring_size;
+ itrq->itrq_tx_free_list_size = i40e->i40e_tx_ring_size +
+ (i40e->i40e_tx_ring_size >> 1);
+
+ /*
+ * Allocate an additional tx descriptor for the writeback head.
+ */
+ dmasz = sizeof (i40e_tx_desc_t) * itrq->itrq_tx_ring_size;
+ dmasz += sizeof (i40e_tx_desc_t);
+
+ VERIFY(dmasz > 0);
+ if (i40e_alloc_dma_buffer(i40e, &itrq->itrq_desc_area,
+ &i40e->i40e_static_dma_attr, &i40e->i40e_desc_acc_attr,
+ B_FALSE, B_TRUE, dmasz) == B_FALSE) {
+ i40e_error(i40e, "failed to allocate DMA resources for tx "
+ "descriptor ring");
+ return (B_FALSE);
+ }
+ itrq->itrq_desc_ring =
+ (i40e_tx_desc_t *)(uintptr_t)itrq->itrq_desc_area.dmab_address;
+ itrq->itrq_desc_wbhead = (uint32_t *)(itrq->itrq_desc_ring +
+ itrq->itrq_tx_ring_size);
+ itrq->itrq_desc_head = 0;
+ itrq->itrq_desc_tail = 0;
+ itrq->itrq_desc_free = itrq->itrq_tx_ring_size;
+
+ itrq->itrq_tcb_work_list = kmem_zalloc(itrq->itrq_tx_ring_size *
+ sizeof (i40e_tx_control_block_t *), KM_NOSLEEP);
+ if (itrq->itrq_tcb_work_list == NULL) {
+ i40e_error(i40e, "failed to allocate a %d entry tx work list "
+ "for ring %d", itrq->itrq_tx_ring_size, itrq->itrq_index);
+ goto cleanup;
+ }
+
+ itrq->itrq_tcb_free_list = kmem_zalloc(itrq->itrq_tx_free_list_size *
+	    sizeof (i40e_tx_control_block_t *), KM_NOSLEEP);
+ if (itrq->itrq_tcb_free_list == NULL) {
+ i40e_error(i40e, "failed to allocate a %d entry tx free list "
+ "for ring %d", itrq->itrq_tx_free_list_size,
+ itrq->itrq_index);
+ goto cleanup;
+ }
+
+ /*
+ * We allocate enough tx control blocks to cover the free list.
+ */
+ itrq->itrq_tcb_area = kmem_zalloc(sizeof (i40e_tx_control_block_t) *
+ itrq->itrq_tx_free_list_size, KM_NOSLEEP);
+ if (itrq->itrq_tcb_area == NULL) {
+ i40e_error(i40e, "failed to allocate a %d entry tcb area for "
+ "ring %d", itrq->itrq_tx_free_list_size, itrq->itrq_index);
+ goto cleanup;
+ }
+
+ /*
+ * For each tcb, allocate DMA memory.
+ */
+ dmasz = i40e->i40e_tx_buf_size;
+ VERIFY(dmasz > 0);
+ tcb = itrq->itrq_tcb_area;
+ for (i = 0; i < itrq->itrq_tx_free_list_size; i++, tcb++) {
+ VERIFY(tcb != NULL);
+
+ /*
+ * Allocate both a DMA buffer which we'll use for when we copy
+ * packets for transmission and allocate a DMA handle which
+ * we'll use when we bind data.
+ */
+ ret = ddi_dma_alloc_handle(i40e->i40e_dip,
+ &i40e->i40e_txbind_dma_attr, DDI_DMA_DONTWAIT, NULL,
+ &tcb->tcb_dma_handle);
+ if (ret != DDI_SUCCESS) {
+ i40e_error(i40e, "failed to allocate DMA handle for tx "
+ "data binding on ring %d: %d", itrq->itrq_index,
+ ret);
+ tcb->tcb_dma_handle = NULL;
+ goto cleanup;
+ }
+
+ if (i40e_alloc_dma_buffer(i40e, &tcb->tcb_dma,
+ &i40e->i40e_static_dma_attr, &i40e->i40e_buf_acc_attr,
+ B_TRUE, B_FALSE, dmasz) == B_FALSE) {
+ i40e_error(i40e, "failed to allocate %d bytes of "
+ "DMA for tx data binding on ring %d: %d", dmasz,
+ itrq->itrq_index);
+ goto cleanup;
+ }
+
+ itrq->itrq_tcb_free_list[i] = tcb;
+ }
+
+ itrq->itrq_tcb_free = itrq->itrq_tx_free_list_size;
+
+ return (B_TRUE);
+
+cleanup:
+ i40e_free_tx_dma(itrq);
+ return (B_FALSE);
+}
+
+/*
+ * Free all memory associated with all of the rings on this i40e instance. Note,
+ * this is done as part of the GLDv3 stop routine.
+ */
+void
+i40e_free_ring_mem(i40e_t *i40e, boolean_t failed_init)
+{
+ int i;
+
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ i40e_rx_data_t *rxd = i40e->i40e_trqpairs[i].itrq_rxdata;
+
+ /*
+ * Clean up our rx data. We have to free DMA resources first and
+ * then if we have no more pending RCB's, then we'll go ahead
+ * and clean things up. Note, we can't set the stopped flag on
+ * the rx data until after we've done the first pass of the
+ * pending resources. Otherwise we might race with
+ * i40e_rx_recycle on determining who should free the
+ * i40e_rx_data_t above.
+ */
+ i40e_free_rx_dma(rxd, failed_init);
+
+ mutex_enter(&i40e->i40e_rx_pending_lock);
+ rxd->rxd_shutdown = B_TRUE;
+ if (rxd->rxd_rcb_pending == 0) {
+ i40e_free_rx_data(rxd);
+ i40e->i40e_trqpairs[i].itrq_rxdata = NULL;
+ }
+ mutex_exit(&i40e->i40e_rx_pending_lock);
+
+ i40e_free_tx_dma(&i40e->i40e_trqpairs[i]);
+ }
+}
+
+/*
+ * Allocate all of the resources associated with all of the rings on this i40e
+ * instance. Note this is done as part of the GLDv3 start routine and thus we
+ * should not use blocking allocations. This takes care of both DMA and non-DMA
+ * related resources.
+ */
+boolean_t
+i40e_alloc_ring_mem(i40e_t *i40e)
+{
+ int i;
+
+ for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
+ if (i40e_alloc_rx_data(i40e, &i40e->i40e_trqpairs[i]) ==
+ B_FALSE)
+ goto unwind;
+
+ if (i40e_alloc_rx_dma(i40e->i40e_trqpairs[i].itrq_rxdata) ==
+ B_FALSE)
+ goto unwind;
+
+ if (i40e_alloc_tx_dma(&i40e->i40e_trqpairs[i]) == B_FALSE)
+ goto unwind;
+ }
+
+ return (B_TRUE);
+
+unwind:
+ i40e_free_ring_mem(i40e, B_TRUE);
+ return (B_FALSE);
+}
+
+/*
+ * Because every instance of i40e may have different support for FMA
+ * capabilities, we copy the static DMA attributes into the i40e_t and then
+ * adjust that copy based on the given instance's actual capabilities.
+ */
+void
+i40e_init_dma_attrs(i40e_t *i40e, boolean_t fma)
+{
+ bcopy(&i40e_g_static_dma_attr, &i40e->i40e_static_dma_attr,
+ sizeof (ddi_dma_attr_t));
+ bcopy(&i40e_g_txbind_dma_attr, &i40e->i40e_txbind_dma_attr,
+ sizeof (ddi_dma_attr_t));
+ bcopy(&i40e_g_desc_acc_attr, &i40e->i40e_desc_acc_attr,
+ sizeof (ddi_device_acc_attr_t));
+ bcopy(&i40e_g_buf_acc_attr, &i40e->i40e_buf_acc_attr,
+ sizeof (ddi_device_acc_attr_t));
+
+ if (fma == B_TRUE) {
+ i40e->i40e_static_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
+ i40e->i40e_txbind_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
+ } else {
+ i40e->i40e_static_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
+ i40e->i40e_txbind_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
+ }
+}
+
+static void
+i40e_rcb_free(i40e_rx_data_t *rxd, i40e_rx_control_block_t *rcb)
+{
+ mutex_enter(&rxd->rxd_free_lock);
+ ASSERT(rxd->rxd_rcb_free < rxd->rxd_free_list_size);
+ ASSERT(rxd->rxd_free_list[rxd->rxd_rcb_free] == NULL);
+ rxd->rxd_free_list[rxd->rxd_rcb_free] = rcb;
+ rxd->rxd_rcb_free++;
+ mutex_exit(&rxd->rxd_free_lock);
+}
+
+static i40e_rx_control_block_t *
+i40e_rcb_alloc(i40e_rx_data_t *rxd)
+{
+ i40e_rx_control_block_t *rcb;
+
+ mutex_enter(&rxd->rxd_free_lock);
+ if (rxd->rxd_rcb_free == 0) {
+ mutex_exit(&rxd->rxd_free_lock);
+ return (NULL);
+ }
+ rxd->rxd_rcb_free--;
+ rcb = rxd->rxd_free_list[rxd->rxd_rcb_free];
+ VERIFY(rcb != NULL);
+ rxd->rxd_free_list[rxd->rxd_rcb_free] = NULL;
+ mutex_exit(&rxd->rxd_free_lock);
+
+ return (rcb);
+}
+
+/*
+ * This is the callback that we get from the OS when freemsg(9F) has been called
+ * on a loaned descriptor. In addition, if we take the last reference count
+ * here, then we have to tear down all of the rx data.
+ */
+void
+i40e_rx_recycle(caddr_t arg)
+{
+ uint32_t ref;
+ i40e_rx_control_block_t *rcb;
+ i40e_rx_data_t *rxd;
+ i40e_t *i40e;
+
+ /* LINTED: E_BAD_PTR_CAST_ALIGN */
+ rcb = (i40e_rx_control_block_t *)arg;
+ rxd = rcb->rcb_rxd;
+ i40e = rxd->rxd_i40e;
+
+ /*
+ * It's possible for this to be called with a reference count of zero.
+ * That will happen when we're doing the freemsg after taking the last
+ * reference because we're tearing down everything and this rcb is not
+ * outstanding.
+ */
+ if (rcb->rcb_ref == 0)
+ return;
+
+ /*
+ * Don't worry about failure of desballoc here. It'll only become fatal
+ * if we're trying to use it and we can't in i40e_rx_bind().
+ */
+ rcb->rcb_mp = desballoc((unsigned char *)rcb->rcb_dma.dmab_address,
+ rcb->rcb_dma.dmab_size, 0, &rcb->rcb_free_rtn);
+ i40e_rcb_free(rxd, rcb);
+
+ /*
+ * It's possible that the rcb was being used while we are shutting down
+ * the device. In that case, we'll take the final reference from the
+ * device here.
+ */
+ ref = atomic_dec_32_nv(&rcb->rcb_ref);
+ if (ref == 0) {
+ freemsg(rcb->rcb_mp);
+ rcb->rcb_mp = NULL;
+ i40e_free_dma_buffer(&rcb->rcb_dma);
+
+ mutex_enter(&i40e->i40e_rx_pending_lock);
+ atomic_dec_32(&rxd->rxd_rcb_pending);
+ atomic_dec_32(&i40e->i40e_rx_pending);
+
+ /*
+ * If this was the last block and it's been indicated that we've
+ * passed the shutdown point, we should clean up.
+ */
+ if (rxd->rxd_shutdown == B_TRUE && rxd->rxd_rcb_pending == 0) {
+ i40e_free_rx_data(rxd);
+ cv_broadcast(&i40e->i40e_rx_pending_cv);
+ }
+
+ mutex_exit(&i40e->i40e_rx_pending_lock);
+ }
+}
+
+static mblk_t *
+i40e_rx_bind(i40e_trqpair_t *itrq, i40e_rx_data_t *rxd, uint32_t index,
+ uint32_t plen)
+{
+ mblk_t *mp;
+ i40e_t *i40e = rxd->rxd_i40e;
+ i40e_rx_control_block_t *rcb, *rep_rcb;
+
+ ASSERT(MUTEX_HELD(&itrq->itrq_rx_lock));
+
+ if ((rep_rcb = i40e_rcb_alloc(rxd)) == NULL) {
+ itrq->itrq_rxstat.irxs_rx_bind_norcb.value.ui64++;
+ return (NULL);
+ }
+
+ rcb = rxd->rxd_work_list[index];
+
+ /*
+	 * Check to make sure we have an mblk_t. If we don't, this is our last
+ * chance to try and get one.
+ */
+ if (rcb->rcb_mp == NULL) {
+ rcb->rcb_mp =
+ desballoc((unsigned char *)rcb->rcb_dma.dmab_address,
+ rcb->rcb_dma.dmab_size, 0, &rcb->rcb_free_rtn);
+ if (rcb->rcb_mp == NULL) {
+ itrq->itrq_rxstat.irxs_rx_bind_nomp.value.ui64++;
+ i40e_rcb_free(rxd, rcb);
+ return (NULL);
+ }
+ }
+
+ I40E_DMA_SYNC(&rcb->rcb_dma, DDI_DMA_SYNC_FORKERNEL);
+
+ if (i40e_check_dma_handle(rcb->rcb_dma.dmab_dma_handle) != DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_DEGRADED);
+ atomic_or_32(&i40e->i40e_state, I40E_ERROR);
+ i40e_rcb_free(rxd, rcb);
+ return (NULL);
+ }
+
+ /*
+ * Note, we've already accounted for the I40E_BUF_IPHDR_ALIGNMENT.
+ */
+ mp = rcb->rcb_mp;
+ atomic_inc_32(&rcb->rcb_ref);
+ mp->b_wptr = mp->b_rptr + plen;
+ mp->b_next = mp->b_cont = NULL;
+
+ rxd->rxd_work_list[index] = rep_rcb;
+ return (mp);
+}
+
+/*
+ * We're going to allocate a new message block for this frame and attempt to
+ * receive it. See the big theory statement for more information on when we copy
+ * versus bind.
+ */
+static mblk_t *
+i40e_rx_copy(i40e_trqpair_t *itrq, i40e_rx_data_t *rxd, uint32_t index,
+ uint32_t plen)
+{
+ i40e_t *i40e = rxd->rxd_i40e;
+ i40e_rx_control_block_t *rcb;
+ mblk_t *mp;
+
+ ASSERT(index < rxd->rxd_ring_size);
+ rcb = rxd->rxd_work_list[index];
+
+ I40E_DMA_SYNC(&rcb->rcb_dma, DDI_DMA_SYNC_FORKERNEL);
+
+ if (i40e_check_dma_handle(rcb->rcb_dma.dmab_dma_handle) != DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_DEGRADED);
+ atomic_or_32(&i40e->i40e_state, I40E_ERROR);
+ return (NULL);
+ }
+
+ mp = allocb(plen + I40E_BUF_IPHDR_ALIGNMENT, 0);
+ if (mp == NULL) {
+ itrq->itrq_rxstat.irxs_rx_copy_nomem.value.ui64++;
+ return (NULL);
+ }
+
+ mp->b_rptr += I40E_BUF_IPHDR_ALIGNMENT;
+ bcopy(rcb->rcb_dma.dmab_address, mp->b_rptr, plen);
+ mp->b_wptr = mp->b_rptr + plen;
+
+ return (mp);
+}
+
+/*
+ * Determine if the device has enabled any checksum flags for us. The level of
+ * checksum computed will depend on the type packet that we have, which is
+ * contained in ptype. For example, the checksum logic it does will vary
+ * depending on whether or not the packet is considered tunneled, whether it
+ * recognizes the L4 type, etc. Section 8.3.4.3 summarizes which checksums are
+ * valid.
+ *
+ * While there are additional checksums that we could recognize here, we'll need
+ * to get some additional GLDv3 enhancements to be able to properly describe
+ * them.
+ */
+static void
+i40e_rx_hcksum(i40e_trqpair_t *itrq, mblk_t *mp, uint64_t status, uint32_t err,
+ uint32_t ptype)
+{
+ uint32_t cksum;
+ struct i40e_rx_ptype_decoded pinfo;
+
+ ASSERT(ptype <= 255);
+ pinfo = decode_rx_desc_ptype(ptype);
+
+ cksum = 0;
+
+ /*
+ * If the ptype isn't something that we know in the driver, then we
+ * shouldn't even consider moving forward.
+ */
+ if (pinfo.known == 0) {
+ itrq->itrq_rxstat.irxs_hck_unknown.value.ui64++;
+ return;
+ }
+
+ /*
+ * If hardware didn't set the L3L4P bit on the frame, then there is no
+ * checksum offload to consider.
+ */
+ if ((status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)) == 0) {
+ itrq->itrq_rxstat.irxs_hck_nol3l4p.value.ui64++;
+ return;
+ }
+
+ /*
+	 * The device tells us that IPv6 checksums for packets that contain a
+	 * Destination Options Header or a Routing Header shouldn't be trusted.
+	 * Discard all checksums in this case.
+ */
+ if (pinfo.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ pinfo.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6 &&
+ (status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))) {
+ itrq->itrq_rxstat.irxs_hck_v6skip.value.ui64++;
+ return;
+ }
+
+ /*
+	 * The hardware denotes three kinds of possible errors. Two cover inner
+	 * and outer IP checksum errors (IPE and EIPE) and the third is for L4
+	 * checksum errors (L4E). If there is only one IP header, then
+ * the only thing that we care about is IPE. Note that since we don't
+ * support inner checksums, we will ignore IPE being set on tunneled
+ * packets and only care about EIPE.
+ */
+ if (pinfo.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ pinfo.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) {
+ if (pinfo.tunnel_type == I40E_RX_PTYPE_OUTER_NONE) {
+ if ((err & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)) != 0) {
+ itrq->itrq_rxstat.irxs_hck_iperr.value.ui64++;
+ } else {
+ itrq->itrq_rxstat.irxs_hck_v4hdrok.value.ui64++;
+ cksum |= HCK_IPV4_HDRCKSUM_OK;
+ }
+ } else {
+ if ((err & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)) != 0) {
+ itrq->itrq_rxstat.irxs_hck_eiperr.value.ui64++;
+ } else {
+ itrq->itrq_rxstat.irxs_hck_v4hdrok.value.ui64++;
+ cksum |= HCK_IPV4_HDRCKSUM_OK;
+ }
+ }
+ }
+
+ /*
+ * We only have meaningful L4 checksums in the case of IP->L4 and
+	 * IP->IP->L4. There is no outer L4 checksum data available in any
+	 * other case. Further, we don't bother reporting the valid checksum in
+	 * the IP->IP->L4 case.
+ */
+ if (pinfo.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ pinfo.tunnel_type == I40E_RX_PTYPE_TUNNEL_NONE &&
+ (pinfo.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP ||
+ pinfo.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP ||
+ pinfo.inner_prot == I40E_RX_PTYPE_INNER_PROT_ICMP ||
+ pinfo.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)) {
+ ASSERT(pinfo.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4);
+ if ((err & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) != 0) {
+ itrq->itrq_rxstat.irxs_hck_l4err.value.ui64++;
+ } else {
+ itrq->itrq_rxstat.irxs_hck_l4hdrok.value.ui64++;
+ cksum |= HCK_FULLCKSUM_OK;
+ }
+ }
+
+ if (cksum != 0) {
+ itrq->itrq_rxstat.irxs_hck_set.value.ui64++;
+ mac_hcksum_set(mp, 0, 0, 0, 0, cksum);
+ } else {
+ itrq->itrq_rxstat.irxs_hck_miss.value.ui64++;
+ }
+}
+
+mblk_t *
+i40e_ring_rx(i40e_trqpair_t *itrq, int poll_bytes)
+{
+ i40e_t *i40e;
+ i40e_hw_t *hw;
+ i40e_rx_data_t *rxd;
+ uint32_t cur_head;
+ i40e_rx_desc_t *cur_desc;
+ i40e_rx_control_block_t *rcb;
+ uint64_t rx_bytes, rx_frames;
+ uint64_t stword;
+ mblk_t *mp, *mp_head, **mp_tail;
+
+ ASSERT(MUTEX_HELD(&itrq->itrq_rx_lock));
+ rxd = itrq->itrq_rxdata;
+ i40e = itrq->itrq_i40e;
+ hw = &i40e->i40e_hw_space;
+
+ if (!(i40e->i40e_state & I40E_STARTED) ||
+ (i40e->i40e_state & I40E_OVERTEMP) ||
+ (i40e->i40e_state & I40E_SUSPENDED) ||
+ (i40e->i40e_state & I40E_ERROR))
+ return (NULL);
+
+ /*
+ * Before we do anything else, we have to make sure that all of the DMA
+ * buffers are synced up and then check to make sure that they're
+ * actually good from an FM perspective.
+ */
+ I40E_DMA_SYNC(&rxd->rxd_desc_area, DDI_DMA_SYNC_FORKERNEL);
+ if (i40e_check_dma_handle(rxd->rxd_desc_area.dmab_dma_handle) !=
+ DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_DEGRADED);
+ atomic_or_32(&i40e->i40e_state, I40E_ERROR);
+ return (NULL);
+ }
+
+ /*
+ * Prepare our stats. We do a limited amount of processing in both
+ * polling and interrupt context. The limit in interrupt context is
+ * based on frames, in polling context based on bytes.
+ */
+ rx_bytes = rx_frames = 0;
+ mp_head = NULL;
+ mp_tail = &mp_head;
+
+ /*
+ * At this point, the descriptor ring is available to check. We'll try
+ * and process until we either run out of poll_bytes or descriptors.
+ */
+ cur_head = rxd->rxd_desc_next;
+ cur_desc = &rxd->rxd_desc_ring[cur_head];
+ stword = LE64_TO_CPU(cur_desc->wb.qword1.status_error_len);
+
+ /*
+	 * Note, the primary invariant of this loop is that cur_head,
+ * cur_desc, and stword always point to the currently processed
+ * descriptor. When we leave the loop, it should point to a descriptor
+ * that HAS NOT been processed. Meaning, that if we haven't consumed the
+ * frame, the descriptor should not be advanced.
+ */
+ while ((stword & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) != 0) {
+ uint32_t error, eop, plen, ptype;
+
+ /*
+ * The DD, PLEN, and EOP bits are the only ones that are valid
+ * in every frame. The error information is only valid when EOP
+ * is set in the same frame.
+ *
+		 * At this time, because we don't do any LRO or header
+		 * splitting, we expect that every frame will have EOP set.
+		 * When later functionality comes in, we'll want to re-evaluate
+		 * this.
+ */
+ eop = stword & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
+ VERIFY(eop != 0);
+
+ error = (stword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ if (error & I40E_RX_ERR_BITS) {
+ itrq->itrq_rxstat.irxs_rx_desc_error.value.ui64++;
+ goto discard;
+ }
+
+ plen = (stword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+
+ ptype = (stword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+
+ /*
+ * This packet contains valid data. We should check to see if
+ * we're actually going to consume it based on its length (to
+ * ensure that we don't overshoot our quota). We determine
+ * whether to bcopy or bind the DMA resources based on the size
+ * of the frame. However, if on debug, we allow it to be
+ * overridden for testing purposes.
+		 */
+
+ /*
+ * Ensure we don't exceed our polling quota by reading this
+		 * frame. Note that we only bump bytes now; we bump frames
+		 * later.
+ */
+ if ((poll_bytes != I40E_POLL_NULL) &&
+ (rx_bytes + plen) > poll_bytes)
+ break;
+ rx_bytes += plen;
+
+ mp = NULL;
+ if (plen >= i40e->i40e_rx_dma_min)
+ mp = i40e_rx_bind(itrq, rxd, cur_head, plen);
+ if (mp == NULL)
+ mp = i40e_rx_copy(itrq, rxd, cur_head, plen);
+
+ if (mp != NULL) {
+ if (i40e->i40e_rx_hcksum_enable)
+ i40e_rx_hcksum(itrq, mp, stword, error, ptype);
+ *mp_tail = mp;
+ mp_tail = &mp->b_next;
+ }
+
+		/*
+		 * Now we need to prepare this frame for use again. See the
+		 * discussion in the big theory statement.
+		 *
+		 * Regardless of whether we took the bind path or the copy path
+		 * above, the entry on the work list for this index is valid:
+		 * i40e_rx_bind() swaps in a replacement rcb from the free list
+		 * and i40e_rx_copy() leaves the original in place. Either way,
+		 * we can use the current index to re-arm the descriptor with
+		 * the buffer that's on the work list.
+		 */
+discard:
+ rcb = rxd->rxd_work_list[cur_head];
+ cur_desc->read.pkt_addr =
+ CPU_TO_LE64((uintptr_t)rcb->rcb_dma.dmab_dma_address);
+ cur_desc->read.hdr_addr = 0;
+
+ /*
+ * Finally, update our loop invariants.
+ */
+ cur_head = i40e_next_desc(cur_head, 1, rxd->rxd_ring_size);
+ cur_desc = &rxd->rxd_desc_ring[cur_head];
+ stword = LE64_TO_CPU(cur_desc->wb.qword1.status_error_len);
+
+ /*
+ * To help provide liveness, we limit the amount of data that
+ * we'll end up counting. Note that in these cases, an interrupt
+ * is not dissimilar from a polling request.
+ */
+ rx_frames++;
+ if (rx_frames > i40e->i40e_rx_limit_per_intr) {
+ itrq->itrq_rxstat.irxs_rx_intr_limit.value.ui64++;
+ break;
+ }
+ }
+
+ /*
+ * As we've modified the ring, we need to make sure that we sync the
+	 * descriptor ring for the device. Next, we update the hardware and
+	 * update our notion of the next head to read from hardware.
+ */
+ I40E_DMA_SYNC(&rxd->rxd_desc_area, DDI_DMA_SYNC_FORDEV);
+ if (i40e_check_dma_handle(rxd->rxd_desc_area.dmab_dma_handle) !=
+ DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_DEGRADED);
+ atomic_or_32(&i40e->i40e_state, I40E_ERROR);
+ }
+
+ if (rx_frames != 0) {
+ uint32_t tail;
+ ddi_acc_handle_t rh = i40e->i40e_osdep_space.ios_reg_handle;
+ rxd->rxd_desc_next = cur_head;
+ tail = i40e_prev_desc(cur_head, 1, rxd->rxd_ring_size);
+
+ I40E_WRITE_REG(hw, I40E_QRX_TAIL(itrq->itrq_index), tail);
+ if (i40e_check_acc_handle(rh) != DDI_FM_OK) {
+ ddi_fm_service_impact(i40e->i40e_dip,
+ DDI_SERVICE_DEGRADED);
+ atomic_or_32(&i40e->i40e_state, I40E_ERROR);
+ }
+
+ itrq->itrq_rxstat.irxs_bytes.value.ui64 += rx_bytes;
+ itrq->itrq_rxstat.irxs_packets.value.ui64 += rx_frames;
+ }
+
+#ifdef DEBUG
+ if (rx_frames == 0) {
+ ASSERT(rx_bytes == 0);
+ }
+#endif
+
+ return (mp_head);
+}
+
+/*
+ * This function is called by the GLDv3 when it wants to poll on a ring. The
+ * primary difference from when we call this during an interrupt is that we
+ * have a limit on the number of bytes that we should consume.
+ */
+mblk_t *
+i40e_ring_rx_poll(void *arg, int poll_bytes)
+{
+ i40e_trqpair_t *itrq = arg;
+ mblk_t *mp;
+
+ ASSERT(poll_bytes > 0);
+ if (poll_bytes == 0)
+ return (NULL);
+
+ mutex_enter(&itrq->itrq_rx_lock);
+ mp = i40e_ring_rx(itrq, poll_bytes);
+ mutex_exit(&itrq->itrq_rx_lock);
+
+ return (mp);
+}
+
+/*
+ * This is a structure I wish someone would fill out for me for dorking with the
+ * checksums. When we get some more experience with this, we should go ahead and
+ * consider adding this to MAC.
+ */
+typedef enum mac_ether_offload_flags {
+ MEOI_L2INFO_SET = 0x01,
+ MEOI_VLAN_TAGGED = 0x02,
+ MEOI_L3INFO_SET = 0x04,
+ MEOI_L3CKSUM_SET = 0x08,
+ MEOI_L4INFO_SET = 0x10,
+ MEOI_L4CKSUM_SET = 0x20
+} mac_ether_offload_flags_t;
+
+typedef struct mac_ether_offload_info {
+ mac_ether_offload_flags_t meoi_flags;
+ uint8_t meoi_l2hlen; /* How long is the Ethernet header? */
+ uint16_t meoi_l3proto; /* What's the Ethertype */
+ uint8_t meoi_l3hlen; /* How long is the header? */
+ uint8_t meoi_l4proto; /* What is the payload type? */
+ uint8_t meoi_l4hlen; /* How long is the L4 header */
+ mblk_t *meoi_l3ckmp; /* Which mblk has the l3 checksum */
+ off_t meoi_l3ckoff; /* What's the offset to it */
+ mblk_t *meoi_l4ckmp; /* Which mblk has the L4 checksum */
+ off_t meoi_l4off; /* What is the offset to it? */
+} mac_ether_offload_info_t;
+
+/*
+ * This is something that we'd like to make a general MAC function. Before we do
+ * that, we should add support for TSO.
+ *
+ * We should really keep track of our offset and not walk everything every
+ * time. I can't imagine that this will be kind to us at high packet rates;
+ * however, for the moment, let's leave that.
+ *
+ * This walks a message block chain without pulling up to fill in the context
+ * information. Note that the data we care about could be hidden across more
+ * than one mblk_t.
+ */
+static int
+i40e_meoi_get_uint8(mblk_t *mp, off_t off, uint8_t *out)
+{
+ size_t mpsize;
+ uint8_t *bp;
+
+ mpsize = msgsize(mp);
+ /* Check for overflow */
+	if (off + sizeof (uint8_t) > mpsize)
+ return (-1);
+
+ mpsize = MBLKL(mp);
+ while (off >= mpsize) {
+ mp = mp->b_cont;
+ off -= mpsize;
+ mpsize = MBLKL(mp);
+ }
+
+ bp = mp->b_rptr + off;
+ *out = *bp;
+ return (0);
+}
+
+static int
+i40e_meoi_get_uint16(mblk_t *mp, off_t off, uint16_t *out)
+{
+ size_t mpsize;
+ uint8_t *bp;
+
+ mpsize = msgsize(mp);
+ /* Check for overflow */
+ if (off + sizeof (uint16_t) > mpsize)
+ return (-1);
+
+ mpsize = MBLKL(mp);
+ while (off >= mpsize) {
+ mp = mp->b_cont;
+ off -= mpsize;
+ mpsize = MBLKL(mp);
+ }
+
+ /*
+ * Data is in network order. Note the second byte of data might be in
+ * the next mp.
+ */
+ bp = mp->b_rptr + off;
+ *out = *bp << 8;
+ if (off + 1 == mpsize) {
+ mp = mp->b_cont;
+ bp = mp->b_rptr;
+ } else {
+ bp++;
+ }
+
+ *out |= *bp;
+ return (0);
+}
+
+static int
+mac_ether_offload_info(mblk_t *mp, mac_ether_offload_info_t *meoi)
+{
+ size_t off;
+ uint16_t ether;
+ uint8_t ipproto, iplen, l4len, maclen;
+
+ bzero(meoi, sizeof (mac_ether_offload_info_t));
+
+ off = offsetof(struct ether_header, ether_type);
+ if (i40e_meoi_get_uint16(mp, off, &ether) != 0)
+ return (-1);
+
+ if (ether == ETHERTYPE_VLAN) {
+ off = offsetof(struct ether_vlan_header, ether_type);
+ if (i40e_meoi_get_uint16(mp, off, &ether) != 0)
+ return (-1);
+ meoi->meoi_flags |= MEOI_VLAN_TAGGED;
+ maclen = sizeof (struct ether_vlan_header);
+ } else {
+ maclen = sizeof (struct ether_header);
+ }
+ meoi->meoi_flags |= MEOI_L2INFO_SET;
+ meoi->meoi_l2hlen = maclen;
+ meoi->meoi_l3proto = ether;
+
+ switch (ether) {
+ case ETHERTYPE_IP:
+ /*
+ * For IPv4 we need to get the length of the header, as it can
+ * be variable.
+ */
+ off = offsetof(ipha_t, ipha_version_and_hdr_length) + maclen;
+ if (i40e_meoi_get_uint8(mp, off, &iplen) != 0)
+ return (-1);
+ iplen &= 0x0f;
+ if (iplen < 5 || iplen > 0x0f)
+ return (-1);
+ iplen *= 4;
+ off = offsetof(ipha_t, ipha_protocol) + maclen;
+ if (i40e_meoi_get_uint8(mp, off, &ipproto) == -1)
+ return (-1);
+ break;
+ case ETHERTYPE_IPV6:
+ iplen = 40;
+ off = offsetof(ip6_t, ip6_nxt) + maclen;
+ if (i40e_meoi_get_uint8(mp, off, &ipproto) == -1)
+ return (-1);
+ break;
+ default:
+ return (0);
+ }
+ meoi->meoi_l3hlen = iplen;
+ meoi->meoi_l4proto = ipproto;
+ meoi->meoi_flags |= MEOI_L3INFO_SET;
+
+ switch (ipproto) {
+ case IPPROTO_TCP:
+ off = offsetof(tcph_t, th_offset_and_rsrvd) + maclen + iplen;
+ if (i40e_meoi_get_uint8(mp, off, &l4len) == -1)
+ return (-1);
+ l4len = (l4len & 0xf0) >> 4;
+ if (l4len < 5 || l4len > 0xf)
+ return (-1);
+ l4len *= 4;
+ break;
+ case IPPROTO_UDP:
+ l4len = sizeof (struct udphdr);
+ break;
+ case IPPROTO_SCTP:
+ l4len = sizeof (sctp_hdr_t);
+ break;
+ default:
+ return (0);
+ }
+
+ meoi->meoi_l4hlen = l4len;
+ meoi->meoi_flags |= MEOI_L4INFO_SET;
+ return (0);
+}
+
+/*
+ * Attempt to put together the information we'll need to feed into a descriptor
+ * to properly program the hardware for checksum offload as well as the
+ * generally required flags.
+ *
+ * The i40e_tx_context_t`itc_cmdflags contains the set of flags we need to or
+ * into the descriptor based on the checksum flags for this mblk_t and the
+ * actual information we care about.
+ */
+static int
+i40e_tx_context(i40e_t *i40e, i40e_trqpair_t *itrq, mblk_t *mp,
+ i40e_tx_context_t *tctx)
+{
+ int ret;
+ uint32_t flags, start;
+ mac_ether_offload_info_t meo;
+ i40e_txq_stat_t *txs = &itrq->itrq_txstat;
+
+ bzero(tctx, sizeof (i40e_tx_context_t));
+
+ if (i40e->i40e_tx_hcksum_enable != B_TRUE)
+ return (0);
+
+ mac_hcksum_get(mp, &start, NULL, NULL, NULL, &flags);
+ if (flags == 0)
+ return (0);
+
+ if ((ret = mac_ether_offload_info(mp, &meo)) != 0) {
+ txs->itxs_hck_meoifail.value.ui64++;
+ return (ret);
+ }
+
+ /*
+	 * Have we been asked to checksum an IPv4 header? If so, verify that we
+ * have sufficient information and then set the proper fields in the
+ * command structure.
+ */
+ if (flags & HCK_IPV4_HDRCKSUM) {
+ if ((meo.meoi_flags & MEOI_L2INFO_SET) == 0) {
+ txs->itxs_hck_nol2info.value.ui64++;
+ return (-1);
+ }
+ if ((meo.meoi_flags & MEOI_L3INFO_SET) == 0) {
+ txs->itxs_hck_nol3info.value.ui64++;
+ return (-1);
+ }
+ if (meo.meoi_l3proto != ETHERTYPE_IP) {
+ txs->itxs_hck_badl3.value.ui64++;
+ return (-1);
+ }
+ tctx->itc_cmdflags |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ tctx->itc_offsets |= (meo.meoi_l2hlen >> 1) <<
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+ tctx->itc_offsets |= (meo.meoi_l3hlen >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+
+ /*
+	 * We've been asked to provide an L4 checksum. First, set up the IP
+	 * information in the descriptor if we haven't already done so, before
+	 * moving on to seeing if we have enough information for the L4
+	 * checksum offload.
+ */
+ if (flags & HCK_PARTIALCKSUM) {
+ if ((meo.meoi_flags & MEOI_L4INFO_SET) == 0) {
+ txs->itxs_hck_nol4info.value.ui64++;
+ return (-1);
+ }
+
+ if (!(flags & HCK_IPV4_HDRCKSUM)) {
+ if ((meo.meoi_flags & MEOI_L2INFO_SET) == 0) {
+ txs->itxs_hck_nol2info.value.ui64++;
+ return (-1);
+ }
+ if ((meo.meoi_flags & MEOI_L3INFO_SET) == 0) {
+ txs->itxs_hck_nol3info.value.ui64++;
+ return (-1);
+ }
+
+ if (meo.meoi_l3proto == ETHERTYPE_IP) {
+ tctx->itc_cmdflags |=
+ I40E_TX_DESC_CMD_IIPT_IPV4;
+ } else if (meo.meoi_l3proto == ETHERTYPE_IPV6) {
+ tctx->itc_cmdflags |=
+ I40E_TX_DESC_CMD_IIPT_IPV6;
+ } else {
+ txs->itxs_hck_badl3.value.ui64++;
+ return (-1);
+ }
+ tctx->itc_offsets |= (meo.meoi_l2hlen >> 1) <<
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+ tctx->itc_offsets |= (meo.meoi_l3hlen >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+
+ switch (meo.meoi_l4proto) {
+ case IPPROTO_TCP:
+ tctx->itc_cmdflags |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ break;
+ case IPPROTO_UDP:
+ tctx->itc_cmdflags |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ break;
+ case IPPROTO_SCTP:
+ tctx->itc_cmdflags |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ break;
+ default:
+ txs->itxs_hck_badl4.value.ui64++;
+ return (-1);
+ }
+
+ tctx->itc_offsets |= (meo.meoi_l4hlen >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ }
+
+ return (0);
+}
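+
+/*
+ * Worked example of the offset packing above (illustrative only): for an
+ * untagged Ethernet + IPv4 + TCP frame with no options, meoi_l2hlen is 14,
+ * meoi_l3hlen is 20, and meoi_l4hlen is 20, so i40e_tx_context() builds:
+ *
+ *	itc_offsets = (14 >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT |
+ *	    (20 >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT |
+ *	    (20 >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ *
+ * i.e. MACLEN in 2-byte units (7) and IPLEN/L4LEN in 4-byte units (5 each),
+ * matching the unit conventions the hardware expects in the descriptor.
+ */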
+
+static void
+i40e_tcb_free(i40e_trqpair_t *itrq, i40e_tx_control_block_t *tcb)
+{
+ ASSERT(tcb != NULL);
+
+ mutex_enter(&itrq->itrq_tcb_lock);
+ ASSERT(itrq->itrq_tcb_free < itrq->itrq_tx_free_list_size);
+ itrq->itrq_tcb_free_list[itrq->itrq_tcb_free] = tcb;
+ itrq->itrq_tcb_free++;
+ mutex_exit(&itrq->itrq_tcb_lock);
+}
+
+static i40e_tx_control_block_t *
+i40e_tcb_alloc(i40e_trqpair_t *itrq)
+{
+ i40e_tx_control_block_t *ret;
+
+ mutex_enter(&itrq->itrq_tcb_lock);
+ if (itrq->itrq_tcb_free == 0) {
+ mutex_exit(&itrq->itrq_tcb_lock);
+ return (NULL);
+ }
+
+ itrq->itrq_tcb_free--;
+ ret = itrq->itrq_tcb_free_list[itrq->itrq_tcb_free];
+ itrq->itrq_tcb_free_list[itrq->itrq_tcb_free] = NULL;
+ mutex_exit(&itrq->itrq_tcb_lock);
+
+ ASSERT(ret != NULL);
+ return (ret);
+}
+
+/*
+ * This should be used to free any DMA resources and the associated mblk_t
+ * before a control block is returned to the free list. It's used as part of
+ * recycling the message blocks when an interrupt or other activity indicates
+ * that we need to take a look.
+ */
+static void
+i40e_tcb_reset(i40e_tx_control_block_t *tcb)
+{
+ switch (tcb->tcb_type) {
+ case I40E_TX_COPY:
+ tcb->tcb_dma.dmab_len = 0;
+ break;
+ case I40E_TX_DMA:
+ (void) ddi_dma_unbind_handle(tcb->tcb_dma_handle);
+ break;
+ case I40E_TX_NONE:
+ /* Cast to pacify lint */
+ panic("trying to free tcb %p with bad type none\n", (void *)tcb);
+ default:
+ panic("unknown i40e tcb type: %d", tcb->tcb_type);
+ }
+
+ tcb->tcb_type = I40E_TX_NONE;
+ freemsg(tcb->tcb_mp);
+ tcb->tcb_mp = NULL;
+ tcb->tcb_next = NULL;
+}
+
+/*
+ * This is called as part of shutting down to clean up all outstanding
+ * descriptors. Similar to recycle, except we don't re-arm anything and instead
+ * just return control blocks to the free list.
+ */
+void
+i40e_tx_cleanup_ring(i40e_trqpair_t *itrq)
+{
+ uint32_t index;
+
+ ASSERT(MUTEX_HELD(&itrq->itrq_tx_lock));
+ ASSERT(itrq->itrq_desc_free <= itrq->itrq_tx_ring_size);
+
+ /*
+ * Because we should have shut down the chip at this point, it should be
+ * safe to just clean up all the entries between our head and tail.
+ */
+#ifdef DEBUG
+ index = I40E_READ_REG(&itrq->itrq_i40e->i40e_hw_space,
+ I40E_QTX_ENA(itrq->itrq_index));
+ VERIFY0(index & (I40E_QTX_ENA_QENA_REQ_MASK |
+ I40E_QTX_ENA_QENA_STAT_MASK));
+#endif
+
+ index = itrq->itrq_desc_head;
+ while (itrq->itrq_desc_free < itrq->itrq_tx_ring_size) {
+ i40e_tx_control_block_t *tcb;
+
+ tcb = itrq->itrq_tcb_work_list[index];
+ VERIFY(tcb != NULL);
+ itrq->itrq_tcb_work_list[index] = NULL;
+ i40e_tcb_reset(tcb);
+ i40e_tcb_free(itrq, tcb);
+
+ bzero(&itrq->itrq_desc_ring[index], sizeof (i40e_tx_desc_t));
+ index = i40e_next_desc(index, 1, itrq->itrq_tx_ring_size);
+ itrq->itrq_desc_free++;
+ }
+
+ ASSERT(index == itrq->itrq_desc_tail);
+ itrq->itrq_desc_head = index;
+}
+
+/*
+ * We're here either by hook or by crook. We need to see if there are transmit
+ * descriptors available for us to go and clean up and return to the hardware.
+ * We may also be blocked, and if so, we should make sure that we let it know
+ * we're good to go.
+ */
+void
+i40e_tx_recycle_ring(i40e_trqpair_t *itrq)
+{
+ uint32_t wbhead, toclean, count;
+ i40e_tx_control_block_t *tcbhead;
+ i40e_t *i40e = itrq->itrq_i40e;
+
+ mutex_enter(&itrq->itrq_tx_lock);
+
+ ASSERT(itrq->itrq_desc_free <= itrq->itrq_tx_ring_size);
+ if (itrq->itrq_desc_free == itrq->itrq_tx_ring_size) {
+ if (itrq->itrq_tx_blocked == B_TRUE) {
+ itrq->itrq_tx_blocked = B_FALSE;
+ mac_tx_ring_update(i40e->i40e_mac_hdl,
+ itrq->itrq_mactxring);
+ itrq->itrq_txstat.itxs_num_unblocked.value.ui64++;
+ }
+ mutex_exit(&itrq->itrq_tx_lock);
+ return;
+ }
+
+ /*
+ * Now we need to try and see if there's anything available. The
+ * hardware writes back the head location and guarantees that it does
+ * not use relaxed ordering when doing so.
+ */
+ VERIFY0(ddi_dma_sync(itrq->itrq_desc_area.dmab_dma_handle,
+ (uintptr_t)itrq->itrq_desc_wbhead,
+ sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL));
+
+ if (i40e_check_dma_handle(itrq->itrq_desc_area.dmab_dma_handle) !=
+ DDI_FM_OK) {
+ mutex_exit(&itrq->itrq_tx_lock);
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_DEGRADED);
+ atomic_or_32(&i40e->i40e_state, I40E_ERROR);
+ return;
+ }
+
+ wbhead = *itrq->itrq_desc_wbhead;
+ toclean = itrq->itrq_desc_head;
+ count = 0;
+ tcbhead = NULL;
+
+ while (toclean != wbhead) {
+ i40e_tx_control_block_t *tcb;
+
+ tcb = itrq->itrq_tcb_work_list[toclean];
+ itrq->itrq_tcb_work_list[toclean] = NULL;
+ ASSERT(tcb != NULL);
+ tcb->tcb_next = tcbhead;
+ tcbhead = tcb;
+
+ /*
+ * We zero this out for sanity purposes.
+ */
+ bzero(&itrq->itrq_desc_ring[toclean], sizeof (i40e_tx_desc_t));
+ toclean = i40e_next_desc(toclean, 1, itrq->itrq_tx_ring_size);
+ count++;
+ }
+
+ itrq->itrq_desc_head = wbhead;
+ itrq->itrq_desc_free += count;
+ itrq->itrq_txstat.itxs_recycled.value.ui64 += count;
+ ASSERT(itrq->itrq_desc_free <= itrq->itrq_tx_ring_size);
+
+ if (itrq->itrq_tx_blocked == B_TRUE &&
+ itrq->itrq_desc_free > i40e->i40e_tx_block_thresh) {
+ itrq->itrq_tx_blocked = B_FALSE;
+
+ mac_tx_ring_update(i40e->i40e_mac_hdl, itrq->itrq_mactxring);
+ itrq->itrq_txstat.itxs_num_unblocked.value.ui64++;
+ }
+
+ mutex_exit(&itrq->itrq_tx_lock);
+
+ /*
+ * Now clean up the tcb.
+ */
+ while (tcbhead != NULL) {
+ i40e_tx_control_block_t *tcb = tcbhead;
+
+ tcbhead = tcb->tcb_next;
+ i40e_tcb_reset(tcb);
+ i40e_tcb_free(itrq, tcb);
+ }
+
+ DTRACE_PROBE2(i40e__recycle, i40e_trqpair_t *, itrq, uint32_t, count);
+}
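+
+/*
+ * Illustrative sketch of the ring arithmetic used above, assuming that
+ * i40e_next_desc() simply advances an index modulo the ring size:
+ *
+ *	next = (index + count) % ring_size;
+ *
+ * With a 1024-entry ring, a head of 1022, and a write-back head of 2, the
+ * recycle loop walks indices 1022, 1023, 0, and 1, returning four
+ * descriptors (count == 4) to the free count.
+ */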
+
+/*
+ * We've been asked to send a message block on the wire. We'll only have a
+ * single chain. There will not be any b_next pointers; however, there may be
+ * multiple b_cont blocks.
+ *
+ * We may do one of three things with any given mblk_t chain:
+ *
+ * 1) Drop it
+ * 2) Transmit it
+ * 3) Return it
+ *
+ * If we return it to MAC, then MAC will flow control on our behalf. In other
+ * words, it won't send us anything until we tell it that it's okay to send us
+ * something.
+ */
+mblk_t *
+i40e_ring_tx(void *arg, mblk_t *mp)
+{
+ const mblk_t *nmp;
+ size_t mpsize;
+ i40e_tx_control_block_t *tcb;
+ i40e_tx_desc_t *txdesc;
+ i40e_tx_context_t tctx;
+ int cmd, type;
+
+ i40e_trqpair_t *itrq = arg;
+ i40e_t *i40e = itrq->itrq_i40e;
+ i40e_hw_t *hw = &i40e->i40e_hw_space;
+ i40e_txq_stat_t *txs = &itrq->itrq_txstat;
+
+ ASSERT(mp->b_next == NULL);
+
+ if (!(i40e->i40e_state & I40E_STARTED) ||
+ (i40e->i40e_state & I40E_OVERTEMP) ||
+ (i40e->i40e_state & I40E_SUSPENDED) ||
+ (i40e->i40e_state & I40E_ERROR) ||
+ (i40e->i40e_link_state != LINK_STATE_UP)) {
+ freemsg(mp);
+ return (NULL);
+ }
+
+ /*
+ * Figure out the relevant context about this frame that we might need
+ * for enabling checksum, lso, etc. This also fills in information that
+ * we might set around the packet type, etc.
+ */
+ if (i40e_tx_context(i40e, itrq, mp, &tctx) < 0) {
+ freemsg(mp);
+ itrq->itrq_txstat.itxs_err_context.value.ui64++;
+ return (NULL);
+ }
+
+ /*
+ * For the primordial driver we can punt on doing any recycling right
+ * now; however, longer term we probably need to do some more pro-active
+ * recycling to cut back on stalls in the tx path.
+ */
+
+ /*
+ * Do a quick size check to make sure it fits into what we think it
+ * should for this device. Note that longer term this check will not
+ * suffice, particularly once we support TSO.
+ */
+ mpsize = 0;
+ for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
+ mpsize += MBLKL(nmp);
+ }
+
+ /*
+ * First we allocate our tx control block and prepare the packet for
+ * transmit before we do a final check for descriptors. We do it this
+ * way to minimize the time under the tx lock.
+ */
+ tcb = i40e_tcb_alloc(itrq);
+ if (tcb == NULL) {
+ txs->itxs_err_notcb.value.ui64++;
+ goto txfail;
+ }
+
+ /*
+ * For transmitting a block, we're currently going to use just a
+ * single control block and bcopy all of the fragments into it. We
+ * should be more intelligent about doing DMA binding or otherwise, but
+ * for getting off the ground this will have to do.
+ */
+ ASSERT(tcb->tcb_dma.dmab_len == 0);
+ ASSERT(tcb->tcb_dma.dmab_size >= mpsize);
+ for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
+ size_t clen = MBLKL(nmp);
+ void *coff = tcb->tcb_dma.dmab_address + tcb->tcb_dma.dmab_len;
+
+ bcopy(nmp->b_rptr, coff, clen);
+ tcb->tcb_dma.dmab_len += clen;
+ }
+ ASSERT(tcb->tcb_dma.dmab_len == mpsize);
+
+ /*
+ * There's really no need to keep the mp here, but let's just do it to
+ * help with our own debugging for now.
+ */
+ tcb->tcb_mp = mp;
+ tcb->tcb_type = I40E_TX_COPY;
+ I40E_DMA_SYNC(&tcb->tcb_dma, DDI_DMA_SYNC_FORDEV);
+
+ mutex_enter(&itrq->itrq_tx_lock);
+ if (itrq->itrq_desc_free < i40e->i40e_tx_block_thresh) {
+ txs->itxs_err_nodescs.value.ui64++;
+ mutex_exit(&itrq->itrq_tx_lock);
+ goto txfail;
+ }
+
+ /*
+ * Build up the descriptor and send it out. Thankfully at the moment
+ * we only need a single desc, because we're not doing anything fancy
+ * yet.
+ */
+ ASSERT(itrq->itrq_desc_free > 0);
+ itrq->itrq_desc_free--;
+ txdesc = &itrq->itrq_desc_ring[itrq->itrq_desc_tail];
+ itrq->itrq_tcb_work_list[itrq->itrq_desc_tail] = tcb;
+ itrq->itrq_desc_tail = i40e_next_desc(itrq->itrq_desc_tail, 1,
+ itrq->itrq_tx_ring_size);
+
+ /*
+ * Note, we always set EOP and RS which indicates that this is the last
+ * data frame and that we should ask for it to be transmitted. We also
+ * must always set ICRC, because that is an internal bit that must be
+ * set to one for data descriptors. The remaining bits in the command
+ * descriptor depend on checksumming and are determined based on the
+ * information set up in i40e_tx_context().
+ */
+ type = I40E_TX_DESC_DTYPE_DATA;
+ cmd = I40E_TX_DESC_CMD_EOP |
+ I40E_TX_DESC_CMD_RS |
+ I40E_TX_DESC_CMD_ICRC |
+ tctx.itc_cmdflags;
+ txdesc->buffer_addr =
+ CPU_TO_LE64((uintptr_t)tcb->tcb_dma.dmab_dma_address);
+ txdesc->cmd_type_offset_bsz = CPU_TO_LE64(((uint64_t)type |
+ ((uint64_t)tctx.itc_offsets << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((uint64_t)cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)tcb->tcb_dma.dmab_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)));
+
+ /*
+ * Now, finally, sync the DMA data and alert hardware.
+ */
+ I40E_DMA_SYNC(&itrq->itrq_desc_area, DDI_DMA_SYNC_FORDEV);
+
+ I40E_WRITE_REG(hw, I40E_QTX_TAIL(itrq->itrq_index),
+ itrq->itrq_desc_tail);
+ if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
+ DDI_FM_OK) {
+ /*
+ * Note, we can't really go through and clean this up very well,
+ * because the memory has been given to the device, so just
+ * indicate it's been transmitted.
+ */
+ ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_DEGRADED);
+ atomic_or_32(&i40e->i40e_state, I40E_ERROR);
+ }
+
+ txs->itxs_bytes.value.ui64 += mpsize;
+ txs->itxs_packets.value.ui64++;
+ txs->itxs_descriptors.value.ui64++;
+
+ mutex_exit(&itrq->itrq_tx_lock);
+
+ return (NULL);
+
+txfail:
+ /*
+ * We ran out of resources. Return the mblk_t to MAC and mark the ring
+ * blocked so that the recycle path knows to signal MAC when descriptors
+ * free up again. If a tcb was allocated, return it now; clear its
+ * message block pointer first, since the mblk_t itself goes back to
+ * MAC.
+ */
+ if (tcb != NULL) {
+ tcb->tcb_mp = NULL;
+ i40e_tcb_reset(tcb);
+ i40e_tcb_free(itrq, tcb);
+ }
+
+ mutex_enter(&itrq->itrq_tx_lock);
+ itrq->itrq_tx_blocked = B_TRUE;
+ mutex_exit(&itrq->itrq_tx_lock);
+
+ return (mp);
+}
diff --git a/usr/src/uts/common/io/i40e/i40e_xregs.h b/usr/src/uts/common/io/i40e/i40e_xregs.h
new file mode 100644
index 0000000000..1bf3a1f0be
--- /dev/null
+++ b/usr/src/uts/common/io/i40e/i40e_xregs.h
@@ -0,0 +1,53 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2016 Joyent, Inc.
+ */
+
+#ifndef _I40E_XREGS_H
+#define _I40E_XREGS_H
+
+/*
+ * This file contains extra register definitions and other things that would
+ * nominally come from the Intel common code, but do not due to bugs, errata,
+ * etc. Ideally we'll get to a point where we can remove this file.
+ */
+#include "i40e_type.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The MSPDC register is missing from the current datasheet.
+ */
+#define I40E_GLPRT_MSPDC(_i) (0x00300060 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MSPDC_MAX_INDEX 3
+#define I40E_GLPRT_MSPDC_MSPDC_SHIFT 0
+#define I40E_GLPRT_MSPDC_MSPDC_MASK \
+ I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MSPDC_MSPDC_SHIFT)
+
+/*
+ * The RXERR* registers are technically correct from the perspective of their
+ * addresses; however, the other associated constants are not correct. Instead,
+ * we have new definitions here in the interim.
+ */
+
+#define I40E_X_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8))
+
+#define I40E_X_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8))
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _I40E_XREGS_H */
diff --git a/usr/src/uts/common/io/inotify.c b/usr/src/uts/common/io/inotify.c
new file mode 100644
index 0000000000..baa36cfc8d
--- /dev/null
+++ b/usr/src/uts/common/io/inotify.c
@@ -0,0 +1,1504 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright (c) 2015 Joyent, Inc. All rights reserved.
+ * Copyright (c) 2015 The MathWorks, Inc. All rights reserved.
+ */
+
+/*
+ * Support for the inotify facility, a Linux-borne facility for asynchronous
+ * notification of certain events on specified files or directories. Our
+ * implementation broadly leverages the file event monitoring facility, and
+ * would actually be quite straightforward were it not for a very serious
+ * blunder in the inotify interface: in addition to allowing for one to be
+ * notified on events on a particular file or directory, inotify also allows
+ * for one to be notified on certain events on files _within_ a watched
+ * directory -- even though those events have absolutely nothing to do with
+ * the directory itself. This leads to all sorts of madness because file
+ * operations are (of course) not undertaken on paths but rather on open
+ * files -- and the relationships between open files and the paths that resolve
+ * to those files are neither static nor isomorphic. We implement this
+ * concept by having _child watches_ when directories are watched with events
+ * in IN_CHILD_EVENTS. We add child watches when a watch on a directory is
+ * first added, and we modify those child watches dynamically as files are
+ * created, deleted, moved into or moved out of the specified directory. This
+ * mechanism works well, absent hard links. Hard links, unfortunately, break
+ * this rather badly, and the user is warned that watches on directories that
+ * have multiple directory entries referring to the same file may behave
+ * unexpectedly.
+ */
+
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/inotify.h>
+#include <sys/fem.h>
+#include <sys/conf.h>
+#include <sys/stat.h>
+#include <sys/vfs_opreg.h>
+#include <sys/vmem.h>
+#include <sys/avl.h>
+#include <sys/sysmacros.h>
+#include <sys/cyclic.h>
+#include <sys/filio.h>
+
+struct inotify_state;
+struct inotify_kevent;
+
+typedef struct inotify_watch inotify_watch_t;
+typedef struct inotify_state inotify_state_t;
+typedef struct inotify_kevent inotify_kevent_t;
+
+struct inotify_watch {
+ kmutex_t inw_lock; /* lock protecting ref count */
+ int inw_refcnt; /* reference count */
+ uint8_t inw_zombie:1; /* boolean: is zombie */
+ uint8_t inw_fired:1; /* boolean: fired one-shot */
+ uint8_t inw_active:1; /* boolean: watch is active */
+ uint8_t inw_orphaned:1; /* boolean: orphaned */
+ kcondvar_t inw_cv; /* condvar for zombifier */
+ uint32_t inw_mask; /* mask of watch */
+ int32_t inw_wd; /* watch descriptor */
+ vnode_t *inw_vp; /* underlying vnode */
+ inotify_watch_t *inw_parent; /* parent, if a child */
+ avl_node_t inw_byvp; /* watches by vnode */
+ avl_node_t inw_bywd; /* watches by descriptor */
+ avl_tree_t inw_children; /* children, if a parent */
+ char *inw_name; /* name, if a child */
+ list_node_t inw_orphan; /* orphan list */
+ cred_t *inw_cred; /* cred, if orphaned */
+ inotify_state_t *inw_state; /* corresponding state */
+};
+
+struct inotify_kevent {
+ inotify_kevent_t *ine_next; /* next event in queue */
+ struct inotify_event ine_event; /* event (variable size) */
+};
+
+#define INOTIFY_EVENT_LENGTH(ev) \
+ (sizeof (inotify_kevent_t) + (ev)->ine_event.len)
+
+struct inotify_state {
+ kmutex_t ins_lock; /* lock protecting state */
+ avl_tree_t ins_byvp; /* watches by vnode */
+ avl_tree_t ins_bywd; /* watches by descriptor */
+ vmem_t *ins_wds; /* watch identifier arena */
+ int ins_maxwatches; /* maximum number of watches */
+ int ins_maxevents; /* maximum number of events */
+ int ins_nevents; /* current # of events */
+ int32_t ins_size; /* total size of events */
+ inotify_kevent_t *ins_head; /* head of event queue */
+ inotify_kevent_t *ins_tail; /* tail of event queue */
+ pollhead_t ins_pollhd; /* poll head */
+ kcondvar_t ins_cv; /* condvar for reading */
+ list_t ins_orphans; /* orphan list */
+ ddi_periodic_t ins_cleaner; /* cyclic for cleaning */
+ inotify_watch_t *ins_zombies; /* zombie watch list */
+ cred_t *ins_cred; /* creator's credentials */
+ inotify_state_t *ins_next; /* next state on global list */
+};
+
+/*
+ * Tunables (exported read-only in lx-branded zones via /proc).
+ */
+int inotify_maxwatches = 8192; /* max watches per instance */
+int inotify_maxevents = 16384; /* max events */
+int inotify_maxinstances = 128; /* max instances per user */
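+
+/*
+ * As with other kernel tunables, these can be raised via /etc/system
+ * (hypothetical sizings, shown only as an example); new values are picked
+ * up by instances opened afterward:
+ *
+ *	set inotify:inotify_maxwatches = 0x4000
+ *	set inotify:inotify_maxevents = 0x8000
+ */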
+
+/*
+ * Internal global variables.
+ */
+static kmutex_t inotify_lock; /* lock protecting state */
+static dev_info_t *inotify_devi; /* device info */
+static fem_t *inotify_femp; /* FEM pointer */
+static vmem_t *inotify_minor; /* minor number arena */
+static void *inotify_softstate; /* softstate pointer */
+static inotify_state_t *inotify_state; /* global list of state */
+
+static void inotify_watch_event(inotify_watch_t *, uint64_t, char *);
+static void inotify_watch_insert(inotify_watch_t *, vnode_t *, char *);
+static void inotify_watch_delete(inotify_watch_t *, uint32_t);
+static void inotify_watch_remove(inotify_state_t *state,
+ inotify_watch_t *watch);
+
+static int
+inotify_fop_close(femarg_t *vf, int flag, int count, offset_t offset,
+ cred_t *cr, caller_context_t *ct)
+{
+ inotify_watch_t *watch = vf->fa_fnode->fn_available;
+ int rval;
+
+ if ((rval = vnext_close(vf, flag, count, offset, cr, ct)) == 0) {
+ inotify_watch_event(watch, flag & FWRITE ?
+ IN_CLOSE_WRITE : IN_CLOSE_NOWRITE, NULL);
+ }
+
+ return (rval);
+}
+
+static int
+inotify_fop_create(femarg_t *vf, char *name, vattr_t *vap, vcexcl_t excl,
+ int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct,
+ vsecattr_t *vsecp)
+{
+ inotify_watch_t *watch = vf->fa_fnode->fn_available;
+ int rval;
+
+ if ((rval = vnext_create(vf, name, vap, excl, mode,
+ vpp, cr, flag, ct, vsecp)) == 0) {
+ inotify_watch_insert(watch, *vpp, name);
+ inotify_watch_event(watch, IN_CREATE, name);
+ }
+
+ return (rval);
+}
+
+static int
+inotify_fop_link(femarg_t *vf, vnode_t *svp, char *tnm, cred_t *cr,
+ caller_context_t *ct, int flags)
+{
+ inotify_watch_t *watch = vf->fa_fnode->fn_available;
+ int rval;
+
+ if ((rval = vnext_link(vf, svp, tnm, cr, ct, flags)) == 0) {
+ inotify_watch_insert(watch, svp, tnm);
+ inotify_watch_event(watch, IN_CREATE, tnm);
+ }
+
+ return (rval);
+}
+
+static int
+inotify_fop_mkdir(femarg_t *vf, char *name, vattr_t *vap, vnode_t **vpp,
+ cred_t *cr, caller_context_t *ct, int flags, vsecattr_t *vsecp)
+{
+ inotify_watch_t *watch = vf->fa_fnode->fn_available;
+ int rval;
+
+ if ((rval = vnext_mkdir(vf, name, vap, vpp, cr,
+ ct, flags, vsecp)) == 0) {
+ inotify_watch_insert(watch, *vpp, name);
+ inotify_watch_event(watch, IN_CREATE | IN_ISDIR, name);
+ }
+
+ return (rval);
+}
+
+static int
+inotify_fop_open(femarg_t *vf, int mode, cred_t *cr, caller_context_t *ct)
+{
+ inotify_watch_t *watch = vf->fa_fnode->fn_available;
+ int rval;
+
+ if ((rval = vnext_open(vf, mode, cr, ct)) == 0)
+ inotify_watch_event(watch, IN_OPEN, NULL);
+
+ return (rval);
+}
+
+static int
+inotify_fop_read(femarg_t *vf, struct uio *uiop, int ioflag, struct cred *cr,
+ caller_context_t *ct)
+{
+ inotify_watch_t *watch = vf->fa_fnode->fn_available;
+ int rval = vnext_read(vf, uiop, ioflag, cr, ct);
+ inotify_watch_event(watch, IN_ACCESS, NULL);
+
+ return (rval);
+}
+
+static int
+inotify_fop_readdir(femarg_t *vf, uio_t *uiop, cred_t *cr, int *eofp,
+ caller_context_t *ct, int flags)
+{
+ inotify_watch_t *watch = vf->fa_fnode->fn_available;
+ int rval = vnext_readdir(vf, uiop, cr, eofp, ct, flags);
+ inotify_watch_event(watch, IN_ACCESS | IN_ISDIR, NULL);
+
+ return (rval);
+}
+
+int
+inotify_fop_remove(femarg_t *vf, char *nm, cred_t *cr, caller_context_t *ct,
+ int flags)
+{
+ inotify_watch_t *watch = vf->fa_fnode->fn_available;
+ int rval;
+
+ if ((rval = vnext_remove(vf, nm, cr, ct, flags)) == 0)
+ inotify_watch_event(watch, IN_DELETE, nm);
+
+ return (rval);
+}
+
+int
+inotify_fop_rmdir(femarg_t *vf, char *nm, vnode_t *cdir, cred_t *cr,
+ caller_context_t *ct, int flags)
+{
+ inotify_watch_t *watch = vf->fa_fnode->fn_available;
+ int rval;
+
+ if ((rval = vnext_rmdir(vf, nm, cdir, cr, ct, flags)) == 0)
+ inotify_watch_event(watch, IN_DELETE | IN_ISDIR, nm);
+
+ return (rval);
+}
+
+static int
+inotify_fop_setattr(femarg_t *vf, vattr_t *vap, int flags, cred_t *cr,
+ caller_context_t *ct)
+{
+ inotify_watch_t *watch = vf->fa_fnode->fn_available;
+ int rval;
+
+ if ((rval = vnext_setattr(vf, vap, flags, cr, ct)) == 0)
+ inotify_watch_event(watch, IN_ATTRIB, NULL);
+
+ return (rval);
+}
+
+static int
+inotify_fop_write(femarg_t *vf, struct uio *uiop, int ioflag, struct cred *cr,
+ caller_context_t *ct)
+{
+ inotify_watch_t *watch = vf->fa_fnode->fn_available;
+ int rval = vnext_write(vf, uiop, ioflag, cr, ct);
+ inotify_watch_event(watch, IN_MODIFY, NULL);
+
+ return (rval);
+}
+
+static int
+inotify_fop_vnevent(femarg_t *vf, vnevent_t vnevent, vnode_t *dvp, char *name,
+ caller_context_t *ct)
+{
+ inotify_watch_t *watch = vf->fa_fnode->fn_available;
+
+ switch (vnevent) {
+ case VE_RENAME_SRC:
+ inotify_watch_event(watch, IN_MOVE_SELF, NULL);
+ inotify_watch_delete(watch, IN_MOVE_SELF);
+ break;
+ case VE_REMOVE:
+ /*
+ * Linux will apparently fire an IN_ATTRIB event when the link
+ * count changes (including when it drops to 0 on a remove).
+ * This is merely somewhat odd; what is amazing is that this
+ * IN_ATTRIB event is not visible on an inotify watch on the
+ * parent directory. (IN_ATTRIB events are normally sent to
+ * watches on the parent directory). While it's hard to
+ * believe that this constitutes desired semantics, ltp
+ * unfortunately tests this case (albeit implicitly); in the name
+ * of bug-for-bug compatibility, we fire IN_ATTRIB iff we are
+ * explicitly watching the file that has been removed.
+ */
+ if (watch->inw_parent == NULL)
+ inotify_watch_event(watch, IN_ATTRIB, NULL);
+
+ /*FALLTHROUGH*/
+ case VE_RENAME_DEST:
+ inotify_watch_event(watch, IN_DELETE_SELF, NULL);
+ inotify_watch_delete(watch, IN_DELETE_SELF);
+ break;
+ case VE_RMDIR:
+ /*
+ * It seems that IN_ISDIR should really be OR'd in here, but
+ * Linux doesn't seem to do that in this case; for the sake of
+ * bug-for-bug compatibility, we don't do it either.
+ */
+ inotify_watch_event(watch, IN_DELETE_SELF, NULL);
+ inotify_watch_delete(watch, IN_DELETE_SELF);
+ break;
+ case VE_CREATE:
+ case VE_TRUNCATE:
+ case VE_RESIZE:
+ inotify_watch_event(watch, IN_MODIFY | IN_ATTRIB, NULL);
+ break;
+ case VE_LINK:
+ inotify_watch_event(watch, IN_ATTRIB, NULL);
+ break;
+ case VE_RENAME_SRC_DIR:
+ inotify_watch_event(watch, IN_MOVED_FROM, name);
+ break;
+ case VE_RENAME_DEST_DIR:
+ if (name == NULL)
+ name = dvp->v_path;
+
+ inotify_watch_insert(watch, dvp, name);
+ inotify_watch_event(watch, IN_MOVED_TO, name);
+ break;
+ case VE_SUPPORT:
+ case VE_MOUNTEDOVER:
+ case VE_PRE_RENAME_SRC:
+ case VE_PRE_RENAME_DEST:
+ case VE_PRE_RENAME_DEST_DIR:
+ break;
+ }
+
+ return (vnext_vnevent(vf, vnevent, dvp, name, ct));
+}
+
+const fs_operation_def_t inotify_vnodesrc_template[] = {
+ VOPNAME_CLOSE, { .femop_close = inotify_fop_close },
+ VOPNAME_CREATE, { .femop_create = inotify_fop_create },
+ VOPNAME_LINK, { .femop_link = inotify_fop_link },
+ VOPNAME_MKDIR, { .femop_mkdir = inotify_fop_mkdir },
+ VOPNAME_OPEN, { .femop_open = inotify_fop_open },
+ VOPNAME_READ, { .femop_read = inotify_fop_read },
+ VOPNAME_READDIR, { .femop_readdir = inotify_fop_readdir },
+ VOPNAME_REMOVE, { .femop_remove = inotify_fop_remove },
+ VOPNAME_RMDIR, { .femop_rmdir = inotify_fop_rmdir },
+ VOPNAME_SETATTR, { .femop_setattr = inotify_fop_setattr },
+ VOPNAME_WRITE, { .femop_write = inotify_fop_write },
+ VOPNAME_VNEVENT, { .femop_vnevent = inotify_fop_vnevent },
+ NULL, NULL
+};
+
+static int
+inotify_watch_cmpwd(inotify_watch_t *lhs, inotify_watch_t *rhs)
+{
+ if (lhs->inw_wd < rhs->inw_wd)
+ return (-1);
+
+ if (lhs->inw_wd > rhs->inw_wd)
+ return (1);
+
+ return (0);
+}
+
+static int
+inotify_watch_cmpvp(inotify_watch_t *lhs, inotify_watch_t *rhs)
+{
+ uintptr_t lvp = (uintptr_t)lhs->inw_vp, rvp = (uintptr_t)rhs->inw_vp;
+
+ if (lvp < rvp)
+ return (-1);
+
+ if (lvp > rvp)
+ return (1);
+
+ return (0);
+}
+
+static void
+inotify_watch_hold(inotify_watch_t *watch)
+{
+ mutex_enter(&watch->inw_lock);
+ VERIFY(watch->inw_refcnt > 0);
+ watch->inw_refcnt++;
+ mutex_exit(&watch->inw_lock);
+}
+
+static void
+inotify_watch_release(inotify_watch_t *watch)
+{
+ mutex_enter(&watch->inw_lock);
+ VERIFY(watch->inw_refcnt > 1);
+
+ if (--watch->inw_refcnt == 1 && watch->inw_zombie) {
+ /*
+ * We're down to our last reference; kick anyone that might be
+ * waiting.
+ */
+ cv_signal(&watch->inw_cv);
+ }
+
+ mutex_exit(&watch->inw_lock);
+}
+
+static void
+inotify_watch_event(inotify_watch_t *watch, uint64_t mask, char *name)
+{
+ inotify_kevent_t *event, *tail;
+ inotify_state_t *state = watch->inw_state;
+ uint32_t wd = watch->inw_wd, cookie = 0, len;
+ boolean_t removal = mask & IN_REMOVAL ? B_TRUE : B_FALSE;
+ inotify_watch_t *source = watch;
+
+ if (!(mask &= watch->inw_mask) || mask == IN_ISDIR)
+ return;
+
+ if (watch->inw_parent != NULL) {
+ /*
+ * This is an event on the child; if this isn't a valid child
+ * event, return. Otherwise, we move our watch to be our
+ * parent (which we know is around because we have a hold on
+ * it) and continue.
+ */
+ if (!(mask & IN_CHILD_EVENTS))
+ return;
+
+ name = watch->inw_name;
+ watch = watch->inw_parent;
+ wd = watch->inw_wd;
+ }
+
+ if (!removal) {
+ mutex_enter(&state->ins_lock);
+
+ if (watch->inw_zombie ||
+ watch->inw_fired || !watch->inw_active) {
+ mutex_exit(&state->ins_lock);
+ return;
+ }
+ } else {
+ if (!watch->inw_active)
+ return;
+
+ VERIFY(MUTEX_HELD(&state->ins_lock));
+ }
+
+ /*
+ * If this is an operation on a directory and it's a child event
+ * (even if it's not on a child), we specify IN_ISDIR.
+ */
+ if (source->inw_vp->v_type == VDIR && (mask & IN_CHILD_EVENTS))
+ mask |= IN_ISDIR;
+
+ if (mask & (IN_MOVED_FROM | IN_MOVED_TO))
+ cookie = (uint32_t)curthread->t_did;
+
+ /*
+ * Compute the (padded) name length up front; the overflow path below
+ * relies on being able to clear it.
+ */
+ if (name != NULL) {
+ len = strlen(name) + 1;
+ len = roundup(len, sizeof (struct inotify_event));
+ } else {
+ len = 0;
+ }
+
+ if (state->ins_nevents >= state->ins_maxevents) {
+ /*
+ * We're at our maximum number of events -- turn our event
+ * into an IN_Q_OVERFLOW event, which will be coalesced if
+ * it's already the tail event.
+ */
+ mask = IN_Q_OVERFLOW;
+ wd = (uint32_t)-1;
+ cookie = 0;
+ name = NULL;
+ len = 0;
+ }
+
+ if ((tail = state->ins_tail) != NULL && tail->ine_event.wd == wd &&
+ tail->ine_event.mask == mask && tail->ine_event.cookie == cookie &&
+ ((tail->ine_event.len == 0 && len == 0) ||
+ (name != NULL && tail->ine_event.len != 0 &&
+ strcmp(tail->ine_event.name, name) == 0))) {
+ /*
+ * This is an implicitly coalesced event; we're done.
+ */
+ if (!removal)
+ mutex_exit(&state->ins_lock);
+ return;
+ }
+
+ event = kmem_zalloc(sizeof (inotify_kevent_t) + len, KM_SLEEP);
+ event->ine_event.wd = wd;
+ event->ine_event.mask = (uint32_t)mask;
+ event->ine_event.cookie = cookie;
+ event->ine_event.len = len;
+
+ if (name != NULL)
+ strcpy(event->ine_event.name, name);
+
+ if (tail != NULL) {
+ tail->ine_next = event;
+ } else {
+ VERIFY(state->ins_head == NULL);
+ state->ins_head = event;
+ cv_broadcast(&state->ins_cv);
+ }
+
+ state->ins_tail = event;
+ state->ins_nevents++;
+ state->ins_size += sizeof (event->ine_event) + len;
+
+ if (removal)
+ return;
+
+ if ((watch->inw_mask & IN_ONESHOT) && !watch->inw_fired) {
+ /*
+ * If this is a one-shot, we need to remove the watch. (Note
+ * that this will recurse back into inotify_watch_event() to
+ * fire the IN_IGNORED event -- but with "removal" set.)
+ */
+ watch->inw_fired = 1;
+ inotify_watch_remove(state, watch);
+ }
+
+ mutex_exit(&state->ins_lock);
+ pollwakeup(&state->ins_pollhd, POLLRDNORM | POLLIN);
+}
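+
+/*
+ * Worked example of the event sizing above (illustrative, assuming the
+ * Linux-compatible layout of four 32-bit fields in struct inotify_event):
+ * sizeof (struct inotify_event) is 16, so a child event for a file named
+ * "foo" has a raw name length of 4 (NUL included) which roundup() pads to
+ * 16; the queued event occupies sizeof (inotify_kevent_t) + 16 bytes and
+ * reads back from the device as a single 32-byte record.
+ */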
+
+/*
+ * Destroy a watch. By the time we're in here, the watch must have exactly
+ * one reference.
+ */
+static void
+inotify_watch_destroy(inotify_watch_t *watch)
+{
+ VERIFY(MUTEX_HELD(&watch->inw_lock));
+
+ if (watch->inw_name != NULL)
+ kmem_free(watch->inw_name, strlen(watch->inw_name) + 1);
+
+ kmem_free(watch, sizeof (inotify_watch_t));
+}
+
+/*
+ * Zombify a watch. By the time we come in here, it must be true that the
+ * watch has already been fem_uninstall()'d -- the only reference should be
+ * in the state's data structure. If we can get away with freeing it, we'll
+ * do that -- but if the reference count is greater than one due to an active
+ * vnode operation, we'll put this watch on the zombie list on the state
+ * structure.
+ */
+static void
+inotify_watch_zombify(inotify_watch_t *watch)
+{
+ inotify_state_t *state = watch->inw_state;
+
+ VERIFY(MUTEX_HELD(&state->ins_lock));
+ VERIFY(!watch->inw_zombie);
+
+ watch->inw_zombie = 1;
+
+ if (watch->inw_parent != NULL) {
+ inotify_watch_release(watch->inw_parent);
+ } else {
+ avl_remove(&state->ins_byvp, watch);
+ avl_remove(&state->ins_bywd, watch);
+ vmem_free(state->ins_wds, (void *)(uintptr_t)watch->inw_wd, 1);
+ watch->inw_wd = -1;
+ }
+
+ mutex_enter(&watch->inw_lock);
+
+ if (watch->inw_refcnt == 1) {
+ /*
+ * There are no operations in flight and there is no way
+ * for anyone to discover this watch -- we can destroy it.
+ */
+ inotify_watch_destroy(watch);
+ } else {
+ /*
+ * There are operations in flight; we will need to enqueue
+ * this for later destruction.
+ */
+ watch->inw_parent = state->ins_zombies;
+ state->ins_zombies = watch;
+ mutex_exit(&watch->inw_lock);
+ }
+}
+
+static inotify_watch_t *
+inotify_watch_add(inotify_state_t *state, inotify_watch_t *parent,
+ const char *name, vnode_t *vp, uint32_t mask)
+{
+ inotify_watch_t *watch;
+ int err;
+
+ VERIFY(MUTEX_HELD(&state->ins_lock));
+
+ watch = kmem_zalloc(sizeof (inotify_watch_t), KM_SLEEP);
+
+ watch->inw_vp = vp;
+ watch->inw_mask = mask;
+ watch->inw_state = state;
+ watch->inw_refcnt = 1;
+
+ if (parent == NULL) {
+ watch->inw_wd = (int)(uintptr_t)vmem_alloc(state->ins_wds,
+ 1, VM_BESTFIT | VM_SLEEP);
+ avl_add(&state->ins_byvp, watch);
+ avl_add(&state->ins_bywd, watch);
+
+ avl_create(&watch->inw_children,
+ (int(*)(const void *, const void *))inotify_watch_cmpvp,
+ sizeof (inotify_watch_t),
+ offsetof(inotify_watch_t, inw_byvp));
+ } else {
+ VERIFY(name != NULL);
+ inotify_watch_hold(parent);
+ watch->inw_mask &= IN_CHILD_EVENTS;
+ watch->inw_parent = parent;
+ watch->inw_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
+ strcpy(watch->inw_name, name);
+
+ avl_add(&parent->inw_children, watch);
+ }
+
+ /*
+ * Add our monitor to the vnode. We must not have the watch lock held
+ * when we do this, as it will immediately hold our watch.
+ */
+ err = fem_install(vp, inotify_femp, watch, OPARGUNIQ,
+ (void (*)(void *))inotify_watch_hold,
+ (void (*)(void *))inotify_watch_release);
+
+ VERIFY(err == 0);
+
+ return (watch);
+}
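+
+/*
+ * A note on the watch descriptor allocation above: ins_wds is created with
+ * VMC_IDENTIFIER (see inotify_open()), which turns vmem into a pure
+ * integer-ID allocator. Illustrative sketch, not driver code:
+ *
+ *	vmem_t *ids = vmem_create("ids", (void *)1, UINT32_MAX, 1,
+ *	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
+ *	int32_t wd = (int32_t)(uintptr_t)vmem_alloc(ids, 1,
+ *	    VM_BESTFIT | VM_SLEEP);
+ *	vmem_free(ids, (void *)(uintptr_t)wd, 1);
+ *
+ * Descriptors therefore start at 1 and are recycled as watches go away.
+ */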
+
+/*
+ * Remove a (non-child) watch. This is called from either synchronous context
+ * via inotify_rm_watch() or monitor context via either a vnevent or a
+ * one-shot.
+ */
+static void
+inotify_watch_remove(inotify_state_t *state, inotify_watch_t *watch)
+{
+ inotify_watch_t *child;
+ int err;
+
+ VERIFY(MUTEX_HELD(&state->ins_lock));
+ VERIFY(watch->inw_parent == NULL);
+
+ err = fem_uninstall(watch->inw_vp, inotify_femp, watch);
+ VERIFY(err == 0);
+
+ /*
+ * If we have children, we're going to remove them all and set them
+ * all to be zombies.
+ */
+ while ((child = avl_first(&watch->inw_children)) != NULL) {
+ VERIFY(child->inw_parent == watch);
+ avl_remove(&watch->inw_children, child);
+
+ err = fem_uninstall(child->inw_vp, inotify_femp, child);
+ VERIFY(err == 0);
+
+ /*
+ * If this child watch has been orphaned, remove it from the
+ * state's list of orphans.
+ */
+ if (child->inw_orphaned) {
+ list_remove(&state->ins_orphans, child);
+ crfree(child->inw_cred);
+ }
+
+ VN_RELE(child->inw_vp);
+
+ /*
+ * We're down (or should be down) to a single reference to
+ * this child watch; it's safe to zombify it.
+ */
+ inotify_watch_zombify(child);
+ }
+
+ inotify_watch_event(watch, IN_IGNORED | IN_REMOVAL, NULL);
+ VN_RELE(watch->inw_vp);
+
+ /*
+ * It's now safe to zombify the watch -- we know that the only reference
+ * can come from operations in flight.
+ */
+ inotify_watch_zombify(watch);
+}
+
+/*
+ * Delete a watch. Should only be called from VOP context.
+ */
+static void
+inotify_watch_delete(inotify_watch_t *watch, uint32_t event)
+{
+ inotify_state_t *state = watch->inw_state;
+ inotify_watch_t cmp = { .inw_vp = watch->inw_vp }, *parent;
+ int err;
+
+ if (event != IN_DELETE_SELF && !(watch->inw_mask & IN_CHILD_EVENTS))
+ return;
+
+ mutex_enter(&state->ins_lock);
+
+ if (watch->inw_zombie) {
+ mutex_exit(&state->ins_lock);
+ return;
+ }
+
+ if ((parent = watch->inw_parent) == NULL) {
+ if (event == IN_DELETE_SELF) {
+ /*
+ * If we're here because we're being deleted and we
+ * are not a child watch, we need to delete the entire
+ * watch, children and all.
+ */
+ inotify_watch_remove(state, watch);
+ }
+
+ mutex_exit(&state->ins_lock);
+ return;
+ } else {
+ if (event == IN_DELETE_SELF &&
+ !(parent->inw_mask & IN_EXCL_UNLINK)) {
+ /*
+ * This is a child watch for a file that is being
+ * removed and IN_EXCL_UNLINK has not been specified;
+ * indicate that it is orphaned and add it to the list
+ * of orphans. (This list will be checked by the
+ * cleaning cyclic to determine when the watch has
+ * become the only hold on the vnode, at which point
+ * the watch can be zombified.) Note that we check
+ * if the watch is orphaned before we orphan it: hard
+ * links make it possible for VE_REMOVE to be called
+ * multiple times on the same vnode. (!)
+ */
+ if (!watch->inw_orphaned) {
+ watch->inw_orphaned = 1;
+ watch->inw_cred = CRED();
+ crhold(watch->inw_cred);
+ list_insert_head(&state->ins_orphans, watch);
+ }
+
+ mutex_exit(&state->ins_lock);
+ return;
+ }
+
+ if (watch->inw_orphaned) {
+ /*
+ * If we're here, a file was orphaned and then later
+ * moved -- which almost certainly means that hard
+ * links are on the scene. We choose the orphan over
+ * the move because we don't want to spuriously
+ * drop events if we can avoid it.
+ */
+ crfree(watch->inw_cred);
+ list_remove(&state->ins_orphans, watch);
+ }
+ }
+
+ if (avl_find(&parent->inw_children, &cmp, NULL) == NULL) {
+ /*
+ * This watch has already been deleted from the parent.
+ */
+ mutex_exit(&state->ins_lock);
+ return;
+ }
+
+ avl_remove(&parent->inw_children, watch);
+ err = fem_uninstall(watch->inw_vp, inotify_femp, watch);
+ VERIFY(err == 0);
+
+ VN_RELE(watch->inw_vp);
+
+ /*
+ * It's now safe to zombify the watch -- which won't actually delete
+ * it as we know that the reference count is greater than 1.
+ */
+ inotify_watch_zombify(watch);
+ mutex_exit(&state->ins_lock);
+}
+
+/*
+ * Insert a new child watch. Should only be called from VOP context when
+ * a child is created in a watched directory.
+ */
+static void
+inotify_watch_insert(inotify_watch_t *watch, vnode_t *vp, char *name)
+{
+ inotify_state_t *state = watch->inw_state;
+ inotify_watch_t cmp = { .inw_vp = vp };
+
+ if (!(watch->inw_mask & IN_CHILD_EVENTS))
+ return;
+
+ mutex_enter(&state->ins_lock);
+
+ if (watch->inw_zombie || watch->inw_parent != NULL || vp == NULL) {
+ mutex_exit(&state->ins_lock);
+ return;
+ }
+
+ if (avl_find(&watch->inw_children, &cmp, NULL) != NULL) {
+ mutex_exit(&state->ins_lock);
+ return;
+ }
+
+ VN_HOLD(vp);
+ watch = inotify_watch_add(state, watch, name, vp, watch->inw_mask);
+ VERIFY(watch != NULL);
+
+ mutex_exit(&state->ins_lock);
+}
+
+
+static int
+inotify_add_watch(inotify_state_t *state, vnode_t *vp, uint32_t mask,
+ int32_t *wdp)
+{
+ inotify_watch_t *watch, cmp = { .inw_vp = vp };
+ uint32_t set;
+
+ set = (mask & (IN_ALL_EVENTS | IN_MODIFIERS)) | IN_UNMASKABLE;
+
+ /*
+ * Lookup our vnode to determine if we already have a watch on it.
+ */
+ mutex_enter(&state->ins_lock);
+
+ if ((watch = avl_find(&state->ins_byvp, &cmp, NULL)) == NULL) {
+ /*
+ * We don't have this watch; allocate a new one, provided that
+ * we have fewer than our limit.
+ */
+ if (avl_numnodes(&state->ins_bywd) >= state->ins_maxwatches) {
+ mutex_exit(&state->ins_lock);
+ return (ENOSPC);
+ }
+
+ VN_HOLD(vp);
+ watch = inotify_watch_add(state, NULL, NULL, vp, set);
+ *wdp = watch->inw_wd;
+ mutex_exit(&state->ins_lock);
+
+ return (0);
+ }
+
+ VERIFY(!watch->inw_zombie);
+
+ if (!(mask & IN_MASK_ADD)) {
+ /*
+ * Note that if we're resetting our event mask and we're
+ * transitioning from an event mask that includes child events
+ * to one that doesn't, there will be potentially some stale
+ * child watches. This is basically fine: they won't fire,
+ * and they will correctly be removed when the watch is
+ * removed.
+ */
+ watch->inw_mask = 0;
+ }
+
+ watch->inw_mask |= set;
+
+ *wdp = watch->inw_wd;
+
+ mutex_exit(&state->ins_lock);
+
+ return (0);
+}
+
+static int
+inotify_add_child(inotify_state_t *state, vnode_t *vp, char *name)
+{
+ inotify_watch_t *watch, cmp = { .inw_vp = vp };
+ vnode_t *cvp;
+ int err;
+
+ /*
+ * Verify that the specified child doesn't have a directory component
+ * within it.
+ */
+ if (strchr(name, '/') != NULL)
+ return (EINVAL);
+
+ /*
+ * Lookup the underlying file. Note that this will succeed even if
+ * we don't have permissions to actually read the file.
+ */
+ if ((err = lookupnameat(name,
+ UIO_SYSSPACE, NO_FOLLOW, NULL, &cvp, vp)) != 0) {
+ return (err);
+ }
+
+ /*
+ * Use our vnode to find our watch, and then add our child watch to it.
+ */
+ mutex_enter(&state->ins_lock);
+
+ if ((watch = avl_find(&state->ins_byvp, &cmp, NULL)) == NULL) {
+ /*
+ * This is unexpected -- it means that we don't have the
+ * watch that we thought we had.
+ */
+ mutex_exit(&state->ins_lock);
+ VN_RELE(cvp);
+ return (ENXIO);
+ }
+
+ /*
+ * Now lookup the child vnode in the watch; we'll only add it if it
+ * isn't already there.
+ */
+ cmp.inw_vp = cvp;
+
+ if (avl_find(&watch->inw_children, &cmp, NULL) != NULL) {
+ mutex_exit(&state->ins_lock);
+ VN_RELE(cvp);
+ return (0);
+ }
+
+ watch = inotify_watch_add(state, watch, name, cvp, watch->inw_mask);
+ VERIFY(watch != NULL);
+ mutex_exit(&state->ins_lock);
+
+ return (0);
+}
+
+static int
+inotify_rm_watch(inotify_state_t *state, int32_t wd)
+{
+ inotify_watch_t *watch, cmp = { .inw_wd = wd };
+
+ mutex_enter(&state->ins_lock);
+
+ if ((watch = avl_find(&state->ins_bywd, &cmp, NULL)) == NULL) {
+ mutex_exit(&state->ins_lock);
+ return (EINVAL);
+ }
+
+ inotify_watch_remove(state, watch);
+ mutex_exit(&state->ins_lock);
+
+ return (0);
+}
+
+static int
+inotify_activate(inotify_state_t *state, int32_t wd)
+{
+ inotify_watch_t *watch, cmp = { .inw_wd = wd };
+
+ mutex_enter(&state->ins_lock);
+
+ if ((watch = avl_find(&state->ins_bywd, &cmp, NULL)) == NULL) {
+ mutex_exit(&state->ins_lock);
+ return (EINVAL);
+ }
+
+ watch->inw_active = 1;
+
+ mutex_exit(&state->ins_lock);
+
+ return (0);
+}
+
+/*
+ * Called periodically as a cyclic to process the orphans and zombies.
+ */
+static void
+inotify_clean(void *arg)
+{
+ inotify_state_t *state = arg;
+ inotify_watch_t *watch, *parent, *next, **prev;
+ cred_t *savecred;
+ int err;
+
+ mutex_enter(&state->ins_lock);
+
+ for (watch = list_head(&state->ins_orphans);
+ watch != NULL; watch = next) {
+ next = list_next(&state->ins_orphans, watch);
+
+ VERIFY(!watch->inw_zombie);
+ VERIFY((parent = watch->inw_parent) != NULL);
+
+ if (watch->inw_vp->v_count > 1)
+ continue;
+
+ avl_remove(&parent->inw_children, watch);
+ err = fem_uninstall(watch->inw_vp, inotify_femp, watch);
+ VERIFY(err == 0);
+
+ list_remove(&state->ins_orphans, watch);
+
+ /*
+ * For purposes of releasing the vnode, we need to switch our
+ * cred to be the cred of the orphaning thread (which we held
+ * at the time this watch was orphaned).
+ */
+ savecred = curthread->t_cred;
+ curthread->t_cred = watch->inw_cred;
+ VN_RELE(watch->inw_vp);
+ crfree(watch->inw_cred);
+ curthread->t_cred = savecred;
+
+ inotify_watch_zombify(watch);
+ }
+
+ prev = &state->ins_zombies;
+
+ while ((watch = *prev) != NULL) {
+ mutex_enter(&watch->inw_lock);
+
+ if (watch->inw_refcnt == 1) {
+ *prev = watch->inw_parent;
+ inotify_watch_destroy(watch);
+ continue;
+ }
+
+ prev = &watch->inw_parent;
+ mutex_exit(&watch->inw_lock);
+ }
+
+ mutex_exit(&state->ins_lock);
+}
+
+/*ARGSUSED*/
+static int
+inotify_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
+{
+ inotify_state_t *state;
+ major_t major = getemajor(*devp);
+ minor_t minor = getminor(*devp);
+ int instances = 0;
+ char c[64];
+
+ if (minor != INOTIFYMNRN_INOTIFY)
+ return (ENXIO);
+
+ mutex_enter(&inotify_lock);
+
+ for (state = inotify_state; state != NULL; state = state->ins_next) {
+ if (state->ins_cred == cred_p)
+ instances++;
+ }
+
+ if (instances >= inotify_maxinstances) {
+ mutex_exit(&inotify_lock);
+ return (EMFILE);
+ }
+
+ minor = (minor_t)(uintptr_t)vmem_alloc(inotify_minor, 1,
+ VM_BESTFIT | VM_SLEEP);
+
+ if (ddi_soft_state_zalloc(inotify_softstate, minor) != DDI_SUCCESS) {
+ vmem_free(inotify_minor, (void *)(uintptr_t)minor, 1);
+ mutex_exit(&inotify_lock);
+ return (ENOMEM);
+ }
+
+ state = ddi_get_soft_state(inotify_softstate, minor);
+ *devp = makedevice(major, minor);
+
+ crhold(cred_p);
+ state->ins_cred = cred_p;
+ state->ins_next = inotify_state;
+ inotify_state = state;
+
+ (void) snprintf(c, sizeof (c), "inotify_watchid_%d", minor);
+ state->ins_wds = vmem_create(c, (void *)1, UINT32_MAX, 1,
+ NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
+
+ avl_create(&state->ins_bywd,
+ (int(*)(const void *, const void *))inotify_watch_cmpwd,
+ sizeof (inotify_watch_t),
+ offsetof(inotify_watch_t, inw_bywd));
+
+ avl_create(&state->ins_byvp,
+ (int(*)(const void *, const void *))inotify_watch_cmpvp,
+ sizeof (inotify_watch_t),
+ offsetof(inotify_watch_t, inw_byvp));
+
+ list_create(&state->ins_orphans, sizeof (inotify_watch_t),
+ offsetof(inotify_watch_t, inw_orphan));
+
+ state->ins_maxwatches = inotify_maxwatches;
+ state->ins_maxevents = inotify_maxevents;
+
+ mutex_exit(&inotify_lock);
+
+ state->ins_cleaner = ddi_periodic_add(inotify_clean,
+ state, NANOSEC, DDI_IPL_0);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+inotify_read(dev_t dev, uio_t *uio, cred_t *cr)
+{
+ inotify_state_t *state;
+ inotify_kevent_t *event;
+ minor_t minor = getminor(dev);
+ int err = 0, nevents = 0;
+ size_t len;
+
+ state = ddi_get_soft_state(inotify_softstate, minor);
+
+ mutex_enter(&state->ins_lock);
+
+ while (state->ins_head == NULL) {
+ if (uio->uio_fmode & (FNDELAY|FNONBLOCK)) {
+ mutex_exit(&state->ins_lock);
+ return (EAGAIN);
+ }
+
+ if (!cv_wait_sig_swap(&state->ins_cv, &state->ins_lock)) {
+ mutex_exit(&state->ins_lock);
+ return (EINTR);
+ }
+ }
+
+ /*
+ * We have events and we have our lock; return as many as we can.
+ */
+ while ((event = state->ins_head) != NULL) {
+ len = sizeof (event->ine_event) + event->ine_event.len;
+
+ if (uio->uio_resid < len) {
+ if (nevents == 0)
+ err = EINVAL;
+ break;
+ }
+
+ nevents++;
+
+ if ((err = uiomove(&event->ine_event, len, UIO_READ, uio)) != 0)
+ break;
+
+ VERIFY(state->ins_nevents > 0);
+ state->ins_nevents--;
+
+ VERIFY(state->ins_size > 0);
+ state->ins_size -= len;
+
+ if ((state->ins_head = event->ine_next) == NULL) {
+ VERIFY(event == state->ins_tail);
+ VERIFY(state->ins_nevents == 0);
+ state->ins_tail = NULL;
+ }
+
+ kmem_free(event, INOTIFY_EVENT_LENGTH(event));
+ }
+
+ mutex_exit(&state->ins_lock);
+
+ return (err);
+}
+
+/*ARGSUSED*/
+static int
+inotify_poll(dev_t dev, short events, int anyyet, short *reventsp,
+ struct pollhead **phpp)
+{
+ inotify_state_t *state;
+ minor_t minor = getminor(dev);
+
+ state = ddi_get_soft_state(inotify_softstate, minor);
+
+ mutex_enter(&state->ins_lock);
+
+ if (state->ins_head != NULL) {
+ *reventsp = events & (POLLRDNORM | POLLIN);
+ } else {
+ *reventsp = 0;
+
+ if (!anyyet)
+ *phpp = &state->ins_pollhd;
+ }
+
+ mutex_exit(&state->ins_lock);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+inotify_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
+{
+ inotify_state_t *state;
+ minor_t minor = getminor(dev);
+ file_t *fp;
+ int rval;
+
+ state = ddi_get_soft_state(inotify_softstate, minor);
+
+ switch (cmd) {
+ case INOTIFYIOC_ADD_WATCH: {
+ inotify_addwatch_t addwatch;
+
+ if (copyin((void *)arg, &addwatch, sizeof (addwatch)) != 0)
+ return (EFAULT);
+
+ if ((fp = getf(addwatch.inaw_fd)) == NULL)
+ return (EBADF);
+
+ rval = inotify_add_watch(state, fp->f_vnode,
+ addwatch.inaw_mask, rv);
+
+ releasef(addwatch.inaw_fd);
+ return (rval);
+ }
+
+ case INOTIFYIOC_ADD_CHILD: {
+ inotify_addchild_t addchild;
+ char name[MAXPATHLEN];
+
+ if (copyin((void *)arg, &addchild, sizeof (addchild)) != 0)
+ return (EFAULT);
+
+ if (copyinstr(addchild.inac_name, name, MAXPATHLEN, NULL) != 0)
+ return (EFAULT);
+
+ if ((fp = getf(addchild.inac_fd)) == NULL)
+ return (EBADF);
+
+ rval = inotify_add_child(state, fp->f_vnode, name);
+
+ releasef(addchild.inac_fd);
+ return (rval);
+ }
+
+ case INOTIFYIOC_RM_WATCH:
+ return (inotify_rm_watch(state, arg));
+
+ case INOTIFYIOC_ACTIVATE:
+ return (inotify_activate(state, arg));
+
+ case FIONREAD: {
+ int32_t size;
+
+ mutex_enter(&state->ins_lock);
+ size = state->ins_size;
+ mutex_exit(&state->ins_lock);
+
+ if (copyout(&size, (void *)arg, sizeof (size)) != 0)
+ return (EFAULT);
+
+ return (0);
+ }
+
+ default:
+ break;
+ }
+
+ return (ENOTTY);
+}
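+
+/*
+ * Illustrative userland sketch of the ioctl interface above (assuming the
+ * minor node appears as /dev/inotify; error handling elided):
+ *
+ *	int fd = open("/dev/inotify", O_RDONLY);
+ *	inotify_addwatch_t aw = { .inaw_fd = dirfd, .inaw_mask = IN_CREATE };
+ *	int wd = ioctl(fd, INOTIFYIOC_ADD_WATCH, &aw);
+ *	(void) ioctl(fd, INOTIFYIOC_ACTIVATE, wd);
+ *	char buf[4096];
+ *	ssize_t n = read(fd, buf, sizeof (buf));
+ *
+ * Note that inotify_read() fails with EINVAL rather than truncating when
+ * the buffer cannot hold the event at the head of the queue.
+ */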
+
+/*ARGSUSED*/
+static int
+inotify_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
+{
+ inotify_state_t *state, **sp;
+ inotify_watch_t *watch, *zombies;
+ inotify_kevent_t *event;
+ minor_t minor = getminor(dev);
+
+ state = ddi_get_soft_state(inotify_softstate, minor);
+
+ if (state->ins_pollhd.ph_list != NULL) {
+ pollwakeup(&state->ins_pollhd, POLLERR);
+ pollhead_clean(&state->ins_pollhd);
+ }
+
+ mutex_enter(&state->ins_lock);
+
+ /*
+ * First, destroy all of our watches.
+ */
+ while ((watch = avl_first(&state->ins_bywd)) != NULL)
+ inotify_watch_remove(state, watch);
+
+ /*
+ * And now destroy our event queue.
+ */
+ while ((event = state->ins_head) != NULL) {
+ state->ins_head = event->ine_next;
+ kmem_free(event, INOTIFY_EVENT_LENGTH(event));
+ }
+
+ zombies = state->ins_zombies;
+ state->ins_zombies = NULL;
+ mutex_exit(&state->ins_lock);
+
+ /*
+ * Now that our state lock is dropped, we can synchronously wait on
+ * any zombies.
+ */
+ while ((watch = zombies) != NULL) {
+ zombies = zombies->inw_parent;
+
+ mutex_enter(&watch->inw_lock);
+
+ while (watch->inw_refcnt > 1)
+ cv_wait(&watch->inw_cv, &watch->inw_lock);
+
+ inotify_watch_destroy(watch);
+ }
+
+ if (state->ins_cleaner != NULL) {
+ ddi_periodic_delete(state->ins_cleaner);
+ state->ins_cleaner = NULL;
+ }
+
+ mutex_enter(&inotify_lock);
+
+ /*
+ * Remove our state from our global list, and release our hold on
+ * the cred.
+ */
+ for (sp = &inotify_state; *sp != state; sp = &((*sp)->ins_next))
+ VERIFY(*sp != NULL);
+
+ *sp = (*sp)->ins_next;
+ crfree(state->ins_cred);
+ vmem_destroy(state->ins_wds);
+
+ ddi_soft_state_free(inotify_softstate, minor);
+ vmem_free(inotify_minor, (void *)(uintptr_t)minor, 1);
+
+ mutex_exit(&inotify_lock);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+inotify_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
+{
+ mutex_enter(&inotify_lock);
+
+ if (ddi_soft_state_init(&inotify_softstate,
+ sizeof (inotify_state_t), 0) != 0) {
+ cmn_err(CE_NOTE, "/dev/inotify failed to create soft state");
+ mutex_exit(&inotify_lock);
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_create_minor_node(devi, "inotify", S_IFCHR,
+ INOTIFYMNRN_INOTIFY, DDI_PSEUDO, NULL) == DDI_FAILURE) {
+ cmn_err(CE_NOTE, "/dev/inotify couldn't create minor node");
+ ddi_soft_state_fini(&inotify_softstate);
+ mutex_exit(&inotify_lock);
+ return (DDI_FAILURE);
+ }
+
+ if (fem_create("inotify_fem",
+ inotify_vnodesrc_template, &inotify_femp) != 0) {
+ cmn_err(CE_NOTE, "/dev/inotify couldn't create FEM state");
+ ddi_remove_minor_node(devi, NULL);
+ ddi_soft_state_fini(&inotify_softstate);
+ mutex_exit(&inotify_lock);
+ return (DDI_FAILURE);
+ }
+
+ ddi_report_dev(devi);
+ inotify_devi = devi;
+
+ inotify_minor = vmem_create("inotify_minor", (void *)INOTIFYMNRN_CLONE,
+ UINT32_MAX - INOTIFYMNRN_CLONE, 1, NULL, NULL, NULL, 0,
+ VM_SLEEP | VMC_IDENTIFIER);
+
+ mutex_exit(&inotify_lock);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static int
+inotify_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ switch (cmd) {
+ case DDI_DETACH:
+ break;
+
+ case DDI_SUSPEND:
+ return (DDI_SUCCESS);
+
+ default:
+ return (DDI_FAILURE);
+ }
+
+ mutex_enter(&inotify_lock);
+ fem_free(inotify_femp);
+ vmem_destroy(inotify_minor);
+
+ ddi_remove_minor_node(inotify_devi, NULL);
+ inotify_devi = NULL;
+
+ ddi_soft_state_fini(&inotify_softstate);
+ mutex_exit(&inotify_lock);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static int
+inotify_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
+{
+ int error;
+
+ switch (infocmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ *result = (void *)inotify_devi;
+ error = DDI_SUCCESS;
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+ *result = (void *)0;
+ error = DDI_SUCCESS;
+ break;
+ default:
+ error = DDI_FAILURE;
+ }
+ return (error);
+}
+
+static struct cb_ops inotify_cb_ops = {
+ inotify_open, /* open */
+ inotify_close, /* close */
+ nulldev, /* strategy */
+ nulldev, /* print */
+ nodev, /* dump */
+ inotify_read, /* read */
+ nodev, /* write */
+ inotify_ioctl, /* ioctl */
+ nodev, /* devmap */
+ nodev, /* mmap */
+ nodev, /* segmap */
+ inotify_poll, /* poll */
+ ddi_prop_op, /* cb_prop_op */
+ 0, /* streamtab */
+ D_NEW | D_MP /* Driver compatibility flag */
+};
+
+static struct dev_ops inotify_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* refcnt */
+ inotify_info, /* get_dev_info */
+ nulldev, /* identify */
+ nulldev, /* probe */
+ inotify_attach, /* attach */
+ inotify_detach, /* detach */
+ nodev, /* reset */
+ &inotify_cb_ops, /* driver operations */
+ NULL, /* bus operations */
+ nodev, /* dev power */
+ ddi_quiesce_not_needed, /* quiesce */
+};
+
+static struct modldrv modldrv = {
+ &mod_driverops, /* module type (this is a pseudo driver) */
+ "inotify support", /* name of module */
+ &inotify_ops, /* driver ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1,
+ (void *)&modldrv,
+ NULL
+};
+
+int
+_init(void)
+{
+ return (mod_install(&modlinkage));
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+int
+_fini(void)
+{
+ return (mod_remove(&modlinkage));
+}
diff --git a/usr/src/uts/common/io/inotify.conf b/usr/src/uts/common/io/inotify.conf
new file mode 100644
index 0000000000..ce9da6180f
--- /dev/null
+++ b/usr/src/uts/common/io/inotify.conf
@@ -0,0 +1,16 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2014 Joyent, Inc. All rights reserved.
+#
+
+name="inotify" parent="pseudo" instance=0;
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_main.c b/usr/src/uts/common/io/ixgbe/ixgbe_main.c
index 848e3470c7..c29a762e06 100644
--- a/usr/src/uts/common/io/ixgbe/ixgbe_main.c
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_main.c
@@ -271,7 +271,7 @@ static adapter_info_t ixgbe_82599eb_cap = {
128, /* default number of rx queues */
64, /* maximum number of rx groups */
1, /* minimum number of rx groups */
- 1, /* default number of rx groups */
+ 32, /* default number of rx groups */
128, /* maximum number of tx queues */
1, /* minimum number of tx queues */
8, /* default number of tx queues */
@@ -302,7 +302,7 @@ static adapter_info_t ixgbe_X540_cap = {
128, /* default number of rx queues */
64, /* maximum number of rx groups */
1, /* minimum number of rx groups */
- 1, /* default number of rx groups */
+ 32, /* default number of rx groups */
128, /* maximum number of tx queues */
1, /* minimum number of tx queues */
8, /* default number of tx queues */
@@ -1792,6 +1792,7 @@ ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
void *arg1, void *arg2)
{
ixgbe_t *ixgbe = (ixgbe_t *)arg1;
+ int prev = ixgbe->intr_cnt;
switch (cbaction) {
/* IRM callback */
@@ -1805,7 +1806,8 @@ ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
DDI_SUCCESS) {
ixgbe_error(ixgbe,
- "IRM CB: Failed to adjust interrupts");
+ "IRM CB: Failed to adjust interrupts [%d %d %d]",
+ cbaction, count, prev);
goto cb_fail;
}
break;
diff --git a/usr/src/uts/common/io/ksocket/ksocket.c b/usr/src/uts/common/io/ksocket/ksocket.c
index 8944fcbff3..25da45be39 100644
--- a/usr/src/uts/common/io/ksocket/ksocket.c
+++ b/usr/src/uts/common/io/ksocket/ksocket.c
@@ -22,7 +22,7 @@
/*
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Joyent, Inc. All rights reserved.
+ * Copyright 2015, Joyent, Inc.
*/
#include <sys/file.h>
@@ -932,3 +932,15 @@ ksocket_rele(ksocket_t ks)
cv_signal(&so->so_closing_cv);
}
}
+
+int
+ksocket_krecv_set(ksocket_t ks, ksocket_krecv_f cb, void *arg)
+{
+ return (so_krecv_set(KSTOSO(ks), (so_krecv_f)cb, arg));
+}
+
+void
+ksocket_krecv_unblock(ksocket_t ks)
+{
+ so_krecv_unblock(KSTOSO(ks));
+}
diff --git a/usr/src/uts/common/io/ksocket/ksocket_impl.h b/usr/src/uts/common/io/ksocket/ksocket_impl.h
index ac5251540f..516a68d358 100644
--- a/usr/src/uts/common/io/ksocket/ksocket_impl.h
+++ b/usr/src/uts/common/io/ksocket/ksocket_impl.h
@@ -22,11 +22,17 @@
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright 2015, Joyent, Inc.
*/
#ifndef _INET_KSOCKET_KSOCKET_IMPL_H
#define _INET_KSOCKET_KSOCKET_IMPL_H
+/*
+ * Note that if this relationship ever changes, the logic in ksocket_krecv_set
+ * must be updated and we must maintain local state about this on whatever the
+ * new ksocket object is.
+ */
#define KSTOSO(ks) ((struct sonode *)(ks))
#define SOTOKS(so) ((ksocket_t)(uintptr_t)(so))
diff --git a/usr/src/uts/common/io/ksyms.c b/usr/src/uts/common/io/ksyms.c
index c9f0c63b69..5233fcd0b4 100644
--- a/usr/src/uts/common/io/ksyms.c
+++ b/usr/src/uts/common/io/ksyms.c
@@ -21,6 +21,7 @@
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright 2016 Joyent, Inc.
*/
@@ -219,6 +220,14 @@ ksyms_open(dev_t *devp, int flag, int otyp, struct cred *cred)
char *addr;
void *hptr = NULL;
ksyms_buflist_hdr_t hdr;
+
+ /*
+ * This device should never be visible in a zone, but if it somehow
+ * does get created we refuse to allow the zone to use it.
+ */
+ if (crgetzoneid(cred) != GLOBAL_ZONEID)
+ return (EACCES);
+
bzero(&hdr, sizeof (struct ksyms_buflist_hdr));
list_create(&hdr.blist, PAGESIZE,
offsetof(ksyms_buflist_t, buflist_node));
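
The guard added to ksyms_open() is the same pattern applied to mem.c later in
this change: a device that should only exist in the global zone double-checks
the caller's credential in its open(9E) routine. A minimal sketch of the
pattern, with a hypothetical driver prefix:

	static int
	xx_open(dev_t *devp, int flag, int otyp, struct cred *cred)
	{
		/*
		 * The node should never appear in a non-global zone; if it
		 * somehow does, refuse to let that zone use it.
		 */
		if (crgetzoneid(cred) != GLOBAL_ZONEID)
			return (EACCES);
		/* ... normal open path ... */
		return (0);
	}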
diff --git a/usr/src/uts/common/io/mac/mac.c b/usr/src/uts/common/io/mac/mac.c
index 1d30dc3478..1bf49a5b44 100644
--- a/usr/src/uts/common/io/mac/mac.c
+++ b/usr/src/uts/common/io/mac/mac.c
@@ -3141,6 +3141,9 @@ mac_prop_check_size(mac_prop_id_t id, uint_t valsize, boolean_t is_range)
case MAC_PROP_WL_MLME:
minsize = sizeof (wl_mlme_t);
break;
+ case MAC_PROP_VN_PROMISC_FILTERED:
+ minsize = sizeof (boolean_t);
+ break;
}
return (valsize >= minsize);
diff --git a/usr/src/uts/common/io/mac/mac_client.c b/usr/src/uts/common/io/mac/mac_client.c
index 8f0ec9eb67..18a6613424 100644
--- a/usr/src/uts/common/io/mac/mac_client.c
+++ b/usr/src/uts/common/io/mac/mac_client.c
@@ -21,7 +21,7 @@
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Joyent, Inc. All rights reserved.
+ * Copyright 2015, Joyent, Inc.
*/
/*
@@ -3263,6 +3263,11 @@ mac_promisc_add(mac_client_handle_t mch, mac_client_promisc_type_t type,
mac_cb_info_t *mcbi;
int rc;
+ if ((flags & MAC_PROMISC_FLAGS_NO_COPY) &&
+ (flags & MAC_PROMISC_FLAGS_DO_FIXUPS)) {
+ return (EINVAL);
+ }
+
i_mac_perim_enter(mip);
if ((rc = mac_start((mac_handle_t)mip)) != 0) {
@@ -3271,7 +3276,8 @@ mac_promisc_add(mac_client_handle_t mch, mac_client_promisc_type_t type,
}
if ((mcip->mci_state_flags & MCIS_IS_VNIC) &&
- type == MAC_CLIENT_PROMISC_ALL) {
+ type == MAC_CLIENT_PROMISC_ALL &&
+ (mcip->mci_protect_flags & MPT_FLAG_PROMISC_FILTERED)) {
/*
* The function is being invoked by the upper MAC client
* of a VNIC. The VNIC should only see the traffic
@@ -3308,6 +3314,7 @@ mac_promisc_add(mac_client_handle_t mch, mac_client_promisc_type_t type,
mpip->mpi_strip_vlan_tag =
((flags & MAC_PROMISC_FLAGS_VLAN_TAG_STRIP) != 0);
mpip->mpi_no_copy = ((flags & MAC_PROMISC_FLAGS_NO_COPY) != 0);
+ mpip->mpi_do_fixups = ((flags & MAC_PROMISC_FLAGS_DO_FIXUPS) != 0);
mcbi = &mip->mi_promisc_cb_info;
mutex_enter(mcbi->mcbi_lockp);
@@ -3944,15 +3951,22 @@ mac_client_get_effective_resources(mac_client_handle_t mch,
static void
mac_promisc_dispatch_one(mac_promisc_impl_t *mpip, mblk_t *mp,
- boolean_t loopback)
+ boolean_t loopback, boolean_t local)
{
mblk_t *mp_copy, *mp_next;
- if (!mpip->mpi_no_copy || mpip->mpi_strip_vlan_tag) {
+ if (!mpip->mpi_no_copy || mpip->mpi_strip_vlan_tag ||
+ (mpip->mpi_do_fixups && local)) {
mp_copy = copymsg(mp);
if (mp_copy == NULL)
return;
+ if (mpip->mpi_do_fixups && local) {
+ mp_copy = mac_fix_cksum(mp_copy);
+ if (mp_copy == NULL)
+ return;
+ }
+
if (mpip->mpi_strip_vlan_tag) {
mp_copy = mac_strip_vlan_tag_chain(mp_copy);
if (mp_copy == NULL)
@@ -4009,7 +4023,7 @@ mac_is_mcast(mac_impl_t *mip, mblk_t *mp)
*/
void
mac_promisc_dispatch(mac_impl_t *mip, mblk_t *mp_chain,
- mac_client_impl_t *sender)
+ mac_client_impl_t *sender, boolean_t local)
{
mac_promisc_impl_t *mpip;
mac_cb_t *mcb;
@@ -4049,8 +4063,10 @@ mac_promisc_dispatch(mac_impl_t *mip, mblk_t *mp_chain,
if (is_sender ||
mpip->mpi_type == MAC_CLIENT_PROMISC_ALL ||
- is_mcast)
- mac_promisc_dispatch_one(mpip, mp, is_sender);
+ is_mcast) {
+ mac_promisc_dispatch_one(mpip, mp, is_sender,
+ local);
+ }
}
}
MAC_PROMISC_WALKER_DCR(mip);
@@ -4079,7 +4095,8 @@ mac_promisc_client_dispatch(mac_client_impl_t *mcip, mblk_t *mp_chain)
mpip = (mac_promisc_impl_t *)mcb->mcb_objp;
if (mpip->mpi_type == MAC_CLIENT_PROMISC_FILTERED &&
!is_mcast) {
- mac_promisc_dispatch_one(mpip, mp, B_FALSE);
+ mac_promisc_dispatch_one(mpip, mp, B_FALSE,
+ B_FALSE);
}
}
}
@@ -4150,16 +4167,15 @@ mac_info_get(const char *name, mac_info_t *minfop)
/*
* To get the capabilities that MAC layer cares about, such as rings, factory
* mac address, vnic or not, it should directly invoke this function. If the
- * link is part of a bridge, then the only "capability" it has is the inability
- * to do zero copy.
+ * link is part of a bridge, then the link is unable to do zero copy.
*/
boolean_t
i_mac_capab_get(mac_handle_t mh, mac_capab_t cap, void *cap_data)
{
mac_impl_t *mip = (mac_impl_t *)mh;
- if (mip->mi_bridge_link != NULL)
- return (cap == MAC_CAPAB_NO_ZCOPY);
+ if (mip->mi_bridge_link != NULL && cap == MAC_CAPAB_NO_ZCOPY)
+ return (B_TRUE);
else if (mip->mi_callbacks->mc_callbacks & MC_GETCAPAB)
return (mip->mi_getcapab(mip->mi_driver, cap, cap_data));
else
@@ -4338,7 +4354,13 @@ mac_addr_len(mac_handle_t mh)
boolean_t
mac_is_vnic(mac_handle_t mh)
{
- return (((mac_impl_t *)mh)->mi_state_flags & MIS_IS_VNIC);
+ return ((((mac_impl_t *)mh)->mi_state_flags & MIS_IS_VNIC) != 0);
+}
+
+boolean_t
+mac_is_overlay(mac_handle_t mh)
+{
+ return ((((mac_impl_t *)mh)->mi_state_flags & MIS_IS_OVERLAY) != 0);
}
mac_handle_t
@@ -5552,3 +5574,23 @@ mac_client_set_rings(mac_client_handle_t mch, int rxrings, int txrings)
mrp->mrp_ntxrings = txrings;
}
}
+
+boolean_t
+mac_get_promisc_filtered(mac_client_handle_t mch)
+{
+ mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
+
+ return (mcip->mci_protect_flags & MPT_FLAG_PROMISC_FILTERED);
+}
+
+void
+mac_set_promisc_filtered(mac_client_handle_t mch, boolean_t enable)
+{
+ mac_client_impl_t *mcip = (mac_client_impl_t *)mch;
+
+ ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
+ if (enable)
+ mcip->mci_protect_flags |= MPT_FLAG_PROMISC_FILTERED;
+ else
+ mcip->mci_protect_flags &= ~MPT_FLAG_PROMISC_FILTERED;
+}
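
Together with the MAC_PROP_VN_PROMISC_FILTERED size check in mac.c above,
these accessors make VNIC promiscuous filtering a per-client, runtime switch
rather than unconditional behavior. mac_set_promisc_filtered() asserts that
the MAC perimeter is held, so a caller would bracket it with the standard
perimeter calls; the wrapper below is a sketch, not code from this change:

	static void
	vnic_set_promisc_filtered(mac_handle_t mh, mac_client_handle_t mch,
	    boolean_t on)
	{
		mac_perim_handle_t mph;

		mac_perim_enter_by_mh(mh, &mph);
		mac_set_promisc_filtered(mch, on);
		mac_perim_exit(mph);
	}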
diff --git a/usr/src/uts/common/io/mac/mac_datapath_setup.c b/usr/src/uts/common/io/mac/mac_datapath_setup.c
index 14d94981cd..0459506784 100644
--- a/usr/src/uts/common/io/mac/mac_datapath_setup.c
+++ b/usr/src/uts/common/io/mac/mac_datapath_setup.c
@@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2015, Joyent, Inc.
*/
#include <sys/types.h>
@@ -603,6 +604,7 @@ mac_srs_cpu_setup(cpu_setup_t what, int id, void *arg)
*
* TODO: Cleanup and tighten some of the assumptions.
*/
+boolean_t mac_check_overlay = B_TRUE;
boolean_t mac_use_bw_heuristic = B_TRUE;
static int
mac_compute_soft_ring_count(flow_entry_t *flent, int rx_srs_cnt, int maxcpus)
@@ -610,6 +612,7 @@ mac_compute_soft_ring_count(flow_entry_t *flent, int rx_srs_cnt, int maxcpus)
uint64_t cpu_speed, bw = 0;
int srings = 0;
boolean_t bw_enabled = B_FALSE;
+ mac_client_impl_t *mcip = flent->fe_mcip;
ASSERT(!(flent->fe_type & FLOW_USER));
if (flent->fe_resource_props.mrp_mask & MRP_MAXBW &&
@@ -637,7 +640,16 @@ mac_compute_soft_ring_count(flow_entry_t *flent, int rx_srs_cnt, int maxcpus)
*/
if (mac_soft_ring_enable)
srings = srings * 2;
+ } else if (mac_check_overlay == B_TRUE &&
+ (mcip->mci_state_flags & MCIS_IS_VNIC) != 0) {
+ /* Is this a VNIC on an overlay? */
+ mac_handle_t mh = (mac_handle_t)mcip->mci_mip;
+ if (mac_is_overlay(mh) == B_TRUE) {
+ srings = mac_rx_soft_ring_10gig_count;
+ }
}
} else {
/*
* Soft ring computation using CPU speed and specified
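
Because mac_check_overlay is a global in the mac module, the overlay
special-casing above can be disabled in the field without a rebuild, e.g.
with an /etc/system line (assuming the usual module:variable syntax):

	set mac:mac_check_overlay=0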
diff --git a/usr/src/uts/common/io/mac/mac_protect.c b/usr/src/uts/common/io/mac/mac_protect.c
index 805b5d36f9..da83dc643e 100644
--- a/usr/src/uts/common/io/mac/mac_protect.c
+++ b/usr/src/uts/common/io/mac/mac_protect.c
@@ -2576,6 +2576,9 @@ mac_protect_init(mac_client_impl_t *mcip)
sizeof (dhcpv6_addr_t), offsetof(dhcpv6_addr_t, da_node));
avl_create(&mcip->mci_v6_slaac_ip, compare_slaac_ip,
sizeof (slaac_addr_t), offsetof(slaac_addr_t, sla_node));
+
+ if (mcip->mci_state_flags & MCIS_IS_VNIC)
+ mcip->mci_protect_flags |= MPT_FLAG_PROMISC_FILTERED;
}
void
diff --git a/usr/src/uts/common/io/mac/mac_provider.c b/usr/src/uts/common/io/mac/mac_provider.c
index 57d1996d84..98b770786a 100644
--- a/usr/src/uts/common/io/mac/mac_provider.c
+++ b/usr/src/uts/common/io/mac/mac_provider.c
@@ -21,6 +21,7 @@
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2015, Joyent, Inc.
*/
#include <sys/types.h>
@@ -350,6 +351,9 @@ mac_register(mac_register_t *mregp, mac_handle_t *mhp)
if (i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_AGGR, NULL))
mip->mi_state_flags |= MIS_IS_AGGR;
+ if (i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_OVERLAY, NULL))
+ mip->mi_state_flags |= MIS_IS_OVERLAY;
+
mac_addr_factory_init(mip);
/*
@@ -670,7 +674,7 @@ mac_trill_snoop(mac_handle_t mh, mblk_t *mp)
mac_impl_t *mip = (mac_impl_t *)mh;
if (mip->mi_promisc_list != NULL)
- mac_promisc_dispatch(mip, mp, NULL);
+ mac_promisc_dispatch(mip, mp, NULL, B_FALSE);
}
/*
@@ -691,7 +695,7 @@ mac_rx_common(mac_handle_t mh, mac_resource_handle_t mrh, mblk_t *mp_chain)
* this MAC, pass them a copy if appropriate.
*/
if (mip->mi_promisc_list != NULL)
- mac_promisc_dispatch(mip, mp_chain, NULL);
+ mac_promisc_dispatch(mip, mp_chain, NULL, B_FALSE);
if (mr != NULL) {
/*
diff --git a/usr/src/uts/common/io/mac/mac_sched.c b/usr/src/uts/common/io/mac/mac_sched.c
index 148f739d52..0e2cb864c9 100644
--- a/usr/src/uts/common/io/mac/mac_sched.c
+++ b/usr/src/uts/common/io/mac/mac_sched.c
@@ -1370,7 +1370,7 @@ int mac_srs_worker_wakeup_ticks = 0;
* said, the constant is left as a static variable to allow it to be
* dynamically tuned in the field if and as needed.
*/
-static uintptr_t mac_rx_srs_stack_needed = 10240;
+static uintptr_t mac_rx_srs_stack_needed = 13312;
static uint_t mac_rx_srs_stack_toodeep;
#ifndef STACK_GROWTH_DOWN
@@ -2310,7 +2310,7 @@ check_again:
if (smcip->mci_mip->mi_promisc_list != NULL) {
mutex_exit(lock);
mac_promisc_dispatch(smcip->mci_mip,
- head, NULL);
+ head, NULL, B_FALSE);
mutex_enter(lock);
}
}
@@ -4450,8 +4450,10 @@ mac_tx_send(mac_client_handle_t mch, mac_ring_handle_t ring, mblk_t *mp_chain,
* check is done inside the MAC_TX()
* macro.
*/
- if (mip->mi_promisc_list != NULL)
- mac_promisc_dispatch(mip, mp, src_mcip);
+ if (mip->mi_promisc_list != NULL) {
+ mac_promisc_dispatch(mip, mp, src_mcip,
+ B_TRUE);
+ }
do_switch = ((src_mcip->mci_state_flags &
dst_mcip->mci_state_flags &
diff --git a/usr/src/uts/common/io/mac/mac_stat.c b/usr/src/uts/common/io/mac/mac_stat.c
index 31972f94d8..c1a5c9c069 100644
--- a/usr/src/uts/common/io/mac/mac_stat.c
+++ b/usr/src/uts/common/io/mac/mac_stat.c
@@ -21,6 +21,7 @@
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright 2013 Joyent, Inc. All rights reserved.
*/
/*
@@ -390,8 +391,8 @@ i_mac_stat_create(void *handle, const char *modname, const char *statname,
kstat_t *ksp;
kstat_named_t *knp;
- ksp = kstat_create(modname, 0, statname, "net",
- KSTAT_TYPE_NAMED, count, 0);
+ ksp = kstat_create_zone(modname, 0, statname, "net",
+ KSTAT_TYPE_NAMED, count, 0, getzoneid());
if (ksp == NULL)
return (NULL);
@@ -948,9 +949,9 @@ mac_driver_stat_create(mac_impl_t *mip)
major_t major = getmajor(mip->mi_phy_dev);
count = MAC_MOD_NKSTAT + MAC_NKSTAT + mip->mi_type->mt_statcount;
- ksp = kstat_create((const char *)ddi_major_to_name(major),
+ ksp = kstat_create_zone((const char *)ddi_major_to_name(major),
getminor(mip->mi_phy_dev) - 1, MAC_KSTAT_NAME,
- MAC_KSTAT_CLASS, KSTAT_TYPE_NAMED, count, 0);
+ MAC_KSTAT_CLASS, KSTAT_TYPE_NAMED, count, 0, getzoneid());
if (ksp == NULL)
return;
diff --git a/usr/src/uts/common/io/mem.c b/usr/src/uts/common/io/mem.c
index cdbeb0d422..8955b3d935 100644
--- a/usr/src/uts/common/io/mem.c
+++ b/usr/src/uts/common/io/mem.c
@@ -24,7 +24,7 @@
*/
/*
- * Copyright (c) 2015, Joyent, Inc. All rights reserved.
+ * Copyright 2016, Joyent, Inc.
*/
/*
@@ -221,10 +221,19 @@ mmopen(dev_t *devp, int flag, int typ, struct cred *cred)
switch (getminor(*devp)) {
case M_NULL:
case M_ZERO:
+ /* standard devices */
+ break;
+
case M_MEM:
case M_KMEM:
case M_ALLKMEM:
- /* standard devices */
+ /*
+ * These devices should never be visible in a zone, but if they
+ * somehow do get created we refuse to allow the zone to use
+ * them.
+ */
+ if (crgetzoneid(cred) != GLOBAL_ZONEID)
+ return (EACCES);
break;
default:
diff --git a/usr/src/uts/common/io/mr_sas/mr_sas.conf b/usr/src/uts/common/io/mr_sas/mr_sas.conf
index cfda434e23..6c585c6a42 100644
--- a/usr/src/uts/common/io/mr_sas/mr_sas.conf
+++ b/usr/src/uts/common/io/mr_sas/mr_sas.conf
@@ -13,3 +13,11 @@
# Fast-Path specific flag. Default is "yes".
# mrsas-enable-fp="yes";
+flow_control="dmult" queue="qsort" tape="sctp";
+
+# MSI specific flag. To enable MSI modify the flag value to "yes"
+mrsas-enable-msi="yes";
+
+# Fast-Path specific flag. To enable Fast-Path modify the flag value to "yes"
+mrsas-enable-fp="yes";
+
diff --git a/usr/src/uts/common/io/nfp/THIRDPARTYLICENSE b/usr/src/uts/common/io/nfp/THIRDPARTYLICENSE
new file mode 100644
index 0000000000..187088ff34
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/THIRDPARTYLICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2014, Thales UK Limited
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/usr/src/uts/common/io/nfp/THIRDPARTYLICENSE.descrip b/usr/src/uts/common/io/nfp/THIRDPARTYLICENSE.descrip
new file mode 100644
index 0000000000..cde8b65b37
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/THIRDPARTYLICENSE.descrip
@@ -0,0 +1 @@
+NFAST CRYPTO ACCELERATOR DRIVER
diff --git a/usr/src/uts/common/io/nfp/autoversion.h b/usr/src/uts/common/io/nfp/autoversion.h
new file mode 100644
index 0000000000..b9021942b2
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/autoversion.h
@@ -0,0 +1,21 @@
+/*
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+*/
+
+/* AUTOGENERATED - DO NOT EDIT */
+#ifndef AUTOVERSION_H
+#define AUTOVERSION_H
+
+#define VERSION_RELEASEMAJOR 2
+#define VERSION_RELEASEMINOR 26
+#define VERSION_RELEASEPATCH 40
+#define VERSION_NO "2.26.40cam999"
+#define VERSION_COMPNAME "nfdrv"
+
+#endif
diff --git a/usr/src/uts/common/io/nfp/drvlist.c b/usr/src/uts/common/io/nfp/drvlist.c
new file mode 100644
index 0000000000..a04b1fd5b0
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/drvlist.c
@@ -0,0 +1,19 @@
+/*
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+*/
+
+#include "nfp_common.h"
+#include "nfp_cmd.h"
+
+const nfpcmd_dev *nfp_drvlist[] = {
+ &i21285_cmddev,
+ &i21555_cmddev,
+ NULL
+};
+
diff --git a/usr/src/uts/common/io/nfp/hostif.c b/usr/src/uts/common/io/nfp/hostif.c
new file mode 100644
index 0000000000..684be703ea
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/hostif.c
@@ -0,0 +1,1192 @@
+/*
+
+hostif.c: nFast PCI driver for Solaris 2.5, 2.6, 2.7 and 2.8
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+history
+
+06/05/1998 jsh Original solaris 2.6
+21/05/1999 jsh added support for solaris 2.5
+10/06/1999 jsh added support for solaris 2.7 (32 and 64 bit)
+??/??/2001 jsh added support for solaris 2.8 (32 and 64 bit)
+16/10/2001 jsh moved from nfast to new structure in nfdrv
+12/02/2002 jsh added high level interrupt support
+
+*/
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/file.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/map.h>
+#include <sys/debug.h>
+#include <sys/modctl.h>
+#include <sys/kmem.h>
+#include <sys/cmn_err.h>
+#include <sys/open.h>
+#include <sys/stat.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/pci.h>
+
+#include "nfp_common.h"
+#include "nfp_hostif.h"
+#include "nfp_osif.h"
+#include "nfp_cmd.h"
+
+#include "nfp.h"
+
+/* mapped memory attributes, no-swap endianness (byte swapping is done at a higher level) */
+static struct ddi_device_acc_attr nosw_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+/* dma attributes */
+static ddi_dma_attr_t dma_attrs = {
+ DMA_ATTR_V0, /* version number */
+ (uint64_t)0x0, /* low address */
+ (uint64_t)0xffffffff, /* high address */
+ (uint64_t)0xffffff, /* DMA counter max */
+ (uint64_t)0x1, /* alignment */
+ 0x0c, /* burst sizes */
+ 0x1, /* minimum transfer size */
+ (uint64_t)0x3ffffff, /* maximum transfer size */
+ (uint64_t)0x7fff, /* maximum segment size */
+ 1, /* no scatter/gather lists */
+ 1, /* granularity */
+ 0 /* DMA flags */
+};
+
+/*
+ * Debug message control
+ * Debug Levels:
+ * 0 = no messages
+ * 1 = Errors
+ * 2 = Subroutine calls & control flow
+ * 3 = I/O Data (verbose!)
+ * Can be set with adb or in the /etc/system file with
+ * "set nfp:nfp_debug=<value>"
+ */
+
+int nfp_debug= 1;
+
+static void *state_head; /* opaque handle top of state structs */
+
+static int nfp_open(dev_t *dev, int openflags, int otyp, cred_t *credp);
+static int nfp_close(dev_t dev, int openflags, int otyp, cred_t *credp);
+static int nfp_release_dev( dev_info_t *dip );
+
+static int nfp_read(dev_t dev, struct uio *uiop, cred_t *credp);
+static int nfp_write(dev_t dev, struct uio *uiop, cred_t *credp);
+static int nfp_strategy(struct buf *bp);
+
+static int nfp_ioctl(dev_t dev, int cmd, ioctlptr_t arg, int mode, cred_t *credp, int *rvalp);
+static int nfp_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
+ struct pollhead **phpp);
+
+static void nfp_wrtimeout (void *pdev);
+static void nfp_rdtimeout (void *pdev);
+
+static int nfp_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result);
+static int nfp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
+static int nfp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
+
+static void nfp_read_complete_final(nfp_dev *pdev, int ok);
+static void nfp_write_complete_final(nfp_dev *pdev, int ok);
+
+/* nfp file ops --------------------------------------------------- */
+
+static struct cb_ops nfp_cb_ops = {
+ nfp_open,
+ nfp_close,
+ nodev, /* no nfp_strategy */
+ nodev, /* no print routine */
+ nodev, /* no dump routine */
+ nfp_read,
+ nfp_write,
+ nfp_ioctl,
+ nodev, /* no devmap routine */
+ nodev, /* no mmap routine */
+ nodev, /* no segmap routine */
+ nfp_chpoll,
+ ddi_prop_op,
+ 0, /* not a STREAMS driver, no cb_str routine */
+ D_NEW | D_MP | EXTRA_CB_FLAGS, /* must be safe for multi-thread/multi-processor */
+ CB_REV,
+ nodev, /* aread */
+ nodev /* awrite */
+};
+
+static struct dev_ops nfp_ops = {
+ DEVO_REV, /* DEVO_REV indicated by manual */
+ 0, /* device reference count */
+ nfp_getinfo,
+ nulldev, /* identify */
+ nulldev, /* probe */
+ nfp_attach,
+ nfp_detach,
+ nodev, /* device reset routine */
+ &nfp_cb_ops,
+ (struct bus_ops *)0, /* bus operations */
+};
+
+extern struct mod_ops mod_driverops;
+static struct modldrv modldrv = {
+ &mod_driverops,
+ NFP_DRVNAME,
+ &nfp_ops,
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, /* MODREV_1 indicated by manual */
+ (void *)&modldrv,
+ NULL, /* termination of list of linkage structures */
+};
+
+/* interface resource allocation */
+
+int nfp_alloc_pci_push( nfp_dev *pdev ) {
+ /* allocate resources needed for PCI Push,
+ * if not already allocated.
+ * return True if successful
+ */
+ nfp_err ret;
+ uint_t cookie_count;
+ size_t real_length;
+
+ if(!pdev->read_buf) {
+ /* allocate read buffer */
+ pdev->read_buf = kmem_zalloc( NFP_READBUF_SIZE, KM_NOSLEEP );
+ }
+ if(!pdev->read_buf) {
+ nfp_log( NFP_DBG1, "nfp_attach: kmem_zalloc read buffer failed");
+ pdev->read_buf = NULL;
+ return 0;
+ }
+
+ if(!pdev->rd_dma_ok) {
+ /* allocate dma handle for read buffer */
+ ret = ddi_dma_alloc_handle( pdev->dip,
+ &dma_attrs,
+ DDI_DMA_DONTWAIT,
+ NULL,
+ &pdev->read_dma_handle );
+ if( ret != DDI_SUCCESS ) {
+ nfp_log( NFP_DBG1,
+ "nfp_alloc_pci_push: ddi_dma_alloc_handle failed (%d)",
+ ret );
+ return 0;
+ }
+
+ /* Allocate the memory for dma transfers */
+ ret = ddi_dma_mem_alloc(pdev->read_dma_handle, NFP_READBUF_SIZE, &nosw_attr,
+ DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
+ (caddr_t*)&pdev->read_buf, &real_length, &pdev->acchandle);
+ if (ret != DDI_SUCCESS) {
+ nfp_log( NFP_DBG1, "nfp_alloc_pci_push: ddi_dma_mem_alloc failed (%d)", ret);
+ ddi_dma_free_handle( &pdev->read_dma_handle );
+ return 0;
+ }
+
+ ret = ddi_dma_addr_bind_handle( pdev->read_dma_handle,
+ NULL, /* kernel address space */
+ (caddr_t)pdev->read_buf, real_length,
+ DDI_DMA_READ | DDI_DMA_CONSISTENT, /* dma flags */
+ DDI_DMA_DONTWAIT, NULL,
+ &pdev->read_dma_cookie, &cookie_count );
+ if( ret != DDI_DMA_MAPPED ) {
+ nfp_log( NFP_DBG1,
+ "nfp_alloc_pci_push: ddi_dma_addr_bind_handle failed (%d)",
+ ret);
+ ddi_dma_mem_free(&pdev->acchandle);
+ ddi_dma_free_handle( &pdev->read_dma_handle );
+ return 0;
+ }
+ if( cookie_count > 1 ) {
+ nfp_log( NFP_DBG1,
+ "nfp_alloc_pci_push: error:"
+ " ddi_dma_addr_bind_handle wants %d transfers",
+ cookie_count);
+ ddi_dma_mem_free(&pdev->acchandle);
+ (void) ddi_dma_unbind_handle( pdev->read_dma_handle );
+ ddi_dma_free_handle( &pdev->read_dma_handle );
+ return 0;
+ }
+ pdev->rd_dma_ok = 1;
+ }
+ return pdev->rd_dma_ok;
+}
+
+void nfp_free_pci_push( nfp_dev *pdev ) {
+ /* free resources allocated to PCI Push */
+ if( pdev->rd_dma_ok ) {
+ (void) ddi_dma_sync(pdev->read_dma_handle,0,0,DDI_DMA_SYNC_FORKERNEL);
+ ddi_dma_mem_free(&pdev->acchandle);
+ (void) ddi_dma_unbind_handle( pdev->read_dma_handle );
+ ddi_dma_free_handle( &pdev->read_dma_handle );
+ pdev->rd_dma_ok = 0;
+ }
+ if( pdev->read_buf ) {
+ kmem_free( pdev->read_buf, NFP_READBUF_SIZE );
+ pdev->read_buf = NULL;
+ }
+}
+
+/* include definition of nfp_set_ifvers() */
+#define nfp_ifvers NFDEV_IF_PCI_PUSH
+#include "nfp_ifvers.c"
+#undef nfp_ifvers
+
+/*--------------------*/
+/* nfp_isr */
+/*--------------------*/
+
+static u_int nfp_isr( char *pdev_in ) {
+ /* LINTED: alignment */
+ nfp_dev *pdev= (nfp_dev *)pdev_in;
+ nfp_err ne;
+ int handled;
+
+ nfp_log( NFP_DBG3, "nfp_isr: entered");
+
+ if( !pdev ) {
+ nfp_log( NFP_DBG1, "nfp_isr: cannot find dev");
+ return DDI_INTR_UNCLAIMED;
+ }
+
+ /* The isr needs to be mutex'ed - an SMP can call us while we're still
+ * running!
+ */
+ mutex_enter(&pdev->low_mutex);
+ ne= pdev->cmddev->isr( pdev->common.cmdctx, &handled );
+ mutex_exit(&pdev->low_mutex);
+
+ if( !ne && handled )
+ return DDI_INTR_CLAIMED;
+ if (ne)
+ nfp_log( NFP_DBG1, "nfp_isr: failed");
+ else
+ nfp_log( NFP_DBG3, "nfp_isr: unclaimed");
+ return DDI_INTR_UNCLAIMED;
+}
+
+static u_int nfp_soft_isr( char *pdev_in ) {
+ /* LINTED: alignment */
+ nfp_dev *pdev= (nfp_dev *)pdev_in;
+ int rd, wr;
+
+ nfp_log( NFP_DBG3, "nfp_soft_isr: entered");
+
+ if( !pdev ) {
+ nfp_log( NFP_DBG1, "nfp_soft_isr: cannot find dev");
+ return DDI_INTR_UNCLAIMED;
+ }
+ rd= wr= 0;
+
+ mutex_enter(&pdev->high_mutex);
+ if(pdev->high_read) {
+ pdev->high_read= 0;
+ mutex_exit(&pdev->high_mutex);
+ rd= 1;
+ }
+ if(pdev->high_write) {
+ pdev->high_write= 0;
+ wr= 1;
+ }
+ mutex_exit(&pdev->high_mutex);
+
+ if(rd) {
+ nfp_log( NFP_DBG3, "nfp_soft_isr: read done");
+ nfp_read_complete_final(pdev, pdev->rd_ok);
+ }
+ if(wr) {
+ nfp_log( NFP_DBG3, "nfp_soft_isr: write done");
+ nfp_write_complete_final(pdev, pdev->wr_ok);
+ }
+ if( rd || wr )
+ return DDI_INTR_CLAIMED;
+
+ nfp_log( NFP_DBG2, "nfp_soft_isr: unclaimed");
+ return DDI_INTR_UNCLAIMED;
+}
+
+
+/*-------------------------*/
+/* nfp_read */
+/*-------------------------*/
+
+void nfp_read_complete(nfp_dev *pdev, int ok) {
+ nfp_log( NFP_DBG2,"nfp_read_complete: entering");
+
+ if(pdev->high_intr) {
+ nfp_log(NFP_DBG2, "nfp_read_complete: high_intr");
+ mutex_enter(&pdev->high_mutex);
+ nfp_log(NFP_DBG3, "nfp_read_complete: high_mutex entered");
+ if(pdev->high_read)
+ nfp_log(NFP_DBG1, "nfp_read_complete: high_read allread set!");
+ pdev->high_read= 1;
+ pdev->rd_ok= ok;
+ nfp_log(NFP_DBG3, "nfp_read_complete: exiting high_mutex");
+ mutex_exit(&pdev->high_mutex);
+ ddi_trigger_softintr(pdev->soft_int_id);
+ } else
+ nfp_read_complete_final( pdev, ok );
+ nfp_log( NFP_DBG2,"nfp_read_complete: exiting");
+}
+
+static void nfp_read_complete_final(nfp_dev *pdev, int ok) {
+ nfp_log( NFP_DBG2,"nfp_read_complete_final: entering");
+ if(pdev->rdtimeout)
+ (void) untimeout(pdev->rdtimeout);
+ if(!pdev->rd_outstanding) {
+ nfp_log( NFP_DBG1,"nfp_read_complete_final: !pdev->rd_outstanding");
+ }
+ nfp_log( NFP_DBG2,"nfp_read_complete_final: pdev->rd_outstanding=0, ok %d", ok);
+ mutex_enter(&pdev->isr_mutex);
+ pdev->rd_outstanding= 0;
+ pdev->rd_ready= 1;
+ pdev->rd_ok= ok;
+ cv_broadcast(&pdev->rd_cv);
+ mutex_exit(&pdev->isr_mutex);
+ pollwakeup (&pdev->pollhead, POLLRDNORM);
+ nfp_log( NFP_DBG2,"nfp_read_complete_final: exiting");
+}
+
+static void nfp_rdtimeout( void *pdev_in )
+{
+ nfp_dev *pdev= (nfp_dev *)pdev_in;
+
+ nfp_log( NFP_DBG1, "nfp_rdtimeout: read timed out");
+
+ if (!pdev) {
+ nfp_log( NFP_DBG1, "nfp_rdtimeout: NULL pdev." );
+ return;
+ }
+ pdev->rdtimeout= 0;
+ nfp_read_complete_final(pdev, 0);
+}
+
+/* ARGSUSED */
+static int nfp_read(dev_t dev, struct uio *uiop, cred_t *credp) {
+ int ret;
+ nfp_log( NFP_DBG2, "nfp_read: entered" );
+ if (ddi_get_soft_state(state_head, getminor(dev)) == NULL) {
+ nfp_log( NFP_DBG1, "nfp_read: unable to get nfp_dev");
+ return (ENODEV);
+ }
+ nfp_log( NFP_DBG2, "nfp_read: about to physio." );
+ ret = physio(nfp_strategy, (struct buf *)0, dev, B_READ, minphys, uiop );
+ if(ret)
+ nfp_log( NFP_DBG1, "nfp_read: physio returned %x.", ret );
+ return ret;
+}
+
+/*-------------------------*/
+/* nfp_write */
+/*-------------------------*/
+
+void nfp_write_complete( nfp_dev *pdev, int ok) {
+ nfp_log( NFP_DBG2,"nfp_write_complete: entering");
+
+ if(pdev->high_intr) {
+ mutex_enter(&pdev->high_mutex);
+ if(pdev->high_write)
+ nfp_log(NFP_DBG1, "nfp_write_complete: high_write allread set!");
+ pdev->high_write= 1;
+ pdev->wr_ok= ok;
+ mutex_exit(&pdev->high_mutex);
+ ddi_trigger_softintr(pdev->soft_int_id);
+ } else
+ nfp_write_complete_final( pdev, ok );
+ nfp_log( NFP_DBG2,"nfp_write_complete: exiting");
+}
+
+static void nfp_write_complete_final( nfp_dev *pdev, int ok) {
+ struct buf *local_wr_bp;
+ nfp_log( NFP_DBG2,"nfp_write_complete_final: entering");
+ if(pdev->wrtimeout)
+ (void) untimeout(pdev->wrtimeout);
+
+ if (!pdev->wr_bp) {
+ nfp_log( NFP_DBG2, "nfp_write_complete_final: write: wr_bp == NULL." );
+ return;
+ }
+
+ bp_mapout(pdev->wr_bp);
+ pdev->wr_bp->b_resid = ok ? 0 : pdev->wr_bp->b_bcount;
+ /* Make sure we set wr_ready before calling biodone to avoid a race */
+ pdev->wr_ready = 1;
+ bioerror(pdev->wr_bp, ok ? 0 : ENXIO);
+ local_wr_bp = pdev->wr_bp;
+ pdev->wr_bp = 0;
+ biodone(local_wr_bp);
+ nfp_log( NFP_DBG2, "nfp_write_complete_final: isr_mutex extited");
+ pollwakeup (&pdev->pollhead, POLLWRNORM);
+
+ nfp_log( NFP_DBG2, "nfp_write_complete_final: leaving");
+}
+
+static void nfp_wrtimeout( void *pdev_in )
+{
+ nfp_dev *pdev= (nfp_dev *)pdev_in;
+
+ nfp_log( NFP_DBG1, "nfp_wrtimeout: write timed out");
+
+ if (!pdev) {
+ nfp_log( NFP_DBG1, "nfp_wrtimeout: NULL pdev." );
+ return;
+ }
+ pdev->wrtimeout= 0;
+ nfp_write_complete_final(pdev, 0);
+}
+
+/* ARGSUSED */
+static int nfp_write(dev_t dev, struct uio *uiop, cred_t *credp) {
+ int ret;
+ nfp_log( NFP_DBG2, "nfp_write: entered." );
+ if (ddi_get_soft_state(state_head, getminor(dev)) == NULL) {
+ nfp_log( NFP_DBG1, "nfp_chread: unable to get nfp_dev.");
+ return (ENODEV);
+ }
+ nfp_log( NFP_DBG2, "nfp_write: about to physio." );
+ ret = physio(nfp_strategy, (struct buf *)0, dev, B_WRITE, minphys, uiop );
+ if(ret)
+ nfp_log( NFP_DBG1, "nfp_write: physio returned %x.", ret );
+ return ret;
+}
+
+/*-------------------------*/
+/* nfp_strategy */
+/*-------------------------*/
+
+#define NFP_STRAT_ERR(thebp,err,txt) do { \
+ nfp_log( NFP_DBG1, "nfp_strategy: " txt ".\n"); \
+ (thebp)->b_resid = (thebp)->b_bcount; \
+ bioerror ((thebp), err); \
+ biodone ((thebp)); \
+} while (0)
+
+static int nfp_strategy(struct buf *bp) {
+ register struct nfp_dev *pdev;
+ nfp_err ne;
+
+ nfp_log( NFP_DBG2, "nfp_strategy: entered." );
+ if (!(pdev = ddi_get_soft_state(state_head, getminor(bp->b_edev)))) {
+ NFP_STRAT_ERR (bp, ENXIO, "unable to get nfp_dev");
+ return (0);
+ }
+
+ if (bp->b_flags & B_READ) {
+ int count;
+ /* read */
+ if (!pdev->rd_ready) {
+ NFP_STRAT_ERR (bp,ENXIO,"read called when not ready");
+ return (0);
+ }
+ pdev->rd_ready=0;
+ pdev->rd_pending = 0;
+ if( !pdev->rd_ok) {
+ NFP_STRAT_ERR (bp,ENXIO,"read failed");
+ return (0);
+ }
+ /* copy data from module */
+ if(pdev->ifvers >= NFDEV_IF_PCI_PUSH) {
+ nfp_log( NFP_DBG3, "nfp_strategy: copying kernel read buffer");
+ if( ddi_dma_sync(pdev->read_dma_handle,0,0,DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS )
+ {
+ NFP_STRAT_ERR(bp,ENXIO,"ddi_dma_sync(read_dma_handle) failed");
+ return (0);
+ }
+ /* LINTED: alignment */
+ count= *(unsigned int *)(pdev->read_buf+4);
+ count= FROM_LE32_MEM(&count);
+ nfp_log( NFP_DBG3, "nfp_strategy: read count %d", count);
+ if(count<0 || count>bp->b_bcount) {
+ NFP_STRAT_ERR(bp,ENXIO,"bad read byte count from device");
+ nfp_log( NFP_DBG1, "nfp_strategy: bad read byte count (%d) from device", count);
+ return (0);
+ }
+ bp_mapin (bp);
+ bcopy( pdev->read_buf + 8, bp->b_un.b_addr, count );
+ bp_mapout (bp);
+ } else {
+ bp_mapin (bp);
+ ne= pdev->cmddev->read_block( bp->b_un.b_addr, bp->b_bcount, pdev->common.cmdctx, &count );
+ bp_mapout (bp);
+ if( ne != NFP_SUCCESS) {
+ NFP_STRAT_ERR (bp,nfp_oserr(ne),"read_block failed");
+ return (0);
+ }
+ }
+ bioerror(bp, 0);
+ bp->b_resid = 0;
+ biodone (bp);
+ } else {
+ /* write */
+ if (!pdev->wr_ready) {
+ NFP_STRAT_ERR (bp,ENXIO,"write called when not ready");
+ return (0);
+ }
+ if (pdev->wr_bp) {
+ NFP_STRAT_ERR (bp,ENXIO,"wr_bp != NULL");
+ return (0);
+ }
+ pdev->wrtimeout= timeout(nfp_wrtimeout, (caddr_t)pdev, NFP_TIMEOUT_SEC * drv_usectohz(1000000));
+ pdev->wr_bp = bp;
+ pdev->wr_ready = 0;
+ bp_mapin (bp);
+ ne= pdev->cmddev->write_block( bp->b_un.b_addr, bp->b_bcount, pdev->common.cmdctx);
+ if( ne != NFP_SUCCESS ) {
+ bp_mapout (bp);
+ (void) untimeout(pdev->wrtimeout);
+ pdev->wr_bp = 0;
+ pdev->wr_ready = 1;
+ NFP_STRAT_ERR (bp,nfp_oserr(ne),"write failed");
+ return (0);
+ }
+ }
+ nfp_log( NFP_DBG2, "nfp_strategy: leaving");
+
+ return (0);
+}
+
+
+/*--------------------*/
+/* poll / select */
+/*--------------------*/
+
+static int nfp_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
+ struct pollhead **phpp) {
+ nfp_dev *pdev;
+ short revents;
+
+ if (!(pdev = ddi_get_soft_state(state_head, getminor(dev)))) {
+ nfp_log( NFP_DBG1, "nfp_chpoll: unable to get nfp_dev");
+ *reventsp=0;
+ return (0);
+ }
+ nfp_log( NFP_DBG2, "nfp_chpoll: entered %x", events);
+
+ revents=0;
+ if (events&POLLWRNORM) {
+ if (pdev->wr_ready) {
+ nfp_log( NFP_DBG2, "nfp_chpoll: write ready");
+ revents|=POLLWRNORM;
+ }
+ }
+
+ if (events&POLLRDNORM) {
+ if (pdev->rd_ready) {
+ nfp_log( NFP_DBG2, "nfp_chpoll: read ready");
+ revents|=POLLRDNORM;
+ }
+ }
+
+ if (!revents && !anyyet) {
+ *phpp=&pdev->pollhead;
+ }
+ *reventsp=revents;
+
+ nfp_log( NFP_DBG2, "nfp_chpoll: leaving");
+ return (0);
+}
+
+
+/*--------------------*/
+/* ioctl */
+/*--------------------*/
+
+/* ARGSUSED */
+static int nfp_ioctl(dev_t dev, int cmd, ioctlptr_t arg, int mode, cred_t *credp, int *rvalp) {
+ register struct nfp_dev *pdev;
+
+ nfp_log( NFP_DBG2, "nfp_ioctl: entered." );
+
+ if (!(pdev = ddi_get_soft_state(state_head, getminor(dev)))) {
+ nfp_log( NFP_DBG1, "nfp_ioctl: unable to get nfp dev.");
+ return (ENXIO);
+ }
+
+ switch (cmd) {
+ case NFDEV_IOCTL_ENQUIRY:
+ {
+ long *outp;
+ int outlen;
+ nfdev_enquiry_str enq_data;
+
+ enq_data.busno = (unsigned int)-1;
+ enq_data.slotno = (unsigned char)-1;
+
+ /* get our bus and slot num */
+ if (ddi_getlongprop (DDI_DEV_T_NONE,
+ pdev->dip, 0, "reg",
+ (caddr_t)&outp, &outlen) != DDI_PROP_NOT_FOUND) {
+ nfp_log( NFP_DBG2, "ddi_getlongprop('reg') ok." );
+ if( outlen > 0 ) {
+ enq_data.busno = ((*outp)>>16) & 0xff;
+ enq_data.slotno = ((*outp)>>11) & 0x1f;
+ nfp_log( NFP_DBG2, "busno %d, slotno %d.",
+ enq_data.busno, enq_data.slotno );
+ }
+ } else
+ nfp_log( NFP_DBG1, "ddi_getlongprop('reg') failed." );
+
+ if( ddi_copyout( (char *)&enq_data, (void *)arg, sizeof(enq_data), mode ) != 0 ) {
+ nfp_log( NFP_DBG1, "ddi_copyout() failed." );
+ return EFAULT;
+ }
+ }
+ break;
+
+ case NFDEV_IOCTL_ENSUREREADING:
+ {
+ unsigned int addr, len;
+ nfp_err ret;
+ if( ddi_copyin( (void *)arg, (char *)&len, sizeof(unsigned int), mode ) != 0 ) {
+ nfp_log( NFP_DBG1, "ddi_copyin() failed." );
+ return (EFAULT);
+ }
+ /* signal a read to the module */
+ nfp_log( NFP_DBG2, "nfp_ioctl: signalling read request to module, len = %x.", len );
+ if (len>8192) {
+ nfp_log( NFP_DBG1, "nfp_ioctl: len >8192 = %x.", len );
+ return EINVAL;
+ }
+ if (pdev->rd_outstanding==1) {
+ nfp_log( NFP_DBG1, "nfp_ioctl: not about to call read with read outstanding.");
+ return EIO;
+ }
+
+ addr= 0;
+ if(pdev->ifvers >= NFDEV_IF_PCI_PUSH) {
+ if( len > NFP_READBUF_SIZE ) {
+ nfp_log( NFP_DBG1, "nfp_ioctl: len > NFP_READBUF_SIZE = %x.", len );
+ return EINVAL;
+ }
+ addr= pdev->read_dma_cookie.dmac_address;
+ }
+
+ pdev->rd_outstanding = 1;
+ nfp_log( NFP_DBG2,"nfp_ioctl: pdev->rd_outstanding=1");
+
+ /* setup timeout timer */
+ pdev->rdtimeout= timeout(nfp_rdtimeout, (caddr_t)pdev, NFP_TIMEOUT_SEC * drv_usectohz(1000000));
+
+ nfp_log( NFP_DBG2, "nfp_ioctl: read request");
+ ret = pdev->cmddev->ensure_reading(addr, len, pdev->common.cmdctx);
+ if ( ret != NFP_SUCCESS ) {
+ (void) untimeout(pdev->rdtimeout);
+ pdev->rdtimeout = 0;
+ pdev->rd_outstanding = 0;
+ nfp_log( NFP_DBG1, "nfp_ioctl : cmddev->ensure_reading failed ");
+ return nfp_oserr( ret );
+ }
+ }
+ break;
+
+ case NFDEV_IOCTL_PCI_IFVERS:
+ {
+ int vers;
+
+ nfp_log( NFP_DBG2, "nfp_ioctl: NFDEV_IOCTL_PCI_IFVERS");
+
+ if( ddi_copyin( (void *)arg, (char *)&vers, sizeof(vers), mode ) != 0 ) {
+ nfp_log( NFP_DBG1, "ddi_copyin() failed." );
+ return (EFAULT);
+ }
+
+ if( pdev->rd_outstanding ) {
+ nfp_log( NFP_DBG1, "nfp_ioctl: can't set ifvers %d as read outstanding", vers);
+ return EIO;
+ }
+
+ nfp_set_ifvers(pdev, vers);
+ if( pdev->ifvers != vers ) {
+ nfp_log( NFP_DBG1, "nfp_ioctl: can't set ifvers %d", vers);
+ return EIO;
+ }
+ }
+ break;
+
+ case NFDEV_IOCTL_STATS:
+ {
+ if( ddi_copyout( (char *)&(pdev->common.stats),
+ (void *)arg,
+ sizeof(nfdev_stats_str),
+ mode ) != 0 ) {
+ nfp_log( NFP_DBG1, "ddi_copyout() failed." );
+ return EFAULT;
+ }
+ }
+ break;
+
+ default:
+ nfp_log( NFP_DBG1, "nfp_ioctl: unknown ioctl." );
+ return EINVAL;
+ }
+
+ return 0;
+}
+
+/*-------------------------*/
+/* nfp_open */
+/*-------------------------*/
+
+/* ARGSUSED */
+int nfp_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
+{
+ nfp_err ret;
+ register struct nfp_dev *pdev;
+
+ nfp_log( NFP_DBG2, "entered nfp_open." );
+
+ pdev = (nfp_dev *)ddi_get_soft_state(state_head, getminor(*dev));
+
+ if( !pdev ) {
+ nfp_log( NFP_DBG1, "nfp_open: unable to get nfp dev.");
+ return (ENODEV);
+ }
+
+ if( otyp != OTYP_CHR ) {
+ nfp_log( NFP_DBG1, "nfp_open: not opened as character device");
+ return (EINVAL);
+ }
+
+ mutex_enter(&pdev->busy_mutex);
+
+ if (pdev->busy) {
+ mutex_exit(&pdev->busy_mutex);
+ nfp_log( NFP_DBG1, "nfp_open: device busy");
+ return EBUSY;
+ }
+ pdev->busy= 1;
+ mutex_exit(&pdev->busy_mutex);
+
+ /* use oldest possible interface until told otherwise */
+ pdev->ifvers= NFDEV_IF_STANDARD;
+ nfp_log( NFP_DBG3, "nfp_open: setting ifvers %d", pdev->ifvers);
+ pdev->rd_ready= 0; /* drop any old data */
+
+ ret = pdev->cmddev->open(pdev->common.cmdctx);
+ if( ret != NFP_SUCCESS ) {
+ nfp_log( NFP_DBG1, "nfp_open : cmddev->open failed ");
+ return nfp_oserr( ret );
+ }
+
+ nfp_log( NFP_DBG2, "nfp_open: done");
+
+ return 0;
+}
+
+/*--------------------*/
+/* nfp_close */
+/*--------------------*/
+
+/* ARGSUSED */
+static int nfp_close(dev_t dev, int openflags, int otyp, cred_t *credp) {
+ nfp_dev *pdev;
+ nfp_err ret;
+
+ nfp_log( NFP_DBG2, "nfp_close: entered");
+
+ pdev = (struct nfp_dev *)ddi_get_soft_state(state_head, getminor(dev));
+ if( !pdev ) {
+ nfp_log( NFP_DBG1, "nfp_close: cannot find dev.");
+ return ENODEV;
+ }
+
+ mutex_enter(&pdev->isr_mutex);
+ if(pdev->rd_outstanding) {
+ int lbolt, err;
+ nfp_get_lbolt(&lbolt, err);
+ if(!err)
+ (void) cv_timedwait(&pdev->rd_cv, &pdev->isr_mutex, lbolt + (NFP_TIMEOUT_SEC * drv_usectohz(1000000)) );
+ }
+ mutex_exit(&pdev->isr_mutex);
+ ret = pdev->cmddev->close(pdev->common.cmdctx);
+ if (ret != NFP_SUCCESS ) {
+ nfp_log( NFP_DBG1, " nfp_close : cmddev->close failed");
+ return nfp_oserr( ret );
+ }
+
+ mutex_enter(&pdev->busy_mutex);
+ pdev->busy= 0;
+ mutex_exit(&pdev->busy_mutex);
+
+ return 0;
+}
+
+/****************************************************************************
+
+ nfp driver config
+
+ ****************************************************************************/
+
+/*-------------------------*/
+/* nfp_getinfo */
+/*-------------------------*/
+
+/* ARGSUSED */
+static int nfp_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) {
+ int error;
+ nfp_dev *pdev;
+
+ nfp_log( NFP_DBG2, "nfp_getinfo: entered" );
+
+ pdev = (struct nfp_dev *)ddi_get_soft_state(state_head, getminor((dev_t)arg));
+ if( !pdev ) {
+ nfp_log( NFP_DBG1, "nfp_getinfo: cannot find dev.");
+ return ENODEV;
+ }
+
+ switch (infocmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ if (pdev == NULL) {
+ *result = NULL;
+ error = DDI_FAILURE;
+ } else {
+ /*
+ * don't need to use a MUTEX even though we are
+ * accessing our instance structure; dev->dip
+ * never changes.
+ */
+ *result = pdev->dip;
+ error = DDI_SUCCESS;
+ }
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+ *result = (void *)(uintptr_t)getminor((dev_t)arg);
+ error = DDI_SUCCESS;
+ break;
+ default:
+ *result = NULL;
+ error = DDI_FAILURE;
+ }
+
+ nfp_log( NFP_DBG2, "nfp_getinfo: leaving." );
+ return (error);
+}
+
+/*-------------------------*/
+/* nfp_release */
+/*-------------------------*/
+
+static int nfp_release_dev( dev_info_t *dip ) {
+ nfp_dev *pdev;
+ int instance, i;
+ nfp_err ret;
+
+ nfp_log( NFP_DBG2, "nfp_release_dev: entering" );
+
+ instance = ddi_get_instance(dip);
+ pdev = (struct nfp_dev *)ddi_get_soft_state(state_head, instance);
+ if (pdev) {
+ nfp_log( NFP_DBG3, "nfp_release_dev: removing device" );
+
+ nfp_free_pci_push(pdev);
+
+ if( pdev->cmddev ) {
+ nfp_log( NFP_DBG3, "nfp_release_dev: destroying cmd dev" );
+ ret = pdev->cmddev->destroy(pdev->common.cmdctx);
+ if (ret != NFP_SUCCESS) {
+ nfp_log( NFP_DBG1, " nfp_release_dev : cmddev->destroy failed ");
+ return nfp_oserr( ret );
+ }
+ }
+
+ if(pdev->high_iblock_cookie) {
+ nfp_log( NFP_DBG3, "nfp_release_dev: removing high and soft irq" );
+ ddi_remove_softintr(pdev->soft_int_id);
+ ddi_remove_intr(pdev->dip, 0, pdev->high_iblock_cookie);
+ mutex_destroy( &pdev->busy_mutex );
+ cv_destroy( &pdev->rd_cv );
+ mutex_destroy( &pdev->isr_mutex );
+ mutex_destroy( &pdev->high_mutex );
+ } else if(pdev->iblock_cookie) {
+ nfp_log( NFP_DBG3, "nfp_release_dev: removing irq" );
+ ddi_remove_intr(pdev->dip, 0, pdev->iblock_cookie);
+ mutex_destroy( &pdev->busy_mutex );
+ cv_destroy( &pdev->rd_cv );
+ mutex_destroy( &pdev->isr_mutex );
+ }
+ if(pdev->low_iblock_cookie) {
+ ddi_remove_intr(pdev->dip, 0, pdev->low_iblock_cookie);
+ mutex_destroy( &pdev->low_mutex);
+ }
+
+ for(i=0;i<6;i++) {
+ if( pdev->common.extra[i] ) {
+ nfp_log( NFP_DBG3, "nfp_release_dev: unmapping BAR %d", i );
+ ddi_regs_map_free ((ddi_acc_handle_t *)&pdev->common.extra[i]);
+ }
+ }
+
+ ddi_remove_minor_node(dip, NULL);
+
+ if (pdev->conf_handle)
+ pci_config_teardown( &pdev->conf_handle );
+
+ ddi_soft_state_free(state_head, instance);
+ }
+ nfp_log( NFP_DBG2, "nfp_release: finished" );
+
+ return DDI_SUCCESS;
+}
+
+
+/*-------------------------*/
+/* nfp_attach */
+/*-------------------------*/
+
+static int nfp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) {
+ int instance;
+ nfp_dev *pdev = NULL;
+ int intres;
+ uint16_t device, vendor, sub_device, sub_vendor;
+ long *outp;
+ nfpcmd_dev const *cmddev;
+ int index, i;
+ nfp_err ret;
+
+ nfp_log( NFP_DBG2, "nfp_attach: entered." );
+
+ if (cmd != DDI_ATTACH) {
+ nfp_log( NFP_DBG1, "nfp_attach: bad command." );
+ goto bailout;
+ }
+
+ instance = ddi_get_instance(dip);
+
+ if (ddi_soft_state_zalloc(state_head, instance) != 0) {
+ nfp_log( NFP_DBG1, "nfp_attach: ddi_soft_state_zalloc() failed." );
+ goto bailout;
+ }
+
+ pdev = (struct nfp_dev *)ddi_get_soft_state(state_head, instance);
+ if( !pdev ) {
+ nfp_log( NFP_DBG1, "nfp_attach: cannot find dev.");
+ goto bailout;
+ }
+ pdev->dip = dip;
+
+ /* map in pci config registers */
+ if (pci_config_setup(dip, &pdev->conf_handle)) {
+ nfp_log( NFP_DBG1, "nfp_attach: pci_config_setup() failed." );
+ goto bailout;
+ }
+
+ /* find out what we have got */
+ vendor= PCI_CONFIG_GET16( pdev->conf_handle, PCI_CONF_VENID );
+ device = PCI_CONFIG_GET16( pdev->conf_handle, PCI_CONF_DEVID );
+ sub_vendor = PCI_CONFIG_GET16( pdev->conf_handle, PCI_CONF_SUBVENID );
+ sub_device = PCI_CONFIG_GET16( pdev->conf_handle, PCI_CONF_SUBSYSID );
+
+ index= 0;
+ while( (cmddev = nfp_drvlist[index++]) != NULL ) {
+ if( cmddev->vendorid == vendor &&
+ cmddev->deviceid == device &&
+ cmddev->sub_vendorid == sub_vendor &&
+ cmddev->sub_deviceid == sub_device )
+ break;
+ }
+ if( !cmddev ) {
+ nfp_log( NFP_DBG1, "nfp_attach: unknonw device." );
+ goto bailout;
+ }
+
+ /* map BARs */
+ for( i=0; i<6; i++ ) {
+ if( cmddev->bar_sizes[i] ) {
+ off_t size;
+ if( ddi_dev_regsize(dip, i+1, &size) != DDI_SUCCESS) {
+ nfp_log( NFP_DBG1, "nfp_attach: ddi_dev_regsize() failed for BAR %d", i );
+ goto bailout;
+ }
+ if( size < (cmddev->bar_sizes[i] & ~NFP_MEMBAR_MASK) ) {
+ nfp_log( NFP_DBG1, "nfp_attach: BAR %d too small %x (%x)", i, size, (cmddev->bar_sizes[i] & ~0xF) );
+ goto bailout;
+ }
+ if (ddi_regs_map_setup(dip, i+1, (caddr_t *)&pdev->common.bar[i],
+ 0, cmddev->bar_sizes[i] & ~NFP_MEMBAR_MASK, &nosw_attr, (ddi_acc_handle_t *)&pdev->common.extra[i] )) {
+ nfp_log( NFP_DBG1, "nfp_attach: ddi_regs_map_setup() failed for BAR %d", i );
+ goto bailout;
+ }
+ nfp_log( NFP_DBG3, "nfp_attach: BAR[%d] mapped to %x (%x)", i, pdev->common.bar[i], size );
+ }
+ }
+
+ pdev->read_buf = NULL;
+ pdev->rd_dma_ok = 0;
+
+ /* attach to minor node */
+ if (ddi_create_minor_node(dip, "nfp", S_IFCHR, instance, (char *)cmddev->name, 0) == DDI_FAILURE) {
+ ddi_remove_minor_node(dip, NULL);
+ nfp_log( NFP_DBG1, "nfp_attach: ddi_create_minor_node() failed." );
+ goto bailout;
+ }
+
+ pdev->wr_ready = 1;
+ pdev->rd_ready = 0;
+ pdev->rd_pending = 0;
+ pdev->rd_outstanding = 0;
+ pdev->busy=0;
+ pdev->cmddev= cmddev;
+
+ ret = pdev->cmddev->create(&pdev->common);
+ if( ret != NFP_SUCCESS) {
+ nfp_log( NFP_DBG1, "nfp_attach: failed to create command device");
+ goto bailout;
+ }
+ pdev->common.dev= pdev;
+
+ if (ddi_intr_hilevel(dip, 0) != 0){
+ nfp_log( NFP_DBG2, "nfp_attach: high-level interrupt");
+ if( ddi_get_iblock_cookie(dip, 0, &pdev->high_iblock_cookie) ) {
+ nfp_log( NFP_DBG1, "nfp_attach: ddi_get_iblock_cookie(high) failed." );
+ goto bailout;
+ }
+ if( ddi_get_iblock_cookie(dip, 0, &pdev->low_iblock_cookie) ) {
+ nfp_log( NFP_DBG1, "nfp_attach: ddi_get_iblock_cookie(low) failed." );
+ goto bailout;
+ }
+ mutex_init(&pdev->high_mutex, NULL, MUTEX_DRIVER,
+ (void *)pdev->high_iblock_cookie);
+ mutex_init(&pdev->low_mutex, NULL, MUTEX_DRIVER,
+ (void *)pdev->low_iblock_cookie);
+ if (ddi_add_intr(dip, 0, NULL,
+ NULL, nfp_isr,
+ (caddr_t)pdev) != DDI_SUCCESS) {
+ nfp_log( NFP_DBG1, "nfp_attach: ddi_add_intr(high) failed." );
+ goto bailout;
+ }
+ if( ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_HIGH,
+ &pdev->iblock_cookie) ) {
+ nfp_log( NFP_DBG1, "nfp_attach: ddi_get_iblock_cookie(soft) failed." );
+ goto bailout;
+ }
+ mutex_init(&pdev->isr_mutex, NULL, MUTEX_DRIVER,
+ (void *)pdev->iblock_cookie);
+ if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH, &pdev->soft_int_id,
+ &pdev->iblock_cookie, NULL,
+ nfp_soft_isr, (caddr_t)pdev) != DDI_SUCCESS)
+ goto bailout;
+ pdev->high_intr= 1;
+ } else {
+ nfp_log( NFP_DBG2, "nfp_attach: low-level interrupt");
+
+ if (ddi_get_iblock_cookie (dip, 0, &pdev->iblock_cookie)) {
+ nfp_log( NFP_DBG1, "nfp_attach: ddi_get_iblock_cookie() failed." );
+ goto bailout;
+ }
+
+ mutex_init(&pdev->isr_mutex, "nfp isr mutex", MUTEX_DRIVER, (void *)pdev->iblock_cookie);
+
+ if (ddi_add_intr(dip, 0, NULL,
+ (ddi_idevice_cookie_t *)NULL, nfp_isr,
+ (caddr_t)pdev) != DDI_SUCCESS) {
+ nfp_log( NFP_DBG1, "nfp_attach: ddi_add_intr() failed." );
+ goto bailout;
+ }
+ }
+ mutex_init(&pdev->busy_mutex, "nfp busy mutex", MUTEX_DRIVER, NULL );
+ cv_init(&pdev->rd_cv, "nfp read condvar", CV_DRIVER, NULL );
+
+ /* get our bus and slot num */
+ if (ddi_getlongprop (DDI_DEV_T_NONE,
+ pdev->dip, 0, "reg",
+ (caddr_t)&outp, &intres) != DDI_PROP_NOT_FOUND) {
+ nfp_log( NFP_DBG2, "nfp_attach: ddi_getlongprop('reg') ok." );
+ if( intres > 0 ) {
+ nfp_log( NFP_DBG1, "nfp_attach: found PCI nfast bus %x slot %x.",
+ ((*outp)>>16) & 0xff, ((*outp)>>11) & 0x1f );
+ }
+ }
+
+ nfp_log( NFP_DBG2, "nfp_attach: attach succeeded." );
+ return DDI_SUCCESS;
+
+bailout:
+ (void) nfp_release_dev( dip );
+
+ return DDI_FAILURE;
+}
+
+/*-------------------------*/
+/* nfp_detach */
+/*-------------------------*/
+
+/*
+ * When our driver is unloaded, nfp_detach cleans up and frees the resources
+ * we allocated in nfp_attach.
+ */
+static int nfp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) {
+ if (cmd != DDI_DETACH)
+ return (DDI_FAILURE);
+
+ (void) nfp_release_dev(dip);
+
+ return (DDI_SUCCESS);
+}
+
+/*-------------------------*/
+/* _init */
+/*-------------------------*/
+
+int _init(void) {
+ register int error;
+
+ nfp_log( NFP_DBG2, "_init: entered" );
+
+ if ((error = ddi_soft_state_init(&state_head, sizeof (struct nfp_dev), 1)) != 0) {
+ nfp_log( NFP_DBG1, "_init: soft_state_init() failed" );
+ return (error);
+ }
+
+ if ((error = mod_install(&modlinkage)) != 0) {
+ nfp_log( NFP_DBG1, "_init: mod_install() failed" );
+ ddi_soft_state_fini(&state_head);
+ }
+
+ nfp_log( NFP_DBG2, "_init: leaving" );
+ return (error);
+}
+
+/*-------------------------*/
+/* _info */
+/*-------------------------*/
+
+int _info(struct modinfo *modinfop) {
+ nfp_log( NFP_DBG2, "_info: entered" );
+
+ return (mod_info(&modlinkage, modinfop));
+}
+
+/*-------------------------*/
+/* _fini */
+/*-------------------------*/
+
+int _fini(void) {
+ int status;
+
+ nfp_log( NFP_DBG2, "_fini: entered" );
+
+ if ((status = mod_remove(&modlinkage)) != 0) {
+ nfp_log( NFP_DBG2, "_fini: mod_remove() failed." );
+ return (status);
+ }
+
+ ddi_soft_state_fini(&state_head);
+
+ nfp_log( NFP_DBG2, "_fini: leaving" );
+
+ return (status);
+}
+
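One design point in hostif.c worth noting: on machines where the card's
interrupt is high-level, nfp_isr() runs at high IPL, so it only records the
completion under high_mutex and triggers a soft interrupt; nfp_soft_isr()
then does the heavyweight completion work (cv_broadcast(), biodone(),
pollwakeup()) at an IPL where that is safe. A stripped-down sketch of the
handoff, with the device-specific state (mydev_t, work_pending, complete_io)
invented for illustration:

	static u_int
	hw_isr(char *arg)			/* may run at high IPL */
	{
		mydev_t *dp = (mydev_t *)arg;

		mutex_enter(&dp->high_mutex);
		dp->work_pending = 1;		/* record only; no heavy work */
		mutex_exit(&dp->high_mutex);
		ddi_trigger_softintr(dp->soft_id);
		return (DDI_INTR_CLAIMED);
	}

	static u_int
	soft_isr(char *arg)			/* runs at soft IPL */
	{
		mydev_t *dp = (mydev_t *)arg;
		int work;

		mutex_enter(&dp->high_mutex);
		work = dp->work_pending;
		dp->work_pending = 0;
		mutex_exit(&dp->high_mutex);

		if (!work)
			return (DDI_INTR_UNCLAIMED);
		complete_io(dp);	/* cv_broadcast, biodone, pollwakeup */
		return (DDI_INTR_CLAIMED);
	}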
diff --git a/usr/src/uts/common/io/nfp/i21285.c b/usr/src/uts/common/io/nfp/i21285.c
new file mode 100644
index 0000000000..f51a09188d
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/i21285.c
@@ -0,0 +1,310 @@
+/*
+
+i21285.c: nCipher PCI HSM intel/digital 21285 command driver
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+
+history
+
+09/10/2001 jsh Original
+
+*/
+
+#include "nfp_common.h"
+#include "nfp_error.h"
+#include "nfp_hostif.h"
+#include "nfp_osif.h"
+#include "i21285.h"
+#include "nfp_cmd.h"
+#include "nfpci.h"
+
+/* create ------------------------------------------------------- */
+
+static nfp_err i21285_create( nfp_cdev *pdev ) {
+ unsigned int tmp32;
+
+ nfp_log( NFP_DBG2, "i21285_create: entered");
+ pdev->cmdctx= pdev; /* set our context to just be a pointer to our nfp_cdev */
+
+ nfp_log( NFP_DBG2, "i21285_create: enable doorbell");
+ if(!pdev->bar[ IOBAR ]) {
+ nfp_log( NFP_DBG1, "i21285_create: null BAR[%d]", IOBAR );
+ return NFP_ENOMEM;
+ }
+ TO_LE32_IO( &tmp32, DOORBELL_ENABLE | POSTLIST_ENABLE);
+ nfp_outl( pdev, IOBAR, I21285_OFFSET_INTERRUPT_MASK, tmp32 );
+
+ return NFP_SUCCESS;
+}
+
+/* stop ------------------------------------------------------- */
+
+static nfp_err i21285_destroy( void * ctx ) {
+ nfp_cdev *pdev;
+ unsigned int tmp32;
+
+ nfp_log( NFP_DBG2, "i21285_destroy: entered");
+
+ pdev= (nfp_cdev *)ctx;
+ if(!pdev) {
+ nfp_log( NFP_DBG1, "i21285_destroy: NULL pdev");
+ return NFP_ENODEV;
+ }
+ if(!pdev->bar[ IOBAR ]) {
+ nfp_log( NFP_DBG1, "i21285_destroy: null BAR[%d]", IOBAR );
+ return NFP_ENOMEM;
+ }
+ TO_LE32_IO( &tmp32, DOORBELL_DISABLE | POSTLIST_DISABLE );
+ nfp_outl( pdev, IOBAR, I21285_OFFSET_INTERRUPT_MASK, tmp32 );
+
+ return NFP_SUCCESS;
+}
+
+/* open ------------------------------------------------------- */
+
+/* ARGSUSED */
+static nfp_err i21285_open( void * ctx ) {
+ nfp_log( NFP_DBG2, "i21285_open: entered");
+
+ return NFP_SUCCESS;
+}
+
+/* close ------------------------------------------------------- */
+
+/* ARGSUSED */
+static nfp_err i21285_close( void * ctx ) {
+ nfp_log( NFP_DBG2, "i21285_close: entered");
+
+ return NFP_SUCCESS;
+}
+
+/* isr ------------------------------------------------------- */
+
+static nfp_err i21285_isr( void *ctx, int *handled ) {
+ nfp_cdev *pdev;
+ unsigned int doorbell;
+ unsigned int tmp32;
+
+ nfp_log( NFP_DBG3, "i21285_isr: entered");
+
+ *handled= 0;
+ pdev= (nfp_cdev *)ctx;
+ if(!pdev) {
+ nfp_log( NFP_DBG1, "i21285_isr: NULL pdev");
+ return NFP_ENODEV;
+ }
+
+ doorbell= nfp_inl( pdev, IOBAR, I21285_OFFSET_DOORBELL);
+ doorbell= FROM_LE32_IO(&doorbell) & 0xffff;
+ while( doorbell && doorbell != 0xffff) {
+ *handled= 1;
+ /* service interrupts */
+ if( doorbell & (NFAST_INT_DEVICE_WRITE_OK | NFAST_INT_DEVICE_WRITE_FAILED)) {
+ TO_LE32_IO( &tmp32, NFAST_INT_DEVICE_WRITE_OK | NFAST_INT_DEVICE_WRITE_FAILED);
+ nfp_outl( pdev, IOBAR, I21285_OFFSET_DOORBELL, tmp32 );
+
+ nfp_log(NFP_DBG2, "i21285_isr: write done interrupt, ok = %d.", doorbell & NFAST_INT_DEVICE_WRITE_OK ? 1 : 0 );
+
+ nfp_write_complete(pdev->dev, doorbell & NFAST_INT_DEVICE_WRITE_OK ? 1 : 0 );
+ }
+
+ if( doorbell & (NFAST_INT_DEVICE_READ_OK | NFAST_INT_DEVICE_READ_FAILED)) {
+ TO_LE32_IO( &tmp32, NFAST_INT_DEVICE_READ_OK | NFAST_INT_DEVICE_READ_FAILED );
+ nfp_outl( pdev, IOBAR, I21285_OFFSET_DOORBELL, tmp32 );
+
+ nfp_log(NFP_DBG2, "i21285_isr: read ack interrupt, ok = %d.", doorbell & NFAST_INT_DEVICE_READ_OK ? 1 : 0 );
+ nfp_read_complete( pdev->dev, doorbell & NFAST_INT_DEVICE_READ_OK ? 1 : 0);
+ }
+
+ if( doorbell & ~(NFAST_INT_DEVICE_READ_OK | NFAST_INT_DEVICE_READ_FAILED |
+ NFAST_INT_DEVICE_WRITE_OK | NFAST_INT_DEVICE_WRITE_FAILED)) {
+ nfp_log( NFP_DBG1, "i21285_isr: unexpected interrupt %x", doorbell );
+ TO_LE32_IO( &tmp32, 0xffff & doorbell );
+ nfp_outl( pdev, IOBAR, I21285_OFFSET_DOORBELL, tmp32 );
+ }
+ doorbell= nfp_inl( pdev, IOBAR, I21285_OFFSET_DOORBELL);
+ doorbell= FROM_LE32_IO(&doorbell) & 0xffff;
+ }
+ return 0;
+}
+
+/* write ------------------------------------------------------- */
+
+static nfp_err i21285_write( const char *block, int len, void *ctx ) {
+ nfp_cdev *cdev;
+ unsigned int hdr[2];
+ nfp_err ne;
+ unsigned int tmp32;
+
+ nfp_log( NFP_DBG2, "i21285_write: entered");
+
+ cdev= (nfp_cdev *)ctx;
+ if(!cdev) {
+ nfp_log( NFP_DBG1, "i21285_write: NULL pdev");
+ return NFP_ENODEV;
+ }
+
+ nfp_log(NFP_DBG2, "i21285_write: pdev->bar[ MEMBAR ]= %x\n", cdev->bar[ MEMBAR ]);
+ nfp_log(NFP_DBG2, "i21285_write: pdev->bar[ IOBAR ]= %x\n", cdev->bar[ IOBAR ]);
+ if(!cdev->bar[ MEMBAR ]) {
+ nfp_log( NFP_DBG1, "i21285_write: null BAR[%d]", MEMBAR );
+ return NFP_ENOMEM;
+ }
+ ne= nfp_copy_from_user_to_dev( cdev, MEMBAR, NFPCI_JOBS_WR_DATA, block, len);
+ if (ne) {
+ nfp_log( NFP_DBG1, "i21285_write: nfp_copy_from_user_to_dev failed");
+ return ne;
+ }
+ TO_LE32_MEM(&hdr[0], NFPCI_JOB_CONTROL);
+ TO_LE32_MEM(&hdr[1], len);
+
+ ne= nfp_copy_to_dev( cdev, MEMBAR, NFPCI_JOBS_WR_CONTROL, (const char *)hdr, 8);
+ if (ne) {
+ nfp_log( NFP_DBG1, "i21285_write: nfp_copy_to_dev failed");
+ return ne;
+ }
+
+ ne= nfp_copy_from_dev( cdev, MEMBAR, NFPCI_JOBS_WR_LENGTH, (char *)hdr, 4);
+ if (ne) {
+ nfp_log( NFP_DBG1, "i21285_write: nfp_copy_from_dev failed");
+ return ne;
+ }
+
+ TO_LE32_MEM( &tmp32, len );
+ if ( hdr[0] != tmp32 ) {
+ nfp_log( NFP_DBG1, "i21285_write: length not written");
+ return NFP_EIO;
+ }
+
+ TO_LE32_IO( &tmp32, NFAST_INT_HOST_WRITE_REQUEST);
+
+ nfp_outl( cdev, IOBAR, I21285_OFFSET_DOORBELL, tmp32 );
+
+ nfp_log( NFP_DBG2, "i21285_write: done");
+ return NFP_SUCCESS;
+}
+
+/* read ------------------------------------------------------- */
+
+static nfp_err i21285_read( char *block, int len, void *ctx, int *rcount) {
+ nfp_cdev *cdev;
+ nfp_err ne;
+ int count;
+
+ nfp_log( NFP_DBG2, "i21285_read: entered, len %d", len);
+ *rcount= 0;
+
+ cdev= (nfp_cdev *)ctx;
+ if(!cdev) {
+ nfp_log( NFP_DBG1, "i21285_read: NULL pdev");
+ return NFP_ENODEV;
+ }
+
+ if(!cdev->bar[ MEMBAR ]) {
+ nfp_log( NFP_DBG1, "i21285_read: null BAR[%d]", MEMBAR );
+ return NFP_ENOMEM;
+ }
+ ne= nfp_copy_from_dev( cdev, MEMBAR, NFPCI_JOBS_RD_LENGTH, (char *)&count, 4);
+ if(ne) {
+ nfp_log( NFP_DBG1, "i21285_read: nfp_copy_from_dev failed.");
+ return ne;
+ }
+ count= FROM_LE32_MEM(&count);
+ if(count<0 || count>len) {
+ nfp_log( NFP_DBG1, "i21285_read: bad byte count (%d) from device", count);
+ return NFP_EIO;
+ }
+ ne= nfp_copy_to_user_from_dev( cdev, MEMBAR, NFPCI_JOBS_RD_DATA, block, count);
+ if( ne ) {
+ nfp_log( NFP_DBG1, "i21285_read: nfp_copy_to_user_from_dev failed.");
+ return ne;
+ }
+ nfp_log( NFP_DBG2, "i21285_read: done");
+ *rcount= count;
+ return NFP_SUCCESS;
+}
+
+/* chupdate ------------------------------------------------------- */
+
+/* ARGSUSED */
+static nfp_err i21285_chupdate( char *data, int len, void *ctx ) {
+ nfp_log( NFP_DBG1, "i21285_chupdate: NYI");
+ return NFP_SUCCESS;
+}
+
+/* ensure reading -------------------------------------------------- */
+
+static nfp_err i21285_ensure_reading( unsigned int addr, int len, void *ctx ) {
+ nfp_cdev *cdev;
+ unsigned int hdr[2];
+ unsigned int tmp32;
+ nfp_err ne;
+
+ nfp_log( NFP_DBG2, "i21285_ensure_reading: entered");
+
+ if(addr) {
+ nfp_log( NFP_DBG2, "i21285_ensure_reading: bad addr");
+ return NFP_EINVAL;
+ }
+
+ cdev= (nfp_cdev *)ctx;
+ if(!cdev) {
+ nfp_log( NFP_DBG1, "i21285_ensure_reading: NULL pdev");
+ return NFP_ENODEV;
+ }
+
+ if(!cdev->bar[ MEMBAR ]) {
+ nfp_log( NFP_DBG1, "i21285_ensure_reading: null BAR[%d]", MEMBAR );
+ return NFP_ENXIO;
+ }
+ nfp_log( NFP_DBG3, "i21285_ensure_reading: pdev->bar[ MEMBAR ]= %x", cdev->bar[ MEMBAR ]);
+ nfp_log( NFP_DBG3, "i21285_ensure_reading: pdev->bar[ IOBAR ]= %x", cdev->bar[ IOBAR ]);
+ TO_LE32_MEM( &hdr[0], NFPCI_JOB_CONTROL);
+ TO_LE32_MEM( &hdr[1], len);
+ ne= nfp_copy_to_dev( cdev, MEMBAR, NFPCI_JOBS_RD_CONTROL, (const char *)hdr, 8);
+ if (ne) {
+ nfp_log( NFP_DBG1, "i21285_ensure_reading: nfp_copy_to_dev failed");
+ return ne;
+ }
+ ne= nfp_copy_from_dev( cdev, MEMBAR, NFPCI_JOBS_RD_LENGTH, (char *)hdr, 4);
+ if (ne) {
+ nfp_log( NFP_DBG1, "i21285_ensure_reading: nfp_copy_from_dev failed");
+ return ne;
+ }
+ TO_LE32_MEM( &tmp32, len );
+ if ( hdr[0] != tmp32 ) {
+ nfp_log( NFP_DBG1, "i21285_ensure_reading: len not written");
+ return NFP_EIO;
+ }
+ TO_LE32_IO( &tmp32, NFAST_INT_HOST_READ_REQUEST );
+ nfp_outl( cdev, IOBAR, I21285_OFFSET_DOORBELL, tmp32 );
+
+ return NFP_SUCCESS;
+}
+
+/* command device structure ------------------------------------- */
+
+
+const nfpcmd_dev i21285_cmddev = {
+ "nCipher Gen 1 PCI",
+ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285,
+ PCI_VENDOR_ID_NCIPHER, PCI_DEVICE_ID_NFAST_GEN1,
+ { 0, IOSIZE | PCI_BASE_ADDRESS_SPACE_IO, NFPCI_RAM_MINSIZE, 0, 0, 0 },
+ NFP_CMD_FLG_NEED_IOBUF,
+ i21285_create,
+ i21285_destroy,
+ i21285_open,
+ i21285_close,
+ i21285_isr,
+ i21285_write,
+ i21285_read,
+ i21285_chupdate,
+ i21285_ensure_reading,
+ 0, /* no debug */
+};
+
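The job submission in i21285_write() above is a four-step mailbox handshake.
Restated as a self-contained sketch using only the helpers this driver
defines (the wrapper function itself is illustrative, not driver code):

	static nfp_err
	mailbox_write(nfp_cdev *cdev, const char *job, int len)
	{
		unsigned int hdr[2], echo;

		/* 1. stage the job in device memory */
		if (nfp_copy_from_user_to_dev(cdev, MEMBAR,
		    NFPCI_JOBS_WR_DATA, job, len))
			return NFP_EIO;

		/* 2. publish the control word and job length */
		TO_LE32_MEM(&hdr[0], NFPCI_JOB_CONTROL);
		TO_LE32_MEM(&hdr[1], len);
		if (nfp_copy_to_dev(cdev, MEMBAR, NFPCI_JOBS_WR_CONTROL,
		    (const char *)hdr, 8))
			return NFP_EIO;

		/* 3. read the length back to confirm the write landed */
		if (nfp_copy_from_dev(cdev, MEMBAR, NFPCI_JOBS_WR_LENGTH,
		    (char *)hdr, 4))
			return NFP_EIO;
		TO_LE32_MEM(&echo, len);
		if (hdr[0] != echo)
			return NFP_EIO;

		/* 4. ring the doorbell to start the device */
		TO_LE32_IO(&echo, NFAST_INT_HOST_WRITE_REQUEST);
		nfp_outl(cdev, IOBAR, I21285_OFFSET_DOORBELL, echo);
		return NFP_SUCCESS;
	}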
diff --git a/usr/src/uts/common/io/nfp/i21285.h b/usr/src/uts/common/io/nfp/i21285.h
new file mode 100644
index 0000000000..4ea1d853ec
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/i21285.h
@@ -0,0 +1,43 @@
+/*
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+*/
+
+#ifndef NFP_I21285_H
+#define NFP_I21285_H
+
+#ifndef PCI_VENDOR_ID_DEC
+#define PCI_VENDOR_ID_DEC 0x1011
+#endif
+#ifndef PCI_DEVICE_ID_DEC_21285
+#define PCI_DEVICE_ID_DEC_21285 0x1065
+#endif
+#ifndef PCI_VENDOR_ID_NCIPHER
+#define PCI_VENDOR_ID_NCIPHER 0x0100
+#endif
+
+#ifndef PCI_DEVICE_ID_NFAST_GEN1
+#define PCI_DEVICE_ID_NFAST_GEN1 0x0100
+#endif
+
+#define I21285_OFFSET_DOORBELL 0x60
+#define I21285_OFFSET_INTERRUPT_MASK 0x34
+
+#define DOORBELL_ENABLE 0x0
+#define DOORBELL_DISABLE 0x4
+
+#define POSTLIST_ENABLE 0x0
+#define POSTLIST_DISABLE 0x8
+
+#define IOBAR 1
+#define MEMBAR 2
+
+#define IOSIZE 0x80
+#define MEMSIZE 0x100000
+
+#endif
diff --git a/usr/src/uts/common/io/nfp/i21555.c b/usr/src/uts/common/io/nfp/i21555.c
new file mode 100644
index 0000000000..82024dc800
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/i21555.c
@@ -0,0 +1,423 @@
+/*
+
+i21555.c: nCipher PCI HSM intel 21555 command driver
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+history
+
+09/10/2001 jsh Original
+
+*/
+
+#include "nfp_common.h"
+#include "nfp_error.h"
+#include "nfp_hostif.h"
+#include "nfp_osif.h"
+#include "i21555.h"
+#include "nfp_cmd.h"
+#include "nfpci.h"
+
+/* started ------------------------------------------------------
+ *
+ * Check that device is ready to talk, by checking that
+ * the i21555 has master enabled on its secondary interface
+ */
+
+static nfp_err i21555_started( nfp_cdev *pdev ) {
+ unsigned int tmp32;
+#ifdef CONFIGSPACE_DEBUG
+ unsigned int reg32[64];
+ int i;
+#endif
+ nfp_err ne;
+
+ nfp_log( NFP_DBG2, "i21555_started: entered");
+
+#ifdef CONFIGSPACE_DEBUG
+ /* Suck up all the registers */
+ for (i=0; i < 64; i++) {
+ ne = nfp_config_inl( pdev, i*4, &reg32[i] );
+ }
+
+ for (i=0; i < 16; i++) {
+ int j = i * 4;
+ nfp_log( NFP_DBG3, "i21555 config reg %2x: %08x %08x %08x %08x", j*4,
+ reg32[j], reg32[j+1], reg32[j+2], reg32[j+3]);
+ }
+#endif
+
+ ne = nfp_config_inl( pdev, I21555_CFG_SEC_CMD_STATUS, &tmp32 );
+ if (ne) {
+ /* succeed if PCI config reads are not implemented */
+ if (ne == NFP_EUNKNOWN)
+ return NFP_SUCCESS;
+ nfp_log( NFP_DBG1, "i21555_started: nfp_config_inl failed");
+ return ne;
+ }
+
+ tmp32= FROM_LE32_IO(&tmp32) & 0xffff;
+
+ if ( tmp32 & CFG_CMD_MASTER ) {
+ nfp_log( NFP_DBG3, "i21555_started: Yes %x", tmp32);
+ return NFP_SUCCESS;
+ } else {
+ nfp_log( NFP_DBG1, "i21555_started: device not started yet %x", tmp32);
+ return NFP_ESTARTING;
+ }
+}
+
+/* create ------------------------------------------------------- */
+
+static nfp_err i21555_create( nfp_cdev *pdev ) {
+ unsigned int tmp32;
+
+ nfp_log( NFP_DBG2, "i21555_create: entered");
+ pdev->cmdctx= pdev; /* set our context to just be a pointer to our nfp_cdev */
+
+ if(!pdev->bar[ IOBAR ]) {
+ nfp_log( NFP_DBG1, "i21555_create: null BAR[%d]", IOBAR );
+ return NFP_ENOMEM;
+ }
+ nfp_log( NFP_DBG2, "i21555_create: enable doorbell");
+ TO_LE32_IO( &tmp32, I21555_DOORBELL_PRI_ENABLE );
+ nfp_outl( pdev, IOBAR, I21555_OFFSET_DOORBELL_PRI_SET_MASK, tmp32 );
+ nfp_outl( pdev, IOBAR, I21555_OFFSET_DOORBELL_PRI_CLEAR_MASK, tmp32 );
+ return NFP_SUCCESS;
+}
+
+/* stop ------------------------------------------------------- */
+
+static nfp_err i21555_destroy( void * ctx ) {
+ nfp_cdev *pdev;
+ unsigned int tmp32;
+
+ nfp_log( NFP_DBG2, "i21555_destroy: entered");
+
+ pdev= (nfp_cdev *)ctx;
+ if(!pdev) {
+ nfp_log( NFP_DBG1, "i21555_destroy: NULL pdev");
+ return NFP_ENODEV;
+ }
+ if(!pdev->bar[ IOBAR ]) {
+ nfp_log( NFP_DBG1, "i21555_destroy: null BAR[%d]", IOBAR );
+ return NFP_ENOMEM;
+ }
+ TO_LE32_IO( &tmp32, I21555_DOORBELL_PRI_DISABLE );
+ nfp_outl( pdev, IOBAR, I21555_OFFSET_DOORBELL_PRI_SET_MASK, tmp32 );
+ nfp_outl( pdev, IOBAR, I21555_OFFSET_DOORBELL_PRI_CLEAR_MASK, tmp32 );
+
+ return NFP_SUCCESS;
+}
+
+/* open ------------------------------------------------------- */
+
+/* ARGSUSED */
+static nfp_err i21555_open( void * ctx ) {
+
+ nfp_log( NFP_DBG2, "i21555_open: entered");
+
+ return NFP_SUCCESS;
+}
+
+/* close ------------------------------------------------------- */
+
+/* ARGSUSED */
+static nfp_err i21555_close( void * ctx ) {
+ nfp_log( NFP_DBG2, "i21555_close: entered");
+
+ return NFP_SUCCESS;
+}
+
+/* isr ------------------------------------------------------- */
+
+static nfp_err i21555_isr( void *ctx, int *handled ) {
+ nfp_cdev *pdev;
+ nfp_err ne;
+ unsigned short doorbell;
+ unsigned short tmp16;
+
+ nfp_log( NFP_DBG3, "i21555_isr: entered");
+
+ *handled= 0;
+ pdev= (nfp_cdev *)ctx;
+ if(!pdev) {
+ nfp_log( NFP_DBG1, "i21555_isr: NULL pdev");
+ return NFP_ENODEV;
+ }
+
+ pdev->stats.isr++;
+
+ if(!pdev->bar[ IOBAR ]) {
+ nfp_log( NFP_DBG1, "i21555_isr: null BAR[%d]", IOBAR );
+ return NFP_ENOMEM;
+ }
+
+ /* This interrupt may not be from our module, so check that it actually is
+ * us before handling it.
+ */
+ ne = i21555_started( pdev );
+ if (ne) {
+ if (ne != NFP_ESTARTING) {
+ nfp_log( NFP_DBG1, "i21555_isr: i21555_started failed");
+ }
+ return ne;
+ }
+
+ doorbell= nfp_inw( pdev, IOBAR, I21555_OFFSET_DOORBELL_PRI_SET);
+ doorbell= FROM_LE16_IO(&doorbell);
+ while( doorbell && doorbell != 0xffff) {
+ *handled= 1;
+ /* service interrupts */
+ if( doorbell & (NFAST_INT_DEVICE_WRITE_OK | NFAST_INT_DEVICE_WRITE_FAILED)) {
+ pdev->stats.isr_write++;
+ TO_LE16_IO(&tmp16,NFAST_INT_DEVICE_WRITE_OK | NFAST_INT_DEVICE_WRITE_FAILED);
+ nfp_outw( pdev, IOBAR, I21555_OFFSET_DOORBELL_PRI_CLEAR, tmp16 );
+
+ nfp_log( NFP_DBG2, "i21555_isr: write done interrupt, ok = %d.", doorbell & NFAST_INT_DEVICE_WRITE_OK ? 1 : 0 );
+
+ nfp_write_complete(pdev->dev, doorbell & NFAST_INT_DEVICE_WRITE_OK ? 1 : 0 );
+ }
+
+ if( doorbell & (NFAST_INT_DEVICE_READ_OK | NFAST_INT_DEVICE_READ_FAILED)) {
+ pdev->stats.isr_read++;
+ TO_LE16_IO(&tmp16,NFAST_INT_DEVICE_READ_OK | NFAST_INT_DEVICE_READ_FAILED);
+ nfp_outw( pdev, IOBAR, I21555_OFFSET_DOORBELL_PRI_CLEAR, tmp16 );
+
+ nfp_log( NFP_DBG2, "i21555_isr: read ack interrupt, ok = %d.", doorbell & NFAST_INT_DEVICE_READ_OK ? 1 : 0 );
+ nfp_read_complete( pdev->dev, doorbell & NFAST_INT_DEVICE_READ_OK ? 1 : 0);
+ }
+
+ if( doorbell & ~(NFAST_INT_DEVICE_READ_OK | NFAST_INT_DEVICE_READ_FAILED |
+ NFAST_INT_DEVICE_WRITE_OK | NFAST_INT_DEVICE_WRITE_FAILED)) {
+ TO_LE16_IO(&tmp16,doorbell);
+ nfp_outw( pdev, IOBAR, I21555_OFFSET_DOORBELL_PRI_CLEAR, tmp16 );
+ nfp_log( NFP_DBG1, "i21555_isr: unexpected interrupt %x", doorbell );
+ }
+ doorbell= nfp_inw( pdev, IOBAR, I21555_OFFSET_DOORBELL_PRI_SET);
+ doorbell= FROM_LE16_IO(&doorbell);
+ }
+ nfp_log( NFP_DBG3, "i21555_isr: exiting");
+ return NFP_SUCCESS;
+}
+
+/* write ------------------------------------------------------- */
+
+static nfp_err i21555_write( const char *block, int len, void *ctx) {
+ nfp_cdev *cdev;
+ unsigned int hdr[2];
+ nfp_err ne;
+ unsigned short tmp16;
+ unsigned int tmp32;
+
+ nfp_log( NFP_DBG2, "i21555_write: entered");
+
+ cdev= (nfp_cdev *)ctx;
+ if(!cdev) {
+ nfp_log( NFP_DBG1, "i21555_write: NULL cdev");
+ return NFP_ENODEV;
+ }
+
+ cdev->stats.write_fail++;
+
+ if(!cdev->bar[ IOBAR ]) {
+ nfp_log( NFP_DBG1, "i21555_write: null BAR[%d]", IOBAR );
+ return NFP_ENOMEM;
+ }
+
+ ne = i21555_started( cdev );
+ if (ne) {
+ if (ne != NFP_ESTARTING) {
+ nfp_log( NFP_DBG1, "i21555_write: i21555_started failed");
+ }
+ return ne;
+ }
+
+ nfp_log( NFP_DBG3, "i21555_write: cdev->bar[ MEMBAR ]= %x", cdev->bar[ MEMBAR ]);
+ nfp_log( NFP_DBG3, "i21555_write: cdev->bar[ IOBAR ]= %x", cdev->bar[ IOBAR ]);
+ nfp_log( NFP_DBG3, "i21555_write: block len %d", len );
+ ne= nfp_copy_from_user_to_dev( cdev, MEMBAR, NFPCI_JOBS_WR_DATA, block, len);
+ if (ne) {
+ nfp_log( NFP_DBG1, "i21555_write: nfp_copy_from_user_to_dev failed");
+ return ne;
+ }
+ TO_LE32_MEM(&hdr[0], NFPCI_JOB_CONTROL);
+ TO_LE32_MEM(&hdr[1], len);
+ ne= nfp_copy_to_dev( cdev, MEMBAR, NFPCI_JOBS_WR_CONTROL, (const char *)hdr, 8);
+ if (ne) {
+ nfp_log( NFP_DBG1, "i21555_write: nfp_copy_to_dev failed");
+ return ne;
+ }
+
+ ne= nfp_copy_from_dev( cdev, MEMBAR, NFPCI_JOBS_WR_LENGTH, (char *)hdr, 4);
+ if (ne) {
+ nfp_log( NFP_DBG1, "i21555_write: nfp_copy_from_dev failed");
+ return ne;
+ }
+
+ TO_LE32_MEM(&tmp32, len);
+ if ( hdr[0] != tmp32 ) {
+ nfp_log( NFP_DBG1, "i21555_write: length not written");
+ return NFP_EIO;
+ }
+ TO_LE16_IO(&tmp16, NFAST_INT_HOST_WRITE_REQUEST >> 16);
+ nfp_outw( cdev, IOBAR, I21555_OFFSET_DOORBELL_SEC_SET, tmp16);
+
+ cdev->stats.write_fail--;
+ cdev->stats.write_block++;
+ cdev->stats.write_byte += len;
+
+ nfp_log( NFP_DBG2, "i21555_write: done");
+ return NFP_SUCCESS;
+}
+
+/* read ------------------------------------------------------- */
+
+static nfp_err i21555_read( char *block, int len, void *ctx, int *rcount) {
+ nfp_cdev *cdev;
+ nfp_err ne;
+ int count;
+
+ nfp_log( NFP_DBG2, "i21555_read: entered");
+ *rcount= 0;
+
+ cdev= (nfp_cdev *)ctx;
+ if(!cdev) {
+ nfp_log( NFP_DBG1, "i21555_read: NULL pdev");
+ return NFP_ENODEV;
+ }
+
+ cdev->stats.read_fail++;
+
+ if(!cdev->bar[ IOBAR ]) {
+ nfp_log( NFP_DBG1, "i21555_read: null BAR[%d]", IOBAR );
+ return NFP_ENOMEM;
+ }
+
+ ne= nfp_copy_from_dev( cdev, MEMBAR, NFPCI_JOBS_RD_LENGTH, (char *)&count, 4);
+ if (ne) {
+ nfp_log( NFP_DBG1, "i21555_read: nfp_copy_from_dev failed.");
+ return ne;
+ }
+ count= FROM_LE32_MEM(&count);
+ if(count<0 || count>len) {
+ nfp_log( NFP_DBG1, "i21555_read: bad byte count (%d) from device", count);
+ return NFP_EIO;
+ }
+ ne= nfp_copy_to_user_from_dev( cdev, MEMBAR, NFPCI_JOBS_RD_DATA, block, count);
+ if (ne) {
+ nfp_log( NFP_DBG1, "i21555_read: nfp_copy_to_user failed.");
+ return ne;
+ }
+ nfp_log( NFP_DBG2, "i21555_read: done");
+ *rcount= count;
+ cdev->stats.read_fail--;
+ cdev->stats.read_block++;
+ cdev->stats.read_byte += len;
+ return NFP_SUCCESS;
+}
+
+/* chupdate ------------------------------------------------------- */
+
+/* ARGSUSED */
+static nfp_err i21555_chupdate( char *data, int len, void *ctx ) {
+ nfp_log( NFP_DBG1, "i21555_chupdate: NYI");
+ return NFP_SUCCESS;
+}
+
+/* ensure reading -------------------------------------------------- */
+
+static nfp_err i21555_ensure_reading( unsigned int addr, int len, void *ctx ) {
+ nfp_cdev *cdev;
+ unsigned int hdr[3];
+ unsigned short tmp16;
+ unsigned int tmp32;
+ nfp_err ne;
+ int hdr_len;
+
+ nfp_log( NFP_DBG2, "i21555_ensure_reading: entered");
+
+ cdev= (nfp_cdev *)ctx;
+ if(!cdev) {
+ nfp_log( NFP_DBG1, "i21555_ensure_reading: NULL pdev");
+ return NFP_ENODEV;
+ }
+
+ cdev->stats.ensure_fail++;
+
+ if(!cdev->bar[ IOBAR ]) {
+ nfp_log( NFP_DBG1, "i21555_ensure_reading: null BAR[%d]", IOBAR );
+ return NFP_ENOMEM;
+ }
+
+ ne = i21555_started( cdev );
+ if (ne) {
+ if (ne != NFP_ESTARTING) {
+ nfp_log( NFP_DBG1, "i21555_ensure_reading: i21555_started failed");
+ }
+ return ne;
+ }
+
+ nfp_log( NFP_DBG3, "i21555_ensure_reading: pdev->bar[ MEMBAR ]= %x", cdev->bar[ MEMBAR ]);
+ nfp_log( NFP_DBG3, "i21555_ensure_reading: pdev->bar[ IOBAR ]= %x", cdev->bar[ IOBAR ]);
+ if(addr) {
+ nfp_log( NFP_DBG3, "i21555_ensure_reading: new format, addr %x", addr);
+ TO_LE32_MEM(&hdr[0], NFPCI_JOB_CONTROL_PCI_PUSH);
+ TO_LE32_MEM(&hdr[1], len);
+ TO_LE32_MEM(&hdr[2], addr);
+ hdr_len= 12;
+ } else {
+ TO_LE32_MEM(&hdr[0], NFPCI_JOB_CONTROL);
+ TO_LE32_MEM(&hdr[1], len);
+ hdr_len= 8;
+ }
+ ne= nfp_copy_to_dev( cdev, MEMBAR, NFPCI_JOBS_RD_CONTROL, (const char *)hdr, hdr_len);
+ if (ne) {
+ nfp_log( NFP_DBG1, "i21555_ensure_reading: nfp_copy_to_dev failed");
+ return ne;
+ }
+
+ ne= nfp_copy_from_dev( cdev, MEMBAR, NFPCI_JOBS_RD_LENGTH, (char *)hdr, 4);
+ if (ne) {
+ nfp_log( NFP_DBG1, "i21555_ensure_reading: nfp_copy_from_dev failed");
+ return ne;
+ }
+
+ TO_LE32_MEM(&tmp32, len);
+
+ if ( hdr[0] != tmp32 ) {
+ nfp_log( NFP_DBG1, "i21555_ensure_reading: len not written");
+ return NFP_EIO;
+ }
+ TO_LE16_IO( &tmp16, NFAST_INT_HOST_READ_REQUEST >> 16);
+ nfp_outw( cdev, IOBAR, I21555_OFFSET_DOORBELL_SEC_SET, tmp16);
+
+ cdev->stats.ensure_fail--;
+ cdev->stats.ensure++;
+
+ return NFP_SUCCESS;
+}
+
+/* command device structure ------------------------------------- */
+
+const nfpcmd_dev i21555_cmddev = {
+ "nCipher Gen 2 PCI",
+ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_21555,
+ PCI_VENDOR_ID_NCIPHER, PCI_SUBSYSTEM_ID_NFAST_REV1,
+ { 0, IOSIZE | PCI_BASE_ADDRESS_SPACE_IO, NFPCI_RAM_MINSIZE_JOBS, 0, 0, 0 },
+ NFP_CMD_FLG_NEED_IOBUF,
+ i21555_create,
+ i21555_destroy,
+ i21555_open,
+ i21555_close,
+ i21555_isr,
+ i21555_write,
+ i21555_read,
+ i21555_chupdate,
+ i21555_ensure_reading,
+ i21555_debug,
+};
diff --git a/usr/src/uts/common/io/nfp/i21555.h b/usr/src/uts/common/io/nfp/i21555.h
new file mode 100644
index 0000000000..d8f3965938
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/i21555.h
@@ -0,0 +1,51 @@
+/*
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+*/
+
+#ifndef I21555_H
+#define I21555_H
+
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+
+#ifndef PCI_DEVICE_ID_INTEL_21555
+#define PCI_DEVICE_ID_INTEL_21555 0xb555
+#endif
+
+#ifndef PCI_VENDOR_ID_NCIPHER
+#define PCI_VENDOR_ID_NCIPHER 0x0100
+#endif
+
+#ifndef PCI_SUBSYSTEM_ID_NFAST_REV1
+#define PCI_SUBSYSTEM_ID_NFAST_REV1 0x0100
+#endif
+
+#define I21555_OFFSET_DOORBELL_PRI_SET 0x9C
+#define I21555_OFFSET_DOORBELL_SEC_SET 0x9E
+#define I21555_OFFSET_DOORBELL_PRI_CLEAR 0x98
+
+#define I21555_OFFSET_DOORBELL_PRI_SET_MASK 0xA4
+#define I21555_OFFSET_DOORBELL_PRI_CLEAR_MASK 0xA0
+
+#define I21555_DOORBELL_PRI_ENABLE 0x0000
+#define I21555_DOORBELL_PRI_DISABLE 0xFFFF
+
+#define I21555_CFG_SEC_CMD_STATUS 0x44
+
+#define CFG_CMD_MASTER 0x0004
+
+#define IOBAR 1
+#define MEMBAR 2
+
+#define IOSIZE 0x100
+
+extern nfp_err i21555_debug( int cmd, void *ctx );
+
+#endif
diff --git a/usr/src/uts/common/io/nfp/i21555d.c b/usr/src/uts/common/io/nfp/i21555d.c
new file mode 100644
index 0000000000..183ace8275
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/i21555d.c
@@ -0,0 +1,28 @@
+/*
+
+i21555d.c: nCipher PCI HSM intel 21555 debug ioctl
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+
+history
+
+15/05/2002 jsh Original, does nothing
+
+*/
+
+#include "nfp_common.h"
+#include "nfp_error.h"
+#include "nfp_osif.h"
+#include "i21555.h"
+
+/* ARGSUSED */
+nfp_err i21555_debug( int cmd, void *ctx) {
+ nfp_log( NFP_DBG1, "i21555_debug: entered");
+
+ return NFP_EUNKNOWN;
+}
diff --git a/usr/src/uts/common/io/nfp/nfdev-common.h b/usr/src/uts/common/io/nfp/nfdev-common.h
new file mode 100644
index 0000000000..8a97bf2c63
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/nfdev-common.h
@@ -0,0 +1,141 @@
+/*
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+*/
+/** \file nfdev-common.h
+ *
+ * \brief nFast device driver (not generic SCSI) ioctl struct definition file
+ * include NFDEV-$(system) for ioctl number definitions
+ *
+ * 1998.07.13 jsh Started
+ *
+ *
+ */
+
+#ifndef NFDEV_COMMON_H
+#define NFDEV_COMMON_H
+
+/**
+ * Result of the ENQUIRY ioctl.
+ */
+typedef struct nfdev_enquiry_str {
+ unsigned int busno; /**< Which bus is the PCI device on. */
+ unsigned char slotno; /**< Which slot is the PCI device in. */
+ unsigned char reserved[3]; /**< for consistent struct alignment */
+} nfdev_enquiry_str;
+
+/**
+ * Result of the STATS ioctl.
+ */
+typedef struct nfdev_stats_str {
+ unsigned long isr; /**< Count interrupts. */
+ unsigned long isr_read; /**< Count read interrupts. */
+ unsigned long isr_write; /**< Count write interrupts. */
+ unsigned long write_fail; /**< Count write failures. */
+ unsigned long write_block; /**< Count blocks written. */
+ unsigned long write_byte; /**< Count bytes written. */
+ unsigned long read_fail; /**< Count read failures. */
+ unsigned long read_block; /**< Count blocks read. */
+ unsigned long read_byte; /**< Count bytes read. */
+ unsigned long ensure_fail; /**< Count read request failures. */
+ unsigned long ensure; /**< Count read requests. */
+} nfdev_stats_str;
+
+/**
+ * Input to the CONTROL ioctl.
+ */
+typedef struct nfdev_control_str {
+ unsigned control; /**< Control flags. */
+} nfdev_control_str;
+
+/** Control bit indicating host supports MOI control */
+#define NFDEV_CONTROL_HOST_MOI 0x0001
+
+/** Index of control bits indicating desired mode
+ *
+ * Desired mode follows the M_ModuleMode enumeration.
+ */
+#define NFDEV_CONTROL_MODE_SHIFT 1
+
+/** Detect a backwards-compatible control value
+ *
+ * Returns true if the requested control value "makes no difference", i.e.
+ * the failure of an attempt to set it is uninteresting.
+ */
+#define NFDEV_CONTROL_HARMLESS(c) ((c) <= 1)
+
+/**
+ * Result of the STATUS ioctl.
+ */
+typedef struct nfdev_status_str {
+ unsigned status; /**< Status flags. */
+ char error[8]; /**< Error string. */
+} nfdev_status_str;
+
+/** Monitor firmware supports MOI control and error reporting */
+#define NFDEV_STATUS_MONITOR_MOI 0x0001
+
+/** Application firmware supports MOI control and error reporting */
+#define NFDEV_STATUS_APPLICATION_MOI 0x0002
+
+/** Application firmware running and supports error reporting */
+#define NFDEV_STATUS_APPLICATION_RUNNING 0x0004
+
+/** HSM failed
+ *
+ * Consult error[] for additional information.
+ */
+#define NFDEV_STATUS_FAILED 0x0008
+
+/** Standard PCI interface. */
+#define NFDEV_IF_STANDARD 0x01
+
+/** PCI interface with results pushed from device
+ * via DMA.
+ */
+#define NFDEV_IF_PCI_PUSH 0x02
+
+/* platform independent base ioctl numbers */
+
+/** Enquiry ioctl.
+ * \return nfdev_enquiry_str describing the attached device. */
+#define NFDEV_IOCTL_NUM_ENQUIRY 0x01
+/** Channel Update ioctl.
+ * \deprecated */
+#define NFDEV_IOCTL_NUM_CHUPDATE 0x02
+/** Ensure Reading ioctl.
+ * Signal a read request to the device.
+ * \param (unsigned int) Length of data to be read.
+ */
+#define NFDEV_IOCTL_NUM_ENSUREREADING 0x03
+/** Device Count ioctl.
+ * Not implemented on all platforms.
+ * \return (int) the number of attached devices. */
+#define NFDEV_IOCTL_NUM_DEVCOUNT 0x04
+/** Internal Debug ioctl.
+ * Not implemented in release drivers. */
+#define NFDEV_IOCTL_NUM_DEBUG 0x05
+/** PCI Interface Version ioctl.
+ * \param (int) Maximum PCI interface version
+ * supported by the user of the device. */
+#define NFDEV_IOCTL_NUM_PCI_IFVERS 0x06
+/** Statistics ioctl.
+ * \return nfdev_stats_str containing the device statistics. */
+#define NFDEV_IOCTL_NUM_STATS 0x07
+
+/** Module control ioctl
+ * \param (nfdev_control_str) Value to write to HSM control register
+ */
+#define NFDEV_IOCTL_NUM_CONTROL 0x08
+
+/** Module state ioctl
+ * \return (nfdev_status_str) Values read from HSM status/error registers
+ */
+#define NFDEV_IOCTL_NUM_STATUS 0x09
+
+#endif
diff --git a/usr/src/uts/common/io/nfp/nfdev-solaris.h b/usr/src/uts/common/io/nfp/nfdev-solaris.h
new file mode 100644
index 0000000000..923b902e46
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/nfdev-solaris.h
@@ -0,0 +1,37 @@
+/*
+
+nfdev-solaris.h: nFast solaris specific device ioctl interface.
+
+(C) Copyright nCipher Corporation Ltd 1998-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+history
+
+14/07/1998 jsh Original
+
+*/
+
+#ifndef NFDEV_SOLARIS_H
+#define NFDEV_SOLARIS_H
+
+#include "nfdev-common.h"
+
+#define NFDEV_IOCTL_TYPE ('n'<<8)
+
+#define NFDEV_IOCTL_ENQUIRY ( NFDEV_IOCTL_TYPE | \
+ NFDEV_IOCTL_NUM_ENQUIRY )
+#define NFDEV_IOCTL_ENSUREREADING ( NFDEV_IOCTL_TYPE | \
+ NFDEV_IOCTL_NUM_ENSUREREADING )
+#define NFDEV_IOCTL_DEVCOUNT ( NFDEV_IOCTL_TYPE | \
+ NFDEV_IOCTL_NUM_DEVCOUNT )
+#define NFDEV_IOCTL_DEBUG ( NFDEV_IOCTL_TYPE | \
+ NFDEV_IOCTL_NUM_DEBUG )
+#define NFDEV_IOCTL_PCI_IFVERS ( NFDEV_IOCTL_TYPE | \
+ NFDEV_IOCTL_NUM_PCI_IFVERS )
+#define NFDEV_IOCTL_STATS ( NFDEV_IOCTL_TYPE | \
+ NFDEV_IOCTL_NUM_STATS )
+
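+/*
+ * Purely illustrative sketch (not part of the original header): how a
+ * userland consumer might fetch driver statistics with NFDEV_IOCTL_STATS.
+ * The device path "/dev/nfast0" is a hypothetical placeholder; the real
+ * minor node name is created by the driver's attach code.
+ */
+#if 0	/* example only, never compiled */
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+
+static int example_read_stats(void) {
+	nfdev_stats_str stats;
+	int fd = open("/dev/nfast0", O_RDWR);	/* hypothetical path */
+	if (fd < 0)
+		return -1;
+	if (ioctl(fd, NFDEV_IOCTL_STATS, &stats) < 0) {
+		(void) close(fd);
+		return -1;
+	}
+	(void) printf("isr=%lu rd=%lu wr=%lu\n",
+	    stats.isr, stats.isr_read, stats.isr_write);
+	(void) close(fd);
+	return 0;
+}
+#endif
+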
+#endif /* NFDEV_SOLARIS_H */
diff --git a/usr/src/uts/common/io/nfp/nfp.h b/usr/src/uts/common/io/nfp/nfp.h
new file mode 100644
index 0000000000..9704f04fbc
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/nfp.h
@@ -0,0 +1,113 @@
+/*
+
+nfp.h: nFast PCI driver for Solaris 2.5, 2.6 and 2.7
+
+(C) Copyright nCipher Corporation Ltd 2001-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+history
+
+06/05/1998 jsh Original solaris 2.6
+21/05/1999 jsh added support for solaris 2.5
+10/06/1999 jsh added support for solaris 2.7 (32 and 64 bit)
+16/10/2001 jsh moved from nfast to new structure in nfdrv
+
+*/
+
+#ifndef NFP_H
+#define NFP_H
+
+#ifndef _KERNEL
+#error Hello? this is a driver, please compile with -D_KERNEL
+#endif
+
+#if ( CH_KERNELVER < 260 )
+typedef int ioctlptr_t;
+typedef unsigned short uint16_t;
+#define DDI_GET32 ddi_getl
+#define DDI_PUT32 ddi_putl
+#define DDI_GET16 ddi_getw
+#define DDI_PUT16 ddi_putw
+#define DDI_REP_GET8 ddi_rep_getb
+#define DDI_REP_PUT8 ddi_rep_putb
+#define DDI_REP_GET32 ddi_rep_getl
+#define DDI_REP_PUT32 ddi_rep_putl
+#define PCI_CONFIG_GET16 pci_config_getw
+#else /* ( CH_KERNELVER >= 260 ) */
+typedef intptr_t ioctlptr_t;
+#define DDI_GET32 ddi_get32
+#define DDI_PUT32 ddi_put32
+#define DDI_GET16 ddi_get16
+#define DDI_PUT16 ddi_put16
+#define DDI_REP_GET8 ddi_rep_get8
+#define DDI_REP_PUT8 ddi_rep_put8
+#define DDI_REP_GET32 ddi_rep_get32
+#define DDI_REP_PUT32 ddi_rep_put32
+#define PCI_CONFIG_GET16 pci_config_get16
+#endif
+
+#if ( CH_KERNELVER < 270 )
+typedef int nfp_timeout_t;
+#define EXTRA_CB_FLAGS 0
+#define VSXPRINTF(s, n, format, ap) vsprintf (s, format, ap)
+#else /* ( CH_KERNELVER >= 270 ) */
+typedef timeout_id_t nfp_timeout_t;
+#define EXTRA_CB_FLAGS D_64BIT
+#define VSXPRINTF(s, n, format, ap) vsnprintf(s, n, format, ap)
+#endif
+
+typedef struct nfp_dev {
+ int rd_ok;
+ int wr_ok;
+
+ int ifvers;
+
+ /* for PCI push read interface */
+ unsigned char *read_buf;
+ ddi_dma_handle_t read_dma_handle;
+ ddi_dma_cookie_t read_dma_cookie;
+
+ ddi_acc_handle_t acchandle;
+
+ int rd_dma_ok;
+
+ nfp_timeout_t wrtimeout;
+ nfp_timeout_t rdtimeout;
+
+ struct buf *wr_bp;
+ int wr_ready;
+ int rd_ready;
+ int rd_pending;
+ int rd_outstanding;
+ kcondvar_t rd_cv;
+
+ struct pollhead pollhead;
+ dev_info_t *dip;
+
+ ddi_iblock_cookie_t high_iblock_cookie; /* for mutex */
+ ddi_iblock_cookie_t low_iblock_cookie; /* for mutex */
+ kmutex_t high_mutex;
+ kmutex_t low_mutex;
+ int high_intr;
+ ddi_softintr_t soft_int_id;
+ int high_read;
+ int high_write;
+
+ ddi_iblock_cookie_t iblock_cookie; /* for mutex */
+ kmutex_t isr_mutex;
+
+ kmutex_t busy_mutex;
+ int busy;
+
+ ddi_acc_handle_t conf_handle;
+
+ nfp_cdev common;
+ const nfpcmd_dev *cmddev;
+} nfp_dev;
+
+extern struct nfp_dev *nfp_dev_list[];
+
+#endif /* NFP_H */
diff --git a/usr/src/uts/common/io/nfp/nfp_cmd.h b/usr/src/uts/common/io/nfp/nfp_cmd.h
new file mode 100644
index 0000000000..db8af0b2f9
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/nfp_cmd.h
@@ -0,0 +1,68 @@
+/*
+
+nfp_cmd.h: nCipher PCI HSM command driver declarations
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+history
+
+10/10/2001 jsh Original
+
+*/
+
+#ifndef NFPCMD_H
+#define NFPCMD_H
+
+#include "nfp_hostif.h"
+#include "nfp_error.h"
+
+/* read and write called with userspace buffer */
+
+typedef struct nfpcmd_dev {
+ const char *name;
+ unsigned short vendorid, deviceid,
+ sub_vendorid, sub_deviceid;
+ unsigned int bar_sizes[6]; /* includes IO bit */
+ unsigned int flags;
+ nfp_err (*create)(struct nfp_cdev *pdev);
+ nfp_err (*destroy)(void * ctx);
+ nfp_err (*open)(void * ctx);
+ nfp_err (*close)(void * ctx);
+ nfp_err (*isr)(void *ctx, int *handled);
+ nfp_err (*write_block)( const char *ublock, int len, void *ctx );
+ nfp_err (*read_block)( char *ublock, int len, void *ctx, int *rcount);
+ nfp_err (*channel_update)( char *data, int len, void *ctx);
+ nfp_err (*ensure_reading)( unsigned int addr, int len, void *ctx );
+ nfp_err (*debug)( int cmd, void *ctx);
+} nfpcmd_dev;
+
+#define NFP_CMD_FLG_NEED_IOBUF 0x1
+
+/* list of all supported drivers ---------------------------------------- */
+
+extern const nfpcmd_dev *nfp_drvlist[];
+
+extern const nfpcmd_dev i21285_cmddev;
+extern const nfpcmd_dev i21555_cmddev;
+extern const nfpcmd_dev bcm5820_cmddev;
+
+#ifndef PCI_BASE_ADDRESS_SPACE_IO
+#define PCI_BASE_ADDRESS_SPACE_IO 0x1
+#endif
+
+#define NFP_MAXDEV 16
+
+
+#define NFP_MEMBAR_MASK ~0xf
+#define NFP_IOBAR_MASK ~0x3
+/*
+ This masks off the bottom bits of the PCI_CSR_BAR which signify that the
+ BAR is an IO BAR rather than a MEM BAR
+*/
+
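+/*
+ * Purely illustrative sketch (not from the original source): a probe
+ * routine can walk nfp_drvlist to find the command driver matching a
+ * device's PCI IDs.  The NULL terminator on the list is an assumption
+ * made for this example.
+ */
+#if 0	/* example only, never compiled */
+static const nfpcmd_dev *
+example_find_cmddev(unsigned short vendor, unsigned short device) {
+	int i;
+
+	for (i = 0; nfp_drvlist[i] != NULL; i++) {
+		if (nfp_drvlist[i]->vendorid == vendor &&
+		    nfp_drvlist[i]->deviceid == device)
+			return nfp_drvlist[i];
+	}
+	return NULL;
+}
+#endif
+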
+#endif
+
diff --git a/usr/src/uts/common/io/nfp/nfp_common.h b/usr/src/uts/common/io/nfp/nfp_common.h
new file mode 100644
index 0000000000..d1d2100fea
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/nfp_common.h
@@ -0,0 +1,68 @@
+/*
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+*/
+
+#ifndef NFP_COMMON_H
+#define NFP_COMMON_H
+
+#include <sys/types.h>
+#include <sys/conf.h>
+
+typedef uint32_t UINT32;
+typedef uint8_t BYTE;
+
+#define DEFINE_NFPCI_PACKED_STRUCTS
+#include "nfpci.h"
+#include "nfdev-solaris.h"
+
+typedef int oserr_t;
+
+#if CH_BIGENDIAN
+
+/* Big Endian Sparc */
+
+#define SWP32(x) \
+( (((unsigned int)(x)>>24)&0xff) | (((unsigned int)(x)>>8)&0xff00) | (((unsigned int)(x)<<8)&0xff0000) | (((unsigned int)(x)<<24)&0xff000000) )
+
+#define SWP16(x) ( (((x)>>8)&0xff) | (((x)<<8)&0xff00) )
+
+#define FROM_LE32_IO(x) SWP32(*x)
+#define TO_LE32_IO(x,y) *x=SWP32(y)
+
+#define FROM_LE32_MEM(x) SWP32(*x)
+#define TO_LE32_MEM(x,y) *x=SWP32(y)
+
+#define FROM_LE16_IO(x) SWP16(*x)
+#define TO_LE16_IO(x,y) *x=SWP16(y)
+
+#else
+
+/* Little Endian x86 */
+
+#define FROM_LE32_IO(x) (*x)
+#define TO_LE32_IO(x,y) (*x=y)
+
+#define FROM_LE32_MEM(x) (*x)
+#define TO_LE32_MEM(x,y) (*x=y)
+
+#define FROM_LE16_IO(x) (*x)
+#define TO_LE16_IO(x,y) (*x=y)
+
+#endif /* !CH_BIGENDIAN */
+
+#include <sys/types.h>
+
+#if CH_KERNELVER == 260
+#define nfp_get_lbolt( lbolt, err ) err= drv_getparm( LBOLT, lbolt )
+#else
+#define nfp_get_lbolt( lbolt, err ) { *lbolt= ddi_get_lbolt(); err= 0; }
+#endif
+
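+/*
+ * Purely illustrative sketch: the TO_/FROM_ macros above take a pointer
+ * to the word holding the little-endian value, so a round trip through
+ * the device's shared RAM byte order looks like this (as in the command
+ * drivers' write paths).
+ */
+#if 0	/* example only, never compiled */
+static void example_endian(unsigned int len) {
+	unsigned int wire, host;
+
+	TO_LE32_MEM(&wire, len);	/* host value -> little-endian word */
+	host = FROM_LE32_MEM(&wire);	/* little-endian word -> host value */
+	/* host now equals len on both big- and little-endian kernels */
+}
+#endif
+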
+#endif
+
diff --git a/usr/src/uts/common/io/nfp/nfp_error.h b/usr/src/uts/common/io/nfp/nfp_error.h
new file mode 100644
index 0000000000..d64cb78fd4
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/nfp_error.h
@@ -0,0 +1,48 @@
+/*
+
+nfp_error.h: nCipher PCI HSM error handling
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+history
+
+05/12/2001 jsh Original
+
+*/
+
+#ifndef NFP_ERROR_H
+#define NFP_ERROR_H
+
+#include "nfp_common.h"
+
+#define NFP_SUCCESS 0x0
+#define NFP_EFAULT 0x1
+#define NFP_ENOMEM 0x2
+#define NFP_EINVAL 0x3
+#define NFP_EIO 0x4
+#define NFP_ENXIO 0x5
+#define NFP_ENODEV 0x6
+#define NFP_EINTR 0x7
+#define NFP_ESTARTING 0x8
+#define NFP_EAGAIN 0x9
+#define NFP_EUNKNOWN 0x100
+
+typedef int nfp_err;
+
+extern oserr_t nfp_oserr( nfp_err nerr );
+extern nfp_err nfp_error( oserr_t oerr );
+
+#define nfr( x) \
+ return nfp_error((x))
+
+#define nfer(x, fn, msg) \
+ { oserr_t err=(x); if(err) { nfp_log( NFP_DBG1, #fn ": " msg); return nfp_error(err); } }
+
+#define er(x, fn, msg ) \
+{ nfp_err err=(x); if(err) { nfp_log( NFP_DBG1, #fn ": " msg); return err; } }
+
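+/*
+ * Purely illustrative sketch: the er() macro above logs a failing
+ * nfp_err and propagates it, so callers can chain operations without
+ * repeating the error boilerplate.  nfp_copy_to_dev() and nfp_log()
+ * come from nfp_osif.h; the bar number 2 stands in for MEMBAR here.
+ */
+#if 0	/* example only, never compiled */
+static nfp_err example_send(nfp_cdev *cdev, const char *kbuf, int len) {
+	er( nfp_copy_to_dev(cdev, 2, 0, kbuf, len),
+	    example_send, "nfp_copy_to_dev failed" );
+	return NFP_SUCCESS;
+}
+#endif
+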
+#endif
diff --git a/usr/src/uts/common/io/nfp/nfp_hostif.h b/usr/src/uts/common/io/nfp/nfp_hostif.h
new file mode 100644
index 0000000000..3e7d8187e5
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/nfp_hostif.h
@@ -0,0 +1,54 @@
+/*
+
+nfp_hostif.h: nCipher PCI HSM host interface declarations
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+history
+
+10/10/2001 jsh Original
+
+*/
+
+#ifndef NFP_HOSTIF_H
+#define NFP_HOSTIF_H
+
+#include "nfdev-common.h"
+
+struct nfp_dev;
+
+/* common device structure */
+
+typedef struct nfp_cdev {
+ unsigned char *bar[6];
+ void *extra[6];
+
+ int busno;
+ int slotno;
+
+ void *cmdctx;
+
+ char *iobuf;
+
+ struct nfp_dev* dev;
+
+ struct nfdev_stats_str stats;
+
+} nfp_cdev;
+
+/* callbacks from command drivers -------------------------------------- */
+
+void nfp_read_complete( struct nfp_dev *pdev, int ok);
+void nfp_write_complete( struct nfp_dev *pdev, int ok);
+
+#define NFP_READ_MAX (8 * 1024)
+#define NFP_READBUF_SIZE (NFP_READ_MAX + 8)
+#define NFP_TIMEOUT_SEC 10
+
+#define NFP_DRVNAME "nCipher nFast PCI driver"
+
+#endif
diff --git a/usr/src/uts/common/io/nfp/nfp_ifvers.c b/usr/src/uts/common/io/nfp/nfp_ifvers.c
new file mode 100644
index 0000000000..807b4f24c5
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/nfp_ifvers.c
@@ -0,0 +1,51 @@
+/*
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+*/
+
+/*
+ * nfp_ifvers.c - common pci interface versioning
+ *
+ * uses:
+ *
+ * int pdev->ifvers
+ * device interface version
+ *
+ * int nfp_ifvers
+ * interface version limit
+ *
+ * int nfp_alloc_pci_push( nfp_dev *pdev )
+ * allocates resources needed for PCI Push,
+ * if not already allocated, and return True if successful
+ *
+ * void nfp_free_pci_push( nfp_dev *pdev )
+ * frees any resources allocated to PCI Push
+ */
+
+void nfp_set_ifvers( nfp_dev *pdev, int vers ) {
+ if( nfp_ifvers != 0 && vers > nfp_ifvers ) {
+ nfp_log( NFP_DBG2,
+ "nfp_set_ifvers: can't set ifvers %d"
+ " as nfp_ifvers wants max ifvers %d",
+ vers, nfp_ifvers);
+ return;
+ }
+ if( vers >= NFDEV_IF_PCI_PUSH ) {
+ if(!nfp_alloc_pci_push(pdev)) {
+ nfp_log( NFP_DBG1,
+ "nfp_set_ifvers: can't set ifvers %d"
+ " as resources not available",
+ vers);
+ return;
+ }
+ } else {
+ nfp_free_pci_push(pdev);
+ }
+ pdev->ifvers= vers;
+ nfp_log( NFP_DBG3, "nfp_set_ifvers: setting ifvers %d", vers);
+}
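+
+/*
+ * Purely illustrative sketch (not part of the original file): the
+ * NFDEV_IOCTL_PCI_IFVERS handler is assumed to funnel the caller's
+ * requested version through nfp_set_ifvers(), which silently refuses
+ * versions it cannot honour and leaves pdev->ifvers unchanged.
+ */
+#if 0	/* example only, never compiled */
+static void example_ifvers_ioctl(nfp_dev *pdev, int requested) {
+	if (requested < NFDEV_IF_STANDARD)
+		requested = NFDEV_IF_STANDARD;	/* minimum interface version */
+	nfp_set_ifvers(pdev, requested);
+}
+#endif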
diff --git a/usr/src/uts/common/io/nfp/nfp_osif.h b/usr/src/uts/common/io/nfp/nfp_osif.h
new file mode 100644
index 0000000000..17ffe469ce
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/nfp_osif.h
@@ -0,0 +1,105 @@
+/*
+
+nfp_osif.h: nCipher PCI HSM OS interface declarations
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+history
+
+10/10/2001 jsh Original
+
+*/
+
+#ifndef NFP_OSIF_H
+#define NFP_OSIF_H
+
+#include "nfp_hostif.h"
+#include "nfp_error.h"
+
+/* general typedefs ----------------------------------------------- */
+
+typedef volatile unsigned int reg32;
+typedef volatile unsigned short reg16;
+typedef volatile unsigned char reg8;
+
+/* semaphores, mutexes and events --------------------------------- */
+
+#if 0
+extern nfp_err nfp_sema_init( nfp_sema *sema, int initial);
+extern void nfp_sema_destroy( nfp_sema *sema );
+extern void nfp_sema_post( nfp_sema *sema );
+extern void nfp_sema_wait( nfp_sema *sema );
+extern int nfp_sema_wait_sig( nfp_sema *sema );
+
+extern nfp_err nfp_mutex_init( nfp_mutex *mutex );
+extern void nfp_mutex_destroy( nfp_mutex *mutex );
+extern void nfp_mutex_enter( nfp_mutex *mutex );
+extern void nfp_mutex_exit( nfp_mutex *mutex );
+
+extern nfp_err nfp_event_init( nfp_event *event );
+extern void nfp_event_destroy( nfp_event *event );
+extern void nfp_event_set( nfp_event *event );
+extern void nfp_event_clear( nfp_event *event );
+extern void nfp_event_wait( nfp_event *event );
+extern void nfp_event_wait_sig( nfp_event *event );
+
+#endif
+
+/* timeouts ------------------------------------------------------ */
+
+extern void nfp_sleep( int ms );
+
+/* memory handling ----------------------------------------------- */
+
+#define KMALLOC_DMA 0
+#define KMALLOC_CACHED 1
+
+extern void *nfp_kmalloc( int size, int flags );
+extern void *nfp_krealloc( void *ptr, int size, int flags );
+extern void nfp_kfree( void * );
+
+/* config space access ------------------------------------------------ */
+
+/* return Little Endian 32 bit config register */
+extern nfp_err nfp_config_inl( nfp_cdev *pdev, int offset, unsigned int *res );
+
+/* io space access ------------------------------------------------ */
+
+extern unsigned int nfp_inl( nfp_cdev *pdev, int bar, int offset );
+extern unsigned short nfp_inw( nfp_cdev *pdev, int bar, int offset );
+extern void nfp_outl( nfp_cdev *pdev, int bar, int offset, unsigned int data );
+extern void nfp_outw( nfp_cdev *pdev, int bar, int offset, unsigned short data );
+
+/* user and device memory space access ---------------------------- */
+
+/* NB these 2 functions are not guaranteed to be re-entrant for a given device */
+extern nfp_err nfp_copy_from_user_to_dev( nfp_cdev *cdev, int bar, int offset, const char *ubuf, int len);
+extern nfp_err nfp_copy_to_user_from_dev( nfp_cdev *cdev, int bar, int offset, char *ubuf, int len);
+
+extern nfp_err nfp_copy_from_user( char *kbuf, const char *ubuf, int len );
+extern nfp_err nfp_copy_to_user( char *ubuf, const char *kbuf, int len );
+
+extern nfp_err nfp_copy_from_dev( nfp_cdev *cdev, int bar, int offset, char *kbuf, int len );
+extern nfp_err nfp_copy_to_dev( nfp_cdev *cdev, int bar, int offset, const char *kbuf, int len);
+
+/* debug ------------------------------------------------------------ */
+
+#define NFP_DBG1 1
+#define NFP_DBGE NFP_DBG1
+#define NFP_DBG2 2
+#define NFP_DBG3 3
+#define NFP_DBG4 4
+
+#ifdef STRANGE_VARARGS
+extern void nfp_log();
+#else
+extern void nfp_log( int severity, const char *format, ...);
+#endif
+
+extern int nfp_debug;
+
+#endif
diff --git a/usr/src/uts/common/io/nfp/nfpci.h b/usr/src/uts/common/io/nfp/nfpci.h
new file mode 100644
index 0000000000..793f5995e6
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/nfpci.h
@@ -0,0 +1,171 @@
+/*
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+*/
+
+/*
+*
+* NFPCI.H - nFast PCI interface definition file
+*
+*
+*
+* 1998.06.09 IH Started
+*
+* The interface presented by nFast PCI devices consists of:
+*
+* A region of shared RAM used for data transfer & control information
+* A doorbell interrupt register, so both sides can give each other interrupts
+* A number of DMA channels for transferring data
+*/
+
+#ifndef NFPCI_H
+#define NFPCI_H
+
+/* Sizes of some regions */
+#define NFPCI_RAM_MINSIZE 0x00100000
+/* This is the minimum size of shared RAM. In future it may be possible to
+ negotiate larger sizes of shared RAM or auto-detect how big it is */
+#define NFPCI_RAM_MINSIZE_JOBS 0x00020000 /* standard jobs only */
+#define NFPCI_RAM_MINSIZE_KERN 0x00040000 /* standard and kernel jobs */
+
+/* Offsets within shared memory space.
+ The following main regions are:
+ jobs input area
+ jobs output area
+ kernel jobs input area
+ kernel output area
+*/
+
+#define NFPCI_OFFSET_JOBS 0x00000000
+#define NFPCI_OFFSET_JOBS_WR 0x00000000
+#define NFPCI_OFFSET_JOBS_RD 0x00010000
+#define NFPCI_OFFSET_KERN 0x00020000
+#define NFPCI_OFFSET_KERN_WR 0x00020000
+#define NFPCI_OFFSET_KERN_RD 0x00030000
+
+/* Interrupts, defined by bit position in doorbell register */
+
+/* Interrupts from device to host */
+#define NFAST_INT_DEVICE_WRITE_OK 0x00000001
+#define NFAST_INT_DEVICE_WRITE_FAILED 0x00000002
+#define NFAST_INT_DEVICE_READ_OK 0x00000004
+#define NFAST_INT_DEVICE_READ_FAILED 0x00000008
+#define NFAST_INT_DEVICE_KERN_WRITE_OK 0x00000010
+#define NFAST_INT_DEVICE_KERN_WRITE_FAILED 0x00000020
+#define NFAST_INT_DEVICE_KERN_READ_OK 0x00000040
+#define NFAST_INT_DEVICE_KERN_READ_FAILED 0x00000080
+
+/* Interrupts from host to device */
+#define NFAST_INT_HOST_WRITE_REQUEST 0x00010000
+#define NFAST_INT_HOST_READ_REQUEST 0x00020000
+#define NFAST_INT_HOST_DEBUG 0x00040000
+#define NFAST_INT_HOST_KERN_WRITE_REQUEST 0x00080000
+#define NFAST_INT_HOST_KERN_READ_REQUEST 0x00100000
+
+/* Ordinary job submission ------------------------ */
+
+/* The NFPCI_OFFSET_JOBS_WR and NFPCI_OFFSET_JOBS_RD regions are defined
+ by the following (byte) address offsets... */
+
+#define NFPCI_OFFSET_CONTROL 0x0
+#define NFPCI_OFFSET_LENGTH 0x4
+#define NFPCI_OFFSET_DATA 0x8
+#define NFPCI_OFFSET_PUSH_ADDR 0x8
+
+#define NFPCI_JOBS_WR_CONTROL (NFPCI_OFFSET_JOBS_WR + NFPCI_OFFSET_CONTROL)
+#define NFPCI_JOBS_WR_LENGTH (NFPCI_OFFSET_JOBS_WR + NFPCI_OFFSET_LENGTH)
+#define NFPCI_JOBS_WR_DATA (NFPCI_OFFSET_JOBS_WR + NFPCI_OFFSET_DATA)
+#define NFPCI_MAX_JOBS_WR_LEN (0x0000FFF8)
+
+#define NFPCI_JOBS_RD_CONTROL (NFPCI_OFFSET_JOBS_RD + NFPCI_OFFSET_CONTROL)
+#define NFPCI_JOBS_RD_LENGTH (NFPCI_OFFSET_JOBS_RD + NFPCI_OFFSET_LENGTH)
+#define NFPCI_JOBS_RD_DATA (NFPCI_OFFSET_JOBS_RD + NFPCI_OFFSET_DATA)
+/* address in PCI space of host buffer for NFPCI_JOB_CONTROL_PCI_PUSH */
+#define NFPCI_JOBS_RD_PUSH_ADDR (NFPCI_OFFSET_JOBS_RD + NFPCI_OFFSET_PUSH_ADDR)
+#define NFPCI_MAX_JOBS_RD_LEN (0x000FFF8)
+
+/* Kernel interface job submission ---------------- */
+
+#define NFPCI_KERN_WR_CONTROL (NFPCI_OFFSET_KERN_WR + NFPCI_OFFSET_CONTROL)
+#define NFPCI_KERN_WR_LENGTH (NFPCI_OFFSET_KERN_WR + NFPCI_OFFSET_LENGTH)
+#define NFPCI_KERN_WR_DATA (NFPCI_OFFSET_KERN_WR + NFPCI_OFFSET_DATA)
+#define NFPCI_MAX_KERN_WR_LEN (0x0000FFF8)
+
+#define NFPCI_KERN_RD_CONTROL (NFPCI_OFFSET_KERN_RD + NFPCI_OFFSET_CONTROL)
+#define NFPCI_KERN_RD_LENGTH (NFPCI_OFFSET_KERN_RD + NFPCI_OFFSET_LENGTH)
+#define NFPCI_KERN_RD_DATA (NFPCI_OFFSET_KERN_RD + NFPCI_OFFSET_DATA)
+/* address in PCI space of host buffer for NFPCI_JOB_CONTROL_PCI_PUSH */
+#define NFPCI_KERN_RD_ADDR (NFPCI_OFFSET_KERN_RD + NFPCI_OFFSET_PUSH_ADDR)
+#define NFPCI_MAX_KERN_RD_LEN (0x000FFF8)
+
+#ifdef DEFINE_NFPCI_PACKED_STRUCTS
+typedef struct
+{
+ UINT32 controlword;
+ UINT32 length; /* length of data to follow */
+ union {
+ BYTE data[1];
+ UINT32 addr;
+ } uu;
+}
+ NFPCI_JOBS_BLOCK;
+#endif
+
+
+#define NFPCI_JOB_CONTROL 0x00000001
+#define NFPCI_JOB_CONTROL_PCI_PUSH 0x00000002
+/*
+ The 'Control' word is analogous to the SCSI read/write address;
+ 1 = standard push/pull IO
+ 2 = push/push IO
+
+ To submit a block of job data, the host:
+ - sets the (32-bit, little-endian) word at NFPCI_JOBS_WR_CONTROL to NFPCI_JOB_CONTROL
+ - sets the word at NFPCI_JOBS_WR_LENGTH to the length of the data
+ - copies the data to NFPCI_JOBS_WR_DATA
+ - sets interrupt NFAST_INT_HOST_WRITE_REQUEST in the doorbell register
+ - awaits the NFAST_INT_DEVICE_WRITE_OK (or _FAILED) interrupts back
+
+ To read a block of jobs back, the host:
+ - sets the word at NFPCI_JOBS_RD_CONTROL to NFPCI_JOB_CONTROL
+ - sets the word at NFPCI_JOBS_RD_LENGTH to the max length for returned data
+ - sets interrupt NFAST_INT_HOST_READ_REQUEST
+ - awaits the NFAST_INT_DEVICE_READ_OK (or _FAILED) interrupt
+ - reads the data from NFPCI_JOBS_RD_DATA; the module will set the word at
+ NFPCI_JOBS_RD_LENGTH to its actual length.
+
+ Optionally the host can request the PCI read data to be pushed to host PCI mapped ram:
+ - allocates a contiguous PCI addressable buffer for a NFPCI_JOBS_BLOCK of max
+ size NFPCI_MAX_JOBS_RD_LEN (or NFPCI_MAX_KERN_RD_LEN) + 8
+ - sets the word at NFPCI_JOBS_RD_CONTROL to NFPCI_JOB_CONTROL_PCI_PUSH
+ - sets the word at NFPCI_JOBS_RD_LENGTH to the max length for returned data
+ - sets the word at NFPCI_JOBS_RD_PUSH_ADDR to be the host PCI address of
+ the buffer
+ - sets interrupt NFAST_INT_HOST_READ_REQUEST
+ - awaits the NFAST_INT_DEVICE_READ_OK (or _FAILED) interrupt
+ - reads the data from the buffer at NFPCI_OFFSET_DATA in the buffer. The
+ module will set NFPCI_OFFSET_LENGTH to the actual length.
+*/
+
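+/*
+ * Purely illustrative sketch of the write-side sequence described above,
+ * expressed with this header's constants.  nfp_copy_to_dev() comes from
+ * nfp_osif.h; the bar number 2 stands in for the command driver's MEMBAR,
+ * and ringing the doorbell is left to the device-specific code.
+ */
+#if 0	/* example only, never compiled */
+static nfp_err example_submit_job(nfp_cdev *cdev, const char *data, int len) {
+	unsigned int hdr[2];
+	nfp_err ne;
+
+	ne = nfp_copy_to_dev(cdev, 2, NFPCI_JOBS_WR_DATA, data, len);
+	if (ne)
+		return ne;
+	TO_LE32_MEM(&hdr[0], NFPCI_JOB_CONTROL);	/* control word */
+	TO_LE32_MEM(&hdr[1], len);			/* job length */
+	ne = nfp_copy_to_dev(cdev, 2, NFPCI_JOBS_WR_CONTROL,
+	    (const char *)hdr, 8);
+	if (ne)
+		return ne;
+	/* now raise NFAST_INT_HOST_WRITE_REQUEST in the doorbell register */
+	return NFP_SUCCESS;
+}
+#endif
+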
+#define NFPCI_SCRATCH_CONTROL 0
+
+#define NFPCI_SCRATCH_CONTROL_HOST_MOI (1<<0)
+#define NFPCI_SCRATCH_CONTROL_MODE_SHIFT 1
+#define NFPCI_SCRATCH_CONTROL_MODE_MASK (3<<NFPCI_SCRATCH_CONTROL_MODE_SHIFT)
+
+#define NFPCI_SCRATCH_STATUS 1
+
+#define NFPCI_SCRATCH_STATUS_MONITOR_MOI (1<<0)
+#define NFPCI_SCRATCH_STATUS_APPLICATION_MOI (1<<1)
+#define NFPCI_SCRATCH_STATUS_APPLICATION_RUNNING (1<<2)
+#define NFPCI_SCRATCH_STATUS_ERROR (1<<3)
+
+#define NFPCI_SCRATCH_ERROR_LO 2
+#define NFPCI_SCRATCH_ERROR_HI 3
+
+#endif
diff --git a/usr/src/uts/common/io/nfp/osif.c b/usr/src/uts/common/io/nfp/osif.c
new file mode 100644
index 0000000000..fba62f9a37
--- /dev/null
+++ b/usr/src/uts/common/io/nfp/osif.c
@@ -0,0 +1,184 @@
+/*
+
+(C) Copyright nCipher Corporation Ltd 2002-2008 All rights reserved
+
+Copyright (c) 2008-2013 Thales e-Security All rights reserved
+
+Copyright (c) 2014 Thales UK All rights reserved
+
+*/
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/file.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/map.h>
+#include <sys/debug.h>
+#include <sys/modctl.h>
+#include <sys/kmem.h>
+#include <sys/cmn_err.h>
+#include <sys/open.h>
+#include <sys/stat.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/pci.h>
+
+#include "nfp_common.h"
+#include "nfp_hostif.h"
+#include "nfp_error.h"
+#include "nfp_osif.h"
+#include "nfp_cmd.h"
+#include "nfp.h"
+#include "autoversion.h"
+
+/* config space access ---------------------------------- */
+
+nfp_err nfp_config_inl( nfp_cdev *pdev, int offset, unsigned int *res ) {
+ unsigned int tmp32;
+ if ( !pdev || !pdev->dev || !pdev->dev->conf_handle )
+ return NFP_ENODEV;
+
+/* pci_config_get32() does byte swapping, so put back to LE */
+ tmp32 = pci_config_get32( pdev->dev->conf_handle, offset );
+ TO_LE32_IO(res, tmp32);
+
+ return NFP_SUCCESS;
+}
+
+/* user space memory access ---------------------------------- */
+
+nfp_err nfp_copy_from_user( char *kbuf, const char *ubuf, int len) {
+ bcopy(ubuf, kbuf, len);
+ return 0;
+}
+
+nfp_err nfp_copy_to_user( char *ubuf, const char *kbuf, int len) {
+ bcopy(kbuf, ubuf, len);
+ return 0;
+}
+
+nfp_err nfp_copy_from_user_to_dev( nfp_cdev *cdev, int bar, int offset, const char *ubuf, int len) {
+ /* dirty hack on Solaris: since we are called from strategy we are, in fact, copying from kernel memory */
+ return nfp_copy_to_dev( cdev, bar, offset, ubuf, len );
+}
+
+nfp_err nfp_copy_to_user_from_dev( nfp_cdev *cdev, int bar, int offset, char *ubuf, int len) {
+ /* dirty hack on Solaris: since we are called from strategy we are, in fact, copying to kernel memory */
+ return nfp_copy_from_dev( cdev, bar, offset, ubuf, len );
+}
+
+nfp_err nfp_copy_from_dev( nfp_cdev *cdev, int bar, int offset, char *kbuf, int len) {
+ if( len & 0x3 || offset & 0x3 )
+ DDI_REP_GET8( cdev->extra[bar], (unsigned char *)kbuf, cdev->bar[bar] + offset, len, DDI_DEV_AUTOINCR);
+ else
+ /* LINTED: alignment */
+ DDI_REP_GET32( cdev->extra[bar], (unsigned int *)kbuf, (unsigned int *)(cdev->bar[bar] + offset), len / 4, DDI_DEV_AUTOINCR);
+ return NFP_SUCCESS;
+}
+
+nfp_err nfp_copy_to_dev( nfp_cdev *cdev, int bar, int offset, const char *kbuf, int len) {
+ if( len & 0x3 || offset & 0x3 )
+ DDI_REP_PUT8( cdev->extra[bar], (unsigned char *)kbuf, cdev->bar[bar] + offset, len, DDI_DEV_AUTOINCR );
+ else
+ /* LINTED: alignment */
+ DDI_REP_PUT32( cdev->extra[bar], (unsigned int *)kbuf, (unsigned int *)(cdev->bar[bar] + offset), len / 4, DDI_DEV_AUTOINCR );
+ return NFP_SUCCESS;
+}
+
+/* pci io space access --------------------------------------- */
+
+unsigned int nfp_inl( nfp_cdev *pdev, int bar, int offset ) {
+ nfp_log( NFP_DBG3, "nfp_inl: addr %x", (uintptr_t) pdev->bar[bar] + offset);
+ /* LINTED: alignment */
+ return DDI_GET32( pdev->extra[bar], (uint32_t *)(pdev->bar[bar] + offset) );
+}
+
+unsigned short nfp_inw( nfp_cdev *pdev, int bar, int offset ) {
+ nfp_log( NFP_DBG3, "nfp_inw: addr %x", (uintptr_t) pdev->bar[bar] + offset);
+ /* LINTED: alignment */
+ return DDI_GET16( pdev->extra[bar], (unsigned short *)(pdev->bar[ bar ] + offset) );
+}
+
+void nfp_outl( nfp_cdev *pdev, int bar, int offset, unsigned int data ) {
+ nfp_log( NFP_DBG3, "nfp_outl: addr %x, data %x", (uintptr_t) pdev->bar[bar] + offset, data);
+ /* LINTED: alignment */
+ DDI_PUT32( pdev->extra[bar], (uint32_t *)(pdev->bar[ bar ] + offset), data );
+}
+
+void nfp_outw( nfp_cdev *pdev, int bar, int offset, unsigned short data ) {
+ nfp_log( NFP_DBG3, "nfp_outl: addr %x, data %x", (uintptr_t) pdev->bar[bar] + offset, data);
+ /* LINTED: alignment */
+ DDI_PUT16( pdev->extra[bar], (unsigned short *)(pdev->bar[ bar ] + offset), data );
+}
+
+/* logging ---------------------------------------------------- */
+
+void nfp_log( int level, const char *fmt, ...)
+{
+ auto char buf[256];
+ va_list ap;
+
+ switch (level) {
+ case NFP_DBG4: if (nfp_debug < 4) break;
+ /*FALLTHROUGH*/
+ case NFP_DBG3: if (nfp_debug < 3) break;
+ /*FALLTHROUGH*/
+ case NFP_DBG2: if (nfp_debug < 2) break;
+ /*FALLTHROUGH*/
+ case NFP_DBG1: if (nfp_debug < 1) break;
+ /*FALLTHROUGH*/
+ default:
+ va_start(ap, fmt);
+ (void) vsnprintf(buf, 256, fmt, ap);
+ va_end(ap);
+ cmn_err(CE_CONT, "!" VERSION_COMPNAME " " VERSION_NO ": %s\n", buf);
+ break;
+ }
+}
+
+struct errstr {
+ int oserr;
+ nfp_err nferr;
+};
+
+
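+/*
+ * Translation table between OS errnos and nfp_err codes.  Lookups below
+ * take the first match, so the duplicate EINVAL entry only matters in
+ * the nfp_oserr() direction: NFP_EUNKNOWN is reported to the OS as
+ * EINVAL, while an incoming EINVAL maps to NFP_EINVAL.
+ */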
+static struct errstr errtab[] = {
+ { EFAULT, NFP_EFAULT },
+ { ENOMEM, NFP_ENOMEM },
+ { EINVAL, NFP_EINVAL },
+ { EIO, NFP_EIO },
+ { ENXIO, NFP_ENXIO },
+ { ENODEV, NFP_ENODEV },
+ { EINVAL, NFP_EUNKNOWN },
+ { 0, 0 }
+};
+
+nfp_err nfp_error( int oserr )
+{
+ struct errstr *perr;
+ if(!oserr)
+ return 0;
+ perr= errtab;
+ while(perr->nferr) {
+ if(perr->oserr == oserr)
+ return perr->nferr;
+ perr++;
+ }
+ return NFP_EUNKNOWN;
+}
+
+int nfp_oserr( nfp_err nferr )
+{
+ struct errstr *perr;
+ if(nferr == NFP_SUCCESS)
+ return 0;
+ perr= errtab;
+ while(perr->nferr) {
+ if(perr->nferr == nferr)
+ return perr->oserr;
+ perr++;
+ }
+ return EIO;
+}
diff --git a/usr/src/uts/common/io/overlay/overlay.c b/usr/src/uts/common/io/overlay/overlay.c
new file mode 100644
index 0000000000..3f34ec3b58
--- /dev/null
+++ b/usr/src/uts/common/io/overlay/overlay.c
@@ -0,0 +1,2184 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2016 Joyent, Inc.
+ */
+
+/*
+ * Overlay Devices
+ *
+ * Overlay devices provide a means for creating overlay networks, a means of
+ * multiplexing multiple logical, isolated, and discrete layer two and layer
+ * three networks on top of one physical network.
+ *
+ * In general, these overlay devices encapsulate the logic to answer two
+ * different questions:
+ *
+ * 1) How should I transform a packet to put it on the wire?
+ * 2) Where should I send a transformed packet?
+ *
+ * Each overlay device is presented to the user as a GLDv3 device. While the
+ * link itself cannot have an IP interface created on top of it, it allows for
+ * additional GLDv3 devices, such as a VNIC, to be created on top of it which
+ * can be plumbed up with IP interfaces.
+ *
+ *
+ * --------------------
+ * General Architecture
+ * --------------------
+ *
+ * The logical overlay device that a user sees in dladm(1M) is a combination of
+ * two different components that work together. The first component is this
+ * kernel module, which is responsible for answering question one -- how should
+ * I transform a packet to put it on the wire.
+ *
+ * The second component is what we call the virtual ARP daemon, or varpd. It is
+ * a userland component that is responsible for answering the second question --
+ * Where should I send a transformed packet. Instances of the kernel overlay
+ * GLDv3 device ask varpd the question of where should a packet go.
+ *
+ * The split was done for a few reasons. Importantly, we wanted to keep the act
+ * of generating encapsulated packets in the kernel so as to ensure that the
+ * general data path was fast and also kept simple. On the flip side, while the
+ * question of where should something go may be simple, it may often be
+ * complicated and need to interface with several different external or
+ * distributed systems. In those cases, it's simpler to allow for the full
+ * flexibility of userland to be brought to bear to solve that problem and in
+ * general, the path isn't very common.
+ *
+ * The following is what makes up the logical overlay device that a user would
+ * create with dladm(1M).
+ *
+ * Kernel Userland
+ * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+ * . +--------+ +--------+ +--------+ . . .
+ * . | VNIC 0 | | VNIC 1 | | VNIC 2 | . . .
+ * . +--------+ +--------+ +--------+ . . .
+ * . | | | . . .
+ * . | | | . . .
+ * . +------------+-----------+ . . .
+ * . | . . /dev/overlay .
+ * . +--------------+ . . . +------------+ .
+ * . | | . . . | | .
+ * . | Overlay |======*=================| Virtual | .
+ * . | GLDv3 Device |========================| ARP Daemon | .
+ * . | | . . | | .
+ * . +--------------+ . . +------------+ .
+ * . | . . | .
+ * . | . . | .
+ * . +----------------+ . . +--------+ .
+ * . | Overlay | . . | varpd | .
+ * . | Encapsulation | . . | Lookup | .
+ * . | Plugin | . . | Plugin | .
+ * . +----------------+ . . +--------+ .
+ * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+ *
+ *
+ * This image shows the two different components and where they live.
+ * Importantly, it also shows that both the kernel overlay device and the
+ * userland varpd both support plugins. The plugins actually implement the
+ * things that users care about and the APIs have been designed to try to
+ * minimize the number of things that a module writer needs to worry about.
+ *
+ * IDENTIFIERS
+ *
+ * Every overlay device is defined by a unique identifier which is the overlay
+ * identifier. Its purpose is similar to that of a VLAN identifier, it's a
+ * unique number that is used to differentiate between different entries on the
+ * wire.
+ *
+ * ENCAPSULATION
+ *
+ * An overlay encapsulation plugin is a kernel miscellaneous module whose
+ * purpose is to contain knowledge about how to transform packets to put them
+ * onto the wire and to take them off. An example of an encapsulation plugin is
+ * vxlan. It's also how support for things like nvgre or geneve would be brought
+ * into the system.
+ *
+ * Each encapsulation plugin defines a series of operation vectors and
+ * properties. For the full details on everything they should provide, please
+ * read uts/common/sys/overlay_plugin.h. The encapsulation plugin is responsible
+ * for telling the system what information is required to send a packet. For
+ * example, vxlan is defined to send everything over a UDP packet and therefore
+ * requires a port and an IP address, while nvgre on the other hand is its own
+ * IP type and therefore just requires an IP address. In addition, it also
+ * provides information about the kind of socket that should be created. This is
+ * used by the kernel multiplexor; more on that in the Kernel Components
+ * section.
+ *
+ * LOOKUPS
+ *
+ * The kernel communicates requests for lookups over the character device
+ * /dev/overlay. varpd is responsible for listening for requests on that device
+ * and answering them. The character device is specific to the target path and
+ * varpd.
+ *
+ * Much as the kernel overlay module handles the bulk of the scaffolding but
+ * leaves the important work to the encapsulation plugin, varpd provides a
+ * similar role and leaves the full brunt of lookups to a userland dynamic
+ * shared object which implements the logic of lookups.
+ *
+ * Each lookup plugin defines a series of operation vectors and properties. For
+ * the full details on everything that they should provide, please read
+ * lib/varpd/libvarpd/libvarpd_provider.h. Essentially, they are given a MAC
+ * address and asked to give an address on the physical network that it should
+ * be sent to. In addition, they handle questions related to how to handle
+ * things like broadcast and multicast traffic, etc.
+ *
+ * ----------
+ * Properties
+ * ----------
+ *
+ * A device from a dladm perspective has a unique set of properties that are
+ * combined from three different sources:
+ *
+ * 1) Generic properties that every overlay device has
+ * 2) Properties that are specific to the encapsulation plugin
+ * 3) Properties that are specific to the lookup plugin
+ *
+ * All of these are exposed in a single set of properties in dladm. Note that
+ * these are not necessarily traditional link properties. However, if something
+ * is both a traditional GLDv3 link property, say the MTU of a device, and a
+ * specific property here, then the driver ensures that all existing GLDv3
+ * specific means of manipulating it are used and wraps up its private property
+ * interfaces to ensure that works.
+ *
+ * Properties in the second and third category are prefixed with the name of
+ * their module. For example, the vxlan encapsulation module has a property
+ * called the 'listen_ip'. This property would show up in dladm as
+ * 'vxlan/listen_ip'. This allows different plugins to both use similar names
+ * for similar properties and to also have independent name spaces so that
+ * overlapping names do not conflict with anything else.
+ *
+ * While the kernel combines sets one and two into a single coherent view,
+ * it does not do anything with respect to the properties that are owned by the
+ * lookup plugin -- those are owned wholly by varpd. Instead, libdladm is in
+ * charge of bridging these two worlds into one magical experience for the user.
+ * It carries the burden of knowing about both overlay specific and varpd
+ * specific properties. Importantly, we want to maintain this distinction. We
+ * don't want to treat the kernel as an arbitrary key/value store for varpd and
+ * we want the kernel to own its own data and not have to ask userland for
+ * information that it owns.
+ *
+ * Every property in the system has the following attributes:
+ *
+ * o A name
+ * o A type
+ * o A size
+ * o Permissions
+ * o Default value
+ * o Valid value ranges
+ * o A value
+ *
+ * Everything except for the value is obtained by callers through the propinfo
+ * callbacks and a property has a maximum size of OVERLAY_PROP_SIZEMAX,
+ * currently 256 bytes.
+ *
+ * The following are the supported types of properties:
+ *
+ * OVERLAY_PROP_T_INT
+ *
+ * A signed integer, its length is 8 bytes, corresponding to a
+ * int64_t.
+ *
+ * OVERLAY_PROP_T_UINT
+ *
+ * An unsigned integer, its length is 8 bytes, corresponding to a
+ * uint64_t.
+ *
+ * OVERLAY_PROP_T_IP
+ *
+ * A struct in6_addr, it has a fixed size.
+ *
+ * OVERLAY_PROP_T_STRING
+ *
+ * A null-terminated character string encoded in either ASCII or
+ * UTF-8. Note that the size of the string includes the null
+ * terminator.
+ *
+ * The next thing that we apply to a property is its permission. The permissions
+ * are put together by the bitwise or of the following flags and values.
+ *
+ * OVERLAY_PROP_PERM_REQ
+ *
+ * This indicates a required property. A property that is required
+ * must be set by a consumer before the device can be created. If a
+ * required property has a default value, this constraint is
+ * loosened because the default defines the value.
+ *
+ * OVERLAY_PROP_PERM_READ
+ *
+ * This indicates that a property can be read. All properties will
+ * have this value set.
+ *
+ * OVERLAY_PROP_PERM_WRITE
+ *
+ * This indicates that a property can be written to and thus
+ * updated by userland. Properties that are only intended to
+ * display information, will not have OVERLAY_PROP_PERM_WRITE set.
+ *
+ * In addition, a few additional values are defined as a convenience to
+ * consumers. The first, OVERLAY_PROP_PERM_RW, is a combination of
+ * OVERLAY_PROP_PERM_READ and OVERLAY_PROP_PERM_WRITE. The second,
+ * OVERLAY_PROP_PERM_RRW, is a combination of OVERLAY_PROP_PERM_REQ,
+ * OVERLAY_PROP_PERM_READ, and OVERLAY_PROP_PERM_WRITE. The protection mode of a
+ * property should generally be a constant across its lifetime.
+ *
+ * A property may optionally have a default value. If it does have a default
+ * value, and that property is not set to be a different value, then the default
+ * value is inherited automatically. It also means that if the default value is
+ * acceptable, there is no need to set the value for a required property. For
+ * example, the vxlan module has the vxlan/listen_port property which is
+ * required, but has a default value of 4789 (the IANA assigned port). Because
+ * of that default value, there is no need for it to be set.
+ *
+ * Finally, a property may declare a list of valid values. These valid values
+ * are used for display purposes only; they are not enforced by the broader
+ * system, but merely provide a means for the information to be communicated
+ * to the user through dladm(1M). Like a default value, this is optional.
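+ *
+ * To make these attributes concrete, here is a rough sketch of what an
+ * encapsulation module's propinfo callback might look like for a
+ * hypothetical 'example/listen_port' property, using the overlay_prop_set_*
+ * helpers that the scaffolding provides (the module name, property name, and
+ * default here are made up for illustration, not taken from a real plugin):
+ *
+ *	static uint64_t example_defport = 4789;
+ *
+ *	static int
+ *	example_o_propinfo(const char *name, overlay_prop_handle_t phdl)
+ *	{
+ *		if (strcmp(name, "example/listen_port") != 0)
+ *			return (EINVAL);
+ *
+ *		overlay_prop_set_prot(phdl, OVERLAY_PROP_PERM_RRW);
+ *		overlay_prop_set_type(phdl, OVERLAY_PROP_T_UINT);
+ *		overlay_prop_set_default(phdl, &example_defport,
+ *		    sizeof (example_defport));
+ *		overlay_prop_set_range_uint32(phdl, 1, UINT16_MAX);
+ *		return (0);
+ *	}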
+ *
+ * The general scaffolding does not do very much with respect to the getting and
+ * setting of properties. That is really owned by the individual plugins
+ * themselves.
+ *
+ * -----------------------------
+ * Destinations and Plugin Types
+ * -----------------------------
+ *
+ * Both encapsulation and lookup plugins define the kinds of destinations that
+ * they know how to support. There are currently three different pieces of
+ * information that can be used to address a destination, all of which are
+ * summarized in the type overlay_point_t. Any combination of these is
+ * supported.
+ *
+ * OVERLAY_PLUGIN_D_ETHERNET
+ *
+ * An Ethernet MAC address is required.
+ *
+ * OVERLAY_PLUGIN_D_IP
+ *
+ * An IP address is required. All IP addresses used by the overlay
+ * system are transmitted as IPv6 addresses. IPv4 addresses can be
+ * represented by using IPv4-mapped IPv6 addresses.
+ *
+ * OVERLAY_PLUGIN_D_PORT
+ *
+ * A TCP/UDP port is required.
+ *
+ * A kernel encapsulation plugin declares which of these it requires; the set
+ * is static. On the other hand, a userland lookup plugin can be built to
+ * support all of these or any combination thereof. It gets passed the
+ * required destination type, based on the kernel encapsulation method, and
+ * then determines whether or not it supports it. For example, the direct
+ * plugin can support either an IP alone or both an IP and a port; it simply
+ * doesn't display the direct/dest_port property in the cases where a port is
+ * not required.
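+ *
+ * As a small sketch, a UDP-based encapsulation module would declare both of
+ * the latter two requirements at registration time with something like the
+ * following (the structure and field names here are illustrative, not a
+ * statement of the real registration interface):
+ *
+ *	ovep->ovep_dest = OVERLAY_PLUGIN_D_IP | OVERLAY_PLUGIN_D_PORT;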
+ *
+ * The userland lookup plugins have two different modes of operation, which
+ * determine how they interact with the broader system and how lookups are
+ * performed. These types are:
+ *
+ * OVERLAY_TARGET_POINT
+ *
+ * A point to point plugin has a single static definition for where
+ * to send all traffic. Every packet in the system always gets sent
+ * to the exact same destination which is programmed into the
+ * kernel when the general device is activated.
+ *
+ * OVERLAY_TARGET_DYNAMIC
+ *
+ * A dynamic plugin does not have a single static definition.
+ * Instead, for each destination, the kernel makes an asynchronous
+ * request to varpd to determine where the packet should be routed,
+ * and if a specific destination is found, then that destination is
+ * cached in the overlay device's target cache.
+ *
+ * This distinction, while important for the general overlay device's operation,
+ * is not important to the encapsulation plugins. They don't need to know about
+ * any of these pieces. It's just a concern for varpd, the userland plugin, and
+ * the general overlay scaffolding.
+ *
+ * When an overlay device is set to OVERLAY_TARGET_POINT, then it does not
+ * maintain a target cache, and instead just keeps track of the destination and
+ * always sends encapsulated packets to that address. When the target type is of
+ * OVERLAY_TARGET_DYNAMIC, then the kernel maintains a cache of all such
+ * destinations. These destinations are kept around in an instance of a
+ * reference hash that is specific to the given overlay device. Entries in the
+ * cache can be invalidated and replaced by varpd and its lookup plugins.
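+ *
+ * As a rough paraphrase of the transmit path (the names here are
+ * illustrative, not the literal fields), the effect of the two modes is:
+ *
+ *	if (target->mode == OVERLAY_TARGET_POINT)
+ *		dest = target->point;
+ *	else {
+ *		entry = refhash lookup of the destination MAC;
+ *		if (entry == NULL || !(entry->flags & OVERLAY_ENTRY_F_VALID))
+ *			queue the mblk_t and ask varpd;
+ *		else
+ *			dest = entry->point;
+ *	}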
+ *
+ * ----------------------------------
+ * Kernel Components and Architecture
+ * ----------------------------------
+ *
+ * There are multiple pieces inside the kernel that work together. There is
+ * the general overlay_dev_t structure, which is the logical GLDv3 device; it
+ * has references to things like an instance of an encapsulation plugin, a
+ * pointer to a mux, and a target cache. It can roughly be summarized in the
+ * following image:
+ *
+ * +------------------+
+ * | global |
+ * | overlay list |
+ * | overlay_dev_list |
+ * +------------------+
+ * |
+ * | +-----------------------+ +---------------+
+ * +->| GLDv3 Device |----------->| GLDv3 Device | -> ...
+ * | overlay_dev_t | | overlay_dev_t |
+ * | | +---------------+
+ * | |
+ * | mac_handle_t -----+---> GLDv3 handle to MAC
+ * | datalink_id_t -----+---> Datalink ID used by DLS
+ * | overlay_dev_flag_t ---+---> Device state
+ * | uint_t -----+---> Current device MTU
+ * | uint_t -----+---> In-progress RX operations
+ * | uint_t -----+---> In-progress TX operations
+ * | char[] -----+---> FMA degraded message
+ * | void * -----+---> plugin private data
+ * | overlay_target_t * ---+---------------------+
+ * | overlay_plugin_t * ---+---------+ |
+ * +-----------------------+ | |
+ * ^ | |
+ * +--------------------+ | | |
+ * | Kernel Socket | | | |
+ * | Multiplexor | | | |
+ * | overlay_mux_t | | | |
+ * | | | | |
+ * | avl_tree_t -+--+ | |
+ * | uint_t -+--> socket family | |
+ * | uint_t -+--> socket type | |
+ * | uint_t -+--> socket protocol | |
+ * | ksocket_t -+--> I/O socket | |
+ * | struct sockaddr * -+--> ksocket address | |
+ * | overlay_plugin_t --+--------+ | |
+ * +--------------------+ | | |
+ * | | |
+ * +-------------------------+ | | |
+ * | Encap Plugin |<--+-----------+ |
+ * | overlay_plugin_t | |
+ * | | |
+ * | char * ---+--> plugin name |
+ * | overlay_plugin_ops_t * -+--> plugin downcalls |
+ * | char ** (props) ---+--> property list |
+ * | uint_t ---+--> id length |
+ * | overlay_plugin_flags_t -+--> plugin flags |
+ * | overlay_plugin_dest_t --+--> destination type v
+ * +-------------------------+ +-------------------------+
+ * | Target Cache |
+ * | overlay_target_t |
+ * | |
+ * cache mode <--+- overlay_target_mode_t |
+ * dest type <--+- overlay_plugin_dest_t |
+ * cache flags <--+- overlay_target_flag_t |
+ * varpd id <--+- uint64_t |
+ * outstanding varpd reqs. <--+- uint_t |
+ * OVERLAY_TARGET_POINT state <--+- overlay_target_point_t |
+ * OVERLAY_TARGET_DYNAMIC state <-+---+- overlay_target_dyn_t |
+ * | +-------------------------+
+ * +-----------------------+
+ * |
+ * v
+ * +-------------------------------+ +------------------------+
+ * | Target Entry |-->| Target Entry |--> ...
+ * | overlay_target_entry_t | | overlay_target_entry_t |
+ * | | +------------------------+
+ * | |
+ * | overlay_target_entry_flags_t -+--> Entry flags
+ * | uint8_t[ETHERADDRL] ---+--> Target MAC address
+ * | overlay_target_point_t ---+--> Target underlay address
+ * | mblk_t * ---+--> outstanding mblk head
+ * | mblk_t * ---+--> outstanding mblk tail
+ * | size_t ---+--> outstanding mblk size
+ * +-------------------------------+
+ *
+ * The primary entries that we care about are the overlay_dev_t structures,
+ * one of which corresponds to each overlay device that is created with
+ * dladm(1M). Globally, these devices are maintained in a simple list_t which
+ * is protected with a lock. Each device includes important information such
+ * as the mac_handle_t and the datalink_id_t which are used to interact with
+ * the broader MAC and DLS ecosystem. We also maintain additional information
+ * such as the current state, outstanding operations, the MTU, and
+ * importantly, the plugin's private data.
+ * This is the instance of an encapsulation plugin that gets created as part of
+ * creating an overlay device. Another aspect of this is that the overlay_dev_t
+ * also includes information with respect to FMA. For more information, see the
+ * FMA section.
+ *
+ * Each overlay_dev_t has a pointer to a plugin, a mux, and a target. The
+ * plugin is the encapsulation plugin. This allows the device to make
+ * downcalls into it for things like getting and setting properties.
+ * Otherwise, the plugin itself is a fairly straightforward entity. Plugins
+ * are maintained in a global list (not pictured above). The plugins
+ * themselves mostly maintain things like the static list of properties, what
+ * kind of destination they require, and the operations vector. A given
+ * module may contain more if necessary.
+ *
+ * The next piece of the puzzle is the mux, or a multiplexor. The mux itself
+ * maintains a ksocket and it is through the mux that we send and receive
+ * message blocks. The mux represents a socket type and address, as well as a
+ * plugin. Multiple overlay_dev_t devices may then share the same mux. For
+ * example, consider the case where you have different instances of vxlan all on
+ * the same underlay network. These would all logically share the same IP
+ * address and port that packets are sent and received on; however, what differs
+ * is the decapsulation ID.
+ *
+ * Each mux maintains a ksocket_t which is similar to a socket(3SOCKET).
+ * Unlike a socket, we enable a direct callback on the ksocket: whenever a
+ * message block chain is received, rather than sitting there, waiting for a
+ * wakeup, and kicking the work back out to a taskq, the data comes directly
+ * into the callback function overlay_mux_recv().
+ *
+ * The mux is given encapsulated packets (via overlay_m_tx, the GLDv3 tx
+ * function) to transmit. It receives encapsulated packets, decapsulates them to
+ * determine the overlay identifier, looks up the given device that matches that
+ * identifier, and then causes the broader MAC world to receive the packet with
+ * a call to mac_rx().
+ *
+ * Today, we don't do too much that's special with the ksocket; however, as
+ * hardware gains understanding of these encapsulation protocols, we'll
+ * probably want to think of better ways to get those capabilities passed down
+ * and potentially better ways to program receive filters so they get directly
+ * to us. Though, that's all fantasy future land.
+ *
+ * The next part of the puzzle is the target cache. The purpose of the target
+ * cache is to cache where we should send a packet on the underlay network,
+ * given its mac address. The target cache operates in two modes depending on
+ * whether the lookup module was declared as OVERLAY_TARGET_POINT or
+ * OVERLAY_TARGET_DYNAMIC.
+ *
+ * In the case where the target cache has been programmed to be
+ * OVERLAY_TARGET_POINT, then we only maintain a single overlay_target_point_t
+ * which holds the destination to which we send everything, no matter the
+ * destination mac address.
+ *
+ * On the other hand, when we have an instance of OVERLAY_TARGET_DYNAMIC,
+ * things are much more interesting and, as a result, more complicated. We
+ * primarily keep overlay_target_entry_t's, which are stored in both an AVL
+ * tree and a refhash_t. The primary lookup path uses the refhash_t; the AVL
+ * tree is only used for a few of the target ioctls that dump data, so that we
+ * can get a consistent iteration order for things like dladm show-overlay -t.
+ * The key that we use for the reference hashtable is based on the mac address
+ * in the cache, and currently we just do a simple CRC32 to transform it into
+ * a hash.
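+ *
+ * As a sketch, that hash function looks roughly like the following, using
+ * the kernel's CRC32() macro (the function name here is illustrative):
+ *
+ *	static uint64_t
+ *	overlay_mac_hash(const void *v)
+ *	{
+ *		uint32_t crc;
+ *		CRC32(crc, v, ETHERADDRL, -1U, crc32_table);
+ *		return (crc);
+ *	}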
+ *
+ * Each entry maintains a set of flags to indicate the current status of the
+ * request. The flags may indicate one of three states: that the current
+ * cache entry is valid, that the current cache entry has been directed to
+ * drop all output, or that the current cache entry is invalid and may
+ * currently be undergoing a lookup. In the case where it's valid, we just
+ * take the destination address and run with it.
+ *
+ * If it's invalid and a lookup has not been made, then we start the process
+ * that prepares a query that will make its way up to varpd. The cache entry
+ * maintains a message block chain of outstanding message blocks and a size.
+ * These lists are populated only when we don't know the answer as to where
+ * these packets should be sent. The size entry is used to cap the amount of
+ * outstanding data that we don't know the answer to. If we exceed a cap on
+ * the amount of outstanding data (currently 1 Mb), then we'll drop any
+ * additional packets. Once we get an answer indicating a valid destination,
+ * we transmit any outstanding data to that place. The full story of how we
+ * look that up is discussed in the section on the Target Cache Lifecycle.
+ *
+ * ------------------------
+ * FMA and Degraded Devices
+ * ------------------------
+ *
+ * Every kernel overlay device keeps track of its FMA state. Today in FMA we
+ * cannot represent partitions between resources nor can we represent that a
+ * given minor node of a pseudo device has failed -- if we degrade the overlay
+ * device, then the entire dev_info_t is degraded. However, we still want to be
+ * able to indicate to administrators that things may go wrong.
+ *
+ * To this end, we've added a notion of a degraded state to every overlay
+ * device. This state is primarily dictated by userland and it can happen for
+ * various reasons. Generally, if a userland lookup plugin has been
+ * partitioned, or something has gone wrong such that there is no longer any
+ * userland lookup module for a device, then we'll mark it degraded.
+ *
+ * As long as any of our minor instances is degraded, we'll fire off the FMA
+ * event to note that. Once the last degraded instance is no longer degraded,
+ * we'll end up telling FMA that we're all clean.
+ *
+ * To help administrators get a better sense of which of the various minor
+ * devices is at fault, we store the odd_fmamsg[] character array. This
+ * character array can be fetched by running dladm show-overlay -f.
+ *
+ * Note that it's important that we do not update the link status of the
+ * devices. We want to remain up as much as possible. Changing the link status
+ * in a degraded state may end up making things worse. We may still actually
+ * have information in the target cache, and if we mark the link down, that'll
+ * result in not being able to use it: marking the link down marks all the
+ * downstream VNICs down, which goes to IP, and from there we end up dealing
+ * with sadness.
+ *
+ * -----------------------
+ * Target Cache Life Cycle
+ * -----------------------
+ *
+ * This section only applies when we have a lookup plugin of
+ * OVERLAY_TARGET_DYNAMIC. None of this applies to those of type
+ * OVERLAY_TARGET_POINT.
+ *
+ * While we got into the target cache in the general architecture section,
+ * it's worth going into more detail as to how this actually works, with some
+ * examples and state machines. Recall that a target cache entry basically has
+ * the following state transition diagram:
+ *
+ * Initial state
+ * . . . . . . first access . . . varpd lookup enqueued
+ * . . .
+ * . . .
+ * +-------+ . +----------+ .
+ * | No |------*---->| Invalid |-------*----+
+ * | Entry | | Entry | |
+ * +-------+ +----------+ |
+ * varpd ^ ^ varpd |
+ * invalidate | | drop |
+ * . . . * * . . v
+ * +-------+ | | +---------+
+ * | Entry |--->-----+ +----<----| Entry |
+ * | Valid |<----------*---------<----| Pending |->-+ varpd
+ * +-------+ . +---------+ * . . drop, but
+ * . varpd ^ | other queued
+ * . success | | entries
+ * +-----+
+ *
+ * When the table is first created, it is empty. As we attempt to lookup entries
+ * and we find there is no entry at all, we'll create a new table entry for it.
+ * At that point the entry is technically in an invalid state, which means
+ * that we have no valid data from varpd. In that case, we'll go ahead and
+ * queue the packet onto the entry's pending chain and queue a varpd lookup,
+ * setting the OVERLAY_ENTRY_F_PENDING flag in the process.
+ *
+ * If additional mblk_t's come in for this entry, we end up appending them to
+ * the tail of the chain, if, and only if, we don't exceed the threshold for
+ * the amount of space they can take up. An entry remains pending until we get
+ * a varpd reply. If varpd replies with a valid result, we leave the pending
+ * state, removing the OVERLAY_ENTRY_F_PENDING flag and setting one of
+ * OVERLAY_ENTRY_F_VALID or OVERLAY_ENTRY_F_DROP as appropriate.
+ *
+ * Once an entry is valid, it stays valid until userland tells us to
+ * invalidate it with an ioctl or replace it, via OVERLAY_TARG_CACHE_REMOVE
+ * and OVERLAY_TARG_CACHE_SET respectively.
+ *
+ * If the lookup fails with a call to drop the packet, then the next state is
+ * determined by the state of the queue. If the set of outstanding entries is
+ * empty, then we just transition back to the invalid state. If, instead, the
+ * set of outstanding entries is not empty, then we'll queue another lookup
+ * and stay in the same state, repeating this until the set of outstanding
+ * requests is drained.
+ *
+ * The following images describe the flow of a given lookup and where the
+ * overlay_target_entry_t is at any given time.
+ *
+ * +-------------------+
+ * | Invalid Entry | An entry starts off as an invalid entry
+ * | de:ad:be:ef:00:00 | and only exists in the target cache.
+ * +-------------------+
+ *
+ * ~~~~
+ *
+ * +---------------------+
+ * | Global list_t | A mblk_t comes in for an entry. We
+ * | overlay_target_list | append it to the overlay_target_list.
+ * +---------------------+
+ * |
+ * v
+ * +-------------------+ +-------------------+
+ * | Pending Entry |----->| Pending Entry |--->...
+ * | 42:5e:1a:10:d6:2d | | de:ad:be:ef:00:00 |
+ * +-------------------+ +-------------------+
+ *
+ * ~~~~
+ *
+ * +--------------------------+
+ * | /dev/overlay minor state | Userland said that it would look up an
+ * | overlay_target_hdl_t | entry for us. We remove it from the
+ * +--------------------------+ global list and add it to the handle's
+ * | outstanding list.
+ * |
+ * v
+ * +-------------------+ +-------------------+
+ * | Pending Entry |----->| Pending Entry |
+ * | 90:b8:d0:79:02:dd | | de:ad:be:ef:00:00 |
+ * +-------------------+ +-------------------+
+ *
+ * ~~~~
+ *
+ * +-------------------+
+ * | Valid Entry | varpd returned an answer with
+ * | de:ad:be:ef:00:00 | OVERLAY_IOC_RESPOND and the target cache
+ * | 10.169.23.42:4789 | entry is now populated with a
+ * +-------------------+ destination and marked as valid
+ *
+ *
+ * The lookup mechanism is performed via a series of operations on the
+ * character pseudo-device /dev/overlay. The only thing that uses this device
+ * is the userland daemon varpd. /dev/overlay is a cloneable device, each open
+ * of it granting a new minor number which maintains its own state. We
+ * maintain this state so that if an outstanding lookup was queued to
+ * something that crashed or closed its handle without responding, we can
+ * know about this and handle it appropriately.
+ *
+ * When a lookup is first created, it's added to our global list of
+ * outstanding lookups. To service requests, userland is required to perform
+ * an ioctl to ask for a request. We will block it in the kernel for a set
+ * amount of time waiting for a request. When we give a request to a given
+ * minor instance of the device, we remove it from the global list and append
+ * the request to the device's list of outstanding entries, for the reasons we
+ * discussed above. When a lookup comes in, we give userland a smaller amount
+ * of information specific to that packet, the overlay_targ_lookup_t. It
+ * includes a request id to identify this request, and then the overlay id,
+ * the varpd id, the header and packet size, the source and destination mac
+ * addresses, the SAP, and any potential VLAN header.
+ *
+ * At that point, the request stays in that outstanding list until one of two
+ * ioctls is issued: OVERLAY_TARG_RESPOND or OVERLAY_TARG_DROP. During this
+ * time, userland may also perform other operations. For example, it may use
+ * OVERLAY_TARG_PKT to get a copy of this packet so it can perform more
+ * in-depth analysis of what to do beyond what we gave it initially. This is
+ * useful for providing proxy arp and the like. Finally, there are two other
+ * ioctls that varpd can then issue. The first is OVERLAY_TARG_INJECT, which
+ * injects the non-jumbo frame packet up into that mac device, and
+ * OVERLAY_TARG_RESEND, which causes us to encapsulate and send out the packet
+ * it has given us.
+ *
+ *
+ * Finally, through the target cache, several ioctls are provided to allow
+ * for interrogation and management of the cache. They allow for individual
+ * entries to be retrieved, set, or for the entire table to be flushed. For
+ * the full set of ioctls here and what they do, take a look at
+ * uts/common/sys/overlay_target.h.
+ *
+ * ------------------
+ * Sample Packet Flow
+ * ------------------
+ *
+ * There are a lot of pieces here; hopefully an example of how this all fits
+ * together will help clarify and elucidate what's going on. We're going to
+ * first track an outgoing packet, e.g. one that is sent from an IP interface
+ * on a VNIC on top of an overlay device, and then we'll look at what it means
+ * to respond to that.
+ *
+ *
+ * +----------------+ +--------------+ +------------------+
+ * | IP/DLS send |------->| MAC sends it |----------->| mblk_t reaches |
+ * | packet to MAC | | to the GLDv3 | | overlay GLDv3 tx |
+ * +----------------+ | VNIC device | | overlay_m_tx() |
+ * +--------------+ +------------------+
+ * |
+ * . lookup . cache |
+ * . drop . miss v
+ * +---------+ . +--------+ . +------------------+
+ * | freemsg |<-----*-------| varpd |<---*------| Lookup each mblk |
+ * | mblk_t | | lookup | | in the target |
+ * +---------+ | queued | | cache |
+ * ^ +--------+ +------------------+
+ * on send | | | cache
+ * error . . * *. . lookup * . . hit
+ * | | success v
+ * | | +------------------+
+ * +-----------------+ +--------------->| call plugin |
+ * | Send out | | ovpo_encap() to |
+ * | overlay_mux_t's |<----------------------------------| get encap mblk_t |
+ * | ksocket | +------------------+
+ * +-----------------+
+ *
+ * The receive path looks a little different, more like:
+ *
+ * +------------------+ +----------------+ +-----------+
+ * | mblk_t comes off |---->| enter netstack |--->| delivered |---+
+ * | the physical | | IP stack | | to | * . . direct
+ * | device | +----------------+ | ksocket | | callback
+ * +------------------+ +-----------+ |
+ * . overlay id |
+ * . not found v
+ * +-----------+ . +-----------------+ +--------------------+
+ * | freemsg |<--*------| call plugin |<------| overlay_mux_recv() |
+ * | mblk_t | | ovpo_decap() to | +--------------------+
+ * +-----------+ | decap mblk_t |
+ * +-----------------+
+ * |
+ * * . . overlay id
+ * v found
+ * +--------+ +----------------+
+ * | adjust |----->| call mac_rx |
+ * | mblk_t | | on original |
+ * +--------+ | decaped packet |
+ * +----------------+
+ *
+ * ------------------
+ * Netstack Awareness
+ * ------------------
+ *
+ * In the above image we note that this enters a netstack. Today, the only
+ * netstack that it can enter is the global zone's, as the overlay driver
+ * itself is not netstack aware. What this really means is that varpd cannot
+ * run in a non-global zone and an overlay device cannot belong to a
+ * non-global zone. Non-global zones can still have a VNIC assigned to them
+ * that's been created over the overlay device, the same way they would if it
+ * had been created over an etherstub or a physical device.
+ *
+ * The majority of the work to make it netstack aware is straightforward and the
+ * biggest thing is to create a netstack module that allows us to hook into
+ * netstack (and thus zone) creation and destruction. From there, we need to
+ * amend the target cache lookup routines that we discussed earlier to not have
+ * a global outstanding list and a global list of handles, but rather, one per
+ * netstack.
+ *
+ * For the mux, we'll need to open the ksocket in the context of the zone. We
+ * can likely do this with a properly composed credential, but we'll need to
+ * do some more work on that path. Finally, we'll want to make sure the dld
+ * ioctls are aware of the zoneid of the caller, use it appropriately, and
+ * store it in the overlay_dev_t.
+ *
+ * -----------
+ * GLDv3 Notes
+ * -----------
+ *
+ * The overlay driver implements a GLDv3 device. Parts of GLDv3 are more
+ * relevant to us than others. For example, GLDv3 is used to toggle the device
+ * in and out of promiscuous mode and to program MAC addresses for unicast and
+ * multicast hardware filters. Today, an overlay device doesn't have a notion
+ * of promiscuous mode, nor does it have a notion of unicast and multicast
+ * addresses programmed into the device. Instead, for the purposes of the
+ * hardware filter, we don't do anything and just always accept new addresses
+ * being added and removed.
+ *
+ * If the GLDv3 start function has not been called, then we will not use this
+ * device for I/O purposes. Any calls to transmit or receive should be dropped,
+ * though the GLDv3 guarantees us that transmit will not be called without
+ * calling start. Similarly, once stop is called, then no packets can be dealt
+ * with.
+ *
+ * Today we don't support the stat interfaces, though there's no good reason
+ * that we shouldn't assemble some of the stats based on what we have in the
+ * future.
+ *
+ * When it comes to link properties, many of the traditional link properties
+ * do not apply, and MAC handles many others for us. For example, we don't
+ * need to
+ * implement anything for overlay_m_getprop() to deal with returning the MTU, as
+ * MAC never calls into us for that. As such, there isn't much of anything to
+ * support in terms of properties.
+ *
+ * Today, we don't support any notion of hardware capabilities. However, if
+ * future NIC hardware or other changes to the system make it sensible for us
+ * to emulate logical groups, then we should do that. Even so, we still
+ * implement a capab function so that we can identify ourselves as an overlay
+ * device to the broader MAC framework. This is done mostly so that a device
+ * created on top of us can have fanout rings, as we don't try to lie about a
+ * speed for our device.
+ *
+ * The other question is what should be done for a device's MTU and margin. We
+ * set our minimum supported MTU to the minimum value that an IP network may
+ * be set to, 576 -- which mimics what an etherstub does. On the flip side, we
+ * have our upper bound set to 8900. This value comes from the fact that a lot
+ * of jumbo networks use 9000 as their maximum. As such, we want to reserve
+ * 100 bytes, which isn't exactly the most accurate number, but it'll be good
+ * enough for now. For the same reason, our default MTU on these devices is
+ * 1400, as the default MTU for everything else is usually 1500 or whatever
+ * the underlying device is at; reserving 100 bytes is a bit simpler than
+ * asking the netstack where all of the IP interfaces are at. This also calls
+ * into question how PMTU and PMTU discovery should work here. The challenge,
+ * especially for OVERLAY_TARGET_DYNAMIC, is that the MTU to any of the
+ * destinations may vary, and it's not clear that a single bad entry should
+ * cause the overall MTU to be lowered. Instead, we should figure out a better
+ * way of determining these kinds of PMTU errors and appropriately alerting
+ * the administrator via FMA.
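+ *
+ * Since the MTU is set and retrieved through MAC like any other link
+ * property, an administrator adjusts it through the usual dladm path, e.g.
+ * (the link name here is made up):
+ *
+ *	# dladm set-linkprop -p mtu=8900 overlay0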
+ *
+ * Regarding margin, we allow a margin of up to VLAN_TAGSZ depending on whether
+ * or not the underlying encapsulation device supports VLAN tags. If it does,
+ * then we'll set the margin to allow for it, otherwise, we will not.
+ */
+
+#include <sys/conf.h>
+#include <sys/errno.h>
+#include <sys/stat.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/modctl.h>
+#include <sys/policy.h>
+#include <sys/stream.h>
+#include <sys/strsubr.h>
+#include <sys/strsun.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/param.h>
+#include <sys/sysmacros.h>
+#include <sys/ddifm.h>
+
+#include <sys/dls.h>
+#include <sys/dld_ioc.h>
+#include <sys/mac_provider.h>
+#include <sys/mac_client_priv.h>
+#include <sys/mac_ether.h>
+#include <sys/vlan.h>
+
+#include <sys/overlay_impl.h>
+
+dev_info_t *overlay_dip;
+static kmutex_t overlay_dev_lock;
+static list_t overlay_dev_list;
+static uint8_t overlay_macaddr[ETHERADDRL] =
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+typedef enum overlay_dev_prop {
+ OVERLAY_DEV_P_MTU = 0,
+ OVERLAY_DEV_P_VNETID,
+ OVERLAY_DEV_P_ENCAP,
+ OVERLAY_DEV_P_VARPDID
+} overlay_dev_prop_t;
+
+#define OVERLAY_DEV_NPROPS 4
+static const char *overlay_dev_props[] = {
+ "mtu",
+ "vnetid",
+ "encap",
+ "varpd/id"
+};
+
+#define OVERLAY_MTU_MIN 576
+#define OVERLAY_MTU_DEF 1400
+#define OVERLAY_MTU_MAX 8900
+
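+/*
+ * Look up an overlay device by datalink ID and, if found, place a hold on it.
+ * The caller must drop the hold with overlay_hold_rele().
+ */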
+overlay_dev_t *
+overlay_hold_by_dlid(datalink_id_t id)
+{
+ overlay_dev_t *o;
+
+ mutex_enter(&overlay_dev_lock);
+ for (o = list_head(&overlay_dev_list); o != NULL;
+ o = list_next(&overlay_dev_list, o)) {
+ if (id == o->odd_linkid) {
+ mutex_enter(&o->odd_lock);
+ o->odd_ref++;
+ mutex_exit(&o->odd_lock);
+ mutex_exit(&overlay_dev_lock);
+ return (o);
+ }
+ }
+
+ mutex_exit(&overlay_dev_lock);
+ return (NULL);
+}
+
+void
+overlay_hold_rele(overlay_dev_t *odd)
+{
+ mutex_enter(&odd->odd_lock);
+ ASSERT(odd->odd_ref > 0);
+ odd->odd_ref--;
+ mutex_exit(&odd->odd_lock);
+}
+
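+/*
+ * Record that an rx or tx operation is in progress. overlay_io_done() drops
+ * the count again, and overlay_io_wait() uses the flags maintained here to
+ * quiesce the device.
+ */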
+void
+overlay_io_start(overlay_dev_t *odd, overlay_dev_flag_t flag)
+{
+ ASSERT(flag == OVERLAY_F_IN_RX || flag == OVERLAY_F_IN_TX);
+ ASSERT(MUTEX_HELD(&odd->odd_lock));
+
+ if (flag & OVERLAY_F_IN_RX)
+ odd->odd_rxcount++;
+ if (flag & OVERLAY_F_IN_TX)
+ odd->odd_txcount++;
+ odd->odd_flags |= flag;
+}
+
+void
+overlay_io_done(overlay_dev_t *odd, overlay_dev_flag_t flag)
+{
+ boolean_t signal = B_FALSE;
+
+ ASSERT(flag == OVERLAY_F_IN_RX || flag == OVERLAY_F_IN_TX);
+ ASSERT(MUTEX_HELD(&odd->odd_lock));
+
+ if (flag & OVERLAY_F_IN_RX) {
+ ASSERT(odd->odd_rxcount > 0);
+ odd->odd_rxcount--;
+ if (odd->odd_rxcount == 0) {
+ signal = B_TRUE;
+ odd->odd_flags &= ~OVERLAY_F_IN_RX;
+ }
+ }
+ if (flag & OVERLAY_F_IN_TX) {
+ ASSERT(odd->odd_txcount > 0);
+ odd->odd_txcount--;
+ if (odd->odd_txcount == 0) {
+ signal = B_TRUE;
+ odd->odd_flags &= ~OVERLAY_F_IN_TX;
+ }
+ }
+
+ if (signal == B_TRUE)
+ cv_broadcast(&odd->odd_iowait);
+}
+
+static void
+overlay_io_wait(overlay_dev_t *odd, overlay_dev_flag_t flag)
+{
+ ASSERT((flag & ~OVERLAY_F_IOMASK) == 0);
+ ASSERT(MUTEX_HELD(&odd->odd_lock));
+
+ while (odd->odd_flags & flag) {
+ cv_wait(&odd->odd_iowait, &odd->odd_lock);
+ }
+}
+
+void
+overlay_dev_iter(overlay_dev_iter_f func, void *arg)
+{
+ overlay_dev_t *odd;
+
+ mutex_enter(&overlay_dev_lock);
+ for (odd = list_head(&overlay_dev_list); odd != NULL;
+ odd = list_next(&overlay_dev_list, odd)) {
+ if (func(odd, arg) != 0) {
+ mutex_exit(&overlay_dev_lock);
+ return;
+ }
+ }
+ mutex_exit(&overlay_dev_lock);
+}
+
+/* ARGSUSED */
+static int
+overlay_m_stat(void *arg, uint_t stat, uint64_t *val)
+{
+ return (ENOTSUP);
+}
+
+static int
+overlay_m_start(void *arg)
+{
+ overlay_dev_t *odd = arg;
+ overlay_mux_t *mux;
+ int ret, domain, family, prot;
+ struct sockaddr_storage storage;
+ socklen_t slen;
+
+ mutex_enter(&odd->odd_lock);
+ if ((odd->odd_flags & OVERLAY_F_ACTIVATED) == 0) {
+ mutex_exit(&odd->odd_lock);
+ return (EAGAIN);
+ }
+ mutex_exit(&odd->odd_lock);
+
+ ret = odd->odd_plugin->ovp_ops->ovpo_socket(odd->odd_pvoid, &domain,
+ &family, &prot, (struct sockaddr *)&storage, &slen);
+ if (ret != 0)
+ return (ret);
+
+ mux = overlay_mux_open(odd->odd_plugin, domain, family, prot,
+ (struct sockaddr *)&storage, slen, &ret);
+ if (mux == NULL)
+ return (ret);
+
+ overlay_mux_add_dev(mux, odd);
+ odd->odd_mux = mux;
+ mutex_enter(&odd->odd_lock);
+ ASSERT(!(odd->odd_flags & OVERLAY_F_IN_MUX));
+ odd->odd_flags |= OVERLAY_F_IN_MUX;
+ mutex_exit(&odd->odd_lock);
+
+ return (0);
+}
+
+static void
+overlay_m_stop(void *arg)
+{
+ overlay_dev_t *odd = arg;
+
+ /*
+ * The MAC Perimeter is held here, so we don't have to worry about
+	 * synchronizing this with respect to metadata operations.
+ */
+ mutex_enter(&odd->odd_lock);
+ VERIFY(odd->odd_flags & OVERLAY_F_IN_MUX);
+ VERIFY(!(odd->odd_flags & OVERLAY_F_MDDROP));
+ odd->odd_flags |= OVERLAY_F_MDDROP;
+ overlay_io_wait(odd, OVERLAY_F_IOMASK);
+ mutex_exit(&odd->odd_lock);
+
+ overlay_mux_remove_dev(odd->odd_mux, odd);
+ overlay_mux_close(odd->odd_mux);
+ odd->odd_mux = NULL;
+
+ mutex_enter(&odd->odd_lock);
+ odd->odd_flags &= ~OVERLAY_F_IN_MUX;
+ odd->odd_flags &= ~OVERLAY_F_MDDROP;
+ VERIFY((odd->odd_flags & OVERLAY_F_STOPMASK) == 0);
+ mutex_exit(&odd->odd_lock);
+}
+
+/*
+ * For more info on this, see the big theory statement.
+ */
+/* ARGSUSED */
+static int
+overlay_m_promisc(void *arg, boolean_t on)
+{
+ return (0);
+}
+
+/*
+ * For more info on this, see the big theory statement.
+ */
+/* ARGSUSED */
+static int
+overlay_m_multicast(void *arg, boolean_t add, const uint8_t *addrp)
+{
+ return (0);
+}
+
+/*
+ * For more info on this, see the big theory statement.
+ */
+/* ARGSUSED */
+static int
+overlay_m_unicast(void *arg, const uint8_t *macaddr)
+{
+ return (0);
+}
+
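+/*
+ * GLDv3 transmit entry point. For each mblk_t in the chain, consult the
+ * target cache for a destination, ask the plugin to prepend its
+ * encapsulation header, and hand the result to the mux's ksocket. Returning
+ * a non-NULL chain indicates to MAC that we could not send everything.
+ */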
+mblk_t *
+overlay_m_tx(void *arg, mblk_t *mp_chain)
+{
+ overlay_dev_t *odd = arg;
+ mblk_t *mp, *ep;
+ int ret;
+ ovep_encap_info_t einfo;
+ struct msghdr hdr;
+
+ mutex_enter(&odd->odd_lock);
+ if ((odd->odd_flags & OVERLAY_F_MDDROP) ||
+ !(odd->odd_flags & OVERLAY_F_IN_MUX)) {
+ mutex_exit(&odd->odd_lock);
+ freemsgchain(mp_chain);
+ return (NULL);
+ }
+ overlay_io_start(odd, OVERLAY_F_IN_TX);
+ mutex_exit(&odd->odd_lock);
+
+ bzero(&hdr, sizeof (struct msghdr));
+
+ bzero(&einfo, sizeof (ovep_encap_info_t));
+ einfo.ovdi_id = odd->odd_vid;
+ mp = mp_chain;
+ while (mp != NULL) {
+ socklen_t slen;
+ struct sockaddr_storage storage;
+
+ mp_chain = mp->b_next;
+ mp->b_next = NULL;
+ ep = NULL;
+
+ ret = overlay_target_lookup(odd, mp,
+ (struct sockaddr *)&storage, &slen);
+ if (ret != OVERLAY_TARGET_OK) {
+ if (ret == OVERLAY_TARGET_DROP)
+ freemsg(mp);
+ mp = mp_chain;
+ continue;
+ }
+
+ hdr.msg_name = &storage;
+ hdr.msg_namelen = slen;
+
+ ret = odd->odd_plugin->ovp_ops->ovpo_encap(odd->odd_mh, mp,
+ &einfo, &ep);
+ if (ret != 0 || ep == NULL) {
+ freemsg(mp);
+ goto out;
+ }
+
+ ep->b_cont = mp;
+ ret = overlay_mux_tx(odd->odd_mux, &hdr, ep);
+ if (ret != 0)
+ goto out;
+
+ mp = mp_chain;
+ }
+
+out:
+ mutex_enter(&odd->odd_lock);
+ overlay_io_done(odd, OVERLAY_F_IN_TX);
+ mutex_exit(&odd->odd_lock);
+ return (mp_chain);
+}
+
+/* ARGSUSED */
+static void
+overlay_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
+{
+ miocnak(q, mp, 0, ENOTSUP);
+}
+
+/* ARGSUSED */
+static boolean_t
+overlay_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
+{
+ /*
+ * Tell MAC we're an overlay.
+ */
+ if (cap == MAC_CAPAB_OVERLAY)
+ return (B_TRUE);
+ return (B_FALSE);
+}
+
+/* ARGSUSED */
+static int
+overlay_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_valsize, const void *pr_val)
+{
+ uint32_t mtu, old;
+ int err;
+ overlay_dev_t *odd = arg;
+
+ if (pr_num != MAC_PROP_MTU)
+ return (ENOTSUP);
+
+ bcopy(pr_val, &mtu, sizeof (mtu));
+ if (mtu < OVERLAY_MTU_MIN || mtu > OVERLAY_MTU_MAX)
+ return (EINVAL);
+
+ mutex_enter(&odd->odd_lock);
+ old = odd->odd_mtu;
+ odd->odd_mtu = mtu;
+ err = mac_maxsdu_update(odd->odd_mh, mtu);
+ if (err != 0)
+ odd->odd_mtu = old;
+ mutex_exit(&odd->odd_lock);
+
+ return (err);
+}
+
+/* ARGSUSED */
+static int
+overlay_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_valsize, void *pr_val)
+{
+ return (ENOTSUP);
+}
+
+/* ARGSUSED */
+static void
+overlay_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ mac_prop_info_handle_t prh)
+{
+ if (pr_num != MAC_PROP_MTU)
+ return;
+
+ mac_prop_info_set_default_uint32(prh, OVERLAY_MTU_DEF);
+ mac_prop_info_set_range_uint32(prh, OVERLAY_MTU_MIN, OVERLAY_MTU_MAX);
+}
+
+static mac_callbacks_t overlay_m_callbacks = {
+ .mc_callbacks = (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP |
+ MC_PROPINFO),
+ .mc_getstat = overlay_m_stat,
+ .mc_start = overlay_m_start,
+ .mc_stop = overlay_m_stop,
+ .mc_setpromisc = overlay_m_promisc,
+ .mc_multicst = overlay_m_multicast,
+ .mc_unicst = overlay_m_unicast,
+ .mc_tx = overlay_m_tx,
+ .mc_ioctl = overlay_m_ioctl,
+ .mc_getcapab = overlay_m_getcapab,
+ .mc_getprop = overlay_m_getprop,
+ .mc_setprop = overlay_m_setprop,
+ .mc_propinfo = overlay_m_propinfo
+};
+
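+/*
+ * Validate a name from userland: it must be non-empty, NUL-terminated within
+ * buflen, free of '/' characters (which delimit property namespaces), and
+ * valid UTF-8.
+ */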
+static boolean_t
+overlay_valid_name(const char *name, size_t buflen)
+{
+ size_t actlen;
+ int err, i;
+
+ for (i = 0; i < buflen; i++) {
+ if (name[i] == '\0')
+ break;
+ }
+
+ if (i == 0 || i == buflen)
+ return (B_FALSE);
+ actlen = i;
+ if (strchr(name, '/') != NULL)
+ return (B_FALSE);
+ if (u8_validate((char *)name, actlen, NULL,
+ U8_VALIDATE_ENTIRE, &err) < 0)
+ return (B_FALSE);
+ return (B_TRUE);
+}
+
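+/*
+ * Create a new overlay device: instantiate the encapsulation plugin, check
+ * the virtual network id against the plugin's id size, register with MAC and
+ * DLS, and insert the device onto the global list. Both the datalink id and
+ * the (vnetid, plugin) pair must be unique.
+ */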
+/* ARGSUSED */
+static int
+overlay_i_create(void *karg, intptr_t arg, int mode, cred_t *cred, int *rvalp)
+{
+ int err;
+ uint64_t maxid;
+ overlay_dev_t *odd, *o;
+ mac_register_t *mac;
+ overlay_ioc_create_t *oicp = karg;
+
+ if (overlay_valid_name(oicp->oic_encap, MAXLINKNAMELEN) == B_FALSE)
+ return (EINVAL);
+
+ odd = kmem_zalloc(sizeof (overlay_dev_t), KM_SLEEP);
+ odd->odd_linkid = oicp->oic_linkid;
+ odd->odd_plugin = overlay_plugin_lookup(oicp->oic_encap);
+ if (odd->odd_plugin == NULL) {
+ kmem_free(odd, sizeof (overlay_dev_t));
+ return (ENOENT);
+ }
+ err = odd->odd_plugin->ovp_ops->ovpo_init((overlay_handle_t)odd,
+ &odd->odd_pvoid);
+ if (err != 0) {
+ odd->odd_plugin->ovp_ops->ovpo_fini(odd->odd_pvoid);
+ overlay_plugin_rele(odd->odd_plugin);
+ kmem_free(odd, sizeof (overlay_dev_t));
+ return (EINVAL);
+ }
+
+ /*
+ * Make sure that our virtual network id is valid for the given plugin
+ * that we're working with.
+ */
+ ASSERT(odd->odd_plugin->ovp_id_size <= 8);
+ maxid = UINT64_MAX;
+ if (odd->odd_plugin->ovp_id_size != 8)
+ maxid = (1ULL << (odd->odd_plugin->ovp_id_size * 8)) - 1ULL;
+ if (oicp->oic_vnetid > maxid) {
+ odd->odd_plugin->ovp_ops->ovpo_fini(odd->odd_pvoid);
+ overlay_plugin_rele(odd->odd_plugin);
+ kmem_free(odd, sizeof (overlay_dev_t));
+ return (EINVAL);
+ }
+ odd->odd_vid = oicp->oic_vnetid;
+
+ mac = mac_alloc(MAC_VERSION);
+	if (mac == NULL) {
+		odd->odd_plugin->ovp_ops->ovpo_fini(odd->odd_pvoid);
+		overlay_plugin_rele(odd->odd_plugin);
+		kmem_free(odd, sizeof (overlay_dev_t));
+		return (EINVAL);
+	}
+
+ mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
+ mac->m_driver = odd;
+ mac->m_dip = overlay_dip;
+ mac->m_dst_addr = NULL;
+ mac->m_callbacks = &overlay_m_callbacks;
+ mac->m_pdata = NULL;
+ mac->m_pdata_size = 0;
+
+ mac->m_priv_props = NULL;
+
+ /* Let mac handle this itself. */
+ mac->m_instance = (uint_t)-1;
+
+	/*
+	 * There is no real source address that should be used here, but
+	 * saying that we're not ethernet is going to cause its own problems.
+	 * At the end of the day, this is fine.
+	 */
+ mac->m_src_addr = overlay_macaddr;
+
+ /*
+ * Start with the default MTU as the max SDU. If the MTU is changed, the
+ * SDU will be changed to reflect that.
+ */
+ mac->m_min_sdu = 1;
+ mac->m_max_sdu = OVERLAY_MTU_DEF;
+ mac->m_multicast_sdu = 0;
+
+	/*
+	 * The underlying device doesn't matter; instead, this comes from the
+	 * encapsulation protocol and whether or not it allows VLAN tags.
+	 */
+ if (odd->odd_plugin->ovp_flags & OVEP_F_VLAN_TAG) {
+ mac->m_margin = VLAN_TAGSZ;
+ } else {
+ mac->m_margin = 0;
+ }
+
+	/*
+	 * Today, we have no MAC virtualization. It may make sense in the
+	 * future to go ahead and emulate some subset of this, but it doesn't
+	 * today.
+	 */
+ mac->m_v12n = MAC_VIRT_NONE;
+
+ mutex_enter(&overlay_dev_lock);
+ for (o = list_head(&overlay_dev_list); o != NULL;
+ o = list_next(&overlay_dev_list, o)) {
+ if (o->odd_linkid == oicp->oic_linkid) {
+ mutex_exit(&overlay_dev_lock);
+ odd->odd_plugin->ovp_ops->ovpo_fini(odd->odd_pvoid);
+ overlay_plugin_rele(odd->odd_plugin);
+ kmem_free(odd, sizeof (overlay_dev_t));
+ return (EEXIST);
+ }
+
+ if (o->odd_vid == oicp->oic_vnetid &&
+ o->odd_plugin == odd->odd_plugin) {
+ mutex_exit(&overlay_dev_lock);
+ odd->odd_plugin->ovp_ops->ovpo_fini(odd->odd_pvoid);
+ overlay_plugin_rele(odd->odd_plugin);
+ kmem_free(odd, sizeof (overlay_dev_t));
+ return (EEXIST);
+ }
+ }
+
+ err = mac_register(mac, &odd->odd_mh);
+ mac_free(mac);
+ if (err != 0) {
+ mutex_exit(&overlay_dev_lock);
+ odd->odd_plugin->ovp_ops->ovpo_fini(odd->odd_pvoid);
+ overlay_plugin_rele(odd->odd_plugin);
+ kmem_free(odd, sizeof (overlay_dev_t));
+ return (err);
+ }
+
+ err = dls_devnet_create(odd->odd_mh, odd->odd_linkid,
+ crgetzoneid(cred));
+ if (err != 0) {
+ mutex_exit(&overlay_dev_lock);
+ (void) mac_unregister(odd->odd_mh);
+ odd->odd_plugin->ovp_ops->ovpo_fini(odd->odd_pvoid);
+ overlay_plugin_rele(odd->odd_plugin);
+ kmem_free(odd, sizeof (overlay_dev_t));
+ return (err);
+ }
+
+ mutex_init(&odd->odd_lock, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&odd->odd_iowait, NULL, CV_DRIVER, NULL);
+ odd->odd_ref = 0;
+ odd->odd_flags = 0;
+ list_insert_tail(&overlay_dev_list, odd);
+ mutex_exit(&overlay_dev_lock);
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_i_activate(void *karg, intptr_t arg, int mode, cred_t *cred, int *rvalp)
+{
+ int i, ret;
+ overlay_dev_t *odd;
+ mac_perim_handle_t mph;
+ overlay_ioc_activate_t *oiap = karg;
+ overlay_ioc_propinfo_t *infop;
+ overlay_ioc_prop_t *oip;
+ overlay_prop_handle_t phdl;
+
+ odd = overlay_hold_by_dlid(oiap->oia_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ infop = kmem_alloc(sizeof (overlay_ioc_propinfo_t), KM_SLEEP);
+ oip = kmem_alloc(sizeof (overlay_ioc_prop_t), KM_SLEEP);
+ phdl = (overlay_prop_handle_t)infop;
+
+ mac_perim_enter_by_mh(odd->odd_mh, &mph);
+ mutex_enter(&odd->odd_lock);
+ if (odd->odd_flags & OVERLAY_F_ACTIVATED) {
+ mutex_exit(&odd->odd_lock);
+ mac_perim_exit(mph);
+ overlay_hold_rele(odd);
+ kmem_free(infop, sizeof (overlay_ioc_propinfo_t));
+ kmem_free(oip, sizeof (overlay_ioc_prop_t));
+ return (EEXIST);
+ }
+ mutex_exit(&odd->odd_lock);
+
+ for (i = 0; i < odd->odd_plugin->ovp_nprops; i++) {
+ const char *pname = odd->odd_plugin->ovp_props[i];
+ bzero(infop, sizeof (overlay_ioc_propinfo_t));
+ overlay_prop_init(phdl);
+ ret = odd->odd_plugin->ovp_ops->ovpo_propinfo(pname, phdl);
+ if (ret != 0) {
+ mac_perim_exit(mph);
+ overlay_hold_rele(odd);
+ kmem_free(infop, sizeof (overlay_ioc_propinfo_t));
+ kmem_free(oip, sizeof (overlay_ioc_prop_t));
+ return (ret);
+ }
+
+ if ((infop->oipi_prot & OVERLAY_PROP_PERM_REQ) == 0)
+ continue;
+ bzero(oip, sizeof (overlay_ioc_prop_t));
+ oip->oip_size = sizeof (oip->oip_value);
+ ret = odd->odd_plugin->ovp_ops->ovpo_getprop(odd->odd_pvoid,
+ pname, oip->oip_value, &oip->oip_size);
+ if (ret != 0) {
+ mac_perim_exit(mph);
+ overlay_hold_rele(odd);
+ kmem_free(infop, sizeof (overlay_ioc_propinfo_t));
+ kmem_free(oip, sizeof (overlay_ioc_prop_t));
+ return (ret);
+ }
+ if (oip->oip_size == 0) {
+ mac_perim_exit(mph);
+ overlay_hold_rele(odd);
+ kmem_free(infop, sizeof (overlay_ioc_propinfo_t));
+ kmem_free(oip, sizeof (overlay_ioc_prop_t));
+ return (EINVAL);
+ }
+ }
+
+ mutex_enter(&odd->odd_lock);
+ if ((odd->odd_flags & OVERLAY_F_VARPD) == 0) {
+ mutex_exit(&odd->odd_lock);
+ mac_perim_exit(mph);
+ overlay_hold_rele(odd);
+ kmem_free(infop, sizeof (overlay_ioc_propinfo_t));
+ kmem_free(oip, sizeof (overlay_ioc_prop_t));
+ return (ENXIO);
+ }
+
+ ASSERT((odd->odd_flags & OVERLAY_F_ACTIVATED) == 0);
+ odd->odd_flags |= OVERLAY_F_ACTIVATED;
+
+ /*
+ * Now that we've activated ourselves, we should indicate to the world
+ * that we're up. Note that we may not be able to perform lookups at
+ * this time, but our notion of being 'up' isn't dependent on that
+ * ability.
+ */
+ mac_link_update(odd->odd_mh, LINK_STATE_UP);
+ mutex_exit(&odd->odd_lock);
+
+ mac_perim_exit(mph);
+ overlay_hold_rele(odd);
+ kmem_free(infop, sizeof (overlay_ioc_propinfo_t));
+ kmem_free(oip, sizeof (overlay_ioc_prop_t));
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_i_delete(void *karg, intptr_t arg, int mode, cred_t *cred, int *rvalp)
+{
+ overlay_ioc_delete_t *oidp = karg;
+ overlay_dev_t *odd;
+ datalink_id_t tid;
+ int ret;
+
+ odd = overlay_hold_by_dlid(oidp->oid_linkid);
+ if (odd == NULL) {
+ return (ENOENT);
+ }
+
+ mutex_enter(&odd->odd_lock);
+ /* If we're not the only hold, we're busy */
+ if (odd->odd_ref != 1) {
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (EBUSY);
+ }
+
+ if (odd->odd_flags & OVERLAY_F_IN_MUX) {
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (EBUSY);
+ }
+
+	/*
+	 * To remove this, we need to first remove it from dls and then remove
+	 * it from mac. The act of disabling it in mac will check if there are
+	 * devices on top of this, e.g. vnics. If there are, then that will
+	 * fail and we'll have to go through and recreate the dls entry. Only
+	 * after mac_disable() has succeeded will we go through and actually
+	 * free everything and drop the dev lock.
+	 */
+ ret = dls_devnet_destroy(odd->odd_mh, &tid, B_TRUE);
+ if (ret != 0) {
+ overlay_hold_rele(odd);
+ return (ret);
+ }
+
+ ASSERT(oidp->oid_linkid == tid);
+ ret = mac_disable(odd->odd_mh);
+ if (ret != 0) {
+ (void) dls_devnet_create(odd->odd_mh, odd->odd_linkid,
+ crgetzoneid(cred));
+ overlay_hold_rele(odd);
+ return (ret);
+ }
+
+ overlay_target_quiesce(odd->odd_target);
+
+ mutex_enter(&overlay_dev_lock);
+ list_remove(&overlay_dev_list, odd);
+ mutex_exit(&overlay_dev_lock);
+
+ cv_destroy(&odd->odd_iowait);
+ mutex_destroy(&odd->odd_lock);
+ overlay_target_free(odd);
+ odd->odd_plugin->ovp_ops->ovpo_fini(odd->odd_pvoid);
+ overlay_plugin_rele(odd->odd_plugin);
+ kmem_free(odd, sizeof (overlay_dev_t));
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_i_nprops(void *karg, intptr_t arg, int mode, cred_t *cred,
+ int *rvalp)
+{
+ overlay_dev_t *odd;
+ overlay_ioc_nprops_t *on = karg;
+
+ odd = overlay_hold_by_dlid(on->oipn_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+ on->oipn_nprops = odd->odd_plugin->ovp_nprops + OVERLAY_DEV_NPROPS;
+ overlay_hold_rele(odd);
+
+ return (0);
+}
+
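+/*
+ * Plugin walker callback used to populate the 'encap' property's list of
+ * valid values with the name of each registered encapsulation plugin.
+ */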
+static int
+overlay_propinfo_plugin_cb(overlay_plugin_t *opp, void *arg)
+{
+ overlay_prop_handle_t phdl = arg;
+ overlay_prop_set_range_str(phdl, opp->ovp_name);
+ return (0);
+}
+
+static int
+overlay_i_name_to_propid(overlay_dev_t *odd, const char *name, uint_t *id)
+{
+ int i;
+
+ for (i = 0; i < OVERLAY_DEV_NPROPS; i++) {
+ if (strcmp(overlay_dev_props[i], name) == 0) {
+ *id = i;
+ return (0);
+ }
+ }
+
+ for (i = 0; i < odd->odd_plugin->ovp_nprops; i++) {
+ if (strcmp(odd->odd_plugin->ovp_props[i], name) == 0) {
+ *id = i + OVERLAY_DEV_NPROPS;
+ return (0);
+ }
+ }
+
+ return (ENOENT);
+}
+
+static void
+overlay_i_propinfo_mtu(overlay_dev_t *odd, overlay_prop_handle_t phdl)
+{
+ uint32_t def;
+ mac_propval_range_t range;
+ uint_t perm;
+
+ ASSERT(MAC_PERIM_HELD(odd->odd_mh));
+
+ bzero(&range, sizeof (mac_propval_range_t));
+ range.mpr_count = 1;
+ if (mac_prop_info(odd->odd_mh, MAC_PROP_MTU, "mtu", &def,
+ sizeof (def), &range, &perm) != 0)
+ return;
+
+ if (perm == MAC_PROP_PERM_READ)
+ overlay_prop_set_prot(phdl, OVERLAY_PROP_PERM_READ);
+ else if (perm == MAC_PROP_PERM_WRITE)
+ overlay_prop_set_prot(phdl, OVERLAY_PROP_PERM_WRITE);
+ else if (perm == MAC_PROP_PERM_RW)
+ overlay_prop_set_prot(phdl, OVERLAY_PROP_PERM_RW);
+
+ overlay_prop_set_type(phdl, OVERLAY_PROP_T_UINT);
+ overlay_prop_set_default(phdl, &def, sizeof (def));
+ overlay_prop_set_range_uint32(phdl, range.mpr_range_uint32[0].mpur_min,
+ range.mpr_range_uint32[0].mpur_max);
+}
+
+/* ARGSUSED */
+static int
+overlay_i_propinfo(void *karg, intptr_t arg, int mode, cred_t *cred,
+ int *rvalp)
+{
+ overlay_dev_t *odd;
+ int ret;
+ mac_perim_handle_t mph;
+ uint_t propid = UINT_MAX;
+ overlay_ioc_propinfo_t *oip = karg;
+ overlay_prop_handle_t phdl = (overlay_prop_handle_t)oip;
+
+ odd = overlay_hold_by_dlid(oip->oipi_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ overlay_prop_init(phdl);
+ mac_perim_enter_by_mh(odd->odd_mh, &mph);
+
+ /*
+ * If the id is -1, then the property that we're looking for is named in
+ * oipi_name and we should fill in its id. Otherwise, we've been given
+ * an id and we need to turn that into a name for our plugin's sake. The
+ * id is our own fabrication for property discovery.
+ */
+ if (oip->oipi_id == -1) {
+ /*
+ * Determine if it's a known generic property or it belongs to a
+ * module by checking against the list of known names.
+ */
+ oip->oipi_name[OVERLAY_PROP_NAMELEN-1] = '\0';
+ if ((ret = overlay_i_name_to_propid(odd, oip->oipi_name,
+ &propid)) != 0) {
+ overlay_hold_rele(odd);
+ mac_perim_exit(mph);
+ return (ret);
+ }
+ oip->oipi_id = propid;
+ if (propid >= OVERLAY_DEV_NPROPS) {
+ ret = odd->odd_plugin->ovp_ops->ovpo_propinfo(
+ oip->oipi_name, phdl);
+ overlay_hold_rele(odd);
+ mac_perim_exit(mph);
+ return (ret);
+
+ }
+ } else if (oip->oipi_id >= OVERLAY_DEV_NPROPS) {
+ uint_t id = oip->oipi_id - OVERLAY_DEV_NPROPS;
+
+ if (id >= odd->odd_plugin->ovp_nprops) {
+ overlay_hold_rele(odd);
+ mac_perim_exit(mph);
+ return (EINVAL);
+ }
+ ret = odd->odd_plugin->ovp_ops->ovpo_propinfo(
+ odd->odd_plugin->ovp_props[id], phdl);
+ overlay_hold_rele(odd);
+ mac_perim_exit(mph);
+ return (ret);
+ } else if (oip->oipi_id < -1) {
+ overlay_hold_rele(odd);
+ mac_perim_exit(mph);
+ return (EINVAL);
+ } else {
+ ASSERT(oip->oipi_id < OVERLAY_DEV_NPROPS);
+ ASSERT(oip->oipi_id >= 0);
+ propid = oip->oipi_id;
+ (void) strlcpy(oip->oipi_name, overlay_dev_props[propid],
+ sizeof (oip->oipi_name));
+ }
+
+ switch (propid) {
+ case OVERLAY_DEV_P_MTU:
+ overlay_i_propinfo_mtu(odd, phdl);
+ break;
+ case OVERLAY_DEV_P_VNETID:
+ overlay_prop_set_prot(phdl, OVERLAY_PROP_PERM_RW);
+ overlay_prop_set_type(phdl, OVERLAY_PROP_T_UINT);
+ overlay_prop_set_nodefault(phdl);
+ break;
+ case OVERLAY_DEV_P_ENCAP:
+ overlay_prop_set_prot(phdl, OVERLAY_PROP_PERM_READ);
+ overlay_prop_set_type(phdl, OVERLAY_PROP_T_STRING);
+ overlay_prop_set_nodefault(phdl);
+ overlay_plugin_walk(overlay_propinfo_plugin_cb, phdl);
+ break;
+ case OVERLAY_DEV_P_VARPDID:
+ overlay_prop_set_prot(phdl, OVERLAY_PROP_PERM_READ);
+ overlay_prop_set_type(phdl, OVERLAY_PROP_T_UINT);
+ overlay_prop_set_nodefault(phdl);
+ break;
+ default:
+ overlay_hold_rele(odd);
+ mac_perim_exit(mph);
+ return (ENOENT);
+ }
+
+ overlay_hold_rele(odd);
+ mac_perim_exit(mph);
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_i_getprop(void *karg, intptr_t arg, int mode, cred_t *cred,
+ int *rvalp)
+{
+ int ret;
+ overlay_dev_t *odd;
+ mac_perim_handle_t mph;
+ overlay_ioc_prop_t *oip = karg;
+ uint_t propid, mtu;
+
+ odd = overlay_hold_by_dlid(oip->oip_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ mac_perim_enter_by_mh(odd->odd_mh, &mph);
+ oip->oip_size = OVERLAY_PROP_SIZEMAX;
+ oip->oip_name[OVERLAY_PROP_NAMELEN-1] = '\0';
+	if (oip->oip_id == -1) {
+		int i;
+
+		for (i = 0; i < OVERLAY_DEV_NPROPS; i++) {
+			if (strcmp(overlay_dev_props[i], oip->oip_name) == 0)
+				break;
+		}
+
+		if (i == OVERLAY_DEV_NPROPS) {
+			ret = odd->odd_plugin->ovp_ops->ovpo_getprop(
+			    odd->odd_pvoid, oip->oip_name,
+			    oip->oip_value, &oip->oip_size);
+			overlay_hold_rele(odd);
+			mac_perim_exit(mph);
+			return (ret);
+		}
+
+		propid = i;
+ } else if (oip->oip_id >= OVERLAY_DEV_NPROPS) {
+ uint_t id = oip->oip_id - OVERLAY_DEV_NPROPS;
+
+		if (id >= odd->odd_plugin->ovp_nprops) {
+ overlay_hold_rele(odd);
+ mac_perim_exit(mph);
+ return (EINVAL);
+ }
+ ret = odd->odd_plugin->ovp_ops->ovpo_getprop(odd->odd_pvoid,
+ odd->odd_plugin->ovp_props[id], oip->oip_value,
+ &oip->oip_size);
+ overlay_hold_rele(odd);
+ mac_perim_exit(mph);
+ return (ret);
+ } else if (oip->oip_id < -1) {
+ overlay_hold_rele(odd);
+ mac_perim_exit(mph);
+ return (EINVAL);
+ } else {
+ ASSERT(oip->oip_id < OVERLAY_DEV_NPROPS);
+ ASSERT(oip->oip_id >= 0);
+ propid = oip->oip_id;
+ }
+
+ ret = 0;
+ switch (propid) {
+ case OVERLAY_DEV_P_MTU:
+ /*
+ * The MTU is always set and retrieved through MAC, to allow for
+ * MAC to do whatever it wants, as really that property belongs
+		 * to MAC. This is important for things where vnics have a
+		 * hold on
+ * the MTU.
+ */
+ mac_sdu_get(odd->odd_mh, NULL, &mtu);
+ bcopy(&mtu, oip->oip_value, sizeof (uint_t));
+ oip->oip_size = sizeof (uint_t);
+ break;
+ case OVERLAY_DEV_P_VNETID:
+ /*
+ * While it's read-only while inside of a mux, we're not in a
+ * context that can guarantee that. Therefore we always grab the
+ * overlay_dev_t's odd_lock.
+ */
+ mutex_enter(&odd->odd_lock);
+ bcopy(&odd->odd_vid, oip->oip_value, sizeof (uint64_t));
+ mutex_exit(&odd->odd_lock);
+ oip->oip_size = sizeof (uint64_t);
+ break;
+ case OVERLAY_DEV_P_ENCAP:
+ oip->oip_size = strlcpy((char *)oip->oip_value,
+ odd->odd_plugin->ovp_name, oip->oip_size);
+ break;
+ case OVERLAY_DEV_P_VARPDID:
+ mutex_enter(&odd->odd_lock);
+ if (odd->odd_flags & OVERLAY_F_VARPD) {
+ const uint64_t val = odd->odd_target->ott_id;
+ bcopy(&val, oip->oip_value, sizeof (uint64_t));
+ oip->oip_size = sizeof (uint64_t);
+ } else {
+ oip->oip_size = 0;
+ }
+ mutex_exit(&odd->odd_lock);
+ break;
+ default:
+ ret = ENOENT;
+ }
+
+ overlay_hold_rele(odd);
+ mac_perim_exit(mph);
+ return (ret);
+}
+
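+/*
+ * Change the virtual network id. If the device is currently in a mux, we
+ * must first set the drop flag and quiesce I/O, pull the device out of the
+ * mux, and re-add it under the new id before letting I/O resume.
+ */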
+static void
+overlay_setprop_vnetid(overlay_dev_t *odd, uint64_t vnetid)
+{
+ mutex_enter(&odd->odd_lock);
+
+ /* Simple case, not active */
+ if (!(odd->odd_flags & OVERLAY_F_IN_MUX)) {
+ odd->odd_vid = vnetid;
+ mutex_exit(&odd->odd_lock);
+ return;
+ }
+
+ /*
+ * In the hard case, we need to set the drop flag, quiesce I/O and then
+ * we can go ahead and do everything.
+ */
+ odd->odd_flags |= OVERLAY_F_MDDROP;
+ overlay_io_wait(odd, OVERLAY_F_IOMASK);
+ mutex_exit(&odd->odd_lock);
+
+ overlay_mux_remove_dev(odd->odd_mux, odd);
+ mutex_enter(&odd->odd_lock);
+ odd->odd_vid = vnetid;
+ mutex_exit(&odd->odd_lock);
+ overlay_mux_add_dev(odd->odd_mux, odd);
+
+ mutex_enter(&odd->odd_lock);
+ ASSERT(odd->odd_flags & OVERLAY_F_IN_MUX);
+	odd->odd_flags &= ~OVERLAY_F_MDDROP;
+ mutex_exit(&odd->odd_lock);
+}
+
+/* ARGSUSED */
+static int
+overlay_i_setprop(void *karg, intptr_t arg, int mode, cred_t *cred,
+ int *rvalp)
+{
+ int ret;
+ overlay_dev_t *odd;
+ overlay_ioc_prop_t *oip = karg;
+ uint_t propid = UINT_MAX;
+ mac_perim_handle_t mph;
+ uint64_t maxid, *vidp;
+
+ if (oip->oip_size > OVERLAY_PROP_SIZEMAX)
+ return (EINVAL);
+
+ odd = overlay_hold_by_dlid(oip->oip_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ oip->oip_name[OVERLAY_PROP_NAMELEN-1] = '\0';
+ mac_perim_enter_by_mh(odd->odd_mh, &mph);
+	mutex_enter(&odd->odd_lock);
+	if (odd->odd_flags & OVERLAY_F_ACTIVATED) {
+		mutex_exit(&odd->odd_lock);
+		mac_perim_exit(mph);
+		overlay_hold_rele(odd);
+		return (ENOTSUP);
+	}
+	mutex_exit(&odd->odd_lock);
+	if (oip->oip_id == -1) {
+		int i;
+
+		for (i = 0; i < OVERLAY_DEV_NPROPS; i++) {
+			if (strcmp(overlay_dev_props[i], oip->oip_name) == 0)
+				break;
+		}
+
+		if (i == OVERLAY_DEV_NPROPS) {
+			ret = odd->odd_plugin->ovp_ops->ovpo_setprop(
+			    odd->odd_pvoid, oip->oip_name,
+			    oip->oip_value, oip->oip_size);
+			overlay_hold_rele(odd);
+			mac_perim_exit(mph);
+			return (ret);
+		}
+
+		propid = i;
+ } else if (oip->oip_id >= OVERLAY_DEV_NPROPS) {
+ uint_t id = oip->oip_id - OVERLAY_DEV_NPROPS;
+
+		if (id >= odd->odd_plugin->ovp_nprops) {
+ mac_perim_exit(mph);
+ overlay_hold_rele(odd);
+ return (EINVAL);
+ }
+ ret = odd->odd_plugin->ovp_ops->ovpo_setprop(odd->odd_pvoid,
+ odd->odd_plugin->ovp_props[id], oip->oip_value,
+ oip->oip_size);
+ mac_perim_exit(mph);
+ overlay_hold_rele(odd);
+ return (ret);
+ } else if (oip->oip_id < -1) {
+ mac_perim_exit(mph);
+ overlay_hold_rele(odd);
+ return (EINVAL);
+ } else {
+ ASSERT(oip->oip_id < OVERLAY_DEV_NPROPS);
+ ASSERT(oip->oip_id >= 0);
+ propid = oip->oip_id;
+ }
+
+ ret = 0;
+ switch (propid) {
+ case OVERLAY_DEV_P_MTU:
+ ret = mac_set_prop(odd->odd_mh, MAC_PROP_MTU, "mtu",
+ oip->oip_value, oip->oip_size);
+ break;
+ case OVERLAY_DEV_P_VNETID:
+ if (oip->oip_size != sizeof (uint64_t)) {
+ ret = EINVAL;
+ break;
+ }
+ vidp = (uint64_t *)oip->oip_value;
+ ASSERT(odd->odd_plugin->ovp_id_size <= 8);
+ maxid = UINT64_MAX;
+ if (odd->odd_plugin->ovp_id_size != 8)
+ maxid = (1ULL << (odd->odd_plugin->ovp_id_size * 8)) -
+ 1ULL;
+		if (*vidp > maxid) {
+ ret = EINVAL;
+ break;
+ }
+ overlay_setprop_vnetid(odd, *vidp);
+ break;
+ case OVERLAY_DEV_P_ENCAP:
+ case OVERLAY_DEV_P_VARPDID:
+ ret = EPERM;
+ break;
+ default:
+ ret = ENOENT;
+ }
+
+ mac_perim_exit(mph);
+ overlay_hold_rele(odd);
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+overlay_i_status(void *karg, intptr_t arg, int mode, cred_t *cred,
+ int *rvalp)
+{
+ overlay_dev_t *odd;
+ overlay_ioc_status_t *os = karg;
+
+ odd = overlay_hold_by_dlid(os->ois_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ mutex_enter(&odd->odd_lock);
+ if ((odd->odd_flags & OVERLAY_F_DEGRADED) != 0) {
+ os->ois_status = OVERLAY_I_DEGRADED;
+ if (odd->odd_fmamsg != NULL) {
+ (void) strlcpy(os->ois_message, odd->odd_fmamsg,
+ OVERLAY_STATUS_BUFLEN);
+ } else {
+ os->ois_message[0] = '\0';
+		}
+	} else {
+ os->ois_status = OVERLAY_I_OK;
+ os->ois_message[0] = '\0';
+ }
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+
+ return (0);
+}
+
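+/*
+ * These ioctls are dispatched through dld's ioctl framework rather than
+ * through a minor node of our own.  A userland consumer such as dladm or
+ * varpd would reach them roughly as in the hypothetical sketch below (error
+ * handling elided; in practice libdladm wraps the control node and the
+ * copyin/copyout handshake):
+ *
+ *	int fd = open("/dev/dld", O_RDWR);
+ *	overlay_ioc_status_t os;
+ *
+ *	bzero(&os, sizeof (os));
+ *	os.ois_linkid = linkid;
+ *	if (ioctl(fd, OVERLAY_IOC_STATUS, &os) == 0)
+ *		(void) printf("status: %d\n", os.ois_status);
+ */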
+static dld_ioc_info_t overlay_ioc_list[] = {
+ { OVERLAY_IOC_CREATE, DLDCOPYIN, sizeof (overlay_ioc_create_t),
+ overlay_i_create, secpolicy_dl_config },
+ { OVERLAY_IOC_ACTIVATE, DLDCOPYIN, sizeof (overlay_ioc_activate_t),
+ overlay_i_activate, secpolicy_dl_config },
+ { OVERLAY_IOC_DELETE, DLDCOPYIN, sizeof (overlay_ioc_delete_t),
+ overlay_i_delete, secpolicy_dl_config },
+ { OVERLAY_IOC_PROPINFO, DLDCOPYIN | DLDCOPYOUT,
+ sizeof (overlay_ioc_propinfo_t), overlay_i_propinfo,
+ secpolicy_dl_config },
+ { OVERLAY_IOC_GETPROP, DLDCOPYIN | DLDCOPYOUT,
+ sizeof (overlay_ioc_prop_t), overlay_i_getprop,
+ secpolicy_dl_config },
+ { OVERLAY_IOC_SETPROP, DLDCOPYIN,
+ sizeof (overlay_ioc_prop_t), overlay_i_setprop,
+ secpolicy_dl_config },
+ { OVERLAY_IOC_NPROPS, DLDCOPYIN | DLDCOPYOUT,
+ sizeof (overlay_ioc_nprops_t), overlay_i_nprops,
+ secpolicy_dl_config },
+ { OVERLAY_IOC_STATUS, DLDCOPYIN | DLDCOPYOUT,
+ sizeof (overlay_ioc_status_t), overlay_i_status,
+ NULL }
+};
+
+static int
+overlay_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+	int fmcap = DDI_FM_EREPORT_CAPABLE;
+
+	if (cmd != DDI_ATTACH)
+		return (DDI_FAILURE);
+
+ if (overlay_dip != NULL || ddi_get_instance(dip) != 0)
+ return (DDI_FAILURE);
+
+ ddi_fm_init(dip, &fmcap, NULL);
+
+ if (ddi_create_minor_node(dip, OVERLAY_CTL, S_IFCHR,
+ ddi_get_instance(dip), DDI_PSEUDO, 0) == DDI_FAILURE)
+ return (DDI_FAILURE);
+
+ if (dld_ioc_register(OVERLAY_IOC, overlay_ioc_list,
+ DLDIOCCNT(overlay_ioc_list)) != 0) {
+ ddi_remove_minor_node(dip, OVERLAY_CTL);
+ return (DDI_FAILURE);
+ }
+
+ overlay_dip = dip;
+ return (DDI_SUCCESS);
+}
+
+/* ARGSUSED */
+static int
+overlay_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resp)
+{
+ int error;
+
+ switch (cmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ *resp = (void *)overlay_dip;
+ error = DDI_SUCCESS;
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+ *resp = (void *)0;
+ error = DDI_SUCCESS;
+ break;
+ default:
+ error = DDI_FAILURE;
+ break;
+ }
+
+ return (error);
+}
+
+static int
+overlay_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ if (cmd != DDI_DETACH)
+ return (DDI_FAILURE);
+
+ mutex_enter(&overlay_dev_lock);
+ if (!list_is_empty(&overlay_dev_list) || overlay_target_busy()) {
+ mutex_exit(&overlay_dev_lock);
+		return (DDI_FAILURE);
+ }
+ mutex_exit(&overlay_dev_lock);
+
+ dld_ioc_unregister(OVERLAY_IOC);
+ ddi_remove_minor_node(dip, OVERLAY_CTL);
+ ddi_fm_fini(dip);
+ overlay_dip = NULL;
+ return (DDI_SUCCESS);
+}
+
+static struct cb_ops overlay_cbops = {
+ overlay_target_open, /* cb_open */
+ overlay_target_close, /* cb_close */
+ nodev, /* cb_strategy */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ nodev, /* cb_read */
+ nodev, /* cb_write */
+ overlay_target_ioctl, /* cb_ioctl */
+ nodev, /* cb_devmap */
+ nodev, /* cb_mmap */
+ nodev, /* cb_segmap */
+ nochpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ NULL, /* cb_stream */
+ D_MP, /* cb_flag */
+ CB_REV, /* cb_rev */
+ nodev, /* cb_aread */
+ nodev, /* cb_awrite */
+};
+
+static struct dev_ops overlay_dev_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ overlay_getinfo, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ overlay_attach, /* devo_attach */
+ overlay_detach, /* devo_detach */
+ nulldev, /* devo_reset */
+ &overlay_cbops, /* devo_cb_ops */
+ NULL, /* devo_bus_ops */
+ NULL, /* devo_power */
+ ddi_quiesce_not_supported /* devo_quiesce */
+};
+
+static struct modldrv overlay_modldrv = {
+ &mod_driverops,
+ "Overlay Network Driver",
+ &overlay_dev_ops
+};
+
+static struct modlinkage overlay_linkage = {
+ MODREV_1,
+ &overlay_modldrv
+};
+
+static int
+overlay_init(void)
+{
+ mutex_init(&overlay_dev_lock, NULL, MUTEX_DRIVER, NULL);
+ list_create(&overlay_dev_list, sizeof (overlay_dev_t),
+ offsetof(overlay_dev_t, odd_link));
+ overlay_mux_init();
+ overlay_plugin_init();
+ overlay_target_init();
+
+ return (DDI_SUCCESS);
+}
+
+static void
+overlay_fini(void)
+{
+ overlay_target_fini();
+ overlay_plugin_fini();
+ overlay_mux_fini();
+ mutex_destroy(&overlay_dev_lock);
+ list_destroy(&overlay_dev_list);
+}
+
+int
+_init(void)
+{
+ int err;
+
+ if ((err = overlay_init()) != DDI_SUCCESS)
+ return (err);
+
+ mac_init_ops(NULL, "overlay");
+ err = mod_install(&overlay_linkage);
+ if (err != DDI_SUCCESS) {
+ overlay_fini();
+ return (err);
+ }
+
+ return (0);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&overlay_linkage, modinfop));
+}
+
+int
+_fini(void)
+{
+ int err;
+
+ err = mod_remove(&overlay_linkage);
+ if (err != 0)
+ return (err);
+
+ overlay_fini();
+ return (0);
+}
diff --git a/usr/src/uts/common/io/overlay/overlay.conf b/usr/src/uts/common/io/overlay/overlay.conf
new file mode 100644
index 0000000000..4b62fafd94
--- /dev/null
+++ b/usr/src/uts/common/io/overlay/overlay.conf
@@ -0,0 +1,16 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2015, Joyent, Inc.
+#
+
+name="overlay" parent="pseudo" instance=0;
diff --git a/usr/src/uts/common/io/overlay/overlay.mapfile b/usr/src/uts/common/io/overlay/overlay.mapfile
new file mode 100644
index 0000000000..800d72dc2b
--- /dev/null
+++ b/usr/src/uts/common/io/overlay/overlay.mapfile
@@ -0,0 +1,46 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2015 Joyent, Inc.
+#
+
+#
+# MAPFILE HEADER START
+#
+# WARNING: STOP NOW. DO NOT MODIFY THIS FILE.
+# Object versioning must comply with the rules detailed in
+#
+# usr/src/lib/README.mapfiles
+#
+# You should not be making modifications here until you've read the most current
+# copy of that file. If you need help, contact a gatekeeper for guidance.
+#
+# MAPFILE HEADER END
+#
+
+$mapfile_version 2
+
+SYMBOL_VERSION ILLUMOSprivate {
+ global:
+ # DDI Interfaces
+ _fini;
+ _init;
+ _info;
+
+	# Encapsulation Plugin interfaces
+ overlay_plugin_alloc;
+ overlay_plugin_free;
+ overlay_plugin_register;
+ overlay_plugin_unregister;
+ local:
+ *;
+};
diff --git a/usr/src/uts/common/io/overlay/overlay_fm.c b/usr/src/uts/common/io/overlay/overlay_fm.c
new file mode 100644
index 0000000000..0701d08e8b
--- /dev/null
+++ b/usr/src/uts/common/io/overlay/overlay_fm.c
@@ -0,0 +1,82 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2016 Joyent, Inc.
+ */
+
+/*
+ * Overlay device FMA operations.
+ *
+ * For more information, see the big theory statement in
+ * uts/common/io/overlay/overlay.c
+ */
+
+#include <sys/ddifm.h>
+#include <sys/overlay_impl.h>
+
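+/*
+ * overlay_fm_count tracks how many overlay devices are currently degraded.
+ * We only report a service impact against the (single) overlay dev_info_t on
+ * the 0 -> 1 and 1 -> 0 transitions, since FMA cares about the state of the
+ * driver as a whole rather than about each individual device.
+ */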
+kmutex_t overlay_fm_lock;
+uint_t overlay_fm_count;
+
+void
+overlay_fm_init(void)
+{
+ overlay_fm_count = 0;
+ mutex_init(&overlay_fm_lock, NULL, MUTEX_DRIVER, NULL);
+}
+
+void
+overlay_fm_fini(void)
+{
+ VERIFY(overlay_fm_count == 0);
+ mutex_destroy(&overlay_fm_lock);
+}
+
+void
+overlay_fm_degrade(overlay_dev_t *odd, const char *msg)
+{
+ mutex_enter(&overlay_fm_lock);
+ mutex_enter(&odd->odd_lock);
+
+ if (msg != NULL)
+ (void) strlcpy(odd->odd_fmamsg, msg, OVERLAY_STATUS_BUFLEN);
+
+ if (odd->odd_flags & OVERLAY_F_DEGRADED)
+ goto out;
+
+ odd->odd_flags |= OVERLAY_F_DEGRADED;
+ overlay_fm_count++;
+ if (overlay_fm_count == 1) {
+ ddi_fm_service_impact(overlay_dip, DDI_SERVICE_DEGRADED);
+ }
+out:
+ mutex_exit(&odd->odd_lock);
+ mutex_exit(&overlay_fm_lock);
+}
+
+void
+overlay_fm_restore(overlay_dev_t *odd)
+{
+ mutex_enter(&overlay_fm_lock);
+ mutex_enter(&odd->odd_lock);
+ if (!(odd->odd_flags & OVERLAY_F_DEGRADED))
+ goto out;
+
+ odd->odd_fmamsg[0] = '\0';
+ odd->odd_flags &= ~OVERLAY_F_DEGRADED;
+ overlay_fm_count--;
+ if (overlay_fm_count == 0) {
+ ddi_fm_service_impact(overlay_dip, DDI_SERVICE_RESTORED);
+ }
+out:
+ mutex_exit(&odd->odd_lock);
+ mutex_exit(&overlay_fm_lock);
+}
diff --git a/usr/src/uts/common/io/overlay/overlay_mux.c b/usr/src/uts/common/io/overlay/overlay_mux.c
new file mode 100644
index 0000000000..9f70e8c83e
--- /dev/null
+++ b/usr/src/uts/common/io/overlay/overlay_mux.c
@@ -0,0 +1,354 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2015 Joyent, Inc.
+ */
+
+/*
+ * Overlay device ksocket multiplexer.
+ *
+ * For more information, see the big theory statement in
+ * uts/common/io/overlay/overlay.c
+ */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/ksynch.h>
+#include <sys/ksocket.h>
+#include <sys/avl.h>
+#include <sys/list.h>
+#include <sys/sysmacros.h>
+#include <sys/strsubr.h>
+#include <sys/strsun.h>
+#include <sys/tihdr.h>
+
+#include <sys/overlay_impl.h>
+
+#include <sys/sdt.h>
+
+#define OVERLAY_FREEMSG(mp, reason) \
+	DTRACE_PROBE2(overlay__freemsg, mblk_t *, mp, char *, reason)
+
+static list_t overlay_mux_list;
+static kmutex_t overlay_mux_lock;
+
+void
+overlay_mux_init(void)
+{
+ list_create(&overlay_mux_list, sizeof (overlay_mux_t),
+ offsetof(overlay_mux_t, omux_lnode));
+ mutex_init(&overlay_mux_lock, NULL, MUTEX_DRIVER, NULL);
+}
+
+void
+overlay_mux_fini(void)
+{
+ mutex_destroy(&overlay_mux_lock);
+ list_destroy(&overlay_mux_list);
+}
+
+static int
+overlay_mux_comparator(const void *a, const void *b)
+{
+ const overlay_dev_t *odl, *odr;
+ odl = a;
+ odr = b;
+ if (odl->odd_vid > odr->odd_vid)
+ return (1);
+ else if (odl->odd_vid < odr->odd_vid)
+ return (-1);
+ else
+ return (0);
+}
+
+/*
+ * This is the central receive data path. We need to decode the packet, if we
+ * can, and then deliver it to the appropriate overlay.
+ */
+/* ARGSUSED */
+static boolean_t
+overlay_mux_recv(ksocket_t ks, mblk_t *mpchain, size_t msgsize, int oob,
+ void *arg)
+{
+ mblk_t *mp, *nmp, *fmp;
+ overlay_mux_t *mux = arg;
+
+	/*
+	 * We may have received a chain of messages.  Each message in the
+	 * chain will likely have a T_unitdata_ind attached to it as an
+	 * M_PROTO.  Any message that doesn't is dropped for the moment.
+	 */
+ for (mp = mpchain; mp != NULL; mp = nmp) {
+ struct T_unitdata_ind *tudi;
+ ovep_encap_info_t infop;
+ overlay_dev_t od, *odd;
+ int ret;
+
+ nmp = mp->b_next;
+ mp->b_next = NULL;
+
+ if (DB_TYPE(mp) != M_PROTO) {
+ OVERLAY_FREEMSG(mp, "first one isn't M_PROTO");
+ freemsg(mp);
+ continue;
+ }
+
+ if (mp->b_cont == NULL) {
+ OVERLAY_FREEMSG(mp, "missing a b_cont");
+ freemsg(mp);
+ continue;
+ }
+
+ tudi = (struct T_unitdata_ind *)mp->b_rptr;
+ if (tudi->PRIM_type != T_UNITDATA_IND) {
+ OVERLAY_FREEMSG(mp, "Not a T_unitdata_ind *");
+ freemsg(mp);
+ continue;
+ }
+
+ /*
+		 * In the future, we'll care about the source information for
+		 * purposes of telling varpd about out-of-band invalidation.
+		 * But for now, just drop that block.
+ */
+ fmp = mp;
+ mp = fmp->b_cont;
+ fmp->b_cont = NULL;
+ freemsg(fmp);
+
+ /*
+ * Decap and deliver.
+ */
+ bzero(&infop, sizeof (ovep_encap_info_t));
+ ret = mux->omux_plugin->ovp_ops->ovpo_decap(NULL, mp, &infop);
+ if (ret != 0) {
+ OVERLAY_FREEMSG(mp, "decap failed");
+ freemsg(mp);
+ continue;
+ }
+ if (MBLKL(mp) > infop.ovdi_hdr_size) {
+ mp->b_rptr += infop.ovdi_hdr_size;
+ } else {
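+		/*
+		 * The encapsulation header may span multiple mblks.  Walk the
+		 * chain, consuming header bytes from each block and freeing
+		 * any block that the header fully covers, until we've skipped
+		 * the whole header or run out of message.
+		 */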
+ while (infop.ovdi_hdr_size != 0) {
+ size_t rem, blkl;
+
+ if (mp == NULL)
+ break;
+
+ blkl = MBLKL(mp);
+ rem = MIN(infop.ovdi_hdr_size, blkl);
+ infop.ovdi_hdr_size -= rem;
+ mp->b_rptr += rem;
+ if (rem == blkl) {
+ fmp = mp;
+ mp = fmp->b_cont;
+ fmp->b_cont = NULL;
+ OVERLAY_FREEMSG(mp,
+ "freed a fmp block");
+ freemsg(fmp);
+ }
+ }
+ if (mp == NULL) {
+ OVERLAY_FREEMSG(mp, "freed it all...");
+ continue;
+ }
+ }
+
+ od.odd_vid = infop.ovdi_id;
+ mutex_enter(&mux->omux_lock);
+ odd = avl_find(&mux->omux_devices, &od, NULL);
+ if (odd == NULL) {
+ mutex_exit(&mux->omux_lock);
+ OVERLAY_FREEMSG(mp, "no matching vid");
+ freemsg(mp);
+ continue;
+ }
+ mutex_enter(&odd->odd_lock);
+ if ((odd->odd_flags & OVERLAY_F_MDDROP) ||
+ !(odd->odd_flags & OVERLAY_F_IN_MUX)) {
+ mutex_exit(&odd->odd_lock);
+ mutex_exit(&mux->omux_lock);
+ OVERLAY_FREEMSG(mp, "dev dropped");
+ freemsg(mp);
+ continue;
+ }
+ overlay_io_start(odd, OVERLAY_F_IN_RX);
+ mutex_exit(&odd->odd_lock);
+ mutex_exit(&mux->omux_lock);
+
+ mac_rx(odd->odd_mh, NULL, mp);
+
+ mutex_enter(&odd->odd_lock);
+ overlay_io_done(odd, OVERLAY_F_IN_RX);
+ mutex_exit(&odd->odd_lock);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Register a given device with a socket backend. If no such device socket
+ * exists, create a new one.
+ */
+overlay_mux_t *
+overlay_mux_open(overlay_plugin_t *opp, int domain, int family, int protocol,
+ struct sockaddr *addr, socklen_t len, int *errp)
+{
+ int err;
+ overlay_mux_t *mux;
+ ksocket_t ksock;
+
+ if (errp == NULL)
+ errp = &err;
+
+ mutex_enter(&overlay_mux_lock);
+ for (mux = list_head(&overlay_mux_list); mux != NULL;
+ mux = list_next(&overlay_mux_list, mux)) {
+ if (domain == mux->omux_domain &&
+ family == mux->omux_family &&
+ protocol == mux->omux_protocol &&
+ len == mux->omux_alen &&
+ bcmp(addr, mux->omux_addr, len) == 0) {
+
+			if (opp != mux->omux_plugin) {
+				mutex_exit(&overlay_mux_lock);
+				*errp = EEXIST;
+				return (NULL);
+			}
+
+ mutex_enter(&mux->omux_lock);
+ mux->omux_count++;
+ mutex_exit(&mux->omux_lock);
+ mutex_exit(&overlay_mux_lock);
+ *errp = 0;
+ return (mux);
+ }
+ }
+
+ /*
+ * Today we aren't zone-aware and only exist in the global zone. When we
+ * allow for things to exist in the non-global zone, we'll want to use a
+ * credential that's actually specific to the zone.
+ */
+ *errp = ksocket_socket(&ksock, domain, family, protocol, KSOCKET_SLEEP,
+ kcred);
+ if (*errp != 0) {
+ mutex_exit(&overlay_mux_lock);
+ return (NULL);
+ }
+
+ *errp = ksocket_bind(ksock, addr, len, kcred);
+ if (*errp != 0) {
+ mutex_exit(&overlay_mux_lock);
+ ksocket_close(ksock, kcred);
+ return (NULL);
+ }
+
+ /*
+	 * Give the plugin a chance to set any socket options that it needs.
+	 * Because a socket is owned by a single type of plugin, it's safe to
+	 * let it do whatever additional setup it'd like on the socket.
+ */
+ if (opp->ovp_ops->ovpo_sockopt != NULL &&
+ (*errp = opp->ovp_ops->ovpo_sockopt(ksock)) != 0) {
+ mutex_exit(&overlay_mux_lock);
+ ksocket_close(ksock, kcred);
+ return (NULL);
+ }
+
+ mux = kmem_alloc(sizeof (overlay_mux_t), KM_SLEEP);
+ list_link_init(&mux->omux_lnode);
+ mux->omux_ksock = ksock;
+ mux->omux_plugin = opp;
+ mux->omux_domain = domain;
+ mux->omux_family = family;
+ mux->omux_protocol = protocol;
+ mux->omux_addr = kmem_alloc(len, KM_SLEEP);
+ bcopy(addr, mux->omux_addr, len);
+ mux->omux_alen = len;
+ mux->omux_count = 1;
+ avl_create(&mux->omux_devices, overlay_mux_comparator,
+ sizeof (overlay_dev_t), offsetof(overlay_dev_t, odd_muxnode));
+ mutex_init(&mux->omux_lock, NULL, MUTEX_DRIVER, NULL);
+
+ /* Once this is called, we need to expect to rx data */
+ *errp = ksocket_krecv_set(ksock, overlay_mux_recv, mux);
+	if (*errp != 0) {
+		mutex_exit(&overlay_mux_lock);
+		ksocket_close(ksock, kcred);
+		mutex_destroy(&mux->omux_lock);
+		avl_destroy(&mux->omux_devices);
+		kmem_free(mux->omux_addr, len);
+		kmem_free(mux, sizeof (overlay_mux_t));
+		return (NULL);
+	}
+
+ list_insert_tail(&overlay_mux_list, mux);
+ mutex_exit(&overlay_mux_lock);
+
+ *errp = 0;
+ return (mux);
+}
+
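+/*
+ * Release a hold on a mux.  When the last hold is dropped, we tear down the
+ * underlying ksocket and free the mux itself.
+ */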
+void
+overlay_mux_close(overlay_mux_t *mux)
+{
+ mutex_enter(&overlay_mux_lock);
+ mutex_enter(&mux->omux_lock);
+ mux->omux_count--;
+ if (mux->omux_count != 0) {
+ mutex_exit(&mux->omux_lock);
+ mutex_exit(&overlay_mux_lock);
+ return;
+ }
+ list_remove(&overlay_mux_list, mux);
+ mutex_exit(&mux->omux_lock);
+ mutex_exit(&overlay_mux_lock);
+
+	ksocket_close(mux->omux_ksock, kcred);
+	avl_destroy(&mux->omux_devices);
+	mutex_destroy(&mux->omux_lock);
+	kmem_free(mux->omux_addr, mux->omux_alen);
+	kmem_free(mux, sizeof (overlay_mux_t));
+}
+
+void
+overlay_mux_add_dev(overlay_mux_t *mux, overlay_dev_t *odd)
+{
+ mutex_enter(&mux->omux_lock);
+ avl_add(&mux->omux_devices, odd);
+ mutex_exit(&mux->omux_lock);
+}
+
+void
+overlay_mux_remove_dev(overlay_mux_t *mux, overlay_dev_t *odd)
+{
+ mutex_enter(&mux->omux_lock);
+ avl_remove(&mux->omux_devices, odd);
+ mutex_exit(&mux->omux_lock);
+}
+
+int
+overlay_mux_tx(overlay_mux_t *mux, struct msghdr *hdr, mblk_t *mp)
+{
+ int ret;
+
+ /*
+ * It'd be nice to be able to use MSG_MBLK_QUICKRELE, unfortunately,
+ * that isn't actually supported by UDP at this time.
+ */
+ ret = ksocket_sendmblk(mux->omux_ksock, hdr, 0, &mp, kcred);
+ if (ret != 0)
+ freemsg(mp);
+
+ return (ret);
+}
diff --git a/usr/src/uts/common/io/overlay/overlay_plugin.c b/usr/src/uts/common/io/overlay/overlay_plugin.c
new file mode 100644
index 0000000000..348ddb92a2
--- /dev/null
+++ b/usr/src/uts/common/io/overlay/overlay_plugin.c
@@ -0,0 +1,281 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2015 Joyent, Inc.
+ */
+
+/*
+ * Overlay device encapsulation plugin management
+ *
+ * For more information, see the big theory statement in
+ * uts/common/io/overlay/overlay.c
+ */
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/errno.h>
+#include <sys/sysmacros.h>
+#include <sys/modctl.h>
+
+#include <sys/overlay_impl.h>
+
+static kmem_cache_t *overlay_plugin_cache;
+static kmutex_t overlay_plugin_lock;
+static list_t overlay_plugin_list;
+
+#define OVERLAY_MODDIR "overlay"
+
+/* ARGSUSED */
+static int
+overlay_plugin_cache_constructor(void *buf, void *arg, int kmflags)
+{
+ overlay_plugin_t *opp = buf;
+
+ mutex_init(&opp->ovp_mutex, NULL, MUTEX_DRIVER, NULL);
+ list_link_init(&opp->ovp_link);
+
+ return (0);
+}
+
+/* ARGSUSED */
+static void
+overlay_plugin_cache_destructor(void *buf, void *arg)
+{
+ overlay_plugin_t *opp = buf;
+ ASSERT(list_link_active(&opp->ovp_link) == 0);
+ mutex_destroy(&opp->ovp_mutex);
+}
+
+void
+overlay_plugin_init(void)
+{
+ mutex_init(&overlay_plugin_lock, NULL, MUTEX_DRIVER, 0);
+
+ /*
+ * In the future we may want to have a reaper to unload unused modules
+ * to help the kernel be able to reclaim memory.
+ */
+ overlay_plugin_cache = kmem_cache_create("overlay_plugin_cache",
+ sizeof (overlay_plugin_t), 0, overlay_plugin_cache_constructor,
+ overlay_plugin_cache_destructor, NULL, NULL, NULL, 0);
+ list_create(&overlay_plugin_list, sizeof (overlay_plugin_t),
+ offsetof(overlay_plugin_t, ovp_link));
+}
+
+void
+overlay_plugin_fini(void)
+{
+ mutex_enter(&overlay_plugin_lock);
+ VERIFY(list_is_empty(&overlay_plugin_list));
+ mutex_exit(&overlay_plugin_lock);
+
+ list_destroy(&overlay_plugin_list);
+ kmem_cache_destroy(overlay_plugin_cache);
+ mutex_destroy(&overlay_plugin_lock);
+}
+
+overlay_plugin_register_t *
+overlay_plugin_alloc(uint_t version)
+{
+ overlay_plugin_register_t *ovrp;
+ /* Version 1 is the only one that exists */
+ if (version != OVEP_VERSION_ONE)
+ return (NULL);
+
+ ovrp = kmem_zalloc(sizeof (overlay_plugin_register_t), KM_SLEEP);
+ ovrp->ovep_version = version;
+ return (ovrp);
+}
+
+void
+overlay_plugin_free(overlay_plugin_register_t *ovrp)
+{
+ kmem_free(ovrp, sizeof (overlay_plugin_register_t));
+}
+
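+/*
+ * Register an encapsulation plugin with the framework.  A hypothetical
+ * encapsulation module would call this from its _init(9E) entry point,
+ * roughly as sketched below (names are invented for illustration, and the
+ * ops vector must be fully populated, as validated here):
+ *
+ *	overlay_plugin_register_t *ovrp;
+ *
+ *	ovrp = overlay_plugin_alloc(OVEP_VERSION_ONE);
+ *	ovrp->ovep_name = "dummy";
+ *	ovrp->ovep_ops = &dummy_plugin_ops;
+ *	ovrp->ovep_id_size = 3;
+ *	ovrp->ovep_dest = OVERLAY_PLUGIN_D_IP | OVERLAY_PLUGIN_D_PORT;
+ *	err = overlay_plugin_register(ovrp);
+ *	overlay_plugin_free(ovrp);
+ */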
+int
+overlay_plugin_register(overlay_plugin_register_t *ovrp)
+{
+ overlay_plugin_t *opp, *ipp;
+
+ /* Sanity check parameters of the registration */
+ if (ovrp->ovep_version != OVEP_VERSION_ONE)
+ return (EINVAL);
+
+ if (ovrp->ovep_name == NULL || ovrp->ovep_ops == NULL)
+ return (EINVAL);
+
+ if ((ovrp->ovep_flags & ~(OVEP_F_VLAN_TAG)) != 0)
+ return (EINVAL);
+
+ if (ovrp->ovep_id_size < 1)
+ return (EINVAL);
+
+ /* Don't support anything that has an id size larger than 8 bytes */
+ if (ovrp->ovep_id_size > 8)
+ return (ENOTSUP);
+
+ if (ovrp->ovep_dest == OVERLAY_PLUGIN_D_INVALID)
+ return (EINVAL);
+
+ if ((ovrp->ovep_dest & ~OVERLAY_PLUGIN_D_MASK) != 0)
+ return (EINVAL);
+
+ if (ovrp->ovep_ops->ovpo_callbacks != 0)
+ return (EINVAL);
+ if (ovrp->ovep_ops->ovpo_init == NULL)
+ return (EINVAL);
+ if (ovrp->ovep_ops->ovpo_fini == NULL)
+ return (EINVAL);
+ if (ovrp->ovep_ops->ovpo_encap == NULL)
+ return (EINVAL);
+ if (ovrp->ovep_ops->ovpo_decap == NULL)
+ return (EINVAL);
+ if (ovrp->ovep_ops->ovpo_socket == NULL)
+ return (EINVAL);
+ if (ovrp->ovep_ops->ovpo_getprop == NULL)
+ return (EINVAL);
+ if (ovrp->ovep_ops->ovpo_setprop == NULL)
+ return (EINVAL);
+ if (ovrp->ovep_ops->ovpo_propinfo == NULL)
+ return (EINVAL);
+
+ opp = kmem_cache_alloc(overlay_plugin_cache, KM_SLEEP);
+ opp->ovp_active = 0;
+ opp->ovp_name = ovrp->ovep_name;
+ opp->ovp_ops = ovrp->ovep_ops;
+ opp->ovp_props = ovrp->ovep_props;
+ opp->ovp_id_size = ovrp->ovep_id_size;
+ opp->ovp_flags = ovrp->ovep_flags;
+ opp->ovp_dest = ovrp->ovep_dest;
+
+ opp->ovp_nprops = 0;
+ if (ovrp->ovep_props != NULL) {
+ while (ovrp->ovep_props[opp->ovp_nprops] != NULL) {
+ if (strlen(ovrp->ovep_props[opp->ovp_nprops]) >=
+ OVERLAY_PROP_NAMELEN) {
+ kmem_cache_free(overlay_plugin_cache, opp);
+ return (EINVAL);
+ }
+ opp->ovp_nprops++;
+ }
+ }
+
+ mutex_enter(&overlay_plugin_lock);
+ for (ipp = list_head(&overlay_plugin_list); ipp != NULL;
+ ipp = list_next(&overlay_plugin_list, ipp)) {
+ if (strcmp(ipp->ovp_name, opp->ovp_name) == 0) {
+ mutex_exit(&overlay_plugin_lock);
+ kmem_cache_free(overlay_plugin_cache, opp);
+ return (EEXIST);
+ }
+ }
+ list_insert_tail(&overlay_plugin_list, opp);
+ mutex_exit(&overlay_plugin_lock);
+
+ return (0);
+}
+
+int
+overlay_plugin_unregister(const char *name)
+{
+ overlay_plugin_t *opp;
+
+ mutex_enter(&overlay_plugin_lock);
+ for (opp = list_head(&overlay_plugin_list); opp != NULL;
+ opp = list_next(&overlay_plugin_list, opp)) {
+ if (strcmp(opp->ovp_name, name) == 0)
+ break;
+ }
+
+ if (opp == NULL) {
+ mutex_exit(&overlay_plugin_lock);
+ return (ENOENT);
+ }
+
+ mutex_enter(&opp->ovp_mutex);
+ if (opp->ovp_active > 0) {
+ mutex_exit(&opp->ovp_mutex);
+ mutex_exit(&overlay_plugin_lock);
+ return (EBUSY);
+ }
+ mutex_exit(&opp->ovp_mutex);
+
+ list_remove(&overlay_plugin_list, opp);
+ mutex_exit(&overlay_plugin_lock);
+
+ kmem_cache_free(overlay_plugin_cache, opp);
+ return (0);
+}
+
+overlay_plugin_t *
+overlay_plugin_lookup(const char *name)
+{
+ overlay_plugin_t *opp;
+ boolean_t trymodload = B_FALSE;
+
+ for (;;) {
+ mutex_enter(&overlay_plugin_lock);
+ for (opp = list_head(&overlay_plugin_list); opp != NULL;
+ opp = list_next(&overlay_plugin_list, opp)) {
+ if (strcmp(name, opp->ovp_name) == 0) {
+ mutex_enter(&opp->ovp_mutex);
+ opp->ovp_active++;
+ mutex_exit(&opp->ovp_mutex);
+ mutex_exit(&overlay_plugin_lock);
+ return (opp);
+ }
+ }
+ mutex_exit(&overlay_plugin_lock);
+
+ if (trymodload == B_TRUE)
+ return (NULL);
+
+ /*
+		 * If we didn't find it, it may still exist but not yet have
+		 * been loaded as a module.  In that case, we'll make one
+		 * attempt to load it.
+ */
+ if (modload(OVERLAY_MODDIR, (char *)name) == -1)
+ return (NULL);
+ trymodload = B_TRUE;
+ }
+}
+
+void
+overlay_plugin_rele(overlay_plugin_t *opp)
+{
+ mutex_enter(&opp->ovp_mutex);
+ ASSERT(opp->ovp_active > 0);
+ opp->ovp_active--;
+ mutex_exit(&opp->ovp_mutex);
+}
+
+void
+overlay_plugin_walk(overlay_plugin_walk_f func, void *arg)
+{
+ overlay_plugin_t *opp;
+ mutex_enter(&overlay_plugin_lock);
+ for (opp = list_head(&overlay_plugin_list); opp != NULL;
+ opp = list_next(&overlay_plugin_list, opp)) {
+ if (func(opp, arg) != 0) {
+ mutex_exit(&overlay_plugin_lock);
+ return;
+ }
+ }
+ mutex_exit(&overlay_plugin_lock);
+}
diff --git a/usr/src/uts/common/io/overlay/overlay_prop.c b/usr/src/uts/common/io/overlay/overlay_prop.c
new file mode 100644
index 0000000000..ba1ea2a629
--- /dev/null
+++ b/usr/src/uts/common/io/overlay/overlay_prop.c
@@ -0,0 +1,122 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2015, Joyent, Inc.
+ */
+
+/*
+ * Routines for manipulating property information structures.
+ *
+ * For more information, see the big theory statement in
+ * uts/common/io/overlay/overlay.c
+ */
+
+#include <sys/overlay_impl.h>
+
+void
+overlay_prop_init(overlay_prop_handle_t phdl)
+{
+ overlay_ioc_propinfo_t *infop = (overlay_ioc_propinfo_t *)phdl;
+ mac_propval_range_t *rangep = (mac_propval_range_t *)infop->oipi_poss;
+
+ infop->oipi_posssize = sizeof (mac_propval_range_t);
+ bzero(rangep, sizeof (mac_propval_range_t));
+}
+
+void
+overlay_prop_set_name(overlay_prop_handle_t phdl, const char *name)
+{
+ overlay_ioc_propinfo_t *infop = (overlay_ioc_propinfo_t *)phdl;
+ (void) strlcpy(infop->oipi_name, name, OVERLAY_PROP_NAMELEN);
+}
+
+void
+overlay_prop_set_prot(overlay_prop_handle_t phdl, overlay_prop_prot_t prot)
+{
+ overlay_ioc_propinfo_t *infop = (overlay_ioc_propinfo_t *)phdl;
+ infop->oipi_prot = prot;
+}
+
+void
+overlay_prop_set_type(overlay_prop_handle_t phdl, overlay_prop_type_t type)
+{
+ overlay_ioc_propinfo_t *infop = (overlay_ioc_propinfo_t *)phdl;
+ infop->oipi_type = type;
+}
+
+int
+overlay_prop_set_default(overlay_prop_handle_t phdl, void *def, ssize_t len)
+{
+ overlay_ioc_propinfo_t *infop = (overlay_ioc_propinfo_t *)phdl;
+
+ if (len > OVERLAY_PROP_SIZEMAX)
+ return (E2BIG);
+
+ if (len < 0)
+ return (EOVERFLOW);
+
+ bcopy(def, infop->oipi_default, len);
+ infop->oipi_defsize = (uint32_t)len;
+
+ return (0);
+}
+
+void
+overlay_prop_set_nodefault(overlay_prop_handle_t phdl)
+{
+ overlay_ioc_propinfo_t *infop = (overlay_ioc_propinfo_t *)phdl;
+ infop->oipi_default[0] = '\0';
+ infop->oipi_defsize = 0;
+}
+
+void
+overlay_prop_set_range_uint32(overlay_prop_handle_t phdl, uint32_t min,
+ uint32_t max)
+{
+ overlay_ioc_propinfo_t *infop = (overlay_ioc_propinfo_t *)phdl;
+ mac_propval_range_t *rangep = (mac_propval_range_t *)infop->oipi_poss;
+
+ if (rangep->mpr_count != 0 && rangep->mpr_type != MAC_PROPVAL_UINT32)
+ return;
+
+ if (infop->oipi_posssize + sizeof (mac_propval_uint32_range_t) >
+ sizeof (infop->oipi_poss))
+ return;
+
+ infop->oipi_posssize += sizeof (mac_propval_uint32_range_t);
+ rangep->mpr_count++;
+ rangep->mpr_type = MAC_PROPVAL_UINT32;
+ rangep->u.mpr_uint32[rangep->mpr_count-1].mpur_min = min;
+ rangep->u.mpr_uint32[rangep->mpr_count-1].mpur_max = max;
+}
+
+void
+overlay_prop_set_range_str(overlay_prop_handle_t phdl, const char *str)
+{
+ size_t len = strlen(str) + 1; /* Account for a null terminator */
+ overlay_ioc_propinfo_t *infop = (overlay_ioc_propinfo_t *)phdl;
+ mac_propval_range_t *rangep = (mac_propval_range_t *)infop->oipi_poss;
+ mac_propval_str_range_t *pstr = &rangep->u.mpr_str;
+
+ if (rangep->mpr_count != 0 && rangep->mpr_type != MAC_PROPVAL_STR)
+ return;
+
+ if (infop->oipi_posssize + len > sizeof (infop->oipi_poss))
+ return;
+
+ rangep->mpr_count++;
+ rangep->mpr_type = MAC_PROPVAL_STR;
+ strlcpy((char *)&pstr->mpur_data[pstr->mpur_nextbyte], str,
+ sizeof (infop->oipi_poss) - infop->oipi_posssize);
+ pstr->mpur_nextbyte += len;
+ infop->oipi_posssize += len;
+}
diff --git a/usr/src/uts/common/io/overlay/overlay_target.c b/usr/src/uts/common/io/overlay/overlay_target.c
new file mode 100644
index 0000000000..f4147b56d1
--- /dev/null
+++ b/usr/src/uts/common/io/overlay/overlay_target.c
@@ -0,0 +1,1651 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2016 Joyent, Inc.
+ */
+
+/*
+ * Overlay device target cache management
+ *
+ * For more information, see the big theory statement in
+ * uts/common/io/overlay/overlay.c
+ */
+
+#include <sys/types.h>
+#include <sys/ethernet.h>
+#include <sys/kmem.h>
+#include <sys/policy.h>
+#include <sys/sysmacros.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/strsubr.h>
+#include <sys/mac_provider.h>
+#include <sys/mac_client.h>
+#include <sys/mac_client_priv.h>
+#include <sys/vlan.h>
+#include <sys/crc32.h>
+#include <sys/cred.h>
+#include <sys/file.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/overlay_impl.h>
+#include <sys/sdt.h>
+
+/*
+ * This is a total straw man, but at least it's a prime number.  Down the road
+ * we'll have to do a lot of evaluation of how these target caches should grow
+ * and shrink, as well as how they should respond to memory pressure and
+ * evictions.  This just gives us a starting point that'll be 'good
+ * enough', until it's not.
+ */
+#define OVERLAY_HSIZE 823
+
+/*
+ * We use this data structure to keep track of what requests have been actively
+ * allocated to a given instance so we know what to put back on the pending
+ * list.
+ */
+typedef struct overlay_target_hdl {
+ minor_t oth_minor; /* RO */
+ zoneid_t oth_zoneid; /* RO */
+ int oth_oflags; /* RO */
+ list_node_t oth_link; /* overlay_target_lock */
+ kmutex_t oth_lock;
+ list_t oth_outstanding; /* oth_lock */
+} overlay_target_hdl_t;
+
+typedef int (*overlay_target_copyin_f)(const void *, void **, size_t *, int);
+typedef int (*overlay_target_ioctl_f)(overlay_target_hdl_t *, void *);
+typedef int (*overlay_target_copyout_f)(void *, void *, size_t, int);
+
+typedef struct overlay_target_ioctl {
+ int oti_cmd; /* ioctl id */
+ boolean_t oti_write; /* ioctl requires FWRITE */
+ boolean_t oti_ncopyout; /* copyout data? */
+ overlay_target_copyin_f oti_copyin; /* copyin func */
+ overlay_target_ioctl_f oti_func; /* function to call */
+	overlay_target_copyout_f oti_copyout; /* copyout func */
+ size_t oti_size; /* size of user level structure */
+} overlay_target_ioctl_t;
+
+static kmem_cache_t *overlay_target_cache;
+static kmem_cache_t *overlay_entry_cache;
+static id_space_t *overlay_thdl_idspace;
+static void *overlay_thdl_state;
+
+/*
+ * When we support overlay devices in the NGZ, all of these will need to
+ * become zone-aware, by plugging into the netstack engine and becoming
+ * per-netstack data.
+ */
+static list_t overlay_thdl_list;
+static kmutex_t overlay_target_lock;
+static kcondvar_t overlay_target_condvar;
+static list_t overlay_target_list;
+static boolean_t overlay_target_excl;
+
+/*
+ * Cap on the bytes of unresolved packets we're willing to queue on a single
+ * target cache entry while waiting for varpd to answer a lookup.
+ */
+static int overlay_ent_size = 128 * 1024;
+
+/* ARGSUSED */
+static int
+overlay_target_cache_constructor(void *buf, void *arg, int kmflgs)
+{
+ overlay_target_t *ott = buf;
+
+ mutex_init(&ott->ott_lock, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&ott->ott_cond, NULL, CV_DRIVER, NULL);
+ return (0);
+}
+
+/* ARGSUSED */
+static void
+overlay_target_cache_destructor(void *buf, void *arg)
+{
+ overlay_target_t *ott = buf;
+
+ cv_destroy(&ott->ott_cond);
+ mutex_destroy(&ott->ott_lock);
+}
+
+/* ARGSUSED */
+static int
+overlay_entry_cache_constructor(void *buf, void *arg, int kmflgs)
+{
+ overlay_target_entry_t *ote = buf;
+
+ bzero(ote, sizeof (overlay_target_entry_t));
+ mutex_init(&ote->ote_lock, NULL, MUTEX_DRIVER, NULL);
+ return (0);
+}
+
+/* ARGSUSED */
+static void
+overlay_entry_cache_destructor(void *buf, void *arg)
+{
+ overlay_target_entry_t *ote = buf;
+
+ mutex_destroy(&ote->ote_lock);
+}
+
+static uint64_t
+overlay_mac_hash(const void *v)
+{
+ uint32_t crc;
+ CRC32(crc, v, ETHERADDRL, -1U, crc32_table);
+ return (crc);
+}
+
+static int
+overlay_mac_cmp(const void *a, const void *b)
+{
+ return (bcmp(a, b, ETHERADDRL));
+}
+
+/* ARGSUSED */
+static void
+overlay_target_entry_dtor(void *arg)
+{
+ overlay_target_entry_t *ote = arg;
+
+ ote->ote_flags = 0;
+ bzero(ote->ote_addr, ETHERADDRL);
+ ote->ote_ott = NULL;
+ ote->ote_odd = NULL;
+ freemsgchain(ote->ote_chead);
+ ote->ote_chead = ote->ote_ctail = NULL;
+ ote->ote_mbsize = 0;
+ ote->ote_vtime = 0;
+ kmem_cache_free(overlay_entry_cache, ote);
+}
+
+static int
+overlay_mac_avl(const void *a, const void *b)
+{
+ int i;
+ const overlay_target_entry_t *l, *r;
+ l = a;
+ r = b;
+
+ for (i = 0; i < ETHERADDRL; i++) {
+ if (l->ote_addr[i] > r->ote_addr[i])
+ return (1);
+ else if (l->ote_addr[i] < r->ote_addr[i])
+ return (-1);
+ }
+
+ return (0);
+}
+
+void
+overlay_target_init(void)
+{
+ int ret;
+ ret = ddi_soft_state_init(&overlay_thdl_state,
+ sizeof (overlay_target_hdl_t), 1);
+ VERIFY(ret == 0);
+ overlay_target_cache = kmem_cache_create("overlay_target",
+ sizeof (overlay_target_t), 0, overlay_target_cache_constructor,
+ overlay_target_cache_destructor, NULL, NULL, NULL, 0);
+ overlay_entry_cache = kmem_cache_create("overlay_entry",
+ sizeof (overlay_target_entry_t), 0, overlay_entry_cache_constructor,
+ overlay_entry_cache_destructor, NULL, NULL, NULL, 0);
+ mutex_init(&overlay_target_lock, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&overlay_target_condvar, NULL, CV_DRIVER, NULL);
+ list_create(&overlay_target_list, sizeof (overlay_target_entry_t),
+ offsetof(overlay_target_entry_t, ote_qlink));
+ list_create(&overlay_thdl_list, sizeof (overlay_target_hdl_t),
+ offsetof(overlay_target_hdl_t, oth_link));
+ overlay_thdl_idspace = id_space_create("overlay_target_minors",
+ 1, INT32_MAX);
+}
+
+void
+overlay_target_fini(void)
+{
+ id_space_destroy(overlay_thdl_idspace);
+ list_destroy(&overlay_thdl_list);
+ list_destroy(&overlay_target_list);
+ cv_destroy(&overlay_target_condvar);
+ mutex_destroy(&overlay_target_lock);
+ kmem_cache_destroy(overlay_entry_cache);
+ kmem_cache_destroy(overlay_target_cache);
+ ddi_soft_state_fini(&overlay_thdl_state);
+}
+
+void
+overlay_target_free(overlay_dev_t *odd)
+{
+ if (odd->odd_target == NULL)
+ return;
+
+ if (odd->odd_target->ott_mode == OVERLAY_TARGET_DYNAMIC) {
+ refhash_t *rp = odd->odd_target->ott_u.ott_dyn.ott_dhash;
+ avl_tree_t *ap = &odd->odd_target->ott_u.ott_dyn.ott_tree;
+ overlay_target_entry_t *ote;
+
+ /*
+		 * Our AVL tree and hashtable contain the same elements, so we
+		 * just empty the tree without freeing anything, and let each
+		 * entry be destroyed as it's removed from the hash table
+		 * (which happens through the refhash dtor).
+ */
+ while ((ote = avl_first(ap)) != NULL)
+ avl_remove(ap, ote);
+
+ avl_destroy(ap);
+ for (ote = refhash_first(rp); ote != NULL;
+ ote = refhash_next(rp, ote)) {
+ refhash_remove(rp, ote);
+ }
+ refhash_destroy(rp);
+ }
+
+ ASSERT(odd->odd_target->ott_ocount == 0);
+ kmem_cache_free(overlay_target_cache, odd->odd_target);
+}
+
+int
+overlay_target_busy()
+{
+ int ret;
+
+ mutex_enter(&overlay_target_lock);
+ ret = !list_is_empty(&overlay_thdl_list);
+ mutex_exit(&overlay_target_lock);
+
+ return (ret);
+}
+
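+/*
+ * Queue an entry for resolution by varpd and wake one of the threads blocked
+ * in overlay_target_lookup_request().  If the target is being torn down, the
+ * entry is deliberately not queued.
+ */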
+static void
+overlay_target_queue(overlay_target_entry_t *entry)
+{
+ mutex_enter(&overlay_target_lock);
+ mutex_enter(&entry->ote_ott->ott_lock);
+ if (entry->ote_ott->ott_flags & OVERLAY_T_TEARDOWN) {
+ mutex_exit(&entry->ote_ott->ott_lock);
+ mutex_exit(&overlay_target_lock);
+ return;
+ }
+ entry->ote_ott->ott_ocount++;
+ mutex_exit(&entry->ote_ott->ott_lock);
+ list_insert_tail(&overlay_target_list, entry);
+ cv_signal(&overlay_target_condvar);
+ mutex_exit(&overlay_target_lock);
+}
+
+void
+overlay_target_quiesce(overlay_target_t *ott)
+{
+ if (ott == NULL)
+ return;
+ mutex_enter(&ott->ott_lock);
+ ott->ott_flags |= OVERLAY_T_TEARDOWN;
+ while (ott->ott_ocount != 0)
+ cv_wait(&ott->ott_cond, &ott->ott_lock);
+ mutex_exit(&ott->ott_lock);
+}
+
+/*
+ * Look up the destination for an outbound packet.  This function assumes that
+ * the destination mode is OVERLAY_PLUGIN_D_IP | OVERLAY_PLUGIN_D_PORT.  As we
+ * don't have an implementation of anything else at this time, say for NVGRE,
+ * we drop all packets whose destination doesn't match this.  We return
+ * OVERLAY_TARGET_OK with the socket address filled in, OVERLAY_TARGET_DROP if
+ * the packet should be dropped, or OVERLAY_TARGET_ASYNC if the packet has
+ * been queued pending a varpd lookup.
+ */
+int
+overlay_target_lookup(overlay_dev_t *odd, mblk_t *mp, struct sockaddr *sock,
+ socklen_t *slenp)
+{
+ int ret;
+ struct sockaddr_in6 *v6;
+ overlay_target_t *ott;
+ mac_header_info_t mhi;
+ overlay_target_entry_t *entry;
+
+ ASSERT(odd->odd_target != NULL);
+
+ /*
+	 * At this point, the overlay device is in a mux, which means that
+	 * it's been activated.  Parts of the target, such as the mode and the
+	 * destination, are now read-only, so we don't have to worry about
+	 * synchronizing access to them.
+ */
+ ott = odd->odd_target;
+ if (ott->ott_dest != (OVERLAY_PLUGIN_D_IP | OVERLAY_PLUGIN_D_PORT))
+ return (OVERLAY_TARGET_DROP);
+
+ v6 = (struct sockaddr_in6 *)sock;
+ bzero(v6, sizeof (struct sockaddr_in6));
+ v6->sin6_family = AF_INET6;
+
+ if (ott->ott_mode == OVERLAY_TARGET_POINT) {
+ mutex_enter(&ott->ott_lock);
+ bcopy(&ott->ott_u.ott_point.otp_ip, &v6->sin6_addr,
+ sizeof (struct in6_addr));
+ v6->sin6_port = htons(ott->ott_u.ott_point.otp_port);
+ mutex_exit(&ott->ott_lock);
+ *slenp = sizeof (struct sockaddr_in6);
+
+ return (OVERLAY_TARGET_OK);
+ }
+
+ ASSERT(ott->ott_mode == OVERLAY_TARGET_DYNAMIC);
+
+ /*
+ * Note we only want the MAC address here, therefore we won't bother
+ * using mac_vlan_header_info(). If any caller needs the vlan info at
+ * this point, this should change to a call to mac_vlan_header_info().
+ */
+ if (mac_header_info(odd->odd_mh, mp, &mhi) != 0)
+ return (OVERLAY_TARGET_DROP);
+ mutex_enter(&ott->ott_lock);
+ entry = refhash_lookup(ott->ott_u.ott_dyn.ott_dhash,
+ mhi.mhi_daddr);
+ if (entry == NULL) {
+ entry = kmem_cache_alloc(overlay_entry_cache,
+ KM_NOSLEEP | KM_NORMALPRI);
+ if (entry == NULL) {
+ mutex_exit(&ott->ott_lock);
+ return (OVERLAY_TARGET_DROP);
+ }
+ bcopy(mhi.mhi_daddr, entry->ote_addr, ETHERADDRL);
+ entry->ote_chead = entry->ote_ctail = mp;
+ entry->ote_mbsize = msgsize(mp);
+ entry->ote_flags |= OVERLAY_ENTRY_F_PENDING;
+ entry->ote_ott = ott;
+ entry->ote_odd = odd;
+ refhash_insert(ott->ott_u.ott_dyn.ott_dhash, entry);
+ avl_add(&ott->ott_u.ott_dyn.ott_tree, entry);
+ mutex_exit(&ott->ott_lock);
+ overlay_target_queue(entry);
+ return (OVERLAY_TARGET_ASYNC);
+ }
+ refhash_hold(ott->ott_u.ott_dyn.ott_dhash, entry);
+ mutex_exit(&ott->ott_lock);
+
+ mutex_enter(&entry->ote_lock);
+ if (entry->ote_flags & OVERLAY_ENTRY_F_DROP) {
+ ret = OVERLAY_TARGET_DROP;
+ } else if (entry->ote_flags & OVERLAY_ENTRY_F_VALID) {
+ bcopy(&entry->ote_dest.otp_ip, &v6->sin6_addr,
+ sizeof (struct in6_addr));
+ v6->sin6_port = htons(entry->ote_dest.otp_port);
+ *slenp = sizeof (struct sockaddr_in6);
+ ret = OVERLAY_TARGET_OK;
+ } else {
+ size_t mlen = msgsize(mp);
+
+ if (mlen + entry->ote_mbsize > overlay_ent_size) {
+ ret = OVERLAY_TARGET_DROP;
+ } else {
+ if (entry->ote_ctail != NULL) {
+				ASSERT(entry->ote_ctail->b_next == NULL);
+ entry->ote_ctail->b_next = mp;
+ entry->ote_ctail = mp;
+ } else {
+ entry->ote_chead = mp;
+ entry->ote_ctail = mp;
+ }
+ entry->ote_mbsize += mlen;
+ if ((entry->ote_flags &
+ OVERLAY_ENTRY_F_PENDING) == 0) {
+ entry->ote_flags |=
+ OVERLAY_ENTRY_F_PENDING;
+ overlay_target_queue(entry);
+ }
+ ret = OVERLAY_TARGET_ASYNC;
+ }
+ }
+ mutex_exit(&entry->ote_lock);
+
+ mutex_enter(&ott->ott_lock);
+ refhash_rele(ott->ott_u.ott_dyn.ott_dhash, entry);
+ mutex_exit(&ott->ott_lock);
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+overlay_target_info(overlay_target_hdl_t *thdl, void *arg)
+{
+ overlay_dev_t *odd;
+ overlay_targ_info_t *oti = arg;
+
+ odd = overlay_hold_by_dlid(oti->oti_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ mutex_enter(&odd->odd_lock);
+ oti->oti_flags = 0;
+ oti->oti_needs = odd->odd_plugin->ovp_dest;
+ if (odd->odd_flags & OVERLAY_F_DEGRADED)
+ oti->oti_flags |= OVERLAY_TARG_INFO_F_DEGRADED;
+ if (odd->odd_flags & OVERLAY_F_ACTIVATED)
+ oti->oti_flags |= OVERLAY_TARG_INFO_F_ACTIVE;
+ oti->oti_vnetid = odd->odd_vid;
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_target_associate(overlay_target_hdl_t *thdl, void *arg)
+{
+ overlay_dev_t *odd;
+ overlay_target_t *ott;
+ overlay_targ_associate_t *ota = arg;
+
+ odd = overlay_hold_by_dlid(ota->ota_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ if (ota->ota_id == 0) {
+ overlay_hold_rele(odd);
+ return (EINVAL);
+ }
+
+ if (ota->ota_mode != OVERLAY_TARGET_POINT &&
+ ota->ota_mode != OVERLAY_TARGET_DYNAMIC) {
+ overlay_hold_rele(odd);
+ return (EINVAL);
+ }
+
+ if (ota->ota_provides != odd->odd_plugin->ovp_dest) {
+ overlay_hold_rele(odd);
+ return (EINVAL);
+ }
+
+ if (ota->ota_mode == OVERLAY_TARGET_POINT) {
+ if (ota->ota_provides & OVERLAY_PLUGIN_D_IP) {
+ if (IN6_IS_ADDR_UNSPECIFIED(&ota->ota_point.otp_ip) ||
+ IN6_IS_ADDR_V4COMPAT(&ota->ota_point.otp_ip) ||
+ IN6_IS_ADDR_V4MAPPED_ANY(&ota->ota_point.otp_ip)) {
+ overlay_hold_rele(odd);
+ return (EINVAL);
+ }
+ }
+
+ if (ota->ota_provides & OVERLAY_PLUGIN_D_PORT) {
+ if (ota->ota_point.otp_port == 0) {
+ overlay_hold_rele(odd);
+ return (EINVAL);
+ }
+ }
+ }
+
+ ott = kmem_cache_alloc(overlay_target_cache, KM_SLEEP);
+ ott->ott_flags = 0;
+ ott->ott_ocount = 0;
+ ott->ott_mode = ota->ota_mode;
+ ott->ott_dest = ota->ota_provides;
+ ott->ott_id = ota->ota_id;
+
+ if (ott->ott_mode == OVERLAY_TARGET_POINT) {
+ bcopy(&ota->ota_point, &ott->ott_u.ott_point,
+ sizeof (overlay_target_point_t));
+ } else {
+ ott->ott_u.ott_dyn.ott_dhash = refhash_create(OVERLAY_HSIZE,
+ overlay_mac_hash, overlay_mac_cmp,
+ overlay_target_entry_dtor, sizeof (overlay_target_entry_t),
+ offsetof(overlay_target_entry_t, ote_reflink),
+ offsetof(overlay_target_entry_t, ote_addr), KM_SLEEP);
+ avl_create(&ott->ott_u.ott_dyn.ott_tree, overlay_mac_avl,
+ sizeof (overlay_target_entry_t),
+ offsetof(overlay_target_entry_t, ote_avllink));
+ }
+ mutex_enter(&odd->odd_lock);
+ if (odd->odd_flags & OVERLAY_F_VARPD) {
+ mutex_exit(&odd->odd_lock);
+ kmem_cache_free(overlay_target_cache, ott);
+ overlay_hold_rele(odd);
+ return (EEXIST);
+ }
+
+ odd->odd_flags |= OVERLAY_F_VARPD;
+ odd->odd_target = ott;
+ mutex_exit(&odd->odd_lock);
+
+ overlay_hold_rele(odd);
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_target_degrade(overlay_target_hdl_t *thdl, void *arg)
+{
+ overlay_dev_t *odd;
+ overlay_targ_degrade_t *otd = arg;
+
+ odd = overlay_hold_by_dlid(otd->otd_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ overlay_fm_degrade(odd, otd->otd_buf);
+ overlay_hold_rele(odd);
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_target_restore(overlay_target_hdl_t *thdl, void *arg)
+{
+ overlay_dev_t *odd;
+ overlay_targ_id_t *otid = arg;
+
+ odd = overlay_hold_by_dlid(otid->otid_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ overlay_fm_restore(odd);
+ overlay_hold_rele(odd);
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_target_disassociate(overlay_target_hdl_t *thdl, void *arg)
+{
+ overlay_dev_t *odd;
+ overlay_targ_id_t *otid = arg;
+
+ odd = overlay_hold_by_dlid(otid->otid_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ mutex_enter(&odd->odd_lock);
+ odd->odd_flags &= ~OVERLAY_F_VARPD;
+ mutex_exit(&odd->odd_lock);
+
+ overlay_hold_rele(odd);
+ return (0);
+}
+
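+/*
+ * This is the heart of the varpd request protocol: a varpd thread blocks here
+ * for up to a second waiting for an entry to appear on the global queue.
+ * When one does, we fill in the request with the entry's MAC header details,
+ * park the entry on this handle's outstanding list, and hand varpd a request
+ * ID that it must later answer with a respond or a drop ioctl.
+ */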
+static int
+overlay_target_lookup_request(overlay_target_hdl_t *thdl, void *arg)
+{
+ overlay_targ_lookup_t *otl = arg;
+ overlay_target_entry_t *entry;
+ clock_t ret, timeout;
+ mac_header_info_t mhi;
+
+ timeout = ddi_get_lbolt() + drv_usectohz(MICROSEC);
+again:
+ mutex_enter(&overlay_target_lock);
+ while (list_is_empty(&overlay_target_list)) {
+ ret = cv_timedwait(&overlay_target_condvar,
+ &overlay_target_lock, timeout);
+ if (ret == -1) {
+ mutex_exit(&overlay_target_lock);
+ return (ETIME);
+ }
+ }
+ entry = list_remove_head(&overlay_target_list);
+ mutex_exit(&overlay_target_lock);
+ mutex_enter(&entry->ote_lock);
+ if (entry->ote_flags & OVERLAY_ENTRY_F_VALID) {
+ ASSERT(entry->ote_chead == NULL);
+ mutex_exit(&entry->ote_lock);
+ goto again;
+ }
+ ASSERT(entry->ote_chead != NULL);
+
+ /*
+ * If we have a bogon that doesn't have a valid mac header, drop it and
+ * try again.
+ */
+ if (mac_vlan_header_info(entry->ote_odd->odd_mh, entry->ote_chead,
+ &mhi) != 0) {
+ boolean_t queue = B_FALSE;
+ mblk_t *mp = entry->ote_chead;
+ entry->ote_chead = mp->b_next;
+ mp->b_next = NULL;
+ if (entry->ote_ctail == mp)
+ entry->ote_ctail = entry->ote_chead;
+ entry->ote_mbsize -= msgsize(mp);
+ if (entry->ote_chead != NULL)
+ queue = B_TRUE;
+ mutex_exit(&entry->ote_lock);
+ if (queue == B_TRUE)
+ overlay_target_queue(entry);
+ freemsg(mp);
+ goto again;
+ }
+
+ otl->otl_dlid = entry->ote_odd->odd_linkid;
+ otl->otl_reqid = (uintptr_t)entry;
+ otl->otl_varpdid = entry->ote_ott->ott_id;
+ otl->otl_vnetid = entry->ote_odd->odd_vid;
+
+ otl->otl_hdrsize = mhi.mhi_hdrsize;
+ otl->otl_pktsize = msgsize(entry->ote_chead) - otl->otl_hdrsize;
+ bcopy(mhi.mhi_daddr, otl->otl_dstaddr, ETHERADDRL);
+ bcopy(mhi.mhi_saddr, otl->otl_srcaddr, ETHERADDRL);
+ otl->otl_dsttype = mhi.mhi_dsttype;
+ otl->otl_sap = mhi.mhi_bindsap;
+ otl->otl_vlan = VLAN_ID(mhi.mhi_tci);
+ mutex_exit(&entry->ote_lock);
+
+ mutex_enter(&thdl->oth_lock);
+ list_insert_tail(&thdl->oth_outstanding, entry);
+ mutex_exit(&thdl->oth_lock);
+
+ return (0);
+}
+
+static int
+overlay_target_lookup_respond(overlay_target_hdl_t *thdl, void *arg)
+{
+ const overlay_targ_resp_t *otr = arg;
+ overlay_target_entry_t *entry;
+ mblk_t *mp;
+
+ mutex_enter(&thdl->oth_lock);
+ for (entry = list_head(&thdl->oth_outstanding); entry != NULL;
+ entry = list_next(&thdl->oth_outstanding, entry)) {
+ if ((uintptr_t)entry == otr->otr_reqid)
+ break;
+ }
+
+ if (entry == NULL) {
+ mutex_exit(&thdl->oth_lock);
+ return (EINVAL);
+ }
+ list_remove(&thdl->oth_outstanding, entry);
+ mutex_exit(&thdl->oth_lock);
+
+ mutex_enter(&entry->ote_lock);
+ bcopy(&otr->otr_answer, &entry->ote_dest,
+ sizeof (overlay_target_point_t));
+ entry->ote_flags &= ~OVERLAY_ENTRY_F_PENDING;
+ entry->ote_flags |= OVERLAY_ENTRY_F_VALID;
+ mp = entry->ote_chead;
+ entry->ote_chead = NULL;
+ entry->ote_ctail = NULL;
+ entry->ote_mbsize = 0;
+ entry->ote_vtime = gethrtime();
+ mutex_exit(&entry->ote_lock);
+
+ /*
+ * For now do an in-situ drain.
+ */
+ mp = overlay_m_tx(entry->ote_odd, mp);
+ freemsgchain(mp);
+
+ mutex_enter(&entry->ote_ott->ott_lock);
+ entry->ote_ott->ott_ocount--;
+ cv_signal(&entry->ote_ott->ott_cond);
+ mutex_exit(&entry->ote_ott->ott_lock);
+
+ return (0);
+}
+
+static int
+overlay_target_lookup_drop(overlay_target_hdl_t *thdl, void *arg)
+{
+ const overlay_targ_resp_t *otr = arg;
+ overlay_target_entry_t *entry;
+ mblk_t *mp;
+ boolean_t queue = B_FALSE;
+
+ mutex_enter(&thdl->oth_lock);
+ for (entry = list_head(&thdl->oth_outstanding); entry != NULL;
+ entry = list_next(&thdl->oth_outstanding, entry)) {
+ if ((uintptr_t)entry == otr->otr_reqid)
+ break;
+ }
+
+ if (entry == NULL) {
+ mutex_exit(&thdl->oth_lock);
+ return (EINVAL);
+ }
+ list_remove(&thdl->oth_outstanding, entry);
+ mutex_exit(&thdl->oth_lock);
+
+ mutex_enter(&entry->ote_lock);
+
+ /* Safeguard against a confused varpd */
+ if (entry->ote_flags & OVERLAY_ENTRY_F_VALID) {
+ entry->ote_flags &= ~OVERLAY_ENTRY_F_PENDING;
+ DTRACE_PROBE1(overlay__target__valid__drop,
+ overlay_target_entry_t *, entry);
+ mutex_exit(&entry->ote_lock);
+ goto done;
+ }
+
+ mp = entry->ote_chead;
+ if (mp != NULL) {
+ entry->ote_chead = mp->b_next;
+ mp->b_next = NULL;
+ if (entry->ote_ctail == mp)
+ entry->ote_ctail = entry->ote_chead;
+ entry->ote_mbsize -= msgsize(mp);
+ }
+ if (entry->ote_chead != NULL) {
+ queue = B_TRUE;
+ entry->ote_flags |= OVERLAY_ENTRY_F_PENDING;
+ } else {
+ entry->ote_flags &= ~OVERLAY_ENTRY_F_PENDING;
+ }
+ mutex_exit(&entry->ote_lock);
+
+ if (queue == B_TRUE)
+ overlay_target_queue(entry);
+ freemsg(mp);
+
+done:
+ mutex_enter(&entry->ote_ott->ott_lock);
+ entry->ote_ott->ott_ocount--;
+ cv_signal(&entry->ote_ott->ott_cond);
+ mutex_exit(&entry->ote_ott->ott_lock);
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_target_pkt_copyin(const void *ubuf, void **outp, size_t *bsize,
+ int flags)
+{
+ overlay_targ_pkt_t *pkt;
+ overlay_targ_pkt32_t *pkt32;
+
+ pkt = kmem_alloc(sizeof (overlay_targ_pkt_t), KM_SLEEP);
+ *outp = pkt;
+ *bsize = sizeof (overlay_targ_pkt_t);
+ if (ddi_model_convert_from(flags & FMODELS) == DDI_MODEL_ILP32) {
+ uintptr_t addr;
+
+ if (ddi_copyin(ubuf, pkt, sizeof (overlay_targ_pkt32_t),
+ flags & FKIOCTL) != 0) {
+ kmem_free(pkt, *bsize);
+ return (EFAULT);
+ }
+ pkt32 = (overlay_targ_pkt32_t *)pkt;
+ addr = pkt32->otp_buf;
+ pkt->otp_buf = (void *)addr;
+ } else {
+ if (ddi_copyin(ubuf, pkt, *bsize, flags & FKIOCTL) != 0) {
+ kmem_free(pkt, *bsize);
+ return (EFAULT);
+ }
+ }
+ return (0);
+}
+
+static int
+overlay_target_pkt_copyout(void *ubuf, void *buf, size_t bufsize,
+ int flags)
+{
+ if (ddi_model_convert_from(flags & FMODELS) == DDI_MODEL_ILP32) {
+ overlay_targ_pkt_t *pkt = buf;
+ overlay_targ_pkt32_t *pkt32 = buf;
+ uintptr_t addr = (uintptr_t)pkt->otp_buf;
+ pkt32->otp_buf = (caddr32_t)addr;
+ if (ddi_copyout(buf, ubuf, sizeof (overlay_targ_pkt32_t),
+ flags & FKIOCTL) != 0)
+ return (EFAULT);
+ } else {
+ if (ddi_copyout(buf, ubuf, bufsize, flags & FKIOCTL) != 0)
+ return (EFAULT);
+ }
+ return (0);
+}
+
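+/*
+ * Copy out (a prefix of) the first packet queued on an outstanding entry so
+ * that varpd can inspect it.  The packet is not dequeued, and otp_size is
+ * updated to reflect the number of bytes actually copied.
+ */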
+static int
+overlay_target_packet(overlay_target_hdl_t *thdl, void *arg)
+{
+ overlay_targ_pkt_t *pkt = arg;
+ overlay_target_entry_t *entry;
+ mblk_t *mp;
+ size_t mlen;
+ size_t boff;
+
+ mutex_enter(&thdl->oth_lock);
+ for (entry = list_head(&thdl->oth_outstanding); entry != NULL;
+ entry = list_next(&thdl->oth_outstanding, entry)) {
+ if ((uintptr_t)entry == pkt->otp_reqid)
+ break;
+ }
+
+ if (entry == NULL) {
+ mutex_exit(&thdl->oth_lock);
+ return (EINVAL);
+ }
+ mutex_enter(&entry->ote_lock);
+ mutex_exit(&thdl->oth_lock);
+ mp = entry->ote_chead;
+ /* Protect against a rogue varpd */
+ if (mp == NULL) {
+ mutex_exit(&entry->ote_lock);
+ return (EINVAL);
+ }
+ mlen = MIN(msgsize(mp), pkt->otp_size);
+ pkt->otp_size = mlen;
+ boff = 0;
+ while (mlen > 0) {
+ size_t wlen = MIN(MBLKL(mp), mlen);
+ if (ddi_copyout(mp->b_rptr,
+ (void *)((uintptr_t)pkt->otp_buf + boff),
+ wlen, 0) != 0) {
+ mutex_exit(&entry->ote_lock);
+ return (EFAULT);
+ }
+ mlen -= wlen;
+ boff += wlen;
+ mp = mp->b_cont;
+ }
+ mutex_exit(&entry->ote_lock);
+ return (0);
+}
+
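+/*
+ * Inject a packet from varpd into a device's receive path, as though it had
+ * arrived decapsulated from the underlay network.  Contrast this with the
+ * resend ioctl below, which pushes a packet back out the transmit path.
+ */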
+static int
+overlay_target_inject(overlay_target_hdl_t *thdl, void *arg)
+{
+ overlay_targ_pkt_t *pkt = arg;
+ overlay_target_entry_t *entry;
+ overlay_dev_t *odd;
+ mblk_t *mp;
+
+ if (pkt->otp_size > ETHERMAX + VLAN_TAGSZ)
+ return (EINVAL);
+
+ mp = allocb(pkt->otp_size, 0);
+ if (mp == NULL)
+ return (ENOMEM);
+
+ if (ddi_copyin(pkt->otp_buf, mp->b_rptr, pkt->otp_size, 0) != 0) {
+ freeb(mp);
+ return (EFAULT);
+ }
+ mp->b_wptr += pkt->otp_size;
+
+ if (pkt->otp_linkid != UINT64_MAX) {
+ odd = overlay_hold_by_dlid(pkt->otp_linkid);
+ if (odd == NULL) {
+ freeb(mp);
+ return (ENOENT);
+ }
+ } else {
+ mutex_enter(&thdl->oth_lock);
+ for (entry = list_head(&thdl->oth_outstanding); entry != NULL;
+ entry = list_next(&thdl->oth_outstanding, entry)) {
+ if ((uintptr_t)entry == pkt->otp_reqid)
+ break;
+ }
+
+ if (entry == NULL) {
+ mutex_exit(&thdl->oth_lock);
+ freeb(mp);
+ return (ENOENT);
+ }
+ odd = entry->ote_odd;
+ mutex_exit(&thdl->oth_lock);
+ }
+
+ mutex_enter(&odd->odd_lock);
+ overlay_io_start(odd, OVERLAY_F_IN_RX);
+ mutex_exit(&odd->odd_lock);
+
+ mac_rx(odd->odd_mh, NULL, mp);
+
+ mutex_enter(&odd->odd_lock);
+ overlay_io_done(odd, OVERLAY_F_IN_RX);
+ mutex_exit(&odd->odd_lock);
+
+ return (0);
+}
+
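+/*
+ * Transmit a packet from varpd through a device's encapsulation and transmit
+ * path, just as if the device had sent it itself.
+ */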
+static int
+overlay_target_resend(overlay_target_hdl_t *thdl, void *arg)
+{
+ overlay_targ_pkt_t *pkt = arg;
+ overlay_target_entry_t *entry;
+ overlay_dev_t *odd;
+ mblk_t *mp;
+
+ if (pkt->otp_size > ETHERMAX + VLAN_TAGSZ)
+ return (EINVAL);
+
+ mp = allocb(pkt->otp_size, 0);
+ if (mp == NULL)
+ return (ENOMEM);
+
+ if (ddi_copyin(pkt->otp_buf, mp->b_rptr, pkt->otp_size, 0) != 0) {
+ freeb(mp);
+ return (EFAULT);
+ }
+ mp->b_wptr += pkt->otp_size;
+
+ if (pkt->otp_linkid != UINT64_MAX) {
+ odd = overlay_hold_by_dlid(pkt->otp_linkid);
+ if (odd == NULL) {
+ freeb(mp);
+ return (ENOENT);
+ }
+ } else {
+ mutex_enter(&thdl->oth_lock);
+ for (entry = list_head(&thdl->oth_outstanding); entry != NULL;
+ entry = list_next(&thdl->oth_outstanding, entry)) {
+ if ((uintptr_t)entry == pkt->otp_reqid)
+ break;
+ }
+
+ if (entry == NULL) {
+ mutex_exit(&thdl->oth_lock);
+ freeb(mp);
+ return (ENOENT);
+ }
+ odd = entry->ote_odd;
+ mutex_exit(&thdl->oth_lock);
+ }
+
+ mp = overlay_m_tx(odd, mp);
+ freemsgchain(mp);
+
+ return (0);
+}
+
+typedef struct overlay_targ_list_int {
+ boolean_t otli_count;
+ uint32_t otli_cur;
+ uint32_t otli_nents;
+ uint32_t otli_ents[];
+} overlay_targ_list_int_t;
+
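+/*
+ * The list ioctl is a two-step protocol: a caller that passes otl_nents == 0
+ * is asking only for a count of overlay devices (otli_count below), and can
+ * then call again with a suitably sized array to retrieve their link IDs.
+ */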
+static int
+overlay_target_list_copyin(const void *ubuf, void **outp, size_t *bsize,
+ int flags)
+{
+ overlay_targ_list_t n;
+ overlay_targ_list_int_t *otl;
+
+ if (ddi_copyin(ubuf, &n, sizeof (overlay_targ_list_t),
+ flags & FKIOCTL) != 0)
+ return (EFAULT);
+
+	/*
+	 * Bound the requested entry count so that the allocation below can't
+	 * be used to ask us for an absurd amount of kernel memory.
+	 */
+ if (n.otl_nents >= INT32_MAX / sizeof (uint32_t))
+ return (EINVAL);
+ *bsize = sizeof (overlay_targ_list_int_t) +
+ sizeof (uint32_t) * n.otl_nents;
+ otl = kmem_zalloc(*bsize, KM_SLEEP);
+ otl->otli_cur = 0;
+ otl->otli_nents = n.otl_nents;
+ if (otl->otli_nents != 0) {
+ otl->otli_count = B_FALSE;
+ if (ddi_copyin((void *)((uintptr_t)ubuf +
+ offsetof(overlay_targ_list_t, otl_ents)),
+ otl->otli_ents, n.otl_nents * sizeof (uint32_t),
+ flags & FKIOCTL) != 0) {
+ kmem_free(otl, *bsize);
+ return (EFAULT);
+ }
+ } else {
+ otl->otli_count = B_TRUE;
+ }
+
+ *outp = otl;
+ return (0);
+}
+
+static int
+overlay_target_ioctl_list_cb(overlay_dev_t *odd, void *arg)
+{
+ overlay_targ_list_int_t *otl = arg;
+
+ if (otl->otli_cur < otl->otli_nents)
+ otl->otli_ents[otl->otli_cur] = odd->odd_linkid;
+ otl->otli_cur++;
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_target_ioctl_list(overlay_target_hdl_t *thdl, void *arg)
+{
+ overlay_dev_iter(overlay_target_ioctl_list_cb, arg);
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_target_list_copyout(void *ubuf, void *buf, size_t bufsize, int flags)
+{
+ overlay_targ_list_int_t *otl = buf;
+
+ if (ddi_copyout(&otl->otli_cur, ubuf, sizeof (uint32_t),
+ flags & FKIOCTL) != 0)
+ return (EFAULT);
+
+ if (otl->otli_count == B_FALSE) {
+ if (ddi_copyout(otl->otli_ents,
+ (void *)((uintptr_t)ubuf +
+ offsetof(overlay_targ_list_t, otl_ents)),
+ sizeof (uint32_t) * otl->otli_nents,
+ flags & FKIOCTL) != 0)
+ return (EFAULT);
+ }
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_target_cache_get(overlay_target_hdl_t *thdl, void *arg)
+{
+ int ret = 0;
+ overlay_dev_t *odd;
+ overlay_target_t *ott;
+ overlay_targ_cache_t *otc = arg;
+
+ odd = overlay_hold_by_dlid(otc->otc_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ mutex_enter(&odd->odd_lock);
+ if (!(odd->odd_flags & OVERLAY_F_VARPD)) {
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (ENXIO);
+ }
+ ott = odd->odd_target;
+ if (ott->ott_mode != OVERLAY_TARGET_POINT &&
+ ott->ott_mode != OVERLAY_TARGET_DYNAMIC) {
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (ENOTSUP);
+ }
+ mutex_enter(&ott->ott_lock);
+ mutex_exit(&odd->odd_lock);
+
+ if (ott->ott_mode == OVERLAY_TARGET_POINT) {
+ otc->otc_entry.otce_flags = 0;
+ bcopy(&ott->ott_u.ott_point, &otc->otc_entry.otce_dest,
+ sizeof (overlay_target_point_t));
+ } else {
+ overlay_target_entry_t *ote;
+ ote = refhash_lookup(ott->ott_u.ott_dyn.ott_dhash,
+ otc->otc_entry.otce_mac);
+ if (ote != NULL) {
+ mutex_enter(&ote->ote_lock);
+ if ((ote->ote_flags &
+ OVERLAY_ENTRY_F_VALID_MASK) != 0) {
+ if (ote->ote_flags & OVERLAY_ENTRY_F_DROP) {
+ otc->otc_entry.otce_flags =
+ OVERLAY_TARGET_CACHE_DROP;
+ } else {
+ otc->otc_entry.otce_flags = 0;
+ bcopy(&ote->ote_dest,
+ &otc->otc_entry.otce_dest,
+ sizeof (overlay_target_point_t));
+ }
+ ret = 0;
+ } else {
+ ret = ENOENT;
+ }
+ mutex_exit(&ote->ote_lock);
+ } else {
+ ret = ENOENT;
+ }
+ }
+
+ mutex_exit(&ott->ott_lock);
+ overlay_hold_rele(odd);
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+overlay_target_cache_set(overlay_target_hdl_t *thdl, void *arg)
+{
+ overlay_dev_t *odd;
+ overlay_target_t *ott;
+ overlay_target_entry_t *ote;
+ overlay_targ_cache_t *otc = arg;
+ mblk_t *mp = NULL;
+
+ if (otc->otc_entry.otce_flags & ~OVERLAY_TARGET_CACHE_DROP)
+ return (EINVAL);
+
+ odd = overlay_hold_by_dlid(otc->otc_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ mutex_enter(&odd->odd_lock);
+ if (!(odd->odd_flags & OVERLAY_F_VARPD)) {
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (ENXIO);
+ }
+ ott = odd->odd_target;
+ if (ott->ott_mode != OVERLAY_TARGET_DYNAMIC) {
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (ENOTSUP);
+ }
+ mutex_enter(&ott->ott_lock);
+ mutex_exit(&odd->odd_lock);
+
+ ote = refhash_lookup(ott->ott_u.ott_dyn.ott_dhash,
+ otc->otc_entry.otce_mac);
+ if (ote == NULL) {
+ ote = kmem_cache_alloc(overlay_entry_cache, KM_SLEEP);
+ bcopy(otc->otc_entry.otce_mac, ote->ote_addr, ETHERADDRL);
+ ote->ote_chead = ote->ote_ctail = NULL;
+ ote->ote_mbsize = 0;
+ ote->ote_ott = ott;
+ ote->ote_odd = odd;
+ mutex_enter(&ote->ote_lock);
+ refhash_insert(ott->ott_u.ott_dyn.ott_dhash, ote);
+ avl_add(&ott->ott_u.ott_dyn.ott_tree, ote);
+ } else {
+ mutex_enter(&ote->ote_lock);
+ }
+
+ if (otc->otc_entry.otce_flags & OVERLAY_TARGET_CACHE_DROP) {
+ ote->ote_flags |= OVERLAY_ENTRY_F_DROP;
+ } else {
+ ote->ote_flags |= OVERLAY_ENTRY_F_VALID;
+ bcopy(&otc->otc_entry.otce_dest, &ote->ote_dest,
+ sizeof (overlay_target_point_t));
+ mp = ote->ote_chead;
+ ote->ote_chead = NULL;
+ ote->ote_ctail = NULL;
+ ote->ote_mbsize = 0;
+ ote->ote_vtime = gethrtime();
+ }
+
+ mutex_exit(&ote->ote_lock);
+ mutex_exit(&ott->ott_lock);
+
+ if (mp != NULL) {
+ mp = overlay_m_tx(ote->ote_odd, mp);
+ freemsgchain(mp);
+ }
+
+ overlay_hold_rele(odd);
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+overlay_target_cache_remove(overlay_target_hdl_t *thdl, void *arg)
+{
+ int ret = 0;
+ overlay_dev_t *odd;
+ overlay_target_t *ott;
+ overlay_target_entry_t *ote;
+ overlay_targ_cache_t *otc = arg;
+
+ odd = overlay_hold_by_dlid(otc->otc_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ mutex_enter(&odd->odd_lock);
+ if (!(odd->odd_flags & OVERLAY_F_VARPD)) {
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (ENXIO);
+ }
+ ott = odd->odd_target;
+ if (ott->ott_mode != OVERLAY_TARGET_DYNAMIC) {
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (ENOTSUP);
+ }
+ mutex_enter(&ott->ott_lock);
+ mutex_exit(&odd->odd_lock);
+
+ ote = refhash_lookup(ott->ott_u.ott_dyn.ott_dhash,
+ otc->otc_entry.otce_mac);
+ if (ote != NULL) {
+ mutex_enter(&ote->ote_lock);
+ ote->ote_flags &= ~OVERLAY_ENTRY_F_VALID_MASK;
+ mutex_exit(&ote->ote_lock);
+ ret = 0;
+ } else {
+ ret = ENOENT;
+ }
+
+ mutex_exit(&ott->ott_lock);
+ overlay_hold_rele(odd);
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+overlay_target_cache_flush(overlay_target_hdl_t *thdl, void *arg)
+{
+ avl_tree_t *avl;
+ overlay_dev_t *odd;
+ overlay_target_t *ott;
+ overlay_target_entry_t *ote;
+ overlay_targ_cache_t *otc = arg;
+
+ odd = overlay_hold_by_dlid(otc->otc_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ mutex_enter(&odd->odd_lock);
+ if (!(odd->odd_flags & OVERLAY_F_VARPD)) {
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (ENXIO);
+ }
+ ott = odd->odd_target;
+ if (ott->ott_mode != OVERLAY_TARGET_DYNAMIC) {
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (ENOTSUP);
+ }
+ mutex_enter(&ott->ott_lock);
+ mutex_exit(&odd->odd_lock);
+ avl = &ott->ott_u.ott_dyn.ott_tree;
+
+ for (ote = avl_first(avl); ote != NULL; ote = AVL_NEXT(avl, ote)) {
+ mutex_enter(&ote->ote_lock);
+ ote->ote_flags &= ~OVERLAY_ENTRY_F_VALID_MASK;
+ mutex_exit(&ote->ote_lock);
+ }
+
+ mutex_exit(&ott->ott_lock);
+ overlay_hold_rele(odd);
+
+ return (0);
+}
+
+static int
+overlay_target_cache_iter_copyin(const void *ubuf, void **outp, size_t *bsize,
+ int flags)
+{
+ overlay_targ_cache_iter_t base, *iter;
+
+ if (ddi_copyin(ubuf, &base, sizeof (overlay_targ_cache_iter_t),
+ flags & FKIOCTL) != 0)
+ return (EFAULT);
+
+ if (base.otci_count > OVERLAY_TARGET_ITER_MAX)
+ return (E2BIG);
+
+ if (base.otci_count == 0)
+ return (EINVAL);
+
+ *bsize = sizeof (overlay_targ_cache_iter_t) +
+ base.otci_count * sizeof (overlay_targ_cache_entry_t);
+ iter = kmem_alloc(*bsize, KM_SLEEP);
+ bcopy(&base, iter, sizeof (overlay_targ_cache_iter_t));
+ *outp = iter;
+
+ return (0);
+}
+
+typedef struct overlay_targ_cache_marker {
+ uint8_t otcm_mac[ETHERADDRL];
+ uint16_t otcm_done;
+} overlay_targ_cache_marker_t;
+
+/* ARGSUSED */
+static int
+overlay_target_cache_iter(overlay_target_hdl_t *thdl, void *arg)
+{
+ overlay_dev_t *odd;
+ overlay_target_t *ott;
+ overlay_target_entry_t lookup, *ent;
+ overlay_targ_cache_marker_t *mark;
+ avl_index_t where;
+ avl_tree_t *avl;
+ uint16_t written = 0;
+
+ overlay_targ_cache_iter_t *iter = arg;
+ mark = (void *)&iter->otci_marker;
+
+ if (mark->otcm_done != 0) {
+ iter->otci_count = 0;
+ return (0);
+ }
+
+ odd = overlay_hold_by_dlid(iter->otci_linkid);
+ if (odd == NULL)
+ return (ENOENT);
+
+ mutex_enter(&odd->odd_lock);
+ if (!(odd->odd_flags & OVERLAY_F_VARPD)) {
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (ENXIO);
+ }
+ ott = odd->odd_target;
+ if (ott->ott_mode != OVERLAY_TARGET_DYNAMIC &&
+ ott->ott_mode != OVERLAY_TARGET_POINT) {
+ mutex_exit(&odd->odd_lock);
+ overlay_hold_rele(odd);
+ return (ENOTSUP);
+ }
+
+ /*
+ * Holding this lock across the entire iteration probably isn't very
+ * good. We should perhaps add an r/w lock for the avl tree. But we'll
+	 * wait until we know it's necessary before we do more.
+ */
+ mutex_enter(&ott->ott_lock);
+ mutex_exit(&odd->odd_lock);
+
+ if (ott->ott_mode == OVERLAY_TARGET_POINT) {
+ overlay_targ_cache_entry_t *out = &iter->otci_ents[0];
+ bzero(out->otce_mac, ETHERADDRL);
+ out->otce_flags = 0;
+ bcopy(&ott->ott_u.ott_point, &out->otce_dest,
+ sizeof (overlay_target_point_t));
+ written++;
+		mark->otcm_done = 1;
+		goto done;
+	}
+
+ avl = &ott->ott_u.ott_dyn.ott_tree;
+ bcopy(mark->otcm_mac, lookup.ote_addr, ETHERADDRL);
+ ent = avl_find(avl, &lookup, &where);
+
+ /*
+ * NULL ent means that the entry does not exist, so we want to start
+ * with the closest node in the tree. This means that we implicitly rely
+	 * on the tree's order: the first node will be the mac
+	 * 00:00:00:00:00:00 and the last will be ff:ff:ff:ff:ff:ff.
+ */
+ if (ent == NULL) {
+ ent = avl_nearest(avl, where, AVL_AFTER);
+ if (ent == NULL) {
+ mark->otcm_done = 1;
+ goto done;
+ }
+ }
+
+ for (; ent != NULL && written < iter->otci_count;
+ ent = AVL_NEXT(avl, ent)) {
+ overlay_targ_cache_entry_t *out = &iter->otci_ents[written];
+ mutex_enter(&ent->ote_lock);
+ if ((ent->ote_flags & OVERLAY_ENTRY_F_VALID_MASK) == 0) {
+ mutex_exit(&ent->ote_lock);
+ continue;
+ }
+ bcopy(ent->ote_addr, out->otce_mac, ETHERADDRL);
+ out->otce_flags = 0;
+ if (ent->ote_flags & OVERLAY_ENTRY_F_DROP)
+ out->otce_flags |= OVERLAY_TARGET_CACHE_DROP;
+ if (ent->ote_flags & OVERLAY_ENTRY_F_VALID)
+ bcopy(&ent->ote_dest, &out->otce_dest,
+ sizeof (overlay_target_point_t));
+ written++;
+ mutex_exit(&ent->ote_lock);
+ }
+
+ if (ent != NULL) {
+ bcopy(ent->ote_addr, mark->otcm_mac, ETHERADDRL);
+ } else {
+ mark->otcm_done = 1;
+ }
+
+done:
+ iter->otci_count = written;
+ mutex_exit(&ott->ott_lock);
+ overlay_hold_rele(odd);
+
+ return (0);
+}
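Because the marker is carried inside the structure that is copied both in and out, a consumer resumes the walk simply by reissuing the ioctl with the same buffer until otci_count comes back zero. A hypothetical sketch (CHUNK and handle_entry are illustrative; otci_count is capacity on entry and entries written on return):

	enum { CHUNK = 16 };
	overlay_targ_cache_iter_t *iter;
	size_t sz = sizeof (*iter) +
	    CHUNK * sizeof (overlay_targ_cache_entry_t);

	iter = calloc(1, sz);		/* zeroed marker: start of the cache */
	iter->otci_linkid = linkid;

	for (;;) {
		iter->otci_count = CHUNK;
		if (ioctl(fd, OVERLAY_TARG_CACHE_ITER, iter) != 0)
			break;
		if (iter->otci_count == 0)
			break;		/* marker says the walk is done */
		for (uint16_t i = 0; i < iter->otci_count; i++)
			handle_entry(&iter->otci_ents[i]);
	}
	free(iter);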
+
+/* ARGSUSED */
+static int
+overlay_target_cache_iter_copyout(void *ubuf, void *buf, size_t bufsize,
+ int flags)
+{
+ size_t outsize;
+ const overlay_targ_cache_iter_t *iter = buf;
+
+ outsize = sizeof (overlay_targ_cache_iter_t) +
+ iter->otci_count * sizeof (overlay_targ_cache_entry_t);
+
+ if (ddi_copyout(buf, ubuf, outsize, flags & FKIOCTL) != 0)
+ return (EFAULT);
+
+ return (0);
+}
+
+static overlay_target_ioctl_t overlay_target_ioctab[] = {
+ { OVERLAY_TARG_INFO, B_TRUE, B_TRUE,
+ NULL, overlay_target_info,
+ NULL, sizeof (overlay_targ_info_t) },
+ { OVERLAY_TARG_ASSOCIATE, B_TRUE, B_FALSE,
+ NULL, overlay_target_associate,
+ NULL, sizeof (overlay_targ_associate_t) },
+ { OVERLAY_TARG_DISASSOCIATE, B_TRUE, B_FALSE,
+ NULL, overlay_target_disassociate,
+ NULL, sizeof (overlay_targ_id_t) },
+ { OVERLAY_TARG_DEGRADE, B_TRUE, B_FALSE,
+ NULL, overlay_target_degrade,
+ NULL, sizeof (overlay_targ_degrade_t) },
+ { OVERLAY_TARG_RESTORE, B_TRUE, B_FALSE,
+ NULL, overlay_target_restore,
+ NULL, sizeof (overlay_targ_id_t) },
+ { OVERLAY_TARG_LOOKUP, B_FALSE, B_TRUE,
+ NULL, overlay_target_lookup_request,
+ NULL, sizeof (overlay_targ_lookup_t) },
+ { OVERLAY_TARG_RESPOND, B_TRUE, B_FALSE,
+ NULL, overlay_target_lookup_respond,
+ NULL, sizeof (overlay_targ_resp_t) },
+ { OVERLAY_TARG_DROP, B_TRUE, B_FALSE,
+ NULL, overlay_target_lookup_drop,
+ NULL, sizeof (overlay_targ_resp_t) },
+ { OVERLAY_TARG_PKT, B_TRUE, B_TRUE,
+ overlay_target_pkt_copyin,
+ overlay_target_packet,
+ overlay_target_pkt_copyout,
+ sizeof (overlay_targ_pkt_t) },
+ { OVERLAY_TARG_INJECT, B_TRUE, B_FALSE,
+ overlay_target_pkt_copyin,
+ overlay_target_inject,
+ NULL, sizeof (overlay_targ_pkt_t) },
+ { OVERLAY_TARG_RESEND, B_TRUE, B_FALSE,
+ overlay_target_pkt_copyin,
+ overlay_target_resend,
+ NULL, sizeof (overlay_targ_pkt_t) },
+ { OVERLAY_TARG_LIST, B_FALSE, B_TRUE,
+ overlay_target_list_copyin,
+ overlay_target_ioctl_list,
+ overlay_target_list_copyout,
+ sizeof (overlay_targ_list_t) },
+ { OVERLAY_TARG_CACHE_GET, B_FALSE, B_TRUE,
+ NULL, overlay_target_cache_get,
+ NULL, sizeof (overlay_targ_cache_t) },
+ { OVERLAY_TARG_CACHE_SET, B_TRUE, B_TRUE,
+ NULL, overlay_target_cache_set,
+ NULL, sizeof (overlay_targ_cache_t) },
+ { OVERLAY_TARG_CACHE_REMOVE, B_TRUE, B_TRUE,
+ NULL, overlay_target_cache_remove,
+ NULL, sizeof (overlay_targ_cache_t) },
+ { OVERLAY_TARG_CACHE_FLUSH, B_TRUE, B_TRUE,
+ NULL, overlay_target_cache_flush,
+ NULL, sizeof (overlay_targ_cache_t) },
+ { OVERLAY_TARG_CACHE_ITER, B_FALSE, B_TRUE,
+ overlay_target_cache_iter_copyin,
+ overlay_target_cache_iter,
+ overlay_target_cache_iter_copyout,
+ sizeof (overlay_targ_cache_iter_t) },
+ { 0 }
+};
+
+int
+overlay_target_open(dev_t *devp, int flags, int otype, cred_t *credp)
+{
+ minor_t mid;
+ overlay_target_hdl_t *thdl;
+
+ if (secpolicy_dl_config(credp) != 0)
+ return (EPERM);
+
+ if (getminor(*devp) != 0)
+ return (ENXIO);
+
+ if (otype & OTYP_BLK)
+ return (EINVAL);
+
+ if (flags & ~(FREAD | FWRITE | FEXCL))
+ return (EINVAL);
+
+	if ((flags & FWRITE) && !(flags & FEXCL))
+ return (EINVAL);
+
+ if (!(flags & FREAD) && !(flags & FWRITE))
+ return (EINVAL);
+
+ if (crgetzoneid(credp) != GLOBAL_ZONEID)
+ return (EPERM);
+
+ mid = id_alloc(overlay_thdl_idspace);
+ if (ddi_soft_state_zalloc(overlay_thdl_state, mid) != 0) {
+ id_free(overlay_thdl_idspace, mid);
+ return (ENXIO);
+ }
+
+ thdl = ddi_get_soft_state(overlay_thdl_state, mid);
+ VERIFY(thdl != NULL);
+ thdl->oth_minor = mid;
+ thdl->oth_zoneid = crgetzoneid(credp);
+ thdl->oth_oflags = flags;
+ mutex_init(&thdl->oth_lock, NULL, MUTEX_DRIVER, NULL);
+ list_create(&thdl->oth_outstanding, sizeof (overlay_target_entry_t),
+ offsetof(overlay_target_entry_t, ote_qlink));
+ *devp = makedevice(getmajor(*devp), mid);
+
+ mutex_enter(&overlay_target_lock);
+ if ((flags & FEXCL) && overlay_target_excl == B_TRUE) {
+ mutex_exit(&overlay_target_lock);
+ list_destroy(&thdl->oth_outstanding);
+ mutex_destroy(&thdl->oth_lock);
+ ddi_soft_state_free(overlay_thdl_state, mid);
+ id_free(overlay_thdl_idspace, mid);
+ return (EEXIST);
+ } else if ((flags & FEXCL) != 0) {
+ VERIFY(overlay_target_excl == B_FALSE);
+ overlay_target_excl = B_TRUE;
+ }
+ list_insert_tail(&overlay_thdl_list, thdl);
+ mutex_exit(&overlay_target_lock);
+
+ return (0);
+}
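The checks above define the userland contract for the target device: an open must be readable or writable, write access additionally demands exclusivity, and only one exclusive handle may exist at a time. A hypothetical illustration (the device path is illustrative only):

	/* Control handle: write access must be paired with O_EXCL. */
	int fd = open("/dev/overlay", O_RDWR | O_EXCL);

	/*
	 * While fd is held, a second O_EXCL open fails with EEXIST;
	 * read-only opens remain permitted.
	 */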
+
+/* ARGSUSED */
+int
+overlay_target_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
+ int *rvalp)
+{
+ overlay_target_ioctl_t *ioc;
+ overlay_target_hdl_t *thdl;
+
+ if (secpolicy_dl_config(credp) != 0)
+ return (EPERM);
+
+ if ((thdl = ddi_get_soft_state(overlay_thdl_state,
+ getminor(dev))) == NULL)
+ return (ENXIO);
+
+ for (ioc = &overlay_target_ioctab[0]; ioc->oti_cmd != 0; ioc++) {
+ int ret;
+ caddr_t buf;
+ size_t bufsize;
+
+ if (ioc->oti_cmd != cmd)
+ continue;
+
+ if (ioc->oti_write == B_TRUE && !(mode & FWRITE))
+ return (EBADF);
+
+ if (ioc->oti_copyin == NULL) {
+ bufsize = ioc->oti_size;
+ buf = kmem_alloc(bufsize, KM_SLEEP);
+ if (ddi_copyin((void *)(uintptr_t)arg, buf, bufsize,
+ mode & FKIOCTL) != 0) {
+ kmem_free(buf, bufsize);
+ return (EFAULT);
+ }
+ } else {
+ if ((ret = ioc->oti_copyin((void *)(uintptr_t)arg,
+ (void **)&buf, &bufsize, mode)) != 0)
+ return (ret);
+ }
+
+ ret = ioc->oti_func(thdl, buf);
+ if (ret == 0 && ioc->oti_size != 0 &&
+ ioc->oti_ncopyout == B_TRUE) {
+ if (ioc->oti_copyout == NULL) {
+ if (ddi_copyout(buf, (void *)(uintptr_t)arg,
+ bufsize, mode & FKIOCTL) != 0)
+ ret = EFAULT;
+ } else {
+ ret = ioc->oti_copyout((void *)(uintptr_t)arg,
+ buf, bufsize, mode);
+ }
+ }
+
+ kmem_free(buf, bufsize);
+ return (ret);
+ }
+
+ return (ENOTTY);
+}
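A consequence of the table-driven dispatch above is that supporting a new command usually means one table entry plus a handler, with NULL copyin/copyout selecting the generic fixed-size path. A hypothetical entry (the command, handler, and structure names are illustrative, not part of this change):

	{ OVERLAY_TARG_EXAMPLE, B_TRUE, B_TRUE,	/* needs FWRITE; copies out */
	    NULL, overlay_target_example,	/* generic fixed-size copyin */
	    NULL, sizeof (overlay_targ_example_t) },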
+
+/* ARGSUSED */
+int
+overlay_target_close(dev_t dev, int flags, int otype, cred_t *credp)
+{
+ overlay_target_hdl_t *thdl;
+ overlay_target_entry_t *entry;
+ minor_t mid = getminor(dev);
+
+ if ((thdl = ddi_get_soft_state(overlay_thdl_state, mid)) == NULL)
+ return (ENXIO);
+
+ mutex_enter(&overlay_target_lock);
+ list_remove(&overlay_thdl_list, thdl);
+ mutex_enter(&thdl->oth_lock);
+ while ((entry = list_remove_head(&thdl->oth_outstanding)) != NULL)
+ list_insert_tail(&overlay_target_list, entry);
+ cv_signal(&overlay_target_condvar);
+ mutex_exit(&thdl->oth_lock);
+ if ((thdl->oth_oflags & FEXCL) != 0) {
+ VERIFY(overlay_target_excl == B_TRUE);
+ overlay_target_excl = B_FALSE;
+ }
+ mutex_exit(&overlay_target_lock);
+
+ list_destroy(&thdl->oth_outstanding);
+ mutex_destroy(&thdl->oth_lock);
+ mid = thdl->oth_minor;
+ ddi_soft_state_free(overlay_thdl_state, mid);
+ id_free(overlay_thdl_idspace, mid);
+
+ return (0);
+}
diff --git a/usr/src/uts/common/io/overlay/plugins/overlay_vxlan.c b/usr/src/uts/common/io/overlay/plugins/overlay_vxlan.c
new file mode 100644
index 0000000000..8b4e4ecb42
--- /dev/null
+++ b/usr/src/uts/common/io/overlay/plugins/overlay_vxlan.c
@@ -0,0 +1,372 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2015 Joyent, Inc.
+ */
+
+/*
+ * VXLAN encapsulation module
+ *
+ * The VXLAN header looks as follows in network byte order:
+ *
+ * |0 3| 4 |5 31|
+ * +----------+---+------------------------+
+ * | Reserved | I | Reserved |
+ * +---------------------------------------+
+ * | Virtual Network ID | Reserved |
+ * +----------------------------+----------+
+ * |0 23|24 31|
+ *
+ * All reserved values must be 0. The I bit must be 1. We call the top
+ * word the VXLAN magic field for the time being. The second word is
+ * awkward to operate on: the ID is a 24-bit big endian value, and we
+ * have to take care never to touch the reserved low byte.
+ *
+ * For us, VXLAN encapsulation is a fairly straightforward implementation. It
+ * has only two properties, a listen_ip and a listen_port, which determine the
+ * address and port we listen on. While we do not have a default address to
+ * listen upon, we do have a default port: the IANA-assigned port for
+ * VXLAN -- 4789.
+ */
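Concretely, the arithmetic the encap and decap routines below perform on the second word looks like this (a sketch assuming VXLAN_ID_SHIFT is 8, i.e. the 24-bit ID occupies the word's upper three bytes):

	/*
	 * VNI 0x123456 on the wire, in network byte order:
	 *   host value:  0x123456 << 8      == 0x12345600
	 *   wire bytes:  htonl(0x12345600)  -> 0x12 0x34 0x56 0x00
	 */
	uint32_t wire = htonl((uint32_t)0x123456 << VXLAN_ID_SHIFT);
	uint32_t vni = ntohl(wire) >> VXLAN_ID_SHIFT;	/* 0x123456 again */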
+
+#include <sys/overlay_plugin.h>
+#include <sys/modctl.h>
+#include <sys/errno.h>
+#include <sys/byteorder.h>
+#include <sys/vxlan.h>
+#include <inet/ip.h>
+#include <netinet/in.h>
+#include <sys/strsun.h>
+#include <netinet/udp.h>
+
+static const char *vxlan_ident = "vxlan";
+static uint16_t vxlan_defport = IPPORT_VXLAN;
+
+/*
+ * Should we enable UDP source port hashing for fanout?
+ */
+boolean_t vxlan_fanout = B_TRUE;
+
+static const char *vxlan_props[] = {
+ "vxlan/listen_ip",
+ "vxlan/listen_port",
+ NULL
+};
+
+typedef struct vxlan {
+ kmutex_t vxl_lock;
+ overlay_handle_t vxl_oh;
+ uint16_t vxl_lport;
+ boolean_t vxl_hladdr;
+ struct in6_addr vxl_laddr;
+} vxlan_t;
+
+static int
+vxlan_o_init(overlay_handle_t oh, void **outp)
+{
+ vxlan_t *vxl;
+
+ vxl = kmem_alloc(sizeof (vxlan_t), KM_SLEEP);
+ *outp = vxl;
+ mutex_init(&vxl->vxl_lock, NULL, MUTEX_DRIVER, NULL);
+ vxl->vxl_oh = oh;
+ vxl->vxl_lport = vxlan_defport;
+ vxl->vxl_hladdr = B_FALSE;
+
+ return (0);
+}
+
+static void
+vxlan_o_fini(void *arg)
+{
+ vxlan_t *vxl = arg;
+
+ mutex_destroy(&vxl->vxl_lock);
+ kmem_free(arg, sizeof (vxlan_t));
+}
+
+static int
+vxlan_o_socket(void *arg, int *dp, int *fp, int *pp, struct sockaddr *addr,
+ socklen_t *slenp)
+{
+ vxlan_t *vxl = arg;
+ struct sockaddr_in6 *in;
+
+ in = (struct sockaddr_in6 *)addr;
+ *dp = AF_INET6;
+ *fp = SOCK_DGRAM;
+ *pp = 0;
+ bzero(in, sizeof (struct sockaddr_in6));
+ in->sin6_family = AF_INET6;
+
+ /*
+ * We should consider a more expressive private errno set that
+	 * providers can use.
+ */
+ mutex_enter(&vxl->vxl_lock);
+ if (vxl->vxl_hladdr == B_FALSE) {
+ mutex_exit(&vxl->vxl_lock);
+ return (EINVAL);
+ }
+ in->sin6_port = htons(vxl->vxl_lport);
+ in->sin6_addr = vxl->vxl_laddr;
+ mutex_exit(&vxl->vxl_lock);
+ *slenp = sizeof (struct sockaddr_in6);
+
+ return (0);
+}
+
+static int
+vxlan_o_sockopt(ksocket_t ksock)
+{
+ int val, err;
+ if (vxlan_fanout == B_FALSE)
+ return (0);
+
+ val = UDP_HASH_VXLAN;
+ err = ksocket_setsockopt(ksock, IPPROTO_UDP, UDP_SRCPORT_HASH, &val,
+ sizeof (val), kcred);
+ return (err);
+}
+
+/* ARGSUSED */
+static int
+vxlan_o_encap(void *arg, mblk_t *mp, ovep_encap_info_t *einfop,
+ mblk_t **outp)
+{
+ mblk_t *ob;
+ vxlan_hdr_t *vxh;
+
+ ASSERT(einfop->ovdi_id < (1 << 24));
+
+ /*
+ * This allocation could get hot. We may want to have a good way to
+ * cache and handle this allocation the same way that IP does with
+ * keeping around a message block per entry, or basically treating this
+ * as an immutable message block in the system. Basically freemsg() will
+ * be a nop, but we'll do the right thing with respect to the rest of
+ * the chain.
+ */
+ ob = allocb(VXLAN_HDR_LEN, 0);
+ if (ob == NULL)
+ return (ENOMEM);
+
+ vxh = (vxlan_hdr_t *)ob->b_rptr;
+	vxh->vxlan_flags = htonl(VXLAN_F_VDI);
+ vxh->vxlan_id = htonl((uint32_t)einfop->ovdi_id << VXLAN_ID_SHIFT);
+ ob->b_wptr += VXLAN_HDR_LEN;
+ *outp = ob;
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+vxlan_o_decap(void *arg, mblk_t *mp, ovep_encap_info_t *dinfop)
+{
+ vxlan_hdr_t *vxh;
+
+ if (MBLKL(mp) < sizeof (vxlan_hdr_t))
+ return (EINVAL);
+ vxh = (vxlan_hdr_t *)mp->b_rptr;
+ if ((ntohl(vxh->vxlan_flags) & VXLAN_F_VDI) == 0)
+ return (EINVAL);
+
+ dinfop->ovdi_id = ntohl(vxh->vxlan_id) >> VXLAN_ID_SHIFT;
+ dinfop->ovdi_hdr_size = VXLAN_HDR_LEN;
+
+ return (0);
+}
+
+static int
+vxlan_o_getprop(void *arg, const char *pr_name, void *buf, uint32_t *bufsize)
+{
+ vxlan_t *vxl = arg;
+
+ /* vxlan/listen_ip */
+ if (strcmp(pr_name, vxlan_props[0]) == 0) {
+ if (*bufsize < sizeof (struct in6_addr))
+ return (EOVERFLOW);
+
+ mutex_enter(&vxl->vxl_lock);
+ if (vxl->vxl_hladdr == B_FALSE) {
+ *bufsize = 0;
+ } else {
+ bcopy(&vxl->vxl_laddr, buf, sizeof (struct in6_addr));
+ *bufsize = sizeof (struct in6_addr);
+ }
+ mutex_exit(&vxl->vxl_lock);
+ return (0);
+ }
+
+ /* vxlan/listen_port */
+ if (strcmp(pr_name, vxlan_props[1]) == 0) {
+ uint64_t val;
+ if (*bufsize < sizeof (uint64_t))
+ return (EOVERFLOW);
+
+ mutex_enter(&vxl->vxl_lock);
+ val = vxl->vxl_lport;
+ bcopy(&val, buf, sizeof (uint64_t));
+ *bufsize = sizeof (uint64_t);
+ mutex_exit(&vxl->vxl_lock);
+ return (0);
+ }
+
+ return (EINVAL);
+}
+
+static int
+vxlan_o_setprop(void *arg, const char *pr_name, const void *buf,
+ uint32_t bufsize)
+{
+ vxlan_t *vxl = arg;
+
+ /* vxlan/listen_ip */
+ if (strcmp(pr_name, vxlan_props[0]) == 0) {
+ const struct in6_addr *ipv6 = buf;
+ if (bufsize != sizeof (struct in6_addr))
+ return (EINVAL);
+
+ if (IN6_IS_ADDR_V4COMPAT(ipv6))
+ return (EINVAL);
+
+ if (IN6_IS_ADDR_MULTICAST(ipv6))
+ return (EINVAL);
+
+ if (IN6_IS_ADDR_6TO4(ipv6))
+ return (EINVAL);
+
+ if (IN6_IS_ADDR_V4MAPPED(ipv6)) {
+ ipaddr_t v4;
+ IN6_V4MAPPED_TO_IPADDR(ipv6, v4);
+ if (IN_MULTICAST(v4))
+ return (EINVAL);
+ }
+
+ mutex_enter(&vxl->vxl_lock);
+ vxl->vxl_hladdr = B_TRUE;
+ bcopy(ipv6, &vxl->vxl_laddr, sizeof (struct in6_addr));
+ mutex_exit(&vxl->vxl_lock);
+
+ return (0);
+ }
+
+ /* vxlan/listen_port */
+ if (strcmp(pr_name, vxlan_props[1]) == 0) {
+ const uint64_t *valp = buf;
+		if (bufsize != sizeof (uint64_t))
+ return (EINVAL);
+
+ if (*valp == 0 || *valp > UINT16_MAX)
+ return (EINVAL);
+
+ mutex_enter(&vxl->vxl_lock);
+ vxl->vxl_lport = *valp;
+ mutex_exit(&vxl->vxl_lock);
+ return (0);
+ }
+ return (EINVAL);
+}
+
+static int
+vxlan_o_propinfo(const char *pr_name, overlay_prop_handle_t phdl)
+{
+ /* vxlan/listen_ip */
+ if (strcmp(pr_name, vxlan_props[0]) == 0) {
+ overlay_prop_set_name(phdl, vxlan_props[0]);
+ overlay_prop_set_prot(phdl, OVERLAY_PROP_PERM_RRW);
+ overlay_prop_set_type(phdl, OVERLAY_PROP_T_IP);
+ overlay_prop_set_nodefault(phdl);
+ return (0);
+ }
+
+ if (strcmp(pr_name, vxlan_props[1]) == 0) {
+ overlay_prop_set_name(phdl, vxlan_props[1]);
+ overlay_prop_set_prot(phdl, OVERLAY_PROP_PERM_RRW);
+ overlay_prop_set_type(phdl, OVERLAY_PROP_T_UINT);
+ (void) overlay_prop_set_default(phdl, &vxlan_defport,
+ sizeof (vxlan_defport));
+ overlay_prop_set_range_uint32(phdl, 1, UINT16_MAX);
+ return (0);
+ }
+
+ return (EINVAL);
+}
+
+static struct overlay_plugin_ops vxlan_o_ops = {
+ 0,
+ vxlan_o_init,
+ vxlan_o_fini,
+ vxlan_o_encap,
+ vxlan_o_decap,
+ vxlan_o_socket,
+ vxlan_o_sockopt,
+ vxlan_o_getprop,
+ vxlan_o_setprop,
+ vxlan_o_propinfo
+};
+
+static struct modlmisc vxlan_modlmisc = {
+ &mod_miscops,
+ "VXLAN encap plugin"
+};
+
+static struct modlinkage vxlan_modlinkage = {
+ MODREV_1,
+ &vxlan_modlmisc
+};
+
+int
+_init(void)
+{
+ int err;
+ overlay_plugin_register_t *ovrp;
+
+ ovrp = overlay_plugin_alloc(OVEP_VERSION);
+ if (ovrp == NULL)
+ return (ENOTSUP);
+ ovrp->ovep_name = vxlan_ident;
+ ovrp->ovep_ops = &vxlan_o_ops;
+ ovrp->ovep_id_size = VXLAN_ID_LEN;
+ ovrp->ovep_flags = OVEP_F_VLAN_TAG;
+ ovrp->ovep_dest = OVERLAY_PLUGIN_D_IP | OVERLAY_PLUGIN_D_PORT;
+ ovrp->ovep_props = vxlan_props;
+
+ if ((err = overlay_plugin_register(ovrp)) == 0) {
+ if ((err = mod_install(&vxlan_modlinkage)) != 0) {
+ (void) overlay_plugin_unregister(vxlan_ident);
+ }
+ }
+
+ overlay_plugin_free(ovrp);
+ return (err);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&vxlan_modlinkage, modinfop));
+}
+
+int
+_fini(void)
+{
+ int err;
+
+ if ((err = overlay_plugin_unregister(vxlan_ident)) != 0)
+ return (err);
+
+ return (mod_remove(&vxlan_modlinkage));
+}
diff --git a/usr/src/uts/common/io/physmem.c b/usr/src/uts/common/io/physmem.c
index 39d5003b02..c48fecd133 100644
--- a/usr/src/uts/common/io/physmem.c
+++ b/usr/src/uts/common/io/physmem.c
@@ -21,6 +21,7 @@
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright 2016 Joyent, Inc.
*/
@@ -807,6 +808,13 @@ physmem_open(dev_t *devp, int flag, int otyp, cred_t *credp)
int ret;
static int msg_printed = 0;
+ /*
+ * This device should never be visible in a zone, but if it somehow
+ * does get created we refuse to allow the zone to use it.
+ */
+ if (crgetzoneid(credp) != GLOBAL_ZONEID)
+ return (EACCES);
+
if ((flag & (FWRITE | FREAD)) != (FWRITE | FREAD)) {
return (EINVAL);
}
diff --git a/usr/src/uts/common/io/pseudo.conf b/usr/src/uts/common/io/pseudo.conf
index 42248e93d6..08affec609 100644
--- a/usr/src/uts/common/io/pseudo.conf
+++ b/usr/src/uts/common/io/pseudo.conf
@@ -22,8 +22,7 @@
#
# Copyright 2003 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
-#
-# ident "%Z%%M% %I% %E% SMI"
+# Copyright 2014 Joyent, Inc. All rights reserved.
#
# This file is private to the pseudonex driver. It should not be edited.
#
@@ -38,3 +37,9 @@ name="pseudo" class="root" instance=0;
# /pseudo; it has as its children the zone console pseudo nodes.
#
name="zconsnex" parent="/pseudo" instance=1 valid-children="zcons";
+
+#
+# zfdnex is an alias for pseudo; this node is instantiated as a child of
+# /pseudo; it has as its children the zone fd pseudo nodes.
+#
+name="zfdnex" parent="/pseudo" instance=2 valid-children="zfd";
diff --git a/usr/src/uts/common/io/pseudonex.c b/usr/src/uts/common/io/pseudonex.c
index f83b0abf39..0ae06f88cc 100644
--- a/usr/src/uts/common/io/pseudonex.c
+++ b/usr/src/uts/common/io/pseudonex.c
@@ -83,6 +83,8 @@ static int pseudonex_detach(dev_info_t *, ddi_detach_cmd_t);
static int pseudonex_open(dev_t *, int, int, cred_t *);
static int pseudonex_close(dev_t, int, int, cred_t *);
static int pseudonex_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
+static int pseudonex_fm_init(dev_info_t *, dev_info_t *, int,
+ ddi_iblock_cookie_t *);
static int pseudonex_ctl(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *,
void *);
@@ -90,6 +92,8 @@ static void *pseudonex_state;
typedef struct pseudonex_state {
dev_info_t *pnx_devi;
+ int pnx_fmcap;
+ ddi_iblock_cookie_t pnx_fm_ibc;
} pseudonex_state_t;
static struct bus_ops pseudonex_bus_ops = {
@@ -116,7 +120,7 @@ static struct bus_ops pseudonex_bus_ops = {
NULL, /* bus_intr_ctl */
NULL, /* bus_config */
NULL, /* bus_unconfig */
- NULL, /* bus_fm_init */
+ pseudonex_fm_init, /* bus_fm_init */
NULL, /* bus_fm_fini */
NULL, /* bus_fm_access_enter */
NULL, /* bus_fm_access_exit */
@@ -228,6 +232,9 @@ pseudonex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
pnx_state = ddi_get_soft_state(pseudonex_state, instance);
pnx_state->pnx_devi = devi;
+ pnx_state->pnx_fmcap = DDI_FM_EREPORT_CAPABLE;
+ ddi_fm_init(devi, &pnx_state->pnx_fmcap, &pnx_state->pnx_fm_ibc);
+
if (ddi_create_minor_node(devi, "devctl", S_IFCHR, instance,
DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
ddi_remove_minor_node(devi, NULL);
@@ -247,6 +254,10 @@ pseudonex_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
if (cmd == DDI_SUSPEND)
return (DDI_SUCCESS);
+ if (cmd != DDI_DETACH)
+ return (DDI_FAILURE);
+
+ ddi_fm_fini(devi);
ddi_remove_minor_node(devi, NULL);
ddi_soft_state_free(pseudonex_state, instance);
return (DDI_SUCCESS);
@@ -375,6 +386,19 @@ pseudonex_auto_assign(dev_info_t *child)
}
static int
+pseudonex_fm_init(dev_info_t *dip, dev_info_t *tdip, int cap,
+ ddi_iblock_cookie_t *ibc)
+{
+ pseudonex_state_t *pnx_state;
+
+ pnx_state = ddi_get_soft_state(pseudonex_state, ddi_get_instance(dip));
+ ASSERT(pnx_state != NULL);
+ ASSERT(ibc != NULL);
+ *ibc = pnx_state->pnx_fm_ibc;
+ return (pnx_state->pnx_fmcap & cap);
+}
+
+static int
pseudonex_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
void *arg, void *result)
{
diff --git a/usr/src/uts/common/io/ptm.c b/usr/src/uts/common/io/ptm.c
index 400e9ffd10..07ffddc123 100644
--- a/usr/src/uts/common/io/ptm.c
+++ b/usr/src/uts/common/io/ptm.c
@@ -447,6 +447,18 @@ ptmclose(queue_t *rqp, int flag, cred_t *credp)
return (0);
}
+static boolean_t
+ptmptsopencb(ptmptsopencb_arg_t arg)
+{
+ struct pt_ttys *ptmp = (struct pt_ttys *)arg;
+ boolean_t rval;
+
+ PT_ENTER_READ(ptmp);
+ rval = (ptmp->pt_nullmsg != NULL);
+ PT_EXIT_READ(ptmp);
+ return (rval);
+}
+
/*
* The wput procedure will only handle ioctl and flush messages.
*/
@@ -574,6 +586,41 @@ ptmwput(queue_t *qp, mblk_t *mp)
miocack(qp, mp, 0, 0);
break;
}
+ case PTMPTSOPENCB:
+ {
+ mblk_t *dp; /* ioctl reply data */
+ ptmptsopencb_t *ppocb;
+
+ /* only allow the kernel to invoke this ioctl */
+ if (iocp->ioc_cr != kcred) {
+ miocnak(qp, mp, 0, EINVAL);
+ break;
+ }
+
+ /* we don't support transparent ioctls */
+ ASSERT(iocp->ioc_count != TRANSPARENT);
+ if (iocp->ioc_count == TRANSPARENT) {
+ miocnak(qp, mp, 0, EINVAL);
+ break;
+ }
+
+ /* allocate a response message */
+ dp = allocb(sizeof (ptmptsopencb_t), BPRI_MED);
+ if (dp == NULL) {
+ miocnak(qp, mp, 0, EAGAIN);
+ break;
+ }
+
+ /* initialize the ioctl results */
+ ppocb = (ptmptsopencb_t *)dp->b_rptr;
+ ppocb->ppocb_func = ptmptsopencb;
+ ppocb->ppocb_arg = (ptmptsopencb_arg_t)ptmp;
+
+ /* send the reply data */
+ mioc2ack(mp, dp, sizeof (ptmptsopencb_t), 0);
+ qreply(qp, mp);
+ break;
+ }
}
break;
diff --git a/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c b/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
index 72c8800f3e..dc5e8eafc9 100644
--- a/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
+++ b/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
@@ -22,7 +22,7 @@
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
- * Copyright (c) 2014, Joyent, Inc. All rights reserved.
+ * Copyright 2015, Joyent, Inc.
* Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
*/
@@ -72,6 +72,7 @@
#include <sys/file.h>
#include <sys/policy.h>
#include <sys/model.h>
+#include <sys/refhash.h>
#include <sys/sysevent.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dr.h>
@@ -99,7 +100,6 @@
#include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
#include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
#include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
-#include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
#include <sys/raidioctl.h>
#include <sys/fs/dv_node.h> /* devfs_clean */
diff --git a/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas_hash.c b/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas_hash.c
deleted file mode 100644
index 8f96c2d9f1..0000000000
--- a/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas_hash.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * This file and its contents are supplied under the terms of the
- * Common Development and Distribution License ("CDDL"), version 1.0.
- * You may only use this file in accordance with the terms of version
- * 1.0 of the CDDL.
- *
- * A full copy of the text of the CDDL should have accompanied this
- * source. A copy of the CDDL is also available via the Internet at
- * http://www.illumos.org/license/CDDL.
- */
-
-/*
- * Copyright 2014 Joyent, Inc. All rights reserved.
- */
-
-#include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
-#include <sys/sysmacros.h>
-#include <sys/types.h>
-#include <sys/kmem.h>
-#include <sys/list.h>
-#include <sys/ddi.h>
-
-#ifdef lint
-extern refhash_link_t *obj_to_link(refhash_t *, void *);
-extern void *link_to_obj(refhash_t *, refhash_link_t *);
-extern void *obj_to_tag(refhash_t *, void *);
-#else
-#define obj_to_link(_h, _o) \
- ((refhash_link_t *)(((char *)(_o)) + (_h)->rh_link_off))
-#define link_to_obj(_h, _l) \
- ((void *)(((char *)(_l)) - (_h)->rh_link_off))
-#define obj_to_tag(_h, _o) \
- ((void *)(((char *)(_o)) + (_h)->rh_tag_off))
-#endif
-
-refhash_t *
-refhash_create(uint_t bucket_count, refhash_hash_f hash,
- refhash_cmp_f cmp, refhash_dtor_f dtor, size_t obj_size, size_t link_off,
- size_t tag_off, int km_flags)
-{
- refhash_t *hp;
- uint_t i;
-
- hp = kmem_alloc(sizeof (refhash_t), km_flags);
- if (hp == NULL)
- return (NULL);
- hp->rh_buckets = kmem_zalloc(bucket_count * sizeof (list_t), km_flags);
- if (hp->rh_buckets == NULL) {
- kmem_free(hp, sizeof (refhash_t));
- return (NULL);
- }
- hp->rh_bucket_count = bucket_count;
-
- for (i = 0; i < bucket_count; i++) {
- list_create(&hp->rh_buckets[i], sizeof (refhash_link_t),
- offsetof(refhash_link_t, rhl_chain_link));
- }
- list_create(&hp->rh_objs, sizeof (refhash_link_t),
- offsetof(refhash_link_t, rhl_global_link));
-
- hp->rh_obj_size = obj_size;
- hp->rh_link_off = link_off;
- hp->rh_tag_off = tag_off;
- hp->rh_hash = hash;
- hp->rh_cmp = cmp;
- hp->rh_dtor = dtor;
-
- return (hp);
-}
-
-void
-refhash_destroy(refhash_t *hp)
-{
- ASSERT(list_is_empty(&hp->rh_objs));
-
- kmem_free(hp->rh_buckets, hp->rh_bucket_count * sizeof (list_t));
- kmem_free(hp, sizeof (refhash_t));
-}
-
-void
-refhash_insert(refhash_t *hp, void *op)
-{
- uint_t bucket;
- refhash_link_t *lp = obj_to_link(hp, op);
-
- bucket = hp->rh_hash(obj_to_tag(hp, op)) % hp->rh_bucket_count;
- list_link_init(&lp->rhl_chain_link);
- list_link_init(&lp->rhl_global_link);
- lp->rhl_flags = 0;
- lp->rhl_refcnt = 0;
- list_insert_tail(&hp->rh_buckets[bucket], lp);
- list_insert_tail(&hp->rh_objs, lp);
-}
-
-static void
-refhash_delete(refhash_t *hp, void *op)
-{
- refhash_link_t *lp = obj_to_link(hp, op);
- uint_t bucket;
-
- bucket = hp->rh_hash(obj_to_tag(hp, op)) % hp->rh_bucket_count;
- list_remove(&hp->rh_buckets[bucket], lp);
- list_remove(&hp->rh_objs, lp);
- hp->rh_dtor(op);
-}
-
-void
-refhash_remove(refhash_t *hp, void *op)
-{
- refhash_link_t *lp = obj_to_link(hp, op);
-
- if (lp->rhl_refcnt > 0) {
- lp->rhl_flags |= RHL_F_DEAD;
- } else {
- refhash_delete(hp, op);
- }
-}
-
-void *
-refhash_lookup(refhash_t *hp, const void *tp)
-{
- uint_t bucket;
- refhash_link_t *lp;
- void *op;
-
- bucket = hp->rh_hash(tp) % hp->rh_bucket_count;
- for (lp = list_head(&hp->rh_buckets[bucket]); lp != NULL;
- lp = list_next(&hp->rh_buckets[bucket], lp)) {
- op = link_to_obj(hp, lp);
- if (hp->rh_cmp(obj_to_tag(hp, op), tp) == 0 &&
- !(lp->rhl_flags & RHL_F_DEAD)) {
- return (op);
- }
- }
-
- return (NULL);
-}
-
-void *
-refhash_linear_search(refhash_t *hp, refhash_eval_f eval, void *arg)
-{
- void *op;
- refhash_link_t *lp;
-
- for (lp = list_head(&hp->rh_objs); lp != NULL;
- lp = list_next(&hp->rh_objs, lp)) {
- op = link_to_obj(hp, lp);
- if (eval(op, arg) == 0)
- return (op);
- }
-
- return (NULL);
-}
-
-void
-refhash_hold(refhash_t *hp, void *op)
-{
- refhash_link_t *lp = obj_to_link(hp, op);
-
- ++lp->rhl_refcnt;
-}
-
-void
-refhash_rele(refhash_t *hp, void *op)
-{
- refhash_link_t *lp = obj_to_link(hp, op);
-
- ASSERT(lp->rhl_refcnt > 0);
-
- if (--lp->rhl_refcnt == 0 && (lp->rhl_flags & RHL_F_DEAD))
- refhash_remove(hp, op);
-}
-
-void *
-refhash_first(refhash_t *hp)
-{
- refhash_link_t *lp;
-
- lp = list_head(&hp->rh_objs);
- if (lp == NULL)
- return (NULL);
-
- ++lp->rhl_refcnt;
-
- return (link_to_obj(hp, lp));
-}
-
-void *
-refhash_next(refhash_t *hp, void *op)
-{
- refhash_link_t *lp;
-
- lp = obj_to_link(hp, op);
- while ((lp = list_next(&hp->rh_objs, lp)) != NULL) {
- if (!(lp->rhl_flags & RHL_F_DEAD))
- break;
- }
-
- refhash_rele(hp, op);
- if (lp == NULL)
- return (NULL);
-
- ++lp->rhl_refcnt;
-
- return (link_to_obj(hp, lp));
-}
-
-boolean_t
-refhash_obj_valid(refhash_t *hp, const void *op)
-{
- /* LINTED - E_ARG_INCOMPATIBLE_WITH_ARG_L */
- const refhash_link_t *lp = obj_to_link(hp, op);
-
- return ((lp->rhl_flags & RHL_F_DEAD) != 0);
-}
diff --git a/usr/src/uts/common/io/scsi/targets/sd.c b/usr/src/uts/common/io/scsi/targets/sd.c
index ae1e7e0fc3..dc5dc22e37 100644
--- a/usr/src/uts/common/io/scsi/targets/sd.c
+++ b/usr/src/uts/common/io/scsi/targets/sd.c
@@ -3503,9 +3503,13 @@ sd_set_mmc_caps(sd_ssc_t *ssc)
* according to the successful response to the page
* 0x2A mode sense request.
*/
- scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
- "sd_set_mmc_caps: Mode Sense returned "
- "invalid block descriptor length\n");
+ /*
+ * The following warning occurs due to the KVM CD-ROM
+ * mishandling the multi-media commands. Ignore it.
+ * scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
+ * "sd_set_mmc_caps: Mode Sense returned "
+ * "invalid block descriptor length\n");
+ */
kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
return;
}
@@ -4450,18 +4454,77 @@ sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
{
struct scsi_inquiry *sd_inq;
int rval = SD_SUCCESS;
+ char *p;
+ int chk_vidlen = 0, chk_pidlen = 0;
+ int has_tail = 0;
+ static const int VSZ = sizeof (sd_inq->inq_vid);
+ static const int PSZ = sizeof (sd_inq->inq_pid);
ASSERT(un != NULL);
sd_inq = un->un_sd->sd_inq;
ASSERT(id != NULL);
/*
- * We use the inq_vid as a pointer to a buffer containing the
- * vid and pid and use the entire vid/pid length of the table
- * entry for the comparison. This works because the inq_pid
- * data member follows inq_vid in the scsi_inquiry structure.
+ * We would like to use the inq_vid as a pointer to a buffer
+ * containing the vid and pid and use the entire vid/pid length of
+ * the table entry for the comparison. However, this does not work
+ * because, while the inq_pid data member follows inq_vid in the
+ * scsi_inquiry structure, we do not control the contents of this
+ * buffer, and some broken devices violate SPC 4.3.1 and return
+ * fields with null bytes in them.
+ */
+ chk_vidlen = MIN(VSZ, idlen);
+ p = id + chk_vidlen - 1;
+	while (chk_vidlen > 0 && *p == ' ') {
+ --p;
+ --chk_vidlen;
+ }
+
+ /*
+ * If it's all spaces, check the whole thing.
*/
- if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
+ if (chk_vidlen == 0)
+ chk_vidlen = MIN(VSZ, idlen);
+
+ if (idlen > VSZ) {
+ chk_pidlen = idlen - VSZ;
+ p = id + idlen - 1;
+		while (chk_pidlen > 0 && *p == ' ') {
+ --p;
+ --chk_pidlen;
+ }
+ if (chk_pidlen == 0)
+ chk_pidlen = MIN(PSZ, idlen - VSZ);
+ }
+
+ /*
+ * There's one more thing we need to do here. If the user specified
+ * an ID with trailing spaces, we need to make sure the inquiry
+ * vid/pid has only spaces or NULs after the check length; otherwise, it
+ * can't match.
+ */
+ if (idlen > chk_vidlen && chk_vidlen < VSZ) {
+ for (p = sd_inq->inq_vid + chk_vidlen;
+ p < sd_inq->inq_vid + VSZ; ++p) {
+ if (*p != ' ' && *p != '\0') {
+ ++has_tail;
+ break;
+ }
+ }
+ }
+ if (idlen > chk_pidlen + VSZ && chk_pidlen < PSZ) {
+ for (p = sd_inq->inq_pid + chk_pidlen;
+ p < sd_inq->inq_pid + PSZ; ++p) {
+ if (*p != ' ' && *p != '\0') {
+ ++has_tail;
+ break;
+ }
+ }
+ }
+
+ if (has_tail || strncasecmp(sd_inq->inq_vid, id, chk_vidlen) != 0 ||
+ (idlen > VSZ &&
+ strncasecmp(sd_inq->inq_pid, id + VSZ, chk_pidlen) != 0)) {
/*
* The user id string is compared to the inquiry vid/pid
* using a case insensitive comparison and ignoring
diff --git a/usr/src/uts/common/io/signalfd.c b/usr/src/uts/common/io/signalfd.c
index 32f8f85f7a..4ab4f36d4e 100644
--- a/usr/src/uts/common/io/signalfd.c
+++ b/usr/src/uts/common/io/signalfd.c
@@ -10,7 +10,7 @@
*/
/*
- * Copyright 2015 Joyent, Inc.
+ * Copyright 2016 Joyent, Inc.
*/
/*
@@ -19,97 +19,73 @@
*
* As described on the signalfd(3C) man page, the general idea behind these
* file descriptors is that they can be used to synchronously consume signals
- * via the read(2) syscall. That capability already exists with the
- * sigwaitinfo(3C) function but the key advantage of signalfd is that, because
- * it is file descriptor based, poll(2) can be used to determine when signals
- * are available to be consumed.
+ * via the read(2) syscall. While that capability already exists with the
+ * sigwaitinfo(3C) function, signalfd holds an advantage since it is file
+ * descriptor based: it is able to use the event facilities (poll(2),
+ * /dev/poll, event ports) to notify interested parties when consumable
+ * signals arrive.
*
- * The general implementation uses signalfd_state to hold both the signal set
- * and poll head for an open file descriptor. Because a process can be using
- * different sigfds with different signal sets, each signalfd_state poll head
- * can be thought of as an independent signal stream and the thread(s) waiting
- * on that stream will get poll notification when any signal in the
- * corresponding set is received.
+ * The signalfd lifecycle begins when a process opens /dev/signalfd. A minor
+ * will be allocated for it along with an associated signalfd_state_t struct.
+ * That structure is where the mask of desired signals resides.
*
- * The sigfd_proc_state_t struct lives on the proc_t and maintains per-proc
- * state for function callbacks and data when the proc needs to do work during
- * signal delivery for pollwakeup.
+ * Reading from the signalfd is straightforward and mimics the kernel behavior
+ * for sigtimedwait(). Signals continue to live on either the proc's p_sig, or
+ * thread's t_sig, member. During a read operation, those which match the mask
+ * are consumed so they are no longer pending.
*
- * The read side of the implementation is straightforward and mimics the
- * kernel behavior for sigtimedwait(). Signals continue to live on either
- * the proc's p_sig, or thread's t_sig, member. Read consumes the signal so
- * that it is no longer pending.
+ * The poll side is more complex. Every time a signal is delivered, all of the
+ * signalfds on the process need to be examined in order to pollwake threads
+ * waiting for signal arrival.
*
- * The poll side is more complex since all of the sigfds on the process need
- * to be examined every time a signal is delivered to the process in order to
- * pollwake any thread waiting in poll for that signal.
+ * When a thread polling on a signalfd requires a pollhead, several steps must
+ * be taken to safely ensure the proper result. A sigfd_proc_state_t is
+ * created for the calling process if it does not yet exist. It holds the
+ * list of sigfd_poll_waiter_t structures which associate pollheads with
+ * signalfd_state_t entries. That list is walked to find a
+ * sigfd_poll_waiter_t matching the signalfd_state_t which corresponds to the
+ * polled resource. If one is found, it is reused. Otherwise a new one is
+ * created, incrementing the refcount on the signalfd_state_t, and it is added
+ * to the sigfd_poll_waiter_t list.
*
- * Because it is likely that a process will only be using one, or a few, sigfds,
- * but many total file descriptors, we maintain a list of sigfds which need
- * pollwakeup. The list lives on the proc's p_sigfd struct. In this way only
- * zero, or a few, of the state structs will need to be examined every time a
- * signal is delivered to the process, instead of having to examine all of the
- * file descriptors to find the state structs. When a state struct with a
- * matching signal set is found then pollwakeup is called.
+ * The complications imposed by fork(2) are why the pollhead is stored in the
+ * associated sigfd_poll_waiter_t instead of directly in the signalfd_state_t.
+ * More than one process can hold a reference to the signalfd at a time but
+ * arriving signals should wake only process-local pollers. Additionally,
+ * signalfd_close is called only when the last referencing fd is closed,
+ * masking the earlier closes by threads which released their references. This
+ * necessitates reference counting on the signalfd_state_t so it can persist
+ * after close until all poll references have been released. Doing so
+ * ensures that blocked pollers which hold references to the signalfd_state_t
+ * will be able to do clean-up after the descriptor itself has been closed.
*
- * The sigfd_list is self-cleaning; as signalfd_pollwake_cb is called, the list
- * will clear out on its own. There is an exit helper (signalfd_exit_helper)
- * which cleans up any remaining per-proc state when the process exits.
+ * When a signal arrives in a process polling on signalfd, signalfd_pollwake_cb
+ * is called via the pointer in sigfd_proc_state_t. It will walk over the
+ * sigfd_poll_waiter_t entries present in the list, searching for any
+ * associated with a signalfd_state_t with a matching signal mask. The
+ * approach of keeping the poller list in p_sigfd was chosen because a process
+ * is likely to use few signalfds relative to its total file descriptors. It
+ * reduces the work required for each received signal.
*
- * The main complexity with signalfd is the interaction of forking and polling.
- * This interaction is complex because now two processes have a fd that
- * references the same dev_t (and its associated signalfd_state), but signals
- * go to only one of those processes. Also, we don't know when one of the
- * processes closes its fd because our 'close' entry point is only called when
- * the last fd is closed (which could be by either process).
+ * The sigfd_list is self-cleaning; as signalfd_pollwake_cb is called, the list
+ * will clear out on its own. Any remaining per-process state which remains
+ * will be cleaned up by the exit helper (signalfd_exit_helper).
*
- * Because the state struct is referenced by both file descriptors, and the
- * state struct represents a signal stream needing a pollwakeup, if both
- * processes were polling then both processes would get a pollwakeup when a
- * signal arrives for either process (that is, the pollhead is associated with
- * our dev_t so when a signal arrives the pollwakeup wakes up all waiters).
+ * The structures associated with signalfd state are designed to operate
+ * correctly across fork, but there is one caveat that applies. Using
+ * fork-shared signalfd descriptors in conjunction with fork-shared caching poll
+ * descriptors (such as /dev/poll or event ports) will result in missed poll
+ * wake-ups. This is caused by the pollhead identity of signalfd descriptors
+ * being dependent on the process they are polled from. Because it has a
+ * thread-local cache, poll(2) is unaffected by this limitation.
*
- * Fortunately this is not a common problem in practice, but the implementation
- * attempts to mitigate unexpected behavior. The typical behavior is that the
- * parent has been polling the signalfd (which is why it was open in the first
- * place) and the parent might have a pending signalfd_state (with the
- * pollhead) on its per-process sigfd_list. After the fork the child will
- * simply close that fd (among others) as part of the typical fork/close/exec
- * pattern. Because the child will never poll that fd, it will never get any
- * state onto its own sigfd_list (the child starts with a null list). The
- * intention is that the child sees no pollwakeup activity for signals unless
- * it explicitly reinvokes poll on the sigfd.
+ * Lock ordering:
*
- * As background, there are two primary polling cases to consider when the
- * parent process forks:
- * 1) If any thread is blocked in poll(2) then both the parent and child will
- * return from the poll syscall with EINTR. This means that if either
- * process wants to re-poll on a sigfd then it needs to re-run poll and
- * would come back in to the signalfd_poll entry point. The parent would
- * already have the dev_t's state on its sigfd_list and the child would not
- * have anything there unless it called poll again on its fd.
- * 2) If the process is using /dev/poll(7D) then the polling info is being
- * cached by the poll device and the process might not currently be blocked
- * on anything polling related. A subsequent DP_POLL ioctl will not invoke
- * our signalfd_poll entry point again. Because the parent still has its
- * sigfd_list setup, an incoming signal will hit our signalfd_pollwake_cb
- * entry point, which in turn calls pollwake, and /dev/poll will do the
- * right thing on DP_POLL. The child will not have a sigfd_list yet so the
- * signal will not cause a pollwakeup. The dp code does its own handling for
- * cleaning up its cache.
+ * 1. signalfd_lock
+ * 2. signalfd_state_t`sfd_lock
*
- * This leaves only one odd corner case. If the parent and child both use
- * the dup-ed sigfd to poll then when a signal is delivered to either process
- * there is no way to determine which one should get the pollwakeup (since
- * both processes will be queued on the same signal stream poll head). What
- * happens in this case is that both processes will return from poll, but only
- * one of them will actually have a signal to read. The other will return
- * from read with EAGAIN, or block. This case is actually similar to the
- * situation within a single process which got two different sigfd's with the
- * same mask (or poll on two fd's that are dup-ed). Both would return from poll
- * when a signal arrives but only one read would consume the signal and the
- * other read would fail or block. Applications which poll on shared fd's
- * cannot assume that a subsequent read will actually obtain data.
+ * 1. proc_t`p_lock (to walk p_sigfd)
+ * 2. signalfd_state_t`sfd_lock
+ * 2a. signalfd_lock (after sfd_lock is dropped, when sfd_count falls to 0)
*/
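The discussion above is all kernel plumbing; the userland contract stays small. A minimal sketch of the consumption pattern the comment describes, assuming the signalfd(3C) interface (error handling elided):

	#include <signal.h>
	#include <poll.h>
	#include <unistd.h>
	#include <sys/signalfd.h>

	sigset_t mask;
	signalfd_siginfo_t ssi;
	struct pollfd pfd;
	int fd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	(void) sigprocmask(SIG_BLOCK, &mask, NULL);	/* consume via fd only */

	fd = signalfd(-1, &mask, 0);
	pfd.fd = fd;
	pfd.events = POLLIN;

	/* Wait for a consumable signal, then read it off synchronously. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		(void) read(fd, &ssi, sizeof (ssi));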
#include <sys/ddi.h>
@@ -123,118 +99,150 @@
#include <sys/schedctl.h>
#include <sys/id_space.h>
#include <sys/sdt.h>
+#include <sys/brand.h>
typedef struct signalfd_state signalfd_state_t;
struct signalfd_state {
- kmutex_t sfd_lock; /* lock protecting state */
- pollhead_t sfd_pollhd; /* poll head */
- k_sigset_t sfd_set; /* signals for this fd */
- signalfd_state_t *sfd_next; /* next state on global list */
+ list_node_t sfd_list; /* node in global list */
+ kmutex_t sfd_lock; /* protects fields below */
+ uint_t sfd_count; /* ref count */
+ boolean_t sfd_valid; /* valid while open */
+ k_sigset_t sfd_set; /* signals for this fd */
};
+typedef struct sigfd_poll_waiter {
+ list_node_t spw_list;
+ signalfd_state_t *spw_state;
+ pollhead_t spw_pollhd;
+} sigfd_poll_waiter_t;
+
/*
- * Internal global variables.
+ * Protects global state in signalfd_devi, signalfd_minor, signalfd_softstate,
+ * and signalfd_state (including sfd_list field of members)
*/
-static kmutex_t signalfd_lock; /* lock protecting state */
+static kmutex_t signalfd_lock;
static dev_info_t *signalfd_devi; /* device info */
static id_space_t *signalfd_minor; /* minor number arena */
static void *signalfd_softstate; /* softstate pointer */
-static signalfd_state_t *signalfd_state; /* global list of state */
+static list_t signalfd_state; /* global list of state */
+
-/*
- * If we don't already have an entry in the proc's list for this state, add one.
- */
static void
-signalfd_wake_list_add(signalfd_state_t *state)
+signalfd_state_enter(signalfd_state_t *state)
{
- proc_t *p = curproc;
- list_t *lst;
- sigfd_wake_list_t *wlp;
-
- ASSERT(MUTEX_HELD(&p->p_lock));
- ASSERT(p->p_sigfd != NULL);
+ ASSERT(MUTEX_HELD(&state->sfd_lock));
+ ASSERT(state->sfd_count > 0);
+ VERIFY(state->sfd_valid == B_TRUE);
- lst = &((sigfd_proc_state_t *)p->p_sigfd)->sigfd_list;
- for (wlp = list_head(lst); wlp != NULL; wlp = list_next(lst, wlp)) {
- if (wlp->sigfd_wl_state == state)
- break;
- }
-
- if (wlp == NULL) {
- wlp = kmem_zalloc(sizeof (sigfd_wake_list_t), KM_SLEEP);
- wlp->sigfd_wl_state = state;
- list_insert_head(lst, wlp);
- }
+ state->sfd_count++;
}
static void
-signalfd_wake_rm(list_t *lst, sigfd_wake_list_t *wlp)
+signalfd_state_release(signalfd_state_t *state, boolean_t locked)
{
- list_remove(lst, wlp);
- kmem_free(wlp, sizeof (sigfd_wake_list_t));
+ ASSERT(MUTEX_HELD(&state->sfd_lock));
+ ASSERT(state->sfd_count > 0);
+
+ if (state->sfd_count == 1) {
+ VERIFY(state->sfd_valid == B_FALSE);
+ mutex_exit(&state->sfd_lock);
+ if (locked) {
+ ASSERT(MUTEX_HELD(&signalfd_lock));
+ list_remove(&signalfd_state, state);
+ } else {
+ ASSERT(MUTEX_NOT_HELD(&signalfd_lock));
+ mutex_enter(&signalfd_lock);
+ list_remove(&signalfd_state, state);
+ mutex_exit(&signalfd_lock);
+ }
+ kmem_free(state, sizeof (*state));
+ return;
+ }
+ state->sfd_count--;
+ mutex_exit(&state->sfd_lock);
}
-static void
-signalfd_wake_list_rm(proc_t *p, signalfd_state_t *state)
+static sigfd_poll_waiter_t *
+signalfd_wake_list_add(sigfd_proc_state_t *pstate, signalfd_state_t *state)
{
- sigfd_wake_list_t *wlp;
- list_t *lst;
+ list_t *lst = &pstate->sigfd_list;
+ sigfd_poll_waiter_t *pw;
- ASSERT(MUTEX_HELD(&p->p_lock));
+ for (pw = list_head(lst); pw != NULL; pw = list_next(lst, pw)) {
+ if (pw->spw_state == state)
+ break;
+ }
- if (p->p_sigfd == NULL)
- return;
+ if (pw == NULL) {
+ pw = kmem_zalloc(sizeof (*pw), KM_SLEEP);
+
+ mutex_enter(&state->sfd_lock);
+ signalfd_state_enter(state);
+ pw->spw_state = state;
+ mutex_exit(&state->sfd_lock);
+ list_insert_head(lst, pw);
+ }
+ return (pw);
+}
+
+static sigfd_poll_waiter_t *
+signalfd_wake_list_rm(sigfd_proc_state_t *pstate, signalfd_state_t *state)
+{
+ list_t *lst = &pstate->sigfd_list;
+ sigfd_poll_waiter_t *pw;
- lst = &((sigfd_proc_state_t *)p->p_sigfd)->sigfd_list;
- for (wlp = list_head(lst); wlp != NULL; wlp = list_next(lst, wlp)) {
- if (wlp->sigfd_wl_state == state) {
- signalfd_wake_rm(lst, wlp);
+ for (pw = list_head(lst); pw != NULL; pw = list_next(lst, pw)) {
+ if (pw->spw_state == state) {
break;
}
}
- if (list_is_empty(lst)) {
- ((sigfd_proc_state_t *)p->p_sigfd)->sigfd_pollwake_cb = NULL;
- list_destroy(lst);
- kmem_free(p->p_sigfd, sizeof (sigfd_proc_state_t));
- p->p_sigfd = NULL;
+ if (pw != NULL) {
+ list_remove(lst, pw);
+ mutex_enter(&state->sfd_lock);
+ signalfd_state_release(state, B_FALSE);
+ pw->spw_state = NULL;
}
+
+ return (pw);
}
static void
signalfd_wake_list_cleanup(proc_t *p)
{
- sigfd_wake_list_t *wlp;
+ sigfd_proc_state_t *pstate = p->p_sigfd;
+ sigfd_poll_waiter_t *pw;
list_t *lst;
ASSERT(MUTEX_HELD(&p->p_lock));
+ ASSERT(pstate != NULL);
+
+ lst = &pstate->sigfd_list;
+ while ((pw = list_remove_head(lst)) != NULL) {
+ signalfd_state_t *state = pw->spw_state;
- ((sigfd_proc_state_t *)p->p_sigfd)->sigfd_pollwake_cb = NULL;
+ pw->spw_state = NULL;
+ mutex_enter(&state->sfd_lock);
+ signalfd_state_release(state, B_FALSE);
- lst = &((sigfd_proc_state_t *)p->p_sigfd)->sigfd_list;
- while (!list_is_empty(lst)) {
- wlp = (sigfd_wake_list_t *)list_remove_head(lst);
- kmem_free(wlp, sizeof (sigfd_wake_list_t));
+ pollwakeup(&pw->spw_pollhd, POLLERR);
+ pollhead_clean(&pw->spw_pollhd);
+ kmem_free(pw, sizeof (*pw));
}
+ list_destroy(lst);
+
+ p->p_sigfd = NULL;
+ kmem_free(pstate, sizeof (*pstate));
}
static void
signalfd_exit_helper(void)
{
proc_t *p = curproc;
- list_t *lst;
-
- /* This being non-null is the only way we can get here */
- ASSERT(p->p_sigfd != NULL);
mutex_enter(&p->p_lock);
- lst = &((sigfd_proc_state_t *)p->p_sigfd)->sigfd_list;
-
signalfd_wake_list_cleanup(p);
- list_destroy(lst);
- kmem_free(p->p_sigfd, sizeof (sigfd_proc_state_t));
- p->p_sigfd = NULL;
mutex_exit(&p->p_lock);
}
@@ -254,35 +262,40 @@ static void
signalfd_pollwake_cb(void *arg0, int sig)
{
proc_t *p = (proc_t *)arg0;
+ sigfd_proc_state_t *pstate = (sigfd_proc_state_t *)p->p_sigfd;
list_t *lst;
- sigfd_wake_list_t *wlp;
+ sigfd_poll_waiter_t *pw;
ASSERT(MUTEX_HELD(&p->p_lock));
+ ASSERT(pstate != NULL);
- if (p->p_sigfd == NULL)
- return;
-
- lst = &((sigfd_proc_state_t *)p->p_sigfd)->sigfd_list;
- wlp = list_head(lst);
- while (wlp != NULL) {
- signalfd_state_t *state = wlp->sigfd_wl_state;
+ lst = &pstate->sigfd_list;
+ pw = list_head(lst);
+ while (pw != NULL) {
+ signalfd_state_t *state = pw->spw_state;
+ sigfd_poll_waiter_t *next;
+ short pollev;
mutex_enter(&state->sfd_lock);
-
- if (sigismember(&state->sfd_set, sig) &&
- state->sfd_pollhd.ph_list != NULL) {
- sigfd_wake_list_t *tmp = wlp;
-
- /* remove it from the list */
- wlp = list_next(lst, wlp);
- signalfd_wake_rm(lst, tmp);
-
- mutex_exit(&state->sfd_lock);
- pollwakeup(&state->sfd_pollhd, POLLRDNORM | POLLIN);
+ if (!state->sfd_valid) {
+ pollev = POLLERR;
+ } else if (sigismember(&state->sfd_set, sig)) {
+ pollev = POLLRDNORM | POLLIN;
} else {
mutex_exit(&state->sfd_lock);
- wlp = list_next(lst, wlp);
+ pw = list_next(lst, pw);
+ continue;
}
+
+ signalfd_state_release(state, B_FALSE);
+ pw->spw_state = NULL;
+ pollwakeup(&pw->spw_pollhd, pollev);
+ pollhead_clean(&pw->spw_pollhd);
+
+ next = list_next(lst, pw);
+ list_remove(lst, pw);
+ kmem_free(pw, sizeof (*pw));
+ pw = next;
}
}
@@ -290,7 +303,7 @@ _NOTE(ARGSUSED(1))
static int
signalfd_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
{
- signalfd_state_t *state;
+ signalfd_state_t *state, **sstate;
major_t major = getemajor(*devp);
minor_t minor = getminor(*devp);
@@ -300,18 +313,20 @@ signalfd_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
mutex_enter(&signalfd_lock);
minor = (minor_t)id_allocff(signalfd_minor);
-
if (ddi_soft_state_zalloc(signalfd_softstate, minor) != DDI_SUCCESS) {
id_free(signalfd_minor, minor);
mutex_exit(&signalfd_lock);
return (ENODEV);
}
- state = ddi_get_soft_state(signalfd_softstate, minor);
- *devp = makedevice(major, minor);
+ state = kmem_zalloc(sizeof (*state), KM_SLEEP);
+ state->sfd_valid = B_TRUE;
+ state->sfd_count = 1;
+ list_insert_head(&signalfd_state, (void *)state);
- state->sfd_next = signalfd_state;
- signalfd_state = state;
+ sstate = ddi_get_soft_state(signalfd_softstate, minor);
+ *sstate = state;
+ *devp = makedevice(major, minor);
mutex_exit(&signalfd_lock);
@@ -405,6 +420,9 @@ consume_signal(k_sigset_t set, uio_t *uio, boolean_t block)
lwp->lwp_extsig = 0;
mutex_exit(&p->p_lock);
+ if (PROC_IS_BRANDED(p) && BROP(p)->b_sigfd_translate)
+ BROP(p)->b_sigfd_translate(infop);
+
/* Convert k_siginfo into external, datamodel independent, struct. */
bzero(ssp, sizeof (*ssp));
ssp->ssi_signo = infop->si_signo;
@@ -439,7 +457,7 @@ _NOTE(ARGSUSED(2))
static int
signalfd_read(dev_t dev, uio_t *uio, cred_t *cr)
{
- signalfd_state_t *state;
+ signalfd_state_t *state, **sstate;
minor_t minor = getminor(dev);
boolean_t block = B_TRUE;
k_sigset_t set;
@@ -449,7 +467,8 @@ signalfd_read(dev_t dev, uio_t *uio, cred_t *cr)
if (uio->uio_resid < sizeof (signalfd_siginfo_t))
return (EINVAL);
- state = ddi_get_soft_state(signalfd_softstate, minor);
+ sstate = ddi_get_soft_state(signalfd_softstate, minor);
+ state = *sstate;
if (uio->uio_fmode & (FNDELAY|FNONBLOCK))
block = B_FALSE;
@@ -462,15 +481,26 @@ signalfd_read(dev_t dev, uio_t *uio, cred_t *cr)
return (set_errno(EINVAL));
do {
- res = consume_signal(state->sfd_set, uio, block);
- if (res == 0)
- got_one = B_TRUE;
+ res = consume_signal(set, uio, block);
- /*
- * After consuming one signal we won't block trying to consume
- * further signals.
- */
- block = B_FALSE;
+ if (res == 0) {
+ /*
+ * After consuming one signal, do not block while
+ * trying to consume more.
+ */
+ got_one = B_TRUE;
+ block = B_FALSE;
+
+ /*
+ * Refresh the matching signal set in case it was
+ * updated during the wait.
+ */
+ mutex_enter(&state->sfd_lock);
+ set = state->sfd_set;
+ mutex_exit(&state->sfd_lock);
+ if (sigisempty(&set))
+ break;
+ }
} while (res == 0 && uio->uio_resid >= sizeof (signalfd_siginfo_t));
if (got_one)
@@ -499,13 +529,14 @@ static int
signalfd_poll(dev_t dev, short events, int anyyet, short *reventsp,
struct pollhead **phpp)
{
- signalfd_state_t *state;
+ signalfd_state_t *state, **sstate;
minor_t minor = getminor(dev);
kthread_t *t = curthread;
proc_t *p = ttoproc(t);
short revents = 0;
- state = ddi_get_soft_state(signalfd_softstate, minor);
+ sstate = ddi_get_soft_state(signalfd_softstate, minor);
+ state = *sstate;
mutex_enter(&state->sfd_lock);
@@ -515,39 +546,36 @@ signalfd_poll(dev_t dev, short events, int anyyet, short *reventsp,
mutex_exit(&state->sfd_lock);
if (!(*reventsp = revents & events) && !anyyet) {
- *phpp = &state->sfd_pollhd;
+ sigfd_proc_state_t *pstate;
+ sigfd_poll_waiter_t *pw;
/*
* Enable pollwakeup handling.
*/
- if (p->p_sigfd == NULL) {
- sigfd_proc_state_t *pstate;
+ mutex_enter(&p->p_lock);
+ if ((pstate = (sigfd_proc_state_t *)p->p_sigfd) == NULL) {
- pstate = kmem_zalloc(sizeof (sigfd_proc_state_t),
- KM_SLEEP);
+ mutex_exit(&p->p_lock);
+ pstate = kmem_zalloc(sizeof (*pstate), KM_SLEEP);
list_create(&pstate->sigfd_list,
- sizeof (sigfd_wake_list_t),
- offsetof(sigfd_wake_list_t, sigfd_wl_lst));
+ sizeof (sigfd_poll_waiter_t),
+ offsetof(sigfd_poll_waiter_t, spw_list));
+ pstate->sigfd_pollwake_cb = signalfd_pollwake_cb;
+ /* Check again, after blocking for the alloc. */
mutex_enter(&p->p_lock);
- /* check again now that we're locked */
if (p->p_sigfd == NULL) {
p->p_sigfd = pstate;
} else {
/* someone beat us to it */
list_destroy(&pstate->sigfd_list);
- kmem_free(pstate, sizeof (sigfd_proc_state_t));
+ kmem_free(pstate, sizeof (*pstate));
+ pstate = p->p_sigfd;
}
- mutex_exit(&p->p_lock);
}
- mutex_enter(&p->p_lock);
- if (((sigfd_proc_state_t *)p->p_sigfd)->sigfd_pollwake_cb ==
- NULL) {
- ((sigfd_proc_state_t *)p->p_sigfd)->sigfd_pollwake_cb =
- signalfd_pollwake_cb;
- }
- signalfd_wake_list_add(state);
+ pw = signalfd_wake_list_add(pstate, state);
+ *phpp = &pw->spw_pollhd;
mutex_exit(&p->p_lock);
}
@@ -558,11 +586,12 @@ _NOTE(ARGSUSED(4))
static int
signalfd_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
{
- signalfd_state_t *state;
+ signalfd_state_t *state, **sstate;
minor_t minor = getminor(dev);
sigset_t mask;
- state = ddi_get_soft_state(signalfd_softstate, minor);
+ sstate = ddi_get_soft_state(signalfd_softstate, minor);
+ state = *sstate;
switch (cmd) {
case SIGNALFDIOC_MASK:
@@ -587,33 +616,42 @@ _NOTE(ARGSUSED(1))
static int
signalfd_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
- signalfd_state_t *state, **sp;
+ signalfd_state_t *state, **sstate;
+ sigfd_poll_waiter_t *pw = NULL;
minor_t minor = getminor(dev);
proc_t *p = curproc;
- state = ddi_get_soft_state(signalfd_softstate, minor);
-
- if (state->sfd_pollhd.ph_list != NULL) {
- pollwakeup(&state->sfd_pollhd, POLLERR);
- pollhead_clean(&state->sfd_pollhd);
- }
+ sstate = ddi_get_soft_state(signalfd_softstate, minor);
+ state = *sstate;
- /* Make sure our state is removed from our proc's pollwake list. */
+ /* Make sure state is removed from this proc's pollwake list. */
mutex_enter(&p->p_lock);
- signalfd_wake_list_rm(p, state);
- mutex_exit(&p->p_lock);
+ if (p->p_sigfd != NULL) {
+ sigfd_proc_state_t *pstate = p->p_sigfd;
- mutex_enter(&signalfd_lock);
+ pw = signalfd_wake_list_rm(pstate, state);
+ if (list_is_empty(&pstate->sigfd_list)) {
+ signalfd_wake_list_cleanup(p);
+ }
+ }
+ mutex_exit(&p->p_lock);
- /* Remove our state from our global list. */
- for (sp = &signalfd_state; *sp != state; sp = &((*sp)->sfd_next))
- VERIFY(*sp != NULL);
+ if (pw != NULL) {
+ pollwakeup(&pw->spw_pollhd, POLLERR);
+ pollhead_clean(&pw->spw_pollhd);
+ kmem_free(pw, sizeof (*pw));
+ }
- *sp = (*sp)->sfd_next;
+ mutex_enter(&signalfd_lock);
+ *sstate = NULL;
ddi_soft_state_free(signalfd_softstate, minor);
id_free(signalfd_minor, minor);
+ mutex_enter(&state->sfd_lock);
+ state->sfd_valid = B_FALSE;
+ signalfd_state_release(state, B_TRUE);
+
mutex_exit(&signalfd_lock);
return (0);
@@ -635,7 +673,7 @@ signalfd_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
}
if (ddi_soft_state_init(&signalfd_softstate,
- sizeof (signalfd_state_t), 0) != 0) {
+ sizeof (signalfd_state_t *), 0) != 0) {
cmn_err(CE_WARN, "signalfd failed to create soft state");
id_space_destroy(signalfd_minor);
mutex_exit(&signalfd_lock);
@@ -656,6 +694,9 @@ signalfd_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
sigfd_exit_helper = signalfd_exit_helper;
+ list_create(&signalfd_state, sizeof (signalfd_state_t),
+ offsetof(signalfd_state_t, sfd_list));
+
mutex_exit(&signalfd_lock);
return (DDI_SUCCESS);
@@ -673,10 +714,19 @@ signalfd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
return (DDI_FAILURE);
}
- /* list should be empty */
- VERIFY(signalfd_state == NULL);
-
mutex_enter(&signalfd_lock);
+
+ if (!list_is_empty(&signalfd_state)) {
+ /*
+ * There are dangling poll waiters holding signalfd_state_t
+ * entries on the global list. Detach is not possible until
+ * they purge themselves.
+ */
+ mutex_exit(&signalfd_lock);
+ return (DDI_FAILURE);
+ }
+ list_destroy(&signalfd_state);
+
id_space_destroy(signalfd_minor);
ddi_remove_minor_node(signalfd_devi, NULL);
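
The rework above moves pollhead ownership out of signalfd_state_t and into a
per-poll() sigfd_poll_waiter_t that holds a reference on the state. A minimal
sketch of the intended lifecycle, using only names that appear in this diff
(an illustration, not part of the patch):

    /*
     * poll(2) entry (signalfd_poll):
     *    pw = signalfd_wake_list_add(pstate, state);   - allocate a waiter
     *    *phpp = &pw->spw_pollhd;                      - hand out its pollhead
     *
     * signal delivery (signalfd_pollwake_cb):
     *    pollwakeup(&pw->spw_pollhd, POLLRDNORM | POLLIN);
     *    pollhead_clean(&pw->spw_pollhd);              - then unlink and free
     *
     * close(2) or process exit (signalfd_close, signalfd_wake_list_cleanup):
     *    pollwakeup(&pw->spw_pollhd, POLLERR);         - wake with an error
     */

Since each waiter owns its own pollhead and its own hold on the state, closing
one descriptor can no longer yank a pollhead out from under another poller.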
diff --git a/usr/src/uts/common/io/stream.c b/usr/src/uts/common/io/stream.c
index e9af19ca18..994ca8baa8 100644
--- a/usr/src/uts/common/io/stream.c
+++ b/usr/src/uts/common/io/stream.c
@@ -1451,6 +1451,16 @@ copyb(mblk_t *bp)
ndp = nbp->b_datap;
/*
+ * Copy the various checksum information that came in
+ * originally.
+ */
+ ndp->db_cksumstart = dp->db_cksumstart;
+ ndp->db_cksumend = dp->db_cksumend;
+ ndp->db_cksumstuff = dp->db_cksumstuff;
+ bcopy(dp->db_struioun.data, ndp->db_struioun.data,
+ sizeof (dp->db_struioun.data));
+
+ /*
* Well, here is a potential issue. If we are trying to
* trace a flow, and we copy the message, we might lose
* information about where this message might have been.
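
The copyb() hunk above makes the duplicate dblk carry the original's hardware
checksum offload fields. A small sketch of the invariant it establishes (field
names as in the hunk; illustrative only):

    mblk_t *nbp;

    if ((nbp = copyb(bp)) != NULL) {
        /* the offload metadata now survives the copy */
        ASSERT(nbp->b_datap->db_cksumstart == bp->b_datap->db_cksumstart);
        ASSERT(nbp->b_datap->db_cksumstuff == bp->b_datap->db_cksumstuff);
        ASSERT(nbp->b_datap->db_cksumend == bp->b_datap->db_cksumend);
    }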
diff --git a/usr/src/uts/common/io/udmf/dm9601reg.h b/usr/src/uts/common/io/udmf/dm9601reg.h
new file mode 100644
index 0000000000..a36f2b0fc8
--- /dev/null
+++ b/usr/src/uts/common/io/udmf/dm9601reg.h
@@ -0,0 +1,348 @@
+/*
+ * %W% %E%
+ * Macro definitions for Davicom DM9601 USB to fast ethernet controller
+ * based on Davicom DM9601E data sheet
+ * This file is public domain. Coded by M.Murayama (KHF04453@nifty.com)
+ */
+
+#ifndef __DM9601_H__
+#define __DM9601_H__
+
+/*
+ * offset of registers
+ */
+#define NCR 0x00U /* network control register */
+#define NSR 0x01U /* network status register */
+#define TCR 0x02U /* tx control register */
+#define TSR1 0x03U /* tx status register 1 */
+#define TSR2 0x04U /* tx status register 2 */
+#define RCR 0x05U /* rx control register */
+#define RSR 0x06U /* rx status register */
+#define ROCR 0x07U /* rx overflow counter register */
+#define BPTR 0x08U /* back pressure threshold register */
+#define FCTR 0x09U /* flow control threshold register */
+#define FCR 0x0aU /* rx/tx flow control register */
+#define EPCR 0x0bU /* eeprom & phy control register */
+#define EPAR 0x0cU /* eeprom & phy address register */
+#define EPDR 0x0dU /* eeprom & phy data register (2byte) */
+#define WCR 0x0fU /* wake up control register */
+#define PAR 0x10U /* physical address register (6byte) */
+#define MAR 0x16U /* multicast address register (8byte) */
+#define GPCR 0x1eU /* general purpose control register */
+#define GPR 0x1fU /* general purpose register */
+#define VID 0x28U /* vendor ID (2byte) */
+#define PID 0x2aU /* product ID (2byte) */
+#define CHIPR 0x2cU /* chip revision */
+#define USBDA 0xf0U /* usb device address register */
+#define RXC 0xf1U /* received packet counter register */
+#define TUSC 0xf2U /* tx packet counter/usb status register */
+#define USBC 0xf4U /* usb control register */
+
+/*
+ * register definitions
+ */
+/* network control register */
+#define NCR_EXT_PHY 0x80U /* 1: select external phy */
+#define NCR_WAKEEN 0x40U /* 1: wake up event enable */
+#define NCR_FCOL 0x10U /* force collision mode for test */
+#define NCR_FDX 0x08U /* 1: full duplex mode (for external phy) */
+#define NCR_LBK 0x06U
+#define NCR_LBK_SHIFT 1
+#define NCR_LBK_NORMAL (0U << NCR_LBK_SHIFT)
+#define NCR_LBK_MAC (1U << NCR_LBK_SHIFT)
+#define NCR_LBK_PHY_D (2U << NCR_LBK_SHIFT)
+#define NCR_LBK_PHY_A (3U << NCR_LBK_SHIFT)
+#define NCR_RST 0x01U /* 1: reset, auto clear */
+
+#define NCR_BITS \
+ "\020" \
+ "\010EXT_PHY" \
+ "\007WAKEEN" \
+ "\005FCOL" \
+ "\004FDX" \
+ "\001RST"
+
+/* network status register */
+#define NSR_SPEED 0x80U /* 1:10M 0:100M */
+#define NSR_LINKST 0x40U /* 1:ok 0:fail */
+#define NSR_WAKEST 0x20U /* 1:enabled */
+#define NSR_TXFULL 0x10U /* 1:tx fifo full */
+#define NSR_TX2END 0x08U /* tx packet2 complete status */
+#define NSR_TX1END 0x04U /* tx packet1 complete status */
+#define NSR_RXOV 0x02U /* rx fifo overflow */
+#define NSR_RXRDY 0x01U /* rx packet ready */
+
+#define NSR_BITS \
+ "\020" \
+ "\010SPEED_10" \
+ "\007LINKST_UP" \
+ "\006WAKEST" \
+ "\005TXFULL" \
+ "\004TX2END" \
+ "\003TX1END" \
+ "\002RXOV" \
+ "\001RXRDY"
+
+/* tx control register */
+#define TCR_TJDIS 0x40U /* tx jitter control */
+#define TCR_EXCEDM 0x20U /* excessive collision mode */
+#define TCR_PAD_DIS2 0x10U /* PAD appends disable for pkt2 */
+#define TCR_CRC_DIS2 0x08U /* CRC appends disable for pkt2 */
+#define TCR_PAD_DIS1 0x04U /* PAD appends disable for pkt1 */
+#define TCR_CRC_DIS1 0x02U /* CRC appends disable for pkt1 */
+
+#define TCR_BITS \
+ "\020" \
+ "\007TJDIS" \
+ "\006EXCEDM" \
+ "\005PAD_DIS2" \
+ "\004CRC_DIS2" \
+ "\003PAD_DIS1" \
+ "\002CRC_DIS1"
+
+/* tx status register (ro) */
+#define TSR_TJTO 0x80U /* tx jabber time out */
+#define TSR_LC 0x40U /* loss of carrier */
+#define TSR_NC 0x20U /* no carrier */
+#define TSR_LATEC 0x10U /* late collision */
+#define TSR_COL 0x08U /* collision */
+#define TSR_EL 0x04U /* excessive collision */
+
+#define TSR_BITS \
+ "\020" \
+ "\010TJTO" \
+ "\007LC" \
+ "\006NC" \
+ "\005LATEC" \
+ "\004COL" \
+ "\003EL"
+
+/* rx control register */
+#define RCR_WTDIS 0x40U /* watch dog timer disable */
+#define RCR_DIS_LONG 0x20U /* discard longer packets than 1522 */
+#define RCR_DIS_CRC 0x10U /* discard crc error packets */
+#define RCR_ALL 0x08U /* pass all multicast */
+#define RCR_RUNT 0x04U /* pass runt packets */
+#define RCR_PRMSC 0x02U /* promiscuous mode */
+#define RCR_RXEN 0x01U /* rx enable */
+
+#define RCR_BITS \
+ "\020" \
+ "\007WTDIS" \
+ "\006DIS_LONG" \
+ "\005DIS_CRC" \
+ "\004ALL" \
+ "\003RUNT" \
+ "\002PRMSC" \
+ "\001RXEN"
+
+/* rx status register */
+#define RSR_RF 0x80U /* runt frame */
+#define RSR_MF 0x40U /* multicast frame */
+#define RSR_LCS 0x20U /* late collision seen */
+#define RSR_RWTO 0x10U /* receive watchdog timeout */
+#define RSR_PLE 0x08U /* physical layer error */
+#define RSR_AE 0x04U /* alignment error */
+#define RSR_CE 0x02U /* crc error */
+#define RSR_FOE 0x01U /* fifo overflow error */
+
+#define RSR_BITS \
+ "\020" \
+ "\010RF" \
+ "\007MF" \
+ "\006LCS" \
+ "\005RWTO" \
+ "\004PLE" \
+ "\003AE" \
+ "\002CE" \
+ "\001FOE"
+
+/* receive overflow counter register */
+#define ROCR_RXFU 0x80U /* receive overflow counter overflow */
+#define ROCR_ROC 0x7fU /* receive overflow counter */
+
+#define ROCR_BITS \
+ "\020" \
+ "\010RXFU"
+
+/* back pressure threshold register */
+#define BPTR_BPHW 0xf0U /* high water overflow threshold */
+#define BPTR_BPHW_SHIFT 4
+#define BPTR_BPHW_UNIT 1024U
+#define BPTR_BPHW_DEFAULT (3 << BPTR_BPHW_SHIFT) /* 3k */
+#define BPTR_JPT 0x0fU /* jam pattern time */
+#define BPTR_JPT_SHIFT 0
+#define BPTR_JPT_5us (0U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_10us (1U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_15us (2U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_25us (3U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_50us (4U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_100us (5U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_150us (6U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_200us (7U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_250us (8U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_300us (9U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_350us (10U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_400us (11U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_450us (12U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_500us (13U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_550us (14U << BPTR_JPT_SHIFT)
+#define BPTR_JPT_600us (15U << BPTR_JPT_SHIFT)
+
+/* flow control threshold register */
+#define FCTR_HWOT 0xf0U /* rx fifo high water overflow threshold */
+#define FCTR_HWOT_SHIFT 4
+#define FCTR_HWOT_UNIT 1024U
+#define FCTR_LWOT 0x0fU /* rx fifo low water overflow threshold */
+#define FCTR_LWOT_SHIFT 0
+#define FCTR_LWOT_UNIT 1024U
+
+/* rx/tx flow control register */
+#define FCR_TXPO 0x80U /* tx pause packet */
+#define FCR_TXPF 0x40U /* tx pause packet */
+#define FCR_TXPEN 0x20U /* tx pause packet */
+#define FCR_BKPA 0x10U /* back pressure mode */
+#define FCR_BKPM 0x08U /* back pressure mode */
+#define FCR_BKPS 0x04U /* rx pause packet current status (r/c) */
+#define FCR_RXPCS 0x02U /* rx pause packet current status (ro) */
+#define FCR_FLCE 0x01U /* flow control enable */
+
+#define FCR_BITS \
+ "\020" \
+ "\000TXPO" \
+ "\000TXPF" \
+ "\000TXPEN" \
+ "\000BKPA" \
+ "\000BKPM" \
+ "\000BKPS" \
+ "\000RXPCS" \
+ "\000FLCE"
+
+/* EEPROM & PHY control register (0x0b) */
+#define EPCR_REEP 0x20U /* reload eeprom */
+#define EPCR_WEP 0x10U /* write eeprom enable */
+#define EPCR_EPOS 0x08U /* select device, 0:eeprom, 1:phy */
+#define EPCR_ERPRR 0x04U /* read command */
+#define EPCR_ERPRW 0x02U /* write command */
+#define EPCR_ERRE 0x01U /* eeprom/phy access in progress (ro) */
+
+#define EPCR_BITS \
+ "\020" \
+ "\005REEP" \
+ "\004WEP" \
+ "\003EPOS" \
+ "\002ERPRR" \
+ "\001ERPRW" \
+ "\000ERRE"
+
+/* EEPROM & PHY access register (0x0c) */
+#define EPAR_PHYADR 0xc0U /* phy address, internal phy(1) or external */
+#define EPAR_PHYADR_SHIFT 6
+#define EPAR_EROA 0x3fU /* eeprom word addr or phy register addr */
+#define EPAR_EROA_SHIFT 0
+
+/* EEPROM & PHY data register (0x0d(low)-0x0e(hi)) */
+
+/* wake up control register (0x0f) */
+#define WCR_LINKEN 0x20U /* enable link status event */
+#define WCR_SAMPLEEN 0x10U /* enable sample frame event */
+#define WCR_MAGICEN 0x08U /* enable magic pkt event */
+#define WCR_LINKST 0x04U /* link status change occur ro */
+#define WCR_SAMPLEST 0x02U /* sample frame rx occur ro */
+#define WCR_MAGICST 0x01U /* magic pkt rx occur ro */
+
+#define WCR_BITS \
+ "\020" \
+ "\000LINKEN" \
+ "\000SAMPLEEN" \
+ "\000MAGICEN" \
+ "\000LINKST" \
+ "\000SAMPLEST" \
+ "\000MAGICST"
+
+/* physical address register (0x10-0x15) */
+/* multicast address register (0x16-0x1c) */
+/* general purpose control register (0x1e) */
+#define GPCR_GEPCTRL 0x7f
+#define GPCR_OUT(n) (1U << (n))
+
+#define GPCR_BITS \
+ "\020" \
+ "\006OUT5" \
+ "\005OUT4" \
+ "\004OUT3" \
+ "\003OUT2" \
+ "\002OUT1" \
+ "\001OUT0"
+
+/* general purpose register (0x1f) */
+#define GPR_GEPIO5 0x20U
+#define GPR_GEPIO4 0x10U
+#define GPR_GEPIO3 0x08U
+#define GPR_GEPIO2 0x04U
+#define GPR_GEPIO1 0x02U
+#define GPR_GEPIO0 0x01U
+
+#define GPR_BITS \
+ "\020" \
+ "\006GEPIO5" \
+ "\005GEPIO4" \
+ "\004GEPIO3" \
+ "\003GEPIO2" \
+ "\002GEPIO1" \
+ "\001GEPIO0"
+
+/* vendor id register (0x28-0x29) */
+/* product id register (0x2a-0x2b) */
+/* chip revision register (0x2c) */
+
+/* usb device address register (0xf0) */
+#define USBDA_USBFA 0x3fU /* usb device address */
+#define USBDA_USBFA_SHIFT 0
+
+/* receive packet counter register (0xf1) */
+
+/* transmit packet counter/usb status register (0xf2) */
+#define TUSR_RXFAULT 0x80U /* indicate rx has unexpected condition */
+#define TUSR_SUSFLAG 0x40U /* indicate device has suspended condition */
+#define TUSR_EP1RDY 0x20U /* ready for read from ep1 pipe */
+#define TUSR_SRAM 0x18U /* sram size 0:32K, 1:48K, 2:16K, 3:64K */
+#define TUSR_SRAM_SHIFT 3
+#define TUSR_SRAM_32K (0U << TUSR_SRAM_SHIFT)
+#define TUSR_SRAM_48K (1U << TUSR_SRAM_SHIFT)
+#define TUSR_SRAM_16K (2U << TUSR_SRAM_SHIFT)
+#define TUSR_SRAM_64K (3U << TUSR_SRAM_SHIFT)
+#define TUSR_TXC2 0x04U /* two or more packets in tx buffer */
+#define TUSR_TXC1 0x02U /* one packet in tx buffer */
+#define TUSR_TXC0 0x01U /* no packet in tx buffer */
+
+#define TUSR_BITS \
+ "\020" \
+ "\010RXFAULT" \
+ "\007SUSFLAG" \
+ "\006EP1RDY" \
+ "\003TXC2" \
+ "\002TXC1" \
+ "\001TXC0"
+
+/* usb control register (0xf4) */
+#define USBC_EP3ACK 0x20U /* ep3 will always return 8-byte data if NAK=0 */
+#define USBC_EP3NACK 0x10U /* ep3 will always return NAK */
+#define USBC_MEMTST 0x01U
+
+/* bulk message format */
+#define TX_HEADER_SIZE 2
+#define RX_HEADER_SIZE 3
+
+/* interrupt msg format */
+struct intr_msg {
+ uint8_t im_nsr;
+ uint8_t im_tsr1;
+ uint8_t im_tsr2;
+ uint8_t im_rsr;
+ uint8_t im_rocr;
+ uint8_t im_rxc;
+ uint8_t im_txc;
+ uint8_t im_gpr;
+};
+#endif /* __DM9601_H__ */
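
The *_BITS strings above follow the kernel's "%b" format convention: the
leading octal byte selects the numeric base ("\020" is hex) and each following
entry pairs a 1-indexed bit position with its name. A hedged usage sketch:

    uint8_t nsr = NSR_LINKST | NSR_RXRDY;    /* 0x41 */

    /* expected to print something like "41<LINKST_UP,RXRDY>" */
    cmn_err(CE_CONT, "!nsr:%b", nsr, NSR_BITS);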
diff --git a/usr/src/uts/common/io/udmf/udmf_usbgem.c b/usr/src/uts/common/io/udmf/udmf_usbgem.c
new file mode 100644
index 0000000000..0637de054b
--- /dev/null
+++ b/usr/src/uts/common/io/udmf/udmf_usbgem.c
@@ -0,0 +1,1036 @@
+/*
+ * udmfE_usbgem.c : Davicom DM9601E USB to Fast Ethernet Driver for Solaris
+ *
+ * Copyright (c) 2009-2012 Masayuki Murayama. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the author nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#pragma ident "%W% %E%"
+
+/*
+ * Changelog:
+ */
+
+/*
+ * TODO
+ */
+/* ======================================================= */
+
+/*
+ * Solaris system header files and macros
+ */
+
+/* minimum kernel headers for drivers */
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/debug.h>
+#include <sys/kmem.h>
+#include <sys/modctl.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/byteorder.h>
+
+/* ethernet stuff */
+#include <sys/ethernet.h>
+
+/* interface card depend stuff */
+#include <sys/stropts.h>
+#include <sys/stream.h>
+#include <sys/strlog.h>
+#include <sys/usb/usba.h>
+#include "usbgem.h"
+
+/* hardware stuff */
+#include "usbgem_mii.h"
+#include "dm9601reg.h"
+
+char ident[] = "dm9601 usbnic driver v" VERSION;
+
+/*
+ * Useful macros
+ */
+#define CHECK_AND_JUMP(err, label) if (err != USB_SUCCESS) goto label
+#define LE16P(p) ((((uint8_t *)(p))[1] << 8) | ((uint8_t *)(p))[0])
+
+/*
+ * Debugging
+ */
+#ifdef DEBUG_LEVEL
+static int udmf_debug = DEBUG_LEVEL;
+#define DPRINTF(n, args) if (udmf_debug > (n)) cmn_err args
+#else
+#define DPRINTF(n, args)
+#endif
+
+/*
+ * Our configuration for dm9601
+ */
+/* timeouts */
+#define ONESEC (drv_usectohz(1*1000000))
+
+/*
+ * Local device definitions
+ */
+struct udmf_dev {
+ /*
+ * Misc HW information
+ */
+ uint8_t rcr;
+ uint8_t last_nsr;
+ uint8_t mac_addr[ETHERADDRL];
+};
+
+/*
+ * private functions
+ */
+
+/* mii operations */
+static uint16_t udmf_mii_read(struct usbgem_dev *, uint_t, int *errp);
+static void udmf_mii_write(struct usbgem_dev *, uint_t, uint16_t, int *errp);
+
+/* nic operations */
+static int udmf_reset_chip(struct usbgem_dev *);
+static int udmf_init_chip(struct usbgem_dev *);
+static int udmf_start_chip(struct usbgem_dev *);
+static int udmf_stop_chip(struct usbgem_dev *);
+static int udmf_set_media(struct usbgem_dev *);
+static int udmf_set_rx_filter(struct usbgem_dev *);
+static int udmf_get_stats(struct usbgem_dev *);
+static void udmf_interrupt(struct usbgem_dev *, mblk_t *);
+
+/* packet operations */
+static mblk_t *udmf_tx_make_packet(struct usbgem_dev *, mblk_t *);
+static mblk_t *udmf_rx_make_packet(struct usbgem_dev *, mblk_t *);
+
+/* =============================================================== */
+/*
+ * I/O functions
+ */
+/* =============================================================== */
+#define OUT(dp, ix, len, buf, errp, label) \
+ if ((*(errp) = usbgem_ctrl_out((dp), \
+ /* bmRequestType */ USB_DEV_REQ_HOST_TO_DEV \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ 1, \
+ /* wValue */ 0, \
+ /* wIndex */ (ix), \
+ /* wLength */ (len), \
+ /* value */ (buf), \
+ /* size */ (len))) != USB_SUCCESS) goto label
+
+#define OUTB(dp, ix, val, errp, label) \
+ if ((*(errp) = usbgem_ctrl_out((dp), \
+ /* bmRequestType */ USB_DEV_REQ_HOST_TO_DEV \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ 3, \
+ /* wValue */ (val), \
+ /* wIndex */ (ix), \
+ /* wLength */ 0, \
+ /* value */ NULL, \
+ /* size */ 0)) != USB_SUCCESS) goto label
+
+#define IN(dp, ix, len, buf, errp, label) \
+ if ((*(errp) = usbgem_ctrl_in((dp), \
+ /* bmRequestType */ USB_DEV_REQ_DEV_TO_HOST \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ 0, \
+ /* wValue */ 0, \
+ /* wIndex */ (ix), \
+ /* wLength */ (len), \
+ /* valuep */ (buf), \
+ /* size */ (len))) != USB_SUCCESS) goto label
+
+/* =============================================================== */
+/*
+ * Hardware manipulation
+ */
+/* =============================================================== */
+static void
+udmf_enable_phy(struct usbgem_dev *dp)
+{
+ int err = USB_SUCCESS;
+
+ /* de-assert reset signal to phy */
+ OUTB(dp, GPCR, GPCR_OUT(0), &err, usberr);
+ OUTB(dp, GPR, 0, &err, usberr);
+usberr:
+ ;
+}
+
+static int
+udmf_reset_chip(struct usbgem_dev *dp)
+{
+ int err = USB_SUCCESS;
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ OUTB(dp, NCR, NCR_LBK_NORMAL | NCR_RST, &err, usberr);
+ drv_usecwait(100);
+usberr:
+ return (err);
+}
+
+/*
+ * Setup dm9601
+ */
+static int
+udmf_init_chip(struct usbgem_dev *dp)
+{
+ int err = USB_SUCCESS;
+ struct udmf_dev *lp = dp->private;
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ OUTB(dp, NCR, NCR_LBK_NORMAL, &err, usberr);
+
+ /* tx control register: enable padding and crc generation */
+ OUTB(dp, TCR, 0, &err, usberr);
+
+ /* rx control register: will be set later by udmf_set_rx_filter() */
+ lp->rcr = RCR_RUNT;
+
+ /* back pressure threshold: */
+ OUTB(dp, BPTR, (2 << BPTR_BPHW_SHIFT) | BPTR_JPT_200us,
+ &err, usberr);
+
+ /* flow control threshold: same as default */
+ OUTB(dp, FCTR, (3 << FCTR_HWOT_SHIFT) | (8 << FCTR_LWOT_SHIFT),
+ &err, usberr);
+
+ /* usb control register */
+ OUTB(dp, USBC, USBC_EP3ACK | 0x06, &err, usberr);
+
+ /* flow control: will be set later by udmf_set_media() */
+
+ /* wake up control register: */
+ OUTB(dp, WCR, 0, &err, usberr);
+
+usberr:
+ DPRINTF(2, (CE_CONT, "!%s: %s: end err:%d(%s)",
+ dp->name, __func__,
+ err, err == USB_SUCCESS ? "success" : "error"));
+ return (err);
+}
+
+static int
+udmf_start_chip(struct usbgem_dev *dp)
+{
+ int err = USB_SUCCESS;
+ struct udmf_dev *lp = dp->private;
+
+ /* enable Rx */
+ lp->rcr |= RCR_RXEN;
+ OUTB(dp, RCR, lp->rcr, &err, usberr);
+
+usberr:
+ DPRINTF(2, (CE_CONT, "!%s: %s: end err:%d(%s)",
+ dp->name, __func__,
+ err, err == USB_SUCCESS ? "success" : "error"));
+ return (err);
+}
+
+static int
+udmf_stop_chip(struct usbgem_dev *dp)
+{
+ int err = USB_SUCCESS;
+ struct udmf_dev *lp = dp->private;
+
+ /* disable rx */
+ lp->rcr &= ~RCR_RXEN;
+ OUTB(dp, RCR, lp->rcr, &err, usberr);
+
+usberr:
+ DPRINTF(2, (CE_CONT, "!%s: %s: end err:%d(%s)",
+ dp->name, __func__,
+ err, err == USB_SUCCESS ? "success" : "error"));
+ return (err);
+}
+
+static int
+udmf_get_stats(struct usbgem_dev *dp)
+{
+ /* EMPTY */
+ return (USB_SUCCESS);
+}
+
+static uint_t
+udmf_mcast_hash(struct usbgem_dev *dp, const uint8_t *addr)
+{
+ return (usbgem_ether_crc_le(addr) & 0x3f);
+}
+
+static int
+udmf_set_rx_filter(struct usbgem_dev *dp)
+{
+ int i;
+#if DEBUG_LEVEL > 1
+ uint8_t rcr;
+#endif
+ uint8_t mode;
+ uint8_t mhash[8];
+ uint8_t *mac;
+ uint_t h;
+ int err = USB_SUCCESS;
+ struct udmf_dev *lp = dp->private;
+ static uint8_t invalid_mac[ETHERADDRL] = {0, 0, 0, 0, 0, 0};
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: called, rxmode:%x",
+ dp->name, __func__, dp->rxmode));
+
+ if (lp->rcr & RCR_RXEN) {
+ /* set promiscuous mode before changing rx filter mode */
+ OUTB(dp, RCR, lp->rcr | RCR_PRMSC, &err, usberr);
+ }
+
+ lp->rcr &= ~(RCR_ALL | RCR_PRMSC);
+ mode = 0;
+ bzero(mhash, sizeof (mhash));
+ mac = dp->cur_addr.ether_addr_octet;
+
+ if ((dp->rxmode & RXMODE_ENABLE) == 0) {
+ mac = invalid_mac;
+ } else if (dp->rxmode & RXMODE_PROMISC) {
+ /* promiscuous mode implies all multicast and all physical */
+ mode |= RCR_PRMSC;
+ } else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 32) {
+ /* accept all multicast packets */
+ mode |= RCR_ALL;
+ } else if (dp->mc_count > 0) {
+ /*
+ * Build the hash table so that only the interesting
+ * multicast addresses are accepted.
+ */
+ for (i = 0; i < dp->mc_count; i++) {
+ /* hash table is 64 = 2^6 bit width */
+ h = dp->mc_list[i].hash;
+ mhash[h / 8] |= 1 << (h % 8);
+ }
+ }
+
+ /* set node address */
+ if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
+ /* program the address selected above (may be invalid_mac) */
+ OUT(dp, PAR, ETHERADDRL, mac, &err, usberr);
+ bcopy(mac, lp->mac_addr, ETHERADDRL);
+ }
+
+ /* set multicast hash table */
+ OUT(dp, MAR, sizeof (mhash), &mhash[0], &err, usberr);
+
+ /* update rcr */
+ lp->rcr |= mode;
+ OUTB(dp, RCR, lp->rcr, &err, usberr);
+
+#if DEBUG_LEVEL > 1
+ /* verify rcr */
+ IN(dp, RCR, 1, &rcr, &err, usberr);
+ cmn_err(CE_CONT, "!%s: %s: rcr:%b returned",
+ dp->name, __func__, rcr, RCR_BITS);
+#endif
+usberr:
+ DPRINTF(2, (CE_CONT, "!%s: %s: end err:%d(%s)",
+ dp->name, __func__,
+ err, err == USB_SUCCESS ? "success" : "error"));
+ return (err);
+}
+
+static int
+udmf_set_media(struct usbgem_dev *dp)
+{
+ int err = USB_SUCCESS;
+ uint8_t fcr;
+ struct udmf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /* setup flow control */
+ fcr = 0;
+ if (dp->full_duplex) {
+ /* select flow control */
+ switch (dp->flow_control) {
+ case FLOW_CONTROL_RX_PAUSE:
+ fcr |= FCR_FLCE;
+ break;
+
+ case FLOW_CONTROL_TX_PAUSE:
+ fcr |= FCR_TXPEN;
+ break;
+
+ case FLOW_CONTROL_SYMMETRIC:
+ fcr |= FCR_FLCE | FCR_TXPEN;
+ break;
+ }
+ }
+
+ /* update flow control register */
+ OUTB(dp, FCR, fcr, &err, usberr);
+
+usberr:
+ DPRINTF(2, (CE_CONT, "!%s: %s: end err:%d(%s)",
+ dp->name, __func__,
+ err, err == USB_SUCCESS ? "success" : "error"));
+ return (err);
+}
+
+/*
+ * send/receive packet check
+ */
+static mblk_t *
+udmf_tx_make_packet(struct usbgem_dev *dp, mblk_t *mp)
+{
+ int n;
+ size_t pkt_size;
+ mblk_t *new;
+ mblk_t *tp;
+ uint8_t *bp;
+ uint8_t *last_pos;
+ uint_t align_mask;
+
+ pkt_size = msgdsize(mp);
+ align_mask = 63;
+
+ /*
+ * re-allocate the mp
+ */
+
+ /* minimum ethernet packet size of ETHERMIN */
+ pkt_size = max(pkt_size, ETHERMIN);
+
+#if 0 /* CONFIG_ADD_TX_DELIMITOR_ALWAYS */
+ pkt_size += TX_HEADER_SIZE;
+#endif
+ if (((pkt_size + TX_HEADER_SIZE) & align_mask) == 0) {
+ /*
+ * The transfer would end exactly on a usb packet
+ * boundary; pad it so the bulk-out pipe does not
+ * need a zero-length packet to terminate.
+ */
+ pkt_size += TX_HEADER_SIZE;
+ }
+
+ if ((new = allocb(TX_HEADER_SIZE + pkt_size, 0)) == NULL) {
+ return (NULL);
+ }
+ new->b_wptr = new->b_rptr + TX_HEADER_SIZE + pkt_size;
+
+ /* add a header */
+ bp = new->b_rptr;
+ bp[0] = (uint8_t)pkt_size;
+ bp[1] = (uint8_t)(pkt_size >> 8);
+ bp += TX_HEADER_SIZE;
+
+ /* copy contents of the buffer */
+ for (tp = mp; tp; tp = tp->b_cont) {
+ n = tp->b_wptr - tp->b_rptr;
+ bcopy(tp->b_rptr, bp, n);
+ bp += n;
+ }
+
+ /* clear the rest including the next zero length header */
+ last_pos = new->b_wptr;
+ while (bp < last_pos) {
+ *bp++ = 0;
+ }
+
+ return (new);
+}
+
+static void
+udmf_dump_packet(struct usbgem_dev *dp, uint8_t *bp, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i += 8, bp += 8) {
+ cmn_err(CE_CONT, "%02x %02x %02x %02x %02x %02x %02x %02x",
+ bp[0], bp[1], bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]);
+ }
+}
+
+static mblk_t *
+udmf_rx_make_packet(struct usbgem_dev *dp, mblk_t *mp)
+{
+ int len;
+ uint8_t rx_stat;
+
+ len = mp->b_wptr - mp->b_rptr;
+
+ if (len <= RX_HEADER_SIZE) {
+ /*
+ * the usb bulk-in frame doesn't include a valid
+ * ethernet packet.
+ */
+ return (NULL);
+ }
+
+ /* remove rx header */
+ rx_stat = mp->b_rptr[0];
+ if (rx_stat & (RSR_RF | RSR_LCS | RSR_RWTO |
+ RSR_PLE | RSR_AE | RSR_CE | RSR_FOE)) {
+ if (rx_stat & RSR_RF) {
+ dp->stats.runt++;
+ }
+ if (rx_stat & RSR_LCS) {
+ /* late collision */
+ dp->stats.rcv_internal_err++;
+ }
+ if (rx_stat & RSR_RWTO) {
+ /* rx timeout */
+ dp->stats.rcv_internal_err++;
+ }
+ if (rx_stat & RSR_PLE) {
+ /* physical layer error */
+ dp->stats.rcv_internal_err++;
+ }
+ if (rx_stat & RSR_AE) {
+ /* alignment error */
+ dp->stats.frame++;
+ }
+ if (rx_stat & RSR_CE) {
+ /* crc error */
+ dp->stats.crc++;
+ }
+ if (rx_stat & RSR_FOE) {
+ /* fifo overflow error */
+ dp->stats.overflow++;
+ }
+ dp->stats.errrcv++;
+ }
+ len = LE16P(&mp->b_rptr[1]);
+ if (len >= ETHERFCSL) {
+ len -= ETHERFCSL;
+ }
+ mp->b_rptr += RX_HEADER_SIZE;
+ mp->b_wptr = mp->b_rptr + len;
+
+ return (mp);
+}
+
+/*
+ * MII Interfaces
+ */
+static uint16_t
+udmf_ep_read(struct usbgem_dev *dp, uint_t which, uint_t addr, int *errp)
+{
+ int i;
+ uint8_t epcr;
+ uint16_t val;
+
+ DPRINTF(4, (CE_CONT, "!%s: %s: called, ix:%d",
+ dp->name, __func__, addr));
+
+ OUTB(dp, EPAR, addr, errp, usberr);
+ OUTB(dp, EPCR, which | EPCR_ERPRR, errp, usberr);
+
+ for (i = 0; i < 100; i++) {
+ IN(dp, EPCR, sizeof (epcr), &epcr, errp, usberr);
+ if ((epcr & EPCR_ERRE) == 0) {
+ /* done */
+ IN(dp, EPDR, sizeof (val), &val, errp, usberr);
+ val = LE_16(val);
+ goto done;
+ }
+ drv_usecwait(10);
+ }
+ /* timeout */
+ cmn_err(CE_WARN, "!%s: %s: timeout", dp->name, __func__);
+ val = 0;
+done:
+ OUTB(dp, EPCR, 0, errp, usberr);
+ return (val);
+
+usberr:
+ DPRINTF(2, (CE_CONT, "!%s: %s: end err:%d(%s)",
+ dp->name, __func__,
+ *errp, *errp == USB_SUCCESS ? "success" : "error"));
+ return (0);
+}
+
+static void
+udmf_ep_write(struct usbgem_dev *dp, uint_t which, uint_t addr,
+ uint16_t val, int *errp)
+{
+ int i;
+ uint8_t epcr;
+
+ DPRINTF(5, (CE_CONT, "!%s: %s called", dp->name, __func__));
+
+ val = LE_16(val);
+ OUT(dp, EPDR, sizeof (val), &val, errp, usberr);
+
+ OUTB(dp, EPAR, addr, errp, usberr);
+
+ OUTB(dp, EPCR, which | EPCR_WEP | EPCR_ERPRW, errp, usberr);
+
+ for (i = 0; i < 100; i++) {
+ IN(dp, EPCR, 1, &epcr, errp, usberr);
+ if ((epcr & EPCR_ERRE) == 0) {
+ /* done */
+ goto done;
+ }
+ drv_usecwait(10);
+ }
+ /* timeout */
+ cmn_err(CE_WARN, "!%s: %s: timeout", dp->name, __func__);
+done:
+ OUTB(dp, EPCR, 0, errp, usberr);
+ return;
+
+usberr:
+ DPRINTF(2, (CE_CONT, "!%s: %s: end err:%d(%s)",
+ dp->name, __func__,
+ *errp, *errp == USB_SUCCESS ? "success" : "error"));
+}
+
+static uint16_t
+udmf_mii_read(struct usbgem_dev *dp, uint_t index, int *errp)
+{
+ uint16_t val;
+
+ val = udmf_ep_read(dp, EPCR_EPOS,
+ (dp->mii_phy_addr << EPAR_PHYADR_SHIFT) | index, errp);
+
+ return (val);
+}
+
+static void
+udmf_mii_write(struct usbgem_dev *dp, uint_t index, uint16_t val, int *errp)
+{
+ udmf_ep_write(dp, EPCR_EPOS,
+ (dp->mii_phy_addr << EPAR_PHYADR_SHIFT) | index, val, errp);
+}
+
+static void
+udmf_interrupt(struct usbgem_dev *dp, mblk_t *mp)
+{
+ struct intr_msg *imp;
+ struct udmf_dev *lp = dp->private;
+
+ imp = (struct intr_msg *)&mp->b_rptr[0];
+
+ DPRINTF(4, (CE_CONT,
+ "!%s: %s: size:%d, nsr:%b tsr1:%b tsr2:%b"
+ " rsr:%b rocr:%b rxc:%02x txc:%b gpr:%b",
+ dp->name, __func__, mp->b_wptr - mp->b_rptr,
+ imp->im_nsr, NSR_BITS,
+ imp->im_tsr1, TSR_BITS,
+ imp->im_tsr2, TSR_BITS,
+ imp->im_rsr, RSR_BITS,
+ imp->im_rocr, ROCR_BITS,
+ imp->im_rxc,
+ imp->im_txc, TUSR_BITS,
+ imp->im_gpr, GPR_BITS));
+
+ if ((lp->last_nsr ^ imp->im_nsr) & NSR_LINKST) {
+ usbgem_mii_update_link(dp);
+ }
+
+ lp->last_nsr = imp->im_nsr;
+}
+
+/* ======================================================== */
+/*
+ * OS depend (device driver DKI) routine
+ */
+/* ======================================================== */
+static uint16_t
+udmf_eeprom_read(struct usbgem_dev *dp, uint_t index, int *errp)
+{
+ uint16_t val;
+
+ val = udmf_ep_read(dp, 0, index, errp);
+
+ return (val);
+}
+
+#ifdef DEBUG_LEVEL
+static void
+udmf_eeprom_dump(struct usbgem_dev *dp, int size)
+{
+ int i;
+ int err;
+ uint16_t w0, w1, w2, w3;
+
+ cmn_err(CE_CONT, "!%s: eeprom dump:", dp->name);
+
+ err = USB_SUCCESS;
+
+ for (i = 0; i < size; i += 4) {
+ w0 = udmf_eeprom_read(dp, i + 0, &err);
+ w1 = udmf_eeprom_read(dp, i + 1, &err);
+ w2 = udmf_eeprom_read(dp, i + 2, &err);
+ w3 = udmf_eeprom_read(dp, i + 3, &err);
+ cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x",
+ i, w0, w1, w2, w3);
+ }
+usberr:
+ ;
+}
+#endif
+
+static int
+udmf_attach_chip(struct usbgem_dev *dp)
+{
+ int i;
+ uint_t val;
+ uint8_t *m;
+ int err;
+ struct udmf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s enter", dp->name, __func__));
+
+ /*
+ * get mac address from EEPROM
+ */
+ m = dp->dev_addr.ether_addr_octet;
+ for (i = 0; i < ETHERADDRL; i += 2) {
+ val = udmf_eeprom_read(dp, i/2, &err);
+ m[i + 0] = (uint8_t)val;
+ m[i + 1] = (uint8_t)(val >> 8);
+ }
+
+ /* invalidate a private cache for mac addr */
+ bzero(lp->mac_addr, sizeof (lp->mac_addr));
+#ifdef CONFIG_VLAN
+ dp->misc_flag = USBGEM_VLAN;
+#endif
+#if DEBUG_LEVEL > 0
+ udmf_eeprom_dump(dp, /* 0x3f + 1 */ 128);
+#endif
+{
+ static uint8_t bcst[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ DPRINTF(0, (CE_CONT, "!%s: %s: hash of bcast:%x",
+ dp->name, __func__, usbgem_ether_crc_be(bcst)));
+}
+ return (USB_SUCCESS);
+
+usberr:
+ cmn_err(CE_WARN, "%s: %s: usb error detected (%d)",
+ dp->name, __func__, err);
+ return (USB_FAILURE);
+}
+
+static int
+udmf_mii_probe(struct usbgem_dev *dp)
+{
+ DPRINTF(2, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ udmf_enable_phy(dp);
+ return (usbgem_mii_probe_default(dp));
+}
+
+static int
+udmf_mii_init(struct usbgem_dev *dp)
+{
+ DPRINTF(2, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+ udmf_enable_phy(dp);
+ return (USB_SUCCESS);
+}
+
+static int
+udmfattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int unit;
+ const char *drv_name;
+ struct usbgem_dev *dp;
+ struct usbgem_conf *ugcp;
+ struct udmf_dev *lp;
+
+ unit = ddi_get_instance(dip);
+ drv_name = ddi_driver_name(dip);
+
+ DPRINTF(3, (CE_CONT, "!%s%d: %s: called, cmd:%d",
+ drv_name, unit, __func__, cmd));
+
+ if (cmd == DDI_ATTACH) {
+ /*
+ * construct usbgem configuration
+ */
+ ugcp = kmem_zalloc(sizeof (*ugcp), KM_SLEEP);
+
+ /* name */
+ /*
+ * softmac requires that ppa is the instance number
+ * of the device; otherwise it hangs while searching for the device.
+ */
+ sprintf(ugcp->usbgc_name, "%s%d", drv_name, unit);
+ ugcp->usbgc_ppa = unit;
+
+ ugcp->usbgc_ifnum = 0;
+ ugcp->usbgc_alt = 0;
+
+ ugcp->usbgc_tx_list_max = 64;
+
+ ugcp->usbgc_rx_header_len = RX_HEADER_SIZE;
+ ugcp->usbgc_rx_list_max = 64;
+
+ /* time out parameters */
+ ugcp->usbgc_tx_timeout = USBGEM_TX_TIMEOUT;
+ ugcp->usbgc_tx_timeout_interval = USBGEM_TX_TIMEOUT_INTERVAL;
+#if 1
+ /* flow control */
+ ugcp->usbgc_flow_control = FLOW_CONTROL_RX_PAUSE;
+#else
+ /*
+ * XXX - flow control caused link down frequently under
+ * heavy traffic
+ */
+ ugcp->usbgc_flow_control = FLOW_CONTROL_NONE;
+#endif
+ /* MII timeout parameters */
+ ugcp->usbgc_mii_link_watch_interval =
+ USBGEM_LINK_WATCH_INTERVAL;
+ ugcp->usbgc_mii_an_watch_interval =
+ USBGEM_LINK_WATCH_INTERVAL/5;
+ ugcp->usbgc_mii_reset_timeout = MII_RESET_TIMEOUT; /* 1 sec */
+ ugcp->usbgc_mii_an_timeout = MII_AN_TIMEOUT; /* 5 sec */
+ ugcp->usbgc_mii_an_wait = (25*ONESEC)/10;
+ ugcp->usbgc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT;
+
+ ugcp->usbgc_mii_an_delay = ONESEC/10;
+ ugcp->usbgc_mii_linkdown_action = MII_ACTION_RSA;
+ ugcp->usbgc_mii_linkdown_timeout_action = MII_ACTION_RESET;
+ ugcp->usbgc_mii_dont_reset = B_FALSE;
+ ugcp->usbgc_mii_hw_link_detection = B_TRUE;
+
+ /* I/O methods */
+
+ /* mac operation */
+ ugcp->usbgc_attach_chip = &udmf_attach_chip;
+ ugcp->usbgc_reset_chip = &udmf_reset_chip;
+ ugcp->usbgc_init_chip = &udmf_init_chip;
+ ugcp->usbgc_start_chip = &udmf_start_chip;
+ ugcp->usbgc_stop_chip = &udmf_stop_chip;
+ ugcp->usbgc_multicast_hash = &udmf_mcast_hash;
+
+ ugcp->usbgc_set_rx_filter = &udmf_set_rx_filter;
+ ugcp->usbgc_set_media = &udmf_set_media;
+ ugcp->usbgc_get_stats = &udmf_get_stats;
+ ugcp->usbgc_interrupt = &udmf_interrupt;
+
+ /* packet operation */
+ ugcp->usbgc_tx_make_packet = &udmf_tx_make_packet;
+ ugcp->usbgc_rx_make_packet = &udmf_rx_make_packet;
+
+ /* mii operations */
+ ugcp->usbgc_mii_probe = &udmf_mii_probe;
+ ugcp->usbgc_mii_init = &udmf_mii_init;
+ ugcp->usbgc_mii_config = &usbgem_mii_config_default;
+ ugcp->usbgc_mii_read = &udmf_mii_read;
+ ugcp->usbgc_mii_write = &udmf_mii_write;
+ ugcp->usbgc_mii_addr_min = 1;
+
+ /* mtu */
+ ugcp->usbgc_min_mtu = ETHERMTU;
+ ugcp->usbgc_max_mtu = ETHERMTU;
+ ugcp->usbgc_default_mtu = ETHERMTU;
+
+ lp = kmem_zalloc(sizeof (struct udmf_dev), KM_SLEEP);
+ lp->last_nsr;
+
+ ddi_set_driver_private(dip, NULL);
+
+ dp = usbgem_do_attach(dip, ugcp, lp, sizeof (struct udmf_dev));
+
+ kmem_free(ugcp, sizeof (*ugcp));
+
+ if (dp != NULL) {
+ return (DDI_SUCCESS);
+ }
+
+err_free_mem:
+ kmem_free(lp, sizeof (struct udmf_dev));
+err_close_pipe:
+err:
+ return (DDI_FAILURE);
+ }
+
+ if (cmd == DDI_RESUME) {
+ return (usbgem_resume(dip));
+ }
+
+ return (DDI_FAILURE);
+}
+
+static int
+udmfdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int ret;
+
+ if (cmd == DDI_DETACH) {
+ ret = usbgem_do_detach(dip);
+ if (ret != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+ }
+ if (cmd == DDI_SUSPEND) {
+ return (usbgem_suspend(dip));
+ }
+ return (DDI_FAILURE);
+}
+
+/* ======================================================== */
+/*
+ * OS depend (loadable streams driver) routine
+ */
+/* ======================================================== */
+#ifdef USBGEM_CONFIG_GLDv3
+USBGEM_STREAM_OPS(udmf_ops, udmfattach, udmfdetach);
+#else
+static struct module_info udmfminfo = {
+ 0, /* mi_idnum */
+ "udmf", /* mi_idname */
+ 0, /* mi_minpsz */
+ ETHERMTU, /* mi_maxpsz */
+ ETHERMTU*128, /* mi_hiwat */
+ 1, /* mi_lowat */
+};
+
+static struct qinit udmfrinit = {
+ (int (*)()) NULL, /* qi_putp */
+ usbgem_rsrv, /* qi_srvp */
+ usbgem_open, /* qi_qopen */
+ usbgem_close, /* qi_qclose */
+ (int (*)()) NULL, /* qi_qadmin */
+ &udmfminfo, /* qi_minfo */
+ NULL /* qi_mstat */
+};
+
+static struct qinit udmfwinit = {
+ usbgem_wput, /* qi_putp */
+ usbgem_wsrv, /* qi_srvp */
+ (int (*)()) NULL, /* qi_qopen */
+ (int (*)()) NULL, /* qi_qclose */
+ (int (*)()) NULL, /* qi_qadmin */
+ &udmfminfo, /* qi_minfo */
+ NULL /* qi_mstat */
+};
+
+static struct streamtab udmf_info = {
+ &udmfrinit, /* st_rdinit */
+ &udmfwinit, /* st_wrinit */
+ NULL, /* st_muxrinit */
+ NULL /* st_muxwrinit */
+};
+
+static struct cb_ops cb_udmf_ops = {
+ nulldev, /* cb_open */
+ nulldev, /* cb_close */
+ nodev, /* cb_strategy */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ nodev, /* cb_read */
+ nodev, /* cb_write */
+ nodev, /* cb_ioctl */
+ nodev, /* cb_devmap */
+ nodev, /* cb_mmap */
+ nodev, /* cb_segmap */
+ nochpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ &udmf_info, /* cb_stream */
+ D_NEW|D_MP /* cb_flag */
+};
+
+static struct dev_ops udmf_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ usbgem_getinfo, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ udmfattach, /* devo_attach */
+ udmfdetach, /* devo_detach */
+ nodev, /* devo_reset */
+ &cb_udmf_ops, /* devo_cb_ops */
+ NULL, /* devo_bus_ops */
+ usbgem_power, /* devo_power */
+#if DEVO_REV >= 4
+ usbgem_quiesce, /* devo_quiesce */
+#endif
+};
+#endif
+
+static struct modldrv modldrv = {
+ &mod_driverops, /* Type of module. This one is a driver */
+ ident,
+ &udmf_ops, /* driver ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, &modldrv, NULL
+};
+
+/* ======================================================== */
+/*
+ * _init : done
+ */
+/* ======================================================== */
+int
+_init(void)
+{
+ int status;
+
+ DPRINTF(2, (CE_CONT, "!udmf: _init: called"));
+
+ status = usbgem_mod_init(&udmf_ops, "udmf");
+ if (status != DDI_SUCCESS) {
+ return (status);
+ }
+ status = mod_install(&modlinkage);
+ if (status != DDI_SUCCESS) {
+ usbgem_mod_fini(&udmf_ops);
+ }
+ return (status);
+}
+
+/*
+ * _fini : done
+ */
+int
+_fini(void)
+{
+ int status;
+
+ DPRINTF(2, (CE_CONT, "!udmf: _fini: called"));
+ status = mod_remove(&modlinkage);
+ if (status == DDI_SUCCESS) {
+ usbgem_mod_fini(&udmf_ops);
+ }
+ return (status);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
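
The OUT/OUTB/IN macros in this driver encode the DM9601's vendor control
transfers (bRequest 0 reads registers, 1 writes a block, 3 writes one byte)
and jump to a caller-supplied error label. A hedged sketch of the access
pattern, with a hypothetical helper name:

    static int
    udmf_example_read_nsr(struct usbgem_dev *dp)
    {
        int err = USB_SUCCESS;
        uint8_t nsr;

        IN(dp, NSR, sizeof (nsr), &nsr, &err, usberr);
        cmn_err(CE_CONT, "!%s: nsr:%b", dp->name, nsr, NSR_BITS);
        return (USB_SUCCESS);

    usberr:
        return (err);
    }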
diff --git a/usr/src/uts/common/io/upf/adm8511reg.h b/usr/src/uts/common/io/upf/adm8511reg.h
new file mode 100644
index 0000000000..68a2207bb5
--- /dev/null
+++ b/usr/src/uts/common/io/upf/adm8511reg.h
@@ -0,0 +1,205 @@
+/*
+ * @(#)adm8511reg.h 1.1 09/06/20
+ * Register definitions of ADMtek ADM8511 Fast Ethernet to USB controller.
+ * Coded by Masayuki Murayama (KHF04453@nifty.ne.jp)
+ * This file is public domain.
+ */
+
+#define EC0 0x00 /* B */
+#define EC1 0x01 /* B */
+#define EC2 0x02 /* B */
+#define MA 0x08 /* 8byte array */
+#define EID 0x10 /* B */
+#define PAUSETIMER 0x18 /* B pause timer */
+#define RPNBFC 0x1a /* B */
+#define ORFBFC 0x1b /* B */
+#define EP1C 0x1c /* B */
+#define RXFC 0x1d /* B */
+#define BIST 0x1e /* B */
+#define EEOFFSET 0x20 /* B */
+#define EEDATA 0x21 /* W */
+#define EECTRL 0x23 /* B */
+#define PHYA 0x25 /* B */
+#define PHYD 0x26 /* W */
+#define PHYAC 0x28 /* B */
+#define USBSTAT 0x2a /* B */
+#define ETHTXSTAT 0x2b /* W */
+#define ETHRXSTAT 0x2d /* B */
+#define LOSTCNT 0x2e /* W */
+#define WF0MASK 0x30 /* 16byte array */
+#define WF0OFFSET 0x40 /* W */
+#define WF0CRC 0x41 /* W */
+#define WF1MASK 0x48 /* 16byte array */
+#define WF1OFFSET 0x58 /* W */
+#define WF1CRC 0x59 /* W */
+#define WF2MASK 0x60 /* 16byte array */
+#define WF2OFFSET 0x70 /* W */
+#define WF2CRC 0x71 /* W */
+#define WCTRL 0x78 /* B */
+#define WSTAT 0x7a /* B */
+#define IPHYC 0x7b /* B */
+#define GPIO54 0x7c /* B */
+#define GPIO10 0x7e /* B */
+#define GPIO32 0x7f /* B */
+#define TEST 0x80 /* B */
+#define TM 0x81 /* B */
+#define RPN 0x82 /* B */
+
+/* Ethernet control register 0: offset 0 */
+#define EC0_TXE 0x80U
+#define EC0_RXE 0x40U
+#define EC0_RXFCE 0x20U
+#define EC0_WOE 0x10U
+#define EC0_RXSA 0x08U
+#define EC0_SBO 0x04U
+#define EC0_RXMA 0x02U
+#define EC0_RXCS 0x01U
+
+#define EC0_BITS \
+ "\020" \
+ "\010TXE" \
+ "\007RXE" \
+ "\006RXFCE" \
+ "\005WOE" \
+ "\004RXSA" \
+ "\003SBO" \
+ "\002RXMA" \
+ "\001RXCS"
+
+/* Ethernet control register 1: offset 1 */
+#define EC1_FD 0x20U
+#define EC1_100M 0x10U /* 0:10Mbps 1:100Mbps */
+#define EC1_RM 0x08U /* reset mac */
+
+#define EC1_BITS \
+ "\020" \
+ "\006FD" \
+ "\005100M" \
+ "\004RM"
+
+/* Ethernet control register 2: offset 2 */
+#define EC2_MEPL 0x80U /* 8515: MTU 0:1528, 1:1638 */
+#define EC2_RPNC 0x40U
+#define EC2_LEEPRS 0x20U
+#define EC2_EEPRW 0x10U
+#define EC2_LB 0x08U
+#define EC2_PROM 0x04U
+#define EC2_RXBP 0x02U
+#define EC2_EP3RC 0x01U
+
+#define EC2_BITS \
+ "\020" \
+ "\010MEPS" \
+ "\007RPNC" \
+ "\006LEEPRS" \
+ "\005EEPRW" \
+ "\004LB" \
+ "\003PROM" \
+ "\002RXBP" \
+ "\001EP3RC"
+
+/* Receive Packet number based Flow Control register: offset 0x1a */
+#define RPNBFC_PN 0x7eU /* */
+#define RPNBFC_PN_SHIFT 1
+#define RPNBFC_FCP 0x01U /* enable rx flow control */
+
+/* Occupied Receive FIFO based Flow Control register: offset 0x1b */
+#define ORFBFC_RXS 0x7eU /* */
+#define ORFBFC_RXS_SHIFT 1
+#define ORFBFC_RXS_UNIT 1024U
+#define ORFBFC_FCRXS 0x01U /* enable rx flow control */
+
+/* EP1 control register: offset 0x1c */
+#define EP1C_EP1S0E 0x80U /* send 0 enable */
+#define EP1C_ITMA 0x60U /* internal test mode A */
+#define EP1C_ITMB 0x1fU /* internal test mode B */
+
+#define EP1C_BITS \
+ "\020" \
+ "\010EP1S0E"
+
+/* Rx FIFO Control register: offset 0x1d */
+#define RXFC_EXT_SRAM 0x02 /* enable external 32k sram */
+#define RXFC_RX32PKT 0x01 /* max 32 packet */
+
+/* EEPROM offset register: offset 0x20 */
+#define EEOFFSET_MASK 0x3f /* eeprom offset address in word */
+
+/* EEPROM access control register: offset 0x23 */
+#define EECTRL_DONE 0x04
+#define EECTRL_RD 0x02
+#define EECTRL_WR 0x01
+
+#define EECTRL_BITS \
+ "\020" \
+ "\003DONE" \
+ "\002RD" \
+ "\001WR"
+
+/* PHY control register: offset 28 */
+#define PHYAC_DO 0x80U /* Done */
+#define PHYAC_RDPHY 0x40U /* read phy */
+#define PHYAC_WRPHY 0x20U /* write phy */
+#define PHYAC_PHYRA 0x1fU /* PHY register address */
+
+#define PHYCTRL_BITS \
+ "\020" \
+ "\010DO" \
+ "\007RDPHY" \
+ "\006WRPHY"
+
+/* Internal PHY control register: offset 7b */
+#define IPHYC_EPHY 0x02
+#define IPHYC_PHYR 0x01
+
+#define IPHYC_BITS \
+ "\020" \
+ "\002EPHY" \
+ "\001PHYR"
+
+/* GPIO45 register: offset 7c */
+#define GPIO54_5OE 0x20
+#define GPIO54_5O 0x10
+#define GPIO54_5I 0x08
+#define GPIO54_4OE 0x04
+#define GPIO54_4O 0x02
+#define GPIO54_4I 0x01
+
+/* GPIO01 register: offset 7e */
+#define GPIO10_1OE 0x20
+#define GPIO10_1O 0x10
+#define GPIO10_1I 0x08
+#define GPIO10_0OE 0x04
+#define GPIO10_0O 0x02
+#define GPIO10_0I 0x01
+
+/* GPIO23 register: offset 7f */
+#define GPIO32_3OE 0x20
+#define GPIO32_3O 0x10
+#define GPIO32_3I 0x08
+#define GPIO32_2OE 0x04
+#define GPIO32_2O 0x02
+#define GPIO32_2I 0x01
+
+/* rx status at the end of received packets */
+/* byte 0 and 1 is packet length in little endian */
+/* byte 2 is receive status */
+#define RSR_DRIBBLE 0x10
+#define RSR_CRC 0x08
+#define RSR_RUNT 0x04
+#define RSR_LONG 0x02
+#define RSR_MULTI 0x01
+
+#define RSR_ERRORS \
+ (RSR_DRIBBLE | RSR_CRC | RSR_RUNT | RSR_LONG | RSR_MULTI)
+
+#define RSR_BITS \
+ "\020" \
+ "\005DRIBBLE" \
+ "\004CRC" \
+ "\003RUNT" \
+ "\002LONG" \
+ "\001MULTI"
+/* byte 3 is reserved */
+
+/* TEST register: offset 80 */
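
Per the comments above, the adm8511 appends a 4-byte trailer to each received
frame: a little-endian length in bytes 0-1, the RSR flags in byte 2, and a
reserved byte 3. A hedged parsing sketch (not part of the patch):

    /* assumes mp holds one complete bulk-in frame including the trailer */
    uint8_t *tp = mp->b_wptr - 4;
    uint_t len = tp[0] | (tp[1] << 8);
    uint8_t rsr = tp[2];

    if (rsr & RSR_ERRORS) {
        /* count the error and drop the frame */
    }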
diff --git a/usr/src/uts/common/io/upf/upf_usbgem.c b/usr/src/uts/common/io/upf/upf_usbgem.c
new file mode 100644
index 0000000000..5614803158
--- /dev/null
+++ b/usr/src/uts/common/io/upf/upf_usbgem.c
@@ -0,0 +1,1213 @@
+/*
+ * upf_usbgem.c : ADMtek an986/adm8511/adm8513/adm8515 USB to
+ * Fast Ethernet Driver for Solaris
+ */
+
+/*
+ * Copyright (c) 2004-2011 Masayuki Murayama. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the author nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#pragma ident "%W% %E%"
+
+/*
+ * Changelog:
+ */
+
+/*
+ * TODO
+ */
+/* ======================================================= */
+
+/*
+ * Solaris system header files and macros
+ */
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/debug.h>
+#include <sys/kmem.h>
+#include <sys/modctl.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/byteorder.h>
+
+/* ethernet stuff */
+#include <sys/ethernet.h>
+
+/* interface card depend stuff */
+#include <sys/stropts.h>
+#include <sys/stream.h>
+#include <sys/strlog.h>
+#include <sys/usb/usba.h>
+#include "usbgem.h"
+
+/* hardware stuff */
+#include "usbgem_mii.h"
+#include "adm8511reg.h"
+
+char ident[] = "pegasus usbnic driver v" VERSION;
+
+/*
+ * Useful macros
+ */
+#define CHECK_AND_JUMP(val, label) \
+ if ((val) != USB_SUCCESS) { goto label; }
+
+/*
+ * Debugging
+ */
+#ifdef DEBUG_LEVEL
+static int upf_debug = DEBUG_LEVEL;
+#define DPRINTF(n, args) if (upf_debug > (n)) cmn_err args
+#else
+#define DPRINTF(n, args)
+#endif
+
+/*
+ * Our configuration for ADMtek Pegasus/PegasusII
+ */
+/* timeouts */
+#define ONESEC (drv_usectohz(1*1000000))
+
+/*
+ * Local device definitions
+ */
+struct upf_dev {
+ /*
+ * Misc HW information
+ */
+ uint8_t ec[3];
+ uint8_t mac_addr[ETHERADDRL];
+ int chip_type;
+#define CHIP_AN986 1 /* avoid 0 */
+#define CHIP_ADM8511 2 /* including adm8515 */
+#define CHIP_ADM8513 3
+ boolean_t phy_init_done;
+ uint8_t last_link_state;
+
+ uint16_t vid; /* vendor id */
+ uint16_t pid; /* product id */
+};
+
+/*
+ * private functions
+ */
+
+/* mii operations */
+static uint16_t upf_mii_read(struct usbgem_dev *, uint_t, int *errp);
+static void upf_mii_write(struct usbgem_dev *, uint_t, uint16_t, int *errp);
+
+/* nic operations */
+static int upf_attach_chip(struct usbgem_dev *);
+static int upf_reset_chip(struct usbgem_dev *);
+static int upf_init_chip(struct usbgem_dev *);
+static int upf_start_chip(struct usbgem_dev *);
+static int upf_stop_chip(struct usbgem_dev *);
+static int upf_set_media(struct usbgem_dev *);
+static int upf_set_rx_filter(struct usbgem_dev *);
+static int upf_get_stats(struct usbgem_dev *);
+
+/* packet operations */
+static mblk_t *upf_tx_make_packet(struct usbgem_dev *, mblk_t *);
+static mblk_t *upf_rx_make_packet(struct usbgem_dev *, mblk_t *);
+
+/* interrupt handler */
+static void upf_interrupt(struct usbgem_dev *, mblk_t *);
+
+/* =============================================================== */
+/*
+ * I/O functions
+ */
+/* =============================================================== */
+#define UPF_REQ_GET_REGISTER 0xf0
+#define UPF_REQ_SET_REGISTER 0xf1
+#define OUTB(dp, p, v, errp, label) \
+ if ((*(errp) = usbgem_ctrl_out((dp), \
+ /* bmRequestType */ USB_DEV_REQ_HOST_TO_DEV \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ UPF_REQ_SET_REGISTER, \
+ /* wValue */ (v), \
+ /* wIndex */ (p), \
+ /* wLength */ 1, \
+ /* buf */ NULL, \
+ /* size */ 0)) != USB_SUCCESS) goto label;
+
+#define OUTW(dp, p, v, errp, label) \
+ if ((*(errp) = usbgem_ctrl_out_val((dp), \
+ /* bmRequestType */ USB_DEV_REQ_HOST_TO_DEV \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ UPF_REQ_SET_REGISTER, \
+ /* wValue */ 0, \
+ /* wIndex */ (p), \
+ /* wLength */ 2, \
+ /* value */ (v))) != USB_SUCCESS) goto label
+
+#define OUTS(dp, p, buf, len, errp, label) \
+ if ((*(errp) = usbgem_ctrl_out((dp), \
+ /* bmRequestType */ USB_DEV_REQ_HOST_TO_DEV \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ UPF_REQ_SET_REGISTER, \
+ /* wValue */ 0, \
+ /* wIndex */ (p), \
+ /* wLength */ (len), \
+ /* buf */ (buf), \
+ /* size */ (len))) != USB_SUCCESS) goto label
+
+#define INB(dp, p, vp, errp, label) \
+ if ((*(errp) = usbgem_ctrl_in_val((dp), \
+ /* bmRequestType */ USB_DEV_REQ_DEV_TO_HOST \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ UPF_REQ_GET_REGISTER, \
+ /* wValue */ 0, \
+ /* wIndex */ (p), \
+ /* wLength */ 1, \
+ /* valuep */ (vp))) != USB_SUCCESS) goto label
+
+#define INW(dp, p, vp, errp, label) \
+ if ((*(errp) = usbgem_ctrl_in_val((dp), \
+ /* bmRequestType */ USB_DEV_REQ_DEV_TO_HOST \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ UPF_REQ_GET_REGISTER, \
+ /* wValue */ 0, \
+ /* wIndex */ (p), \
+ /* wLength */ 2, \
+ /* valuep */ (vp))) != USB_SUCCESS) goto label
+
+#define INS(dp, p, buf, len, errp, label) \
+ if ((*(errp) = usbgem_ctrl_in((dp), \
+ /* bmRequestType */ USB_DEV_REQ_DEV_TO_HOST \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ UPF_REQ_GET_REGISTER, \
+ /* wValue */ 0, \
+ /* wIndex */ (p), \
+ /* wLength */ (len), \
+ /* buf */ (buf), \
+ /* size */ (len))) != USB_SUCCESS) goto label
+
+/* =============================================================== */
+/*
+ * Hardware manipulation
+ */
+/* =============================================================== */
+static int
+upf_reset_chip(struct usbgem_dev *dp)
+{
+ int i;
+ uint8_t val;
+ int err;
+ struct upf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+ bzero(lp->mac_addr, sizeof (lp->mac_addr));
+
+ lp->ec[1] = 0;
+ OUTB(dp, EC1, EC1_RM, &err, usberr);
+
+ for (i = 0; i < 1000; i++) {
+ INB(dp, EC1, &val, &err, usberr);
+ if ((val & EC1_RM) == 0) {
+ lp->ec[1] = val;
+ return (USB_SUCCESS);
+ }
+ drv_usecwait(10);
+ }
+
+ /* time out */
+ cmn_err(CE_WARN, "!%s: failed to reset: timeout", dp->name);
+ return (USB_FAILURE);
+
+usberr:
+ cmn_err(CE_NOTE, "!%s: %s: usberr detected", dp->name, __func__);
+ return (USB_FAILURE);
+}
+
+/*
+ * Setup an986/adm8511/adm8513/adm8515
+ */
+static int
+upf_init_chip(struct usbgem_dev *dp)
+{
+ uint64_t zero64 = 0;
+ int err = USB_SUCCESS;
+ struct upf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /* ethernet control register 0 */
+ lp->ec[0] |= EC0_RXSA | EC0_RXCS;
+ OUTB(dp, EC0, lp->ec[0], &err, usberr);
+
+ /* ethernet control reg1: will be set later in set_rx_filter() */
+
+ /* ethernet control register 2: will be set later in set_rx_filter() */
+ INB(dp, EC2, &lp->ec[2], &err, usberr);
+ lp->ec[2] |= EC2_RXBP | EC2_EP3RC;
+#ifdef CONFIG_VLAN
+ if (dp->misc_flag & USBGEM_VLAN) {
+ lp->ec[2] |= EC2_MEPL;
+ }
+#endif
+ OUTB(dp, EC2, lp->ec[2], &err, usberr);
+
+ /* Multicast address hash: clear */
+ OUTS(dp, MA, &zero64, 8, &err, usberr);
+
+	/* Ethernet ID: will be set later in upf_set_rx_filter() */
+
+ /* PAUSE timer */
+ OUTB(dp, PAUSETIMER, 0x1f, &err, usberr);
+
+	/* receive packet number based pause control: set in upf_set_media() */
+
+	/* occupied receive FIFO based pause control: set in upf_set_media() */
+
+ /* EP1 control: default */
+
+ /* Rx FIFO control */
+ if (lp->chip_type != CHIP_AN986) {
+ /* use 24K internal sram, 16pkts in fifo */
+ OUTB(dp, RXFC, 0, &err, usberr);
+ }
+
+	/* BIST control: do nothing */
+ err = upf_set_media(dp);
+ CHECK_AND_JUMP(err, usberr);
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: end (success)", dp->name, __func__));
+ return (USB_SUCCESS);
+
+usberr:
+ cmn_err(CE_NOTE, "!%s: %s: usberr(%d) detected",
+ dp->name, __func__, err);
+ return (err);
+}
+
+static int
+upf_start_chip(struct usbgem_dev *dp)
+{
+ int err = USB_SUCCESS;
+ struct upf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /* enable RX and TX */
+ lp->ec[0] |= EC0_TXE | EC0_RXE;
+ OUTB(dp, EC0, lp->ec[0], &err, usberr);
+ return (USB_SUCCESS);
+
+usberr:
+ cmn_err(CE_WARN, "!%s: %s: usberr(%d) detected",
+ dp->name, __func__, err);
+ return (err);
+}
+
+static int
+upf_stop_chip(struct usbgem_dev *dp)
+{
+ int err;
+ struct upf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /* disable RX and TX */
+ lp->ec[0] &= ~(EC0_TXE | EC0_RXE);
+ OUTB(dp, EC0, lp->ec[0], &err, usberr);
+
+ return (USB_SUCCESS);
+
+usberr:
+ cmn_err(CE_WARN, "!%s: %s: usberr(%d) detected",
+ dp->name, __func__, err);
+ return (err);
+}
+
+static int
+upf_get_stats(struct usbgem_dev *dp)
+{
+ /* do nothing */
+ return (USB_SUCCESS);
+}
+
+static uint_t
+upf_mcast_hash(struct usbgem_dev *dp, const uint8_t *addr)
+{
+	/* the hash table is 64 (= 2^6) bits wide */
+ return (usbgem_ether_crc_le(addr) & 0x3f);
+}
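+
+/*
+ * Illustrative sketch (assumed mapping, not exercised by this driver
+ * because the hash filter is unreliable on these chips; see the XXX
+ * note in upf_set_rx_filter): a 6-bit hash value h would select bit
+ * (h % 8) of byte (h / 8) within the 8-byte MA register:
+ *
+ *	uint8_t ma[8];
+ *	uint_t h = upf_mcast_hash(dp, addr);
+ *	ma[h / 8] |= 1 << (h % 8);
+ */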
+
+static int
+upf_set_rx_filter(struct usbgem_dev *dp)
+{
+ int i;
+ int err;
+#ifdef DEBUG_LEVEL
+ uint8_t reg0;
+ uint8_t reg1;
+ uint8_t reg2;
+#endif
+ struct upf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called, rxmode:%b",
+ dp->name, __func__, dp->rxmode, RXMODE_BITS));
+
+ /* reset rx mode */
+ lp->ec[0] &= ~EC0_RXMA;
+ lp->ec[2] &= ~EC2_PROM;
+
+ if (dp->rxmode & RXMODE_PROMISC) {
+		/* promiscuous mode implies all multicast and all physical */
+ lp->ec[0] |= EC0_RXMA;
+ lp->ec[2] |= EC2_PROM;
+ } else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 0) {
+		/* XXX - the multicast hash table didn't work */
+ /* accept all multicast packets */
+ lp->ec[0] |= EC0_RXMA;
+ }
+
+ if (bcmp(dp->cur_addr.ether_addr_octet,
+ lp->mac_addr, ETHERADDRL) != 0) {
+
+ /* need to update mac address */
+ bcopy(dp->cur_addr.ether_addr_octet,
+ lp->mac_addr, ETHERADDRL);
+ OUTS(dp, EID,
+ lp->mac_addr, ETHERADDRL, &err, usberr);
+ }
+
+ /* update rx mode */
+ OUTS(dp, EC0, lp->ec, 3, &err, usberr);
+
+#if DEBUG_LEVEL > 0
+ INB(dp, EC0, &reg0, &err, usberr);
+ INB(dp, EC1, &reg1, &err, usberr);
+ INB(dp, EC2, &reg2, &err, usberr);
+
+ cmn_err(CE_CONT, "!%s: %s: returned, ec:%b %b %b",
+ dp->name, __func__,
+ reg0, EC0_BITS, reg1, EC1_BITS, reg2, EC2_BITS);
+#endif
+ return (USB_SUCCESS);
+
+usberr:
+ cmn_err(CE_NOTE, "!%s: %s: usberr detected", dp->name, __func__);
+ return (err);
+}
+
+static int
+upf_set_media(struct usbgem_dev *dp)
+{
+ int err;
+ struct upf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ lp->ec[1] &= ~(EC1_FD | EC1_100M);
+
+ /* select duplex */
+ if (dp->full_duplex) {
+ lp->ec[1] |= EC1_FD;
+ }
+
+ /* select speed */
+ if (dp->speed == USBGEM_SPD_100) {
+ lp->ec[1] |= EC1_100M;
+ }
+
+ /* rx flow control */
+ switch (dp->flow_control) {
+ case FLOW_CONTROL_SYMMETRIC:
+ case FLOW_CONTROL_RX_PAUSE:
+ lp->ec[0] |= EC0_RXFCE;
+ break;
+
+ default:
+ lp->ec[0] &= ~EC0_RXFCE;
+ break;
+ }
+
+ /* tx flow control */
+ switch (dp->flow_control) {
+ case FLOW_CONTROL_SYMMETRIC:
+ case FLOW_CONTROL_TX_PAUSE:
+ if (lp->chip_type != CHIP_AN986) {
+ /* pegasus II has internal 24k fifo */
+ OUTB(dp, ORFBFC,
+ (12 << ORFBFC_RXS_SHIFT) | ORFBFC_FCRXS,
+ &err, usberr);
+
+			/* 16 packets can be stored in rx fifo */
+ OUTB(dp, RPNBFC_PN,
+ (8 << RPNBFC_PN_SHIFT) | RPNBFC_FCP,
+ &err, usberr);
+ } else {
+ /* an986 has external 32k fifo */
+ OUTB(dp, ORFBFC,
+ (16 << ORFBFC_RXS_SHIFT) | ORFBFC_FCRXS,
+ &err, usberr);
+
+ /* AN986 fails to link up when RPNBFC is enabled */
+ OUTB(dp, RPNBFC, 0, &err, usberr);
+ }
+ break;
+
+ default:
+ OUTB(dp, ORFBFC, 0, &err, usberr);
+ OUTB(dp, RPNBFC, 0, &err, usberr);
+ break;
+ }
+
+ /* update ether control registers */
+ OUTS(dp, EC0, lp->ec, 2, &err, usberr);
+ DPRINTF(0, (CE_CONT, "!%s: %s: returned, ec0:%b, ec1:%b",
+ dp->name, __func__, lp->ec[0], EC0_BITS, lp->ec[1], EC1_BITS));
+
+ return (USB_SUCCESS);
+
+usberr:
+	cmn_err(CE_WARN, "!%s: %s: usberr(%d) detected",
+	    dp->name, __func__, err);
+ return (err);
+}
+
+/*
+ * send/receive packet handling
+ */
+static mblk_t *
+upf_tx_make_packet(struct usbgem_dev *dp, mblk_t *mp)
+{
+ size_t len;
+ mblk_t *new;
+ mblk_t *tp;
+ uint8_t *bp;
+ uint8_t *last_pos;
+ int msglen;
+
+ DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ len = msgdsize(mp);
+ if (len < ETHERMIN) {
+ len = ETHERMIN;
+ }
+
+ /* allocate msg block */
+ msglen = len + sizeof (uint16_t);
+
+ /* avoid usb controller bug */
+ if ((msglen & 0x3f) == 0) {
+		/* add a header for an additional 0-length usb message */
+ msglen += sizeof (uint16_t);
+ }
+
+ if ((new = allocb(msglen, 0)) == NULL) {
+ return (NULL);
+ }
+
+ /* copy contents of the buffer */
+ new->b_wptr = new->b_rptr + msglen;
+ bp = new->b_rptr;
+
+	/* the nic requires a two-byte little-endian packet length header */
+ bp[0] = (uint8_t)len;
+ bp[1] = (uint8_t)(len >> 8);
+ bp += sizeof (uint16_t);
+
+ /* copy the payload */
+ for (tp = mp; tp; tp = tp->b_cont) {
+ len = tp->b_wptr - tp->b_rptr;
+ if (len > 0) {
+ bcopy(tp->b_rptr, bp, len);
+ bp += len;
+ }
+ }
+
+	/* zero the ethernet padding and the extra usb header, if any */
+ last_pos = new->b_wptr;
+ while (bp < last_pos) {
+ *bp++ = 0;
+ }
+
+ return (new);
+}
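+
+/*
+ * Resulting tx buffer layout (the length is little-endian; the
+ * trailing padding covers both the ETHERMIN shortfall and, when the
+ * total would otherwise be a multiple of 64 bytes, the extra 2-byte
+ * header added above):
+ *
+ *	+--------+--------+------------------+--------------+
+ *	| len lo | len hi |     payload      | zero padding |
+ *	+--------+--------+------------------+--------------+
+ */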
+
+static void
+upf_dump_packet(struct usbgem_dev *dp, uint8_t *bp, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i += 8, bp += 8) {
+ cmn_err(CE_CONT, "%02x %02x %02x %02x %02x %02x %02x %02x",
+ bp[0], bp[1], bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]);
+ }
+}
+
+static mblk_t *
+upf_rx_make_packet(struct usbgem_dev *dp, mblk_t *mp)
+{
+ uint8_t *p;
+ uint16_t rxhd;
+ uint_t len;
+ uint8_t rsr;
+ struct upf_dev *lp = dp->private;
+
+ ASSERT(mp != NULL);
+
+#ifdef DEBUG_LEVEL
+ len = msgdsize(mp);
+	DPRINTF(2, (CE_CONT, "!%s: time:%d %s: len:%d cont:%p",
+ dp->name, ddi_get_lbolt(), __func__, len, mp->b_cont));
+
+ if (upf_debug > 3) {
+ upf_dump_packet(dp, mp->b_rptr, max(6, len));
+ }
+#endif
+ /* get the length of Rx packet */
+ p = mp->b_wptr - 4;
+ rsr = p[3];
+ if (lp->chip_type == CHIP_ADM8513) {
+		/* rx packets from the ADM8513 have a two-byte header; remove it */
+ p = mp->b_rptr;
+ len = ((p[1] << 8) | p[0]) & 0x0fff;
+ mp->b_rptr += 2;
+ } else {
+ len = (((p[1] << 8) | p[0]) & 0x0fff) - ETHERFCSL - 4;
+ }
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: rsr:%b len:%d",
+ dp->name, __func__, rsr, RSR_BITS, len));
+
+	/* check if an error happened */
+ if (rsr & RSR_ERRORS) {
+ DPRINTF(0, (CE_CONT, "!%s: rsr:%b", dp->name, rsr, RSR_BITS));
+ if (rsr & (RSR_CRC | RSR_DRIBBLE)) {
+ dp->stats.frame++;
+ }
+ if (rsr & RSR_LONG) {
+ dp->stats.frame_too_long++;
+ }
+ if (rsr & RSR_RUNT) {
+ dp->stats.runt++;
+ }
+
+ dp->stats.errrcv++;
+ return (NULL);
+ }
+#ifndef CONFIG_VLAN
+ /* check packet size */
+ if (len > ETHERMAX) {
+ /* too long */
+ dp->stats.frame_too_long++;
+ dp->stats.errrcv++;
+ return (NULL);
+ } else if (len < ETHERMIN) {
+ dp->stats.runt++;
+ dp->stats.errrcv++;
+ return (NULL);
+ }
+#endif
+	/* remove trailing crc and rx status fields */
+ mp->b_wptr = mp->b_rptr + len;
+ ASSERT(mp->b_next == NULL);
+ return (mp);
+}
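+
+/*
+ * Rx layout sketch, as interpreted above: every packet carries a
+ * 4-byte trailer whose first two bytes hold a 12-bit byte count and
+ * whose last byte is the RSR (the third byte is not used here).  On
+ * chips other than the ADM8513 that count covers the FCS and the
+ * trailer itself, hence the "- ETHERFCSL - 4" above; the ADM8513
+ * additionally prepends a 2-byte length header, from which the
+ * payload length is taken instead.
+ */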
+
+/*
+ * Device-dependent interrupt handler
+ */
+static void
+upf_interrupt(struct usbgem_dev *dp, mblk_t *mp)
+{
+ uint8_t *bp;
+ struct upf_dev *lp = dp->private;
+
+ bp = mp->b_rptr;
+
+ DPRINTF(2, (CE_CONT,
+ "!%s: %s: size:%d, %02x %02x %02x %02x %02x %02x %02x %02x",
+ dp->name, __func__, mp->b_wptr - mp->b_rptr,
+ bp[0], bp[1], bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]));
+
+ if ((lp->last_link_state ^ bp[5]) & 1) {
+ DPRINTF(1, (CE_CONT, "!%s:%s link status changed:",
+ dp->name, __func__));
+ usbgem_mii_update_link(dp);
+ }
+
+ lp->last_link_state = bp[5] & 1;
+}
+
+/*
+ * MII Interfaces
+ */
+static uint16_t
+upf_mii_read(struct usbgem_dev *dp, uint_t index, int *errp)
+{
+ uint8_t phyctrl;
+ uint16_t val;
+ int i;
+
+ DPRINTF(4, (CE_CONT, "!%s: %s: called, ix:%d",
+ dp->name, __func__, index));
+ ASSERT(index >= 0 && index < 32);
+
+ *errp = USB_SUCCESS;
+
+ /* set PHYADDR */
+ OUTB(dp, PHYA, dp->mii_phy_addr, errp, usberr);
+
+ /* Initiate MII read transaction */
+ OUTB(dp, PHYAC, index | PHYAC_RDPHY, errp, usberr);
+
+ for (i = 0; i < 100; i++) {
+ INB(dp, PHYAC, &phyctrl, errp, usberr);
+ if (phyctrl & PHYAC_DO) {
+ /* done */
+ INW(dp, PHYD, &val, errp, usberr);
+ DPRINTF(4, (CE_CONT, "!%s: %s: return %04x",
+ dp->name, __func__, val));
+ return (val);
+ }
+ drv_usecwait(10);
+ }
+ /* timeout */
+ cmn_err(CE_WARN, "!%s: %s: timeout detected", dp->name, __func__);
+ *errp = USB_FAILURE;
+ return (0);
+
+usberr:
+ cmn_err(CE_CONT,
+ "!%s: %s: usberr(%d) detected", dp->name, __func__, *errp);
+ return (0);
+}
+
+static void
+upf_mii_write(struct usbgem_dev *dp, uint_t index, uint16_t val, int *errp)
+{
+ int i;
+ uint8_t phyctrl;
+
+ DPRINTF(4, (CE_CONT, "!%s: %s called index:%d val:0x%04x",
+ dp->name, __func__, index, val));
+ ASSERT(index >= 0 && index < 32);
+
+ *errp = USB_SUCCESS;
+
+ OUTW(dp, PHYD, val, errp, usberr);
+ OUTB(dp, PHYA, dp->mii_phy_addr, errp, usberr);
+ OUTB(dp, PHYAC, index | PHYAC_WRPHY, errp, usberr);
+
+ for (i = 0; i < 100; i++) {
+ INB(dp, PHYAC, &phyctrl, errp, usberr);
+ if (phyctrl & PHYAC_DO) {
+ /* done */
+ return;
+ }
+ drv_usecwait(10);
+ }
+
+ /* time out */
+ cmn_err(CE_WARN, "!%s: %s: timeout detected", dp->name, __func__);
+ *errp = USB_FAILURE;
+ return;
+
+usberr:
+ cmn_err(CE_CONT,
+ "!%s: %s: usberr(%d) detected", dp->name, __func__, *errp);
+}
+
+
+static int
+upf_enable_phy(struct usbgem_dev *dp)
+{
+ uint8_t val;
+ int err;
+ struct upf_dev *lp = dp->private;
+
+ /*
+ * first, try to enable internal phy
+ */
+ INB(dp, IPHYC, &val, &err, usberr);
+ val = (val | IPHYC_EPHY) & ~IPHYC_PHYR;
+ OUTB(dp, IPHYC, val, &err, usberr);
+
+ INB(dp, IPHYC, &val, &err, usberr);
+ DPRINTF(0, (CE_CONT, "!%s: %s: IPHYC: %b",
+ dp->name, __func__, val, IPHYC_BITS));
+ if (val) {
+ /* reset internal phy */
+ OUTB(dp, IPHYC, val | IPHYC_PHYR, &err, usberr);
+ OUTB(dp, IPHYC, val, &err, usberr);
+ delay(drv_usectohz(10000));
+
+ /* identify the chip generation */
+ OUTB(dp, 0x83, 0xa5, &err, usberr);
+ INB(dp, 0x83, &val, &err, usberr);
+ if (val == 0xa5) {
+ lp->chip_type = CHIP_ADM8513;
+ } else {
+ /* adm8511 or adm8515 */
+ lp->chip_type = CHIP_ADM8511;
+ }
+ dp->ugc.usbgc_mii_hw_link_detection = B_TRUE;
+ } else {
+ /*
+		 * This should be an AN986, which has no internal PHY.
+		 * We need to set up the AN986 gpio ports, which are
+		 * connected to the external PHY control pins.
+ */
+ lp->chip_type = CHIP_AN986;
+
+ /* reset external phy */
+ /* output port#0 L, port#1 L */
+ OUTB(dp, GPIO10, GPIO10_0O | GPIO10_0OE, &err, usberr);
+
+ /* output port#0 H, port#1 L */
+ OUTB(dp, GPIO10,
+ GPIO10_0O | GPIO10_0OE | GPIO10_1OE, &err, usberr);
+
+ /* hw link detection doesn't work correctly */
+ dp->ugc.usbgc_mii_hw_link_detection = B_FALSE;
+ }
+
+ return (USB_SUCCESS);
+
+usberr:
+ cmn_err(CE_NOTE, "!%s: %s: usberr detected", dp->name, __func__);
+ return (USB_FAILURE);
+}
+
+static int
+upf_mii_probe(struct usbgem_dev *dp)
+{
+ int err;
+ uint16_t val;
+ struct upf_dev *lp = dp->private;
+
+ if (!lp->phy_init_done) {
+ upf_enable_phy(dp);
+ lp->phy_init_done = B_TRUE;
+ }
+
+ return (usbgem_mii_probe_default(dp));
+}
+
+static int
+upf_mii_init(struct usbgem_dev *dp)
+{
+ uint16_t val;
+ int err = USB_SUCCESS;
+ struct upf_dev *lp = dp->private;
+
+ if (!lp->phy_init_done) {
+ upf_enable_phy(dp);
+ }
+ lp->phy_init_done = B_FALSE;
+
+ if (lp->chip_type == CHIP_AN986 &&
+ (lp->vid == 0x0db7 /* elecom */ ||
+ lp->vid == 0x066b /* linksys */ ||
+ lp->vid == 0x077b /* linksys */ ||
+ lp->vid == 0x2001 /* dlink */)) {
+		/* special treatment for some AN986-based products */
+ val = upf_mii_read(dp, 0x1b, &err) | 0x4;
+ upf_mii_write(dp, 0x1b, val, &err);
+ }
+ return (err);
+}
+
+/* ======================================================== */
+/*
+ * OS-dependent (device driver DKI) routines
+ */
+/* ======================================================== */
+static uint16_t
+upf_read_eeprom(struct usbgem_dev *dp, int index, int *errp)
+{
+ int i;
+ uint8_t eectrl;
+ uint16_t data;
+
+ *errp = USB_SUCCESS;
+
+ OUTB(dp, EECTRL, 0, errp, usberr);
+
+ OUTB(dp, EEOFFSET, index, errp, usberr);
+ OUTB(dp, EECTRL, EECTRL_RD, errp, usberr);
+
+ for (i = 0; i < 100; i++) {
+ INB(dp, EECTRL, &eectrl, errp, usberr);
+ if (eectrl & EECTRL_DONE) {
+ INW(dp, EEDATA, &data, errp, usberr);
+ return (data);
+ }
+ drv_usecwait(10);
+ }
+
+ /* time out */
+ *errp = USB_FAILURE;
+ return (0);
+
+usberr:
+ cmn_err(CE_CONT,
+ "!%s: %s: usberr(%d) detected", dp->name, __func__, *errp);
+ return (0);
+}
+
+static void
+upf_eeprom_dump(struct usbgem_dev *dp, int size)
+{
+ int i;
+ int err;
+
+ cmn_err(CE_CONT, "!%s: %s dump:", dp->name, __func__);
+
+ for (i = 0; i < size; i += 4) {
+ cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x",
+ i*2,
+ upf_read_eeprom(dp, i + 0, &err),
+ upf_read_eeprom(dp, i + 1, &err),
+ upf_read_eeprom(dp, i + 2, &err),
+ upf_read_eeprom(dp, i + 3, &err));
+ }
+}
+
+static int
+upf_attach_chip(struct usbgem_dev *dp)
+{
+ int i;
+ int err;
+ uint16_t val;
+ uint8_t *mac;
+ struct upf_dev *lp = dp->private;
+
+ /*
+ * Read mac address from EEPROM
+ */
+ mac = dp->dev_addr.ether_addr_octet;
+ for (i = 0; i < 3; i++) {
+ val = upf_read_eeprom(dp, i, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ mac[i*2+0] = (uint8_t)val;
+ mac[i*2+1] = (uint8_t)(val >> 8);
+ }
+
+ DPRINTF(0, (CE_CONT,
+	    "!%s: %s: mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ dp->name, __func__,
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]));
+
+ dp->misc_flag = 0;
+#ifdef CONFIG_VLAN
+ dp->misc_flag |= USBGEM_VLAN;
+#endif
+#if DEBUG_LEVEL > 3
+ upf_eeprom_dump(dp, 0x80);
+#endif
+ return (USB_SUCCESS);
+
+usberr:
+ cmn_err(CE_WARN, "!%s: %s: usb error detected", dp->name, __func__);
+ return (USB_FAILURE);
+}
+
+static int
+upfattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int i;
+ ddi_iblock_cookie_t c;
+ int ret;
+ int unit;
+ uint32_t tcr;
+ int len;
+ const char *drv_name;
+ struct usbgem_dev *dp;
+ void *base;
+ struct usbgem_conf *ugcp;
+ struct upf_dev *lp;
+
+ unit = ddi_get_instance(dip);
+ drv_name = ddi_driver_name(dip);
+
+ DPRINTF(3, (CE_CONT, "!%s%d: %s: called, cmd:%d",
+ drv_name, unit, __func__, cmd));
+
+ if (cmd == DDI_ATTACH) {
+ /*
+		 * construct usbgem configuration
+ */
+ ugcp = kmem_zalloc(sizeof (*ugcp), KM_SLEEP);
+
+ /* name */
+ sprintf(ugcp->usbgc_name, "%s%d", drv_name, unit);
+ ugcp->usbgc_ppa = unit;
+
+ ugcp->usbgc_ifnum = 0;
+ ugcp->usbgc_alt = 0;
+
+ ugcp->usbgc_tx_list_max = 16;
+
+ ugcp->usbgc_rx_header_len = 4;
+ ugcp->usbgc_rx_list_max = 64;
+
+ /* time out parameters */
+ ugcp->usbgc_tx_timeout = USBGEM_TX_TIMEOUT;
+ ugcp->usbgc_tx_timeout_interval = USBGEM_TX_TIMEOUT_INTERVAL;
+
+ /* flow control */
+		ugcp->usbgc_flow_control = FLOW_CONTROL_RX_PAUSE;
+
+ /* MII timeout parameters */
+ ugcp->usbgc_mii_link_watch_interval = ONESEC;
+ ugcp->usbgc_mii_an_watch_interval = ONESEC/5;
+ ugcp->usbgc_mii_reset_timeout = MII_RESET_TIMEOUT; /* 1 sec */
+ ugcp->usbgc_mii_an_timeout = MII_AN_TIMEOUT; /* 5 sec */
+ ugcp->usbgc_mii_an_wait = MII_AN_TIMEOUT/2;
+ ugcp->usbgc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT;
+ ugcp->usbgc_mii_an_delay = ONESEC/10;
+
+ ugcp->usbgc_mii_linkdown_action = MII_ACTION_RESET;
+ ugcp->usbgc_mii_linkdown_timeout_action = MII_ACTION_RESET;
+ ugcp->usbgc_mii_dont_reset = B_FALSE;
+
+ /* I/O methods */
+
+ /* mac operation */
+ ugcp->usbgc_attach_chip = &upf_attach_chip;
+ ugcp->usbgc_reset_chip = &upf_reset_chip;
+ ugcp->usbgc_init_chip = &upf_init_chip;
+ ugcp->usbgc_start_chip = &upf_start_chip;
+ ugcp->usbgc_stop_chip = &upf_stop_chip;
+ ugcp->usbgc_multicast_hash = &upf_mcast_hash;
+
+ ugcp->usbgc_set_rx_filter = &upf_set_rx_filter;
+ ugcp->usbgc_set_media = &upf_set_media;
+ ugcp->usbgc_get_stats = &upf_get_stats;
+ ugcp->usbgc_interrupt = &upf_interrupt;
+
+ /* packet operation */
+ ugcp->usbgc_tx_make_packet = &upf_tx_make_packet;
+ ugcp->usbgc_rx_make_packet = &upf_rx_make_packet;
+
+ /* mii operations */
+ ugcp->usbgc_mii_probe = &upf_mii_probe;
+ ugcp->usbgc_mii_init = &upf_mii_init;
+ ugcp->usbgc_mii_config = &usbgem_mii_config_default;
+ ugcp->usbgc_mii_read = &upf_mii_read;
+ ugcp->usbgc_mii_write = &upf_mii_write;
+
+ /* mtu */
+ ugcp->usbgc_min_mtu = ETHERMTU;
+ ugcp->usbgc_max_mtu = ETHERMTU;
+ ugcp->usbgc_default_mtu = ETHERMTU;
+
+ lp = kmem_zalloc(sizeof (struct upf_dev), KM_SLEEP);
+
+ lp->vid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "usb-vendor-id", -1);
+ lp->pid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "usb-product-id", -1);
+
+ dp = usbgem_do_attach(dip, ugcp, lp, sizeof (struct upf_dev));
+
+ kmem_free(ugcp, sizeof (*ugcp));
+
+ if (dp != NULL) {
+ return (DDI_SUCCESS);
+ }
+
+err_free_mem:
+ kmem_free(lp, sizeof (struct upf_dev));
+err_close_pipe:
+err:
+ return (DDI_FAILURE);
+ }
+ if (cmd == DDI_RESUME) {
+ dp = USBGEM_GET_DEV(dip);
+ lp = dp->private;
+ lp->phy_init_done = B_FALSE;
+
+ return (usbgem_resume(dip));
+ }
+ return (DDI_FAILURE);
+}
+
+static int
+upfdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int ret;
+
+ if (cmd == DDI_DETACH) {
+ ret = usbgem_do_detach(dip);
+ if (ret != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+ }
+ if (cmd == DDI_SUSPEND) {
+ return (usbgem_suspend(dip));
+ }
+ return (DDI_FAILURE);
+}
+
+/* ======================================================== */
+/*
+ * OS-dependent (loadable streams driver) routines
+ */
+/* ======================================================== */
+#ifdef USBGEM_CONFIG_GLDv3
+USBGEM_STREAM_OPS(upf_ops, upfattach, upfdetach);
+#else
+static struct module_info upfminfo = {
+ 0, /* mi_idnum */
+ "upf", /* mi_idname */
+ 0, /* mi_minpsz */
+ ETHERMTU, /* mi_maxpsz */
+ 32*1024, /* mi_hiwat */
+ 1, /* mi_lowat */
+};
+
+static struct qinit upfrinit = {
+ (int (*)()) NULL, /* qi_putp */
+ usbgem_rsrv, /* qi_srvp */
+ usbgem_open, /* qi_qopen */
+ usbgem_close, /* qi_qclose */
+ (int (*)()) NULL, /* qi_qadmin */
+ &upfminfo, /* qi_minfo */
+ NULL /* qi_mstat */
+};
+
+static struct qinit upfwinit = {
+ usbgem_wput, /* qi_putp */
+ usbgem_wsrv, /* qi_srvp */
+ (int (*)()) NULL, /* qi_qopen */
+ (int (*)()) NULL, /* qi_qclose */
+ (int (*)()) NULL, /* qi_qadmin */
+ &upfminfo, /* qi_minfo */
+ NULL /* qi_mstat */
+};
+
+static struct streamtab upf_info = {
+ &upfrinit, /* st_rdinit */
+ &upfwinit, /* st_wrinit */
+ NULL, /* st_muxrinit */
+ NULL /* st_muxwrinit */
+};
+
+static struct cb_ops cb_upf_ops = {
+ nulldev, /* cb_open */
+ nulldev, /* cb_close */
+ nodev, /* cb_strategy */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ nodev, /* cb_read */
+ nodev, /* cb_write */
+ nodev, /* cb_ioctl */
+ nodev, /* cb_devmap */
+ nodev, /* cb_mmap */
+ nodev, /* cb_segmap */
+ nochpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ &upf_info, /* cb_stream */
+ D_MP /* cb_flag */
+};
+
+static struct dev_ops upf_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ usbgem_getinfo, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ upfattach, /* devo_attach */
+ upfdetach, /* devo_detach */
+ nodev, /* devo_reset */
+ &cb_upf_ops, /* devo_cb_ops */
+ NULL, /* devo_bus_ops */
+ usbgem_power, /* devo_power */
+#if DEVO_REV >= 4
+ usbgem_quiesce, /* devo_quiesce */
+#endif
+
+};
+#endif
+static struct modldrv modldrv = {
+ &mod_driverops, /* Type of module. This one is a driver */
+ ident,
+ &upf_ops, /* driver ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, &modldrv, NULL
+};
+
+/* ======================================================== */
+/*
+ * _init : done
+ */
+/* ======================================================== */
+int
+_init(void)
+{
+ int status;
+
+ DPRINTF(2, (CE_CONT, "!upf: _init: called"));
+
+ status = usbgem_mod_init(&upf_ops, "upf");
+ if (status != DDI_SUCCESS) {
+ return (status);
+ }
+ status = mod_install(&modlinkage);
+ if (status != DDI_SUCCESS) {
+ usbgem_mod_fini(&upf_ops);
+ }
+ return (status);
+}
+
+/*
+ * _fini : done
+ */
+int
+_fini(void)
+{
+ int status;
+
+ DPRINTF(2, (CE_CONT, "!upf: _fini: called"));
+ status = mod_remove(&modlinkage);
+ if (status == DDI_SUCCESS) {
+ usbgem_mod_fini(&upf_ops);
+ }
+ return (status);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
diff --git a/usr/src/uts/common/io/urf/rtl8150reg.h b/usr/src/uts/common/io/urf/rtl8150reg.h
new file mode 100644
index 0000000000..7cba53356e
--- /dev/null
+++ b/usr/src/uts/common/io/urf/rtl8150reg.h
@@ -0,0 +1,218 @@
+/*
+ * @(#)rtl8150reg.h 1.1 04/09/16
+ * Macro definitions for Realtek 8150 USB to fast ethernet controller
+ * based on Realtek RTL8150 data sheet
+ * This file is public domain. Coded by M.Murayama (KHF04453@nifty.com)
+ */
+
+/*
+ * Register offset
+ */
+#define IDR 0x0120 /* Base of ID registers */
+#define MAR 0x0126 /* Base of multicast registers */
+#define CR 0x012e /* Command register */
+#define TCR 0x012f /* Transmit Configuration register */
+#define RCR 0x0130 /* Receive Configuration register */
+#define TSR 0x0132 /* Transmit Status register */
+#define RSR 0x0133 /* Receive Status register */
+#define CON0 0x0135 /* Configuration register 0 */
+#define CON1 0x0136 /* Configuration register 1 */
+#define MSR 0x0137 /* Media Status register */
+#define PHYADD 0x0138 /* PHY address register */
+#define PHYDAT 0x0139 /* PHY data register */
+#define PHYCNT 0x013b /* PHY control register */
+#define GPPC 0x013d /* General purpose pin control */
+#define WAKECNT 0x013e /* Wake up event control */
+#define BMCR 0x0140 /* Basic Mode Control register */
+#define BMSR 0x0142 /* Basic Mode Status register */
+#define ANAR 0x0144 /* Auto Negotiation Advertisement register */
+#define ANLP 0x0146 /* Auto Negotiation Link Partner register */
+#define ANER 0x0148 /* Auto Negotiation Expansion register */
+#define NWAYT 0x014a /* Nway test register */
+#define CSCR 0x014c /* CS configuration register */
+#define CRC0 0x014e /* Power management register for wakeup frame0 */
+#define CRC1 0x0150 /* Power management register for wakeup frame1 */
+#define CRC2 0x0152 /* Power management register for wakeup frame2 */
+#define CRC3 0x0154 /* Power management register for wakeup frame3 */
+#define CRC4 0x0156 /* Power management register for wakeup frame4 */
+#define BYTEMASK0 0x0158 /* Power management wakeup frame0 bytemask */
+#define BYTEMASK1 0x0160 /* Power management wakeup frame1 bytemask */
+#define BYTEMASK2 0x0168 /* Power management wakeup frame2 bytemask */
+#define BYTEMASK3 0x0170 /* Power management wakeup frame3 bytemask */
+#define BYTEMASK4 0x0178 /* Power management wakeup frame4 bytemask */
+#define PHY1 0x0180 /* PHY parameter 1 */
+#define PHY2 0x0184 /* PHY parameter 2 */
+#define TW1 0x0186 /* Twister parameter 1 */
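+
+/*
+ * Note: these offsets are the chip's internal register addresses;
+ * the driver passes them as the wValue of its vendor-specific
+ * control transfers (see the OUTB/OUTW/IN macros in urf_usbgem.c).
+ */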
+
+/*
+ * Bit field definitions
+ */
+/* CR : Command register (uint8_t) */
+#define CR_WEPROM 0x20 /* EEPROM write enable */
+#define CR_SOFT_RST 0x10 /* Reset */
+#define CR_RE 0x08 /* Ethernet receive enable */
+#define CR_TE 0x04 /* Ethernet transmit enable */
+#define CR_EP3CLREN 0x02 /* clear performance counter after EP3 */
+#define CR_AUTOLOAD 0x01 /* autoload contents of 93c46 */
+
+#define CR_BITS "\020\006WEPROM\005SOFT_RST\004RE\003TE\002EP3CLREN\001AUTOLOAD"
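+
+/*
+ * The *_BITS strings in this header are cmn_err(9F) %b format
+ * descriptions: the leading octal character gives the output base
+ * (\020 = 16, hexadecimal) and each following entry is a 1-origin
+ * bit number followed by that bit's name.  For example
+ * (illustrative):
+ *
+ *	cmn_err(CE_CONT, "cr:%b", 0x14, CR_BITS);
+ *
+ * would print roughly "cr:14<SOFT_RST,TE>".
+ */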
+
+/* TCR: Transmit Configuration register */
+#define TCR_TXRR 0xc0 /* Tx retry count */
+#define TCR_TXRR_SHIFT 6
+#define TCR_IFG 0x18 /* Interframe Gap */
+#define TCR_IFG_SHIFT 3
+#define TCR_IFG_802_3 (3 << TCR_IFG_SHIFT) /* 802.3 standard */
+#define TCR_NOCRC 0x01 /* Inhibit Appending CRC */
+
+#define TCR_BITS "\020\001NOCRC"
+
+/* Receive Configuration register */
+#define	RCR_TAIL	0x0080	/* forward rx header to host in CRC field */
+#define RCR_AER 0x0040 /* Accept Error packet */
+#define RCR_AR 0x0020 /* Accept runt */
+#define RCR_AM 0x0010 /* Accept multicast */
+#define RCR_AB 0x0008 /* Accept broadcast */
+#define RCR_AD 0x0004 /* Accept physical match */
+#define RCR_AAM 0x0002 /* Accept all Multicast */
+#define RCR_AAP 0x0001 /* Accept all physical */
+
+#define RCR_ACCEPT_MODE \
+ (RCR_AER | RCR_AR | RCR_AM | RCR_AB | RCR_AD | RCR_AAM | RCR_AAP)
+
+#define RCR_BITS \
+ "\020\010TAIL\007AER\006AR\005AM\004AB\003AD\002AAM\001AAP"
+
+/* Transmit Status register */
+
+#define TSR_ECOL 0x20 /* excessive collision indication */
+#define TSR_LCOL 0x10 /* late collision indication */
+#define TSR_LOSS_CRS 0x08 /* lost of carrier indication */
+#define TSR_JBR 0x04 /* jabber time out indication */
+#define TSR_BUF_EMPTY 0x02 /* Tx buffer is empty */
+#define TSR_BUF_FULL 0x01 /* Tx buffer is full */
+
+#define TSR_BITS \
+ "\020" \
+ "\006ECOL" \
+ "\005LCOL" \
+ "\004LOSS_CRS" \
+ "\003JBR" \
+ "\002BUF_EMPTY" \
+ "\001BUF_FULL"
+
+/* Receive status register in Rx packet field */
+#define RSR_WEVENT 0x80 /* Wakeup event indication */
+#define RSR_RX_BUF_FULL 0x40 /* Receive buffer full indication */
+#define RSR_LKCHG 0x20 /* Link change indication */
+#define RSR_RUNT 0x10 /* short packet indication */
+#define RSR_LONG 0x08 /* Long packet indication*/
+#define RSR_CRC 0x04 /* CRC error indication*/
+#define RSR_FAE 0x02 /* Frame alignment error */
+#define RSR_ROK 0x01 /* Receive OK indication */
+
+#define RSR_ERRS (RSR_RUNT | RSR_LONG | RSR_CRC | RSR_FAE)
+#define RSR_BITS \
+ "\020" \
+ "\010WEVENT" \
+ "\007RX_BUF_FULL" \
+ "\006LKCHG" \
+ "\005RUNT" \
+ "\004LONG" \
+ "\003CRC" \
+ "\002FAE" \
+ "\001ROK"
+
+/* Config 0 */
+
+#define CON0_SUSLED 0x80
+#define CON0_PARM_EN 0x40 /* parameter enable */
+#define CON0_LDPS 0x08
+#define CON0_MSEL 0x04 /* media select 1:MII, 0:auto */
+#define CON0_LEDS 0x03 /* LED pattern */
+
+/* Config 1 */
+#define	CON1_BWF	0x40	/* Broadcast wakeup function 1:on 0:off */
+#define	CON1_MWF	0x20	/* Multicast wakeup function 1:on 0:off */
+#define	CON1_UWF	0x10	/* Unicast wakeup function 1:on 0:off */
+#define	CON1_LONGWF1	0x02	/* */
+#define	CON1_LONGWF0	0x01	/* */
+
+
+/* MSR : Media Status register */
+#define MSR_TXFCE 0x80 /* Tx Flow control enable */
+#define MSR_RXFCE 0x40 /* Rx Flow control enable */
+#define MSR_DUPLEX 0x10 /* full duplex */
+#define MSR_SPEED_100 0x08 /* 100Mbps mode */
+#define MSR_LINK 0x04 /* link status */
+#define MSR_TXPF 0x02 /* 8150 sends pause packet */
+#define	MSR_RXPF	0x01	/* 8150 is in backoff state */
+
+#define MSR_BITS \
+ "\020" \
+ "\010TXFCE" \
+ "\007RXFCE" \
+ "\005DUPLEX" \
+ "\004SPEED_100" \
+ "\003LINK" \
+ "\002TXPF" \
+ "\001RXPF"
+
+/* MII PHY Address */
+#define PHYADD_MASK 0x1f
+
+/* MII PHY Control */
+#define PHYCNT_OWN 0x40 /* 8150 owns:1 not owns:0 */
+#define PHYCNT_RWCR 0x20 /* write:1 read:0 */
+#define PHYCNT_PHYOFF 0x1f
+
+/* BMCR (almost the same as the MII_CONTROL register) */
+#define BMCR_RESET 0x8000 /* PHY reset */
+#define BMCR_Spd_Set 0x2000 /* 100Mbps */
+#define BMCR_ANE 0x1000 /* auto negotiation enable */
+#define BMCR_RSA 0x0200 /* restart auto negotiation */
+#define	BMCR_duplex	0x0100	/* full duplex */
+
+/* Basic mode status register */
+/* Auto-negotiation Advertisement register */
+/* Auto-negotiation Link Partner Ability register */
+/* Auto-negotiation Expansion register */
+
+/* Nway test register */
+#define NWAYT_NWLPBK 0x0080
+#define NWAYT_ENNWLE 0x0008
+#define NWAYT_FLAGABD 0x0004
+#define NWAYT_FLAGPDF 0x0002
+#define NWAYT_FLAGLSC 0x0001
+
+/* CS configuration register */
+#define CS_TESTFUN 0x8000 /* */
+#define CS_LD 0x0200 /* */
+#define CS_HEARTBEAT 0x0100 /* */
+#define CS_JBEN 0x0080 /* */
+#define CS_F_LINK100 0x0040 /* */
+#define CS_F_CONNECT 0x0020 /* */
+#define CS_CON_STATUS 0x0008 /* */
+#define CS_CON_STATUS_EN 0x0004 /* */
+#define CS_PASS_SCR 0x0001 /* bypass scramble function */
+
+/*
+ * header format of rx packet
+ */
+#define RXHD_MULT 0x8000 /* multicast packet */
+#define RXHD_PHYS 0x4000 /* physical match packet */
+#define RXHD_RUNT 0x2000 /* too short */
+#define RXHD_VALID 0x1000 /* packet is ok */
+#define RXHD_BYTECNT 0x0fff /* rx byte count */
+
+#define RXHD_BITS \
+ "\020" \
+ "\020MULT" \
+ "\017PHYS" \
+ "\016RUNT" \
+ "\015VALID"
+/*
+ * Offset to EPROM contents
+ */
+#define URF_EEPROM_BASE 0x1200
+#define EPROM_EthernetID 0x0002
diff --git a/usr/src/uts/common/io/urf/urf_usbgem.c b/usr/src/uts/common/io/urf/urf_usbgem.c
new file mode 100644
index 0000000000..f61c8e3502
--- /dev/null
+++ b/usr/src/uts/common/io/urf/urf_usbgem.c
@@ -0,0 +1,1039 @@
+/*
+ * urf_usbgem.c : Realtek RTL8150 USB to Fast Ethernet Driver for Solaris
+ *
+ * Copyright (c) 2003-2012 Masayuki Murayama. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the author nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#pragma ident "%W% %E%"
+
+/*
+ * Changelog:
+ */
+
+/*
+ * TODO
+ */
+/* ======================================================= */
+
+/*
+ * Solaris system header files and macros
+ */
+
+/* minimum kernel headers for drivers */
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/debug.h>
+#include <sys/kmem.h>
+#include <sys/modctl.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/byteorder.h>
+
+/* ethernet stuff */
+#include <sys/ethernet.h>
+
+/* interface card depend stuff */
+#include <sys/stropts.h>
+#include <sys/stream.h>
+#include <sys/strlog.h>
+#include <sys/usb/usba.h>
+#include "usbgem.h"
+#include "usbgem_mii.h"
+#include "rtl8150reg.h"
+
+char ident[] = "rtl8150 usbnic driver v" VERSION;
+
+/*
+ * Useful macros
+ */
+#define ROUNDUP2(x, y) (((x)+(y)-1) & ~((y)-1))
+#define CHECK_AND_JUMP(err, label) if (err != USB_SUCCESS) goto label
+
+/*
+ * Debugging
+ */
+#ifdef DEBUG_LEVEL
+static int urf_debug = DEBUG_LEVEL;
+#define DPRINTF(n, args) if (urf_debug > (n)) cmn_err args
+#else
+#define DPRINTF(n, args)
+#endif
+
+/*
+ * Our configuration for rtl8150
+ */
+/* timeouts */
+#define ONESEC (drv_usectohz(1*1000000))
+
+/*
+ * Local device definitions
+ */
+struct chip_info {
+ int flags;
+ char *name;
+ int type;
+};
+
+#define CHIPTABLESIZE (sizeof (chiptbl_8150) / sizeof (struct chip_info))
+
+struct urf_dev {
+ /*
+ * Misc HW information
+ */
+ struct chip_info *chip;
+ uint8_t cr;
+ uint8_t tsr;
+ uint16_t rcr;
+ uint8_t txok_cnt;
+};
+
+/*
+ * private functions
+ */
+
+/* mii operations */
+static uint16_t urf_mii_read(struct usbgem_dev *, uint_t, int *errp);
+static void urf_mii_write(struct usbgem_dev *, uint_t, uint16_t, int *errp);
+
+/* nic operations */
+static int urf_attach_chip(struct usbgem_dev *);
+static int urf_reset_chip(struct usbgem_dev *);
+static int urf_init_chip(struct usbgem_dev *);
+static int urf_start_chip(struct usbgem_dev *);
+static int urf_stop_chip(struct usbgem_dev *);
+static int urf_set_media(struct usbgem_dev *);
+static int urf_set_rx_filter(struct usbgem_dev *);
+static int urf_get_stats(struct usbgem_dev *);
+
+/* packet operations */
+static mblk_t *urf_tx_make_packet(struct usbgem_dev *, mblk_t *);
+static mblk_t *urf_rx_make_packet(struct usbgem_dev *, mblk_t *);
+
+/* =============================================================== */
+/*
+ * I/O functions
+ */
+/* =============================================================== */
+#define OUTB(dp, p, v, errp, label) \
+ if ((*(errp) = usbgem_ctrl_out_val((dp), \
+ /* bmRequestType */ USB_DEV_REQ_HOST_TO_DEV \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ USB_REQ_SET_ADDRESS, \
+ /* wValue */ (p), \
+ /* wIndex */ 0, \
+ /* wLength */ 1, \
+ /* value */ (v))) != USB_SUCCESS) goto label
+
+#define OUTW(dp, p, v, errp, label) \
+ if ((*(errp) = usbgem_ctrl_out_val((dp), \
+ /* bmRequestType */ USB_DEV_REQ_HOST_TO_DEV \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ USB_REQ_SET_ADDRESS, \
+ /* wValue */ (p), \
+ /* wIndex */ 0, \
+ /* wLength */ 2, \
+ /* value */ (v))) != USB_SUCCESS) goto label
+
+#define OUTS(dp, p, buf, len, errp, label) \
+ if ((*(errp) = usbgem_ctrl_out((dp), \
+ /* bmRequestType */ USB_DEV_REQ_HOST_TO_DEV \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ USB_REQ_SET_ADDRESS, \
+ /* wValue */ (p), \
+ /* wIndex */ 0, \
+ /* wLength */ (len), \
+	    /* buf */ (buf), \
+ /* size */ (len))) != USB_SUCCESS) goto label
+
+#define IN(dp, p, vp, errp, label) \
+ if ((*(errp) = usbgem_ctrl_in_val((dp), \
+ /* bmRequestType */ USB_DEV_REQ_DEV_TO_HOST \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ USB_REQ_SET_ADDRESS, \
+ /* wValue */ (p), \
+ /* wIndex */ 0, \
+ /* wLength */ sizeof ((*vp)), \
+ /* valuep */ (vp))) != USB_SUCCESS) goto label
+
+#define INS(dp, p, buf, len, errp, label) \
+ if ((*(errp) = usbgem_ctrl_in((dp), \
+ /* bmRequestType */ USB_DEV_REQ_DEV_TO_HOST \
+ | USB_DEV_REQ_TYPE_VENDOR | USB_DEV_REQ_RCPT_DEV, \
+ /* bRequest */ USB_REQ_SET_ADDRESS, \
+ /* wValue */ (p), \
+ /* wIndex */ 0, \
+ /* wLength */ (len), \
+	    /* buf */ (buf), \
+ /* size */ (len))) != USB_SUCCESS) goto label
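+
+/*
+ * Note: the chip reuses request code 0x05 (USB_REQ_SET_ADDRESS) as
+ * its vendor-specific register access command; because the macros
+ * above set USB_DEV_REQ_TYPE_VENDOR, these are not interpreted as
+ * standard SET_ADDRESS requests.
+ */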
+
+/* =============================================================== */
+/*
+ * variables
+ */
+/* =============================================================== */
+static int urf_ppa = 0;
+
+/* =============================================================== */
+/*
+ * Hardware manipulation
+ */
+/* =============================================================== */
+static int
+urf_reset_chip(struct usbgem_dev *dp)
+{
+ int i;
+ int err;
+ uint8_t reg;
+ struct urf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ lp->cr = 0;
+ OUTB(dp, CR, lp->cr | CR_SOFT_RST, &err, usberr);
+
+ for (i = 0; i < 100; i++) {
+ IN(dp, CR, &reg, &err, usberr);
+ if ((reg & CR_SOFT_RST) == 0) {
+ return (USB_SUCCESS);
+ }
+ }
+ /* time out */
+	cmn_err(CE_WARN, "!%s: failed to reset: timeout", dp->name);
+ return (USB_FAILURE);
+
+usberr:
+ cmn_err(CE_NOTE, "!%s: %s: usberr detected", dp->name, __func__);
+ return (USB_FAILURE);
+}
+
+/*
+ * Setup rtl8150
+ */
+static int
+urf_init_chip(struct usbgem_dev *dp)
+{
+ int i;
+ uint32_t val;
+ int err;
+ struct urf_dev *lp = dp->private;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /* ID registers: set later by urf_set_rx_filter */
+
+ /* Multicast registers: set later by urf_set_rx_filter */
+
+	/* Command register: enable Tx and Rx before writing TCR and RCR */
+ lp->cr |= CR_RE | CR_TE;
+ OUTB(dp, CR, lp->cr, &err, usberr);
+
+	/* Transmit configuration register: */
+ OUTB(dp, TCR, TCR_IFG_802_3, &err, usberr);
+
+	/* Receive configuration register: disable rx filter */
+ lp->rcr = RCR_TAIL | RCR_AER | RCR_AR;
+ OUTW(dp, RCR, lp->rcr, &err, usberr);
+#ifdef notdef
+ /* Media status register */
+ err = urf_set_media(dp);
+ CHECK_AND_JUMP(err, usberr);
+#endif
+ /* Configuration register 0: no need to change */
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: end (success)", dp->name, __func__));
+ return (USB_SUCCESS);
+
+usberr:
+ cmn_err(CE_NOTE, "!%s: %s: usberr detected", dp->name, __func__);
+ return (USB_FAILURE);
+}
+
+static int
+urf_start_chip(struct usbgem_dev *dp)
+{
+ struct urf_dev *lp = dp->private;
+
+ /* do nothing */
+ return (USB_SUCCESS);
+}
+
+static int
+urf_stop_chip(struct usbgem_dev *dp)
+{
+ return (urf_reset_chip(dp));
+}
+
+static int
+urf_get_stats(struct usbgem_dev *dp)
+{
+ /* do nothing */
+ return (USB_SUCCESS);
+}
+
+static uint_t
+urf_mcast_hash(struct usbgem_dev *dp, const uint8_t *addr)
+{
+ return (usbgem_ether_crc_be(addr));
+}
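+
+/*
+ * Unlike hash callbacks that reduce the CRC themselves, this one
+ * returns the full 32-bit big-endian CRC; urf_set_rx_filter() below
+ * takes the upper 6 bits to index the 64-bit multicast hash table.
+ */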
+
+static int
+urf_set_rx_filter(struct usbgem_dev *dp)
+{
+ int i;
+ uint16_t mode;
+ uint8_t mhash[8];
+ int err;
+ int16_t rcr;
+ struct urf_dev *lp = dp->private;
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: called, rxmode:%x",
+ dp->name, __func__, dp->rxmode));
+
+ if (lp->rcr & (RCR_AB | RCR_AD | RCR_AAM | RCR_AAP | RCR_AM)) {
+#ifdef notdef
+ /* disable rx filter before changing it. */
+ lp->rcr &= ~(RCR_AB | RCR_AD | RCR_AAM | RCR_AAP | RCR_AM);
+ OUTW(dp, RCR, lp->rcr, &err, usberr);
+#else
+		/* receive all packets while we change rx filter */
+ OUTW(dp, RCR, lp->rcr | RCR_AAM | RCR_AAP, &err, usberr);
+#endif
+ }
+
+ mode = RCR_AB /* accept broadcast */
+ | RCR_AD; /* accept physical match */
+ bzero(mhash, sizeof (mhash));
+
+ if (dp->rxmode & RXMODE_PROMISC) {
+		/* promiscuous mode implies all multicast and all physical */
+ mode |= RCR_AAM | RCR_AAP;
+ } else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 64/2) {
+ /* accept all multicast packets */
+ mode |= RCR_AAM;
+ } else if (dp->mc_count > 0) {
+ /*
+		 * make a hash table to select only the
+		 * interesting multicast addresses.
+ */
+ mode |= RCR_AM;
+ for (i = 0; i < dp->mc_count; i++) {
+ uint_t h;
+			/* the hash table is 64 (= 2^6) bits wide */
+ h = dp->mc_list[i].hash >> (32 - 6);
+ mhash[h / 8] |= 1 << (h % 8);
+ }
+ }
+ lp->rcr |= mode;
+
+ /* set mac address */
+ OUTS(dp, IDR, dp->cur_addr.ether_addr_octet, ETHERADDRL, &err, usberr);
+
+ /* set multicast hash table */
+ if (mode & RCR_AM) {
+ /* need to set up multicast hash table */
+ OUTS(dp, MAR, mhash, sizeof (mhash), &err, usberr);
+ }
+
+ OUTW(dp, RCR, lp->rcr, &err, usberr);
+
+#if DEBUG_LEVEL > 2
+ IN(dp, RCR, &rcr, &err, usberr);
+ cmn_err(CE_CONT, "!%s: %s: rcr:%b returned",
+ dp->name, __func__, rcr, RCR_BITS);
+#endif
+ return (USB_SUCCESS);
+
+usberr:
+ cmn_err(CE_NOTE, "!%s: %s: usberr detected", dp->name, __func__);
+ return (USB_FAILURE);
+}
+
+static int
+urf_set_media(struct usbgem_dev *dp)
+{
+ uint8_t new;
+ uint8_t old;
+ int err;
+ struct urf_dev *lp = dp->private;
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /* select duplex: do nothing */
+
+ /* select speed: do nothing */
+
+ /* flow control */
+ IN(dp, MSR, &old, &err, usberr);
+
+
+ /* setup flow control */
+ new = old & ~(MSR_TXFCE | MSR_RXFCE);
+ switch (dp->flow_control) {
+ case FLOW_CONTROL_SYMMETRIC:
+ new |= MSR_TXFCE | MSR_RXFCE;
+ break;
+
+ case FLOW_CONTROL_TX_PAUSE:
+ new |= MSR_TXFCE;
+ break;
+
+ case FLOW_CONTROL_RX_PAUSE:
+ new |= MSR_RXFCE;
+ break;
+
+ case FLOW_CONTROL_NONE:
+ default:
+ break;
+ }
+
+ if (new != old) {
+ OUTB(dp, MSR, new, &err, usberr);
+ }
+ DPRINTF(2, (CE_CONT, "!%s: %s: returned", dp->name, __func__));
+ return (USB_SUCCESS);
+
+usberr:
+ cmn_err(CE_NOTE, "!%s: %s: usberr detected", dp->name, __func__);
+ return (USB_FAILURE);
+}
+
+/*
+ * send/receive packet handling
+ */
+static mblk_t *
+urf_tx_make_packet(struct usbgem_dev *dp, mblk_t *mp)
+{
+ size_t len;
+ mblk_t *new;
+ mblk_t *tp;
+ uint8_t *bp;
+ uint8_t *last_pos;
+
+ len = msgdsize(mp);
+
+ if (len < ETHERMIN || mp->b_cont != NULL || (len & 0x3f) == 0) {
+ /*
+ * re-allocate mp
+ */
+ len = max(len, ETHERMIN);
+
+ if ((len & 0x3f) == 0) {
+ /* workaround for buggy USB hba */
+ len++;
+ }
+
+ if ((new = allocb(len, 0)) == NULL) {
+ return (NULL);
+ }
+
+ /* copy contents of the buffer */
+ new->b_wptr = new->b_rptr + len;
+ bp = new->b_rptr;
+ for (tp = mp; tp; tp = tp->b_cont) {
+ len = tp->b_wptr - tp->b_rptr;
+ bcopy(tp->b_rptr, bp, len);
+ bp += len;
+ }
+
+ last_pos = new->b_wptr;
+ while (bp < last_pos) {
+ *bp++ = 0;
+ }
+
+ mp = new;
+ }
+
+ return (mp);
+}
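+
+/*
+ * Design note (assumed rationale; the comment above only says
+ * "workaround for buggy USB hba"): 64 bytes is the full-speed
+ * bulk-out max packet size, so a transfer that is an exact multiple
+ * of 64 would have to be terminated by a zero-length packet, and
+ * padding by one byte sidesteps host controllers that mishandle
+ * that case.
+ */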
+
+static void
+urf_dump_packet(struct usbgem_dev *dp, uint8_t *bp, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i += 8, bp += 8) {
+ cmn_err(CE_CONT, "%02x %02x %02x %02x %02x %02x %02x %02x",
+ bp[0], bp[1], bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]);
+ }
+}
+
+static mblk_t *
+urf_rx_make_packet(struct usbgem_dev *dp, mblk_t *mp)
+{
+ uint8_t *p;
+ uint16_t rxhd;
+ uint_t len;
+
+ ASSERT(mp != NULL);
+ len = msgdsize(mp);
+#ifdef DEBUG_LEVEL
+ DPRINTF(2, (CE_CONT, "!%s: time:%d %s: len:%d cont:%p",
+ dp->name, ddi_get_lbolt(), __func__, len, mp->b_cont));
+
+ if (urf_debug > 2) {
+ urf_dump_packet(dp, mp->b_rptr, max(6, len));
+ }
+#endif
+ if (len < ETHERMIN + ETHERFCSL) {
+ /* Too short */
+ dp->stats.runt++;
+ dp->stats.errrcv++;
+ return (NULL);
+ }
+
+ /* get Rx header which is placed at tail of the packet. */
+ p = mp->b_wptr - 4;
+ rxhd = (p[1] << 8) | p[0];
+ len = rxhd & RXHD_BYTECNT;
+
+	DPRINTF(2, (CE_CONT, "!%s: %s: rxhd:%b len:%d",
+ dp->name, __func__, rxhd, RXHD_BITS, len));
+
+	/* check if an error happened */
+ if ((rxhd & (RXHD_VALID)) == 0) {
+ DPRINTF(-1, (CE_CONT, "!%s: %s: rxhd:%b",
+ dp->name, __func__, rxhd, RXHD_BITS));
+ if (rxhd & RXHD_RUNT) {
+ dp->stats.runt++;
+ }
+
+ dp->stats.errrcv++;
+ return (NULL);
+ }
+#ifdef notdef
+ /* check packet size */
+ if (len > ETHERMAX + ETHERFCSL) {
+ /* too long */
+ dp->stats.frame_too_long++;
+ dp->stats.errrcv++;
+ return (NULL);
+ } else if (len < ETHERMIN + ETHERFCSL) {
+ dp->stats.runt++;
+ dp->stats.errrcv++;
+ return (NULL);
+ }
+#endif
+	/* remove trailing crc field */
+ mp->b_wptr -= ETHERFCSL;
+ return (mp);
+}
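+
+/*
+ * Layout note: the rx status word is delivered in the tail of the
+ * packet where the FCS normally sits (usbgc_rx_header_len is 0 and
+ * urfattach notes that the rx status partially replaces the FCS),
+ * which is why exactly ETHERFCSL bytes are trimmed above.
+ */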
+
+/*
+ * MII Interfaces
+ */
+static uint16_t
+urf_mii_read(struct usbgem_dev *dp, uint_t index, int *errp)
+{
+ int reg;
+ uint16_t val;
+
+ DPRINTF(4, (CE_CONT, "!%s: %s: called, ix:%d",
+ dp->name, __func__, index));
+
+ *errp = USB_SUCCESS;
+
+ switch (index) {
+ case MII_CONTROL:
+ reg = BMCR;
+ break;
+
+ case MII_STATUS:
+ reg = BMSR;
+ break;
+
+ case MII_AN_ADVERT:
+ reg = ANAR;
+ break;
+
+ case MII_AN_LPABLE:
+ reg = ANLP;
+ break;
+
+ case MII_AN_EXPANSION:
+ reg = ANER;
+ break;
+
+ default:
+ return (0);
+ }
+
+ IN(dp, reg, &val, errp, usberr);
+
+ if (index == MII_STATUS) {
+ uint8_t msr;
+ /*
+		 * Fix up the MII status register, as the chip doesn't
+		 * provide the LINKUP and MFPRMBLSUPR bits.
+ */
+ IN(dp, MSR, &msr, errp, usberr);
+
+ val |= (MII_STATUS_MFPRMBLSUPR | MII_STATUS_LINKUP);
+ if ((msr & MSR_LINK) == 0) {
+ val &= ~MII_STATUS_LINKUP;
+ }
+ }
+
+ return (val);
+
+usberr:
+ cmn_err(CE_CONT,
+ "!%s: %s: usberr(%d) detected", dp->name, __func__, *errp);
+
+ return (0);
+}
+
+static void
+urf_mii_write(struct usbgem_dev *dp, uint_t index, uint16_t val, int *errp)
+{
+ int reg;
+
+ DPRINTF(5, (CE_CONT, "!%s: %s called", dp->name, __func__));
+
+ *errp = USB_SUCCESS;
+
+ switch (index) {
+ case MII_CONTROL:
+ reg = BMCR;
+ break;
+
+ case MII_STATUS:
+ reg = BMSR;
+ break;
+
+ case MII_AN_ADVERT:
+ reg = ANAR;
+ break;
+
+ case MII_AN_LPABLE:
+ reg = ANLP;
+ break;
+
+ case MII_AN_EXPANSION:
+ reg = ANER;
+ break;
+
+ default:
+ return;
+ }
+
+ OUTW(dp, reg, val, errp, usberr);
+usberr:
+ ;
+}
+
+/* ======================================================== */
+/*
+ * OS-dependent (device driver DKI) routines
+ */
+/* ======================================================== */
+static void
+urf_eeprom_dump(struct usbgem_dev *dp, int size)
+{
+ int i;
+ int err;
+ uint16_t w0, w1, w2, w3;
+
+ cmn_err(CE_CONT, "!%s: eeprom dump:", dp->name);
+ for (i = URF_EEPROM_BASE; i < size + URF_EEPROM_BASE; i += 8) {
+ IN(dp, i + 0, &w0, &err, usberr);
+ IN(dp, i + 2, &w1, &err, usberr);
+ IN(dp, i + 4, &w2, &err, usberr);
+ IN(dp, i + 6, &w3, &err, usberr);
+ cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x",
+ i - URF_EEPROM_BASE, w0, w1, w2, w3);
+ }
+usberr:
+ ;
+}
+
+static int
+urf_attach_chip(struct usbgem_dev *dp)
+{
+ int i;
+ uint8_t old;
+ uint_t new;
+ uint8_t reg;
+ int err;
+ struct urf_dev *lp = dp->private;
+
+ /*
+ * setup flow control bit in eeprom
+ */
+ IN(dp, URF_EEPROM_BASE + 9, &old, &err, usberr);
+
+ DPRINTF(0, (CE_CONT, "!%s: eeprom offset 9: %02x", dp->name, old));
+
+ if (dp->ugc.usbgc_flow_control != FLOW_CONTROL_NONE) {
+ /* enable PAUSE bit */
+ new = old | 0x04;
+ } else {
+ /* clear PAUSE bit */
+ new = old & ~0x04;
+ }
+ if (new != old) {
+ /* make eeprom writable */
+ OUTB(dp, CR, lp->cr | CR_WEPROM, &err, usberr);
+
+		/* eeprom allows only word access for writing */
+ IN(dp, URF_EEPROM_BASE + 8, &reg, &err, usberr);
+ new = (new << 8) | reg;
+
+ OUTW(dp, URF_EEPROM_BASE + 8, new, &err, usberr);
+
+ /* make eeprom non-writable */
+ OUTB(dp, CR, lp->cr, &err, usberr);
+ }
+
+ /*
+ * load EEPROM contents into nic
+ */
+ OUTB(dp, CR, lp->cr | CR_AUTOLOAD, &err, usberr);
+ CHECK_AND_JUMP(err, usberr);
+
+ for (i = 0; i < 100; i++) {
+ IN(dp, CR, &reg, &err, usberr);
+ if ((reg & CR_AUTOLOAD) == 0) {
+ goto autoload_done;
+ }
+ }
+ /* timeout */
+ cmn_err(CE_WARN, "%s: %s: failed to autoload: timeout",
+ dp->name, __func__);
+ goto usberr;
+
+autoload_done:
+ /*
+ * mac address in EEPROM has loaded to ID registers.
+ */
+ INS(dp, IDR, dp->dev_addr.ether_addr_octet, ETHERADDRL, &err, usberr);
+
+ /* no need to scan phy */
+ dp->mii_phy_addr = -1;
+
+#if DEBUG_LEVEL > 2
+ urf_eeprom_dump(dp, 0x80);
+#endif
+
+#ifdef CONFIG_VLAN
+ dp->misc_flag = USBGEM_VLAN;
+#endif
+ return (USB_SUCCESS);
+
+usberr:
+ cmn_err(CE_WARN, "%s: urf_attach_chip: usb error detected", dp->name);
+ return (USB_FAILURE);
+}
+
+static int
+urfattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int i;
+ ddi_iblock_cookie_t c;
+ int ret;
+ int unit;
+	struct chip_info *p = NULL;	/* chip table lookup not implemented */
+ const char *drv_name;
+ struct usbgem_dev *dp;
+ void *base;
+ struct usbgem_conf *ugcp;
+ struct urf_dev *lp;
+
+ unit = ddi_get_instance(dip);
+ drv_name = ddi_driver_name(dip);
+
+ DPRINTF(3, (CE_CONT, "!%s%d: %s: called, cmd:%d",
+	    drv_name, unit, __func__, cmd));
+
+ if (cmd == DDI_ATTACH) {
+ /*
+ * Check if the chip is supported.
+ */
+
+ /*
+		 * Check that the chip really is a realtek rtl8150
+ */
+
+ /*
+		 * construct usbgem configuration
+ */
+ ugcp = kmem_zalloc(sizeof (*ugcp), KM_SLEEP);
+
+ /* name */
+ sprintf(ugcp->usbgc_name,
+ "%s%d(ppa=%d)", drv_name, unit, urf_ppa);
+#ifdef USBGEM_CONFIG_GLDv3
+ ugcp->usbgc_ppa = urf_ppa;
+#else
+ ugcp->usbgc_ppa = unit;
+#endif
+ ugcp->usbgc_ifnum = 0;
+ ugcp->usbgc_alt = 0;
+
+ ugcp->usbgc_tx_list_max = 16;
+
+ /* the rx status partially replaces FCS */
+ ugcp->usbgc_rx_header_len = 0;
+ ugcp->usbgc_rx_list_max = 64;
+
+ /* time out parameters */
+ ugcp->usbgc_tx_timeout = USBGEM_TX_TIMEOUT;
+ ugcp->usbgc_tx_timeout_interval = ONESEC;
+
+ /* flow control */
+ ugcp->usbgc_flow_control = FLOW_CONTROL_RX_PAUSE;
+
+ /* MII timeout parameters */
+ ugcp->usbgc_mii_link_watch_interval = ONESEC;
+ ugcp->usbgc_mii_an_watch_interval = ONESEC/5;
+ ugcp->usbgc_mii_reset_timeout = MII_RESET_TIMEOUT; /* 1 sec */
+ ugcp->usbgc_mii_an_timeout = MII_AN_TIMEOUT; /* 5 sec */
+ ugcp->usbgc_mii_an_wait = (25*ONESEC)/10;
+ ugcp->usbgc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT;
+
+ ugcp->usbgc_mii_an_delay = ONESEC/10;
+ ugcp->usbgc_mii_linkdown_action = MII_ACTION_RSA;
+ ugcp->usbgc_mii_linkdown_timeout_action = MII_ACTION_RESET;
+ ugcp->usbgc_mii_dont_reset = B_FALSE;
+
+ /* I/O methods */
+
+ /* mac operation */
+ ugcp->usbgc_attach_chip = &urf_attach_chip;
+ ugcp->usbgc_reset_chip = &urf_reset_chip;
+ ugcp->usbgc_init_chip = &urf_init_chip;
+ ugcp->usbgc_start_chip = &urf_start_chip;
+ ugcp->usbgc_stop_chip = &urf_stop_chip;
+ ugcp->usbgc_multicast_hash = &urf_mcast_hash;
+
+ ugcp->usbgc_set_rx_filter = &urf_set_rx_filter;
+ ugcp->usbgc_set_media = &urf_set_media;
+ ugcp->usbgc_get_stats = &urf_get_stats;
+#ifdef notdef
+ ugcp->usbgc_interrupt = &urf_interrupt;
+#else
+ ugcp->usbgc_interrupt = NULL;
+#endif
+ /* packet operation */
+ ugcp->usbgc_tx_make_packet = &urf_tx_make_packet;
+ ugcp->usbgc_rx_make_packet = &urf_rx_make_packet;
+
+ /* mii operations */
+ ugcp->usbgc_mii_probe = &usbgem_mii_probe_default;
+ ugcp->usbgc_mii_init = &usbgem_mii_init_default;
+ ugcp->usbgc_mii_config = &usbgem_mii_config_default;
+ ugcp->usbgc_mii_read = &urf_mii_read;
+ ugcp->usbgc_mii_write = &urf_mii_write;
+
+ /* mtu */
+ ugcp->usbgc_min_mtu = ETHERMTU;
+ ugcp->usbgc_max_mtu = ETHERMTU;
+ ugcp->usbgc_default_mtu = ETHERMTU;
+
+ lp = kmem_zalloc(sizeof (struct urf_dev), KM_SLEEP);
+ lp->chip = p;
+
+ ddi_set_driver_private(dip, NULL);
+
+ dp = usbgem_do_attach(dip, ugcp, lp, sizeof (struct urf_dev));
+
+ kmem_free(ugcp, sizeof (*ugcp));
+
+ if (dp != NULL) {
+ urf_ppa++;
+ return (DDI_SUCCESS);
+ }
+
+err_free_mem:
+ kmem_free(lp, sizeof (struct urf_dev));
+err_close_pipe:
+err:
+ return (DDI_FAILURE);
+ }
+ if (cmd == DDI_RESUME) {
+ return (usbgem_resume(dip));
+ }
+ return (DDI_FAILURE);
+}
+
+static int
+urfdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int ret;
+
+ if (cmd == DDI_DETACH) {
+ ret = usbgem_do_detach(dip);
+ if (ret != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ urf_ppa--;
+ return (DDI_SUCCESS);
+ }
+ if (cmd == DDI_SUSPEND) {
+ return (usbgem_suspend(dip));
+ }
+ return (DDI_FAILURE);
+}
+
+/* ======================================================== */
+/*
+ * OS-dependent (loadable streams driver) routines
+ */
+/* ======================================================== */
+#ifdef USBGEM_CONFIG_GLDv3
+USBGEM_STREAM_OPS(urf_ops, urfattach, urfdetach);
+#else
+static struct module_info urfminfo = {
+ 0, /* mi_idnum */
+ "urf", /* mi_idname */
+ 0, /* mi_minpsz */
+ ETHERMTU, /* mi_maxpsz */
+ ETHERMTU*128, /* mi_hiwat */
+ 1, /* mi_lowat */
+};
+
+static struct qinit urfrinit = {
+ (int (*)()) NULL, /* qi_putp */
+ usbgem_rsrv, /* qi_srvp */
+ usbgem_open, /* qi_qopen */
+ usbgem_close, /* qi_qclose */
+ (int (*)()) NULL, /* qi_qadmin */
+ &urfminfo, /* qi_minfo */
+ NULL /* qi_mstat */
+};
+
+static struct qinit urfwinit = {
+ usbgem_wput, /* qi_putp */
+ usbgem_wsrv, /* qi_srvp */
+ (int (*)()) NULL, /* qi_qopen */
+ (int (*)()) NULL, /* qi_qclose */
+ (int (*)()) NULL, /* qi_qadmin */
+ &urfminfo, /* qi_minfo */
+ NULL /* qi_mstat */
+};
+
+static struct streamtab urf_info = {
+ &urfrinit, /* st_rdinit */
+ &urfwinit, /* st_wrinit */
+ NULL, /* st_muxrinit */
+ NULL /* st_muxwrinit */
+};
+
+static struct cb_ops cb_urf_ops = {
+ nulldev, /* cb_open */
+ nulldev, /* cb_close */
+ nodev, /* cb_strategy */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ nodev, /* cb_read */
+ nodev, /* cb_write */
+ nodev, /* cb_ioctl */
+ nodev, /* cb_devmap */
+ nodev, /* cb_mmap */
+ nodev, /* cb_segmap */
+ nochpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ &urf_info, /* cb_stream */
+ D_NEW|D_MP /* cb_flag */
+};
+
+static struct dev_ops urf_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ usbgem_getinfo, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ urfattach, /* devo_attach */
+ urfdetach, /* devo_detach */
+ nodev, /* devo_reset */
+ &cb_urf_ops, /* devo_cb_ops */
+ NULL, /* devo_bus_ops */
+ usbgem_power, /* devo_power */
+#if DEVO_REV >= 4
+ usbgem_quiesce, /* devo_quiesce */
+#endif
+
+};
+#endif
+
+static struct modldrv modldrv = {
+ &mod_driverops, /* Type of module. This one is a driver */
+ ident,
+ &urf_ops, /* driver ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, &modldrv, NULL
+};
+
+/* ======================================================== */
+/*
+ * _init : done
+ */
+/* ======================================================== */
+int
+_init(void)
+{
+ int status;
+
+ DPRINTF(2, (CE_CONT, "!urf: _init: called"));
+
+ status = usbgem_mod_init(&urf_ops, "urf");
+ if (status != DDI_SUCCESS) {
+ return (status);
+ }
+ status = mod_install(&modlinkage);
+ if (status != DDI_SUCCESS) {
+ usbgem_mod_fini(&urf_ops);
+ }
+ return (status);
+}
+
+/*
+ * _fini : done
+ */
+int
+_fini(void)
+{
+ int status;
+
+ DPRINTF(2, (CE_CONT, "!urf: _fini: called"));
+ status = mod_remove(&modlinkage);
+ if (status == DDI_SUCCESS) {
+ usbgem_mod_fini(&urf_ops);
+ }
+ return (status);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
diff --git a/usr/src/uts/common/io/usbgem/usbgem.c b/usr/src/uts/common/io/usbgem/usbgem.c
new file mode 100644
index 0000000000..a42f7119ef
--- /dev/null
+++ b/usr/src/uts/common/io/usbgem/usbgem.c
@@ -0,0 +1,6389 @@
+/*
+ * usbgem.c: General USB to Fast Ethernet mac driver framework
+ *
+ * Copyright (c) 2002-2012 Masayuki Murayama. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the author nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#pragma ident "@(#)usbgem.c 1.6 12/02/09"
+
+/*
+ * Change log
+ */
+
+/*
+ * TODO:
+ * implement DELAYED_START
+ */
+
+/*
+ * System Header files.
+ */
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/debug.h>
+#include <sys/kmem.h>
+#include <sys/vtrace.h>
+#include <sys/ethernet.h>
+#include <sys/modctl.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#ifndef USBGEM_CONFIG_GLDv3
+#include <sys/dlpi.h>
+#include <sys/strsubr.h>
+#endif
+#include <sys/stream.h> /* required for MBLK* */
+#include <sys/strsun.h> /* required for mionack() */
+#include <sys/byteorder.h>
+
+#include <sys/usb/usba.h>
+#ifdef USBGEM_CONFIG_GLDv3
+#include <inet/common.h>
+#include <inet/led.h>
+#include <inet/mi.h>
+#include <inet/nd.h>
+#endif
+
+/* supplement definitions */
+extern const char *usb_str_cr(usb_cr_t);
+
+#ifndef USBGEM_CONFIG_GLDv3
+#pragma weak gld_linkstate
+#endif
+#include <sys/note.h>
+
+#include "usbgem_mii.h"
+#include "usbgem.h"
+
+#ifdef MODULE
+char ident[] = "usb general ethernet mac driver v" VERSION;
+#else
+extern char ident[];
+#endif
+
+/* Debugging support */
+#ifdef USBGEM_DEBUG_LEVEL
+static int usbgem_debug = USBGEM_DEBUG_LEVEL;
+#define DPRINTF(n, args) if (usbgem_debug > (n)) cmn_err args
+#else
+#define DPRINTF(n, args)
+#endif
+
+/*
+ * Useful macros and typedefs
+ */
+#define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1))
+#define DEFAULT_PIPE(dp) ((dp)->reg_data->dev_default_ph)
+#define VTAG_SIZE 4
+#define BOOLEAN(x) ((x) != 0)
+/*
+ * configuration parameters
+ */
+#define USBDRV_MAJOR_VER 2
+#define USBDRV_MINOR_VER 0
+
+#define ETHERHEADERL (sizeof (struct ether_header))
+#define MAXPKTLEN(dp) ((dp)->mtu + ETHERHEADERL)
+#define MAXPKTBUF(dp) ((dp)->mtu + ETHERHEADERL + ETHERFCSL)
+
+#define WATCH_INTERVAL_FAST drv_usectohz(100*1000)
+
+#define STOP_GRACEFUL B_TRUE
+
+/*
+ * Private functions
+ */
+static int usbgem_open_pipes(struct usbgem_dev *dp);
+static int usbgem_close_pipes(struct usbgem_dev *dp);
+static void usbgem_intr_cb(usb_pipe_handle_t, usb_intr_req_t *);
+static void usbgem_bulkin_cb(usb_pipe_handle_t, usb_bulk_req_t *);
+static void usbgem_bulkout_cb(usb_pipe_handle_t, usb_bulk_req_t *);
+
+static int usbgem_mii_start(struct usbgem_dev *);
+static void usbgem_mii_stop(struct usbgem_dev *);
+
+/* local buffer management */
+static int usbgem_init_rx_buf(struct usbgem_dev *);
+
+/* internal mac interfaces */
+static void usbgem_tx_timeout(struct usbgem_dev *);
+static void usbgem_mii_link_watcher(struct usbgem_dev *);
+static int usbgem_mac_init(struct usbgem_dev *);
+static int usbgem_mac_start(struct usbgem_dev *);
+static int usbgem_mac_stop(struct usbgem_dev *, int, boolean_t);
+static void usbgem_mac_ioctl(struct usbgem_dev *, queue_t *, mblk_t *);
+
+int usbgem_speed_value[] = {10, 100, 1000};
+
+static int usbgem_ctrl_retry = 5;
+
+/* usb event support */
+static int usbgem_disconnect_cb(dev_info_t *dip);
+static int usbgem_reconnect_cb(dev_info_t *dip);
+int usbgem_suspend(dev_info_t *dip);
+int usbgem_resume(dev_info_t *dip);
+
+static uint8_t usbgem_bcastaddr[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+
+#ifdef MODULE
+extern struct mod_ops mod_miscops;
+
+static struct modlmisc modlmisc = {
+ &mod_miscops,
+ "usbgem v" VERSION,
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, &modlmisc, NULL
+};
+
+/*
+ * _init : done
+ */
+int
+_init(void)
+{
+ int status;
+
+ DPRINTF(2, (CE_CONT, "!usbgem: _init: called"));
+ status = mod_install(&modlinkage);
+
+ return (status);
+}
+
+/*
+ * _fini : done
+ */
+int
+_fini(void)
+{
+ int status;
+
+ DPRINTF(2, (CE_CONT, "!usbgem: _fini: called"));
+ status = mod_remove(&modlinkage);
+ return (status);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+#endif /* MODULE */
+
+/* ============================================================== */
+/*
+ * Ether CRC calculation utilities
+ */
+/* ============================================================== */
+/*
+ * Ether CRC calculation according to 21143 data sheet
+ */
+#define CRC32_POLY_LE 0xedb88320
+uint32_t
+usbgem_ether_crc_le(const uint8_t *addr)
+{
+ int idx;
+ int bit;
+ uint_t data;
+	uint32_t	crc;
+
+	crc = 0xffffffff;
+ for (idx = 0; idx < ETHERADDRL; idx++) {
+ for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
+ crc = (crc >> 1) ^
+ (((crc ^ data) & 1) ? CRC32_POLY_LE : 0);
+ }
+ }
+ return (crc);
+}
+
+#define CRC32_POLY_BE 0x04c11db7
+uint32_t
+usbgem_ether_crc_be(const uint8_t *addr)
+{
+ int idx;
+ int bit;
+ uint_t data;
+ uint32_t crc;
+
+ crc = 0xffffffff;
+ for (idx = 0; idx < ETHERADDRL; idx++) {
+ for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
+ crc = (crc << 1) ^
+ ((((crc >> 31) ^ data) & 1) ? CRC32_POLY_BE : 0);
+ }
+ }
+ return (crc);
+}
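+
+/*
+ * Typical use of these CRCs (chip dependent, a sketch only): many
+ * MACs index a 64-entry multicast hash filter with six bits of the
+ * ether CRC, e.g. hash = usbgem_ether_crc_le(ea) & 0x3f. The actual
+ * bit selection is supplied by each chip driver through the
+ * usbgc_multicast_hash callback.
+ */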
+
+int
+usbgem_prop_get_int(struct usbgem_dev *dp, char *prop_template, int def_val)
+{
+ char propname[32];
+
+ (void) sprintf(propname, prop_template, dp->name);
+
+ return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
+ DDI_PROP_DONTPASS, propname, def_val));
+}
+
+static int
+usbgem_population(uint32_t x)
+{
+ int i;
+ int cnt;
+
+ cnt = 0;
+ for (i = 0; i < 32; i++) {
+ if (x & (1 << i)) {
+ cnt++;
+ }
+ }
+ return (cnt);
+}
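+
+/*
+ * usbgem_population() returns the number of bits set in x. The link
+ * watcher uses it to judge whether the partner ability word looks
+ * like a real auto-negotiation result or just parallel detection.
+ */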
+
+static clock_t
+usbgem_timestamp_nz(void)
+{
+ clock_t now;
+ now = ddi_get_lbolt();
+ return (now ? now : (clock_t)1);
+}
+
+#ifdef USBGEM_DEBUG_LEVEL
+#ifdef USBGEM_DEBUG_VLAN
+#ifdef notdef
+#include <netinet/in.h>
+#endif
+static void
+usbgem_dump_packet(struct usbgem_dev *dp, char *title, mblk_t *mp,
+ boolean_t check_cksum)
+{
+ char msg[180];
+ uint8_t buf[18+20+20];
+ uint8_t *p;
+ size_t offset;
+ uint_t ethertype;
+ uint_t proto;
+ uint_t ipproto = 0;
+ uint_t iplen;
+ uint_t iphlen;
+ uint_t tcplen;
+ uint_t udplen;
+ uint_t cksum;
+ int rest;
+ int len;
+ char *bp;
+ mblk_t *tp;
+ extern uint_t ip_cksum(mblk_t *, int, uint32_t);
+
+ msg[0] = 0;
+ bp = msg;
+
+ rest = sizeof (buf);
+ offset = 0;
+ for (tp = mp; tp; tp = tp->b_cont) {
+ len = tp->b_wptr - tp->b_rptr;
+ len = min(rest, len);
+ bcopy(tp->b_rptr, &buf[offset], len);
+ rest -= len;
+ offset += len;
+ if (rest == 0) {
+ break;
+ }
+ }
+
+ offset = 0;
+ p = &buf[offset];
+
+ /* ethernet address */
+ sprintf(bp,
+ "ether: %02x:%02x:%02x:%02x:%02x:%02x"
+ " -> %02x:%02x:%02x:%02x:%02x:%02x",
+ p[6], p[7], p[8], p[9], p[10], p[11],
+ p[0], p[1], p[2], p[3], p[4], p[5]);
+ bp = &msg[strlen(msg)];
+
+	/* vlan tag and ethertype */
+ ethertype = GET_ETHERTYPE(p);
+ if (ethertype == VTAG_TPID) {
+ sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
+ bp = &msg[strlen(msg)];
+
+ offset += VTAG_SIZE;
+ p = &buf[offset];
+ ethertype = GET_ETHERTYPE(p);
+ }
+ sprintf(bp, " type:%04x", ethertype);
+ bp = &msg[strlen(msg)];
+
+ /* ethernet packet length */
+ sprintf(bp, " mblklen:%d", msgdsize(mp));
+ bp = &msg[strlen(msg)];
+ if (mp->b_cont) {
+ sprintf(bp, "(");
+ bp = &msg[strlen(msg)];
+ for (tp = mp; tp; tp = tp->b_cont) {
+ if (tp == mp) {
+ sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
+ } else {
+ sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
+ }
+ bp = &msg[strlen(msg)];
+ }
+ sprintf(bp, ")");
+ bp = &msg[strlen(msg)];
+ }
+
+ if (ethertype != ETHERTYPE_IP) {
+ goto x;
+ }
+
+ /* ip address */
+ offset += sizeof (struct ether_header);
+ p = &buf[offset];
+ ipproto = p[9];
+ iplen = GET_NET16(&p[2]);
+ sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
+ p[12], p[13], p[14], p[15],
+ p[16], p[17], p[18], p[19],
+ ipproto, iplen);
+ bp = (void *)&msg[strlen(msg)];
+
+ iphlen = (p[0] & 0xf) * 4;
+
+	/* cksum for pseudo header */
+ cksum = *(uint16_t *)&p[12];
+ cksum += *(uint16_t *)&p[14];
+ cksum += *(uint16_t *)&p[16];
+ cksum += *(uint16_t *)&p[18];
+ cksum += BE_16(ipproto);
+
+ /* tcp or udp protocol header */
+ offset += iphlen;
+ p = &buf[offset];
+ if (ipproto == IPPROTO_TCP) {
+ tcplen = iplen - iphlen;
+ sprintf(bp, ", tcp: len:%d cksum:%x",
+ tcplen, GET_NET16(&p[16]));
+ bp = (void *)&msg[strlen(msg)];
+
+ if (check_cksum) {
+ cksum += BE_16(tcplen);
+ cksum = (uint16_t)ip_cksum(mp, offset, cksum);
+ sprintf(bp, " (%s)",
+ (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
+ bp = (void *)&msg[strlen(msg)];
+ }
+ } else if (ipproto == IPPROTO_UDP) {
+ udplen = GET_NET16(&p[4]);
+ sprintf(bp, ", udp: len:%d cksum:%x",
+ udplen, GET_NET16(&p[6]));
+ bp = (void *)&msg[strlen(msg)];
+
+ if (GET_NET16(&p[6]) && check_cksum) {
+ cksum += *(uint16_t *)&p[4];
+ cksum = (uint16_t)ip_cksum(mp, offset, cksum);
+ sprintf(bp, " (%s)",
+ (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
+ bp = (void *)&msg[strlen(msg)];
+ }
+ }
+x:
+ cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
+}
+#endif /* USBGEM_DEBUG_VLAN */
+#endif /* USBGEM_DEBUG_LEVEL */
+
+#ifdef GEM_GCC_RUNTIME
+/*
+ * gcc3 runtime routines
+ */
+#pragma weak memcmp
+int
+memcmp(const void *s1, const void *s2, size_t n)
+{
+ int i;
+ int ret;
+
+ ret = 0;
+ for (i = 0; i < n; i++) {
+ ret = (int)((uint8_t *)s1)[i] - (int)((uint8_t *)s2)[i];
+ if (ret) {
+ return (ret);
+ }
+ }
+ return (0);
+}
+
+#pragma weak memset
+void *
+memset(void *s, int c, size_t n)
+{
+ if ((c & 0xff) == 0) {
+ bzero(s, n);
+ } else {
+ while (n--) {
+ ((uint8_t *)s)[n] = c;
+ }
+ }
+ return (s);
+}
+
+#pragma weak _memcpy = memcpy
+#pragma weak memcpy
+void *
+memcpy(void *s1, const void *s2, size_t n)
+{
+ bcopy(s2, s1, n);
+ return (s1);
+}
+#endif /* GEM_GCC_RUNTIME */
+/* ============================================================== */
+/*
+ * hardware operations
+ */
+/* ============================================================== */
+static int
+usbgem_hal_reset_chip(struct usbgem_dev *dp)
+{
+ int err;
+
+ sema_p(&dp->hal_op_lock);
+ err = (*dp->ugc.usbgc_reset_chip)(dp);
+ sema_v(&dp->hal_op_lock);
+ return (err);
+}
+
+static int
+usbgem_hal_init_chip(struct usbgem_dev *dp)
+{
+ int err;
+
+ sema_p(&dp->hal_op_lock);
+ err = (*dp->ugc.usbgc_init_chip)(dp);
+ sema_v(&dp->hal_op_lock);
+ return (err);
+}
+
+static int
+usbgem_hal_attach_chip(struct usbgem_dev *dp)
+{
+ int err;
+
+ sema_p(&dp->hal_op_lock);
+ err = (*dp->ugc.usbgc_attach_chip)(dp);
+ sema_v(&dp->hal_op_lock);
+ return (err);
+}
+
+static int
+usbgem_hal_set_rx_filter(struct usbgem_dev *dp)
+{
+ int err;
+
+ sema_p(&dp->hal_op_lock);
+ err = (*dp->ugc.usbgc_set_rx_filter)(dp);
+ sema_v(&dp->hal_op_lock);
+ return (err);
+}
+
+static int
+usbgem_hal_set_media(struct usbgem_dev *dp)
+{
+ int err;
+
+ sema_p(&dp->hal_op_lock);
+ err = (*dp->ugc.usbgc_set_media)(dp);
+ sema_v(&dp->hal_op_lock);
+ return (err);
+}
+
+static int
+usbgem_hal_start_chip(struct usbgem_dev *dp)
+{
+ int err;
+
+ sema_p(&dp->hal_op_lock);
+ err = (*dp->ugc.usbgc_start_chip)(dp);
+ sema_v(&dp->hal_op_lock);
+ return (err);
+}
+
+static int
+usbgem_hal_stop_chip(struct usbgem_dev *dp)
+{
+ int err;
+
+ sema_p(&dp->hal_op_lock);
+ err = (*dp->ugc.usbgc_stop_chip)(dp);
+ sema_v(&dp->hal_op_lock);
+ return (err);
+}
+
+static int
+usbgem_hal_get_stats(struct usbgem_dev *dp)
+{
+ int err;
+
+ sema_p(&dp->hal_op_lock);
+ err = (*dp->ugc.usbgc_get_stats)(dp);
+ sema_v(&dp->hal_op_lock);
+ return (err);
+}
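+
+/*
+ * Each usbgem_hal_*() wrapper above serializes access to the chip
+ * with the hal_op_lock semaphore; a semaphore is used rather than a
+ * mutex because the chip operations issue USB transfers and may block.
+ */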
+
+
+/* ============================================================== */
+/*
+ * USB pipe management
+ */
+/* ============================================================== */
+static boolean_t
+usbgem_rx_start_unit(struct usbgem_dev *dp, usb_bulk_req_t *req)
+{
+ mblk_t *mp;
+ int err;
+ usb_flags_t flags;
+
+ ASSERT(req);
+
+ mp = allocb(dp->rx_buf_len, BPRI_MED);
+ if (mp == NULL) {
+ cmn_err(CE_WARN, "!%s: %s: failed to allocate mblk",
+ dp->name, __func__);
+ goto err;
+ }
+
+ req->bulk_len = dp->rx_buf_len;
+ req->bulk_data = mp;
+ req->bulk_client_private = (usb_opaque_t)dp;
+ req->bulk_timeout = 0;
+ req->bulk_attributes = USB_ATTRS_SHORT_XFER_OK;
+ req->bulk_cb = usbgem_bulkin_cb;
+ req->bulk_exc_cb = usbgem_bulkin_cb;
+ req->bulk_completion_reason = 0;
+ req->bulk_cb_flags = 0;
+
+ flags = 0;
+ err = usb_pipe_bulk_xfer(dp->bulkin_pipe, req, flags);
+
+ if (err != USB_SUCCESS) {
+ cmn_err(CE_WARN, "%s: failed to bulk_xfer for rx, err:%d",
+ dp->name, err);
+
+ /* free req and mp */
+ usb_free_bulk_req(req);
+ goto err;
+ }
+ return (B_TRUE);
+err:
+ return (B_FALSE);
+}
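+
+/*
+ * An rx request cycles between usbgem_rx_start_unit() and
+ * usbgem_bulkin_cb(): the callback resubmits the request with a
+ * fresh mblk while the device stays online, and frees it otherwise.
+ */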
+
+/* ============================================================== */
+/*
+ * Rx/Tx buffer management
+ */
+/* ============================================================== */
+static int
+usbgem_init_rx_buf(struct usbgem_dev *dp)
+{
+ int i;
+ usb_bulk_req_t *req;
+
+ ASSERT(dp->mac_state == MAC_STATE_ONLINE);
+
+ for (i = 0; i < dp->ugc.usbgc_rx_list_max; i++) {
+ req = usb_alloc_bulk_req(dp->dip, 0, USB_FLAGS_SLEEP);
+ if (req == NULL) {
+ cmn_err(CE_WARN,
+ "!%s: %s: failed to allocate bulkreq for rx",
+ dp->name, __func__);
+ return (USB_FAILURE);
+ }
+ if (!usbgem_rx_start_unit(dp, req)) {
+ return (USB_FAILURE);
+ }
+ mutex_enter(&dp->rxlock);
+ dp->rx_busy_cnt++;
+ mutex_exit(&dp->rxlock);
+ }
+ return (USB_SUCCESS);
+}
+
+/* ============================================================== */
+/*
+ * memory resource management
+ */
+/* ============================================================== */
+static int
+usbgem_free_memory(struct usbgem_dev *dp)
+{
+ usb_bulk_req_t *req;
+
+	/* free all tx request structures */
+ while ((req = dp->tx_free_list) != NULL) {
+ dp->tx_free_list =
+ (usb_bulk_req_t *)req->bulk_client_private;
+ req->bulk_data = NULL;
+ usb_free_bulk_req(req);
+ }
+ return (USB_SUCCESS);
+}
+
+static int
+usbgem_alloc_memory(struct usbgem_dev *dp)
+{
+ int i;
+ usb_bulk_req_t *req;
+
+ /* allocate tx requests */
+ dp->tx_free_list = NULL;
+ for (i = 0; i < dp->ugc.usbgc_tx_list_max; i++) {
+ req = usb_alloc_bulk_req(dp->dip, 0, USB_FLAGS_SLEEP);
+ if (req == NULL) {
+ cmn_err(CE_WARN,
+ "%s:%s failed to allocate tx requests",
+ dp->name, __func__);
+
+ /* free partially allocated tx requests */
+ (void) usbgem_free_memory(dp);
+ return (USB_FAILURE);
+ }
+
+		/* add the newly allocated request to the tx free list */
+ req->bulk_client_private = (usb_opaque_t)dp->tx_free_list;
+ dp->tx_free_list = req;
+ }
+
+ return (USB_SUCCESS);
+}
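+
+/*
+ * While a tx request is on tx_free_list, its bulk_client_private
+ * field doubles as the free-list "next" pointer; once the request
+ * is in flight, it points back to the usbgem_dev instead.
+ */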
+
+/* ========================================================== */
+/*
+ * Start transmission.
+ * Return NULL on success; return the original mblk when no tx
+ * resources are available, so that the caller can retry later.
+ */
+/* ========================================================== */
+
+#ifdef TXTIMEOUT_TEST
+static int usbgem_send_cnt = 0;
+#endif
+
+/*
+ * usbgem_send_common is used only to send data packets onto the ethernet line.
+ */
+static mblk_t *
+usbgem_send_common(struct usbgem_dev *dp, mblk_t *mp, uint32_t flags)
+{
+ int err;
+ mblk_t *new;
+ usb_bulk_req_t *req;
+ int mcast;
+ int bcast;
+ int len;
+ boolean_t intr;
+ usb_flags_t usb_flags = 0;
+#ifdef USBGEM_DEBUG_LEVEL
+ usb_pipe_state_t p_state;
+#endif
+ DPRINTF(2, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ intr = (flags & 1) != 0;
+ len = msgdsize(mp);
+ bcast = 0;
+ mcast = 0;
+ if (mp->b_rptr[0] & 1) {
+ if (bcmp(mp->b_rptr, &usbgem_bcastaddr, ETHERADDRL) == 0) {
+ bcast = 1;
+ } else {
+ mcast = 1;
+ }
+ }
+ new = (*dp->ugc.usbgc_tx_make_packet)(dp, mp);
+ if (new == NULL) {
+ /*
+		 * no memory resources; we don't stop the downstream,
+		 * we just discard the packet.
+ */
+ DPRINTF(0, (CE_CONT, "!%s: %s: no memory",
+ dp->name, __func__));
+ freemsg(mp);
+
+ mutex_enter(&dp->txlock);
+ dp->stats.noxmtbuf++;
+ dp->stats.errxmt++;
+ mutex_exit(&dp->txlock);
+
+ return (NULL);
+ }
+
+ ASSERT(new->b_cont == NULL);
+
+ mutex_enter(&dp->txlock);
+ if (dp->tx_free_list == NULL) {
+ /*
+ * no tx free slot
+ */
+ ASSERT(dp->tx_busy_cnt == dp->ugc.usbgc_tx_list_max);
+ mutex_exit(&dp->txlock);
+
+ DPRINTF(4, (CE_CONT, "!%s: %s: no free slot",
+ dp->name, __func__));
+ if (new && new != mp) {
+ /* free reallocated message */
+ freemsg(new);
+ }
+ return (mp);
+ }
+ req = dp->tx_free_list;
+ dp->tx_free_list = (usb_bulk_req_t *)req->bulk_client_private;
+ dp->tx_busy_cnt++;
+
+ if (dp->tx_free_list == NULL) {
+ intr = B_TRUE;
+ }
+ if (intr) {
+ dp->tx_intr_pended++;
+ }
+ DB_TCI(new) = intr;
+#ifdef USBGEM_DEBUG_LEVEL
+ new->b_datap->db_cksum32 = dp->tx_seq_num;
+ dp->tx_seq_num++;
+#endif
+ dp->stats.obytes += len;
+ dp->stats.opackets++;
+ if (bcast | mcast) {
+ dp->stats.obcast += bcast;
+ dp->stats.omcast += mcast;
+ }
+ mutex_exit(&dp->txlock);
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: sending", dp->name, __func__));
+
+ req->bulk_len = (long)new->b_wptr - (long)new->b_rptr;
+ req->bulk_data = new;
+ req->bulk_client_private = (usb_opaque_t)dp;
+	req->bulk_timeout = dp->bulkout_timeout;	/* in seconds */
+ req->bulk_attributes = 0;
+ req->bulk_cb = usbgem_bulkout_cb;
+ req->bulk_exc_cb = usbgem_bulkout_cb;
+ req->bulk_completion_reason = 0;
+ req->bulk_cb_flags = 0;
+
+ if (intr) {
+ usb_flags = USB_FLAGS_SLEEP;
+ }
+ if ((err = usb_pipe_bulk_xfer(dp->bulkout_pipe, req, usb_flags))
+ != USB_SUCCESS) {
+
+ /* failed to transfer the packet, discard it. */
+ freemsg(new);
+ req->bulk_data = NULL;
+
+ /* recycle the request block */
+ mutex_enter(&dp->txlock);
+ dp->tx_busy_cnt--;
+ req->bulk_client_private = (usb_opaque_t)dp->tx_free_list;
+ dp->tx_free_list = req;
+ mutex_exit(&dp->txlock);
+
+ cmn_err(CE_NOTE,
+ "%s: %s: usb_pipe_bulk_xfer: failed: err:%d",
+ dp->name, __func__, err);
+
+ /* we use another flag to indicate error state. */
+ if (dp->fatal_error == (clock_t)0) {
+ dp->fatal_error = usbgem_timestamp_nz();
+ }
+ } else {
+ /* record the start time */
+ dp->tx_start_time = ddi_get_lbolt();
+ }
+
+ if (err == USB_SUCCESS && (usb_flags & USB_FLAGS_SLEEP)) {
+ usbgem_bulkout_cb(dp->bulkout_pipe, req);
+ }
+
+ if (new != mp) {
+ freemsg(mp);
+ }
+ return (NULL);
+}
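+
+/*
+ * Return convention of usbgem_send_common(): NULL means the packet
+ * was consumed (sent or dropped); returning the original mblk means
+ * no tx slot was free, so the caller should requeue it and retry
+ * after usbgem_bulkout_cb() drives mac_tx_update()/gld_sched().
+ */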
+
+int
+usbgem_restart_nic(struct usbgem_dev *dp)
+{
+ int ret;
+ int flags = 0;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ ASSERT(dp->mac_state != MAC_STATE_DISCONNECTED);
+
+ /*
+	 * ensure the nic is stopped
+ */
+ if (dp->mac_state == MAC_STATE_ONLINE) {
+ (void) usbgem_mac_stop(dp, MAC_STATE_STOPPED, STOP_GRACEFUL);
+ }
+
+	/* now that the nic is quiescent, reset the chip */
+ if (usbgem_hal_reset_chip(dp) != USB_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s: failed to reset chip",
+ dp->name, __func__);
+ goto err;
+ }
+
+ /*
+ * restore the nic state step by step
+ */
+ if (dp->nic_state < NIC_STATE_INITIALIZED) {
+ goto done;
+ }
+
+ if (usbgem_mac_init(dp) != USB_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s: failed to initialize chip",
+ dp->name, __func__);
+ goto err;
+ }
+
+ /* setup mac address and enable rx filter */
+ sema_p(&dp->rxfilter_lock);
+ dp->rxmode |= RXMODE_ENABLE;
+ ret = usbgem_hal_set_rx_filter(dp);
+ sema_v(&dp->rxfilter_lock);
+ if (ret != USB_SUCCESS) {
+ goto err;
+ }
+
+ /*
+ * update the link state asynchronously
+ */
+ cv_signal(&dp->link_watcher_wait_cv);
+
+ /*
+ * XXX - a panic happened because of linkdown.
+	 * We must check mii_state here, because the link can go down just
+	 * before the restart event happens. If the link is down now,
+	 * usbgem_mac_start() will be called from usbgem_mii_link_check()
+	 * when the link comes up again.
+ */
+ if (dp->mii_state == MII_STATE_LINKUP) {
+ if (usbgem_hal_set_media(dp) != USB_SUCCESS) {
+ goto err;
+ }
+ if (dp->nic_state < NIC_STATE_ONLINE) {
+ goto done;
+ }
+
+ (void) usbgem_mac_start(dp);
+
+ }
+done:
+ return (USB_SUCCESS);
+err:
+#ifdef GEM_CONFIG_FMA
+ ddi_fm_service_impact(dp->dip, DDI_SERVICE_DEGRADED);
+#endif
+ return (USB_FAILURE);
+}
+
+static void
+usbgem_tx_timeout(struct usbgem_dev *dp)
+{
+ int ret;
+ uint_t rwlock;
+ clock_t now;
+
+	for (;;) {
+ mutex_enter(&dp->tx_watcher_lock);
+ ret = cv_timedwait(&dp->tx_watcher_cv, &dp->tx_watcher_lock,
+ dp->tx_watcher_interval + ddi_get_lbolt());
+ mutex_exit(&dp->tx_watcher_lock);
+
+ if (dp->tx_watcher_stop) {
+ break;
+ }
+
+ now = ddi_get_lbolt();
+
+ rwlock = RW_READER;
+again:
+ rw_enter(&dp->dev_state_lock, rwlock);
+
+ if ((dp->mac_state != MAC_STATE_DISCONNECTED &&
+ dp->fatal_error &&
+ now - dp->fatal_error >= dp->ugc.usbgc_tx_timeout) ||
+ (dp->mac_state == MAC_STATE_ONLINE &&
+ dp->mii_state == MII_STATE_LINKUP &&
+ dp->tx_busy_cnt != 0 &&
+ now - dp->tx_start_time >= dp->ugc.usbgc_tx_timeout)) {
+ if (rwlock == RW_READER) {
+ /*
+ * Upgrade dev_state_lock from shared mode
+ * to exclusive mode to restart nic
+ */
+ rwlock = RW_WRITER;
+ rw_exit(&dp->dev_state_lock);
+ goto again;
+ }
+ cmn_err(CE_WARN, "%s: %s: restarting the nic:"
+ " fatal_error:%ld nic_state:%d"
+ " mac_state:%d starttime:%ld",
+ dp->name, __func__,
+ dp->fatal_error ? now - dp->fatal_error: 0,
+ dp->nic_state, dp->mac_state,
+ dp->tx_busy_cnt ? now - dp->tx_start_time : 0);
+
+ (void) usbgem_restart_nic(dp);
+ }
+
+ rw_exit(&dp->dev_state_lock);
+ }
+}
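+
+/*
+ * usbgem_tx_timeout() holds dev_state_lock as RW_READER for the
+ * common no-timeout case and upgrades by drop-and-retake (the
+ * "again" label) only when the nic must be restarted; the timeout
+ * conditions are re-evaluated after the retake because the device
+ * state may have changed meanwhile.
+ */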
+
+static int
+usbgem_tx_watcher_start(struct usbgem_dev *dp)
+{
+ int err;
+ kthread_t *wdth;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+	/* prepare the first invocation of usbgem_tx_timeout() */
+ dp->tx_watcher_stop = 0;
+ dp->tx_watcher_interval = drv_usectohz(1000*1000);
+
+ wdth = thread_create(NULL, 0, usbgem_tx_timeout, dp, 0, &p0,
+ TS_RUN, minclsyspri);
+ if (wdth == NULL) {
+ cmn_err(CE_WARN,
+ "!%s: %s: failed to create a tx_watcher thread",
+ dp->name, __func__);
+ return (USB_FAILURE);
+ }
+ dp->tx_watcher_did = wdth->t_did;
+
+ return (USB_SUCCESS);
+}
+
+static void
+usbgem_tx_watcher_stop(struct usbgem_dev *dp)
+{
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+ if (dp->tx_watcher_did) {
+		/* Ensure the tx watcher thread has stopped */
+ dp->tx_watcher_stop = 1;
+ cv_signal(&dp->tx_watcher_cv);
+ thread_join(dp->tx_watcher_did);
+ dp->tx_watcher_did = NULL;
+ }
+}
+
+/* ================================================================== */
+/*
+ * Callback handlers
+ */
+/* ================================================================== */
+static void
+usbgem_bulkin_cb(usb_pipe_handle_t pipe, usb_bulk_req_t *req)
+{
+ mblk_t *newmp;
+ mblk_t *mp;
+ mblk_t *tp;
+ int len = 0;
+ int pkts = 0;
+ int bcast = 0;
+ int mcast = 0;
+ boolean_t busy;
+ struct usbgem_dev *dp;
+
+ dp = (struct usbgem_dev *)req->bulk_client_private;
+ mp = req->bulk_data;
+ req->bulk_data = NULL;
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: mp:%p, cr:%s(%d)",
+ dp->name, __func__, mp,
+ usb_str_cr(req->bulk_completion_reason),
+ req->bulk_completion_reason));
+
+ /*
+	 * we cannot acquire dev_state_lock because this routine
+	 * must be able to run during usbgem_mac_stop() to avoid
+	 * deadlock.
+	 * we use a simple membar operation to read the state correctly.
+ */
+ membar_consumer();
+
+ if (req->bulk_completion_reason == USB_CR_OK &&
+ dp->nic_state == NIC_STATE_ONLINE) {
+ newmp = (*dp->ugc.usbgc_rx_make_packet)(dp, mp);
+
+ if (newmp != mp) {
+ /* the message has been reallocated, free old one */
+ freemsg(mp);
+ }
+
+		/* the message may include one or more ethernet packets */
+ for (tp = newmp; tp; tp = tp->b_next) {
+ len += tp->b_wptr - tp->b_rptr;
+ pkts++;
+ if (tp->b_rptr[0] & 1) {
+ if (bcmp(tp->b_rptr, &usbgem_bcastaddr,
+ ETHERADDRL) == 0) {
+ bcast++;
+ } else {
+ mcast++;
+ }
+ }
+ }
+
+ /* send up if it is a valid packet */
+#ifdef USBGEM_CONFIG_GLDv3
+ mac_rx(dp->mh, NULL, newmp);
+#else
+ while (newmp) {
+ tp = newmp;
+ newmp = newmp->b_next;
+ tp->b_next = NULL;
+ gld_recv(dp->macinfo, tp);
+ }
+#endif
+ } else {
+ freemsg(mp);
+ len = 0;
+ }
+
+ mutex_enter(&dp->rxlock);
+ /* update rx_active */
+ if (dp->rx_active) {
+ dp->rx_active = dp->mac_state == MAC_STATE_ONLINE;
+ }
+
+ dp->stats.rbytes += len;
+ dp->stats.rpackets += pkts;
+ if (bcast | mcast) {
+ dp->stats.rbcast += bcast;
+ dp->stats.rmcast += mcast;
+ }
+ mutex_exit(&dp->rxlock);
+
+ if (dp->rx_active) {
+ /* prepare to receive the next packets */
+ if (usbgem_rx_start_unit(dp, req)) {
+			/* we succeeded */
+ goto done;
+ }
+ cmn_err(CE_WARN,
+ "!%s: %s: failed to fill next rx packet",
+ dp->name, __func__);
+ /*
+ * we use another flag to indicate error state.
+ * if we acquire dev_state_lock for RW_WRITER here,
+ * usbgem_mac_stop() may hang.
+ */
+ if (dp->fatal_error == (clock_t)0) {
+ dp->fatal_error = usbgem_timestamp_nz();
+ }
+ } else {
+ /* no need to prepare the next packets */
+ usb_free_bulk_req(req);
+ }
+
+ mutex_enter(&dp->rxlock);
+ dp->rx_active = B_FALSE;
+ dp->rx_busy_cnt--;
+ if (dp->rx_busy_cnt == 0) {
+ /* wake up someone waits for me */
+ cv_broadcast(&dp->rx_drain_cv);
+ }
+ mutex_exit(&dp->rxlock);
+done:
+ ;
+}
+
+static void
+usbgem_bulkout_cb(usb_pipe_handle_t pipe, usb_bulk_req_t *req)
+{
+ boolean_t intr;
+ boolean_t tx_sched;
+ struct usbgem_dev *dp;
+
+ dp = (struct usbgem_dev *)req->bulk_client_private;
+ tx_sched = B_FALSE;
+
+ DPRINTF(2, (CE_CONT,
+ "!%s: %s: cr:%s(%d) cb_flags:0x%x head:%d tail:%d",
+ dp->name, __func__,
+ usb_str_cr(req->bulk_completion_reason),
+ req->bulk_completion_reason,
+ req->bulk_cb_flags,
+ dp->tx_busy_cnt));
+
+	/* we have finished transferring the packet into the tx fifo */
+ intr = DB_TCI(req->bulk_data);
+ freemsg(req->bulk_data);
+
+ if (req->bulk_completion_reason != USB_CR_OK &&
+ dp->fatal_error == (clock_t)0) {
+ dp->fatal_error = usbgem_timestamp_nz();
+ }
+
+ mutex_enter(&dp->txlock);
+
+ if (intr) {
+ ASSERT(dp->tx_intr_pended > 0);
+ /* find the last interrupt we have scheduled */
+ if (--(dp->tx_intr_pended) == 0) {
+ tx_sched = B_TRUE;
+ }
+ }
+
+ ASSERT(dp->tx_busy_cnt > 0);
+ req->bulk_client_private = (usb_opaque_t)dp->tx_free_list;
+ dp->tx_free_list = req;
+ dp->tx_busy_cnt--;
+
+#ifdef CONFIG_TX_LIMITER
+ if (tx_sched) {
+ dp->tx_max_packets =
+ min(dp->tx_max_packets + 1, dp->ugc.usbgc_tx_list_max);
+ }
+#endif
+ if (dp->mac_state != MAC_STATE_ONLINE && dp->tx_busy_cnt == 0) {
+ cv_broadcast(&dp->tx_drain_cv);
+ }
+
+ mutex_exit(&dp->txlock);
+
+ if (tx_sched) {
+#ifdef USBGEM_CONFIG_GLDv3
+ mac_tx_update(dp->mh);
+#else
+ gld_sched(dp->macinfo);
+#endif
+ }
+}
+
+static void
+usbgem_intr_cb(usb_pipe_handle_t ph, usb_intr_req_t *req)
+{
+ struct usbgem_dev *dp;
+
+ dp = (struct usbgem_dev *)req->intr_client_private;
+ dp->stats.intr++;
+
+ if (req->intr_completion_reason == USB_CR_OK) {
+ (*dp->ugc.usbgc_interrupt)(dp, req->intr_data);
+ }
+
+ /* free the request and data */
+ usb_free_intr_req(req);
+}
+
+/* ======================================================================== */
+/*
+ * MII support routines
+ */
+/* ======================================================================== */
+static void
+usbgem_choose_forcedmode(struct usbgem_dev *dp)
+{
+ /* choose media mode */
+ if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
+ dp->speed = USBGEM_SPD_1000;
+ dp->full_duplex = dp->anadv_1000fdx;
+ } else if (dp->anadv_100fdx || dp->anadv_100t4) {
+ dp->speed = USBGEM_SPD_100;
+ dp->full_duplex = B_TRUE;
+ } else if (dp->anadv_100hdx) {
+ dp->speed = USBGEM_SPD_100;
+ dp->full_duplex = B_FALSE;
+ } else {
+ dp->speed = USBGEM_SPD_10;
+ dp->full_duplex = dp->anadv_10fdx;
+ }
+}
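+
+/*
+ * The forced-mode selection above follows the auto-negotiation
+ * priority order: 1000 full/half first, then 100 full/T4, 100 half,
+ * 10 full, and finally 10 half as the fallback.
+ */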
+
+static uint16_t
+usbgem_mii_read(struct usbgem_dev *dp, uint_t reg, int *errp)
+{
+ uint16_t val;
+
+ sema_p(&dp->hal_op_lock);
+ val = (*dp->ugc.usbgc_mii_read)(dp, reg, errp);
+ sema_v(&dp->hal_op_lock);
+
+ return (val);
+}
+
+static void
+usbgem_mii_write(struct usbgem_dev *dp, uint_t reg, uint16_t val, int *errp)
+{
+ sema_p(&dp->hal_op_lock);
+ (*dp->ugc.usbgc_mii_write)(dp, reg, val, errp);
+ sema_v(&dp->hal_op_lock);
+}
+
+static int
+usbgem_mii_probe(struct usbgem_dev *dp)
+{
+ int err;
+
+ err = (*dp->ugc.usbgc_mii_probe)(dp);
+ return (err);
+}
+
+static int
+usbgem_mii_init(struct usbgem_dev *dp)
+{
+ int err;
+
+ err = (*dp->ugc.usbgc_mii_init)(dp);
+ return (err);
+}
+
+#define fc_cap_decode(x) \
+ ((((x) & MII_ABILITY_PAUSE) != 0 ? 1 : 0) | \
+ (((x) & MII_ABILITY_ASM_DIR) != 0 ? 2 : 0))
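+
+/*
+ * fc_cap_decode() packs the two flow control ability bits into an
+ * index 0-3 for usbgem_fc_result[][] below:
+ * 0: no pause, 1: PAUSE only (symmetric),
+ * 2: ASM_DIR only (tx pause), 3: PAUSE|ASM_DIR (rx/symmetric).
+ */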
+
+int
+usbgem_mii_config_default(struct usbgem_dev *dp, int *errp)
+{
+ uint16_t mii_stat;
+ uint16_t val;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /*
+ * Configure bits in advertisement register
+ */
+ mii_stat = dp->mii_status;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
+ dp->name, __func__, mii_stat, MII_STATUS_BITS));
+
+ if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
+		/* should not happen: no technology ability bits set */
+ cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
+ dp->name, mii_stat, MII_STATUS_BITS);
+ return (USB_FAILURE);
+ }
+
+ /* Do not change the rest of ability bits in advert reg */
+ val = usbgem_mii_read(dp, MII_AN_ADVERT, errp) & ~MII_ABILITY_ALL;
+ if (*errp != USB_SUCCESS) {
+ goto usberr;
+ }
+
+ DPRINTF(0, (CE_CONT,
+ "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
+ dp->name, __func__,
+ dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
+ dp->anadv_10fdx, dp->anadv_10hdx));
+
+ /* set technology bits */
+ if (dp->anadv_100t4) {
+ val |= MII_ABILITY_100BASE_T4;
+ }
+ if (dp->anadv_100fdx) {
+ val |= MII_ABILITY_100BASE_TX_FD;
+ }
+ if (dp->anadv_100hdx) {
+ val |= MII_ABILITY_100BASE_TX;
+ }
+ if (dp->anadv_10fdx) {
+ val |= MII_ABILITY_10BASE_T_FD;
+ }
+ if (dp->anadv_10hdx) {
+ val |= MII_ABILITY_10BASE_T;
+ }
+
+ /* set flow control capabilities */
+ if (dp->anadv_pause) {
+ val |= MII_ABILITY_PAUSE;
+ }
+ if (dp->anadv_asmpause) {
+ val |= MII_ABILITY_ASM_DIR;
+ }
+
+ DPRINTF(0, (CE_CONT,
+ "!%s: %s: setting MII_AN_ADVERT reg:%b, pause:%d, asmpause:%d",
+ dp->name, __func__, val, MII_ABILITY_BITS,
+ dp->anadv_pause, dp->anadv_asmpause));
+
+ usbgem_mii_write(dp, MII_AN_ADVERT, val, errp);
+ if (*errp != USB_SUCCESS) {
+ goto usberr;
+ }
+
+ if (dp->mii_status & MII_STATUS_XSTATUS) {
+ /*
+ * 1000Base-T GMII support
+ */
+ if (!dp->anadv_autoneg) {
+ /* enable manual configuration */
+ val = MII_1000TC_CFG_EN;
+ if (dp->anadv_1000t_ms == 2) {
+ val |= MII_1000TC_CFG_VAL;
+ }
+ } else {
+ val = 0;
+ if (dp->anadv_1000fdx) {
+ val |= MII_1000TC_ADV_FULL;
+ }
+ if (dp->anadv_1000hdx) {
+ val |= MII_1000TC_ADV_HALF;
+ }
+ switch (dp->anadv_1000t_ms) {
+ case 1:
+ /* slave */
+ val |= MII_1000TC_CFG_EN;
+ break;
+
+ case 2:
+ /* master */
+ val |= MII_1000TC_CFG_EN | MII_1000TC_CFG_VAL;
+ break;
+
+ default:
+ /* auto: do nothing */
+ break;
+ }
+ }
+ DPRINTF(0, (CE_CONT,
+ "!%s: %s: setting MII_1000TC reg:%b",
+ dp->name, __func__, val, MII_1000TC_BITS));
+
+ usbgem_mii_write(dp, MII_1000TC, val, errp);
+ if (*errp != USB_SUCCESS) {
+ goto usberr;
+ }
+ }
+ return (USB_SUCCESS);
+
+usberr:
+ return (*errp);
+}
+
+static char *usbgem_fc_type[] = {
+ "without",
+ "with symmetric",
+ "with tx",
+ "with rx",
+};
+
+#ifdef USBGEM_CONFIG_GLDv3
+#define USBGEM_LINKUP(dp) mac_link_update((dp)->mh, LINK_STATE_UP)
+#define USBGEM_LINKDOWN(dp) mac_link_update((dp)->mh, LINK_STATE_DOWN)
+#else
+#define USBGEM_LINKUP(dp) \
+ if (gld_linkstate) { \
+ gld_linkstate((dp)->macinfo, GLD_LINKSTATE_UP); \
+ }
+#define USBGEM_LINKDOWN(dp) \
+ if (gld_linkstate) { \
+ gld_linkstate((dp)->macinfo, GLD_LINKSTATE_DOWN); \
+ }
+#endif
+
+static uint8_t usbgem_fc_result[4 /* my cap */][4 /* lp cap */] = {
+/* none symm tx rx/symm */
+/* none */
+ {FLOW_CONTROL_NONE,
+ FLOW_CONTROL_NONE,
+ FLOW_CONTROL_NONE,
+ FLOW_CONTROL_NONE},
+/* sym */
+ {FLOW_CONTROL_NONE,
+ FLOW_CONTROL_SYMMETRIC,
+ FLOW_CONTROL_NONE,
+ FLOW_CONTROL_SYMMETRIC},
+/* tx */
+ {FLOW_CONTROL_NONE,
+ FLOW_CONTROL_NONE,
+ FLOW_CONTROL_NONE,
+ FLOW_CONTROL_TX_PAUSE},
+/* rx/symm */
+ {FLOW_CONTROL_NONE,
+ FLOW_CONTROL_SYMMETRIC,
+ FLOW_CONTROL_RX_PAUSE,
+ FLOW_CONTROL_SYMMETRIC},
+};
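+
+/*
+ * The table above implements the usual IEEE 802.3 pause resolution.
+ * For example, a tx-only local port (index 2) facing an rx/symmetric
+ * partner (index 3) resolves to FLOW_CONTROL_TX_PAUSE, while a side
+ * advertising no pause ability always resolves to FLOW_CONTROL_NONE.
+ */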
+
+static boolean_t
+usbgem_mii_link_check(struct usbgem_dev *dp, int *oldstatep, int *newstatep)
+{
+ boolean_t tx_sched = B_FALSE;
+ uint16_t status;
+ uint16_t advert;
+ uint16_t lpable;
+ uint16_t exp;
+ uint16_t ctl1000;
+ uint16_t stat1000;
+ uint16_t val;
+ clock_t now;
+ clock_t diff;
+ int linkdown_action;
+ boolean_t fix_phy = B_FALSE;
+ int err;
+ uint_t rwlock;
+
+ DPRINTF(4, (CE_CONT, "!%s: %s: time:%d state:%d",
+ dp->name, __func__, ddi_get_lbolt(), dp->mii_state));
+
+ if (dp->mii_state != MII_STATE_LINKUP) {
+ rwlock = RW_WRITER;
+ } else {
+ rwlock = RW_READER;
+ }
+again:
+ rw_enter(&dp->dev_state_lock, rwlock);
+
+ /* save old mii state */
+ *oldstatep = dp->mii_state;
+
+ if (dp->mac_state == MAC_STATE_DISCONNECTED) {
+ /* stop periodic execution of the link watcher */
+ dp->mii_interval = 0;
+ tx_sched = B_FALSE;
+ goto next;
+ }
+
+ now = ddi_get_lbolt();
+ diff = now - dp->mii_last_check;
+ dp->mii_last_check = now;
+
+ /*
+ * For NWAM, don't show linkdown state right
+ * when the device is attached.
+ */
+ if (dp->linkup_delay > 0) {
+ if (dp->linkup_delay > diff) {
+ dp->linkup_delay -= diff;
+ } else {
+ /* link up timeout */
+ dp->linkup_delay = -1;
+ }
+ }
+
+next_nowait:
+ switch (dp->mii_state) {
+ case MII_STATE_UNKNOWN:
+ goto reset_phy;
+
+ case MII_STATE_RESETTING:
+ dp->mii_timer -= diff;
+ if (dp->mii_timer > 0) {
+ /* don't read phy registers in resetting */
+ dp->mii_interval = WATCH_INTERVAL_FAST;
+ goto next;
+ }
+
+ val = usbgem_mii_read(dp, MII_CONTROL, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ if (val & MII_CONTROL_RESET) {
+ cmn_err(CE_NOTE,
+ "!%s: time:%ld resetting phy not complete."
+ " mii_control:0x%b",
+ dp->name, ddi_get_lbolt(),
+ val, MII_CONTROL_BITS);
+ }
+
+ /* ensure neither isolated nor pwrdown nor auto-nego mode */
+ usbgem_mii_write(dp, MII_CONTROL, 0, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+#if USBGEM_DEBUG_LEVEL > 10
+ val = usbgem_mii_read(dp, MII_CONTROL, &err);
+ cmn_err(CE_CONT, "!%s: readback control %b",
+ dp->name, val, MII_CONTROL_BITS);
+#endif
+ /* As resetting PHY has completed, configure PHY registers */
+ if ((*dp->ugc.usbgc_mii_config)(dp, &err) != USB_SUCCESS) {
+ /* we failed to configure PHY */
+ goto usberr;
+ }
+
+ /* prepare for forced mode */
+ usbgem_choose_forcedmode(dp);
+
+ dp->mii_lpable = 0;
+ dp->mii_advert = 0;
+ dp->mii_exp = 0;
+ dp->mii_ctl1000 = 0;
+ dp->mii_stat1000 = 0;
+
+ dp->flow_control = FLOW_CONTROL_NONE;
+
+ if (!dp->anadv_autoneg) {
+ /* skip auto-negotiation phase */
+ dp->mii_state = MII_STATE_MEDIA_SETUP;
+ dp->mii_timer = dp->ugc.usbgc_mii_linkdown_timeout;
+ goto next_nowait;
+ }
+
+ /* issue an auto-negotiation command */
+ goto autonego;
+
+ case MII_STATE_AUTONEGOTIATING:
+ /*
+ * Autonegotiation in progress
+ */
+ dp->mii_timer -= diff;
+ if (dp->mii_timer -
+ (dp->ugc.usbgc_mii_an_timeout - dp->ugc.usbgc_mii_an_wait)
+ > 0) {
+ /* wait for minimum time (2.3 - 2.5 sec) */
+ dp->mii_interval = WATCH_INTERVAL_FAST;
+ goto next;
+ }
+
+ /* read PHY status */
+ status = usbgem_mii_read(dp, MII_STATUS, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ DPRINTF(4, (CE_CONT,
+ "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
+ dp->name, __func__, dp->mii_state,
+ status, MII_STATUS_BITS));
+
+ if (status & MII_STATUS_REMFAULT) {
+ /*
+			 * The link partner reported that something went
+			 * wrong; restart auto-negotiation.
+ */
+ cmn_err(CE_CONT,
+ "!%s: auto-negotiation failed: remote fault",
+ dp->name);
+ goto autonego;
+ }
+
+ if ((status & MII_STATUS_ANDONE) == 0) {
+ if (dp->mii_timer <= 0) {
+ /*
+ * Auto-negotiation has been timed out,
+ * Reset PHY and try again.
+ */
+ if (!dp->mii_supress_msg) {
+ cmn_err(CE_WARN,
+ "!%s: auto-negotiation failed:"
+ " timeout",
+ dp->name);
+ dp->mii_supress_msg = B_TRUE;
+ }
+ goto autonego;
+ }
+ /*
+ * Auto-negotiation is in progress. Wait for a while.
+ */
+ dp->mii_interval = dp->ugc.usbgc_mii_an_watch_interval;
+ goto next;
+ }
+
+ /*
+ * Auto-negotiation has been completed. Let's go to AN_DONE.
+ */
+ dp->mii_state = MII_STATE_AN_DONE;
+ dp->mii_supress_msg = B_FALSE;
+ DPRINTF(0, (CE_CONT,
+ "!%s: auto-negotiation completed, MII_STATUS:%b",
+ dp->name, status, MII_STATUS_BITS));
+
+ if (dp->ugc.usbgc_mii_an_delay > 0) {
+ dp->mii_timer = dp->ugc.usbgc_mii_an_delay;
+ dp->mii_interval = drv_usectohz(20*1000);
+ goto next;
+ }
+
+ dp->mii_timer = 0;
+ diff = 0;
+ goto next_nowait;
+
+ case MII_STATE_AN_DONE:
+ /*
+		 * Auto-negotiation is done. Now we can set up the media.
+ */
+ dp->mii_timer -= diff;
+ if (dp->mii_timer > 0) {
+ /* wait for a while */
+ dp->mii_interval = WATCH_INTERVAL_FAST;
+ goto next;
+ }
+
+ /*
+		 * Set up speed and duplex mode according to
+		 * the result of auto-negotiation.
+		 */
+
+		/*
+		 * Read the registers required to determine the current
+		 * duplex mode and media speed.
+ */
+ if (dp->ugc.usbgc_mii_an_delay > 0) {
+ /* the 'status' variable is not initialized yet */
+ status = usbgem_mii_read(dp, MII_STATUS, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ }
+ advert = usbgem_mii_read(dp, MII_AN_ADVERT, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ lpable = usbgem_mii_read(dp, MII_AN_LPABLE, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ exp = usbgem_mii_read(dp, MII_AN_EXPANSION, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ if (exp == 0xffff) {
+ /* some phys don't have exp register */
+ exp = 0;
+ }
+
+ ctl1000 = 0;
+ stat1000 = 0;
+ if (dp->mii_status & MII_STATUS_XSTATUS) {
+ ctl1000 = usbgem_mii_read(dp, MII_1000TC, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ stat1000 = usbgem_mii_read(dp, MII_1000TS, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ }
+ dp->mii_lpable = lpable;
+ dp->mii_advert = advert;
+ dp->mii_exp = exp;
+ dp->mii_ctl1000 = ctl1000;
+ dp->mii_stat1000 = stat1000;
+
+ cmn_err(CE_CONT,
+ "!%s: auto-negotiation done: "
+ "status:%b, advert:%b, lpable:%b, exp:%b",
+ dp->name,
+ status, MII_STATUS_BITS,
+ advert, MII_ABILITY_BITS,
+ lpable, MII_ABILITY_BITS,
+ exp, MII_AN_EXP_BITS);
+
+ DPRINTF(0, (CE_CONT, "!%s: MII_STATUS:%b",
+ dp->name, status, MII_STATUS_BITS));
+
+ if (dp->mii_status & MII_STATUS_XSTATUS) {
+ cmn_err(CE_CONT,
+ "! MII_1000TC reg:%b, MII_1000TS reg:%b",
+ ctl1000, MII_1000TC_BITS,
+ stat1000, MII_1000TS_BITS);
+ }
+
+ if (usbgem_population(lpable) <= 1 &&
+ (exp & MII_AN_EXP_LPCANAN) == 0) {
+ if ((advert & MII_ABILITY_TECH) != lpable) {
+ cmn_err(CE_WARN,
+ "!%s: but the link partner doesn't seem"
+ " to have auto-negotiation capability."
+ " please check the link configuration.",
+ dp->name);
+ }
+ /*
+			 * it should be a result of parallel detection,
+ * which cannot detect duplex mode.
+ */
+ if ((advert & lpable) == 0 &&
+ lpable & MII_ABILITY_10BASE_T) {
+ /* no common technology, try 10M half mode */
+ lpable |= advert & MII_ABILITY_10BASE_T;
+ fix_phy = B_TRUE;
+ }
+ } else if (lpable == 0) {
+ cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
+ goto reset_phy;
+ }
+ /*
+ * configure current link mode according to AN priority.
+ */
+ val = advert & lpable;
+ if ((ctl1000 & MII_1000TC_ADV_FULL) &&
+ (stat1000 & MII_1000TS_LP_FULL)) {
+ /* 1000BaseT & full duplex */
+ dp->speed = USBGEM_SPD_1000;
+ dp->full_duplex = B_TRUE;
+ } else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
+ (stat1000 & MII_1000TS_LP_HALF)) {
+ /* 1000BaseT & half duplex */
+ dp->speed = USBGEM_SPD_1000;
+ dp->full_duplex = B_FALSE;
+ } else if ((val & MII_ABILITY_100BASE_TX_FD)) {
+			/* 100BaseTx & full duplex */
+ dp->speed = USBGEM_SPD_100;
+ dp->full_duplex = B_TRUE;
+ } else if ((val & MII_ABILITY_100BASE_T4)) {
+			/* 100BaseT4: treated as full duplex here */
+ dp->speed = USBGEM_SPD_100;
+ dp->full_duplex = B_TRUE;
+ } else if ((val & MII_ABILITY_100BASE_TX)) {
+ /* 100BaseTx & half duplex */
+ dp->speed = USBGEM_SPD_100;
+ dp->full_duplex = B_FALSE;
+ } else if ((val & MII_ABILITY_10BASE_T_FD)) {
+ /* 10BaseT & full duplex */
+ dp->speed = USBGEM_SPD_10;
+ dp->full_duplex = B_TRUE;
+ } else if ((val & MII_ABILITY_10BASE_T)) {
+ /* 10BaseT & half duplex */
+ dp->speed = USBGEM_SPD_10;
+ dp->full_duplex = B_FALSE;
+ } else {
+ /*
+ * the link partner doesn't seem to have
+ * auto-negotiation capability and our PHY
+ * could not report current mode correctly.
+			 * We guess the current mode from the mii_control
+			 * register.
+ */
+ val = usbgem_mii_read(dp, MII_CONTROL, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+
+ /* select 100m half or 10m half */
+ dp->speed = (val & MII_CONTROL_100MB) ?
+ USBGEM_SPD_100 : USBGEM_SPD_10;
+ dp->full_duplex = B_FALSE;
+ fix_phy = B_TRUE;
+
+ cmn_err(CE_NOTE,
+ "!%s: auto-negotiation done but "
+ "common ability not found.\n"
+ "PHY state: control:%b advert:%b lpable:%b\n"
+ "guessing %d Mbps %s duplex mode",
+ dp->name,
+ val, MII_CONTROL_BITS,
+ advert, MII_ABILITY_BITS,
+ lpable, MII_ABILITY_BITS,
+ usbgem_speed_value[dp->speed],
+ dp->full_duplex ? "full" : "half");
+ }
+
+ if (dp->full_duplex) {
+ dp->flow_control =
+ usbgem_fc_result[fc_cap_decode(advert)]
+ [fc_cap_decode(lpable)];
+ } else {
+ dp->flow_control = FLOW_CONTROL_NONE;
+ }
+ dp->mii_state = MII_STATE_MEDIA_SETUP;
+ dp->mii_timer = dp->ugc.usbgc_mii_linkdown_timeout;
+ goto next_nowait;
+
+ case MII_STATE_MEDIA_SETUP:
+ DPRINTF(2, (CE_CONT, "!%s: setup midia mode", dp->name));
+
+ /* assume the link state is down */
+ dp->mii_state = MII_STATE_LINKDOWN;
+ dp->mii_supress_msg = B_FALSE;
+
+ /* use short interval */
+ dp->mii_interval = WATCH_INTERVAL_FAST;
+
+ if ((!dp->anadv_autoneg) ||
+ dp->ugc.usbgc_mii_an_oneshot || fix_phy) {
+
+ /*
+ * write the result of auto negotiation back.
+ */
+ val = usbgem_mii_read(dp, MII_CONTROL, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
+ MII_CONTROL_ANE | MII_CONTROL_RSAN);
+
+ if (dp->full_duplex) {
+ val |= MII_CONTROL_FDUPLEX;
+ }
+
+ switch (dp->speed) {
+ case USBGEM_SPD_1000:
+ val |= MII_CONTROL_1000MB;
+ break;
+
+ case USBGEM_SPD_100:
+ val |= MII_CONTROL_100MB;
+ break;
+
+ default:
+ cmn_err(CE_WARN, "%s: unknown speed:%d",
+ dp->name, dp->speed);
+ /* FALLTHROUGH */
+
+ case USBGEM_SPD_10:
+ /* for USBGEM_SPD_10, do nothing */
+ break;
+ }
+
+ if (dp->mii_status & MII_STATUS_XSTATUS) {
+ usbgem_mii_write(dp,
+ MII_1000TC, MII_1000TC_CFG_EN, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ }
+ usbgem_mii_write(dp, MII_CONTROL, val, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ }
+ /*
+ * XXX -- nic state should be one of
+ * NIC_STATE_DISCONNECTED
+ * NIC_STATE_STOPPED
+ * NIC_STATE_INITIALIZED
+ * NIC_STATE_ONLINE
+ */
+ if (dp->nic_state >= NIC_STATE_INITIALIZED) {
+ /* notify the result of autonegotiation to mac */
+ if (usbgem_hal_set_media(dp) != USB_SUCCESS) {
+ goto usberr;
+ }
+ }
+ goto next_nowait;
+
+ case MII_STATE_LINKDOWN:
+ status = usbgem_mii_read(dp, MII_STATUS, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ if (status & MII_STATUS_LINKUP) {
+ /*
+ * Link is going up
+ */
+ dp->mii_state = MII_STATE_LINKUP;
+ dp->mii_supress_msg = B_FALSE;
+
+ DPRINTF(0, (CE_CONT,
+ "!%s: link up detected: status:%b",
+ dp->name, status, MII_STATUS_BITS));
+
+ /*
+ * MII_CONTROL_100MB and MII_CONTROL_FDUPLEX are
+ * ignored when MII_CONTROL_ANE is set.
+ */
+ cmn_err(CE_CONT,
+ "!%s: Link up: %d Mbps %s duplex %s flow control",
+ dp->name,
+ usbgem_speed_value[dp->speed],
+ dp->full_duplex ? "full" : "half",
+ usbgem_fc_type[dp->flow_control]);
+
+ dp->mii_interval =
+ dp->ugc.usbgc_mii_link_watch_interval;
+
+ if (dp->ugc.usbgc_mii_hw_link_detection &&
+ dp->nic_state == NIC_STATE_ONLINE) {
+ dp->mii_interval = 0;
+ }
+
+ if (dp->nic_state == NIC_STATE_ONLINE) {
+ if (dp->mac_state == MAC_STATE_INITIALIZED) {
+ (void) usbgem_mac_start(dp);
+ }
+ tx_sched = B_TRUE;
+ }
+
+ goto next;
+ }
+
+ dp->mii_supress_msg = B_TRUE;
+ if (dp->anadv_autoneg) {
+ dp->mii_timer -= diff;
+ if (dp->mii_timer <= 0) {
+ /*
+ * the link down timer expired.
+ * need to restart auto-negotiation.
+ */
+ linkdown_action =
+ dp->ugc.usbgc_mii_linkdown_timeout_action;
+ goto restart_autonego;
+ }
+ }
+ /* don't change mii_state */
+ goto next;
+
+ case MII_STATE_LINKUP:
+ if (rwlock == RW_READER) {
+ /* first pass, read mii status */
+ status = usbgem_mii_read(dp, MII_STATUS, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ }
+ if ((status & MII_STATUS_LINKUP) == 0) {
+ /*
+ * Link is going down
+ */
+ cmn_err(CE_NOTE,
+ "!%s: link down detected: status:%b",
+ dp->name, status, MII_STATUS_BITS);
+ /*
+ * Acquire exclusive lock to change mii_state
+ */
+ if (rwlock == RW_READER) {
+ rwlock = RW_WRITER;
+ rw_exit(&dp->dev_state_lock);
+ goto again;
+ }
+
+ dp->mii_state = MII_STATE_LINKDOWN;
+ dp->mii_timer = dp->ugc.usbgc_mii_linkdown_timeout;
+
+ /*
+ * As we may change the state of the device,
+ * let us acquire exclusive lock for the state.
+ */
+ if (dp->nic_state == NIC_STATE_ONLINE &&
+ dp->mac_state == MAC_STATE_ONLINE &&
+ dp->ugc.usbgc_mii_stop_mac_on_linkdown) {
+ (void) usbgem_restart_nic(dp);
+ /* drain tx */
+ tx_sched = B_TRUE;
+ }
+
+ if (dp->anadv_autoneg) {
+ /* need to restart auto-negotiation */
+ linkdown_action =
+ dp->ugc.usbgc_mii_linkdown_action;
+ goto restart_autonego;
+ }
+ /*
+ * don't use hw link down detection until the link
+			 * status becomes stable for a while.
+ */
+ dp->mii_interval =
+ dp->ugc.usbgc_mii_link_watch_interval;
+
+ goto next;
+ }
+
+ /*
+ * still link up, no need to change mii_state
+ */
+ if (dp->ugc.usbgc_mii_hw_link_detection &&
+ dp->nic_state == NIC_STATE_ONLINE) {
+ /*
+			 * no need to check the link status periodically
+			 * if the nic can generate interrupts when the link
+			 * goes down.
+ */
+ dp->mii_interval = 0;
+ }
+ goto next;
+ }
+ /* NOTREACHED */
+ cmn_err(CE_PANIC, "!%s: %s: not reached", dp->name, __func__);
+
+ /*
+ * Actions for new state.
+ */
+restart_autonego:
+ switch (linkdown_action) {
+ case MII_ACTION_RESET:
+ if (!dp->mii_supress_msg) {
+ cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
+ }
+ dp->mii_supress_msg = B_TRUE;
+ goto reset_phy;
+
+ case MII_ACTION_NONE:
+ dp->mii_supress_msg = B_TRUE;
+ if (dp->ugc.usbgc_mii_an_oneshot) {
+ goto autonego;
+ }
+ /* PHY will restart autonego automatically */
+ dp->mii_state = MII_STATE_AUTONEGOTIATING;
+ dp->mii_timer = dp->ugc.usbgc_mii_an_timeout;
+ dp->mii_interval = dp->ugc.usbgc_mii_an_watch_interval;
+ goto next;
+
+ case MII_ACTION_RSA:
+ if (!dp->mii_supress_msg) {
+ cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
+ dp->name);
+ }
+ dp->mii_supress_msg = B_TRUE;
+ goto autonego;
+
+ default:
+ cmn_err(CE_PANIC, "!%s: unknowm linkdown action: %d",
+ dp->name, dp->ugc.usbgc_mii_linkdown_action);
+ dp->mii_supress_msg = B_TRUE;
+ }
+ /* NOTREACHED */
+
+reset_phy:
+ if (!dp->mii_supress_msg) {
+ cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
+ }
+ dp->mii_state = MII_STATE_RESETTING;
+ dp->mii_timer = dp->ugc.usbgc_mii_reset_timeout;
+ if (!dp->ugc.usbgc_mii_dont_reset) {
+ usbgem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ }
+ dp->mii_interval = WATCH_INTERVAL_FAST;
+ goto next;
+
+autonego:
+ if (!dp->mii_supress_msg) {
+ cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
+ }
+ dp->mii_state = MII_STATE_AUTONEGOTIATING;
+ dp->mii_timer = dp->ugc.usbgc_mii_an_timeout;
+
+ /* start/restart autoneg */
+ val = usbgem_mii_read(dp, MII_CONTROL, &err) &
+ ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ if (val & MII_CONTROL_ANE) {
+ val |= MII_CONTROL_RSAN;
+ }
+ usbgem_mii_write(dp, MII_CONTROL,
+ val | dp->ugc.usbgc_mii_an_cmd | MII_CONTROL_ANE, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+
+ dp->mii_interval = dp->ugc.usbgc_mii_an_watch_interval;
+ goto next;
+
+usberr:
+ dp->mii_state = MII_STATE_UNKNOWN;
+ dp->mii_interval = dp->ugc.usbgc_mii_link_watch_interval;
+ tx_sched = B_TRUE;
+
+next:
+ *newstatep = dp->mii_state;
+ rw_exit(&dp->dev_state_lock);
+ return (tx_sched);
+}
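+
+/*
+ * State machine summary for usbgem_mii_link_check():
+ * UNKNOWN -> RESETTING -> AUTONEGOTIATING -> AN_DONE
+ *   -> MEDIA_SETUP -> LINKDOWN <-> LINKUP
+ * A usb error drops the state back to UNKNOWN, so the PHY is reset
+ * on a later pass of the link watcher.
+ */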
+
+static void
+usbgem_mii_link_watcher(struct usbgem_dev *dp)
+{
+ int old_mii_state;
+ int new_mii_state;
+ boolean_t tx_sched;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+	for (;;) {
+
+ mutex_enter(&dp->link_watcher_lock);
+ if (dp->mii_interval) {
+ (void) cv_timedwait(&dp->link_watcher_wait_cv,
+ &dp->link_watcher_lock,
+ dp->mii_interval + ddi_get_lbolt());
+ } else {
+ cv_wait(&dp->link_watcher_wait_cv,
+ &dp->link_watcher_lock);
+ }
+ mutex_exit(&dp->link_watcher_lock);
+
+ if (dp->link_watcher_stop) {
+ break;
+ }
+
+ /* we block callbacks from disconnect/suspend and restart */
+ tx_sched = usbgem_mii_link_check(dp,
+ &old_mii_state, &new_mii_state);
+
+ /*
+		 * gld v2 notifier functions must not be called
+		 * while any locks in this layer are held.
+ */
+ if (tx_sched) {
+ /* kick potentially stopped downstream */
+#ifdef USBGEM_CONFIG_GLDv3
+ mac_tx_update(dp->mh);
+#else
+ gld_sched(dp->macinfo);
+#endif
+ }
+
+ if (old_mii_state != new_mii_state) {
+ /* notify new mii link state */
+ if (new_mii_state == MII_STATE_LINKUP) {
+ dp->linkup_delay = 0;
+ USBGEM_LINKUP(dp);
+ } else if (dp->linkup_delay <= 0) {
+ USBGEM_LINKDOWN(dp);
+ }
+ } else if (dp->linkup_delay < 0) {
+ /* first linkup timeout */
+ dp->linkup_delay = 0;
+ USBGEM_LINKDOWN(dp);
+ }
+ }
+
+ thread_exit();
+}
+
+void
+usbgem_mii_update_link(struct usbgem_dev *dp)
+{
+ cv_signal(&dp->link_watcher_wait_cv);
+}
+
+int
+usbgem_mii_probe_default(struct usbgem_dev *dp)
+{
+ int phy;
+ uint16_t status;
+ uint16_t xstatus;
+ int err;
+ uint16_t adv;
+ uint16_t adv_org;
+
+ DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /*
+ * Scan PHY
+ */
+ dp->mii_status = 0;
+
+ /* Try default phy first */
+ if (dp->mii_phy_addr) {
+ status = usbgem_mii_read(dp, MII_STATUS, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ if (status != 0xffff && status != 0x0000) {
+ goto PHY_found;
+ }
+
+ if (dp->mii_phy_addr < 0) {
+ cmn_err(CE_NOTE,
+ "!%s: failed to probe default internal and/or non-MII PHY",
+ dp->name);
+ return (USB_FAILURE);
+ }
+
+ cmn_err(CE_NOTE,
+ "!%s: failed to probe default MII PHY at %d",
+ dp->name, dp->mii_phy_addr);
+ }
+
+	/* Try all possible addresses */
+ for (phy = dp->ugc.usbgc_mii_addr_min; phy < 32; phy++) {
+ dp->mii_phy_addr = phy;
+ status = usbgem_mii_read(dp, MII_STATUS, &err);
+ if (err != USB_SUCCESS) {
+ DPRINTF(0, (CE_CONT,
+ "!%s: %s: mii_read(status) failed",
+ dp->name, __func__));
+ goto usberr;
+ }
+
+ if (status != 0xffff && status != 0x0000) {
+ usbgem_mii_write(dp, MII_CONTROL, 0, &err);
+ if (err != USB_SUCCESS) {
+ DPRINTF(0, (CE_CONT,
+ "!%s: %s: mii_write(control) failed",
+ dp->name, __func__));
+ goto usberr;
+ }
+ goto PHY_found;
+ }
+ }
+ for (phy = dp->ugc.usbgc_mii_addr_min; phy < 32; phy++) {
+ dp->mii_phy_addr = phy;
+ usbgem_mii_write(dp, MII_CONTROL, 0, &err);
+ if (err != USB_SUCCESS) {
+ DPRINTF(0, (CE_CONT,
+ "!%s: %s: mii_write(control) failed",
+ dp->name, __func__));
+ goto usberr;
+ }
+ status = usbgem_mii_read(dp, MII_STATUS, &err);
+ if (err != USB_SUCCESS) {
+ DPRINTF(0, (CE_CONT,
+ "!%s: %s: mii_read(status) failed",
+ dp->name, __func__));
+ goto usberr;
+ }
+
+ if (status != 0xffff && status != 0) {
+ goto PHY_found;
+ }
+ }
+
+ cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
+ return (USB_FAILURE);
+
+PHY_found:
+ dp->mii_status = status;
+ dp->mii_status_ro = ~status;
+ dp->mii_phy_id = usbgem_mii_read(dp, MII_PHYIDH, &err) << 16;
+ if (err != USB_SUCCESS) {
+ DPRINTF(0, (CE_CONT,
+ "!%s: %s: mii_read(PHYIDH) failed",
+ dp->name, __func__));
+ goto usberr;
+ }
+ dp->mii_phy_id |= usbgem_mii_read(dp, MII_PHYIDL, &err);
+ if (err != USB_SUCCESS) {
+ DPRINTF(0, (CE_CONT,
+ "!%s: %s: mii_read(PHYIDL) failed",
+ dp->name, __func__));
+ goto usberr;
+ }
+
+ if (dp->mii_phy_addr < 0) {
+ cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
+ dp->name, dp->mii_phy_id);
+ } else {
+ cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
+ dp->name, dp->mii_phy_id, dp->mii_phy_addr);
+ }
+
+ cmn_err(CE_CONT,
+ "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b, exp:%b",
+ dp->name,
+ usbgem_mii_read(dp, MII_CONTROL, &err), MII_CONTROL_BITS,
+ status, MII_STATUS_BITS,
+ usbgem_mii_read(dp, MII_AN_ADVERT, &err), MII_ABILITY_BITS,
+ usbgem_mii_read(dp, MII_AN_LPABLE, &err), MII_ABILITY_BITS,
+ usbgem_mii_read(dp, MII_AN_EXPANSION, &err), MII_AN_EXP_BITS);
+
+ dp->mii_xstatus = 0;
+ if (status & MII_STATUS_XSTATUS) {
+ dp->mii_xstatus = usbgem_mii_read(dp, MII_XSTATUS, &err);
+
+ cmn_err(CE_CONT, "!%s: xstatus:%b",
+ dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
+ }
+ dp->mii_xstatus_ro = ~dp->mii_xstatus;
+
+	/* check if the phy can advertise pause abilities */
+ adv_org = usbgem_mii_read(dp, MII_AN_ADVERT, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+
+ usbgem_mii_write(dp, MII_AN_ADVERT,
+ MII_ABILITY_PAUSE | MII_ABILITY_ASM_DIR, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+
+ adv = usbgem_mii_read(dp, MII_AN_ADVERT, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+
+ if ((adv & MII_ABILITY_PAUSE) == 0) {
+ dp->ugc.usbgc_flow_control &= ~1;
+ }
+
+ if ((adv & MII_ABILITY_ASM_DIR) == 0) {
+ dp->ugc.usbgc_flow_control &= ~2;
+ }
+
+ usbgem_mii_write(dp, MII_AN_ADVERT, adv_org, &err);
+ if (err != USB_SUCCESS) {
+ goto usberr;
+ }
+ return (USB_SUCCESS);
+
+usberr:
+ return (USB_FAILURE);
+}
+
+int
+usbgem_mii_init_default(struct usbgem_dev *dp)
+{
+	/* EMPTY */
+ return (USB_SUCCESS);
+}
+
+static int
+usbgem_mii_start(struct usbgem_dev *dp)
+{
+ int err;
+ kthread_t *lwth;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /* make a first call of usbgem_mii_link_check() */
+ dp->link_watcher_stop = 0;
+ dp->mii_state = MII_STATE_UNKNOWN;
+ dp->mii_interval = drv_usectohz(1000*1000); /* 1sec */
+ dp->mii_last_check = ddi_get_lbolt();
+ dp->linkup_delay = 600 * drv_usectohz(1000*1000); /* 10 minutes */
+
+ lwth = thread_create(NULL, 0, usbgem_mii_link_watcher, dp, 0, &p0,
+ TS_RUN, minclsyspri);
+ if (lwth == NULL) {
+ cmn_err(CE_WARN,
+ "!%s: %s: failed to create a link watcher thread",
+ dp->name, __func__);
+ return (USB_FAILURE);
+ }
+ dp->link_watcher_did = lwth->t_did;
+
+ return (USB_SUCCESS);
+}
+
+static void
+usbgem_mii_stop(struct usbgem_dev *dp)
+{
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+	/* Ensure the link watcher thread has stopped */
+ dp->link_watcher_stop = 1;
+ cv_signal(&dp->link_watcher_wait_cv);
+ thread_join(dp->link_watcher_did);
+}
+
+/* ============================================================== */
+/*
+ * internal mac register operation interface
+ */
+/* ============================================================== */
+/*
+ * usbgem_mac_init: cold start
+ */
+static int
+usbgem_mac_init(struct usbgem_dev *dp)
+{
+ int err;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ if (dp->mac_state == MAC_STATE_DISCONNECTED) {
+ /* pretend we succeeded */
+ return (USB_SUCCESS);
+ }
+
+ ASSERT(dp->mac_state == MAC_STATE_STOPPED);
+
+ /* reset fatal error timestamp */
+ dp->fatal_error = (clock_t)0;
+
+ /* reset tx side state */
+ mutex_enter(&dp->txlock);
+ dp->tx_busy_cnt = 0;
+ dp->tx_max_packets = dp->ugc.usbgc_tx_list_max;
+ mutex_exit(&dp->txlock);
+
+ /* reset rx side state */
+ mutex_enter(&dp->rxlock);
+ dp->rx_busy_cnt = 0;
+ mutex_exit(&dp->rxlock);
+
+ err = usbgem_hal_init_chip(dp);
+ if (err == USB_SUCCESS) {
+ dp->mac_state = MAC_STATE_INITIALIZED;
+ }
+
+ return (err);
+}
+
+/*
+ * usbgem_mac_start: warm start
+ */
+static int
+usbgem_mac_start(struct usbgem_dev *dp)
+{
+ int err;
+ int i;
+ usb_flags_t flags = 0;
+ usb_intr_req_t *req;
+#ifdef USBGEM_DEBUG_LEVEL
+ usb_pipe_state_t p_state;
+#endif
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ if (dp->mac_state == MAC_STATE_DISCONNECTED) {
+ /* do nothing but don't return failure */
+ return (USB_SUCCESS);
+ }
+
+ if (dp->mac_state != MAC_STATE_INITIALIZED) {
+		/* don't return failure */
+ DPRINTF(0, (CE_CONT,
+ "!%s: %s: mac_state(%d) is not MAC_STATE_INITIALIZED",
+ dp->name, __func__, dp->mac_state));
+ goto x;
+ }
+
+ dp->mac_state = MAC_STATE_ONLINE;
+
+ if (usbgem_hal_start_chip(dp) != USB_SUCCESS) {
+ cmn_err(CE_NOTE,
+ "!%s: %s: usb error was detected during start_chip",
+ dp->name, __func__);
+ goto x;
+ }
+
+#ifdef USBGEM_DEBUG_LEVEL
+ usb_pipe_get_state(dp->intr_pipe, &p_state, 0);
+ ASSERT(p_state == USB_PIPE_STATE_IDLE);
+#endif /* USBGEM_DEBUG_LEVEL */
+
+ if (dp->ugc.usbgc_interrupt && dp->intr_pipe) {
+
+ /* make a request for interrupt */
+
+ req = usb_alloc_intr_req(dp->dip, 0, USB_FLAGS_SLEEP);
+ if (req == NULL) {
+ cmn_err(CE_WARN, "!%s: %s: failed to allocate intreq",
+ dp->name, __func__);
+ goto x;
+ }
+ req->intr_data = NULL;
+ req->intr_client_private = (usb_opaque_t)dp;
+ req->intr_timeout = 0;
+ req->intr_attributes =
+ USB_ATTRS_SHORT_XFER_OK | USB_ATTRS_AUTOCLEARING;
+ req->intr_len = dp->ep_intr->wMaxPacketSize;
+ req->intr_cb = usbgem_intr_cb;
+ req->intr_exc_cb = usbgem_intr_cb;
+ req->intr_completion_reason = 0;
+ req->intr_cb_flags = 0;
+
+ err = usb_pipe_intr_xfer(dp->intr_pipe, req, flags);
+ if (err != USB_SUCCESS) {
+ cmn_err(CE_WARN,
+ "%s: err:%d failed to start polling of intr pipe",
+ dp->name, err);
+ goto x;
+ }
+ }
+
+ /* kick to receive the first packet */
+ if (usbgem_init_rx_buf(dp) != USB_SUCCESS) {
+ goto err_stop_intr;
+ }
+ dp->rx_active = B_TRUE;
+
+ return (USB_SUCCESS);
+
+err_stop_intr:
+ /* stop the interrupt pipe */
+ DPRINTF(0, (CE_CONT, "!%s: %s: FAULURE", dp->name, __func__));
+ if (dp->ugc.usbgc_interrupt && dp->intr_pipe) {
+ usb_pipe_stop_intr_polling(dp->intr_pipe, USB_FLAGS_SLEEP);
+ }
+x:
+ ASSERT(dp->mac_state == MAC_STATE_ONLINE);
+ /* we use another flag to indicate error state. */
+ if (dp->fatal_error == (clock_t)0) {
+ dp->fatal_error = usbgem_timestamp_nz();
+ }
+ return (USB_FAILURE);
+}
+
+static int
+usbgem_mac_stop(struct usbgem_dev *dp, int new_state, boolean_t graceful)
+{
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /*
+ * we must have writer lock for dev_state_lock
+ */
+	ASSERT(new_state == MAC_STATE_STOPPED ||
+	    new_state == MAC_STATE_DISCONNECTED);
+
+ /* stop polling interrupt pipe */
+ if (dp->ugc.usbgc_interrupt && dp->intr_pipe) {
+ usb_pipe_stop_intr_polling(dp->intr_pipe, USB_FLAGS_SLEEP);
+ }
+
+ if (new_state == MAC_STATE_STOPPED || graceful) {
+ /* stop the nic hardware completely */
+ if (usbgem_hal_stop_chip(dp) != USB_SUCCESS) {
+ (void) usbgem_hal_reset_chip(dp);
+ }
+ }
+
+ /* stop preparing new rx packets and sending new packets */
+ dp->mac_state = new_state;
+
+ /* other processors must see the new mac_state after this point */
+ membar_producer();
+
+ /* cancel all requests we have sent */
+ usb_pipe_reset(dp->dip, dp->bulkin_pipe, USB_FLAGS_SLEEP, NULL, 0);
+ usb_pipe_reset(dp->dip, dp->bulkout_pipe, USB_FLAGS_SLEEP, NULL, 0);
+
+ DPRINTF(0, (CE_CONT,
+ "!%s: %s: rx_busy_cnt:%d tx_busy_cnt:%d",
+ dp->name, __func__, dp->rx_busy_cnt, dp->tx_busy_cnt));
+
+ /*
+ * Here all rx requests have been cancelled and their callback
+ * functions have been executed, because we called usb_pipe_reset
+ * synchronously.
+ * So we just wait until rx_busy_cnt drops to 0.
+ */
+ mutex_enter(&dp->rxlock);
+ while (dp->rx_busy_cnt > 0) {
+ cv_wait(&dp->rx_drain_cv, &dp->rxlock);
+ }
+ mutex_exit(&dp->rxlock);
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: rx_busy_cnt is %d now",
+ dp->name, __func__, dp->rx_busy_cnt));
+
+ mutex_enter(&dp->txlock);
+ while (dp->tx_busy_cnt > 0) {
+ cv_wait(&dp->tx_drain_cv, &dp->txlock);
+ }
+ mutex_exit(&dp->txlock);
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: tx_busy_cnt is %d now",
+ dp->name, __func__, dp->tx_busy_cnt));
+
+ return (USB_SUCCESS);
+}
+
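+/*
+ * usbgem_add_multicast: append a multicast address to the software
+ * mc list and push the resulting rx filter to the hardware unless the
+ * device is disconnected.
+ */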
+static int
+usbgem_add_multicast(struct usbgem_dev *dp, const uint8_t *ep)
+{
+ int cnt;
+ int err = USB_SUCCESS;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ sema_p(&dp->rxfilter_lock);
+ if (dp->mc_count_req++ < USBGEM_MAXMC) {
+ /* append the new address at the end of the mclist */
+ cnt = dp->mc_count;
+ bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
+ ETHERADDRL);
+ if (dp->ugc.usbgc_multicast_hash) {
+ dp->mc_list[cnt].hash =
+ (*dp->ugc.usbgc_multicast_hash)(dp, ep);
+ }
+ dp->mc_count = cnt + 1;
+ }
+
+ if (dp->mc_count_req != dp->mc_count) {
+ /* multicast address list overflow */
+ dp->rxmode |= RXMODE_MULTI_OVF;
+ } else {
+ dp->rxmode &= ~RXMODE_MULTI_OVF;
+ }
+
+ if (dp->mac_state != MAC_STATE_DISCONNECTED) {
+ /* tell new multicast list to the hardware */
+ err = usbgem_hal_set_rx_filter(dp);
+ }
+ sema_v(&dp->rxfilter_lock);
+
+ return (err);
+}
+
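+/*
+ * usbgem_remove_multicast: delete a multicast address from the software
+ * mc list by copying the tail forward, then update the hardware rx
+ * filter.
+ */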
+static int
+usbgem_remove_multicast(struct usbgem_dev *dp, const uint8_t *ep)
+{
+ size_t len;
+ int i;
+ int cnt;
+ int err = USB_SUCCESS;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ sema_p(&dp->rxfilter_lock);
+ dp->mc_count_req--;
+ cnt = dp->mc_count;
+ for (i = 0; i < cnt; i++) {
+ if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
+ continue;
+ }
+ /* shrink the mclist by copying forward */
+ len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
+ if (len > 0) {
+ bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
+ }
+ dp->mc_count--;
+ break;
+ }
+
+ if (dp->mc_count_req != dp->mc_count) {
+ /* multicast address list overflow */
+ dp->rxmode |= RXMODE_MULTI_OVF;
+ } else {
+ dp->rxmode &= ~RXMODE_MULTI_OVF;
+ }
+
+ if (dp->mac_state != MAC_STATE_DISCONNECTED) {
+ err = usbgem_hal_set_rx_filter(dp);
+ }
+ sema_v(&dp->rxfilter_lock);
+
+ return (err);
+}
+
+
+/* ============================================================== */
+/*
+ * ioctl
+ */
+/* ============================================================== */
+enum ioc_reply {
+ IOC_INVAL = -1, /* bad, NAK with EINVAL */
+ IOC_DONE, /* OK, reply sent */
+ IOC_ACK, /* OK, just send ACK */
+ IOC_REPLY, /* OK, just send reply */
+ IOC_RESTART_ACK, /* OK, restart & ACK */
+ IOC_RESTART_REPLY /* OK, restart & reply */
+};
+
+
+#ifdef USBGEM_CONFIG_MAC_PROP
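+/*
+ * usbgem_get_def_val: report the factory default value of a mac
+ * property, derived from the MII status/xstatus capability bits.
+ */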
+static int
+usbgem_get_def_val(struct usbgem_dev *dp, mac_prop_id_t pr_num,
+ uint_t pr_valsize, void *pr_val)
+{
+ link_flowctrl_t fl;
+ int err = 0;
+
+ ASSERT(pr_valsize > 0);
+ switch (pr_num) {
+ case MAC_PROP_AUTONEG:
+ *(uint8_t *)pr_val =
+ BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
+ break;
+
+ case MAC_PROP_FLOWCTRL:
+ if (pr_valsize < sizeof (link_flowctrl_t)) {
+ return (EINVAL);
+ }
+ switch (dp->ugc.usbgc_flow_control) {
+ case FLOW_CONTROL_NONE:
+ fl = LINK_FLOWCTRL_NONE;
+ break;
+ case FLOW_CONTROL_SYMMETRIC:
+ fl = LINK_FLOWCTRL_BI;
+ break;
+ case FLOW_CONTROL_TX_PAUSE:
+ fl = LINK_FLOWCTRL_TX;
+ break;
+ case FLOW_CONTROL_RX_PAUSE:
+ fl = LINK_FLOWCTRL_RX;
+ break;
+ }
+ bcopy(&fl, pr_val, sizeof (fl));
+ break;
+
+ case MAC_PROP_ADV_1000FDX_CAP:
+ case MAC_PROP_EN_1000FDX_CAP:
+ *(uint8_t *)pr_val =
+ (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
+ (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
+ break;
+
+ case MAC_PROP_ADV_1000HDX_CAP:
+ case MAC_PROP_EN_1000HDX_CAP:
+ *(uint8_t *)pr_val =
+ (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
+ (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
+ break;
+
+ case MAC_PROP_ADV_100T4_CAP:
+ case MAC_PROP_EN_100T4_CAP:
+ *(uint8_t *)pr_val =
+ BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
+ break;
+
+ case MAC_PROP_ADV_100FDX_CAP:
+ case MAC_PROP_EN_100FDX_CAP:
+ *(uint8_t *)pr_val =
+ BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
+ break;
+
+ case MAC_PROP_ADV_100HDX_CAP:
+ case MAC_PROP_EN_100HDX_CAP:
+ *(uint8_t *)pr_val =
+ BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
+ break;
+
+ case MAC_PROP_ADV_10FDX_CAP:
+ case MAC_PROP_EN_10FDX_CAP:
+ *(uint8_t *)pr_val =
+ BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
+ break;
+
+ case MAC_PROP_ADV_10HDX_CAP:
+ case MAC_PROP_EN_10HDX_CAP:
+ *(uint8_t *)pr_val =
+ BOOLEAN(dp->mii_status & MII_STATUS_10);
+ break;
+
+ default:
+ err = ENOTSUP;
+ break;
+ }
+ return (err);
+}
+
+#ifdef MAC_VERSION_V1
+static void
+usbgem_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ mac_prop_info_handle_t prh)
+{
+ struct usbgem_dev *dp = arg;
+ link_flowctrl_t fl;
+
+ /*
+ * By default permissions are read/write unless specified
+ * otherwise by the driver.
+ */
+
+ switch (pr_num) {
+ case MAC_PROP_DUPLEX:
+ case MAC_PROP_SPEED:
+ case MAC_PROP_STATUS:
+ case MAC_PROP_ADV_1000FDX_CAP:
+ case MAC_PROP_ADV_1000HDX_CAP:
+ case MAC_PROP_ADV_100FDX_CAP:
+ case MAC_PROP_ADV_100HDX_CAP:
+ case MAC_PROP_ADV_10FDX_CAP:
+ case MAC_PROP_ADV_10HDX_CAP:
+ case MAC_PROP_ADV_100T4_CAP:
+ case MAC_PROP_EN_100T4_CAP:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ break;
+
+ case MAC_PROP_EN_1000FDX_CAP:
+ if ((dp->mii_xstatus_ro & MII_XSTATUS_1000BASET_FD) == 0) {
+ mac_prop_info_set_default_uint8(prh,
+ BOOLEAN(
+ dp->mii_xstatus & MII_XSTATUS_1000BASET_FD));
+ } else if ((dp->mii_xstatus_ro & MII_XSTATUS_1000BASEX_FD)
+ == 0) {
+ mac_prop_info_set_default_uint8(prh,
+ BOOLEAN(
+ dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD));
+ } else {
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ }
+ break;
+
+ case MAC_PROP_EN_1000HDX_CAP:
+ if ((dp->mii_xstatus_ro & MII_XSTATUS_1000BASET) == 0) {
+ mac_prop_info_set_default_uint8(prh,
+ BOOLEAN(
+ dp->mii_xstatus & MII_XSTATUS_1000BASET));
+ } else if ((dp->mii_xstatus_ro & MII_XSTATUS_1000BASEX) == 0) {
+ mac_prop_info_set_default_uint8(prh,
+ BOOLEAN(
+ dp->mii_xstatus & MII_XSTATUS_1000BASEX));
+ } else {
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ }
+ break;
+
+ case MAC_PROP_EN_100FDX_CAP:
+ if ((dp->mii_status_ro & MII_STATUS_100_BASEX_FD) == 0) {
+ mac_prop_info_set_default_uint8(prh,
+ BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD));
+ } else {
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ }
+ break;
+
+ case MAC_PROP_EN_100HDX_CAP:
+ if ((dp->mii_status_ro & MII_STATUS_100_BASEX) == 0) {
+ mac_prop_info_set_default_uint8(prh,
+ BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX));
+ } else {
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ }
+ break;
+
+ case MAC_PROP_EN_10FDX_CAP:
+ if ((dp->mii_status_ro & MII_STATUS_10_FD) == 0) {
+ mac_prop_info_set_default_uint8(prh,
+ BOOLEAN(dp->mii_status & MII_STATUS_10_FD));
+ } else {
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ }
+ break;
+
+ case MAC_PROP_EN_10HDX_CAP:
+ if ((dp->mii_status_ro & MII_STATUS_10) == 0) {
+ mac_prop_info_set_default_uint8(prh,
+ BOOLEAN(dp->mii_status & MII_STATUS_10));
+ } else {
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ }
+ break;
+
+ case MAC_PROP_AUTONEG:
+ if ((dp->mii_status_ro & MII_STATUS_CANAUTONEG) == 0) {
+ mac_prop_info_set_default_uint8(prh,
+ BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG));
+ } else {
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ }
+ break;
+
+ case MAC_PROP_FLOWCTRL:
+ switch (dp->ugc.usbgc_flow_control) {
+ case FLOW_CONTROL_NONE:
+ fl = LINK_FLOWCTRL_NONE;
+ break;
+ case FLOW_CONTROL_SYMMETRIC:
+ fl = LINK_FLOWCTRL_BI;
+ break;
+ case FLOW_CONTROL_TX_PAUSE:
+ fl = LINK_FLOWCTRL_TX;
+ break;
+ case FLOW_CONTROL_RX_PAUSE:
+ fl = LINK_FLOWCTRL_RX;
+ break;
+ }
+ mac_prop_info_set_default_link_flowctrl(prh, fl);
+ break;
+
+ case MAC_PROP_MTU:
+ mac_prop_info_set_range_uint32(prh,
+ dp->ugc.usbgc_min_mtu, dp->ugc.usbgc_max_mtu);
+ break;
+
+ case MAC_PROP_PRIVATE:
+ break;
+ }
+}
+#endif
+
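+/*
+ * usbgem_m_setprop: set a writable mac property.  When an advertised
+ * capability actually changes, the link watcher is kicked to
+ * renegotiate the link.
+ */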
+static int
+usbgem_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_valsize, const void *pr_val)
+{
+ struct usbgem_dev *dp = arg;
+ int err = 0;
+ boolean_t update = B_FALSE;
+ link_flowctrl_t flowctrl;
+ uint32_t cur_mtu, new_mtu;
+
+ rw_enter(&dp->dev_state_lock, RW_WRITER);
+ switch (pr_num) {
+ case MAC_PROP_EN_1000FDX_CAP:
+ if ((dp->mii_xstatus_ro & MII_XSTATUS_1000BASET_FD) == 0 ||
+ (dp->mii_xstatus_ro & MII_XSTATUS_1000BASEX_FD) == 0) {
+ if (dp->anadv_1000fdx != *(uint8_t *)pr_val) {
+ dp->anadv_1000fdx = *(uint8_t *)pr_val;
+ update = B_TRUE;
+ }
+ } else {
+ err = ENOTSUP;
+ }
+ break;
+
+ case MAC_PROP_EN_1000HDX_CAP:
+ if ((dp->mii_xstatus_ro & MII_XSTATUS_1000BASET) == 0 ||
+ (dp->mii_xstatus_ro & MII_XSTATUS_1000BASEX) == 0) {
+ if (dp->anadv_1000hdx != *(uint8_t *)pr_val) {
+ dp->anadv_1000hdx = *(uint8_t *)pr_val;
+ update = B_TRUE;
+ }
+ } else {
+ err = ENOTSUP;
+ }
+ break;
+
+ case MAC_PROP_EN_100FDX_CAP:
+ if ((dp->mii_status_ro & MII_STATUS_100_BASEX_FD) == 0) {
+ if (dp->anadv_100fdx != *(uint8_t *)pr_val) {
+ dp->anadv_100fdx = *(uint8_t *)pr_val;
+ update = B_TRUE;
+ }
+ } else {
+ err = ENOTSUP;
+ }
+ break;
+
+ case MAC_PROP_EN_100HDX_CAP:
+ if ((dp->mii_status_ro & MII_STATUS_100_BASEX) == 0) {
+ if (dp->anadv_100hdx != *(uint8_t *)pr_val) {
+ dp->anadv_100hdx = *(uint8_t *)pr_val;
+ update = B_TRUE;
+ }
+ } else {
+ err = ENOTSUP;
+ }
+ break;
+
+ case MAC_PROP_EN_10FDX_CAP:
+ if ((dp->mii_status_ro & MII_STATUS_10_FD) == 0) {
+ if (dp->anadv_10fdx != *(uint8_t *)pr_val) {
+ dp->anadv_10fdx = *(uint8_t *)pr_val;
+ update = B_TRUE;
+ }
+ } else {
+ err = ENOTSUP;
+ }
+ break;
+
+ case MAC_PROP_EN_10HDX_CAP:
+ if ((dp->mii_status_ro & MII_STATUS_10) == 0) {
+ if (dp->anadv_10hdx != *(uint8_t *)pr_val) {
+ dp->anadv_10hdx = *(uint8_t *)pr_val;
+ update = B_TRUE;
+ }
+ } else {
+ err = ENOTSUP;
+ }
+ break;
+
+ case MAC_PROP_AUTONEG:
+ if ((dp->mii_status_ro & MII_STATUS_CANAUTONEG) == 0) {
+ if (dp->anadv_autoneg != *(uint8_t *)pr_val) {
+ dp->anadv_autoneg = *(uint8_t *)pr_val;
+ update = B_TRUE;
+ }
+ } else {
+ err = ENOTSUP;
+ }
+ break;
+
+ case MAC_PROP_FLOWCTRL:
+ bcopy(pr_val, &flowctrl, sizeof (flowctrl));
+
+ switch (flowctrl) {
+ default:
+ err = EINVAL;
+ break;
+
+ case LINK_FLOWCTRL_NONE:
+ if (dp->flow_control != FLOW_CONTROL_NONE) {
+ dp->flow_control = FLOW_CONTROL_NONE;
+ update = B_TRUE;
+ }
+ break;
+
+ case LINK_FLOWCTRL_RX:
+ if (dp->flow_control != FLOW_CONTROL_RX_PAUSE) {
+ dp->flow_control = FLOW_CONTROL_RX_PAUSE;
+ update = B_TRUE;
+ }
+ break;
+
+ case LINK_FLOWCTRL_TX:
+ if (dp->flow_control != FLOW_CONTROL_TX_PAUSE) {
+ dp->flow_control = FLOW_CONTROL_TX_PAUSE;
+ update = B_TRUE;
+ }
+ break;
+
+ case LINK_FLOWCTRL_BI:
+ if (dp->flow_control != FLOW_CONTROL_SYMMETRIC) {
+ dp->flow_control = FLOW_CONTROL_SYMMETRIC;
+ update = B_TRUE;
+ }
+ break;
+ }
+ break;
+
+ case MAC_PROP_ADV_1000FDX_CAP:
+ case MAC_PROP_ADV_1000HDX_CAP:
+ case MAC_PROP_ADV_100FDX_CAP:
+ case MAC_PROP_ADV_100HDX_CAP:
+ case MAC_PROP_ADV_10FDX_CAP:
+ case MAC_PROP_ADV_10HDX_CAP:
+ case MAC_PROP_STATUS:
+ case MAC_PROP_SPEED:
+ case MAC_PROP_DUPLEX:
+ err = ENOTSUP; /* read-only prop. Can't set this. */
+ break;
+
+ case MAC_PROP_MTU:
+ bcopy(pr_val, &new_mtu, sizeof (new_mtu));
+ if (new_mtu != dp->mtu) {
+ err = EINVAL;
+ }
+ break;
+
+ case MAC_PROP_PRIVATE:
+ err = ENOTSUP;
+ break;
+
+ default:
+ err = ENOTSUP;
+ break;
+ }
+
+ if (update) {
+ /* sync with PHY */
+ usbgem_choose_forcedmode(dp);
+ dp->mii_state = MII_STATE_UNKNOWN;
+ cv_signal(&dp->link_watcher_wait_cv);
+ }
+ rw_exit(&dp->dev_state_lock);
+ return (err);
+}
+
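+/*
+ * usbgem_m_getprop: report the current value of a mac property; the
+ * pre-MAC_VERSION_V1 variant also reports the access permission
+ * through *perm.
+ */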
+static int
+#ifdef MAC_VERSION_V1
+usbgem_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_valsize, void *pr_val)
+#else
+usbgem_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
+#endif
+{
+ struct usbgem_dev *dp = arg;
+ int err = 0;
+ link_flowctrl_t flowctrl;
+ uint64_t tmp = 0;
+
+ if (pr_valsize == 0) {
+ return (EINVAL);
+ }
+#ifndef MAC_VERSION_V1
+ *perm = MAC_PROP_PERM_RW;
+#endif
+ bzero(pr_val, pr_valsize);
+#ifndef MAC_VERSION_V1
+ if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
+ return (usbgem_get_def_val(dp, pr_num, pr_valsize, pr_val));
+ }
+#endif
+ rw_enter(&dp->dev_state_lock, RW_READER);
+ switch (pr_num) {
+ case MAC_PROP_DUPLEX:
+#ifndef MAC_VERSION_V1
+ *perm = MAC_PROP_PERM_READ;
+#endif
+ if (pr_valsize >= sizeof (link_duplex_t)) {
+ if (dp->mii_state != MII_STATE_LINKUP) {
+ *(link_duplex_t *)pr_val = LINK_DUPLEX_UNKNOWN;
+ } else if (dp->full_duplex) {
+ *(link_duplex_t *)pr_val = LINK_DUPLEX_FULL;
+ } else {
+ *(link_duplex_t *)pr_val = LINK_DUPLEX_HALF;
+ }
+ } else {
+ err = EINVAL;
+ }
+ break;
+ case MAC_PROP_SPEED:
+#ifndef MAC_VERSION_V1
+ *perm = MAC_PROP_PERM_READ;
+#endif
+ if (pr_valsize >= sizeof (uint64_t)) {
+ switch (dp->speed) {
+ case USBGEM_SPD_1000:
+ tmp = 1000000000;
+ break;
+ case USBGEM_SPD_100:
+ tmp = 100000000;
+ break;
+ case USBGEM_SPD_10:
+ tmp = 10000000;
+ break;
+ default:
+ tmp = 0;
+ }
+ bcopy(&tmp, pr_val, sizeof (tmp));
+ } else {
+ err = EINVAL;
+ }
+ break;
+
+ case MAC_PROP_AUTONEG:
+#ifndef MAC_VERSION_V1
+ if (dp->mii_status_ro & MII_STATUS_CANAUTONEG) {
+ *perm = MAC_PROP_PERM_READ;
+ }
+#endif
+ *(uint8_t *)pr_val = dp->anadv_autoneg;
+ break;
+
+ case MAC_PROP_FLOWCTRL:
+ if (pr_valsize >= sizeof (link_flowctrl_t)) {
+ switch (dp->flow_control) {
+ case FLOW_CONTROL_NONE:
+ flowctrl = LINK_FLOWCTRL_NONE;
+ break;
+ case FLOW_CONTROL_RX_PAUSE:
+ flowctrl = LINK_FLOWCTRL_RX;
+ break;
+ case FLOW_CONTROL_TX_PAUSE:
+ flowctrl = LINK_FLOWCTRL_TX;
+ break;
+ case FLOW_CONTROL_SYMMETRIC:
+ flowctrl = LINK_FLOWCTRL_BI;
+ break;
+ }
+ bcopy(&flowctrl, pr_val, sizeof (flowctrl));
+ } else {
+ err = EINVAL;
+ }
+ break;
+
+ case MAC_PROP_ADV_1000FDX_CAP:
+ case MAC_PROP_ADV_1000HDX_CAP:
+ case MAC_PROP_ADV_100FDX_CAP:
+ case MAC_PROP_ADV_100HDX_CAP:
+ case MAC_PROP_ADV_10FDX_CAP:
+ case MAC_PROP_ADV_10HDX_CAP:
+ case MAC_PROP_ADV_100T4_CAP:
+ (void) usbgem_get_def_val(dp, pr_num, pr_valsize, pr_val);
+ break;
+
+ case MAC_PROP_EN_1000FDX_CAP:
+#ifndef MAC_VERSION_V1
+ if ((dp->mii_xstatus_ro & MII_XSTATUS_1000BASET_FD) &&
+ (dp->mii_xstatus_ro & MII_XSTATUS_1000BASEX_FD)) {
+ *perm = MAC_PROP_PERM_READ;
+ }
+#endif
+ *(uint8_t *)pr_val = dp->anadv_1000fdx;
+ break;
+
+ case MAC_PROP_EN_1000HDX_CAP:
+#ifndef MAC_VERSION_V1
+ if ((dp->mii_xstatus_ro & MII_XSTATUS_1000BASET) &&
+ (dp->mii_xstatus_ro & MII_XSTATUS_1000BASEX)) {
+ *perm = MAC_PROP_PERM_READ;
+ }
+#endif
+ *(uint8_t *)pr_val = dp->anadv_1000hdx;
+ break;
+
+ case MAC_PROP_EN_100FDX_CAP:
+#ifndef MAC_VERSION_V1
+ if (dp->mii_status_ro & MII_STATUS_100_BASEX_FD) {
+ *perm = MAC_PROP_PERM_READ;
+ }
+#endif
+ *(uint8_t *)pr_val = dp->anadv_100fdx;
+ break;
+
+ case MAC_PROP_EN_100HDX_CAP:
+#ifndef MAC_VERSION_V1
+ if (dp->mii_status_ro & MII_STATUS_100_BASEX) {
+ *perm = MAC_PROP_PERM_READ;
+ }
+#endif
+ *(uint8_t *)pr_val = dp->anadv_100hdx;
+ break;
+
+ case MAC_PROP_EN_10FDX_CAP:
+#ifndef MAC_VERSION_V1
+ if (dp->mii_status_ro & MII_STATUS_10_FD) {
+ *perm = MAC_PROP_PERM_READ;
+ }
+#endif
+ *(uint8_t *)pr_val = dp->anadv_10fdx;
+ break;
+
+ case MAC_PROP_EN_10HDX_CAP:
+#ifndef MAC_VERSION_V1
+ if (dp->mii_status_ro & MII_STATUS_10) {
+ *perm = MAC_PROP_PERM_READ;
+ }
+#endif
+ *(uint8_t *)pr_val = dp->anadv_10hdx;
+ break;
+
+ case MAC_PROP_EN_100T4_CAP:
+#ifndef MAC_VERSION_V1
+ if (dp->mii_status_ro & MII_STATUS_100_BASE_T4) {
+ *perm = MAC_PROP_PERM_READ;
+ }
+#endif
+ *(uint8_t *)pr_val = dp->anadv_100t4;
+ break;
+
+ case MAC_PROP_PRIVATE:
+ err = ENOTSUP;
+ break;
+
+#ifndef MAC_VERSION_V1
+ case MAC_PROP_MTU: {
+ mac_propval_range_t range;
+ if (!(pr_flags & MAC_PROP_POSSIBLE)) {
+ err = ENOTSUP;
+ break;
+ }
+ if (pr_valsize < sizeof (mac_propval_range_t)) {
+ err = EINVAL;
+ break;
+ }
+ range.mpr_count = 1;
+ range.mpr_type = MAC_PROPVAL_UINT32;
+ range.range_uint32[0].mpur_min = ETHERMTU;
+ range.range_uint32[0].mpur_max = dp->mtu;
+ bcopy(&range, pr_val, sizeof (range));
+ break;
+ }
+#endif
+ default:
+ err = ENOTSUP;
+ break;
+ }
+
+ rw_exit(&dp->dev_state_lock);
+ return (err);
+}
+#endif /* USBGEM_CONFIG_MAC_PROP */
+
+#ifdef USBGEM_CONFIG_ND
+/* ============================================================== */
+/*
+ * ND interface
+ */
+/* ============================================================== */
+enum {
+ PARAM_AUTONEG_CAP,
+ PARAM_PAUSE_CAP,
+ PARAM_ASYM_PAUSE_CAP,
+ PARAM_1000FDX_CAP,
+ PARAM_1000HDX_CAP,
+ PARAM_100T4_CAP,
+ PARAM_100FDX_CAP,
+ PARAM_100HDX_CAP,
+ PARAM_10FDX_CAP,
+ PARAM_10HDX_CAP,
+
+ PARAM_ADV_AUTONEG_CAP,
+ PARAM_ADV_PAUSE_CAP,
+ PARAM_ADV_ASYM_PAUSE_CAP,
+ PARAM_ADV_1000FDX_CAP,
+ PARAM_ADV_1000HDX_CAP,
+ PARAM_ADV_100T4_CAP,
+ PARAM_ADV_100FDX_CAP,
+ PARAM_ADV_100HDX_CAP,
+ PARAM_ADV_10FDX_CAP,
+ PARAM_ADV_10HDX_CAP,
+ PARAM_ADV_1000T_MS,
+
+ PARAM_LP_AUTONEG_CAP,
+ PARAM_LP_PAUSE_CAP,
+ PARAM_LP_ASYM_PAUSE_CAP,
+ PARAM_LP_1000FDX_CAP,
+ PARAM_LP_1000HDX_CAP,
+ PARAM_LP_100T4_CAP,
+ PARAM_LP_100FDX_CAP,
+ PARAM_LP_100HDX_CAP,
+ PARAM_LP_10FDX_CAP,
+ PARAM_LP_10HDX_CAP,
+
+ PARAM_LINK_STATUS,
+ PARAM_LINK_SPEED,
+ PARAM_LINK_DUPLEX,
+
+ PARAM_LINK_AUTONEG,
+ PARAM_LINK_RX_PAUSE,
+ PARAM_LINK_TX_PAUSE,
+
+ PARAM_LOOP_MODE,
+ PARAM_MSI_CNT,
+#ifdef DEBUG_RESUME
+ PARAM_RESUME_TEST,
+#endif
+
+ PARAM_COUNT
+};
+
+struct usbgem_nd_arg {
+ struct usbgem_dev *dp;
+ int item;
+};
+
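+/*
+ * usbgem_param_get: ndd get handler; prints the value of the parameter
+ * selected by the usbgem_nd_arg bound to this node.
+ */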
+static int
+usbgem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
+{
+ struct usbgem_dev *dp = ((struct usbgem_nd_arg *)(void *)arg)->dp;
+ int item = ((struct usbgem_nd_arg *)(void *)arg)->item;
+ long val;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called, item:%d",
+ dp->name, __func__, item));
+
+ switch (item) {
+ case PARAM_AUTONEG_CAP:
+ val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
+ DPRINTF(1, (CE_CONT, "autoneg_cap:%d", val));
+ break;
+
+ case PARAM_PAUSE_CAP:
+ val = dp->ugc.usbgc_flow_control != FLOW_CONTROL_NONE;
+ break;
+
+ case PARAM_ASYM_PAUSE_CAP:
+ val = dp->ugc.usbgc_flow_control > FLOW_CONTROL_SYMMETRIC;
+ break;
+
+ case PARAM_1000FDX_CAP:
+ val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
+ (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
+ break;
+
+ case PARAM_1000HDX_CAP:
+ val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
+ (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
+ break;
+
+ case PARAM_100T4_CAP:
+ val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
+ break;
+
+ case PARAM_100FDX_CAP:
+ val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
+ break;
+
+ case PARAM_100HDX_CAP:
+ val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
+ break;
+
+ case PARAM_10FDX_CAP:
+ val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
+ break;
+
+ case PARAM_10HDX_CAP:
+ val = BOOLEAN(dp->mii_status & MII_STATUS_10);
+ break;
+
+ case PARAM_ADV_AUTONEG_CAP:
+ val = dp->anadv_autoneg;
+ break;
+
+ case PARAM_ADV_PAUSE_CAP:
+ val = dp->anadv_pause;
+ break;
+
+ case PARAM_ADV_ASYM_PAUSE_CAP:
+ val = dp->anadv_asmpause;
+ break;
+
+ case PARAM_ADV_1000FDX_CAP:
+ val = dp->anadv_1000fdx;
+ break;
+
+ case PARAM_ADV_1000HDX_CAP:
+ val = dp->anadv_1000hdx;
+ break;
+
+ case PARAM_ADV_100T4_CAP:
+ val = dp->anadv_100t4;
+ break;
+
+ case PARAM_ADV_100FDX_CAP:
+ val = dp->anadv_100fdx;
+ break;
+
+ case PARAM_ADV_100HDX_CAP:
+ val = dp->anadv_100hdx;
+ break;
+
+ case PARAM_ADV_10FDX_CAP:
+ val = dp->anadv_10fdx;
+ break;
+
+ case PARAM_ADV_10HDX_CAP:
+ val = dp->anadv_10hdx;
+ break;
+
+ case PARAM_ADV_1000T_MS:
+ val = dp->anadv_1000t_ms;
+ break;
+
+ case PARAM_LP_AUTONEG_CAP:
+ val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
+ break;
+
+ case PARAM_LP_PAUSE_CAP:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
+ break;
+
+ case PARAM_LP_ASYM_PAUSE_CAP:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASM_DIR);
+ break;
+
+ case PARAM_LP_1000FDX_CAP:
+ val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
+ break;
+
+ case PARAM_LP_1000HDX_CAP:
+ val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
+ break;
+
+ case PARAM_LP_100T4_CAP:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
+ break;
+
+ case PARAM_LP_100FDX_CAP:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
+ break;
+
+ case PARAM_LP_100HDX_CAP:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
+ break;
+
+ case PARAM_LP_10FDX_CAP:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
+ break;
+
+ case PARAM_LP_10HDX_CAP:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
+ break;
+
+ case PARAM_LINK_STATUS:
+ val = (dp->mii_state == MII_STATE_LINKUP);
+ break;
+
+ case PARAM_LINK_SPEED:
+ val = usbgem_speed_value[dp->speed];
+ break;
+
+ case PARAM_LINK_DUPLEX:
+ val = 0;
+ if (dp->mii_state == MII_STATE_LINKUP) {
+ val = dp->full_duplex ? 2 : 1;
+ }
+ break;
+
+ case PARAM_LINK_AUTONEG:
+ val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
+ break;
+
+ case PARAM_LINK_RX_PAUSE:
+ val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
+ (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
+ break;
+
+ case PARAM_LINK_TX_PAUSE:
+ val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
+ (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
+ break;
+
+#ifdef DEBUG_RESUME
+ case PARAM_RESUME_TEST:
+ val = 0;
+ break;
+#endif
+ default:
+ cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
+ dp->name, item);
+ val = 0;
+ break;
+ }
+
+ (void) mi_mpprintf(mp, "%ld", val);
+
+ return (0);
+}
+
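+/*
+ * usbgem_param_set: ndd set handler; the new value is validated
+ * against the PHY capabilities before it is accepted, and the link
+ * watcher is kicked to apply it.
+ */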
+static int
+usbgem_param_set(queue_t *q,
+ mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
+{
+ struct usbgem_dev *dp = ((struct usbgem_nd_arg *)(void *)arg)->dp;
+ int item = ((struct usbgem_nd_arg *)(void *)arg)->item;
+ long val;
+ char *end;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+ if (ddi_strtol(value, &end, 10, &val)) {
+ return (EINVAL);
+ }
+ if (end == value) {
+ return (EINVAL);
+ }
+
+ switch (item) {
+ case PARAM_ADV_AUTONEG_CAP:
+ if (val != 0 && val != 1) {
+ goto err;
+ }
+ if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
+ goto err;
+ }
+ dp->anadv_autoneg = (int)val;
+ break;
+
+ case PARAM_ADV_PAUSE_CAP:
+ if (val != 0 && val != 1) {
+ goto err;
+ }
+ if (val && dp->ugc.usbgc_flow_control == FLOW_CONTROL_NONE) {
+ goto err;
+ }
+ dp->anadv_pause = (int)val;
+ break;
+
+ case PARAM_ADV_ASYM_PAUSE_CAP:
+ if (val != 0 && val != 1) {
+ goto err;
+ }
+ if (val &&
+ dp->ugc.usbgc_flow_control <= FLOW_CONTROL_SYMMETRIC) {
+ goto err;
+ }
+ dp->anadv_asmpause = (int)val;
+ break;
+
+ case PARAM_ADV_1000FDX_CAP:
+ if (val != 0 && val != 1) {
+ goto err;
+ }
+ if (val && (dp->mii_xstatus &
+ (MII_XSTATUS_1000BASET_FD |
+ MII_XSTATUS_1000BASEX_FD)) == 0) {
+ goto err;
+ }
+ dp->anadv_1000fdx = (int)val;
+ break;
+
+ case PARAM_ADV_1000HDX_CAP:
+ if (val != 0 && val != 1) {
+ goto err;
+ }
+ if (val && (dp->mii_xstatus &
+ (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
+ goto err;
+ }
+ dp->anadv_1000hdx = (int)val;
+ break;
+
+ case PARAM_ADV_100T4_CAP:
+ if (val != 0 && val != 1) {
+ goto err;
+ }
+ if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
+ goto err;
+ }
+ dp->anadv_100t4 = (int)val;
+ break;
+
+ case PARAM_ADV_100FDX_CAP:
+ if (val != 0 && val != 1) {
+ goto err;
+ }
+ if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
+ goto err;
+ }
+ dp->anadv_100fdx = (int)val;
+ break;
+
+ case PARAM_ADV_100HDX_CAP:
+ if (val != 0 && val != 1) {
+ goto err;
+ }
+ if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
+ goto err;
+ }
+ dp->anadv_100hdx = (int)val;
+ break;
+
+ case PARAM_ADV_10FDX_CAP:
+ if (val != 0 && val != 1) {
+ goto err;
+ }
+ if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
+ goto err;
+ }
+ dp->anadv_10fdx = (int)val;
+ break;
+
+ case PARAM_ADV_10HDX_CAP:
+ if (val != 0 && val != 1) {
+ goto err;
+ }
+ if (val && (dp->mii_status & MII_STATUS_10) == 0) {
+ goto err;
+ }
+ dp->anadv_10hdx = (int)val;
+ break;
+
+ case PARAM_ADV_1000T_MS:
+ if (val != 0 && val != 1 && val != 2) {
+ goto err;
+ }
+ if (val && (dp->mii_xstatus &
+ (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASET_FD)) == 0) {
+ goto err;
+ }
+ dp->anadv_1000t_ms = (int)val;
+ break;
+
+#ifdef DEBUG_RESUME
+ case PARAM_RESUME_TEST:
+ mutex_exit(&dp->xmitlock);
+ mutex_exit(&dp->intrlock);
+ gem_suspend(dp->dip);
+ gem_resume(dp->dip);
+ mutex_enter(&dp->intrlock);
+ mutex_enter(&dp->xmitlock);
+ break;
+#endif
+ }
+
+ /* sync with PHY */
+ usbgem_choose_forcedmode(dp);
+
+ dp->mii_state = MII_STATE_UNKNOWN;
+ if (dp->ugc.usbgc_mii_hw_link_detection) {
+ /* wake up the link watcher, which may be sleeping */
+ cv_signal(&dp->link_watcher_wait_cv);
+ }
+
+ return (0);
+err:
+ return (EINVAL);
+}
+
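+/*
+ * usbgem_nd_load: register a single ndd node; passing a NULL set
+ * function makes the parameter read-only.
+ */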
+static void
+usbgem_nd_load(struct usbgem_dev *dp,
+ char *name, ndgetf_t gf, ndsetf_t sf, int item)
+{
+ struct usbgem_nd_arg *arg;
+
+ ASSERT(item >= 0);
+ ASSERT(item < PARAM_COUNT);
+
+ arg = &((struct usbgem_nd_arg *)(void *)dp->nd_arg_p)[item];
+ arg->dp = dp;
+ arg->item = item;
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
+ dp->name, __func__, name, item));
+ (void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
+}
+
+static void
+usbgem_nd_setup(struct usbgem_dev *dp)
+{
+ DPRINTF(1, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
+ dp->name, __func__, dp->mii_status, MII_STATUS_BITS));
+
+ ASSERT(dp->nd_arg_p == NULL);
+
+ dp->nd_arg_p =
+ kmem_zalloc(sizeof (struct usbgem_nd_arg) * PARAM_COUNT, KM_SLEEP);
+
+#define SETFUNC(x) ((x) ? usbgem_param_set : NULL)
+
+ usbgem_nd_load(dp, "autoneg_cap",
+ usbgem_param_get, NULL, PARAM_AUTONEG_CAP);
+ usbgem_nd_load(dp, "pause_cap",
+ usbgem_param_get, NULL, PARAM_PAUSE_CAP);
+ usbgem_nd_load(dp, "asym_pause_cap",
+ usbgem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
+ usbgem_nd_load(dp, "1000fdx_cap",
+ usbgem_param_get, NULL, PARAM_1000FDX_CAP);
+ usbgem_nd_load(dp, "1000hdx_cap",
+ usbgem_param_get, NULL, PARAM_1000HDX_CAP);
+ usbgem_nd_load(dp, "100T4_cap",
+ usbgem_param_get, NULL, PARAM_100T4_CAP);
+ usbgem_nd_load(dp, "100fdx_cap",
+ usbgem_param_get, NULL, PARAM_100FDX_CAP);
+ usbgem_nd_load(dp, "100hdx_cap",
+ usbgem_param_get, NULL, PARAM_100HDX_CAP);
+ usbgem_nd_load(dp, "10fdx_cap",
+ usbgem_param_get, NULL, PARAM_10FDX_CAP);
+ usbgem_nd_load(dp, "10hdx_cap",
+ usbgem_param_get, NULL, PARAM_10HDX_CAP);
+
+ /* Our advertised capabilities */
+ usbgem_nd_load(dp, "adv_autoneg_cap", usbgem_param_get,
+ SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
+ PARAM_ADV_AUTONEG_CAP);
+ usbgem_nd_load(dp, "adv_pause_cap", usbgem_param_get,
+ SETFUNC(dp->ugc.usbgc_flow_control & 1),
+ PARAM_ADV_PAUSE_CAP);
+ usbgem_nd_load(dp, "adv_asym_pause_cap", usbgem_param_get,
+ SETFUNC(dp->ugc.usbgc_flow_control & 2),
+ PARAM_ADV_ASYM_PAUSE_CAP);
+ usbgem_nd_load(dp, "adv_1000fdx_cap", usbgem_param_get,
+ SETFUNC(dp->mii_xstatus &
+ (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
+ PARAM_ADV_1000FDX_CAP);
+ usbgem_nd_load(dp, "adv_1000hdx_cap", usbgem_param_get,
+ SETFUNC(dp->mii_xstatus &
+ (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
+ PARAM_ADV_1000HDX_CAP);
+ usbgem_nd_load(dp, "adv_100T4_cap", usbgem_param_get,
+ SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
+ !dp->mii_advert_ro),
+ PARAM_ADV_100T4_CAP);
+ usbgem_nd_load(dp, "adv_100fdx_cap", usbgem_param_get,
+ SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
+ !dp->mii_advert_ro),
+ PARAM_ADV_100FDX_CAP);
+ usbgem_nd_load(dp, "adv_100hdx_cap", usbgem_param_get,
+ SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
+ !dp->mii_advert_ro),
+ PARAM_ADV_100HDX_CAP);
+ usbgem_nd_load(dp, "adv_10fdx_cap", usbgem_param_get,
+ SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
+ !dp->mii_advert_ro),
+ PARAM_ADV_10FDX_CAP);
+ usbgem_nd_load(dp, "adv_10hdx_cap", usbgem_param_get,
+ SETFUNC((dp->mii_status & MII_STATUS_10) &&
+ !dp->mii_advert_ro),
+ PARAM_ADV_10HDX_CAP);
+ usbgem_nd_load(dp, "adv_1000t_ms", usbgem_param_get,
+ SETFUNC(dp->mii_xstatus &
+ (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)),
+ PARAM_ADV_1000T_MS);
+
+
+ /* Partner's advertised capabilities */
+ usbgem_nd_load(dp, "lp_autoneg_cap",
+ usbgem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
+ usbgem_nd_load(dp, "lp_pause_cap",
+ usbgem_param_get, NULL, PARAM_LP_PAUSE_CAP);
+ usbgem_nd_load(dp, "lp_asym_pause_cap",
+ usbgem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
+ usbgem_nd_load(dp, "lp_1000fdx_cap",
+ usbgem_param_get, NULL, PARAM_LP_1000FDX_CAP);
+ usbgem_nd_load(dp, "lp_1000hdx_cap",
+ usbgem_param_get, NULL, PARAM_LP_1000HDX_CAP);
+ usbgem_nd_load(dp, "lp_100T4_cap",
+ usbgem_param_get, NULL, PARAM_LP_100T4_CAP);
+ usbgem_nd_load(dp, "lp_100fdx_cap",
+ usbgem_param_get, NULL, PARAM_LP_100FDX_CAP);
+ usbgem_nd_load(dp, "lp_100hdx_cap",
+ usbgem_param_get, NULL, PARAM_LP_100HDX_CAP);
+ usbgem_nd_load(dp, "lp_10fdx_cap",
+ usbgem_param_get, NULL, PARAM_LP_10FDX_CAP);
+ usbgem_nd_load(dp, "lp_10hdx_cap",
+ usbgem_param_get, NULL, PARAM_LP_10HDX_CAP);
+
+ /* Current operating modes */
+ usbgem_nd_load(dp, "link_status",
+ usbgem_param_get, NULL, PARAM_LINK_STATUS);
+ usbgem_nd_load(dp, "link_speed",
+ usbgem_param_get, NULL, PARAM_LINK_SPEED);
+ usbgem_nd_load(dp, "link_duplex",
+ usbgem_param_get, NULL, PARAM_LINK_DUPLEX);
+ usbgem_nd_load(dp, "link_autoneg",
+ usbgem_param_get, NULL, PARAM_LINK_AUTONEG);
+ usbgem_nd_load(dp, "link_rx_pause",
+ usbgem_param_get, NULL, PARAM_LINK_RX_PAUSE);
+ usbgem_nd_load(dp, "link_tx_pause",
+ usbgem_param_get, NULL, PARAM_LINK_TX_PAUSE);
+#ifdef DEBUG_RESUME
+ usbgem_nd_load(dp, "resume_test",
+ usbgem_param_get, usbgem_param_set, PARAM_RESUME_TEST);
+#endif
+#undef SETFUNC
+}
+
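+/*
+ * usbgem_nd_ioctl: dispatch ND_GET/ND_SET ioctls to the ndd framework
+ * and translate the result into an ioc_reply code.
+ */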
+static
+enum ioc_reply
+usbgem_nd_ioctl(struct usbgem_dev *dp,
+ queue_t *wq, mblk_t *mp, struct iocblk *iocp)
+{
+ boolean_t ok;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ switch (iocp->ioc_cmd) {
+ case ND_GET:
+ ok = nd_getset(wq, dp->nd_data_p, mp);
+ DPRINTF(1, (CE_CONT,
+ "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
+ return (ok ? IOC_REPLY : IOC_INVAL);
+
+ case ND_SET:
+ ok = nd_getset(wq, dp->nd_data_p, mp);
+
+ DPRINTF(1, (CE_CONT, "%s: set %s err %d",
+ dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
+
+ if (!ok) {
+ return (IOC_INVAL);
+ }
+
+ if (iocp->ioc_error) {
+ return (IOC_REPLY);
+ }
+
+ return (IOC_RESTART_REPLY);
+ }
+
+ cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
+
+ return (IOC_INVAL);
+}
+
+static void
+usbgem_nd_cleanup(struct usbgem_dev *dp)
+{
+ ASSERT(dp->nd_data_p != NULL);
+ ASSERT(dp->nd_arg_p != NULL);
+
+ nd_free(&dp->nd_data_p);
+
+ kmem_free(dp->nd_arg_p, sizeof (struct usbgem_nd_arg) * PARAM_COUNT);
+ dp->nd_arg_p = NULL;
+}
+#endif /* USBGEM_CONFIG_ND */
+
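+/*
+ * usbgem_mac_ioctl: common ioctl dispatcher.  Only the ndd ioctls
+ * (ND_GET/ND_SET) are handled, and only when USBGEM_CONFIG_ND is
+ * configured; everything else is NAKed with EINVAL.
+ */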
+static void
+usbgem_mac_ioctl(struct usbgem_dev *dp, queue_t *wq, mblk_t *mp)
+{
+ struct iocblk *iocp;
+ enum ioc_reply status;
+ int cmd;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /*
+ * Validate the command before bothering with the mutex ...
+ */
+ iocp = (void *)mp->b_rptr;
+ iocp->ioc_error = 0;
+ cmd = iocp->ioc_cmd;
+
+ DPRINTF(1, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));
+
+#ifdef USBGEM_CONFIG_ND
+ switch (cmd) {
+ default:
+ _NOTE(NOTREACHED)
+ status = IOC_INVAL;
+ break;
+
+ case ND_GET:
+ case ND_SET:
+ status = usbgem_nd_ioctl(dp, wq, mp, iocp);
+ break;
+ }
+
+ /*
+ * Finally, decide how to reply
+ */
+ switch (status) {
+ default:
+ case IOC_INVAL:
+ /*
+ * Error, reply with a NAK and EINVAL or the specified error
+ */
+ miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
+ EINVAL : iocp->ioc_error);
+ break;
+
+ case IOC_DONE:
+ /*
+ * OK, reply already sent
+ */
+ break;
+
+ case IOC_RESTART_ACK:
+ case IOC_ACK:
+ /*
+ * OK, reply with an ACK
+ */
+ miocack(wq, mp, 0, 0);
+ break;
+
+ case IOC_RESTART_REPLY:
+ case IOC_REPLY:
+ /*
+ * OK, send prepared reply as ACK or NAK
+ */
+ mp->b_datap->db_type =
+ iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
+ qreply(wq, mp);
+ break;
+ }
+#else
+ miocnak(wq, mp, 0, EINVAL);
+ return;
+#endif /* USBGEM_CONFIG_ND */
+}
+
+#ifndef SYS_MAC_H
+#define XCVR_UNDEFINED 0
+#define XCVR_NONE 1
+#define XCVR_10 2
+#define XCVR_100T4 3
+#define XCVR_100X 4
+#define XCVR_100T2 5
+#define XCVR_1000X 6
+#define XCVR_1000T 7
+#endif
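+/*
+ * usbgem_mac_xcvr_inuse: derive the XCVR_* type of the transceiver in
+ * use from the MII status and extended status bits.
+ */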
+static int
+usbgem_mac_xcvr_inuse(struct usbgem_dev *dp)
+{
+ int val = XCVR_UNDEFINED;
+
+ if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
+ if (dp->mii_status & MII_STATUS_100_BASE_T4) {
+ val = XCVR_100T4;
+ } else if (dp->mii_status &
+ (MII_STATUS_100_BASEX_FD |
+ MII_STATUS_100_BASEX)) {
+ val = XCVR_100X;
+ } else if (dp->mii_status &
+ (MII_STATUS_100_BASE_T2_FD |
+ MII_STATUS_100_BASE_T2)) {
+ val = XCVR_100T2;
+ } else if (dp->mii_status &
+ (MII_STATUS_10_FD | MII_STATUS_10)) {
+ val = XCVR_10;
+ }
+ } else if (dp->mii_xstatus &
+ (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
+ val = XCVR_1000T;
+ } else if (dp->mii_xstatus &
+ (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
+ val = XCVR_1000X;
+ }
+
+ return (val);
+}
+
+#ifdef USBGEM_CONFIG_GLDv3
+/* ============================================================== */
+/*
+ * GLDv3 interface
+ */
+/* ============================================================== */
+static int usbgem_m_getstat(void *, uint_t, uint64_t *);
+static int usbgem_m_start(void *);
+static void usbgem_m_stop(void *);
+static int usbgem_m_setpromisc(void *, boolean_t);
+static int usbgem_m_multicst(void *, boolean_t, const uint8_t *);
+static int usbgem_m_unicst(void *, const uint8_t *);
+static mblk_t *usbgem_m_tx(void *, mblk_t *);
+static void usbgem_m_ioctl(void *, queue_t *, mblk_t *);
+#ifdef GEM_CONFIG_MAC_PROP
+static int usbgem_m_setprop(void *, const char *, mac_prop_id_t,
+ uint_t, const void *);
+#ifdef MAC_VERSION_V1
+static int usbgem_m_getprop(void *, const char *, mac_prop_id_t,
+ uint_t, void *);
+#else
+static int usbgem_m_getprop(void *, const char *, mac_prop_id_t,
+ uint_t, uint_t, void *, uint_t *);
+#endif
+#endif
+
+#define GEM_M_CALLBACK_FLAGS (MC_IOCTL)
+
+static mac_callbacks_t gem_m_callbacks = {
+#ifdef USBGEM_CONFIG_MAC_PROP
+#ifdef MAC_VERSION_V1
+ GEM_M_CALLBACK_FLAGS | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
+#else
+ GEM_M_CALLBACK_FLAGS | MC_SETPROP | MC_GETPROP,
+#endif
+#else
+ GEM_M_CALLBACK_FLAGS,
+#endif
+ usbgem_m_getstat,
+ usbgem_m_start,
+ usbgem_m_stop,
+ usbgem_m_setpromisc,
+ usbgem_m_multicst,
+ usbgem_m_unicst,
+ usbgem_m_tx,
+#ifdef _SYS_MAC_PROVIDER_H
+#ifdef MAC_VERSION_V1
+ NULL,
+#endif
+#else
+ NULL, /* m_resources */
+#endif
+ usbgem_m_ioctl,
+ NULL, /* m_getcapab */
+#ifdef USBGEM_CONFIG_MAC_PROP
+ NULL,
+ NULL,
+ usbgem_m_setprop,
+ usbgem_m_getprop,
+#endif
+#ifdef MAC_VERSION_V1
+ usbgem_m_propinfo,
+#endif
+};
+
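+/*
+ * usbgem_m_start: GLDv3 start entry point; initialize the chip and the
+ * rx filter, and start the mac if the link is already up.
+ */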
+static int
+usbgem_m_start(void *arg)
+{
+ int ret;
+ int err;
+ struct usbgem_dev *dp = arg;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ err = EIO;
+
+ rw_enter(&dp->dev_state_lock, RW_WRITER);
+ dp->nic_state = NIC_STATE_ONLINE;
+
+ if (dp->mac_state == MAC_STATE_DISCONNECTED) {
+ err = 0;
+ goto x;
+ }
+ if (usbgem_mac_init(dp) != USB_SUCCESS) {
+ goto x;
+ }
+
+ /* initialize rx filter state */
+ sema_p(&dp->rxfilter_lock);
+ dp->mc_count = 0;
+ dp->mc_count_req = 0;
+
+ bcopy(dp->dev_addr.ether_addr_octet,
+ dp->cur_addr.ether_addr_octet, ETHERADDRL);
+ dp->rxmode |= RXMODE_ENABLE;
+
+ ret = usbgem_hal_set_rx_filter(dp);
+ sema_v(&dp->rxfilter_lock);
+
+ if (ret != USB_SUCCESS) {
+ goto x;
+ }
+
+ if (dp->mii_state == MII_STATE_LINKUP) {
+ /* set up the media mode if the link is already up */
+ if (usbgem_hal_set_media(dp) != USB_SUCCESS) {
+ goto x;
+ }
+ if (usbgem_mac_start(dp) != USB_SUCCESS) {
+ goto x;
+ }
+ }
+
+ err = 0;
+x:
+ rw_exit(&dp->dev_state_lock);
+ return (err);
+}
+
+static void
+usbgem_m_stop(void *arg)
+{
+ struct usbgem_dev *dp = arg;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /* stop rx gracefully */
+ rw_enter(&dp->dev_state_lock, RW_READER);
+ sema_p(&dp->rxfilter_lock);
+ dp->rxmode &= ~RXMODE_ENABLE;
+
+ if (dp->mac_state != MAC_STATE_DISCONNECTED) {
+ (void) usbgem_hal_set_rx_filter(dp);
+ }
+ sema_v(&dp->rxfilter_lock);
+ rw_exit(&dp->dev_state_lock);
+
+ /* make the nic state inactive */
+ rw_enter(&dp->dev_state_lock, RW_WRITER);
+ dp->nic_state = NIC_STATE_STOPPED;
+
+ /* stop mac completely */
+ if (dp->mac_state != MAC_STATE_DISCONNECTED) {
+ (void) usbgem_mac_stop(dp, MAC_STATE_STOPPED, STOP_GRACEFUL);
+ }
+ rw_exit(&dp->dev_state_lock);
+}
+
+static int
+usbgem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
+{
+ int err;
+ int ret;
+ struct usbgem_dev *dp = arg;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ rw_enter(&dp->dev_state_lock, RW_READER);
+ if (add) {
+ ret = usbgem_add_multicast(dp, ep);
+ } else {
+ ret = usbgem_remove_multicast(dp, ep);
+ }
+ rw_exit(&dp->dev_state_lock);
+
+ err = 0;
+ if (ret != USB_SUCCESS) {
+#ifdef GEM_CONFIG_FMA
+ ddi_fm_service_impact(dp->dip, DDI_SERVICE_DEGRADED);
+#endif
+ err = EIO;
+ }
+
+ return (err);
+}
+
+static int
+usbgem_m_setpromisc(void *arg, boolean_t on)
+{
+ int err;
+ struct usbgem_dev *dp = arg;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ rw_enter(&dp->dev_state_lock, RW_READER);
+
+ sema_p(&dp->rxfilter_lock);
+ if (on) {
+ dp->rxmode |= RXMODE_PROMISC;
+ } else {
+ dp->rxmode &= ~RXMODE_PROMISC;
+ }
+
+ err = 0;
+ if (dp->mac_state != MAC_STATE_DISCONNECTED) {
+ if (usbgem_hal_set_rx_filter(dp) != USB_SUCCESS) {
+ err = EIO;
+ }
+ }
+ sema_v(&dp->rxfilter_lock);
+
+ rw_exit(&dp->dev_state_lock);
+
+#ifdef GEM_CONFIG_FMA
+ if (err != 0) {
+ ddi_fm_service_impact(dp->dip, DDI_SERVICE_DEGRADED);
+ }
+#endif
+ return (err);
+}
+
+int
+usbgem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
+{
+ int ret;
+ uint64_t val;
+ struct usbgem_dev *dp = arg;
+ struct usbgem_stats *gstp = &dp->stats;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ rw_enter(&dp->dev_state_lock, RW_READER);
+ if (dp->mac_state == MAC_STATE_DISCONNECTED) {
+ rw_exit(&dp->dev_state_lock);
+ return (0);
+ }
+ ret = usbgem_hal_get_stats(dp);
+ rw_exit(&dp->dev_state_lock);
+
+#ifdef GEM_CONFIG_FMA
+ if (ret != USB_SUCCESS) {
+ ddi_fm_service_impact(dp->dip, DDI_SERVICE_DEGRADED);
+ return (EIO);
+ }
+#endif
+
+ switch (stat) {
+ case MAC_STAT_IFSPEED:
+ val = usbgem_speed_value[dp->speed] * 1000000ull;
+ break;
+
+ case MAC_STAT_MULTIRCV:
+ val = gstp->rmcast;
+ break;
+
+ case MAC_STAT_BRDCSTRCV:
+ val = gstp->rbcast;
+ break;
+
+ case MAC_STAT_MULTIXMT:
+ val = gstp->omcast;
+ break;
+
+ case MAC_STAT_BRDCSTXMT:
+ val = gstp->obcast;
+ break;
+
+ case MAC_STAT_NORCVBUF:
+ val = gstp->norcvbuf + gstp->missed;
+ break;
+
+ case MAC_STAT_IERRORS:
+ val = gstp->errrcv;
+ break;
+
+ case MAC_STAT_NOXMTBUF:
+ val = gstp->noxmtbuf;
+ break;
+
+ case MAC_STAT_OERRORS:
+ val = gstp->errxmt;
+ break;
+
+ case MAC_STAT_COLLISIONS:
+ val = gstp->collisions;
+ break;
+
+ case MAC_STAT_RBYTES:
+ val = gstp->rbytes;
+ break;
+
+ case MAC_STAT_IPACKETS:
+ val = gstp->rpackets;
+ break;
+
+ case MAC_STAT_OBYTES:
+ val = gstp->obytes;
+ break;
+
+ case MAC_STAT_OPACKETS:
+ val = gstp->opackets;
+ break;
+
+ case MAC_STAT_UNDERFLOWS:
+ val = gstp->underflow;
+ break;
+
+ case MAC_STAT_OVERFLOWS:
+ val = gstp->overflow;
+ break;
+
+ case ETHER_STAT_ALIGN_ERRORS:
+ val = gstp->frame;
+ break;
+
+ case ETHER_STAT_FCS_ERRORS:
+ val = gstp->crc;
+ break;
+
+ case ETHER_STAT_FIRST_COLLISIONS:
+ val = gstp->first_coll;
+ break;
+
+ case ETHER_STAT_MULTI_COLLISIONS:
+ val = gstp->multi_coll;
+ break;
+
+ case ETHER_STAT_SQE_ERRORS:
+ val = gstp->sqe;
+ break;
+
+ case ETHER_STAT_DEFER_XMTS:
+ val = gstp->defer;
+ break;
+
+ case ETHER_STAT_TX_LATE_COLLISIONS:
+ val = gstp->xmtlatecoll;
+ break;
+
+ case ETHER_STAT_EX_COLLISIONS:
+ val = gstp->excoll;
+ break;
+
+ case ETHER_STAT_MACXMT_ERRORS:
+ val = gstp->xmit_internal_err;
+ break;
+
+ case ETHER_STAT_CARRIER_ERRORS:
+ val = gstp->nocarrier;
+ break;
+
+ case ETHER_STAT_TOOLONG_ERRORS:
+ val = gstp->frame_too_long;
+ break;
+
+ case ETHER_STAT_MACRCV_ERRORS:
+ val = gstp->rcv_internal_err;
+ break;
+
+ case ETHER_STAT_XCVR_ADDR:
+ val = dp->mii_phy_addr;
+ break;
+
+ case ETHER_STAT_XCVR_ID:
+ val = dp->mii_phy_id;
+ break;
+
+ case ETHER_STAT_XCVR_INUSE:
+ val = usbgem_mac_xcvr_inuse(dp);
+ break;
+
+ case ETHER_STAT_CAP_1000FDX:
+ val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
+ (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
+ break;
+
+ case ETHER_STAT_CAP_1000HDX:
+ val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
+ (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
+ break;
+
+ case ETHER_STAT_CAP_100FDX:
+ val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
+ break;
+
+ case ETHER_STAT_CAP_100HDX:
+ val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
+ break;
+
+ case ETHER_STAT_CAP_10FDX:
+ val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
+ break;
+
+ case ETHER_STAT_CAP_10HDX:
+ val = BOOLEAN(dp->mii_status & MII_STATUS_10);
+ break;
+
+ case ETHER_STAT_CAP_ASMPAUSE:
+ val = dp->ugc.usbgc_flow_control > FLOW_CONTROL_SYMMETRIC;
+ break;
+
+ case ETHER_STAT_CAP_PAUSE:
+ val = dp->ugc.usbgc_flow_control != FLOW_CONTROL_NONE;
+ break;
+
+ case ETHER_STAT_CAP_AUTONEG:
+ val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
+ break;
+
+ case ETHER_STAT_ADV_CAP_1000FDX:
+ val = dp->anadv_1000fdx;
+ break;
+
+ case ETHER_STAT_ADV_CAP_1000HDX:
+ val = dp->anadv_1000hdx;
+ break;
+
+ case ETHER_STAT_ADV_CAP_100FDX:
+ val = dp->anadv_100fdx;
+ break;
+
+ case ETHER_STAT_ADV_CAP_100HDX:
+ val = dp->anadv_100hdx;
+ break;
+
+ case ETHER_STAT_ADV_CAP_10FDX:
+ val = dp->anadv_10fdx;
+ break;
+
+ case ETHER_STAT_ADV_CAP_10HDX:
+ val = dp->anadv_10hdx;
+ break;
+
+ case ETHER_STAT_ADV_CAP_ASMPAUSE:
+ val = dp->anadv_asmpause;
+ break;
+
+ case ETHER_STAT_ADV_CAP_PAUSE:
+ val = dp->anadv_pause;
+ break;
+
+ case ETHER_STAT_ADV_CAP_AUTONEG:
+ val = dp->anadv_autoneg;
+ break;
+
+ case ETHER_STAT_LP_CAP_1000FDX:
+ val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
+ break;
+
+ case ETHER_STAT_LP_CAP_1000HDX:
+ val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
+ break;
+
+ case ETHER_STAT_LP_CAP_100FDX:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
+ break;
+
+ case ETHER_STAT_LP_CAP_100HDX:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
+ break;
+
+ case ETHER_STAT_LP_CAP_10FDX:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
+ break;
+
+ case ETHER_STAT_LP_CAP_10HDX:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
+ break;
+
+ case ETHER_STAT_LP_CAP_ASMPAUSE:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASM_DIR);
+ break;
+
+ case ETHER_STAT_LP_CAP_PAUSE:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
+ break;
+
+ case ETHER_STAT_LP_CAP_AUTONEG:
+ val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
+ break;
+
+ case ETHER_STAT_LINK_ASMPAUSE:
+ val = BOOLEAN(dp->flow_control & 2);
+ break;
+
+ case ETHER_STAT_LINK_PAUSE:
+ val = BOOLEAN(dp->flow_control & 1);
+ break;
+
+ case ETHER_STAT_LINK_AUTONEG:
+ val = dp->anadv_autoneg &&
+ BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
+ break;
+
+ case ETHER_STAT_LINK_DUPLEX:
+ val = (dp->mii_state == MII_STATE_LINKUP) ?
+ (dp->full_duplex ? 2 : 1) : 0;
+ break;
+
+ case ETHER_STAT_TOOSHORT_ERRORS:
+ val = gstp->runt;
+ break;
+#ifdef NEVER /* it doesn't make sense */
+ case ETHER_STAT_CAP_REMFAULT:
+ val = B_TRUE;
+ break;
+
+ case ETHER_STAT_ADV_REMFAULT:
+ val = dp->anadv_remfault;
+ break;
+#endif
+ case ETHER_STAT_LP_REMFAULT:
+ val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
+ break;
+
+ case ETHER_STAT_JABBER_ERRORS:
+ val = gstp->jabber;
+ break;
+
+ case ETHER_STAT_CAP_100T4:
+ val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
+ break;
+
+ case ETHER_STAT_ADV_CAP_100T4:
+ val = dp->anadv_100t4;
+ break;
+
+ case ETHER_STAT_LP_CAP_100T4:
+ val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
+ break;
+
+ default:
+#if GEM_DEBUG_LEVEL > 2
+ cmn_err(CE_WARN,
+ "%s: unrecognized parameter value = %d",
+ __func__, stat);
+#endif
+ *valp = 0;
+ return (ENOTSUP);
+ }
+
+ *valp = val;
+
+ return (0);
+}
+
+static int
+usbgem_m_unicst(void *arg, const uint8_t *mac)
+{
+ int err;
+ struct usbgem_dev *dp = arg;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ rw_enter(&dp->dev_state_lock, RW_READER);
+
+ sema_p(&dp->rxfilter_lock);
+ bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
+ dp->rxmode |= RXMODE_ENABLE;
+
+ err = 0;
+ if (dp->mac_state != MAC_STATE_DISCONNECTED) {
+ if (usbgem_hal_set_rx_filter(dp) != USB_SUCCESS) {
+ err = EIO;
+ }
+ }
+ sema_v(&dp->rxfilter_lock);
+ rw_exit(&dp->dev_state_lock);
+
+#ifdef GEM_CONFIG_FMA
+ if (err != 0) {
+ ddi_fm_service_impact(dp->dip, DDI_SERVICE_DEGRADED);
+ }
+#endif
+ return (err);
+}
+
+/*
+ * usbgem_m_tx is used only for sending data packets onto the ethernet wire.
+ */
+static mblk_t *
+usbgem_m_tx(void *arg, mblk_t *mp_head)
+{
+ int limit;
+ mblk_t *mp;
+ mblk_t *nmp;
+ uint32_t flags;
+ struct usbgem_dev *dp = arg;
+
+ DPRINTF(4, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ mp = mp_head;
+ flags = 0;
+
+ rw_enter(&dp->dev_state_lock, RW_READER);
+
+ if (dp->mii_state != MII_STATE_LINKUP ||
+ dp->mac_state != MAC_STATE_ONLINE) {
+ /* some nics hate to send packets while the link is down */
+ for (; mp; mp = nmp) {
+ nmp = mp->b_next;
+ mp->b_next = NULL;
+ freemsg(mp);
+ }
+ goto x;
+ }
+
+ ASSERT(dp->nic_state == NIC_STATE_ONLINE);
+
+ limit = dp->tx_max_packets;
+ for (; limit-- && mp; mp = nmp) {
+ nmp = mp->b_next;
+ mp->b_next = NULL;
+ if (usbgem_send_common(dp, mp,
+ (limit == 0 && nmp) ? 1 : 0)) {
+ mp->b_next = nmp;
+ break;
+ }
+ }
+#ifdef CONFIG_TX_LIMITER
+ if (mp == mp_head) {
+ /* no packets were sent; decrease the allocation limit */
+ mutex_enter(&dp->txlock);
+ dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
+ mutex_exit(&dp->txlock);
+ }
+#endif
+x:
+ rw_exit(&dp->dev_state_lock);
+
+ return (mp);
+}
+
+static void
+usbgem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
+{
+ struct usbgem_dev *dp = arg;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ rw_enter(&dp->dev_state_lock, RW_READER);
+ usbgem_mac_ioctl(dp, wq, mp);
+ rw_exit(&dp->dev_state_lock);
+}
+
+static void
+usbgem_gld3_init(struct usbgem_dev *dp, mac_register_t *macp)
+{
+ macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
+ macp->m_driver = dp;
+ macp->m_dip = dp->dip;
+ macp->m_src_addr = dp->dev_addr.ether_addr_octet;
+ macp->m_callbacks = &gem_m_callbacks;
+ macp->m_min_sdu = 0;
+ macp->m_max_sdu = dp->mtu;
+
+ if (dp->misc_flag & USBGEM_VLAN) {
+ macp->m_margin = VTAG_SIZE;
+ }
+}
+#else
+/* ============================================================== */
+/*
+ * GLDv2 interface
+ */
+/* ============================================================== */
+static int usbgem_gld_reset(gld_mac_info_t *);
+static int usbgem_gld_start(gld_mac_info_t *);
+static int usbgem_gld_stop(gld_mac_info_t *);
+static int usbgem_gld_set_mac_address(gld_mac_info_t *, uint8_t *);
+static int usbgem_gld_set_multicast(gld_mac_info_t *, uint8_t *, int);
+static int usbgem_gld_set_promiscuous(gld_mac_info_t *, int);
+static int usbgem_gld_get_stats(gld_mac_info_t *, struct gld_stats *);
+static int usbgem_gld_send(gld_mac_info_t *, mblk_t *);
+static int usbgem_gld_send_tagged(gld_mac_info_t *, mblk_t *, uint32_t);
+
+static int
+usbgem_gld_reset(gld_mac_info_t *macinfo)
+{
+ int err;
+ struct usbgem_dev *dp;
+
+ err = GLD_SUCCESS;
+ dp = (struct usbgem_dev *)macinfo->gldm_private;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ rw_enter(&dp->dev_state_lock, RW_WRITER);
+ if (usbgem_mac_init(dp) != USB_SUCCESS) {
+ err = GLD_FAILURE;
+ goto x;
+ }
+
+ dp->nic_state = NIC_STATE_INITIALIZED;
+
+ /* set up the media mode if the link is already up */
+ if (dp->mii_state == MII_STATE_LINKUP) {
+ if (dp->mac_state != MAC_STATE_DISCONNECTED) {
+ (void) usbgem_hal_set_media(dp);
+ }
+ }
+x:
+ rw_exit(&dp->dev_state_lock);
+ return (err);
+}
+
+static int
+usbgem_gld_start(gld_mac_info_t *macinfo)
+{
+ int err;
+ struct usbgem_dev *dp;
+
+ dp = (struct usbgem_dev *)macinfo->gldm_private;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ rw_enter(&dp->dev_state_lock, RW_WRITER);
+
+ dp->nic_state = NIC_STATE_ONLINE;
+
+ if (dp->mii_state == MII_STATE_LINKUP) {
+ if (usbgem_mac_start(dp) != USB_SUCCESS) {
+ /* sema_v(&dp->mii_lock); */
+ err = GLD_FAILURE;
+ goto x;
+ }
+ }
+
+ /*
+ * XXX - don't call gld_linkstate() here,
+ * otherwise it causes a recursive mutex entry.
+ */
+ err = GLD_SUCCESS;
+x:
+ rw_exit(&dp->dev_state_lock);
+
+ return (err);
+}
+
+static int
+usbgem_gld_stop(gld_mac_info_t *macinfo)
+{
+ int err = GLD_SUCCESS;
+ struct usbgem_dev *dp;
+
+ dp = (struct usbgem_dev *)macinfo->gldm_private;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /* try to stop rx gracefully */
+ rw_enter(&dp->dev_state_lock, RW_READER);
+ sema_p(&dp->rxfilter_lock);
+ dp->rxmode &= ~RXMODE_ENABLE;
+
+ if (dp->mac_state != MAC_STATE_DISCONNECTED) {
+ (void) usbgem_hal_set_rx_filter(dp);
+ }
+ sema_v(&dp->rxfilter_lock);
+ rw_exit(&dp->dev_state_lock);
+
+ /* make the nic state inactive */
+ rw_enter(&dp->dev_state_lock, RW_WRITER);
+ dp->nic_state = NIC_STATE_STOPPED;
+
+ if (dp->mac_state != MAC_STATE_DISCONNECTED) {
+ if (usbgem_mac_stop(dp, MAC_STATE_STOPPED, STOP_GRACEFUL)
+ != USB_SUCCESS) {
+ err = GLD_FAILURE;
+ }
+ }
+ rw_exit(&dp->dev_state_lock);
+
+ return (err);
+}
+
+static int
+usbgem_gld_set_multicast(gld_mac_info_t *macinfo, uint8_t *ep, int flag)
+{
+ int err;
+ int ret;
+ struct usbgem_dev *dp;
+
+ dp = (struct usbgem_dev *)macinfo->gldm_private;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ rw_enter(&dp->dev_state_lock, RW_READER);
+ if (flag == GLD_MULTI_ENABLE) {
+ ret = usbgem_add_multicast(dp, ep);
+ } else {
+ ret = usbgem_remove_multicast(dp, ep);
+ }
+ rw_exit(&dp->dev_state_lock);
+
+ err = GLD_SUCCESS;
+ if (ret != USB_SUCCESS) {
+#ifdef GEM_CONFIG_FMA
+ ddi_fm_service_impact(dp->dip, DDI_SERVICE_DEGRADED);
+#endif
+ err = GLD_FAILURE;
+ }
+ return (err);
+}
+
+static int
+usbgem_gld_set_promiscuous(gld_mac_info_t *macinfo, int flag)
+{
+ boolean_t need_to_change = B_TRUE;
+ struct usbgem_dev *dp;
+
+ dp = (struct usbgem_dev *)macinfo->gldm_private;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ sema_p(&dp->rxfilter_lock);
+ if (flag == GLD_MAC_PROMISC_NONE) {
+ dp->rxmode &= ~(RXMODE_PROMISC | RXMODE_ALLMULTI_REQ);
+ } else if (flag == GLD_MAC_PROMISC_MULTI) {
+ dp->rxmode |= RXMODE_ALLMULTI_REQ;
+ } else if (flag == GLD_MAC_PROMISC_PHYS) {
+ dp->rxmode |= RXMODE_PROMISC;
+ } else {
+ /* mode unchanged */
+ need_to_change = B_FALSE;
+ }
+
+ if (need_to_change) {
+ if (dp->mac_state != MAC_STATE_DISCONNECTED) {
+ (void) usbgem_hal_set_rx_filter(dp);
+ }
+ }
+ sema_v(&dp->rxfilter_lock);
+
+ return (GLD_SUCCESS);
+}
+
+static int
+usbgem_gld_set_mac_address(gld_mac_info_t *macinfo, uint8_t *mac)
+{
+ struct usbgem_dev *dp;
+ dp = (struct usbgem_dev *)macinfo->gldm_private;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ sema_p(&dp->rxfilter_lock);
+ bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
+ dp->rxmode |= RXMODE_ENABLE;
+
+ if (dp->mac_state != MAC_STATE_DISCONNECTED) {
+ (void) usbgem_hal_set_rx_filter(dp);
+ }
+ sema_v(&dp->rxfilter_lock);
+
+ return (GLD_SUCCESS);
+}
+
+static int
+usbgem_gld_get_stats(gld_mac_info_t *macinfo, struct gld_stats *gs)
+{
+ struct usbgem_dev *dp;
+ struct usbgem_stats *vs;
+
+ dp = (struct usbgem_dev *)macinfo->gldm_private;
+
+ if ((*dp->ugc.usbgc_get_stats)(dp) != USB_SUCCESS) {
+#ifdef GEM_CONFIG_FMA
+ ddi_fm_service_impact(dp->dip, DDI_SERVICE_DEGRADED);
+#endif
+ return (USB_FAILURE);
+ }
+
+ vs = &dp->stats;
+
+ gs->glds_errxmt = vs->errxmt;
+ gs->glds_errrcv = vs->errrcv;
+ gs->glds_collisions = vs->collisions;
+
+ gs->glds_excoll = vs->excoll;
+ gs->glds_defer = vs->defer;
+ gs->glds_frame = vs->frame;
+ gs->glds_crc = vs->crc;
+
+ gs->glds_overflow = vs->overflow; /* fifo err, underrun, rbuf ovf */
+ gs->glds_underflow = vs->underflow;
+ gs->glds_short = vs->runt;
+ gs->glds_missed = vs->missed; /* missed pkts while rbuf ovf */
+ gs->glds_xmtlatecoll = vs->xmtlatecoll;
+ gs->glds_nocarrier = vs->nocarrier;
+ gs->glds_norcvbuf = vs->norcvbuf; /* OS resource exhaustion */
+ gs->glds_intr = vs->intr;
+
+ /* all before here must be kept in place for v0 compatibility */
+ gs->glds_speed = usbgem_speed_value[dp->speed] * 1000000;
+ gs->glds_media = GLDM_PHYMII;
+ gs->glds_duplex = dp->full_duplex ? GLD_DUPLEX_FULL : GLD_DUPLEX_HALF;
+
+ /* gs->glds_media_specific */
+ gs->glds_dot3_first_coll = vs->first_coll;
+ gs->glds_dot3_multi_coll = vs->multi_coll;
+ gs->glds_dot3_sqe_error = 0;
+ gs->glds_dot3_mac_xmt_error = 0;
+ gs->glds_dot3_mac_rcv_error = 0;
+ gs->glds_dot3_frame_too_long = vs->frame_too_long;
+
+ return (GLD_SUCCESS);
+}
+
+static int
+usbgem_gld_ioctl(gld_mac_info_t *macinfo, queue_t *wq, mblk_t *mp)
+{
+ struct usbgem_dev *dp;
+
+ dp = (struct usbgem_dev *)macinfo->gldm_private;
+ usbgem_mac_ioctl(dp, wq, mp);
+
+ return (GLD_SUCCESS);
+}
+
+/*
+ * usbgem_gld_send is used only for sending data packets onto the ethernet wire.
+ */
+static int
+usbgem_gld_send(gld_mac_info_t *macinfo, mblk_t *mp)
+{
+ int ret;
+ uint32_t flags = 0;
+ struct usbgem_dev *dp;
+
+ dp = (struct usbgem_dev *)macinfo->gldm_private;
+
+ /* nic state must be online or suspended */
+ rw_enter(&dp->dev_state_lock, RW_READER);
+
+ ASSERT(dp->nic_state == NIC_STATE_ONLINE);
+ ASSERT(mp->b_next == NULL);
+
+ if (dp->mii_state != MII_STATE_LINKUP) {
+ /* Some nics hate to send packets while the link is down. */
+ /* we discard the untransmitted packets silently */
+ rw_exit(&dp->dev_state_lock);
+
+ freemsg(mp);
+#ifdef GEM_CONFIG_FMA
+ /* FIXME - should we ignore the error? */
+ ddi_fm_service_impact(dp->dip, DDI_SERVICE_DEGRADED);
+#endif
+ return (GLD_SUCCESS);
+ }
+
+ ret = (usbgem_send_common(dp, mp, flags) == NULL)
+ ? GLD_SUCCESS : GLD_NORESOURCES;
+ rw_exit(&dp->dev_state_lock);
+
+ return (ret);
+}
+
+/*
+ * usbgem_gld_send_tagged is used only for sending tagged data packets onto the ethernet wire.
+ */
+static int
+usbgem_gld_send_tagged(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag)
+{
+ uint32_t flags;
+ struct usbgem_dev *dp;
+
+ dp = (struct usbgem_dev *)macinfo->gldm_private;
+
+ /*
+ * Some nics hate to send packets while the link is down.
+ */
+ if (dp->mii_state != MII_STATE_LINKUP) {
+ /* we discard the untransmitted packets silently */
+ freemsg(mp);
+#ifdef GEM_CONFIG_FMA
+ /* FIXME - should we ignore the error? */
+ ddi_fm_service_impact(dp->dip, DDI_SERVICE_UNAFFECTED);
+#endif
+ return (GLD_SUCCESS);
+ }
+#ifdef notyet
+ flags = GLD_VTAG_TCI(vtag) << GEM_SEND_VTAG_SHIFT;
+#endif
+ return ((usbgem_send_common(dp, mp, 0) == NULL) ?
+ GLD_SUCCESS : GLD_NORESOURCES);
+}
+
+static void
+usbgem_gld_init(struct usbgem_dev *dp, gld_mac_info_t *macinfo, char *ident)
+{
+ /*
+ * configure GLD
+ */
+ macinfo->gldm_devinfo = dp->dip;
+ macinfo->gldm_private = (caddr_t)dp;
+
+ macinfo->gldm_reset = usbgem_gld_reset;
+ macinfo->gldm_start = usbgem_gld_start;
+ macinfo->gldm_stop = usbgem_gld_stop;
+ macinfo->gldm_set_mac_addr = usbgem_gld_set_mac_address;
+ macinfo->gldm_send = usbgem_gld_send;
+ macinfo->gldm_set_promiscuous = usbgem_gld_set_promiscuous;
+ macinfo->gldm_get_stats = usbgem_gld_get_stats;
+ macinfo->gldm_ioctl = usbgem_gld_ioctl;
+ macinfo->gldm_set_multicast = usbgem_gld_set_multicast;
+ macinfo->gldm_intr = NULL;
+ macinfo->gldm_mctl = NULL;
+
+ macinfo->gldm_ident = ident;
+ macinfo->gldm_type = DL_ETHER;
+ macinfo->gldm_minpkt = 0;
+ macinfo->gldm_maxpkt = dp->mtu;
+ macinfo->gldm_addrlen = ETHERADDRL;
+ macinfo->gldm_saplen = -2;
+ macinfo->gldm_ppa = ddi_get_instance(dp->dip);
+#ifdef GLD_CAP_LINKSTATE
+ macinfo->gldm_capabilities = GLD_CAP_LINKSTATE;
+#endif
+ macinfo->gldm_vendor_addr = dp->dev_addr.ether_addr_octet;
+ macinfo->gldm_broadcast_addr = usbgem_bcastaddr;
+}
+#endif /* USBGEM_CONFIG_GLDv3 */
+
+
+/* ======================================================================== */
+/*
+ * .conf interface
+ */
+/* ======================================================================== */
+void
+usbgem_generate_macaddr(struct usbgem_dev *dp, uint8_t *mac)
+{
+ extern char hw_serial[];
+ char *hw_serial_p;
+ int i;
+ uint64_t val;
+ uint64_t key;
+
+	cmn_err(CE_NOTE,
+	    "!%s: using a generated temporary ether address;"
+	    " do not rely on it for long-term use",
+	    dp->name);
+
+ /* prefer a fixed address for DHCP */
+ hw_serial_p = &hw_serial[0];
+ val = stoi(&hw_serial_p);
+
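+	/*
+	 * XOR the driver name and instance number into the upper bits
+	 * of the serial-derived value so that different devices on the
+	 * same host get distinct generated addresses.
+	 */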
+ key = 0;
+ for (i = 0; i < USBGEM_NAME_LEN; i++) {
+ if (dp->name[i] == 0) {
+ break;
+ }
+ key ^= dp->name[i];
+ }
+ key ^= ddi_get_instance(dp->dip);
+ val ^= key << 32;
+
+ /* generate a local address */
+ mac[0] = 0x02;
+ mac[1] = (uint8_t)(val >> 32);
+ mac[2] = (uint8_t)(val >> 24);
+ mac[3] = (uint8_t)(val >> 16);
+ mac[4] = (uint8_t)(val >> 8);
+ mac[5] = (uint8_t)val;
+}
+
+boolean_t
+usbgem_get_mac_addr_conf(struct usbgem_dev *dp)
+{
+ char propname[32];
+ char *valstr;
+ uint8_t mac[ETHERADDRL];
+ char *cp;
+ int c;
+ int i;
+ int j;
+ uint8_t v;
+ uint8_t d;
+ uint8_t ored;
+
+ DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+ /*
+ * Get ethernet address from .conf file
+ */
+ (void) sprintf(propname, "mac-addr");
+ if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
+ DDI_PROP_DONTPASS, propname, &valstr)) != DDI_PROP_SUCCESS) {
+ return (B_FALSE);
+ }
+
+ if (strlen(valstr) != ETHERADDRL*3-1) {
+ goto syntax_err;
+ }
+
+ cp = valstr;
+ j = 0;
+ ored = 0;
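+	/*
+	 * Parse the "xx:xx:xx:xx:xx:xx" string: two hex digits per
+	 * octet, colon separated.  ORing the octets together lets us
+	 * detect an all-zero address afterwards.
+	 */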
+ for (;;) {
+ v = 0;
+ for (i = 0; i < 2; i++) {
+ c = *cp++;
+
+ if (c >= 'a' && c <= 'f') {
+ d = c - 'a' + 10;
+ } else if (c >= 'A' && c <= 'F') {
+ d = c - 'A' + 10;
+ } else if (c >= '0' && c <= '9') {
+ d = c - '0';
+ } else {
+ goto syntax_err;
+ }
+ v = (v << 4) | d;
+ }
+
+ mac[j++] = v;
+ ored |= v;
+ if (j == ETHERADDRL) {
+ /* done */
+ break;
+ }
+
+ c = *cp++;
+ if (c != ':') {
+ goto syntax_err;
+ }
+ }
+
+ if (ored == 0) {
+ usbgem_generate_macaddr(dp, mac);
+ }
+ for (i = 0; i < ETHERADDRL; i++) {
+ dp->dev_addr.ether_addr_octet[i] = mac[i];
+ }
+ ddi_prop_free(valstr);
+ return (B_TRUE);
+
+syntax_err:
+	cmn_err(CE_CONT,
+	    "!%s: mac-addr property in .conf has a syntax error: %s",
+	    dp->name, valstr);
+ ddi_prop_free(valstr);
+
+ return (B_FALSE);
+}
+
+static void
+usbgem_read_conf(struct usbgem_dev *dp)
+{
+ int val;
+
+ DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /*
+	 * Get media mode information from the .conf file
+ */
+ dp->anadv_autoneg = usbgem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
+ dp->anadv_1000fdx = usbgem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
+ dp->anadv_1000hdx = usbgem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
+ dp->anadv_100t4 = usbgem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
+ dp->anadv_100fdx = usbgem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
+ dp->anadv_100hdx = usbgem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
+ dp->anadv_10fdx = usbgem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
+ dp->anadv_10hdx = usbgem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
+ dp->anadv_1000t_ms = usbgem_prop_get_int(dp, "adv_1000t_ms", 0);
+
+ if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
+ DDI_PROP_DONTPASS, "full-duplex"))) {
+ dp->full_duplex =
+ usbgem_prop_get_int(dp, "full-duplex", 1) != 0;
+ dp->anadv_autoneg = B_FALSE;
+ if (dp->full_duplex) {
+ dp->anadv_1000hdx = B_FALSE;
+ dp->anadv_100hdx = B_FALSE;
+ dp->anadv_10hdx = B_FALSE;
+ } else {
+ dp->anadv_1000fdx = B_FALSE;
+ dp->anadv_100fdx = B_FALSE;
+ dp->anadv_10fdx = B_FALSE;
+ }
+ }
+
+ if ((val = usbgem_prop_get_int(dp, "speed", 0)) > 0) {
+ dp->anadv_autoneg = B_FALSE;
+ switch (val) {
+ case 1000:
+ dp->speed = USBGEM_SPD_1000;
+ dp->anadv_100t4 = B_FALSE;
+ dp->anadv_100fdx = B_FALSE;
+ dp->anadv_100hdx = B_FALSE;
+ dp->anadv_10fdx = B_FALSE;
+ dp->anadv_10hdx = B_FALSE;
+ break;
+ case 100:
+ dp->speed = USBGEM_SPD_100;
+ dp->anadv_1000fdx = B_FALSE;
+ dp->anadv_1000hdx = B_FALSE;
+ dp->anadv_10fdx = B_FALSE;
+ dp->anadv_10hdx = B_FALSE;
+ break;
+ case 10:
+ dp->speed = USBGEM_SPD_10;
+ dp->anadv_1000fdx = B_FALSE;
+ dp->anadv_1000hdx = B_FALSE;
+ dp->anadv_100t4 = B_FALSE;
+ dp->anadv_100fdx = B_FALSE;
+ dp->anadv_100hdx = B_FALSE;
+ break;
+ default:
+ cmn_err(CE_WARN,
+ "!%s: property %s: illegal value:%d",
+ dp->name, "speed", val);
+ dp->anadv_autoneg = B_TRUE;
+ break;
+ }
+ }
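+	/*
+	 * Flow control is a two-bit value: bit 0 is symmetric pause
+	 * (adv_pause) and bit 1 is asymmetric pause (adv_asmpause),
+	 * matching the FLOW_CONTROL_* constants.
+	 */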
+ val = usbgem_prop_get_int(dp,
+ "adv_pause", dp->ugc.usbgc_flow_control & 1);
+ val |= usbgem_prop_get_int(dp,
+ "adv_asmpause", BOOLEAN(dp->ugc.usbgc_flow_control & 2)) << 1;
+ if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) {
+ cmn_err(CE_WARN,
+ "!%s: property %s: illegal value:%d",
+ dp->name, "flow-control", val);
+ } else {
+ val = min(val, dp->ugc.usbgc_flow_control);
+ }
+ dp->anadv_pause = BOOLEAN(val & 1);
+ dp->anadv_asmpause = BOOLEAN(val & 2);
+
+ dp->mtu = usbgem_prop_get_int(dp, "mtu", dp->mtu);
+ dp->txthr = usbgem_prop_get_int(dp, "txthr", dp->txthr);
+ dp->rxthr = usbgem_prop_get_int(dp, "rxthr", dp->rxthr);
+ dp->txmaxdma = usbgem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
+ dp->rxmaxdma = usbgem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
+#ifdef GEM_CONFIG_POLLING
+ dp->poll_pkt_delay =
+ usbgem_prop_get_int(dp, "pkt_delay", dp->poll_pkt_delay);
+
+ dp->max_poll_interval[GEM_SPD_10] =
+ usbgem_prop_get_int(dp, "max_poll_interval_10",
+ dp->max_poll_interval[GEM_SPD_10]);
+ dp->max_poll_interval[GEM_SPD_100] =
+ usbgem_prop_get_int(dp, "max_poll_interval_100",
+ dp->max_poll_interval[GEM_SPD_100]);
+ dp->max_poll_interval[GEM_SPD_1000] =
+ usbgem_prop_get_int(dp, "max_poll_interval_1000",
+ dp->max_poll_interval[GEM_SPD_1000]);
+
+ dp->min_poll_interval[GEM_SPD_10] =
+ usbgem_prop_get_int(dp, "min_poll_interval_10",
+ dp->min_poll_interval[GEM_SPD_10]);
+ dp->min_poll_interval[GEM_SPD_100] =
+ usbgem_prop_get_int(dp, "min_poll_interval_100",
+ dp->min_poll_interval[GEM_SPD_100]);
+ dp->min_poll_interval[GEM_SPD_1000] =
+ usbgem_prop_get_int(dp, "min_poll_interval_1000",
+ dp->min_poll_interval[GEM_SPD_1000]);
+#endif
+}
+
+/*
+ * usbgem kstat support
+ */
+#ifndef USBGEM_CONFIG_GLDv3
+/* kstat items based on the dmfe driver */
+
+struct usbgem_kstat_named {
+ struct kstat_named ks_xcvr_addr;
+ struct kstat_named ks_xcvr_id;
+ struct kstat_named ks_xcvr_inuse;
+ struct kstat_named ks_link_up;
+	struct kstat_named	ks_link_duplex;	/* 0:unknown, 1:half, 2:full */
+ struct kstat_named ks_cap_1000fdx;
+ struct kstat_named ks_cap_1000hdx;
+ struct kstat_named ks_cap_100fdx;
+ struct kstat_named ks_cap_100hdx;
+ struct kstat_named ks_cap_10fdx;
+ struct kstat_named ks_cap_10hdx;
+#ifdef NEVER
+ struct kstat_named ks_cap_remfault;
+#endif
+ struct kstat_named ks_cap_autoneg;
+
+ struct kstat_named ks_adv_cap_1000fdx;
+ struct kstat_named ks_adv_cap_1000hdx;
+ struct kstat_named ks_adv_cap_100fdx;
+ struct kstat_named ks_adv_cap_100hdx;
+ struct kstat_named ks_adv_cap_10fdx;
+ struct kstat_named ks_adv_cap_10hdx;
+#ifdef NEVER
+ struct kstat_named ks_adv_cap_remfault;
+#endif
+ struct kstat_named ks_adv_cap_autoneg;
+ struct kstat_named ks_lp_cap_1000fdx;
+ struct kstat_named ks_lp_cap_1000hdx;
+ struct kstat_named ks_lp_cap_100fdx;
+ struct kstat_named ks_lp_cap_100hdx;
+ struct kstat_named ks_lp_cap_10fdx;
+ struct kstat_named ks_lp_cap_10hdx;
+ struct kstat_named ks_lp_cap_remfault;
+ struct kstat_named ks_lp_cap_autoneg;
+};
+
+static int
+usbgem_kstat_update(kstat_t *ksp, int rw)
+{
+ struct usbgem_kstat_named *knp;
+ struct usbgem_dev *dp = (struct usbgem_dev *)ksp->ks_private;
+
+ if (rw != KSTAT_READ) {
+ return (0);
+ }
+
+ knp = (struct usbgem_kstat_named *)ksp->ks_data;
+
+ knp->ks_xcvr_addr.value.ul = dp->mii_phy_addr;
+ knp->ks_xcvr_id.value.ul = dp->mii_phy_id;
+ knp->ks_xcvr_inuse.value.ul = usbgem_mac_xcvr_inuse(dp);
+ knp->ks_link_up.value.ul = dp->mii_state == MII_STATE_LINKUP;
+ knp->ks_link_duplex.value.ul =
+ (dp->mii_state == MII_STATE_LINKUP) ?
+ (dp->full_duplex ? 2 : 1) : 0;
+
+ knp->ks_cap_1000fdx.value.ul =
+ (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
+ (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
+ knp->ks_cap_1000hdx.value.ul =
+ (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
+ (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
+ knp->ks_cap_100fdx.value.ul =
+ BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
+ knp->ks_cap_100hdx.value.ul =
+ BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
+ knp->ks_cap_10fdx.value.ul =
+ BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
+ knp->ks_cap_10hdx.value.ul =
+ BOOLEAN(dp->mii_status & MII_STATUS_10);
+#ifdef NEVER
+ knp->ks_cap_remfault.value.ul = B_TRUE;
+#endif
+ knp->ks_cap_autoneg.value.ul =
+ BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
+
+ knp->ks_adv_cap_1000fdx.value.ul = dp->anadv_1000fdx;
+ knp->ks_adv_cap_1000hdx.value.ul = dp->anadv_1000hdx;
+ knp->ks_adv_cap_100fdx.value.ul = dp->anadv_100fdx;
+ knp->ks_adv_cap_100hdx.value.ul = dp->anadv_100hdx;
+ knp->ks_adv_cap_10fdx.value.ul = dp->anadv_10fdx;
+ knp->ks_adv_cap_10hdx.value.ul = dp->anadv_10hdx;
+#ifdef NEVER
+ knp->ks_adv_cap_remfault.value.ul = 0;
+#endif
+ knp->ks_adv_cap_autoneg.value.ul = dp->anadv_autoneg;
+
+ knp->ks_lp_cap_1000fdx.value.ul =
+ BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
+ knp->ks_lp_cap_1000hdx.value.ul =
+ BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
+ knp->ks_lp_cap_100fdx.value.ul =
+ BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
+ knp->ks_lp_cap_100hdx.value.ul =
+ BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
+ knp->ks_lp_cap_10fdx.value.ul =
+ BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
+ knp->ks_lp_cap_10hdx.value.ul =
+ BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
+ knp->ks_lp_cap_remfault.value.ul =
+ BOOLEAN(dp->mii_exp & MII_AN_EXP_PARFAULT);
+ knp->ks_lp_cap_autoneg.value.ul =
+ BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
+
+ return (0);
+}
+
+
+static int
+usbgem_kstat_init(struct usbgem_dev *dp)
+{
+ int i;
+ kstat_t *ksp;
+ struct usbgem_kstat_named *knp;
+
+ ksp = kstat_create(
+ (char *)ddi_driver_name(dp->dip), ddi_get_instance(dp->dip),
+ "mii", "net", KSTAT_TYPE_NAMED,
+ sizeof (*knp) / sizeof (knp->ks_xcvr_addr), 0);
+
+ if (ksp == NULL) {
+ cmn_err(CE_WARN, "%s: %s() for mii failed",
+ dp->name, __func__);
+ return (USB_FAILURE);
+ }
+
+ knp = (struct usbgem_kstat_named *)ksp->ks_data;
+
+ kstat_named_init(&knp->ks_xcvr_addr, "xcvr_addr",
+ KSTAT_DATA_INT32);
+ kstat_named_init(&knp->ks_xcvr_id, "xcvr_id",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_xcvr_inuse, "xcvr_inuse",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_link_up, "link_up",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_link_duplex, "link_duplex",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_cap_1000fdx, "cap_1000fdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_cap_1000hdx, "cap_1000hdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_cap_100fdx, "cap_100fdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_cap_100hdx, "cap_100hdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_cap_10fdx, "cap_10fdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_cap_10hdx, "cap_10hdx",
+ KSTAT_DATA_UINT32);
+#ifdef NEVER
+ kstat_named_init(&knp->ks_cap_remfault, "cap_rem_fault",
+ KSTAT_DATA_UINT32);
+#endif
+ kstat_named_init(&knp->ks_cap_autoneg, "cap_autoneg",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_adv_cap_1000fdx, "adv_cap_1000fdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_adv_cap_1000hdx, "adv_cap_1000hdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_adv_cap_100fdx, "adv_cap_100fdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_adv_cap_100hdx, "adv_cap_100hdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_adv_cap_10fdx, "adv_cap_10fdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_adv_cap_10hdx, "adv_cap_10hdx",
+ KSTAT_DATA_UINT32);
+#ifdef NEVER
+ kstat_named_init(&knp->ks_adv_cap_remfault, "adv_rem_fault",
+ KSTAT_DATA_UINT32);
+#endif
+ kstat_named_init(&knp->ks_adv_cap_autoneg, "adv_cap_autoneg",
+ KSTAT_DATA_UINT32);
+
+ kstat_named_init(&knp->ks_lp_cap_1000fdx, "lp_cap_1000fdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_lp_cap_1000hdx, "lp_cap_1000hdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_lp_cap_100fdx, "lp_cap_100fdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_lp_cap_100hdx, "lp_cap_100hdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_lp_cap_10fdx, "lp_cap_10fdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_lp_cap_10hdx, "lp_cap_10hdx",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_lp_cap_remfault, "lp_cap_rem_fault",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&knp->ks_lp_cap_autoneg, "lp_cap_autoneg",
+ KSTAT_DATA_UINT32);
+
+ ksp->ks_private = (void *) dp;
+ ksp->ks_update = usbgem_kstat_update;
+ dp->ksp = ksp;
+
+ kstat_install(ksp);
+
+ return (USB_SUCCESS);
+}
+#endif /* !USBGEM_CONFIG_GLDv3 */
+/* ======================================================================== */
+/*
+ * attach/detach/usb support
+ */
+/* ======================================================================== */
+int
+usbgem_ctrl_out(struct usbgem_dev *dp,
+ uint8_t reqt, uint8_t req, uint16_t val, uint16_t ix, uint16_t len,
+ void *bp, int size)
+{
+ mblk_t *data;
+ usb_ctrl_setup_t setup;
+ usb_cr_t completion_reason;
+ usb_cb_flags_t cb_flags;
+ usb_flags_t flags;
+ int i;
+ int ret;
+
+ DPRINTF(4, (CE_CONT, "!%s: %s "
+ "reqt:0x%02x req:0x%02x val:0x%04x ix:0x%04x len:0x%02x "
+ "bp:0x%p nic_state:%d",
+ dp->name, __func__, reqt, req, val, ix, len, bp, dp->nic_state));
+
+ if (dp->mac_state == MAC_STATE_DISCONNECTED) {
+ return (USB_PIPE_ERROR);
+ }
+
+ data = NULL;
+ if (size > 0) {
+ if ((data = allocb(size, 0)) == NULL) {
+ return (USB_FAILURE);
+ }
+
+ bcopy(bp, data->b_rptr, size);
+ data->b_wptr = data->b_rptr + size;
+ }
+
+ setup.bmRequestType = reqt;
+ setup.bRequest = req;
+ setup.wValue = val;
+ setup.wIndex = ix;
+ setup.wLength = len;
+ setup.attrs = 0; /* attributes */
+
+ for (i = usbgem_ctrl_retry; i > 0; i--) {
+ completion_reason = 0;
+ cb_flags = 0;
+
+ ret = usb_pipe_ctrl_xfer_wait(DEFAULT_PIPE(dp),
+ &setup, &data, &completion_reason, &cb_flags, 0);
+
+ if (ret == USB_SUCCESS) {
+ break;
+ }
+ if (i == 1) {
+ cmn_err(CE_WARN,
+ "!%s: %s failed: "
+ "reqt:0x%x req:0x%x val:0x%x ix:0x%x len:0x%x "
+ "ret:%d cr:%s(%d), cb_flags:0x%x %s",
+ dp->name, __func__, reqt, req, val, ix, len,
+ ret, usb_str_cr(completion_reason),
+ completion_reason,
+ cb_flags,
+ (i > 1) ? "retrying..." : "fatal");
+ }
+ }
+
+ if (data != NULL) {
+ freemsg(data);
+ }
+
+ return (ret);
+}
+
+int
+usbgem_ctrl_in(struct usbgem_dev *dp,
+ uint8_t reqt, uint8_t req, uint16_t val, uint16_t ix, uint16_t len,
+ void *bp, int size)
+{
+ mblk_t *data;
+ usb_ctrl_setup_t setup;
+ usb_cr_t completion_reason;
+ usb_cb_flags_t cb_flags;
+ int i;
+ int ret;
+ int reclen;
+
+ DPRINTF(4, (CE_CONT,
+ "!%s: %s:"
+ " reqt:0x%02x req:0x%02x val:0x%04x ix:0x%04x len:0x%02x"
+ " bp:x%p mac_state:%d",
+ dp->name, __func__, reqt, req, val, ix, len, bp, dp->mac_state));
+
+ if (dp->mac_state == MAC_STATE_DISCONNECTED) {
+ return (USB_PIPE_ERROR);
+ }
+
+ data = NULL;
+
+ setup.bmRequestType = reqt;
+ setup.bRequest = req;
+ setup.wValue = val;
+ setup.wIndex = ix;
+ setup.wLength = len;
+ setup.attrs = USB_ATTRS_AUTOCLEARING; /* XXX */
+
+ for (i = usbgem_ctrl_retry; i > 0; i--) {
+ completion_reason = 0;
+ cb_flags = 0;
+ ret = usb_pipe_ctrl_xfer_wait(DEFAULT_PIPE(dp), &setup, &data,
+ &completion_reason, &cb_flags, 0);
+
+ if (ret == USB_SUCCESS) {
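+			/*
+			 * Note: the reply is assumed to fit in the
+			 * first mblk; only that block is copied out.
+			 */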
+ reclen = msgdsize(data);
+ bcopy(data->b_rptr, bp, min(reclen, size));
+ break;
+ }
+ if (i == 1) {
+ cmn_err(CE_WARN,
+ "!%s: %s failed: "
+ "reqt:0x%x req:0x%x val:0x%x ix:0x%x len:0x%x "
+ "ret:%d cr:%s(%d) cb_flags:0x%x %s",
+ dp->name, __func__,
+ reqt, req, val, ix, len,
+ ret, usb_str_cr(completion_reason),
+ completion_reason,
+ cb_flags,
+ (i > 1) ? "retrying..." : "fatal");
+ }
+ }
+
+ if (data) {
+ freemsg(data);
+ }
+
+ return (ret);
+}
+
+int
+usbgem_ctrl_out_val(struct usbgem_dev *dp,
+ uint8_t reqt, uint8_t req, uint16_t val, uint16_t ix, uint16_t len,
+ uint32_t v)
+{
+ uint8_t buf[4];
+
+ /* convert to little endian from native byte order */
+ switch (len) {
+ case 4:
+ buf[3] = v >> 24;
+ buf[2] = v >> 16;
+ /* fall thru */
+ case 2:
+ buf[1] = v >> 8;
+ /* fall thru */
+ case 1:
+ buf[0] = v;
+ }
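+	/*
+	 * e.g. len == 4 and v == 0x12345678 yields
+	 * buf[] = { 0x78, 0x56, 0x34, 0x12 }.
+	 */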
+
+ return (usbgem_ctrl_out(dp, reqt, req, val, ix, len, buf, len));
+}
+
+int
+usbgem_ctrl_in_val(struct usbgem_dev *dp,
+ uint8_t reqt, uint8_t req, uint16_t val, uint16_t ix, uint16_t len,
+ void *valp)
+{
+ uint8_t buf[4];
+ uint_t v;
+ int err;
+
+#ifdef SANITY
+ bzero(buf, sizeof (buf));
+#endif
+ err = usbgem_ctrl_in(dp, reqt, req, val, ix, len, buf, len);
+ if (err == USB_SUCCESS) {
+ v = 0;
+ switch (len) {
+ case 4:
+ v |= buf[3] << 24;
+ v |= buf[2] << 16;
+ /* FALLTHROUGH */
+ case 2:
+ v |= buf[1] << 8;
+ /* FALLTHROUGH */
+ case 1:
+ v |= buf[0];
+ }
+
+ switch (len) {
+ case 4:
+ *(uint32_t *)valp = v;
+ break;
+ case 2:
+ *(uint16_t *)valp = v;
+ break;
+ case 1:
+ *(uint8_t *)valp = v;
+ break;
+ }
+ }
+ return (err);
+}
+
+/*
+ * Attach / detach / disconnect / reconnect management
+ */
+static int
+usbgem_open_pipes(struct usbgem_dev *dp)
+{
+ int i;
+ int ret;
+ int ifnum;
+ int alt;
+ usb_client_dev_data_t *reg_data;
+ usb_ep_data_t *ep_tree_node;
+
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ ifnum = dp->ugc.usbgc_ifnum;
+ alt = dp->ugc.usbgc_alt;
+
+ ep_tree_node = usb_lookup_ep_data(dp->dip, dp->reg_data, ifnum, alt,
+ 0, USB_EP_ATTR_BULK, USB_EP_DIR_IN);
+ if (ep_tree_node == NULL) {
+ cmn_err(CE_WARN, "!%s: %s: ep_bulkin is NULL",
+ dp->name, __func__);
+ goto err;
+ }
+ dp->ep_bulkin = &ep_tree_node->ep_descr;
+
+ ep_tree_node = usb_lookup_ep_data(dp->dip, dp->reg_data, ifnum, alt,
+ 0, USB_EP_ATTR_BULK, USB_EP_DIR_OUT);
+ if (ep_tree_node == NULL) {
+ cmn_err(CE_WARN, "!%s: %s: ep_bulkout is NULL",
+ dp->name, __func__);
+ goto err;
+ }
+ dp->ep_bulkout = &ep_tree_node->ep_descr;
+
+ ep_tree_node = usb_lookup_ep_data(dp->dip, dp->reg_data, ifnum, alt,
+ 0, USB_EP_ATTR_INTR, USB_EP_DIR_IN);
+ if (ep_tree_node) {
+ dp->ep_intr = &ep_tree_node->ep_descr;
+ } else {
+ /* don't care */
+ DPRINTF(1, (CE_CONT, "!%s: %s: ep_intr is NULL",
+ dp->name, __func__));
+ dp->ep_intr = NULL;
+ }
+
+ /* XXX -- no need to open default pipe */
+
+ /* open bulk out pipe */
+ bzero(&dp->policy_bulkout, sizeof (usb_pipe_policy_t));
+ dp->policy_bulkout.pp_max_async_reqs = 1;
+
+ if ((ret = usb_pipe_open(dp->dip,
+ dp->ep_bulkout, &dp->policy_bulkout, USB_FLAGS_SLEEP,
+ &dp->bulkout_pipe)) != USB_SUCCESS) {
+ cmn_err(CE_WARN,
+ "!%s: %s: err:%x: failed to open bulk-out pipe",
+ dp->name, __func__, ret);
+ dp->bulkout_pipe = NULL;
+ goto err;
+ }
+ DPRINTF(1, (CE_CONT, "!%s: %s: bulkout_pipe opened successfully",
+ dp->name, __func__));
+
+ /* open bulk in pipe */
+ bzero(&dp->policy_bulkin, sizeof (usb_pipe_policy_t));
+ dp->policy_bulkin.pp_max_async_reqs = 1;
+ if ((ret = usb_pipe_open(dp->dip,
+ dp->ep_bulkin, &dp->policy_bulkin, USB_FLAGS_SLEEP,
+ &dp->bulkin_pipe)) != USB_SUCCESS) {
+ cmn_err(CE_WARN,
+ "!%s: %s: ret:%x failed to open bulk-in pipe",
+ dp->name, __func__, ret);
+ dp->bulkin_pipe = NULL;
+ goto err;
+ }
+ DPRINTF(1, (CE_CONT, "!%s: %s: bulkin_pipe opened successfully",
+ dp->name, __func__));
+
+ if (dp->ep_intr) {
+ /* open interrupt pipe */
+ bzero(&dp->policy_interrupt, sizeof (usb_pipe_policy_t));
+ dp->policy_interrupt.pp_max_async_reqs = 1;
+ if ((ret = usb_pipe_open(dp->dip, dp->ep_intr,
+ &dp->policy_interrupt, USB_FLAGS_SLEEP,
+ &dp->intr_pipe)) != USB_SUCCESS) {
+ cmn_err(CE_WARN,
+ "!%s: %s: ret:%x failed to open interrupt pipe",
+ dp->name, __func__, ret);
+ dp->intr_pipe = NULL;
+ goto err;
+ }
+ }
+ DPRINTF(1, (CE_CONT, "!%s: %s: intr_pipe opened successfully",
+ dp->name, __func__));
+
+ return (USB_SUCCESS);
+
+err:
+ if (dp->bulkin_pipe) {
+ usb_pipe_close(dp->dip,
+ dp->bulkin_pipe, USB_FLAGS_SLEEP, NULL, 0);
+ dp->bulkin_pipe = NULL;
+ }
+ if (dp->bulkout_pipe) {
+ usb_pipe_close(dp->dip,
+ dp->bulkout_pipe, USB_FLAGS_SLEEP, NULL, 0);
+ dp->bulkout_pipe = NULL;
+ }
+ if (dp->intr_pipe) {
+ usb_pipe_close(dp->dip,
+ dp->intr_pipe, USB_FLAGS_SLEEP, NULL, 0);
+ dp->intr_pipe = NULL;
+ }
+
+ return (USB_FAILURE);
+}
+
+static int
+usbgem_close_pipes(struct usbgem_dev *dp)
+{
+ DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ if (dp->intr_pipe) {
+ usb_pipe_close(dp->dip,
+ dp->intr_pipe, USB_FLAGS_SLEEP, NULL, 0);
+ dp->intr_pipe = NULL;
+ }
+ DPRINTF(1, (CE_CONT, "!%s: %s: 1", dp->name, __func__));
+
+ ASSERT(dp->bulkin_pipe);
+ usb_pipe_close(dp->dip, dp->bulkin_pipe, USB_FLAGS_SLEEP, NULL, 0);
+ dp->bulkin_pipe = NULL;
+ DPRINTF(1, (CE_CONT, "!%s: %s: 2", dp->name, __func__));
+
+ ASSERT(dp->bulkout_pipe);
+ usb_pipe_close(dp->dip, dp->bulkout_pipe, USB_FLAGS_SLEEP, NULL, 0);
+ dp->bulkout_pipe = NULL;
+ DPRINTF(1, (CE_CONT, "!%s: %s: 3", dp->name, __func__));
+
+ return (USB_SUCCESS);
+}
+
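+/*
+ * The boolean is passed through to usbgem_mac_stop() as its
+ * "graceful" argument.
+ */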
+#define FREEZE_GRACEFUL (B_TRUE)
+#define FREEZE_NO_GRACEFUL (B_FALSE)
+static int
+usbgem_freeze_device(struct usbgem_dev *dp, boolean_t graceful)
+{
+ DPRINTF(0, (CE_NOTE, "!%s: %s: called", dp->name, __func__));
+
+ /* stop nic activity */
+ (void) usbgem_mac_stop(dp, MAC_STATE_DISCONNECTED, graceful);
+
+	/*
+	 * Free all allocated memory resources now; freeing the
+	 * usb_bulk_req objects after the usb device has been
+	 * disconnected would panic the system.
+	 */
+ (void) usbgem_free_memory(dp);
+
+ return (USB_SUCCESS);
+}
+
+static int
+usbgem_disconnect_cb(dev_info_t *dip)
+{
+ int ret;
+ struct usbgem_dev *dp;
+
+ dp = USBGEM_GET_DEV(dip);
+
+ cmn_err(CE_NOTE, "!%s: the usb device was disconnected (dp=%p)",
+ dp->name, dp);
+
+ /* start serialize */
+ rw_enter(&dp->dev_state_lock, RW_WRITER);
+
+	ret = usbgem_freeze_device(dp, FREEZE_NO_GRACEFUL);
+
+ /* end of serialize */
+ rw_exit(&dp->dev_state_lock);
+
+ return (ret);
+}
+
+static int
+usbgem_recover_device(struct usbgem_dev *dp)
+{
+ int err;
+
+ DPRINTF(0, (CE_NOTE, "!%s: %s: called", dp->name, __func__));
+
+ err = USB_SUCCESS;
+
+ /* reinitialize the usb connection */
+ usbgem_close_pipes(dp);
+ if ((err = usbgem_open_pipes(dp)) != USB_SUCCESS) {
+ goto x;
+ }
+
+ /* initialize nic state */
+ dp->mac_state = MAC_STATE_STOPPED;
+ dp->mii_state = MII_STATE_UNKNOWN;
+
+ /* allocate memory resources again */
+ if ((err = usbgem_alloc_memory(dp)) != USB_SUCCESS) {
+ goto x;
+ }
+
+ /* restart nic and recover state */
+ (void) usbgem_restart_nic(dp);
+
+ usbgem_mii_init(dp);
+
+	/* kick potentially stopped housekeeping thread */
+ cv_signal(&dp->link_watcher_wait_cv);
+x:
+ return (err);
+}
+
+static int
+usbgem_reconnect_cb(dev_info_t *dip)
+{
+ int err = USB_SUCCESS;
+ struct usbgem_dev *dp;
+
+ dp = USBGEM_GET_DEV(dip);
+ DPRINTF(0, (CE_CONT, "!%s: dp=%p", ddi_get_name(dip), dp));
+#ifdef notdef
+ /* check device changes after disconnect */
+ if (usb_check_same_device(dp->dip, NULL, USB_LOG_L2, -1,
+ USB_CHK_BASIC | USB_CHK_CFG, NULL) != USB_SUCCESS) {
+ cmn_err(CE_CONT,
+ "!%s: no or different device installed", dp->name);
+ return (DDI_SUCCESS);
+ }
+#endif
+ cmn_err(CE_NOTE, "%s: the usb device was reconnected", dp->name);
+
+ /* start serialize */
+ rw_enter(&dp->dev_state_lock, RW_WRITER);
+
+ if (dp->mac_state == MAC_STATE_DISCONNECTED) {
+ err = usbgem_recover_device(dp);
+ }
+
+ /* end of serialize */
+ rw_exit(&dp->dev_state_lock);
+
+ return (err == USB_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
+}
+
+int
+usbgem_suspend(dev_info_t *dip)
+{
+ int err = USB_SUCCESS;
+ struct usbgem_dev *dp;
+
+ dp = USBGEM_GET_DEV(dip);
+
+	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+
+ /* start serialize */
+ rw_enter(&dp->dev_state_lock, RW_WRITER);
+
+	if (dp->mac_state != MAC_STATE_DISCONNECTED) {
+		err = usbgem_freeze_device(dp, FREEZE_GRACEFUL);
+	}
+
+ /* end of serialize */
+ rw_exit(&dp->dev_state_lock);
+
+ return (err == USB_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
+}
+
+int
+usbgem_resume(dev_info_t *dip)
+{
+ int err = USB_SUCCESS;
+ struct usbgem_dev *dp;
+
+ dp = USBGEM_GET_DEV(dip);
+
+	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
+#ifdef notdef
+ /* check device changes after disconnect */
+ if (usb_check_same_device(dp->dip, NULL, USB_LOG_L2, -1,
+ USB_CHK_BASIC | USB_CHK_CFG, NULL) != USB_SUCCESS) {
+ cmn_err(CE_CONT,
+ "!%s: no or different device installed", dp->name);
+ return (DDI_SUCCESS);
+ }
+#endif
+ /* start serialize */
+ rw_enter(&dp->dev_state_lock, RW_WRITER);
+
+ if (dp->mac_state == MAC_STATE_DISCONNECTED) {
+ err = usbgem_recover_device(dp);
+ }
+
+ /* end of serialize */
+ rw_exit(&dp->dev_state_lock);
+
+ return (err == USB_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
+}
+
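+/*
+ * The soft state and the USBGEM_MAXMC-entry multicast list are carved
+ * out of a single allocation; dp->mc_list points just past the
+ * usbgem_dev structure (see usbgem_do_attach below).
+ */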
+#define USBGEM_LOCAL_DATA_SIZE(gc) \
+ (sizeof (struct usbgem_dev) + USBGEM_MCALLOC)
+
+struct usbgem_dev *
+usbgem_do_attach(dev_info_t *dip,
+ struct usbgem_conf *gc, void *lp, int lmsize)
+{
+ struct usbgem_dev *dp;
+ int i;
+#ifdef USBGEM_CONFIG_GLDv3
+ mac_register_t *macp = NULL;
+#else
+ gld_mac_info_t *macinfo;
+ void *tmp;
+#endif
+ int ret;
+ int unit;
+ int err;
+
+ unit = ddi_get_instance(dip);
+
+ DPRINTF(2, (CE_CONT, "!usbgem%d: %s: called", unit, __func__));
+
+ /*
+ * Allocate soft data structure
+ */
+	/* KM_SLEEP allocations never fail */
+	dp = kmem_zalloc(USBGEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
+#ifdef USBGEM_CONFIG_GLDv3
+	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
+		cmn_err(CE_WARN, "!usbgem%d: %s: mac_alloc failed",
+		    unit, __func__);
+		kmem_free(dp, USBGEM_LOCAL_DATA_SIZE(gc));
+		return (NULL);
+	}
+#else
+ macinfo = gld_mac_alloc(dip);
+ dp->macinfo = macinfo;
+#endif
+
+ /* link to private area */
+ dp->private = lp;
+ dp->priv_size = lmsize;
+ dp->mc_list = (struct mcast_addr *)&dp[1];
+
+ dp->dip = dip;
+ bcopy(gc->usbgc_name, dp->name, USBGEM_NAME_LEN);
+
+ /*
+ * register with usb service
+ */
+ if (usb_client_attach(dip, USBDRV_VERSION, 0) != USB_SUCCESS) {
+ cmn_err(CE_WARN,
+ "%s: %s: usb_client_attach failed",
+ dp->name, __func__);
+ goto err_free_private;
+ }
+
+ if (usb_get_dev_data(dip, &dp->reg_data,
+ USB_PARSE_LVL_ALL, 0) != USB_SUCCESS) {
+ dp->reg_data = NULL;
+ goto err_unregister_client;
+ }
+#ifdef USBGEM_DEBUG_LEVEL
+ usb_print_descr_tree(dp->dip, dp->reg_data);
+#endif
+
+ if (usbgem_open_pipes(dp) != USB_SUCCESS) {
+ /* failed to open pipes */
+ cmn_err(CE_WARN, "!%s: %s: failed to open pipes",
+ dp->name, __func__);
+ goto err_unregister_client;
+ }
+
+ /*
+	 * Initialize mutexes and condition variables
+ */
+ mutex_init(&dp->rxlock, NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&dp->txlock, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&dp->rx_drain_cv, NULL, CV_DRIVER, NULL);
+ cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
+ rw_init(&dp->dev_state_lock, NULL, RW_DRIVER, NULL);
+ mutex_init(&dp->link_watcher_lock, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&dp->link_watcher_wait_cv, NULL, CV_DRIVER, NULL);
+ sema_init(&dp->hal_op_lock, 1, NULL, SEMA_DRIVER, NULL);
+ sema_init(&dp->rxfilter_lock, 1, NULL, SEMA_DRIVER, NULL);
+
+ /*
+ * Initialize configuration
+ */
+ dp->ugc = *gc;
+
+ dp->mtu = ETHERMTU;
+ dp->rxmode = 0;
+ dp->speed = USBGEM_SPD_10; /* default is 10Mbps */
+ dp->full_duplex = B_FALSE; /* default is half */
+ dp->flow_control = FLOW_CONTROL_NONE;
+
+ dp->nic_state = NIC_STATE_STOPPED;
+ dp->mac_state = MAC_STATE_STOPPED;
+ dp->mii_state = MII_STATE_UNKNOWN;
+
+ /* performance tuning parameters */
+	dp->txthr = ETHERMAX;	/* tx fifo threshold */
+	dp->txmaxdma = 16*4;	/* tx max dma burst size */
+	dp->rxthr = 128;	/* rx fifo threshold */
+	dp->rxmaxdma = 16*4;	/* rx max dma burst size */
+
+ /*
+	 * Get media mode information from the .conf file
+ */
+ usbgem_read_conf(dp);
+
+	/* rx_buf_len depends on the MTU */
+ dp->rx_buf_len = MAXPKTBUF(dp) + dp->ugc.usbgc_rx_header_len;
+
+ /*
+ * Reset the chip
+ */
+ if (usbgem_hal_reset_chip(dp) != USB_SUCCESS) {
+ cmn_err(CE_WARN,
+ "!%s: %s: failed to reset the usb device",
+ dp->name, __func__);
+ goto err_destroy_locks;
+ }
+
+ /*
+	 * HW-dependent parameter initialization
+ */
+ if (usbgem_hal_attach_chip(dp) != USB_SUCCESS) {
+ cmn_err(CE_WARN,
+ "!%s: %s: failed to attach the usb device",
+ dp->name, __func__);
+ goto err_destroy_locks;
+ }
+
+ /* allocate resources */
+ if (usbgem_alloc_memory(dp) != USB_SUCCESS) {
+ goto err_destroy_locks;
+ }
+
+ DPRINTF(0, (CE_CONT,
+ "!%s: %02x:%02x:%02x:%02x:%02x:%02x",
+ dp->name,
+ dp->dev_addr.ether_addr_octet[0],
+ dp->dev_addr.ether_addr_octet[1],
+ dp->dev_addr.ether_addr_octet[2],
+ dp->dev_addr.ether_addr_octet[3],
+ dp->dev_addr.ether_addr_octet[4],
+ dp->dev_addr.ether_addr_octet[5]));
+
+ /* copy mac address */
+ dp->cur_addr = dp->dev_addr;
+
+	/*
+	 * Pre-calculate the tx timeout in seconds: usbgc_tx_timeout is
+	 * in ticks, so divide by the number of ticks per second.
+	 */
+ dp->bulkout_timeout =
+ dp->ugc.usbgc_tx_timeout / drv_usectohz(1000*1000);
+
+#ifdef USBGEM_CONFIG_GLDv3
+ usbgem_gld3_init(dp, macp);
+#else
+ usbgem_gld_init(dp, macinfo, ident);
+#endif
+
+ /* Probe MII phy (scan phy) */
+ dp->mii_lpable = 0;
+ dp->mii_advert = 0;
+ dp->mii_exp = 0;
+ dp->mii_ctl1000 = 0;
+ dp->mii_stat1000 = 0;
+
+ dp->mii_status_ro = 0;
+ dp->mii_xstatus_ro = 0;
+
+ if (usbgem_mii_probe(dp) != USB_SUCCESS) {
+ cmn_err(CE_WARN, "!%s: %s: mii_probe failed",
+ dp->name, __func__);
+ goto err_free_memory;
+ }
+
+ /* mask unsupported abilities */
+ dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
+ dp->anadv_1000fdx &=
+ BOOLEAN(dp->mii_xstatus &
+ (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
+ dp->anadv_1000hdx &=
+ BOOLEAN(dp->mii_xstatus &
+ (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
+ dp->anadv_100t4 &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
+ dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
+ dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
+ dp->anadv_10fdx &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
+ dp->anadv_10hdx &= BOOLEAN(dp->mii_status & MII_STATUS_10);
+
+ if (usbgem_mii_init(dp) != USB_SUCCESS) {
+ cmn_err(CE_WARN, "!%s: %s: mii_init failed",
+ dp->name, __func__);
+ goto err_free_memory;
+ }
+
+ /*
+ * initialize kstats including mii statistics
+ */
+#ifdef USBGEM_CONFIG_GLDv3
+#ifdef USBGEM_CONFIG_ND
+ usbgem_nd_setup(dp);
+#endif
+#else
+ if (usbgem_kstat_init(dp) != USB_SUCCESS) {
+ goto err_free_memory;
+ }
+#endif
+
+ /*
+ * Add interrupt to system.
+ */
+#ifdef USBGEM_CONFIG_GLDv3
+ if (ret = mac_register(macp, &dp->mh)) {
+ cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
+ dp->name, ret);
+ goto err_release_stats;
+ }
+ mac_free(macp);
+ macp = NULL;
+#else
+	/* gld_register corrupts driver_private */
+ tmp = ddi_get_driver_private(dip);
+ if (gld_register(dip,
+ (char *)ddi_driver_name(dip), macinfo) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "!%s: %s: gld_register failed",
+ dp->name, __func__);
+ ddi_set_driver_private(dip, tmp);
+ goto err_release_stats;
+ }
+ /* restore driver private */
+ ddi_set_driver_private(dip, tmp);
+#endif /* USBGEM_CONFIG_GLDv3 */
+ if (usb_register_hotplug_cbs(dip,
+ usbgem_suspend, usbgem_resume) != USB_SUCCESS) {
+ cmn_err(CE_WARN,
+ "!%s: %s: failed to register hotplug cbs",
+ dp->name, __func__);
+ goto err_unregister_gld;
+ }
+
+ /* reset mii and start mii link watcher */
+ if (usbgem_mii_start(dp) != USB_SUCCESS) {
+ goto err_unregister_hotplug;
+ }
+
+	/* start tx watchdog watcher */
+ if (usbgem_tx_watcher_start(dp)) {
+ goto err_usbgem_mii_stop;
+ }
+
+ ddi_set_driver_private(dip, (caddr_t)dp);
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: return: success", dp->name, __func__));
+
+ return (dp);
+
+err_usbgem_mii_stop:
+ usbgem_mii_stop(dp);
+
+err_unregister_hotplug:
+ usb_unregister_hotplug_cbs(dip);
+
+err_unregister_gld:
+#ifdef USBGEM_CONFIG_GLDv3
+ mac_unregister(dp->mh);
+#else
+ gld_unregister(macinfo);
+#endif
+
+err_release_stats:
+#ifdef USBGEM_CONFIG_GLDv3
+#ifdef USBGEM_CONFIG_ND
+ /* release NDD resources */
+ usbgem_nd_cleanup(dp);
+#endif
+#else
+ kstat_delete(dp->ksp);
+#endif
+
+err_free_memory:
+ usbgem_free_memory(dp);
+
+err_destroy_locks:
+ cv_destroy(&dp->tx_drain_cv);
+ cv_destroy(&dp->rx_drain_cv);
+ mutex_destroy(&dp->txlock);
+ mutex_destroy(&dp->rxlock);
+ rw_destroy(&dp->dev_state_lock);
+ mutex_destroy(&dp->link_watcher_lock);
+ cv_destroy(&dp->link_watcher_wait_cv);
+ sema_destroy(&dp->hal_op_lock);
+ sema_destroy(&dp->rxfilter_lock);
+
+err_close_pipes:
+ (void) usbgem_close_pipes(dp);
+
+err_unregister_client:
+ usb_client_detach(dp->dip, dp->reg_data);
+
+err_free_private:
+#ifdef USBGEM_CONFIG_GLDv3
+ if (macp) {
+ mac_free(macp);
+ }
+#else
+ gld_mac_free(macinfo);
+#endif
+ kmem_free((caddr_t)dp, USBGEM_LOCAL_DATA_SIZE(gc));
+
+ return (NULL);
+}
+
+int
+usbgem_do_detach(dev_info_t *dip)
+{
+ struct usbgem_dev *dp;
+
+ dp = USBGEM_GET_DEV(dip);
+
+#ifdef USBGEM_CONFIG_GLDv3
+ /* unregister with gld v3 */
+ if (mac_unregister(dp->mh) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+#else
+ /* unregister with gld v2 */
+ if (gld_unregister(dp->macinfo) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+#endif
+ /* unregister with hotplug service */
+ usb_unregister_hotplug_cbs(dip);
+
+	/* stop tx watchdog watcher */
+ usbgem_tx_watcher_stop(dp);
+
+ /* stop the link manager */
+ usbgem_mii_stop(dp);
+
+ /* unregister with usb service */
+ (void) usbgem_free_memory(dp);
+ (void) usbgem_close_pipes(dp);
+ usb_client_detach(dp->dip, dp->reg_data);
+ dp->reg_data = NULL;
+
+ /* unregister with kernel statistics */
+#ifdef USBGEM_CONFIG_GLDv3
+#ifdef USBGEM_CONFIG_ND
+ /* release ndd resources */
+ usbgem_nd_cleanup(dp);
+#endif
+#else
+ /* destroy kstat objects */
+ kstat_delete(dp->ksp);
+#endif
+
+ /* release locks and condition variables */
+ mutex_destroy(&dp->txlock);
+ mutex_destroy(&dp->rxlock);
+ cv_destroy(&dp->tx_drain_cv);
+ cv_destroy(&dp->rx_drain_cv);
+ rw_destroy(&dp->dev_state_lock);
+ mutex_destroy(&dp->link_watcher_lock);
+ cv_destroy(&dp->link_watcher_wait_cv);
+ sema_destroy(&dp->hal_op_lock);
+ sema_destroy(&dp->rxfilter_lock);
+
+ /* release basic memory resources */
+#ifndef USBGEM_CONFIG_GLDv3
+ gld_mac_free(dp->macinfo);
+#endif
+ kmem_free((caddr_t)(dp->private), dp->priv_size);
+ kmem_free((caddr_t)dp, USBGEM_LOCAL_DATA_SIZE(&dp->ugc));
+
+ DPRINTF(2, (CE_CONT, "!%s: %s: return: success",
+ ddi_driver_name(dip), __func__));
+
+ return (DDI_SUCCESS);
+}
+
+int
+usbgem_mod_init(struct dev_ops *dop, char *name)
+{
+#ifdef USBGEM_CONFIG_GLDv3
+ major_t major;
+ major = ddi_name_to_major(name);
+ if (major == DDI_MAJOR_T_NONE) {
+ return (DDI_FAILURE);
+ }
+ mac_init_ops(dop, name);
+#endif
+ return (DDI_SUCCESS);
+}
+
+void
+usbgem_mod_fini(struct dev_ops *dop)
+{
+#ifdef USBGEM_CONFIG_GLDv3
+ mac_fini_ops(dop);
+#endif
+}
+
+int
+usbgem_quiesce(dev_info_t *dip)
+{
+ struct usbgem_dev *dp;
+
+ dp = USBGEM_GET_DEV(dip);
+
+ ASSERT(dp != NULL);
+
+ if (dp->mac_state != MAC_STATE_DISCONNECTED &&
+ dp->mac_state != MAC_STATE_STOPPED) {
+ if (usbgem_hal_stop_chip(dp) != USB_SUCCESS) {
+ (void) usbgem_hal_reset_chip(dp);
+ }
+ }
+
+ /* devo_quiesce() must return DDI_SUCCESS always */
+ return (DDI_SUCCESS);
+}
diff --git a/usr/src/uts/common/io/usbgem/usbgem.h b/usr/src/uts/common/io/usbgem/usbgem.h
new file mode 100644
index 0000000000..80b89a260e
--- /dev/null
+++ b/usr/src/uts/common/io/usbgem/usbgem.h
@@ -0,0 +1,428 @@
+/*
+ * usbgem.h: General USB to Ethernet MAC driver framework
+ * @(#)usbgem.h 1.4 12/02/09
+ * (C) Copyright 2003-2009 Masayuki Murayama KHF04453@nifty.ne.jp
+ */
+
+#ifndef __USBGEM_H__
+#define __USBGEM_H__
+
+#pragma ident "@(#)usbgem.h 1.4 12/02/09"
+
+#ifdef USBGEM_CONFIG_GLDv3
+#include <sys/mac.h>
+#ifndef MAC_VERSION
+#include <sys/mac_provider.h>
+#endif
+#include <sys/mac_ether.h>
+#else
+#include <sys/gld.h>
+#endif /* GLDv3 */
+
+/*
+ * Useful macros and typedefs
+ */
+#define USBGEM_NAME_LEN 32
+
+#define USBGEM_TX_TIMEOUT (drv_usectohz(3*1000000))
+#define USBGEM_TX_TIMEOUT_INTERVAL (drv_usectohz(1*1000000))
+#define USBGEM_LINK_WATCH_INTERVAL (drv_usectohz(1*1000000))
+
+/* general return code */
+#define USBGEM_SUCCESS 0
+#define USBGEM_FAILURE 1
+
+/* return code of usbgem_tx_done */
+#define INTR_RESTART_TX 0x80000000U
+
+struct usbgem_stats {
+ uint32_t intr;
+
+ uint32_t crc;
+ uint32_t errrcv;
+ uint32_t overflow;
+ uint32_t frame;
+ uint32_t missed;
+ uint32_t runt;
+ uint32_t frame_too_long;
+ uint32_t norcvbuf;
+ uint32_t sqe;
+
+ uint32_t collisions;
+ uint32_t first_coll;
+ uint32_t multi_coll;
+ uint32_t excoll;
+ uint32_t xmit_internal_err;
+ uint32_t nocarrier;
+ uint32_t defer;
+ uint32_t errxmt;
+ uint32_t underflow;
+ uint32_t xmtlatecoll;
+ uint32_t noxmtbuf;
+ uint32_t jabber;
+
+
+ uint64_t rbytes;
+ uint64_t obytes;
+ uint64_t rpackets;
+ uint64_t opackets;
+ uint32_t rbcast;
+ uint32_t obcast;
+ uint32_t rmcast;
+ uint32_t omcast;
+ uint32_t rcv_internal_err;
+};
+
+struct mcast_addr {
+ struct ether_addr addr;
+ uint32_t hash;
+};
+
+#define USBGEM_MAXMC 64
+#define USBGEM_MCALLOC (sizeof(struct mcast_addr) * USBGEM_MAXMC)
+
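+/* map a monotonically increasing tx sequence number onto a ring slot */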
+#define SLOT(dp, n) ((n) % (dp)->ugc.usbgc_tx_list_max)
+
+/*
+ * mac soft state
+ */
+struct usbgem_dev {
+ dev_info_t *dip;
+#ifdef USBGEM_CONFIG_GLDv3
+ mac_handle_t mh;
+#else
+ void *macinfo; /* opaque handle for upper layer */
+#endif
+ char name[USBGEM_NAME_LEN];
+
+ /* pointer to usb private data */
+ usb_client_dev_data_t *reg_data;
+
+ /* usb handles */
+ usb_pipe_handle_t default_pipe;
+ usb_pipe_handle_t bulkin_pipe;
+ usb_pipe_handle_t bulkout_pipe;
+ usb_pipe_handle_t intr_pipe;
+
+ /* usb endpoints */
+ usb_ep_descr_t *ep_default;
+ usb_ep_descr_t *ep_bulkin;
+ usb_ep_descr_t *ep_bulkout;
+ usb_ep_descr_t *ep_intr;
+
+ /* usb policies */
+ usb_pipe_policy_t policy_default;
+ usb_pipe_policy_t policy_bulkin;
+ usb_pipe_policy_t policy_bulkout;
+ usb_pipe_policy_t policy_interrupt;
+
+ /* MAC address information */
+ struct ether_addr cur_addr;
+ struct ether_addr dev_addr;
+
+ /* RX state and resource management */
+ kmutex_t rxlock;
+ int rx_busy_cnt;
+ boolean_t rx_active;
+ kcondvar_t rx_drain_cv;
+
+ /* RX buffer management */
+ int rx_buf_len;
+
+ /* TX state and resource management */
+ kmutex_t txlock;
+ int tx_busy_cnt;
+ usb_bulk_req_t *tx_free_list;
+ kcondvar_t tx_drain_cv;
+ clock_t tx_start_time;
+	int	bulkout_timeout;	/* in seconds */
+ int tx_max_packets;
+ int tx_seq_num;
+ int tx_intr_pended;
+
+ /* NIC state from OS view */
+ int nic_state;
+#define NIC_STATE_UNKNOWN 0
+#define NIC_STATE_STOPPED 1
+#define NIC_STATE_INITIALIZED 2
+#define NIC_STATE_ONLINE 3
+
+ /* MAC state from hardware view */
+ int mac_state;
+#define	MAC_STATE_DISCONNECTED	0	/* includes the suspended state */
+#define MAC_STATE_STOPPED 1 /* powered up / buf not initialized */
+#define MAC_STATE_INITIALIZED 2 /* initialized */
+#define MAC_STATE_ONLINE 3 /* working correctly */
+#define MAC_STATE_ERROR 4 /* need to restart nic */
+
+ clock_t fatal_error;
+
+ /* robustness: timer and watchdog */
+ uint_t tx_watcher_stop;
+ kt_did_t tx_watcher_did;
+ kcondvar_t tx_watcher_cv;
+ kmutex_t tx_watcher_lock;
+ clock_t tx_watcher_timeout;
+ clock_t tx_watcher_interval;
+
+	/* MII management */
+ boolean_t anadv_autoneg:1;
+ boolean_t anadv_1000fdx:1;
+ boolean_t anadv_1000hdx:1;
+ boolean_t anadv_100t4:1;
+ boolean_t anadv_100fdx:1;
+ boolean_t anadv_100hdx:1;
+ boolean_t anadv_10fdx:1;
+ boolean_t anadv_10hdx:1;
+ boolean_t anadv_1000t_ms:2;
+ boolean_t anadv_pause:1;
+ boolean_t anadv_asmpause:1;
+ boolean_t mii_advert_ro:1;
+
+ boolean_t full_duplex:1;
+ int speed:3;
+#define USBGEM_SPD_10 0
+#define USBGEM_SPD_100 1
+#define USBGEM_SPD_1000 2
+#define USBGEM_SPD_NUM 3
+ unsigned int flow_control:2;
+#define FLOW_CONTROL_NONE 0
+#define FLOW_CONTROL_SYMMETRIC 1
+#define FLOW_CONTROL_TX_PAUSE 2
+#define FLOW_CONTROL_RX_PAUSE 3
+
+ boolean_t mii_supress_msg:1;
+
+ uint32_t mii_phy_id;
+ uint16_t mii_status;
+ uint16_t mii_advert;
+ uint16_t mii_lpable;
+ uint16_t mii_exp;
+ uint16_t mii_ctl1000;
+ uint16_t mii_stat1000;
+ uint16_t mii_xstatus;
+ int8_t mii_phy_addr; /* must be signed */
+
+ uint16_t mii_status_ro;
+ uint16_t mii_xstatus_ro;
+
+ int mii_state;
+#define MII_STATE_UNKNOWN 0
+#define MII_STATE_RESETTING 1
+#define MII_STATE_AUTONEGOTIATING 2
+#define MII_STATE_AN_DONE 3
+#define MII_STATE_MEDIA_SETUP 4
+#define MII_STATE_LINKUP 5
+#define MII_STATE_LINKDOWN 6
+
+	clock_t	mii_last_check;	/* in ticks */
+	clock_t	mii_timer;	/* in ticks */
+#define MII_RESET_TIMEOUT drv_usectohz(1000*1000)
+#define MII_AN_TIMEOUT drv_usectohz(5000*1000)
+#define MII_LINKDOWN_TIMEOUT drv_usectohz(10000*1000)
+
+	clock_t	mii_interval;	/* in ticks */
+	clock_t	linkup_delay;	/* in ticks */
+
+ uint_t link_watcher_stop;
+ kt_did_t link_watcher_did;
+ kcondvar_t link_watcher_wait_cv;
+ kmutex_t link_watcher_lock;
+
+ krwlock_t dev_state_lock; /* mac_state and nic_state */
+ ksema_t hal_op_lock; /* serialize hw operations */
+ ksema_t drv_op_lock; /* hotplug op lock */
+
+	/* multicast list */
+ ksema_t rxfilter_lock;
+ int mc_count;
+ int mc_count_req;
+ struct mcast_addr *mc_list;
+ int rxmode;
+#define RXMODE_PROMISC 0x01
+#define RXMODE_ALLMULTI_REQ 0x02
+#define RXMODE_MULTI_OVF 0x04
+#define RXMODE_ENABLE 0x08
+#define RXMODE_ALLMULTI (RXMODE_ALLMULTI_REQ | RXMODE_MULTI_OVF)
+#define RXMODE_BITS \
+ "\020" \
+ "\004ENABLE" \
+ "\003MULTI_OVF" \
+ "\002ALLMULTI_REQ" \
+ "\001PROMISC"
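+/*
+ * The string above uses the kernel's %b format: the leading \020 gives
+ * the output base (hexadecimal) and each name is prefixed with its
+ * 1-based bit position in octal.
+ */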
+
+	/* statistics */
+ struct usbgem_stats stats;
+
+ /* pointer to local structure */
+ void *private;
+ int priv_size;
+
+ /* configuration */
+ struct usbgem_conf {
+ /* name */
+ char usbgc_name[USBGEM_NAME_LEN];
+ int usbgc_ppa;
+
+ /* specification on usb */
+ int usbgc_ifnum; /* interface number */
+ int usbgc_alt; /* alternate */
+
+ /* specification on tx engine */
+ int usbgc_tx_list_max;
+
+ /* specification on rx engine */
+ int usbgc_rx_header_len;
+ int usbgc_rx_list_max;
+
+ /* time out parameters */
+ clock_t usbgc_tx_timeout;
+ clock_t usbgc_tx_timeout_interval;
+
+ /* flow control */
+ int usbgc_flow_control;
+
+ /* MII timeout parameters */
+ clock_t usbgc_mii_linkdown_timeout;
+ clock_t usbgc_mii_link_watch_interval;
+ clock_t usbgc_mii_reset_timeout;
+
+ clock_t usbgc_mii_an_watch_interval;
+ clock_t usbgc_mii_an_timeout;
+ clock_t usbgc_mii_an_wait;
+ clock_t usbgc_mii_an_delay;
+
+ /* MII configuration */
+ int usbgc_mii_addr_min;
+ int usbgc_mii_linkdown_action;
+ int usbgc_mii_linkdown_timeout_action;
+#define MII_ACTION_NONE 0
+#define MII_ACTION_RESET 1
+#define MII_ACTION_RSA 2
+ boolean_t usbgc_mii_dont_reset:1;
+ boolean_t usbgc_mii_an_oneshot:1;
+ boolean_t usbgc_mii_hw_link_detection:1;
+ boolean_t usbgc_mii_stop_mac_on_linkdown:1;
+ uint16_t usbgc_mii_an_cmd;
+
+ /* I/O methods */
+
+ /* mac operation */
+ int (*usbgc_attach_chip)(struct usbgem_dev *dp);
+ int (*usbgc_reset_chip)(struct usbgem_dev *dp);
+ int (*usbgc_init_chip)(struct usbgem_dev *dp);
+ int (*usbgc_start_chip)(struct usbgem_dev *dp);
+ int (*usbgc_stop_chip)(struct usbgem_dev *dp);
+ uint32_t (*usbgc_multicast_hash)(struct usbgem_dev *dp,
+ const uint8_t *);
+ int (*usbgc_set_rx_filter)(struct usbgem_dev *dp);
+ int (*usbgc_set_media)(struct usbgem_dev *dp);
+ int (*usbgc_get_stats)(struct usbgem_dev *dp);
+ void (*usbgc_interrupt)(struct usbgem_dev *dp, mblk_t *mp);
+
+		/* packet manipulation */
+		mblk_t *(*usbgc_tx_make_packet)(struct usbgem_dev *dp,
+		    mblk_t *mp);
+		mblk_t *(*usbgc_rx_make_packet)(struct usbgem_dev *dp,
+		    mblk_t *mp);
+
+		/* mii operations */
+		int (*usbgc_mii_probe)(struct usbgem_dev *dp);
+		int (*usbgc_mii_init)(struct usbgem_dev *dp);
+		int (*usbgc_mii_config)(struct usbgem_dev *dp, int *errp);
+		uint16_t (*usbgc_mii_read)(struct usbgem_dev *dp,
+		    uint_t reg, int *errp);
+		void (*usbgc_mii_write)(struct usbgem_dev *dp,
+		    uint_t reg, uint16_t val, int *errp);
+
+ /* jumbo frame */
+ int usbgc_max_mtu;
+ int usbgc_default_mtu;
+ int usbgc_min_mtu;
+ } ugc;
+
+ int misc_flag;
+#define USBGEM_VLAN 0x0001
+ timeout_id_t intr_watcher_id;
+
+ /* buffer size */
+ uint_t mtu;
+
+ /* performance tuning parameters */
+	uint_t txthr;	/* tx fifo threshold */
+	uint_t txmaxdma;	/* tx max dma burst size */
+	uint_t rxthr;	/* rx fifo threshold */
+	uint_t rxmaxdma;	/* rx max dma burst size */
+
+ /* kstat stuff */
+ kstat_t *ksp;
+
+ /* ndd stuff */
+ caddr_t nd_data_p;
+ caddr_t nd_arg_p;
+
+#ifdef USBGEM_DEBUG_LEVEL
+ int tx_cnt;
+#endif
+};
+
+/*
+ * Exported functions
+ */
+int usbgem_ctrl_out(struct usbgem_dev *dp,
+ uint8_t reqt, uint8_t req, uint16_t val, uint16_t ix, uint16_t len,
+ void *bp, int size);
+
+int usbgem_ctrl_in(struct usbgem_dev *dp,
+ uint8_t reqt, uint8_t req, uint16_t val, uint16_t ix, uint16_t len,
+ void *bp, int size);
+
+int usbgem_ctrl_out_val(struct usbgem_dev *dp,
+ uint8_t reqt, uint8_t req, uint16_t val, uint16_t ix, uint16_t len,
+ uint32_t v);
+
+int usbgem_ctrl_in_val(struct usbgem_dev *dp,
+ uint8_t reqt, uint8_t req, uint16_t val, uint16_t ix, uint16_t len,
+ void *valp);
+
+void usbgem_generate_macaddr(struct usbgem_dev *, uint8_t *);
+boolean_t usbgem_get_mac_addr_conf(struct usbgem_dev *);
+int usbgem_mii_probe_default(struct usbgem_dev *);
+int usbgem_mii_init_default(struct usbgem_dev *);
+int usbgem_mii_config_default(struct usbgem_dev *, int *errp);
+void usbgem_mii_update_link(struct usbgem_dev *);
+void usbgem_restart_tx(struct usbgem_dev *);
+boolean_t usbgem_tx_done(struct usbgem_dev *, int);
+void usbgem_receive(struct usbgem_dev *);
+struct usbgem_dev *usbgem_do_attach(dev_info_t *,
+ struct usbgem_conf *, void *, int);
+int usbgem_do_detach(dev_info_t *);
+
+uint32_t usbgem_ether_crc_le(const uint8_t *addr);
+uint32_t usbgem_ether_crc_be(const uint8_t *addr);
+
+int usbgem_resume(dev_info_t *);
+int usbgem_suspend(dev_info_t *);
+int usbgem_quiesce(dev_info_t *);
+
+#ifdef USBGEM_CONFIG_GLDv3
+#if DEVO_REV < 4
+#define USBGEM_STREAM_OPS(dev_ops, attach, detach) \
+ DDI_DEFINE_STREAM_OPS(dev_ops, nulldev, nulldev, attach, detach, \
+ nodev, NULL, D_MP, NULL)
+#else
+#define USBGEM_STREAM_OPS(dev_ops, attach, detach) \
+ DDI_DEFINE_STREAM_OPS(dev_ops, nulldev, nulldev, attach, detach, \
+ nodev, NULL, D_MP, NULL, usbgem_quiesce)
+#endif
+#else
+#define usbgem_getinfo gld_getinfo
+#define usbgem_open gld_open
+#define usbgem_close gld_close
+#define usbgem_wput gld_wput
+#define usbgem_wsrv gld_wsrv
+#define usbgem_rsrv gld_rsrv
+#define usbgem_power NULL
+#endif
+int usbgem_mod_init(struct dev_ops *, char *);
+void usbgem_mod_fini(struct dev_ops *);
+
+#define USBGEM_GET_DEV(dip) \
+ ((struct usbgem_dev *)(ddi_get_driver_private(dip)))
+
+#endif /* __USBGEM_H__ */
diff --git a/usr/src/uts/common/io/usbgem/usbgem_mii.h b/usr/src/uts/common/io/usbgem/usbgem_mii.h
new file mode 100644
index 0000000000..2b4176a340
--- /dev/null
+++ b/usr/src/uts/common/io/usbgem/usbgem_mii.h
@@ -0,0 +1,242 @@
+/*
+ * gem_mii.h: mii header for gem
+ *
+ * Copyright (c) 2002-2007 Masayuki Murayama. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the author nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+#pragma ident "@(#)gem_mii.h 1.4 07/11/30"
+
+/*
+ * gem_mii.h : MII registers
+ */
+#ifndef _GEM_MII_H_
+#define _GEM_MII_H_
+
+#ifdef GEM_CONFIG_GLDv3
+#include <sys/miiregs.h>
+#else
+#define MII_CONTROL 0
+#define MII_STATUS 1
+#define MII_PHYIDH 2
+#define MII_PHYIDL 3
+#define MII_AN_ADVERT 4
+#define MII_AN_LPABLE 5
+#define MII_AN_EXPANSION 6
+#define MII_AN_NXTPGXMIT 7
+#endif /* GEM_CONFIG_GLDv3 */
+
+#define MII_AN_LPANXT 8
+#define MII_MS_CONTROL 9
+#define MII_MS_STATUS 10
+#define MII_XSTATUS 15
+
+/* for 1000BaseT support */
+#define MII_1000TC MII_MS_CONTROL
+#define MII_1000TS MII_MS_STATUS
+#ifndef GEM_CONFIG_GLDv3
+#define MII_CONTROL_RESET 0x8000
+#define MII_CONTROL_LOOPBACK 0x4000
+#define MII_CONTROL_100MB 0x2000
+#define MII_CONTROL_ANE 0x1000
+#define MII_CONTROL_PWRDN 0x0800
+#define MII_CONTROL_ISOLATE 0x0400
+#define MII_CONTROL_RSAN 0x0200
+#define MII_CONTROL_FDUPLEX 0x0100
+#define MII_CONTROL_COLTST 0x0080
+#endif /* !GEM_CONFIG_GLDv3 */
+#define MII_CONTROL_SPEED 0x2040
+
+#define MII_CONTROL_10MB 0x0000
+#define MII_CONTROL_1000MB 0x0040
+
+#define MII_CONTROL_BITS \
+ "\020" \
+ "\020RESET" \
+ "\017LOOPBACK" \
+ "\016100MB" \
+ "\015ANE" \
+ "\014PWRDN" \
+ "\013ISOLATE" \
+ "\012RSAN" \
+ "\011FDUPLEX" \
+ "\010COLTST" \
+ "\0071000M"
+#ifndef GEM_CONFIG_GLDv3
+#define MII_STATUS_100_BASE_T4 0x8000
+#define MII_STATUS_100_BASEX_FD 0x4000
+#define MII_STATUS_100_BASEX 0x2000
+#define MII_STATUS_10_FD 0x1000
+#define MII_STATUS_10 0x0800
+#define MII_STATUS_MFPRMBLSUPR 0x0040
+#define MII_STATUS_ANDONE 0x0020
+#define MII_STATUS_REMFAULT 0x0010
+#define MII_STATUS_CANAUTONEG 0x0008
+#define MII_STATUS_LINKUP 0x0004
+#define MII_STATUS_JABBERING 0x0002
+#define MII_STATUS_EXTENDED 0x0001
+#endif /* !GEM_CONFIG_GLDv3 */
+#define MII_STATUS_XSTATUS 0x0100
+#define MII_STATUS_100_BASE_T2_FD 0x0400
+#define MII_STATUS_100_BASE_T2 0x0200
+
+#define MII_STATUS_ABILITY_TECH \
+ (MII_STATUS_100_BASE_T4 | \
+ MII_STATUS_100_BASEX_FD | \
+ MII_STATUS_100_BASEX | \
+ MII_STATUS_10 | \
+ MII_STATUS_10_FD)
+
+
+#define MII_STATUS_BITS \
+ "\020" \
+ "\020100_BASE_T4" \
+ "\017100_BASEX_FD" \
+ "\016100_BASEX" \
+ "\01510_BASE_FD" \
+ "\01410_BASE" \
+ "\013100_BASE_T2_FD" \
+ "\012100_BASE_T2" \
+ "\011XSTATUS" \
+ "\007MFPRMBLSUPR" \
+ "\006ANDONE" \
+ "\005REMFAULT" \
+ "\004CANAUTONEG" \
+ "\003LINKUP" \
+ "\002JABBERING" \
+ "\001EXTENDED"
+#ifndef GEM_CONFIG_GLDv3
+#define MII_AN_ADVERT_NP 0x8000
+#define MII_AN_ADVERT_REMFAULT 0x2000
+#define MII_AN_ADVERT_SELECTOR 0x001f
+#endif /* !GEM_CONFIG_GLDv3 */
+
+#define MII_ABILITY_ASM_DIR 0x0800 /* for annex 28B */
+#ifndef MII_ABILITY_PAUSE
+#define MII_ABILITY_PAUSE 0x0400 /* for IEEE 802.3x */
+#endif
+#ifndef GEM_CONFIG_GLDv3
+#define MII_ABILITY_100BASE_T4 0x0200
+#define MII_ABILITY_100BASE_TX_FD 0x0100
+#define MII_ABILITY_100BASE_TX 0x0080
+#define MII_ABILITY_10BASE_T_FD 0x0040
+#define MII_ABILITY_10BASE_T 0x0020
+#endif /* !GEM_CONFIG_GLDv3 */
+
+#define MII_AN_LPABLE_NP 0x8000
+
+#define MII_ABILITY_TECH \
+ (MII_ABILITY_100BASE_T4 | \
+ MII_ABILITY_100BASE_TX_FD | \
+ MII_ABILITY_100BASE_TX | \
+ MII_ABILITY_10BASE_T | \
+ MII_ABILITY_10BASE_T_FD)
+
+#define MII_ABILITY_ALL \
+ (MII_AN_ADVERT_REMFAULT | \
+ MII_ABILITY_ASM_DIR | \
+ MII_ABILITY_PAUSE | \
+ MII_ABILITY_TECH)
+
+
+#define MII_ABILITY_BITS \
+ "\020" \
+ "\016REMFAULT" \
+ "\014ASM_DIR" \
+ "\013PAUSE" \
+ "\012100BASE_T4" \
+ "\011100BASE_TX_FD" \
+ "\010100BASE_TX" \
+ "\00710BASE_T_FD" \
+ "\00610BASE_T"
+#ifndef GEM_CONFIG_GLDv3
+#define MII_AN_EXP_PARFAULT 0x0010
+#define MII_AN_EXP_LPCANNXTP 0x0008
+#define MII_AN_EXP_CANNXTPP 0x0004
+#define MII_AN_EXP_PAGERCVD 0x0002
+#define MII_AN_EXP_LPCANAN 0x0001
+#endif /* !GEM_CONFIG_GLDv3 */
+
+#define MII_AN_EXP_BITS \
+ "\020" \
+ "\005PARFAULT" \
+ "\004LPCANNXTP" \
+ "\003CANNXTPP" \
+ "\002PAGERCVD" \
+ "\001LPCANAN"
+
+#define MII_1000TC_TESTMODE 0xe000
+#define MII_1000TC_CFG_EN 0x1000
+#define MII_1000TC_CFG_VAL 0x0800
+#define MII_1000TC_PORTTYPE 0x0400
+#define MII_1000TC_ADV_FULL 0x0200
+#define MII_1000TC_ADV_HALF 0x0100
+
+#define MII_1000TC_BITS \
+ "\020" \
+ "\015CFG_EN" \
+ "\014CFG_VAL" \
+ "\013PORTTYPE" \
+ "\012FULL" \
+ "\011HALF"
+
+#define MII_1000TS_CFG_FAULT 0x8000
+#define MII_1000TS_CFG_MASTER 0x4000
+#define MII_1000TS_LOCALRXOK 0x2000
+#define MII_1000TS_REMOTERXOK 0x1000
+#define MII_1000TS_LP_FULL 0x0800
+#define MII_1000TS_LP_HALF 0x0400
+
+#define MII_1000TS_BITS \
+ "\020" \
+ "\020CFG_FAULT" \
+ "\017CFG_MASTER" \
+ "\014CFG_LOCALRXOK" \
+ "\013CFG_REMOTERXOK" \
+ "\012LP_FULL" \
+ "\011LP_HALF"
+
+#define MII_XSTATUS_1000BASEX_FD 0x8000
+#define MII_XSTATUS_1000BASEX 0x4000
+#define MII_XSTATUS_1000BASET_FD 0x2000
+#define MII_XSTATUS_1000BASET 0x1000
+
+#define MII_XSTATUS_BITS \
+ "\020" \
+ "\0201000BASEX_FD" \
+ "\0171000BASEX" \
+ "\0161000BASET_FD" \
+ "\0151000BASET"
+
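+/*
+ * These macros build 32-bit MII management frames for bit-banged MDIO:
+ * ST(01) OP(10=read, 01=write) PHYAD[4:0] REGAD[4:0] TA(10) DATA[15:0].
+ */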
+#define MII_READ_CMD(p, r) \
+ ((6<<(18+5+5)) | ((p)<<(18+5)) | ((r)<<18))
+
+#define MII_WRITE_CMD(p, r, v) \
+ ((5<<(18+5+5)) | ((p)<<(18+5)) | ((r)<<18) | (2 << 16) | (v))
+
+#endif /* _GEM_MII_H_ */
diff --git a/usr/src/uts/common/io/vioif/vioif.c b/usr/src/uts/common/io/vioif/vioif.c
index 0d1132febc..27241894aa 100644
--- a/usr/src/uts/common/io/vioif/vioif.c
+++ b/usr/src/uts/common/io/vioif/vioif.c
@@ -12,6 +12,7 @@
/*
* Copyright 2013 Nexenta Inc. All rights reserved.
* Copyright (c) 2014, 2015 by Delphix. All rights reserved.
+ * Copyright 2015 Joyent, Inc.
*/
/* Based on the NetBSD virtio driver by Minoura Makoto. */
@@ -285,6 +286,13 @@ struct vioif_softc {
unsigned int sc_tx_csum:1;
unsigned int sc_tx_tso4:1;
+ /*
+ * For debugging, it is useful to know whether the MAC address we
+ * are using came from the host (via VIRTIO_NET_CONFIG_MAC) or
+ * was otherwise generated or set from within the guest.
+ */
+ unsigned int sc_mac_from_host:1;
+
int sc_mtu;
uint8_t sc_mac[ETHERADDRL];
/*
@@ -312,7 +320,10 @@ struct vioif_softc {
/* Copying small packets turns out to be faster then mapping them. */
unsigned long sc_rxcopy_thresh;
unsigned long sc_txcopy_thresh;
- /* Some statistic coming here */
+
+ /*
+ * Statistics visible through mac:
+ */
uint64_t sc_ipackets;
uint64_t sc_opackets;
uint64_t sc_rbytes;
@@ -325,6 +336,18 @@ struct vioif_softc {
uint64_t sc_notxbuf;
uint64_t sc_ierrors;
uint64_t sc_oerrors;
+
+ /*
+ * Internal debugging statistics:
+ */
+ uint64_t sc_rxfail_dma_handle;
+ uint64_t sc_rxfail_dma_buffer;
+ uint64_t sc_rxfail_dma_bind;
+ uint64_t sc_rxfail_chain_undersize;
+ uint64_t sc_rxfail_no_descriptors;
+ uint64_t sc_txfail_dma_handle;
+ uint64_t sc_txfail_dma_bind;
+ uint64_t sc_txfail_indirect_limit;
};
#define ETHER_HEADER_LEN sizeof (struct ether_header)
@@ -474,8 +497,7 @@ vioif_rx_construct(void *buffer, void *user_arg, int kmflags)
if (ddi_dma_alloc_handle(sc->sc_dev, &vioif_mapped_buf_dma_attr,
DDI_DMA_SLEEP, NULL, &buf->rb_mapping.vbm_dmah)) {
- dev_err(sc->sc_dev, CE_WARN,
- "Can't allocate dma handle for rx buffer");
+ sc->sc_rxfail_dma_handle++;
goto exit_handle;
}
@@ -483,8 +505,7 @@ vioif_rx_construct(void *buffer, void *user_arg, int kmflags)
VIOIF_RX_SIZE + sizeof (struct virtio_net_hdr),
&vioif_bufattr, DDI_DMA_STREAMING, DDI_DMA_SLEEP,
NULL, &buf->rb_mapping.vbm_buf, &len, &buf->rb_mapping.vbm_acch)) {
- dev_err(sc->sc_dev, CE_WARN,
- "Can't allocate rx buffer");
+ sc->sc_rxfail_dma_buffer++;
goto exit_alloc;
}
ASSERT(len >= VIOIF_RX_SIZE);
@@ -493,8 +514,7 @@ vioif_rx_construct(void *buffer, void *user_arg, int kmflags)
buf->rb_mapping.vbm_buf, len, DDI_DMA_READ | DDI_DMA_STREAMING,
DDI_DMA_SLEEP, NULL, &buf->rb_mapping.vbm_dmac,
&buf->rb_mapping.vbm_ncookies)) {
- dev_err(sc->sc_dev, CE_WARN, "Can't bind tx buffer");
-
+ sc->sc_rxfail_dma_bind++;
goto exit_bind;
}
@@ -716,27 +736,24 @@ vioif_add_rx(struct vioif_softc *sc, int kmflag)
struct vioif_rx_buf *buf;
ve = vq_alloc_entry(sc->sc_rx_vq);
- if (!ve) {
+ if (ve == NULL) {
/*
* Out of free descriptors - ring already full.
- * It would be better to update sc_norxdescavail
- * but MAC does not ask for this info, hence we
- * update sc_norecvbuf.
*/
+ sc->sc_rxfail_no_descriptors++;
sc->sc_norecvbuf++;
goto exit_vq;
}
buf = sc->sc_rxbufs[ve->qe_index];
- if (!buf) {
+ if (buf == NULL) {
/* First run, allocate the buffer. */
buf = kmem_cache_alloc(sc->sc_rxbuf_cache, kmflag);
sc->sc_rxbufs[ve->qe_index] = buf;
}
/* Still nothing? Bye. */
- if (!buf) {
- dev_err(sc->sc_dev, CE_WARN, "Can't allocate rx buffer");
+ if (buf == NULL) {
sc->sc_norecvbuf++;
goto exit_buf;
}
@@ -789,20 +806,19 @@ static int
vioif_populate_rx(struct vioif_softc *sc, int kmflag)
{
int i = 0;
- int ret;
for (;;) {
- ret = vioif_add_rx(sc, kmflag);
- if (ret)
+ if (vioif_add_rx(sc, kmflag) != DDI_SUCCESS) {
/*
* We could not allocate some memory. Try to work with
* what we've got.
*/
break;
+ }
i++;
}
- if (i)
+ if (i != 0)
virtio_sync_vq(sc->sc_rx_vq);
return (i);
@@ -823,8 +839,7 @@ vioif_process_rx(struct vioif_softc *sc)
ASSERT(buf);
if (len < sizeof (struct virtio_net_hdr)) {
- dev_err(sc->sc_dev, CE_WARN, "RX: Cnain too small: %u",
- len - (uint32_t)sizeof (struct virtio_net_hdr));
+ sc->sc_rxfail_chain_undersize++;
sc->sc_ierrors++;
virtio_free_chain(ve);
continue;
@@ -838,7 +853,7 @@ vioif_process_rx(struct vioif_softc *sc)
*/
if (len < sc->sc_rxcopy_thresh) {
mp = allocb(len, 0);
- if (!mp) {
+ if (mp == NULL) {
sc->sc_norecvbuf++;
sc->sc_ierrors++;
@@ -855,7 +870,7 @@ vioif_process_rx(struct vioif_softc *sc)
buf->rb_mapping.vbm_buf +
sizeof (struct virtio_net_hdr) +
VIOIF_IP_ALIGN, len, 0, &buf->rb_frtn);
- if (!mp) {
+ if (mp == NULL) {
sc->sc_norecvbuf++;
sc->sc_ierrors++;
@@ -901,31 +916,32 @@ vioif_reclaim_used_tx(struct vioif_softc *sc)
struct vioif_tx_buf *buf;
uint32_t len;
mblk_t *mp;
- int i = 0;
+ unsigned chains = 0;
while ((ve = virtio_pull_chain(sc->sc_tx_vq, &len))) {
/* We don't chain descriptors for tx, so don't expect any. */
- ASSERT(!ve->qe_next);
+ ASSERT(ve->qe_next == NULL);
buf = &sc->sc_txbufs[ve->qe_index];
mp = buf->tb_mp;
buf->tb_mp = NULL;
- if (mp) {
- for (i = 0; i < buf->tb_external_num; i++)
+ if (mp != NULL) {
+ for (int i = 0; i < buf->tb_external_num; i++) {
(void) ddi_dma_unbind_handle(
buf->tb_external_mapping[i].vbm_dmah);
+ }
}
virtio_free_chain(ve);
/* External mapping used, mp was not freed in vioif_send() */
- if (mp)
+ if (mp != NULL)
freemsg(mp);
- i++;
+ chains++;
}
- if (sc->sc_tx_stopped && i) {
+ if (sc->sc_tx_stopped != 0 && chains > 0) {
sc->sc_tx_stopped = 0;
mac_tx_update(sc->sc_mac_handle);
}
@@ -962,8 +978,7 @@ vioif_tx_lazy_handle_alloc(struct vioif_softc *sc, struct vioif_tx_buf *buf,
&vioif_mapped_buf_dma_attr, DDI_DMA_SLEEP, NULL,
&buf->tb_external_mapping[i].vbm_dmah);
if (ret != DDI_SUCCESS) {
- dev_err(sc->sc_dev, CE_WARN,
- "Can't allocate dma handle for external tx buffer");
+ sc->sc_txfail_dma_handle++;
}
}
@@ -1017,17 +1032,14 @@ vioif_tx_external(struct vioif_softc *sc, struct vq_entry *ve, mblk_t *mp,
DDI_DMA_SLEEP, NULL, &dmac, &ncookies);
if (ret != DDI_SUCCESS) {
+ sc->sc_txfail_dma_bind++;
sc->sc_oerrors++;
- dev_err(sc->sc_dev, CE_NOTE,
- "TX: Failed to bind external handle");
goto exit_bind;
}
/* Check if we still fit into the indirect table. */
if (virtio_ve_indirect_available(ve) < ncookies) {
- dev_err(sc->sc_dev, CE_NOTE,
- "TX: Indirect descriptor table limit reached."
- " It took %d fragments.", i);
+ sc->sc_txfail_indirect_limit++;
sc->sc_notxbuf++;
sc->sc_oerrors++;
@@ -1086,7 +1098,7 @@ vioif_send(struct vioif_softc *sc, mblk_t *mp)
ve = vq_alloc_entry(sc->sc_tx_vq);
- if (!ve) {
+ if (ve == NULL) {
sc->sc_notxbuf++;
/* Out of free descriptors - try later. */
return (B_FALSE);
@@ -1138,9 +1150,9 @@ vioif_send(struct vioif_softc *sc, mblk_t *mp)
/* meanwhile update the statistic */
if (mp->b_rptr[0] & 0x1) {
if (bcmp(mp->b_rptr, vioif_broadcast, ETHERADDRL) != 0)
- sc->sc_multixmt++;
- else
- sc->sc_brdcstxmt++;
+ sc->sc_multixmt++;
+ else
+ sc->sc_brdcstxmt++;
}
/*
@@ -1202,8 +1214,7 @@ vioif_start(void *arg)
{
struct vioif_softc *sc = arg;
- mac_link_update(sc->sc_mac_handle,
- vioif_link_state(sc));
+ mac_link_update(sc->sc_mac_handle, vioif_link_state(sc));
virtio_start_vq_intr(sc->sc_rx_vq);
@@ -1404,10 +1415,8 @@ vioif_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
case MAC_PROP_PRIVATE:
bzero(valstr, sizeof (valstr));
if (strcmp(pr_name, vioif_txcopy_thresh) == 0) {
-
value = sc->sc_txcopy_thresh;
- } else if (strcmp(pr_name,
- vioif_rxcopy_thresh) == 0) {
+ } else if (strcmp(pr_name, vioif_rxcopy_thresh) == 0) {
value = sc->sc_rxcopy_thresh;
} else {
return;
@@ -1483,7 +1492,6 @@ vioif_show_features(struct vioif_softc *sc, const char *prefix,
bufp += virtio_show_features(features, bufp, bufend - bufp);
*bufp = '\0';
-
/* Using '!' to only CE_NOTE this to the system log. */
dev_err(sc->sc_dev, CE_NOTE, "!%s Vioif (%b)", buf, features,
VIRTIO_NET_FEATURE_BITS);
@@ -1512,8 +1520,8 @@ vioif_dev_features(struct vioif_softc *sc)
sc->sc_virtio.sc_features);
if (!(sc->sc_virtio.sc_features & VIRTIO_F_RING_INDIRECT_DESC)) {
- dev_err(sc->sc_dev, CE_NOTE,
- "Host does not support RING_INDIRECT_DESC, bye.");
+ dev_err(sc->sc_dev, CE_WARN,
+ "Host does not support RING_INDIRECT_DESC. Cannot attach.");
return (DDI_FAILURE);
}
@@ -1535,6 +1543,7 @@ vioif_set_mac(struct vioif_softc *sc)
virtio_write_device_config_1(&sc->sc_virtio,
VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
}
+ sc->sc_mac_from_host = 0;
}
/* Get the mac address out of the hardware, or make up one. */
@@ -1548,8 +1557,7 @@ vioif_get_mac(struct vioif_softc *sc)
&sc->sc_virtio,
VIRTIO_NET_CONFIG_MAC + i);
}
- dev_err(sc->sc_dev, CE_NOTE, "Got MAC address from host: %s",
- ether_sprintf((struct ether_addr *)sc->sc_mac));
+ sc->sc_mac_from_host = 1;
} else {
/* Get a few random bytes */
(void) random_get_pseudo_bytes(sc->sc_mac, ETHERADDRL);
@@ -1561,7 +1569,7 @@ vioif_get_mac(struct vioif_softc *sc)
vioif_set_mac(sc);
dev_err(sc->sc_dev, CE_NOTE,
- "Generated a random MAC address: %s",
+ "!Generated a random MAC address: %s",
ether_sprintf((struct ether_addr *)sc->sc_mac));
}
}
@@ -1624,7 +1632,7 @@ vioif_check_features(struct vioif_softc *sc)
if (!vioif_has_feature(sc, VIRTIO_NET_F_GUEST_CSUM)) {
sc->sc_rx_csum = 0;
}
- cmn_err(CE_NOTE, "Csum enabled.");
+ dev_err(sc->sc_dev, CE_NOTE, "!Csum enabled.");
if (vioif_has_feature(sc, VIRTIO_NET_F_HOST_TSO4)) {
@@ -1638,11 +1646,11 @@ vioif_check_features(struct vioif_softc *sc)
*/
if (!vioif_has_feature(sc, VIRTIO_NET_F_HOST_ECN)) {
dev_err(sc->sc_dev, CE_NOTE,
- "TSO4 supported, but not ECN. "
+ "!TSO4 supported, but not ECN. "
"Not using LSO.");
sc->sc_tx_tso4 = 0;
} else {
- cmn_err(CE_NOTE, "LSO enabled");
+ dev_err(sc->sc_dev, CE_NOTE, "!LSO enabled");
}
}
}
@@ -1766,7 +1774,7 @@ vioif_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
vioif_check_features(sc);
- if (vioif_alloc_mems(sc))
+ if (vioif_alloc_mems(sc) != 0)
goto exit_alloc_mems;
if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
@@ -1854,7 +1862,7 @@ vioif_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
return (DDI_FAILURE);
}
- if (sc->sc_rxloan) {
+ if (sc->sc_rxloan > 0) {
dev_err(devinfo, CE_WARN, "!Some rx buffers are still upstream,"
" not detaching.");
return (DDI_FAILURE);
diff --git a/usr/src/uts/common/io/vnd/frameio.c b/usr/src/uts/common/io/vnd/frameio.c
new file mode 100644
index 0000000000..e4e700fa12
--- /dev/null
+++ b/usr/src/uts/common/io/vnd/frameio.c
@@ -0,0 +1,464 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright (c) 2014, Joyent, Inc. All rights reserved.
+ */
+
+/*
+ * Frame I/O utility functions
+ */
+
+#include <sys/frameio.h>
+
+#include <sys/file.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/sysmacros.h>
+#include <sys/inttypes.h>
+
+static kmem_cache_t *frameio_cache;
+
+int
+frameio_init(void)
+{
+ frameio_cache = kmem_cache_create("frameio_cache",
+ sizeof (frameio_t) + sizeof (framevec_t) * FRAMEIO_NVECS_MAX,
+ 0, NULL, NULL, NULL, NULL, NULL, 0);
+ if (frameio_cache == NULL)
+ return (1);
+
+ return (0);
+}
+
+void
+frameio_fini(void)
+{
+ if (frameio_cache != NULL)
+ kmem_cache_destroy(frameio_cache);
+}
+
+frameio_t *
+frameio_alloc(int kmflags)
+{
+ return (kmem_cache_alloc(frameio_cache, kmflags));
+}
+
+void
+frameio_free(frameio_t *fio)
+{
+ kmem_cache_free(frameio_cache, fio);
+}
+
+/*
+ * Ensure that we don't see any garbage in the framevecs that we're nominally
+ * supposed to work with. Specifically we want to make sure that the buflen and
+ * the address are not zero.
+ */
+static int
+frameio_hdr_check_vecs(frameio_t *fio)
+{
+ int i;
+ for (i = 0; i < fio->fio_nvecs; i++)
+ if (fio->fio_vecs[i].fv_buf == NULL ||
+ fio->fio_vecs[i].fv_buflen == 0)
+ return (EINVAL);
+
+ return (0);
+}
+
+/*
+ * We have to copy in framevec32_t's. To work around the data model issues and
+ * trying not to copy memory we first copy in the framevec32_t data into the
+ * standard fio_vec space. Next we work backwards copying a given framevec32_t
+ * to a temporary framevec_t and then overwrite the frameio_t's data. Note that
+ * it is important that we do this in reverse so as to ensure that we don't
+ * clobber data as the framevec_t is larger than the framevec32_t.
+ */
+static int
+frameio_hdr_copyin_ilp32(frameio_t *fio, const void *addr)
+{
+ framevec32_t *vec32p;
+ framevec_t fv;
+ int i;
+
+ vec32p = (framevec32_t *)&fio->fio_vecs[0];
+
+ if (ddi_copyin(addr, vec32p, sizeof (framevec32_t) * fio->fio_nvecs,
+ 0) != 0)
+ return (EFAULT);
+
+ for (i = fio->fio_nvecs - 1; i >= 0; i--) {
+ fv.fv_buf = (void *)(uintptr_t)vec32p[i].fv_buf;
+ fv.fv_buflen = vec32p[i].fv_buflen;
+ fv.fv_actlen = vec32p[i].fv_actlen;
+ fio->fio_vecs[i].fv_buf = fv.fv_buf;
+ fio->fio_vecs[i].fv_buflen = fv.fv_buflen;
+ fio->fio_vecs[i].fv_actlen = fv.fv_actlen;
+ }
+
+ return (frameio_hdr_check_vecs(fio));
+}
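+
+/*
+ * To illustrate why the backwards walk above matters (assuming, for the
+ * sake of the example, the natural LP64 layouts where
+ * sizeof (framevec32_t) == 12 and sizeof (framevec_t) == 24): with
+ * fio_nvecs == 2, the copied-in 32-bit vectors occupy bytes [0, 24) of
+ * fio_vecs[]. Expanding index 1 first writes bytes [24, 48), touching no
+ * unread 32-bit data; expanding index 0 first would overwrite vec32p[1]
+ * at bytes [12, 24) before it had been read.
+ */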
+
+/*
+ * Copy in a frameio header into fio, which has space for up to max_vecs
+ * vectors. If the frameio contains more vectors than that, EOVERFLOW is
+ * returned. mode should contain information about the data model.
+ */
+int
+frameio_hdr_copyin(frameio_t *fio, int max_vecs, const void *addr, uint_t mode)
+{
+ int model = ddi_model_convert_from(mode & FMODELS);
+ int cpf = mode & FKIOCTL ? FKIOCTL : 0;
+ size_t fsize = model == DDI_MODEL_ILP32 ?
+ sizeof (frameio32_t) : sizeof (frameio_t);
+
+ /*
+ * The start of the header is the same in all data models for the
+ * current version.
+ */
+ if (ddi_copyin(addr, fio, fsize, cpf) != 0)
+ return (EFAULT);
+
+ if (fio->fio_version != FRAMEIO_VERSION_ONE)
+ return (EINVAL);
+
+ if (fio->fio_nvecs > FRAMEIO_NVECS_MAX || fio->fio_nvecs == 0)
+ return (EINVAL);
+
+ if (fio->fio_nvpf == 0)
+ return (EINVAL);
+
+ if (fio->fio_nvecs % fio->fio_nvpf != 0)
+ return (EINVAL);
+
+ if (fio->fio_nvecs > max_vecs)
+ return (EOVERFLOW);
+
+ addr = (void *)((uintptr_t)addr + fsize);
+ if (model == DDI_MODEL_ILP32) {
+ if (cpf != 0)
+ return (EINVAL);
+ return (frameio_hdr_copyin_ilp32(fio, addr));
+ }
+
+ if (ddi_copyin(addr, &fio->fio_vecs[0],
+ sizeof (framevec_t) * fio->fio_nvecs, cpf) != 0)
+ return (EFAULT);
+
+ return (frameio_hdr_check_vecs(fio));
+}
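+
+/*
+ * For example (illustrative only), a header with fio_version ==
+ * FRAMEIO_VERSION_ONE, fio_nvecs == 8 and fio_nvpf == 4 passes the
+ * checks above and describes two frames of four vectors each, while
+ * fio_nvecs == 6 with fio_nvpf == 4 fails the divisibility check with
+ * EINVAL.
+ */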
+
+static mblk_t *
+frameio_allocb(size_t sz)
+{
+ mblk_t *mp;
+
+ mp = allocb(sz, 0);
+ if (mp == NULL)
+ return (NULL);
+
+ mp->b_datap->db_type = M_DATA;
+ return (mp);
+}
+
+static int
+framevec_mblk_read(framevec_t *fv, mblk_t **mpp, int cpf)
+{
+ mblk_t *mp;
+ cpf = cpf != 0 ? FKIOCTL : 0;
+
+ mp = frameio_allocb(fv->fv_buflen);
+
+ if (mp == NULL)
+ return (EAGAIN);
+
+ if (ddi_copyin(fv->fv_buf, mp->b_wptr, fv->fv_buflen,
+ cpf) != 0) {
+ freemsg(mp);
+ return (EFAULT);
+ }
+
+ mp->b_wptr += fv->fv_buflen;
+ *mpp = mp;
+ return (0);
+}
+
+/*
+ * Read a set of frame vectors that make up a single message boundary and return
+ * that as a single message in *mpp that consists of multiple data parts.
+ */
+static int
+frameio_mblk_read(frameio_t *fio, framevec_t *fv, mblk_t **mpp, int cpf)
+{
+ int nparts = fio->fio_nvpf;
+ int part, error;
+ mblk_t *mp;
+
+ *mpp = NULL;
+ cpf = cpf != 0 ? FKIOCTL : 0;
+
+ /*
+ * Construct the initial frame
+ */
+ for (part = 0; part < nparts; part++) {
+ error = framevec_mblk_read(fv, &mp, cpf);
+ if (error != 0) {
+ freemsg(*mpp);
+ return (error);
+ }
+
+ if (*mpp == NULL)
+ *mpp = mp;
+ else
+ linkb(*mpp, mp);
+ fv++;
+ }
+
+ return (0);
+}
+
+/*
+ * Read data from a series of frameio vectors into a message block chain. A
+ * given frameio request has a number of discrete messages divided into
+ * individual vectors based on fio->fio_nvpf. Each discrete message will
+ * be constructed into a message block chain pointed to by b_next.
+ *
+ * If we get an EAGAIN while trying to construct a given message block what we
+ * return depends on what else we've done so far. If we have successfully
+ * completed at least one message then we free everything else we've done so
+ * far and return that. If no messages have been completed we return EAGAIN. If
+ * instead we encounter a different error, say EFAULT, then all of the fv_actlen
+ * entries values are undefined.
+ */
+int
+frameio_mblk_chain_read(frameio_t *fio, mblk_t **mpp, int *nvecs, int cpf)
+{
+ int error = ENOTSUP;
+ int nframes = fio->fio_nvecs / fio->fio_nvpf;
+ int frame;
+ framevec_t *fv;
+ mblk_t *mp, *bmp = NULL;
+
+ /*
+ * Protect against bogus kernel subsystems.
+ */
+ VERIFY(fio->fio_nvecs > 0);
+ VERIFY(fio->fio_nvecs % fio->fio_nvpf == 0);
+
+ *mpp = NULL;
+ cpf = cpf != 0 ? FKIOCTL : 0;
+
+ fv = &fio->fio_vecs[0];
+ for (frame = 0; frame < nframes; frame++) {
+ error = frameio_mblk_read(fio, fv, &mp, cpf);
+ if (error != 0)
+ goto failed;
+
+ if (bmp != NULL)
+ bmp->b_next = mp;
+ else
+ *mpp = mp;
+ bmp = mp;
+ }
+
+ *nvecs = nframes;
+ return (0);
+failed:
+ /*
+ * On EAGAIN we've already taken care of making sure that we have no
+ * leftover messages, eg. they were never linked in.
+ */
+ if (error == EAGAIN) {
+ if (frame != 0)
+ error = 0;
+ if (nvecs != NULL)
+ *nvecs = frame;
+ ASSERT(*mpp != NULL);
+ } else {
+ for (mp = *mpp; mp != NULL; mp = bmp) {
+ bmp = mp->b_next;
+ freemsg(mp);
+ }
+ if (nvecs != NULL)
+ *nvecs = 0;
+ *mpp = NULL;
+ }
+ return (error);
+}
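+
+/*
+ * Usage sketch (illustrative only, error handling elided): a caller that
+ * tolerates partial reads might do the following.
+ *
+ *	mblk_t *chain;
+ *	int nread;
+ *	int err;
+ *
+ *	err = frameio_mblk_chain_read(fio, &chain, &nread, 0);
+ *	if (err == 0) {
+ *		...consume the nread frames linked through b_next...
+ *	} else if (err == EAGAIN) {
+ *		...no complete frame yet; retry later...
+ *	}
+ */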
+
+size_t
+frameio_frame_length(frameio_t *fio, framevec_t *fv)
+{
+ int i;
+ size_t len = 0;
+
+ for (i = 0; i < fio->fio_nvpf; i++, fv++)
+ len += fv->fv_buflen;
+
+ return (len);
+}
+
+/*
+ * Write a portion of an mblk to the current framevec_t.
+ */
+static int
+framevec_write_mblk_part(framevec_t *fv, mblk_t *mp, size_t len, size_t moff,
+ size_t foff, int cpf)
+{
+ ASSERT(len <= MBLKL(mp) - moff);
+ ASSERT(len <= fv->fv_buflen - fv->fv_actlen);
+ cpf = cpf != 0 ? FKIOCTL : 0;
+
+ if (ddi_copyout(mp->b_rptr + moff, fv->fv_buf + foff, len, cpf) != 0)
+ return (EFAULT);
+ fv->fv_actlen += len;
+
+ return (0);
+}
+
+/*
+ * Because copying this out to the user might fail we don't want to update the
+ * b_rptr in case we need to copy it out again.
+ */
+static int
+framevec_map_blk(frameio_t *fio, framevec_t *fv, mblk_t *mp, int cpf)
+{
+ int err;
+ size_t msize, blksize, len, moff, foff;
+
+ msize = msgsize(mp);
+ if (msize > frameio_frame_length(fio, fv))
+ return (EOVERFLOW);
+
+ moff = 0;
+ foff = 0;
+ blksize = MBLKL(mp);
+ fv->fv_actlen = 0;
+ while (msize != 0) {
+ len = MIN(blksize, fv->fv_buflen - fv->fv_actlen);
+ err = framevec_write_mblk_part(fv, mp, len, moff, foff, cpf);
+ if (err != 0)
+ return (err);
+
+ msize -= len;
+ blksize -= len;
+ moff += len;
+ foff += len;
+
+ if (blksize == 0 && msize != 0) {
+ mp = mp->b_cont;
+ ASSERT(mp != NULL);
+ moff = 0;
+ blksize = MBLKL(mp);
+ }
+
+ if (fv->fv_buflen == fv->fv_actlen && msize != 0) {
+ fv++;
+ fv->fv_actlen = 0;
+ foff = 0;
+ }
+ }
+
+ return (0);
+}
+
+int
+frameio_mblk_chain_write(frameio_t *fio, frameio_write_mblk_map_t map,
+ mblk_t *mp, int *nwrite, int cpf)
+{
+ int mcount = 0;
+ int ret = 0;
+
+ if (map != MAP_BLK_FRAME)
+ return (EINVAL);
+
+ while (mp != NULL && mcount < fio->fio_nvecs) {
+ ret = framevec_map_blk(fio, &fio->fio_vecs[mcount], mp, cpf);
+ if (ret != 0)
+ break;
+ mcount += fio->fio_nvpf;
+ mp = mp->b_next;
+ }
+
+ if (ret != 0 && mcount == 0) {
+ if (nwrite != NULL)
+ *nwrite = 0;
+ return (ret);
+ }
+
+ if (nwrite != NULL)
+ *nwrite = mcount / fio->fio_nvpf;
+
+ return (0);
+}
+
+/*
+ * Copy out nframes worth of frameio header data back to userland.
+ */
+int
+frameio_hdr_copyout(frameio_t *fio, int nframes, void *addr, uint_t mode)
+{
+ int i;
+ int model = ddi_model_convert_from(mode & FMODELS);
+ framevec32_t *vec32p;
+ framevec32_t f;
+
+ if (fio->fio_nvecs / fio->fio_nvpf < nframes)
+ return (EINVAL);
+
+ fio->fio_nvecs = nframes * fio->fio_nvpf;
+
+ if (model == DDI_MODEL_NONE) {
+ if (ddi_copyout(fio, addr,
+ sizeof (frameio_t) + fio->fio_nvecs * sizeof (framevec_t),
+ mode & FKIOCTL) != 0)
+ return (EFAULT);
+ return (0);
+ }
+
+ ASSERT(model == DDI_MODEL_ILP32);
+
+ vec32p = (framevec32_t *)&fio->fio_vecs[0];
+ for (i = 0; i < fio->fio_nvecs; i++) {
+ f.fv_buf = (caddr32_t)(uintptr_t)fio->fio_vecs[i].fv_buf;
+ if (fio->fio_vecs[i].fv_buflen > UINT_MAX ||
+ fio->fio_vecs[i].fv_actlen > UINT_MAX)
+ return (EOVERFLOW);
+ f.fv_buflen = fio->fio_vecs[i].fv_buflen;
+ f.fv_actlen = fio->fio_vecs[i].fv_actlen;
+ vec32p[i].fv_buf = f.fv_buf;
+ vec32p[i].fv_buflen = f.fv_buflen;
+ vec32p[i].fv_actlen = f.fv_actlen;
+ }
+
+ if (ddi_copyout(fio, addr,
+ sizeof (frameio32_t) + fio->fio_nvecs * sizeof (framevec32_t),
+ mode & FKIOCTL) != 0)
+ return (EFAULT);
+ return (0);
+}
+
+void
+frameio_mark_consumed(frameio_t *fio, int nframes)
+{
+ int i;
+
+ ASSERT(fio->fio_nvecs / fio->fio_nvpf >= nframes);
+ for (i = 0; i < nframes * fio->fio_nvpf; i++)
+ fio->fio_vecs[i].fv_actlen = fio->fio_vecs[i].fv_buflen;
+}
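+
+/*
+ * Putting the pieces together (illustrative only, error handling
+ * elided): a read-side consumer of this API might proceed roughly as
+ * follows.
+ *
+ *	frameio_t *fio = frameio_alloc(KM_SLEEP);
+ *	int nframes;
+ *
+ *	if (frameio_hdr_copyin(fio, FRAMEIO_NVECS_MAX, uaddr, mode) == 0 &&
+ *	    frameio_mblk_chain_write(fio, MAP_BLK_FRAME, mp, &nframes,
+ *	    mode & FKIOCTL) == 0) {
+ *		(void) frameio_hdr_copyout(fio, nframes, uaddr, mode);
+ *	}
+ *	frameio_free(fio);
+ */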
diff --git a/usr/src/uts/common/io/vnd/vnd.c b/usr/src/uts/common/io/vnd/vnd.c
new file mode 100644
index 0000000000..2abb6f9464
--- /dev/null
+++ b/usr/src/uts/common/io/vnd/vnd.c
@@ -0,0 +1,5800 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2016 Joyent, Inc.
+ */
+
+/*
+ * vnd - virtual (machine) networking datapath
+ *
+ * vnd's purpose is to provide a highly performant data path for Layer 2 network
+ * traffic and exist side by side with an active IP netstack, each servicing
+ * different datalinks. vnd provides many of the same capabilities as the
+ * current TCP/IP stack does and some specific to layer two. Specifically:
+ *
+ * o Use of the DLD fastpath
+ * o Packet capture hooks
+ * o Ability to use hardware capabilities
+ * o Useful interfaces for handling multiple frames
+ *
+ * The following image shows where vnd fits into today's networking stack:
+ *
+ * +---------+----------+----------+
+ * | libdlpi | libvnd | libsocket|
+ * +---------+----------+----------+
+ * | · · VFS |
+ * | VFS · VFS +----------+
+ * | · | sockfs |
+ * +---------+----------+----------+
+ * | | VND | IP |
+ * | +----------+----------+
+ * | DLD/DLS |
+ * +-------------------------------+
+ * | MAC |
+ * +-------------------------------+
+ * | GLDv3 |
+ * +-------------------------------+
+ *
+ * -----------------------------------------
+ * A Tale of Two Devices - DDI Device Basics
+ * -----------------------------------------
+ *
+ * vnd presents itself to userland as a character device; however, it also is a
+ * STREAMS device so that it can interface with dld and the rest of the
+ * networking stack. Users never interface with the STREAMS devices directly and
+ * they are purely an implementation detail of vnd. Opening the STREAMS device
+ * requires kcred and as such userland cannot interact with it or push it onto
+ * the stream head.
+ *
+ * The main vnd character device, /dev/vnd/ctl, is a self-cloning device. Every
+ * clone gets its own minor number; however, minor nodes are not created in the
+ * devices tree for these instances. In this state a user may do two different
+ * things. They may issue ioctls that affect global state or they may issue
+ * ioctls that try to attach it to a given datalink. Once a minor device has
+ * been attached to a datalink, all operations on it are scoped to that context,
+ * therefore subsequent global operations are not permitted.
+ *
+ * A given device can be linked into the /devices and /dev name space via a link
+ * ioctl. That ioctl causes a minor node to be created in /devices and then it
+ * will also appear under /dev/vnd/ due to vnd's sdev plugin. This is similar
+ * to, but simpler than, IP's persistence mechanism.
+ *
+ * ---------------------
+ * Binding to a datalink
+ * ---------------------
+ *
+ * Datalinks are backed by the dld (datalink device) and dls (datalink services)
+ * drivers. These drivers provide a STREAMS device for datalinks on the system
+ * which are exposed through /dev/net. Userland generally manipulates datalinks
+ * through libdlpi. When an IP interface is being plumbed up what actually
+ * happens is that someone does a dlpi_open(3DLPI) of the underlying datalink
+ * and then pushes on the ip STREAMS module with an I_PUSH ioctl. Modules may
+ * then can negotiate with dld and dls to obtain access to various capabilities
+ * and fast paths via a series of STREAMS messages.
+ *
+ * In vnd, we do the same thing, but we leave our STREAMS module as an
+ * implementation detail of the system. We don't want users to be able to
+ * arbitrarily push the vnd STREAMS module onto any stream, so we explicitly
+ * require
+ * kcred to manipulate it. Thus, when a user issues a request to attach a
+ * datalink to a minor instance of the character device, that vnd minor instance
+ * itself does a layered open (ldi_open_by_name(9F)) of the specified datalink.
+ * vnd does that open using the passed in credentials from the ioctl, not kcred.
+ * This ensures that users who don't have permission to open the device
+ * cannot. Once it has been opened, we push on the vnd STREAMS module.
+ *
+ * Once the vnd STREAMS instance has been created for this device, eg. the
+ * I_PUSH ioctl returns, we explicitly send a STREAMS ioctl
+ * (VND_STRIOC_ASSOCIATE) to associate the vnd STREAMS and character devices.
+ * This association begins the STREAM device's initialization. We start up an
+ * asynchronous state machine that takes care of all the different aspects of
+ * plumbing up the device with dld and dls and enabling the MAC fast path. We
+ * need to guarantee to consumers of the character device that by the time their
+ * ioctl returns, the data path has been fully initialized.
+ *
+ * The state progression is fairly linear. There are two general steady states.
+ * The first is VND_S_ONLINE, which means that everything is jacked up and good
+ * to go. The alternative is VND_S_ZOMBIE, which means that the streams device
+ * encountered an error or we have finished tearing it down and the character
+ * device can clean it up. The following is our state progression and the
+ * meaning of each state:
+ *
+ * |
+ * |
+ * V
+ * +---------------+
+ * | VNS_S_INITIAL | This is our initial state. Every
+ * +---------------+ vnd STREAMS device starts here.
+ * | While in this state, only dlpi
+ * | M_PROTO and M_IOCTL messages can be
+ * | sent or received. All STREAMS based
+ * | data messages are dropped.
+ * | We transition out of this state by
+ * | sending a DL_INFO_REQ to obtain
+ * | information about the underlying
+ * | link.
+ * v
+ * +-----------------+
+ * +--<-| VNS_S_INFO_SENT | In this state, we verify and
+ * | +-----------------+ record information about the
+ * | | underlying device. If the device is
+ * | | not suitable, eg. not of type
+ * v | DL_ETHER, then we immediately
+ * | | become a ZOMBIE. To leave this
+ * | | state we request exclusive active
+ * | | access to the device via
+ * v | DL_EXCLUSIVE_REQ.
+ * | v
+ * | +----------------------+
+ * +--<-| VNS_S_EXCLUSIVE_SENT | In this state, we verify whether
+ * | +----------------------+ or not we were able to obtain
+ * | | | exclusive access to the device. If
+ * | | | we were not able to, then we leave,
+ * v | | as that means that something like
+ * | | | IP is already plumbed up on top of
+ * | | | the datalink. We leave this state
+ * | | | by progressing through to the
+ * | | | appropriate DLPI primitive, either
+ * v | | DLPI_ATTACH_REQ or DLPI_BIND_REQ
+ * | | | depending on the style of the
+ * | | | datalink.
+ * | | v
+ * | | +-------------------+
+ * +------ |--<-| VNS_S_ATTACH_SENT | In this state, we verify we were
+ * | | +-------------------+ able to perform a standard DLPI
+ * | | | attach and if so, go ahead and
+ * v | | send a DLPI_BIND_REQ.
+ * | v v
+ * | +-------------------+
+ * +--<-| VNS_S_BIND_SENT | In this state we see the result of
+ * | +-------------------+ our attempt to bind to PPA 0 of the
+ * v | underlying device. Because we're
+ * | | trying to be a layer two datapath,
+ * | | the specific attachment point isn't
+ * | | too important as we're going to
+ * v | have to enable promiscuous mode. We
+ * | | transition out of this by sending
+ * | | our first of four promiscuous mode
+ * | | requests.
+ * v v
+ * | +------------------------+
+ * +--<-| VNS_S_SAP_PROMISC_SENT | In this state we verify that we
+ * | +------------------------+ were able to enable promiscuous
+ * | | mode at the physical level. We
+ * | | transition out of this by enabling
+ * | | multicast and broadcast promiscuous
+ * v | mode.
+ * | v
+ * | +--------------------------+
+ * +--<-| VNS_S_MULTI_PROMISC_SENT | In this state we verify that we
+ * | +--------------------------+ have enabled DL_PROMISC_MULTI and
+ * v | move onto the third promiscuous
+ * | | mode request.
+ * | v
+ * | +----------------------------+
+ * +--<-| VNS_S_RX_ONLY_PROMISC_SENT | In this state we verify that we
+ * | +----------------------------+ enabled RX_ONLY promiscuous mode.
+ * | | We specifically do this as we don't
+ * v | want to receive our own traffic
+ * | | that we'll send out. We leave this
+ * | | state by enabling the final flag
+ * | | DL_PROMISC_FIXUPS.
+ * | v
+ * | +--------------------------+
+ * +--<-| VNS_S_FIXUP_PROMISC_SENT | In this state we verify that we
+ * | +--------------------------+ enabled FIXUP promiscuous mode.
+ * | | We specifically do this as we need
+ * v | to ensure that traffic which is
+ * | | received by being looped back to us
+ * | | correctly has checksums fixed. We
+ * | | leave this state by requesting the
+ * | | dld/dls capabilities that we can
+ * v | process.
+ * | v
+ * | +--------------------+
+ * +--<-| VNS_S_CAPAB_Q_SENT | We loop over the set of
+ * | +--------------------+ capabilities that dld advertised
+ * | | and enable the ones that we currently
+ * v | support for use. See the section
+ * | | later on regarding capabilities
+ * | | for more information. We leave this
+ * | | state by sending an enable request.
+ * v v
+ * | +--------------------+
+ * +--<-| VNS_S_CAPAB_E_SENT | Here we finish all capability
+ * | +--------------------+ initialization. Once finished, we
+ * | | transition to the next state. If
+ * v | the dld fast path is not available,
+ * | | we become a zombie.
+ * | v
+ * | +--------------+
+ * | | VNS_S_ONLINE | This is a vnd STREAMS device's
+ * | +--------------+ steady state. It will normally
+ * | | reside in this state while it is in
+ * | | active use. It will only transition
+ * v | to the next state when the STREAMS
+ * | | device is closed by the character
+ * | | device. In this state, all data
+ * | | flows over the dld fast path.
+ * | v
+ * | +---------------------+
+ * +--->| VNS_S_SHUTTING_DOWN | This vnd state takes care of
+ * | +---------------------+ disabling capabilities and
+ * | | flushing all data. At this point
+ * | | any additional data that we receive
+ * | | will be dropped. We leave this
+ * v | state by trying to remove multicast
+ * | | promiscuity.
+ * | |
+ * | v
+ * | +---------------------------------+
+ * +-->| VNS_S_MULTICAST_PROMISCOFF_SENT | In this state, we check if we have
+ * | +---------------------------------+ successfully removed multicast
+ * | | promiscuous mode. If we have
+ * | | failed, we still carry on but only
+ * | | warn. We leave this state by trying
+ * | | to disable SAP level promiscuous
+ * | | mode.
+ * | v
+ * | +---------------------------+
+ * +-->| VNS_S_SAP_PROMISCOFF_SENT | In this state, we check if we have
+ * | +---------------------------+ successfully removed SAP level
+ * | | promiscuous mode. If we have
+ * | | failed, we still carry on but only
+ * | | warn. Note that we don't worry
+ * | | about either of
+ * | | DL_PROMISC_FIXUPS or
+ * | | DL_PROMISC_RX_ONLY. If these are
+ * | | the only two entries left, then we
+ * | | shouldn't have anything that MAC is
+ * | | doing for us at this point,
+ * | | therefore it's safe for us to
+ * | | proceed to unbind, which is how we
+ * | | leave this state via a
+ * | v DL_UNBIND_REQ.
+ * | +-------------------+
+ * +--->| VNS_S_UNBIND_SENT | Here, we check how the unbind
+ * | +-------------------+ request went. Regardless of its
+ * | | success, we always transition to
+ * | | a zombie state.
+ * | v
+ * | +--------------+
+ * +--->| VNS_S_ZOMBIE | In this state, the vnd STREAMS
+ * +--------------+ device is waiting to finish being
+ * reaped. Because we have no more
+ * ways to receive data it should be
+ * safe to destroy all remaining data
+ * structures.
+ *
+ * If the stream association fails for any reason the state machine reaches
+ * VNS_S_ZOMBIE. A more detailed vnd_errno_t will propagate back through the
+ * STREAMS ioctl to the character device. That will fail the user ioctl and
+ * propagate the vnd_errno_t back to userland. If, on the other hand, the
+ * association succeeds, then the vnd STREAMS device will be fully plumbed up
+ * and ready to transmit and receive message blocks. Consumers will be able to
+ * start using the other cbops(9E) entry points once the attach has fully
+ * finished, which will occur after the original user attach ioctl to the
+ * character device returns.
+ *
+ * It's quite important that we end up sending the full series of STREAMS
+ * messages when tearing down. While it's tempting to say that we should just
+ * rely on the STREAMS device being closed to properly ensure that we have no
+ * more additional data, that's not sufficient due to our use of direct
+ * callbacks. DLS does not ensure that by the time we change the direct
+ * callback (vnd_mac_input) that all callers to it will have been quiesced.
+ * However, it does guarantee that if we disable promiscuous mode ourselves and
+ * we turn off the main data path via DL_UNBIND_REQ that it will work.
+ * Therefore, we make sure to do this ourselves rather than letting DLS/DLD do
+ * it as part of tearing down the STREAMS device. This ensures that we'll
+ * quiesce all data before we destroy our data structures and thus we should
+ * eliminate the race in changing the data function.
+ *
+ * --------------------
+ * General Architecture
+ * --------------------
+ *
+ * There are several different devices and structures in the vnd driver. There
+ * is a per-netstack component, pieces related to the character device that
+ * consumers see, the internal STREAMS device state, and the data queues
+ * themselves. The following ASCII art picture describes their relationships and
+ * some of the major pieces of data that contain them. These are not exhaustive,
+ * e.g. synchronization primitives are left out.
+ *
+ * +----------------+ +-----------------+
+ * | global | | global |
+ * | device list | | netstack list |
+ * | vnd_dev_list | | vnd_nsd_list |
+ * +----------------+ +-----------------+
+ * | |
+ * | v
+ * | +-------------------+ +-------------------+
+ * | | per-netstack data | ---> | per-netstack data | --> ...
+ * | | vnd_pnsd_t | | vnd_pnsd_t |
+ * | | | +-------------------+
+ * | | |
+ * | | netstackid_t ---+----> Netstack ID
+ * | | vnd_pnsd_flags_t -+----> Status flags
+ * | | zoneid_t ---+----> Zone ID for this netstack
+ * | | hook_family_t ---+----> VND IPv4 Hooks
+ * | | hook_family_t ---+----> VND IPv6 Hooks
+ * | | list_t ----+ |
+ * | +------------+------+
+ * | |
+ * | v
+ * | +------------------+ +------------------+
+ * | | character device | ---> | character device | -> ...
+ * +---------->| vnd_dev_t | | vnd_dev_t |
+ * | | +------------------+
+ * | |
+ * | minor_t ---+--> device minor number
+ * | ldi_handle_t ---+--> handle to /dev/net/%datalink
+ * | vnd_dev_flags_t -+--> device flags, non blocking, etc.
+ * | char[] ---+--> name if linked
+ * | vnd_str_t * -+ |
+ * +--------------+---+
+ * |
+ * v
+ * +-------------------------+
+ * | STREAMS device |
+ * | vnd_str_t |
+ * | |
+ * | vnd_str_state_t ---+---> State machine state
+ * | gsqueue_t * ---+---> mblk_t Serialization queue
+ * | vnd_str_stat_t ---+---> per-device kstats
+ * | vnd_str_capab_t ---+----------------------------+
+ * | vnd_data_queue_t ---+ | |
+ * | vnd_data_queue_t -+ | | v
+ * +-------------------+-+---+ +---------------------+
+ * | | | Stream capabilities |
+ * | | | vnd_str_capab_t |
+ * | | | |
+ * | | supported caps <--+-- vnd_capab_flags_t |
+ * | | dld cap handle <--+-- void * |
+ * | | direct tx func <--+-- vnd_dld_tx_t |
+ * | | +---------------------+
+ * | |
+ * +----------------+ +-------------+
+ * | |
+ * v v
+ * +-------------------+ +-------------------+
+ * | Read data queue | | Write data queue |
+ * | vnd_data_queue_t | | vnd_data_queue_t |
+ * | | | |
+ * | size_t ----+--> Current size | size_t ----+--> Current size
+ * | size_t ----+--> Max size | size_t ----+--> Max size
+ * | mblk_t * ----+--> Queue head | mblk_t * ----+--> Queue head
+ * | mblk_t * ----+--> Queue tail | mblk_t * ----+--> Queue tail
+ * +-------------------+ +-------------------+
+ *
+ *
+ * Globally, we maintain two lists. One list contains all of the character
+ * device soft states. The other maintains a list of all our netstack soft
+ * states. Each netstack maintains a list of active devices that have been
+ * associated with a datalink in its netstack.
+ *
+ * Recall that a given minor instance of the character device exists in one of
+ * two modes. It can either be a cloned open of /dev/vnd/ctl, the control node,
+ * or it can be associated with a given datalink. When minor instances are in
+ * the former state, they do not exist in a given vnd_pnsd_t's list of devices.
+ * As part of attaching to a datalink, the given vnd_dev_t will be inserted into
+ * the appropriate vnd_pnsd_t. In addition, this will cause a STREAMS device, a
+ * vnd_str_t, to be created and associated to a vnd_dev_t.
+ *
+ * The character device, and its vnd_dev_t, is the interface to the rest of the
+ * system. The vnd_dev_t keeps track of various aspects like whether various
+ * operations, such as read, write and the frameio ioctls, are considered
+ * blocking or non-blocking in the O_NONBLOCK sense. It also is responsible for
+ * keeping track of things like the name of the device, if any, in /dev. The
+ * vnd_str_t, on the other hand, manages aspects like buffer sizes and the actual
+ * data queues. However, ioctls that manipulate these properties all go through
+ * the vnd_dev_t to its associated vnd_str_t.
+ *
+ * Each of the STREAMS devices, the vnd_str_t, maintains two data queues. One
+ * for frames to transmit (write queue) and one for frames received (read
+ * queue). These data queues have a maximum size and attempting to add data
+ * beyond that maximum size will result in data being dropped. The sizes are
+ * configurable via ioctls VND_IOC_SETTXBUF, VND_IOC_SETRXBUF. Data either sits
+ * in those buffers or has a reservation in those buffers while they are in vnd
+ * and waiting to be consumed by the user or by mac.
+ *
+ * Finally, the vnd_str_t also has a vnd_str_capab_t which we use to manage the
+ * available, negotiated, and currently active features.
+ *
+ * ----------------------
+ * Data Path and gsqueues
+ * ----------------------
+ *
+ * There's a lot of plumbing in vnd to get to the point where we can send data,
+ * but vnd's bread and butter is the data path, so it's worth diving into it in
+ * more detail. Data enters and exits the system from two ends.
+ *
+ * The first end is the vnd consumer. This comes in the form of read and write
+ * system calls as well as the frame I/O ioctls. The read and write system calls
+ * operate on a single frame at a time. Think of a frame as a single message
+ * that has come in off the wire, which may itself comprise multiple mblk_t's
+ * linked together in the kernel. readv(2) and writev(2) have the same
+ * limitations as read(2) and write(2). We enforce this as the system is
+ * required to fill up every uio(9S) buffer before moving onto the next one.
+ * This means that if you have a MTU sized buffer and two frames come in which
+ * are less than half of the MTU, they must fill up the given iovec. Even if we
+ * didn't want to do this, we have no way of informing the supplier of the
+ * iovecs that they were only partially filled or where one frame ends and
+ * another begins. That's life, as such we have frame I/O which solves this
+ * problem. It allows for multiple frames to be consumed as well as for frames
+ * to be broken down into multiple vector components.
+ *
+ * The second end is the mac direct calls. As part of negotiating capabilities
+ * via dld, we give mac a function of ours to call when packets are received
+ * [vnd_mac_input()] and a callback to indicate that flow has been restored
+ * [vnd_mac_flow_control()]. In turn, we also get a function pointer that we can
+ * transmit data with. As part of the contract with mac, mac is allowed to flow
+ * control us by returning a cookie to the transmit function. When that happens,
+ * all outbound traffic is halted until our callback function is called and we
+ * can schedule drains.
+ *
+ * It's worth looking at these in further detail. We'll start with the rx path.
+ *
+ *
+ * |
+ * * . . . packets from gld
+ * |
+ * v
+ * +-------------+
+ * | mac |
+ * +-------------+
+ * |
+ * v
+ * +-------------+
+ * | dld |
+ * +-------------+
+ * |
+ * * . . . dld direct callback
+ * |
+ * v
+ * +---------------+
+ * | vnd_mac_input |
+ * +---------------+
+ * |
+ * v
+ * +---------+ +-------------+
+ * | dropped |<--*---------| vnd_hooks |
+ * | by | . +-------------+
+ * | hooks | . drop probe |
+ * +---------+ kstat bump * . . . Do we have free
+ * | buffer space?
+ * |
+ * no . | . yes
+ * . + .
+ * +---*--+------*-------+
+ * | |
+ * * . . drop probe * . . recv probe
+ * | kstat bump | kstat bump
+ * v |
+ * +---------+ * . . fire pollin
+ * | freemsg | v
+ * +---------+ +-----------------------+
+ * | vnd_str_t`vns_dq_read |
+ * +-----------------------+
+ * ^ ^
+ * +----------+ | | +---------+
+ * | read(9E) |-->-+ +--<--| frameio |
+ * +----------+ +---------+
+ *
+ * The rx path is rather linear. Packets come into us from mac. We always run
+ * them through the various hooks, and if they come out of that, we inspect the
+ * read data queue. If there is not enough space for a packet, we drop it.
+ * Otherwise, we append it to the data queue, and fire read notifications
+ * targetting anyone polling or doing blocking I/O on this device. Those
+ * consumers then drain the head of the data queue.
+ *
+ * The tx path is more complicated due to mac flow control. After any call into
+ * mac, we may have to potentially suspend writes and buffer data for an
+ * arbitrary amount of time. As such, we need to carefully track the total
+ * amount of outstanding data so that we don't waste kernel memory. This is
+ * further complicated by the fact that mac will asynchronously tell us when our
+ * flow has been resumed.
+ *
+ * For data to be able to enter the system, it needs to be able to take a
+ * reservation from the write data queue. Once the reservation has been
+ * obtained, we enter the gsqueue so that we can actually append it. We use
+ * gsqueues (serialization queues) to ensure that packets are manipulated in
+ * order as we deal with the draining and appending packets. We also leverage
+ * its worker thread to help us do draining after mac has restored our flow.
+ *
+ * The following image describes the flow:
+ *
+ * +-----------+ +--------------+ +-------------------------+ +------+
+ * | write(9E) |-->| Space in the |--*--->| gsqueue_enter_one() |-->| Done |
+ * | frameio | | write queue? | . | +->vnd_squeue_tx_append | +------+
+ * +-----------+ +--------------+ . +-------------------------+
+ * | ^ .
+ * | | . reserve space from gsqueue
+ * | | |
+ * queue . . . * | space v
+ * full | * . . . avail +------------------------+
+ * v | | vnd_squeue_tx_append() |
+ * +--------+ +------------+ +------------------------+
+ * | EAGAIN |<--*------| Non-block? |<-+ |
+ * +--------+ . +------------+ | v
+ * . yes v | wait +--------------+
+ * no . .* * . . for | append chain |
+ * +----+ space | to outgoing |
+ * | mblk chain |
+ * from gsqueue +--------------+
+ * | |
+ * | +-------------------------------------------------+
+ * | |
+ * | | yes . . .
+ * v v .
+ * +-----------------------+ +--------------+ . +------+
+ * | vnd_squeue_tx_drain() |--->| mac blocked? |----*---->| Done |
+ * +-----------------------+ +--------------+ +------+
+ * | |
+ * +---------------------------------|---------------------+
+ * | | tx |
+ * | no . . * queue . . *
+ * | flow controlled . | empty * . fire pollout
+ * | . v | if mblk_t's
+ * +-------------+ . +---------------------+ | sent
+ * | set blocked |<----*------| vnd_squeue_tx_one() |--------^-------+
+ * | flags | +---------------------+ |
+ * +-------------+ More data | | | More data |
+ * and limit ^ v * . . and limit ^
+ * not reached . . * | | reached |
+ * +----+ | |
+ * v |
+ * +----------+ +-------------+ +---------------------------+
+ * | mac flow |--------->| remove mac |--->| gsqueue_enter_one() with |
+ * | control | | block flags | | vnd_squeue_tx_drain() and |
+ * | callback | +-------------+ | GSQUEUE_FILL flag, iff |
+ * +----------+ | not already scheduled |
+ * +---------------------------+
+ *
+ * The final path taken for a given write(9E)/frameio ioctl depends on whether
+ * or not the vnd_dev_t is non-blocking. That controls the initial path of
+ * trying to take a reservation in the write data queue. If the device is in
+ * non-blocking mode, we'll return EAGAIN when there is not enough space
+ * available, otherwise, the calling thread blocks on the data queue.
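+ *
+ * In rough pseudo-code (an illustrative sketch only; vnd_dq_reserve()
+ * stands in for whatever reservation primitive the write data queue
+ * provides and is not necessarily the real name):
+ *
+ *	if (!vnd_dq_reserve(wq, msgsize(mp))) {
+ *		if (nonblocking)
+ *			return (EAGAIN);
+ *		...block until the queue has space, then reserve...
+ *	}
+ *	gsqueue_enter_one(sqp, mp, vnd_squeue_tx_append, vsp, flags,
+ *	    VND_SQUEUE_TAG_VND_WRITE);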
+ *
+ * Today when we call into vnd_squeue_tx_drain() we will not try to drain the
+ * entire queue, as that could be quite large and we don't necessarily want to
+ * keep the thread doing the drain occupied until it has finished. Not only
+ * could more data be coming in, but the draining thread could be a userland
+ * thread that has more work to do. We have two limits today. There is an upper
+ * bound on the total amount of data and the total number of mblk_t chains. If
+ * we hit either limit, then we will schedule another drain in the gsqueue and
+ * go from there.
+ *
+ * It's worth taking some time to describe how we interact with gsqueues. vnd
+ * has a gsqueue_set_t for itself. It's important that it has its own set, as
+ * the profile of work that vnd does is different from other sub-systems in the
+ * kernel. When we open a STREAMS device in vnd_s_open, we get a random gsqueue.
+ * Unlike TCP/IP, which uses a gsqueue per TCP connection, we end up
+ * maintaining one for a given device. Because of that, we want to use a
+ * pseudo-random one to try and spread out the load, and picking one at random
+ * is likely to be just as good as any fancy algorithm we might come up with,
+ * especially as any two devices could have radically different transmit
+ * profiles.
+ *
+ * While some of the write path may seem complicated, it does allow us to
+ * maintain an important property. Once we have acknowledged a write(9E) or
+ * frameio ioctl, we will not drop the packet, excepting something like ipf via
+ * the firewall hooks.
+ *
+ * There is one other source of flow control that can exist in the system which
+ * is in the form of a barrier. The barrier is an internal mechanism used for
+ * ensuring that a gsqueue is drained for a given device. We use this as part
+ * of tearing down. Specifically we disable the write path so nothing new can be
+ * inserted into the gsqueue and then insert a barrier block. Once the barrier
+ * block comes out of the gsqueue, we know that nothing remains in the gsqueue
+ * that could refer to the vnd_str_t being destroyed.
+ *
+ * ---------------------
+ * vnd, zones, netstacks
+ * ---------------------
+ *
+ * vnd devices are scoped to datalinks and datalinks are scoped to a netstack.
+ * Because of that, vnd is also a netstack module. It registers with the
+ * netstack sub-system and receives callbacks every time a netstack is created,
+ * being shutdown, and destroyed. The netstack callbacks drive the creation and
+ * destruction of the vnd_pnsd_t structures.
+ *
+ * Recall from the earlier architecture diagrams that every vnd device is scoped
+ * to a netstack and known about by a given vnd_pnsd_t. When that netstack is
+ * torn down, we also tear down any vnd devices that are hanging around. When
+ * the netstack is torn down, we know that any zones that are scoped to that
+ * netstack are being shut down and have no processes remaining. This is going
+ * to be the case whether they are shared or exclusive stack zones. We have to
+ * perform a careful dance.
+ *
+ * There are two different callbacks that happen on tear down, the first is a
+ * shutdown callback, the second is a destroy callback. When the shutdown
+ * callback is fired we need to prepare for the netstack to go away and ensure
+ * that nothing can continue to persist itself.
+ *
+ * More specifically, when we get notice of a stack being shutdown we first
+ * remove the netstack from the global netstack list to ensure that no one new
+ * can come in and find the netstack and get a reference to it. After that, we
+ * notify the neti hooks that they're going away. Once that's all done, we get
+ * to the heart of the matter.
+ *
+ * When shutting down there could be any number of outstanding contexts that
+ * have a reference on the vnd_pnsd_t and on the individual links. However, we
+ * know that no one new will be able to find the vnd_pnsd_t. To account for
+ * things that have existing references we mark the vnd_pnsd_t`vpnd_flags with
+ * VND_NS_CONDEMNED. This is checked by code paths that wish to append a device
+ * to the netstack's list. If this is set, then they must not append to it.
+ * Once this is set, we know that the netstack's list of devices can never grow,
+ * only shrink.
+ *
+ * Next, for each device we tag it with VND_D_ZONE_DYING. This indicates that
+ * the container for the device is being destroyed and that we should not allow
+ * additional references to the device to be created, whether via open, or
+ * linking. The presence of this bit also allows things like the list ioctl and
+ * sdev to know not to consider its existence. At the conclusion of this being
+ * set, we know that no one else should be able to obtain a new reference to the
+ * device.
+ *
+ * Once that has been set for all devices, we go through and remove any existing
+ * links that have been established in sdev. Because doing that may drop the
+ * final reference for the device, which itself still holds a reference to
+ * the netstack, we have to restart our walk due to dropped locks. We know that
+ * this walk will eventually complete because the device cannot be relinked and
+ * no new devices will be attached in this netstack due to VND_NS_CONDEMNED.
+ * Once that's finished, the shutdown callback returns.
+ *
+ * When we reach the destroy callback, we simply wait for references on the
+ * netstack to disappear. Because the zone has been shut down, all processes in
+ * it that have open references have been terminated and reaped. Any threads
+ * that are newly trying to reference it will fail. However, there is one thing
+ * that can halt this that we have no control over, which is the global zone
+ * holding open a reference to the device. In this case the zone halt will hang
+ * in vnd_stack_destroy. Once the last reference is dropped we finish destroying
+ * the netinfo hooks and free the vnd_pnsd_t.
+ *
+ * ----
+ * sdev
+ * ----
+ *
+ * vnd registers a sdev plugin which allows it to dynamically fill out /dev/vnd
+ * for both the global and non-global zones. In any given zone we always supply
+ * a control node via /dev/vnd/ctl. This is the self-cloning node. Each zone
+ * will also have an entry per-link in that zone under /dev/vnd/%datalink, eg.
+ * if a link was named net0, there would be a /dev/vnd/net0. The global zone can
+ * also see every link for every zone, ala /dev/net, under
+ * /dev/vnd/%zonename/%datalink, eg. if a zone named 'turin' had a vnd device
+ * named net0, the global zone would have /dev/vnd/turin/net0.
+ *
+ * The sdev plugin has three interfaces that it supplies back to sdev. One is to
+ * validate that a given node is still valid. The next is a callback from sdev
+ * to say that it is no longer using the node. The third and final one is from
+ * sdev where it asks us to fill a directory. All of the heavy lifting is done
+ * in directory filling and in validation. We opt not to maintain a reference on
+ * the device while there is an sdev node present. This makes the removal of
+ * nodes much simpler and most of the possible failure modes shouldn't cause any
+ * real problems. For example, the open path has to handle both dev_t's which no
+ * longer exist and which are no longer linked.
+ *
+ * -----
+ * hooks
+ * -----
+ *
+ * Like IP, vnd sends all L3 packets through its firewall hooks. Currently vnd
+ * provides these for L3 IP and IPv6 traffic. Each netstack provides these hooks
+ * in a minimal fashion. While we will allow traffic to be filtered through the
+ * hooks, we do not provide means for packet injection or additional inspection
+ * at this time. There are a total of four different events created:
+ *
+ * o IPv4 physical in
+ * o IPv4 physical out
+ * o IPv6 physical in
+ * o IPv6 physical out
+ *
+ * ---------------
+ * Synchronization
+ * ---------------
+ *
+ * To make our synchronization simpler, we've put more effort into making the
+ * metadata/setup paths do more work. That work allows the data paths to make
+ * assumptions around synchronization that simplify the general case. Each major
+ * structure, the vnd_pnsd_t, vnd_dev_t, vnd_str_t, and vnd_data_queue_t is
+ * annotated with the protection that its members receive. The following
+ * annotations are used:
+ *
+ * A Atomics; these values are only modified using atomic operations.
+ * Currently this only applies to kstat values.
+ * E Existence; no lock is needed to access this member, it does not
+ * change while the structure is valid.
+ * GL Global Lock; these members are protected by the global
+ * vnd_dev_lock.
+ * L Locked; access to the member is controlled by a lock that is in
+ * the structure.
+ * NSL netstack lock; this member is protected by the containing
+ * netstack. This only applies to the vnd_dev_t`vdd_nslink.
+ * X This member is special, and is discussed in this section.
+ *
+ * In addition to locking, we also have reference counts on the vnd_dev_t and
+ * the vnd_pnsd_t. The reference counts describe the lifetimes of the structure.
+ * With rare exception, once a reference count is decremented, the consumer
+ * should not assume that the data is valid any more. The only exception to this
+ * is the case where we're removing an extant reference count from a link into
+ * /devices or /dev. Reference counts are obtained on these structures as a part
+ * of looking them up.
+ *
+ * # Global Lock Ordering
+ * ######################
+ *
+ * The following is the order that you must take locks in vnd:
+ *
+ * 1) vnd`vnd_dev_lock
+ * 2) vnd_pnsd_t`vpnd_lock
+ * 3) vnd_dev_t`vdd_lock
+ * 4) vnd_str_t`vns_lock
+ * 5) vnd_data_queue_t`vdq_lock
+ *
+ * One must adhere to the following rules:
+ *
+ * o You must acquire a lower numbered lock before a higher numbered lock.
+ * o It is NOT legal to hold two locks of the same level concurrently, eg. you
+ * cannot hold two different vnd_dev_t's vdd_lock at the same time.
+ * o You may release locks in any order.
+ * o If you release a lock, you must honor the locking rules before acquiring
+ * it again.
+ * o You should not hold any locks when calling any of the rele functions.
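+ *
+ * For example (an illustrative sketch; nsp and vdp are hypothetical
+ * locals), a path that needs both a netstack and one of its devices
+ * takes the locks in increasing level and may drop them in any order:
+ *
+ *	mutex_enter(&nsp->vpnd_lock);	2) netstack
+ *	mutex_enter(&vdp->vdd_lock);	3) device
+ *	...
+ *	mutex_exit(&nsp->vpnd_lock);
+ *	mutex_exit(&vdp->vdd_lock);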
+ *
+ * # Special Considerations
+ * ########################
+ *
+ * While most of the locking is what's expected, it's worth going into the
+ * special nature that a few members hold. Today, only two structures have
+ * special considerations: the vnd_dev_t and the vnd_str_t. All members with
+ * special considerations have an additional annotation that describes how you
+ * should interact with it.
+ *
+ * vnd_dev_t: The vdd_nsd and vdd_cr are only valid when the minor node is
+ * attached or in the process of attaching. If a given code path requires an
+ * attached vnd_dev_t, eg. the data path and tear down path, then it is always
+ * legal to dereference those members without a lock held. When they are added
+ * to the system, they should be set under the vdd_lock as part
+ * of setting the VND_D_ATTACH_INFLIGHT flag. These should not change during the
+ * lifetime of the vnd_dev_t.
+ *
+ * vnd_dev_t: The vdd_ldih is similar to the vdd_nsd and vdd_cr, except that it
+ * always exists as it is a part of the structure. The only time that it's valid
+ * to be using it is during the attach path with the VND_D_ATTACH_INFLIGHT flag
+ * set or during tear down. Outside of those paths which are naturally
+ * serialized, there is no explicit locking around the member.
+ *
+ * vnd_str_t: The vns_dev and vns_nsd work in similar ways. They are not
+ * initially set as part of creating the structure, but are set as part of
+ * responding to the association ioctl. Anything in the data path or metadata
+ * path that requires association may assume that they exist, as we do not kick
+ * off the state machine until they're set.
+ *
+ * vnd_str_t: The vns_drainblk and vns_barrierblk are similarly special. The
+ * members are designed to be used as part of various operations with the
+ * gsqueues. A lock isn't needed to use them, but to work with them, the
+ * appropriate flag in the vnd_str_t`vns_flags must have been set by the current
+ * thread. Otherwise, it is always fair game to refer to their addresses. Their
+ * contents are ignored by vnd, but some members are manipulated by the gsqueue
+ * subsystem.
+ */
+
+#include <sys/conf.h>
+#include <sys/devops.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/open.h>
+#include <sys/ddi.h>
+#include <sys/ethernet.h>
+#include <sys/stropts.h>
+#include <sys/sunddi.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/ksynch.h>
+#include <sys/taskq_impl.h>
+#include <sys/sdt.h>
+#include <sys/debug.h>
+#include <sys/sysmacros.h>
+#include <sys/dlpi.h>
+#include <sys/cred.h>
+#include <sys/id_space.h>
+#include <sys/list.h>
+#include <sys/ctype.h>
+#include <sys/policy.h>
+#include <sys/sunldi.h>
+#include <sys/strsubr.h>
+#include <sys/poll.h>
+#include <sys/neti.h>
+#include <sys/hook.h>
+#include <sys/hook_event.h>
+#include <sys/vlan.h>
+#include <sys/dld.h>
+#include <sys/mac_client.h>
+#include <sys/netstack.h>
+#include <sys/fs/sdev_plugin.h>
+#include <sys/kstat.h>
+#include <sys/atomic.h>
+#include <sys/disp.h>
+#include <sys/random.h>
+#include <sys/gsqueue.h>
+
+#include <inet/ip.h>
+#include <inet/ip6.h>
+
+#include <sys/vnd.h>
+
+/*
+ * Globals
+ */
+static dev_info_t *vnd_dip;
+static taskq_t *vnd_taskq;
+static kmem_cache_t *vnd_str_cache;
+static kmem_cache_t *vnd_dev_cache;
+static kmem_cache_t *vnd_pnsd_cache;
+static id_space_t *vnd_minors;
+static int vnd_list_init = 0;
+static sdev_plugin_hdl_t vnd_sdev_hdl;
+static gsqueue_set_t *vnd_sqset;
+
+static kmutex_t vnd_dev_lock;
+static list_t vnd_dev_list; /* Protected by the vnd_dev_lock */
+static list_t vnd_nsd_list; /* Protected by the vnd_dev_lock */
+
+/*
+ * STREAMS ioctls
+ *
+ * The STREAMS ioctls are internal to vnd. No one else should be seeing them;
+ * as such, they aren't a part of the header file.
+ */
+#define VND_STRIOC (('v' << 24) | ('n' << 16) | ('d' << 8) | 0x80)
+
+/*
+ * Private ioctl to associate a given streams instance with a minor instance of
+ * the character device.
+ */
+#define VND_STRIOC_ASSOCIATE (VND_STRIOC | 0x1)
+
+typedef struct vnd_strioc_associate {
+ minor_t vsa_minor; /* minor device node */
+ netstackid_t vsa_nsid; /* netstack id */
+ vnd_errno_t vsa_errno; /* errno */
+} vnd_strioc_associate_t;
+
+typedef enum vnd_strioc_state {
+ VSS_UNKNOWN = 0,
+ VSS_COPYIN = 1,
+ VSS_COPYOUT = 2,
+} vnd_strioc_state_t;
+
+typedef struct vnd_strioc {
+ vnd_strioc_state_t vs_state;
+ caddr_t vs_addr;
+} vnd_strioc_t;
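+
+/*
+ * As a hedged illustration (not part of the driver proper), a kernel consumer
+ * that has performed a layered open of a vnd stream would issue the associate
+ * ioctl roughly as follows; lh, m, and nsid stand in for the consumer's ldi
+ * handle, minor, and netstack id. The device attach path later in this file
+ * does essentially this:
+ *
+ *     vnd_strioc_associate_t vsa;
+ *     int rval;
+ *
+ *     vsa.vsa_minor = m;
+ *     vsa.vsa_nsid = nsid;
+ *     if (ldi_ioctl(lh, VND_STRIOC_ASSOCIATE, (intptr_t)&vsa, FKIOCTL,
+ *         kcred, &rval) != 0 || vsa.vsa_errno != VND_E_SUCCESS) {
+ *             ... inspect vsa.vsa_errno for the detailed failure ...
+ *     }
+ */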
+
+/*
+ * VND squeue tags. These start at 0x42 so we don't overlap with extent tags,
+ * though really, overlap is, at the end of the day, inevitable.
+ */
+#define VND_SQUEUE_TAG_TX_DRAIN 0x42
+#define VND_SQUEUE_TAG_MAC_FLOW_CONTROL 0x43
+#define VND_SQUEUE_TAG_VND_WRITE 0x44
+#define VND_SQUEUE_TAG_ND_FRAMEIO_WRITE 0x45
+#define VND_SQUEUE_TAG_STRBARRIER 0x46
+
+/*
+ * vnd reserved names. These are names that are reserved by vnd and thus
+ * must not be used by any external program.
+ */
+static char *vnd_reserved_names[] = {
+ "ctl",
+ "zone",
+ NULL
+};
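+
+/*
+ * A minimal sketch of how this table is meant to be consulted when validating
+ * a requested link name; this is illustrative only, and the actual check
+ * belongs to the name validation logic elsewhere in the driver:
+ *
+ *     int i;
+ *
+ *     for (i = 0; vnd_reserved_names[i] != NULL; i++) {
+ *             if (strcmp(name, vnd_reserved_names[i]) == 0)
+ *                     return (B_FALSE);
+ *     }
+ */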
+
+/*
+ * vnd's DTrace probe macros
+ *
+ * DTRACE_VND* are all for a stable provider. We also have an unstable internal
+ * set of probes for reference count manipulation.
+ */
+#define DTRACE_VND3(name, type1, arg1, type2, arg2, type3, arg3) \
+ DTRACE_PROBE3(__vnd_##name, type1, arg1, type2, arg2, type3, arg3);
+
+#define DTRACE_VND4(name, type1, arg1, type2, arg2, type3, arg3, type4, arg4) \
+ DTRACE_PROBE4(__vnd_##name, type1, arg1, type2, arg2, type3, arg3, \
+ type4, arg4);
+
+#define DTRACE_VND5(name, type1, arg1, type2, arg2, type3, arg3, \
+ type4, arg4, type5, arg5) \
+ DTRACE_PROBE5(__vnd_##name, type1, arg1, type2, arg2, type3, arg3, \
+ type4, arg4, type5, arg5);
+
+#define DTRACE_VND_REFINC(vdp) \
+ DTRACE_PROBE2(vnd__ref__inc, vnd_dev_t *, vdp, int, vdp->vdd_ref);
+#define DTRACE_VND_REFDEC(vdp) \
+ DTRACE_PROBE2(vnd__ref__dec, vnd_dev_t *, vdp, int, vdp->vdd_ref);
+
+
+/*
+ * Tunables
+ */
+size_t vnd_vdq_default_size = 1024 * 64; /* 64 KB */
+size_t vnd_vdq_hard_max = 1024 * 1024 * 4; /* 4 MB */
+
+/*
+ * These numbers are designed as per-device tunables that are applied when a new
+ * vnd device is attached. They're a rough stab at what may be a reasonable
+ * amount of work to do in one burst in an squeue.
+ */
+size_t vnd_flush_burst_size = 1520 * 10; /* 10 1500 MTU packets */
+size_t vnd_flush_nburst = 10; /* 10 frames */
+
+/*
+ * Constants related to our sdev plugins
+ */
+#define VND_SDEV_NAME "vnd"
+#define VND_SDEV_ROOT "/dev/vnd"
+#define VND_SDEV_ZROOT "/dev/vnd/zone"
+
+/*
+ * Statistic macros
+ */
+#define VND_STAT_INC(vsp, field, val) \
+ atomic_add_64(&(vsp)->vns_ksdata.field.value.ui64, val)
+#define VND_LATENCY_1MS 1000000
+#define VND_LATENCY_10MS 10000000
+#define VND_LATENCY_100MS 100000000
+#define VND_LATENCY_1S 1000000000
+#define VND_LATENCY_10S 10000000000
+
+/*
+ * Constants for vnd hooks
+ */
+static uint8_t vnd_bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+#define IPV4_MCAST_LEN 3
+static uint8_t vnd_ipv4_mcast[3] = { 0x01, 0x00, 0x5E };
+#define IPV6_MCAST_LEN 2
+static uint8_t vnd_ipv6_mcast[2] = { 0x33, 0x33 };
+
+/*
+ * vnd internal data structures and types
+ */
+
+struct vnd_str;
+struct vnd_dev;
+struct vnd_pnsd;
+
+/*
+ * As part of opening the device stream we need to properly communicate with
+ * our underlying stream. This is a bit of an asynchronous dance and we need
+ * to work with dld to get everything set up. We have to initiate the
+ * conversation with dld and as such we keep track of our state here.
+ */
+typedef enum vnd_str_state {
+ VNS_S_INITIAL = 0,
+ VNS_S_INFO_SENT,
+ VNS_S_EXCLUSIVE_SENT,
+ VNS_S_ATTACH_SENT,
+ VNS_S_BIND_SENT,
+ VNS_S_SAP_PROMISC_SENT,
+ VNS_S_MULTI_PROMISC_SENT,
+ VNS_S_RX_ONLY_PROMISC_SENT,
+ VNS_S_FIXUP_PROMISC_SENT,
+ VNS_S_CAPAB_Q_SENT,
+ VNS_S_CAPAB_E_SENT,
+ VNS_S_ONLINE,
+ VNS_S_SHUTTING_DOWN,
+ VNS_S_MULTICAST_PROMISCOFF_SENT,
+ VNS_S_SAP_PROMISCOFF_SENT,
+ VNS_S_UNBIND_SENT,
+ VNS_S_ZOMBIE
+} vnd_str_state_t;
+
+typedef enum vnd_str_flags {
+ VNS_F_NEED_ZONE = 0x1,
+ VNS_F_TASKQ_DISPATCHED = 0x2,
+ VNS_F_CONDEMNED = 0x4,
+ VNS_F_FLOW_CONTROLLED = 0x8,
+ VNS_F_DRAIN_SCHEDULED = 0x10,
+ VNS_F_BARRIER = 0x20,
+ VNS_F_BARRIER_DONE = 0x40
+} vnd_str_flags_t;
+
+typedef enum vnd_capab_flags {
+ VNS_C_HCKSUM = 0x1,
+ VNS_C_DLD = 0x2,
+ VNS_C_DIRECT = 0x4,
+ VNS_C_HCKSUM_BADVERS = 0x8
+} vnd_capab_flags_t;
+
+/*
+ * Definitions to interact with direct callbacks
+ */
+typedef void (*vnd_rx_t)(struct vnd_str *, mac_resource_t *, mblk_t *,
+ mac_header_info_t *);
+typedef uintptr_t vnd_mac_cookie_t;
+/* DLD Direct capability function */
+typedef int (*vnd_dld_cap_t)(void *, uint_t, void *, uint_t);
+/* DLD Direct tx function */
+typedef vnd_mac_cookie_t (*vnd_dld_tx_t)(void *, mblk_t *, uint64_t, uint16_t);
+/* DLD Direct function to set flow control callback */
+typedef void *(*vnd_dld_set_fcb_t)(void *, void (*)(void *, vnd_mac_cookie_t),
+ void *);
+/* DLD Direct function to see if flow controlled still */
+typedef int (*vnd_dld_is_fc_t)(void *, vnd_mac_cookie_t);
+
+/*
+ * The vnd_str_capab_t is always protected by the vnd_str_t it's a member of.
+ */
+typedef struct vnd_str_capab {
+ vnd_capab_flags_t vsc_flags;
+ t_uscalar_t vsc_hcksum_opts;
+ vnd_dld_cap_t vsc_capab_f;
+ void *vsc_capab_hdl;
+ vnd_dld_tx_t vsc_tx_f;
+ void *vsc_tx_hdl;
+ vnd_dld_set_fcb_t vsc_set_fcb_f;
+ void *vsc_set_fcb_hdl;
+ vnd_dld_is_fc_t vsc_is_fc_f;
+ void *vsc_is_fc_hdl;
+ vnd_mac_cookie_t vsc_fc_cookie;
+ void *vsc_tx_fc_hdl;
+} vnd_str_capab_t;
+
+/*
+ * The vnd_data_queue is a simple construct for storing a series of messages in
+ * a queue.
+ *
+ * See synchronization section of the big theory statement for member
+ * annotations.
+ */
+typedef struct vnd_data_queue {
+ struct vnd_str *vdq_vns; /* E */
+ kmutex_t vdq_lock;
+ kcondvar_t vdq_ready; /* Uses vdq_lock */
+ ssize_t vdq_max; /* L */
+ ssize_t vdq_cur; /* L */
+ mblk_t *vdq_head; /* L */
+ mblk_t *vdq_tail; /* L */
+} vnd_data_queue_t;
+
+typedef struct vnd_str_stat {
+ kstat_named_t vks_rbytes;
+ kstat_named_t vks_rpackets;
+ kstat_named_t vks_obytes;
+ kstat_named_t vks_opackets;
+ kstat_named_t vks_nhookindrops;
+ kstat_named_t vks_nhookoutdrops;
+ kstat_named_t vks_ndlpidrops;
+ kstat_named_t vks_ndataindrops;
+ kstat_named_t vks_ndataoutdrops;
+ kstat_named_t vks_tdrops;
+ kstat_named_t vks_linkname;
+ kstat_named_t vks_zonename;
+ kstat_named_t vks_nmacflow;
+ kstat_named_t vks_tmacflow;
+ kstat_named_t vks_mac_flow_1ms;
+ kstat_named_t vks_mac_flow_10ms;
+ kstat_named_t vks_mac_flow_100ms;
+ kstat_named_t vks_mac_flow_1s;
+ kstat_named_t vks_mac_flow_10s;
+} vnd_str_stat_t;
+
+/*
+ * vnd stream structure
+ *
+ * See synchronization section of the big theory statement for member
+ * annotations.
+ */
+typedef struct vnd_str {
+ kmutex_t vns_lock;
+ kcondvar_t vns_cancelcv; /* Uses vns_lock */
+ kcondvar_t vns_barriercv; /* Uses vns_lock */
+ kcondvar_t vns_stcv; /* Uses vns_lock */
+ vnd_str_state_t vns_state; /* L */
+ vnd_str_state_t vns_laststate; /* L */
+ vnd_errno_t vns_errno; /* L */
+ vnd_str_flags_t vns_flags; /* L */
+ vnd_str_capab_t vns_caps; /* L */
+ taskq_ent_t vns_tqe; /* L */
+ vnd_data_queue_t vns_dq_read; /* E */
+ vnd_data_queue_t vns_dq_write; /* E */
+ mblk_t *vns_dlpi_inc; /* L */
+ queue_t *vns_rq; /* E */
+ queue_t *vns_wq; /* E */
+ queue_t *vns_lrq; /* E */
+ t_uscalar_t vns_dlpi_style; /* L */
+ t_uscalar_t vns_minwrite; /* L */
+ t_uscalar_t vns_maxwrite; /* L */
+ hrtime_t vns_fclatch; /* L */
+ hrtime_t vns_fcupdate; /* L */
+ kstat_t *vns_kstat; /* E */
+ gsqueue_t *vns_squeue; /* E */
+ mblk_t vns_drainblk; /* E + X */
+ mblk_t vns_barrierblk; /* E + X */
+ vnd_str_stat_t vns_ksdata; /* A */
+ size_t vns_nflush; /* L */
+ size_t vns_bsize; /* L */
+ struct vnd_dev *vns_dev; /* E + X */
+ struct vnd_pnsd *vns_nsd; /* E + X */
+} vnd_str_t;
+
+typedef enum vnd_dev_flags {
+ VND_D_ATTACH_INFLIGHT = 0x001,
+ VND_D_ATTACHED = 0x002,
+ VND_D_LINK_INFLIGHT = 0x004,
+ VND_D_LINKED = 0x008,
+ VND_D_CONDEMNED = 0x010,
+ VND_D_ZONE_DYING = 0x020,
+ VND_D_OPENED = 0x040
+} vnd_dev_flags_t;
+
+/*
+ * This represents the data associated with a minor device instance.
+ *
+ * See synchronization section of the big theory statement for member
+ * annotations.
+ */
+typedef struct vnd_dev {
+ kmutex_t vdd_lock;
+ list_node_t vdd_link; /* GL */
+ list_node_t vdd_nslink; /* NSL */
+ int vdd_ref; /* L */
+ vnd_dev_flags_t vdd_flags; /* L */
+ minor_t vdd_minor; /* E */
+ dev_t vdd_devid; /* E */
+ ldi_ident_t vdd_ldiid; /* E */
+ ldi_handle_t vdd_ldih; /* X */
+ cred_t *vdd_cr; /* X */
+ vnd_str_t *vdd_str; /* L */
+ struct pollhead vdd_ph; /* E */
+ struct vnd_pnsd *vdd_nsd; /* E + X */
+ char vdd_datalink[VND_NAMELEN]; /* L */
+ char vdd_lname[VND_NAMELEN]; /* L */
+} vnd_dev_t;
+
+typedef enum vnd_pnsd_flags {
+ VND_NS_CONDEMNED = 0x1
+} vnd_pnsd_flags_t;
+
+/*
+ * Per netstack data structure.
+ *
+ * See synchronization section of the big theory statement for member
+ * annotations.
+ */
+typedef struct vnd_pnsd {
+ list_node_t vpnd_link; /* protected by global dev lock */
+ zoneid_t vpnd_zid; /* E */
+ netstackid_t vpnd_nsid; /* E */
+ boolean_t vpnd_hooked; /* E */
+ net_handle_t vpnd_neti_v4; /* E */
+ hook_family_t vpnd_family_v4; /* E */
+ hook_event_t vpnd_event_in_v4; /* E */
+ hook_event_t vpnd_event_out_v4; /* E */
+ hook_event_token_t vpnd_token_in_v4; /* E */
+ hook_event_token_t vpnd_token_out_v4; /* E */
+ net_handle_t vpnd_neti_v6; /* E */
+ hook_family_t vpnd_family_v6; /* E */
+ hook_event_t vpnd_event_in_v6; /* E */
+ hook_event_t vpnd_event_out_v6; /* E */
+ hook_event_token_t vpnd_token_in_v6; /* E */
+ hook_event_token_t vpnd_token_out_v6; /* E */
+ kmutex_t vpnd_lock; /* Protects remaining members */
+ kcondvar_t vpnd_ref_change; /* Uses vpnd_lock */
+ int vpnd_ref; /* L */
+ vnd_pnsd_flags_t vpnd_flags; /* L */
+ list_t vpnd_dev_list; /* L */
+} vnd_pnsd_t;
+
+static void vnd_squeue_tx_drain(void *, mblk_t *, gsqueue_t *, void *);
+
+/*
+ * Drop function signature.
+ */
+typedef void (*vnd_dropper_f)(vnd_str_t *, mblk_t *, const char *);
+
+static void
+vnd_drop_ctl(vnd_str_t *vsp, mblk_t *mp, const char *reason)
+{
+ DTRACE_VND4(drop__ctl, mblk_t *, mp, vnd_str_t *, vsp, mblk_t *,
+ mp, const char *, reason);
+ if (mp != NULL) {
+ freemsg(mp);
+ }
+ VND_STAT_INC(vsp, vks_ndlpidrops, 1);
+ VND_STAT_INC(vsp, vks_tdrops, 1);
+}
+
+static void
+vnd_drop_in(vnd_str_t *vsp, mblk_t *mp, const char *reason)
+{
+ DTRACE_VND4(drop__in, mblk_t *, mp, vnd_str_t *, vsp, mblk_t *,
+ mp, const char *, reason);
+ if (mp != NULL) {
+ freemsg(mp);
+ }
+ VND_STAT_INC(vsp, vks_ndataindrops, 1);
+ VND_STAT_INC(vsp, vks_tdrops, 1);
+}
+
+static void
+vnd_drop_out(vnd_str_t *vsp, mblk_t *mp, const char *reason)
+{
+ DTRACE_VND4(drop__out, mblk_t *, mp, vnd_str_t *, vsp, mblk_t *,
+ mp, const char *, reason);
+ if (mp != NULL) {
+ freemsg(mp);
+ }
+ VND_STAT_INC(vsp, vks_ndataoutdrops, 1);
+ VND_STAT_INC(vsp, vks_tdrops, 1);
+}
+
+static void
+vnd_drop_hook_in(vnd_str_t *vsp, mblk_t *mp, const char *reason)
+{
+ DTRACE_VND4(drop__in, mblk_t *, mp, vnd_str_t *, vsp, mblk_t *,
+ mp, const char *, reason);
+ if (mp != NULL) {
+ freemsg(mp);
+ }
+ VND_STAT_INC(vsp, vks_nhookindrops, 1);
+ VND_STAT_INC(vsp, vks_tdrops, 1);
+}
+
+static void
+vnd_drop_hook_out(vnd_str_t *vsp, mblk_t *mp, const char *reason)
+{
+ DTRACE_VND4(drop__out, mblk_t *, mp, vnd_str_t *, vsp, mblk_t *,
+ mp, const char *, reason);
+ if (mp != NULL) {
+ freemsg(mp);
+ }
+ VND_STAT_INC(vsp, vks_nhookoutdrops, 1);
+ VND_STAT_INC(vsp, vks_tdrops, 1);
+}
+
+static void
+vnd_drop_panic(vnd_str_t *vsp, mblk_t *mp, const char *reason)
+{
+ panic("illegal vnd drop");
+}
+
+static void
+vnd_mac_drop_input(vnd_str_t *vsp, mac_resource_t *unused, mblk_t *mp_chain,
+ mac_header_info_t *mhip)
+{
+ mblk_t *mp;
+
+ while (mp_chain != NULL) {
+ mp = mp_chain;
+ mp_chain = mp->b_next;
+ vnd_drop_hook_in(vsp, mp, "stream not associated");
+ }
+}
+
+static vnd_pnsd_t *
+vnd_nsd_lookup(netstackid_t nsid)
+{
+ vnd_pnsd_t *nsp;
+
+ mutex_enter(&vnd_dev_lock);
+ for (nsp = list_head(&vnd_nsd_list); nsp != NULL;
+ nsp = list_next(&vnd_nsd_list, nsp)) {
+ if (nsp->vpnd_nsid == nsid) {
+ mutex_enter(&nsp->vpnd_lock);
+ VERIFY(nsp->vpnd_ref >= 0);
+ nsp->vpnd_ref++;
+ mutex_exit(&nsp->vpnd_lock);
+ break;
+ }
+ }
+ mutex_exit(&vnd_dev_lock);
+ return (nsp);
+}
+
+static vnd_pnsd_t *
+vnd_nsd_lookup_by_zid(zoneid_t zid)
+{
+ netstack_t *ns;
+ vnd_pnsd_t *nsp;
+ ns = netstack_find_by_zoneid(zid);
+ if (ns == NULL)
+ return (NULL);
+ nsp = vnd_nsd_lookup(ns->netstack_stackid);
+ netstack_rele(ns);
+ return (nsp);
+}
+
+static vnd_pnsd_t *
+vnd_nsd_lookup_by_zonename(char *zname)
+{
+ zone_t *zonep;
+ vnd_pnsd_t *nsp;
+
+ zonep = zone_find_by_name(zname);
+ if (zonep == NULL)
+ return (NULL);
+
+ nsp = vnd_nsd_lookup_by_zid(zonep->zone_id);
+ zone_rele(zonep);
+ return (nsp);
+}
+
+static void
+vnd_nsd_ref(vnd_pnsd_t *nsp)
+{
+ mutex_enter(&nsp->vpnd_lock);
+ /*
+ * This can only be used on something that has been obtained through
+ * some other means. As such, the caller should already have a reference
+ * before adding another one. This function should not be used as a
+ * means of creating the initial reference.
+ */
+ VERIFY(nsp->vpnd_ref > 0);
+ nsp->vpnd_ref++;
+ mutex_exit(&nsp->vpnd_lock);
+ cv_broadcast(&nsp->vpnd_ref_change);
+}
+
+static void
+vnd_nsd_rele(vnd_pnsd_t *nsp)
+{
+ mutex_enter(&nsp->vpnd_lock);
+ VERIFY(nsp->vpnd_ref > 0);
+ nsp->vpnd_ref--;
+ mutex_exit(&nsp->vpnd_lock);
+ cv_broadcast(&nsp->vpnd_ref_change);
+}
+
+static vnd_dev_t *
+vnd_dev_lookup(minor_t m)
+{
+ vnd_dev_t *vdp;
+ mutex_enter(&vnd_dev_lock);
+ for (vdp = list_head(&vnd_dev_list); vdp != NULL;
+ vdp = list_next(&vnd_dev_list, vdp)) {
+ if (vdp->vdd_minor == m) {
+ mutex_enter(&vdp->vdd_lock);
+ VERIFY(vdp->vdd_ref > 0);
+ vdp->vdd_ref++;
+ DTRACE_VND_REFINC(vdp);
+ mutex_exit(&vdp->vdd_lock);
+ break;
+ }
+ }
+ mutex_exit(&vnd_dev_lock);
+ return (vdp);
+}
+
+static void
+vnd_dev_free(vnd_dev_t *vdp)
+{
+ /*
+ * When the STREAM exists we need to go through and make sure
+ * communication gets torn down. As part of closing the stream, we
+ * guarantee that nothing else should be able to enter the stream layer
+ * at this point. That means no one should be able to call
+ * read(), write(), or one of the frameio ioctls.
+ */
+ if (vdp->vdd_flags & VND_D_ATTACHED) {
+ ldi_close(vdp->vdd_ldih, FREAD | FWRITE, vdp->vdd_cr);
+ crfree(vdp->vdd_cr);
+ vdp->vdd_cr = NULL;
+
+ /*
+ * We have to remove ourselves from our parent's list now. It is
+ * really quite important that we have already set the condemned
+ * flag here so that our containing netstack basically knows
+ * that we're on the way down and knows not to wait for us. It's
+ * also important that we do that before we put a rele on the
+ * device as that is the point at which it will check again.
+ */
+ mutex_enter(&vdp->vdd_nsd->vpnd_lock);
+ list_remove(&vdp->vdd_nsd->vpnd_dev_list, vdp);
+ mutex_exit(&vdp->vdd_nsd->vpnd_lock);
+ vnd_nsd_rele(vdp->vdd_nsd);
+ vdp->vdd_nsd = NULL;
+ }
+ ASSERT(vdp->vdd_flags & VND_D_CONDEMNED);
+ id_free(vnd_minors, vdp->vdd_minor);
+ mutex_destroy(&vdp->vdd_lock);
+ kmem_cache_free(vnd_dev_cache, vdp);
+}
+
+static void
+vnd_dev_ref(vnd_dev_t *vdp)
+{
+ mutex_enter(&vdp->vdd_lock);
+ VERIFY(vdp->vdd_ref > 0);
+ vdp->vdd_ref++;
+ DTRACE_VND_REFINC(vdp);
+ mutex_exit(&vdp->vdd_lock);
+}
+
+/*
+ * As part of releasing the hold on this we may tear down a given vnd_dev_t. As
+ * such, we need to make sure that we grab the list lock first before grabbing
+ * the vnd_dev_t's lock to ensure proper lock ordering.
+ */
+static void
+vnd_dev_rele(vnd_dev_t *vdp)
+{
+ mutex_enter(&vnd_dev_lock);
+ mutex_enter(&vdp->vdd_lock);
+ VERIFY(vdp->vdd_ref > 0);
+ vdp->vdd_ref--;
+ DTRACE_VND_REFDEC(vdp);
+ if (vdp->vdd_ref > 0) {
+ mutex_exit(&vdp->vdd_lock);
+ mutex_exit(&vnd_dev_lock);
+ return;
+ }
+
+ /*
+ * Mark the device condemned and remove it from the global list. Once
+ * it is off the list, no one else can find this device and reference
+ * it. As its reference count is zero, it by definition does not have
+ * any remaining entries in /devices that could lead someone back to
+ * this.
+ */
+ vdp->vdd_flags |= VND_D_CONDEMNED;
+ list_remove(&vnd_dev_list, vdp);
+ mutex_exit(&vdp->vdd_lock);
+ mutex_exit(&vnd_dev_lock);
+
+ vnd_dev_free(vdp);
+}
+
+/*
+ * Insert a message block chain if there's space; otherwise, drop it. Return
+ * one if we inserted something, as anyone waiting for data may now find it,
+ * e.g. the caller should consider a broadcast.
+ */
+static int
+vnd_dq_push(vnd_data_queue_t *vqp, mblk_t *mp, boolean_t reserved,
+ vnd_dropper_f dropf)
+{
+ size_t msize;
+
+ ASSERT(MUTEX_HELD(&vqp->vdq_lock));
+ if (reserved == B_FALSE) {
+ msize = msgsize(mp);
+ if (vqp->vdq_cur + msize > vqp->vdq_max) {
+ dropf(vqp->vdq_vns, mp, "buffer full");
+ return (0);
+ }
+ vqp->vdq_cur += msize;
+ }
+
+ if (vqp->vdq_head == NULL) {
+ ASSERT(vqp->vdq_tail == NULL);
+ vqp->vdq_head = mp;
+ vqp->vdq_tail = mp;
+ } else {
+ vqp->vdq_tail->b_next = mp;
+ vqp->vdq_tail = mp;
+ }
+
+ return (1);
+}
+
+/*
+ * Remove a message block chain. If the amount of space in the buffer
+ * has changed we return 1. We have no way of knowing whether or not there is
+ * enough space overall for a given writer who is blocked, so we always end up
+ * having to return true and thus tell consumers that they should consider
+ * signalling.
+ */
+static int
+vnd_dq_pop(vnd_data_queue_t *vqp, mblk_t **mpp)
+{
+ size_t msize;
+ mblk_t *mp;
+
+ ASSERT(MUTEX_HELD(&vqp->vdq_lock));
+ ASSERT(mpp != NULL);
+ if (vqp->vdq_head == NULL) {
+ ASSERT(vqp->vdq_tail == NULL);
+ *mpp = NULL;
+ return (0);
+ }
+
+ mp = vqp->vdq_head;
+ msize = msgsize(mp);
+
+ vqp->vdq_cur -= msize;
+ if (mp->b_next == NULL) {
+ vqp->vdq_head = NULL;
+ vqp->vdq_tail = NULL;
+ /*
+ * We can't be certain that this is always going to be zero.
+ * Someone may have taken a reservation of space on the data
+ * queue, e.g. claimed space but not yet pushed anything on.
+ */
+ ASSERT(vqp->vdq_cur >= 0);
+ } else {
+ vqp->vdq_head = mp->b_next;
+ ASSERT(vqp->vdq_cur > 0);
+ }
+ mp->b_next = NULL;
+ *mpp = mp;
+ return (1);
+}
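+
+/*
+ * A hedged sketch of the consumer side of the queue: a blocking read path can
+ * combine vnd_dq_pop with the vdq_ready condition variable roughly as follows
+ * (illustrative only; the real read paths come later in this file):
+ *
+ *     mutex_enter(&vqp->vdq_lock);
+ *     while (vnd_dq_pop(vqp, &mp) == 0)
+ *             cv_wait(&vqp->vdq_ready, &vqp->vdq_lock);
+ *     mutex_exit(&vqp->vdq_lock);
+ */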
+
+/*
+ * Reserve space in the queue. This will bump up the size of the queue and
+ * entitle the user to push something on later without bumping the space.
+ */
+static int
+vnd_dq_reserve(vnd_data_queue_t *vqp, ssize_t size)
+{
+ ASSERT(MUTEX_HELD(&vqp->vdq_lock));
+ ASSERT(size >= 0);
+
+ if (size == 0)
+ return (0);
+
+ if (size + vqp->vdq_cur > vqp->vdq_max)
+ return (0);
+
+ vqp->vdq_cur += size;
+ return (1);
+}
+
+static void
+vnd_dq_unreserve(vnd_data_queue_t *vqp, ssize_t size)
+{
+ ASSERT(MUTEX_HELD(&vqp->vdq_lock));
+ ASSERT(size > 0);
+ ASSERT(size <= vqp->vdq_cur);
+
+ vqp->vdq_cur -= size;
+}
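+
+/*
+ * To show how the reservation interfaces compose, a writer that wants to claim
+ * space before building a message might do the following (a sketch; the names
+ * are illustrative). Pushing with the reserved argument set to B_TRUE skips
+ * the size accounting that the reservation already performed:
+ *
+ *     mutex_enter(&vqp->vdq_lock);
+ *     if (vnd_dq_reserve(vqp, size) == 0) {
+ *             mutex_exit(&vqp->vdq_lock);
+ *             return (ENOSPC);
+ *     }
+ *     mutex_exit(&vqp->vdq_lock);
+ *     ... build mp of at most size bytes; vnd_dq_unreserve on failure ...
+ *     mutex_enter(&vqp->vdq_lock);
+ *     (void) vnd_dq_push(vqp, mp, B_TRUE, dropf);
+ *     mutex_exit(&vqp->vdq_lock);
+ */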
+
+static void
+vnd_dq_flush(vnd_data_queue_t *vqp, vnd_dropper_f dropf)
+{
+ mblk_t *mp, *next;
+
+ mutex_enter(&vqp->vdq_lock);
+ for (mp = vqp->vdq_head; mp != NULL; mp = next) {
+ next = mp->b_next;
+ mp->b_next = NULL;
+ dropf(vqp->vdq_vns, mp, "vnd_dq_flush");
+ }
+ vqp->vdq_cur = 0;
+ vqp->vdq_head = NULL;
+ vqp->vdq_tail = NULL;
+ mutex_exit(&vqp->vdq_lock);
+}
+
+static boolean_t
+vnd_dq_is_empty(vnd_data_queue_t *vqp)
+{
+ boolean_t ret;
+
+ mutex_enter(&vqp->vdq_lock);
+ if (vqp->vdq_head == NULL)
+ ret = B_TRUE;
+ else
+ ret = B_FALSE;
+ mutex_exit(&vqp->vdq_lock);
+
+ return (ret);
+}
+
+/*
+ * Get a network uint16_t from the message and translate it into something the
+ * host understands.
+ */
+static int
+vnd_mbc_getu16(mblk_t *mp, off_t off, uint16_t *out)
+{
+ size_t mpsize;
+ uint8_t *bp;
+
+ mpsize = msgsize(mp);
+ /* Bounds check: both bytes must lie within the message */
+ if (off + sizeof (uint16_t) > mpsize)
+ return (1);
+
+ mpsize = MBLKL(mp);
+ while (off >= mpsize) {
+ mp = mp->b_cont;
+ off -= mpsize;
+ mpsize = MBLKL(mp);
+ }
+
+ /*
+ * Data is in network order. Note the second byte of data might be in
+ * the next mp.
+ */
+ bp = mp->b_rptr + off;
+ *out = *bp << 8;
+ if (off + 1 == mpsize) {
+ mp = mp->b_cont;
+ bp = mp->b_rptr;
+ } else {
+ bp++;
+ }
+
+ *out |= *bp;
+ return (0);
+}
+
+/*
+ * Given an mblk chain, find the mblk containing a particular offset and the
+ * address of that offset within it.
+ */
+static int
+vnd_mbc_getoffset(mblk_t *mp, off_t off, mblk_t **mpp, uintptr_t *offp)
+{
+ size_t mpsize;
+
+ if (off >= msgsize(mp))
+ return (1);
+
+ mpsize = MBLKL(mp);
+ while (off >= mpsize) {
+ mp = mp->b_cont;
+ off -= mpsize;
+ mpsize = MBLKL(mp);
+ }
+ *mpp = mp;
+ *offp = (uintptr_t)mp->b_rptr + off;
+
+ return (0);
+}
+
+/*
+ * Fetch the destination mac address. Set *dstpp to point to it. If the address
+ * is not contiguous in the first mblk_t, copy it into datap and set *dstpp to
+ * datap instead.
+ */
+static int
+vnd_mbc_getdstmac(mblk_t *mp, uint8_t **dstpp, uint8_t *datap)
+{
+ int i;
+
+ if (MBLKL(mp) >= ETHERADDRL) {
+ *dstpp = mp->b_rptr;
+ return (0);
+ }
+
+ *dstpp = datap;
+ for (i = 0; i < ETHERADDRL; i += 2, datap += 2) {
+ if (vnd_mbc_getu16(mp, i, (uint16_t *)datap) != 0)
+ return (1);
+ }
+
+ return (0);
+}
+
+static int
+vnd_hook(vnd_str_t *vsp, mblk_t **mpp, net_handle_t netiv4, hook_event_t hev4,
+ hook_event_token_t hetv4, net_handle_t netiv6, hook_event_t hev6,
+ hook_event_token_t hetv6, vnd_dropper_f hdrop, vnd_dropper_f ddrop)
+{
+ uint16_t etype;
+ int vlan = 0;
+ hook_pkt_event_t info;
+ size_t offset, mblen;
+ uint8_t *dstp;
+ uint8_t dstaddr[6];
+ hook_event_t he;
+ hook_event_token_t het;
+ net_handle_t neti;
+
+ /*
+ * Before we can ask if we're interested we have to do enough work to
+ * determine the ethertype.
+ */
+
+ /* Byte 12 is either the VLAN tag or the ethertype */
+ if (vnd_mbc_getu16(*mpp, 12, &etype) != 0) {
+ ddrop(vsp, *mpp, "packet has incomplete ethernet header");
+ *mpp = NULL;
+ return (1);
+ }
+
+ if (etype == ETHERTYPE_VLAN) {
+ vlan = 1;
+ /* Actual ethertype is another four bytes in */
+ if (vnd_mbc_getu16(*mpp, 16, &etype) != 0) {
+ ddrop(vsp, *mpp,
+ "packet has incomplete ethernet vlan header");
+ *mpp = NULL;
+ return (1);
+ }
+ offset = sizeof (struct ether_vlan_header);
+ } else {
+ offset = sizeof (struct ether_header);
+ }
+
+ /*
+ * At the moment we only hook on the kinds of things that the IP module
+ * would normally.
+ */
+ if (etype != ETHERTYPE_IP && etype != ETHERTYPE_IPV6)
+ return (0);
+
+ if (etype == ETHERTYPE_IP) {
+ neti = netiv4;
+ he = hev4;
+ het = hetv4;
+ } else {
+ neti = netiv6;
+ he = hev6;
+ het = hetv6;
+ }
+
+ if (!he.he_interested)
+ return (0);
+
+
+ if (vnd_mbc_getdstmac(*mpp, &dstp, dstaddr) != 0) {
+ ddrop(vsp, *mpp, "packet has incomplete ethernet header");
+ *mpp = NULL;
+ return (1);
+ }
+
+ /*
+ * Now that we know we're interested, we have to do some additional
+ * sanity checking for IPF's sake, ala ip_check_length(). Specifically
+ * we need to check to make sure that the remaining packet size,
+ * excluding MAC, is at least the size of an IP header.
+ */
+ mblen = msgsize(*mpp);
+ if ((etype == ETHERTYPE_IP &&
+ mblen - offset < IP_SIMPLE_HDR_LENGTH) ||
+ (etype == ETHERTYPE_IPV6 && mblen - offset < IPV6_HDR_LEN)) {
+ ddrop(vsp, *mpp, "packet has invalid IP header");
+ *mpp = NULL;
+ return (1);
+ }
+
+ info.hpe_protocol = neti;
+ info.hpe_ifp = (phy_if_t)vsp;
+ info.hpe_ofp = (phy_if_t)vsp;
+ info.hpe_mp = mpp;
+ info.hpe_flags = 0;
+
+ if (bcmp(vnd_bcast_addr, dstp, ETHERADDRL) == 0)
+ info.hpe_flags |= HPE_BROADCAST;
+ else if (etype == ETHERTYPE_IP &&
+ bcmp(vnd_ipv4_mcast, dstp, IPV4_MCAST_LEN) == 0)
+ info.hpe_flags |= HPE_MULTICAST;
+ else if (etype == ETHERTYPE_IPV6 &&
+ bcmp(vnd_ipv6_mcast, dstp, IPV6_MCAST_LEN) == 0)
+ info.hpe_flags |= HPE_MULTICAST;
+
+ if (vnd_mbc_getoffset(*mpp, offset, &info.hpe_mb,
+ (uintptr_t *)&info.hpe_hdr) != 0) {
+ ddrop(vsp, *mpp, "packet too small -- "
+ "unable to find payload");
+ *mpp = NULL;
+ return (1);
+ }
+
+ if (hook_run(neti->netd_hooks, het, (hook_data_t)&info) != 0) {
+ hdrop(vsp, *mpp, "dropped by hooks");
+ return (1);
+ }
+
+ return (0);
+}
+
+/*
+ * This should not be used for DL_INFO_REQ; vnd_st_sinfo sizes its own
+ * allocation so the message is also large enough to hold the eventual
+ * DL_INFO_ACK.
+ */
+static mblk_t *
+vnd_dlpi_alloc(size_t len, t_uscalar_t prim)
+{
+ mblk_t *mp;
+ mp = allocb(len, BPRI_MED);
+ if (mp == NULL)
+ return (NULL);
+
+ mp->b_datap->db_type = M_PROTO;
+ mp->b_wptr = mp->b_rptr + len;
+ bzero(mp->b_rptr, len);
+ ((dl_unitdata_req_t *)mp->b_rptr)->dl_primitive = prim;
+
+ return (mp);
+}
+
+static void
+vnd_dlpi_inc_push(vnd_str_t *vsp, mblk_t *mp)
+{
+ mblk_t **mpp;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ ASSERT(mp->b_next == NULL);
+ mpp = &vsp->vns_dlpi_inc;
+ while (*mpp != NULL)
+ mpp = &((*mpp)->b_next);
+ *mpp = mp;
+}
+
+static mblk_t *
+vnd_dlpi_inc_pop(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = vsp->vns_dlpi_inc;
+ if (mp != NULL) {
+ VERIFY(mp->b_next == NULL || mp->b_next != mp);
+ vsp->vns_dlpi_inc = mp->b_next;
+ mp->b_next = NULL;
+ }
+ return (mp);
+}
+
+static int
+vnd_st_sinfo(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+ dl_info_req_t *dlir;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = allocb(MAX(sizeof (dl_info_req_t), sizeof (dl_info_ack_t)),
+ BPRI_HI);
+ if (mp == NULL) {
+ vsp->vns_errno = VND_E_NOMEM;
+ return (1);
+ }
+ vsp->vns_state = VNS_S_INFO_SENT;
+ cv_broadcast(&vsp->vns_stcv);
+
+ mp->b_datap->db_type = M_PCPROTO;
+ dlir = (dl_info_req_t *)mp->b_rptr;
+ mp->b_wptr = (uchar_t *)&dlir[1];
+ dlir->dl_primitive = DL_INFO_REQ;
+ putnext(vsp->vns_wq, mp);
+
+ return (0);
+}
+
+static int
+vnd_st_info(vnd_str_t *vsp)
+{
+ dl_info_ack_t *dlia;
+ mblk_t *mp;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = vnd_dlpi_inc_pop(vsp);
+ dlia = (dl_info_ack_t *)mp->b_rptr;
+ vsp->vns_dlpi_style = dlia->dl_provider_style;
+ vsp->vns_minwrite = dlia->dl_min_sdu;
+ vsp->vns_maxwrite = dlia->dl_max_sdu;
+
+ /*
+ * At this time we only support DL_ETHER devices.
+ */
+ if (dlia->dl_mac_type != DL_ETHER) {
+ freemsg(mp);
+ vsp->vns_errno = VND_E_NOTETHER;
+ return (1);
+ }
+
+ /*
+ * Because vnd operates on entire packets, we need to manually account
+ * for the ethernet header information. We add the size of the
+ * ether_vlan_header to account for this, regardless if it is using
+ * vlans or not.
+ */
+ vsp->vns_maxwrite += sizeof (struct ether_vlan_header);
+
+ freemsg(mp);
+ return (0);
+}
+
+static int
+vnd_st_sexclusive(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = vnd_dlpi_alloc(sizeof (dl_attach_req_t), DL_EXCLUSIVE_REQ);
+ if (mp == NULL) {
+ vsp->vns_errno = VND_E_NOMEM;
+ return (1);
+ }
+
+ vsp->vns_state = VNS_S_EXCLUSIVE_SENT;
+ cv_broadcast(&vsp->vns_stcv);
+ putnext(vsp->vns_wq, mp);
+ return (0);
+}
+
+static int
+vnd_st_exclusive(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+ t_uscalar_t prim, cprim;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = vnd_dlpi_inc_pop(vsp);
+ prim = ((dl_error_ack_t *)mp->b_rptr)->dl_primitive;
+ cprim = ((dl_ok_ack_t *)mp->b_rptr)->dl_correct_primitive;
+
+ if (prim != DL_OK_ACK && prim != DL_ERROR_ACK) {
+ vnd_drop_ctl(vsp, mp,
+ "wrong dlpi primitive for vnd_st_exclusive");
+ vsp->vns_errno = VND_E_DLPIINVAL;
+ return (1);
+ }
+
+ if (cprim != DL_EXCLUSIVE_REQ) {
+ vnd_drop_ctl(vsp, mp,
+ "vnd_st_exclusive: got ack/nack for wrong primitive");
+ vsp->vns_errno = VND_E_DLPIINVAL;
+ return (1);
+ }
+
+ if (prim == DL_ERROR_ACK)
+ vsp->vns_errno = VND_E_DLEXCL;
+
+ freemsg(mp);
+ return (prim == DL_ERROR_ACK);
+}
+
+/*
+ * Send down a DLPI_ATTACH_REQ.
+ */
+static int
+vnd_st_sattach(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = vnd_dlpi_alloc(sizeof (dl_attach_req_t), DL_ATTACH_REQ);
+ if (mp == NULL) {
+ vsp->vns_errno = VND_E_NOMEM;
+ return (1);
+ }
+
+ ((dl_attach_req_t *)mp->b_rptr)->dl_ppa = 0;
+ vsp->vns_state = VNS_S_ATTACH_SENT;
+ cv_broadcast(&vsp->vns_stcv);
+ putnext(vsp->vns_wq, mp);
+
+ return (0);
+}
+
+static int
+vnd_st_attach(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+ t_uscalar_t prim, cprim;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = vnd_dlpi_inc_pop(vsp);
+ prim = ((dl_ok_ack_t *)mp->b_rptr)->dl_primitive;
+ cprim = ((dl_ok_ack_t *)mp->b_rptr)->dl_correct_primitive;
+
+
+ if (prim != DL_OK_ACK && prim != DL_ERROR_ACK) {
+ vnd_drop_ctl(vsp, mp, "vnd_st_attach: unknown primitive type");
+ vsp->vns_errno = VND_E_DLPIINVAL;
+ return (1);
+ }
+
+ if (cprim != DL_ATTACH_REQ) {
+ vnd_drop_ctl(vsp, mp,
+ "vnd_st_attach: Got ack/nack for wrong primitive");
+ vsp->vns_errno = VND_E_DLPIINVAL;
+ return (1);
+ }
+
+ if (prim == DL_ERROR_ACK)
+ vsp->vns_errno = VND_E_ATTACHFAIL;
+
+ freemsg(mp);
+ return (prim == DL_ERROR_ACK);
+}
+
+static int
+vnd_st_sbind(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+ dl_bind_req_t *dbrp;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = vnd_dlpi_alloc(sizeof (dl_bind_req_t) + sizeof (long),
+ DL_BIND_REQ);
+ if (mp == NULL) {
+ vsp->vns_errno = VND_E_NOMEM;
+ return (1);
+ }
+ dbrp = (dl_bind_req_t *)(mp->b_rptr);
+ dbrp->dl_sap = 0;
+ dbrp->dl_service_mode = DL_CLDLS;
+
+ vsp->vns_state = VNS_S_BIND_SENT;
+ cv_broadcast(&vsp->vns_stcv);
+ putnext(vsp->vns_wq, mp);
+
+ return (0);
+}
+
+static int
+vnd_st_bind(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+ t_uscalar_t prim;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = vnd_dlpi_inc_pop(vsp);
+ prim = ((dl_error_ack_t *)mp->b_rptr)->dl_primitive;
+
+ if (prim != DL_BIND_ACK && prim != DL_ERROR_ACK) {
+ vnd_drop_ctl(vsp, mp, "wrong dlpi primitive for vnd_st_bind");
+ vsp->vns_errno = VND_E_DLPIINVAL;
+ return (1);
+ }
+
+ if (prim == DL_ERROR_ACK)
+ vsp->vns_errno = VND_E_BINDFAIL;
+
+ freemsg(mp);
+ return (prim == DL_ERROR_ACK);
+}
+
+static int
+vnd_st_spromisc(vnd_str_t *vsp, int type, vnd_str_state_t next)
+{
+ mblk_t *mp;
+ dl_promiscon_req_t *dprp;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = vnd_dlpi_alloc(sizeof (dl_promiscon_req_t), DL_PROMISCON_REQ);
+ if (mp == NULL) {
+ vsp->vns_errno = VND_E_NOMEM;
+ return (1);
+ }
+
+ dprp = (dl_promiscon_req_t *)mp->b_rptr;
+ dprp->dl_level = type;
+
+ vsp->vns_state = next;
+ cv_broadcast(&vsp->vns_stcv);
+ putnext(vsp->vns_wq, mp);
+
+ return (0);
+}
+
+static int
+vnd_st_promisc(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+ t_uscalar_t prim, cprim;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = vnd_dlpi_inc_pop(vsp);
+ prim = ((dl_ok_ack_t *)mp->b_rptr)->dl_primitive;
+ cprim = ((dl_ok_ack_t *)mp->b_rptr)->dl_correct_primitive;
+
+ if (prim != DL_OK_ACK && prim != DL_ERROR_ACK) {
+ vnd_drop_ctl(vsp, mp,
+ "wrong dlpi primitive for vnd_st_promisc");
+ vsp->vns_errno = VND_E_DLPIINVAL;
+ return (1);
+ }
+
+ if (cprim != DL_PROMISCON_REQ) {
+ vnd_drop_ctl(vsp, mp,
+ "vnd_st_promisc: Got ack/nack for wrong primitive");
+ vsp->vns_errno = VND_E_DLPIINVAL;
+ return (1);
+ }
+
+ if (prim == DL_ERROR_ACK)
+ vsp->vns_errno = VND_E_PROMISCFAIL;
+
+ freemsg(mp);
+ return (prim == DL_ERROR_ACK);
+}
+
+static int
+vnd_st_scapabq(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+
+ mp = vnd_dlpi_alloc(sizeof (dl_capability_req_t), DL_CAPABILITY_REQ);
+ if (mp == NULL) {
+ vsp->vns_errno = VND_E_NOMEM;
+ return (1);
+ }
+
+ vsp->vns_state = VNS_S_CAPAB_Q_SENT;
+ cv_broadcast(&vsp->vns_stcv);
+ putnext(vsp->vns_wq, mp);
+
+ return (0);
+}
+
+static void
+vnd_mac_input(vnd_str_t *vsp, mac_resource_t *unused, mblk_t *mp_chain,
+ mac_header_info_t *mhip)
+{
+ int signal = 0;
+ mblk_t *mp;
+ vnd_pnsd_t *nsp = vsp->vns_nsd;
+
+ ASSERT(vsp != NULL);
+ ASSERT(mp_chain != NULL);
+
+ for (mp = mp_chain; mp != NULL; mp = mp_chain) {
+ uint16_t vid;
+ mp_chain = mp->b_next;
+ mp->b_next = NULL;
+
+ /*
+ * If we were operating in a traditional dlpi context then we
+ * would have enabled DLIOCRAW and rather than the fast path, we
+ * would come through dld_str_rx_raw. That function does two
+ * things that we have to consider doing ourselves. The first is
+ * that it adjusts the b_rptr back to account for dld bumping us
+ * past the mac header. It also tries to account for cases where
+ * mac provides an illusion of the mac header. Fortunately, dld
+ * only allows the fastpath when the media type is the same as
+ * the native type. Therefore all we have to do here is adjust
+ * the b_rptr.
+ */
+ ASSERT(mp->b_rptr >= DB_BASE(mp) + mhip->mhi_hdrsize);
+ mp->b_rptr -= mhip->mhi_hdrsize;
+ vid = VLAN_ID(mhip->mhi_tci);
+ if (mhip->mhi_istagged && vid != VLAN_ID_NONE) {
+ bcopy(mp->b_rptr, mp->b_rptr + 4, 12);
+ mp->b_rptr += 4;
+ }
+
+ if (nsp->vpnd_hooked && vnd_hook(vsp, &mp, nsp->vpnd_neti_v4,
+ nsp->vpnd_event_in_v4, nsp->vpnd_token_in_v4,
+ nsp->vpnd_neti_v6, nsp->vpnd_event_in_v6,
+ nsp->vpnd_token_in_v6, vnd_drop_hook_in, vnd_drop_in) != 0)
+ continue;
+
+ VND_STAT_INC(vsp, vks_rpackets, 1);
+ VND_STAT_INC(vsp, vks_rbytes, msgsize(mp));
+ DTRACE_VND5(recv, mblk_t *, mp, void *, NULL, void *, NULL,
+ vnd_str_t *, vsp, mblk_t *, mp);
+ mutex_enter(&vsp->vns_dq_read.vdq_lock);
+ signal |= vnd_dq_push(&vsp->vns_dq_read, mp, B_FALSE,
+ vnd_drop_in);
+ mutex_exit(&vsp->vns_dq_read.vdq_lock);
+
+ }
+
+ if (signal != 0) {
+ cv_broadcast(&vsp->vns_dq_read.vdq_ready);
+ pollwakeup(&vsp->vns_dev->vdd_ph, POLLIN | POLLRDNORM);
+ }
+
+}
+
+static void
+vnd_mac_flow_control_stat(vnd_str_t *vsp, hrtime_t diff)
+{
+ VND_STAT_INC(vsp, vks_nmacflow, 1);
+ VND_STAT_INC(vsp, vks_tmacflow, diff);
+ if (diff >= VND_LATENCY_1MS)
+ VND_STAT_INC(vsp, vks_mac_flow_1ms, 1);
+ if (diff >= VND_LATENCY_10MS)
+ VND_STAT_INC(vsp, vks_mac_flow_10ms, 1);
+ if (diff >= VND_LATENCY_100MS)
+ VND_STAT_INC(vsp, vks_mac_flow_100ms, 1);
+ if (diff >= VND_LATENCY_1S)
+ VND_STAT_INC(vsp, vks_mac_flow_1s, 1);
+ if (diff >= VND_LATENCY_10S)
+ VND_STAT_INC(vsp, vks_mac_flow_10s, 1);
+}
+
+/*
+ * This is a callback from MAC that indicates that we are allowed to send
+ * packets again.
+ */
+static void
+vnd_mac_flow_control(void *arg, vnd_mac_cookie_t cookie)
+{
+ vnd_str_t *vsp = arg;
+ hrtime_t now, diff;
+
+ mutex_enter(&vsp->vns_lock);
+ now = gethrtime();
+
+ /*
+ * Check for the case that we beat vnd_squeue_tx_one to the punch.
+ * There's also an additional case here that we got notified because
+ * we're sharing a device that ran out of tx descriptors, even though it
+ * wasn't because of us.
+ */
+ if (!(vsp->vns_flags & VNS_F_FLOW_CONTROLLED)) {
+ vsp->vns_fcupdate = now;
+ mutex_exit(&vsp->vns_lock);
+ return;
+ }
+
+ ASSERT(vsp->vns_flags & VNS_F_FLOW_CONTROLLED);
+ ASSERT(vsp->vns_caps.vsc_fc_cookie == cookie);
+ vsp->vns_flags &= ~VNS_F_FLOW_CONTROLLED;
+ vsp->vns_caps.vsc_fc_cookie = NULL;
+ diff = now - vsp->vns_fclatch;
+ vsp->vns_fclatch = 0;
+ DTRACE_VND3(flow__resumed, vnd_str_t *, vsp, uint64_t,
+ vsp->vns_dq_write.vdq_cur, uintptr_t, cookie);
+ /*
+ * If someone has asked to flush the squeue and thus inserted a barrier,
+ * then we shouldn't schedule a drain.
+ */
+ if (!(vsp->vns_flags & (VNS_F_DRAIN_SCHEDULED | VNS_F_BARRIER))) {
+ vsp->vns_flags |= VNS_F_DRAIN_SCHEDULED;
+ gsqueue_enter_one(vsp->vns_squeue, &vsp->vns_drainblk,
+ vnd_squeue_tx_drain, vsp, GSQUEUE_FILL,
+ VND_SQUEUE_TAG_MAC_FLOW_CONTROL);
+ }
+ mutex_exit(&vsp->vns_lock);
+}
+
+static void
+vnd_mac_enter(vnd_str_t *vsp, mac_perim_handle_t *mphp)
+{
+ ASSERT(MUTEX_HELD(&vsp->vns_lock));
+ VERIFY(vsp->vns_caps.vsc_capab_f(vsp->vns_caps.vsc_capab_hdl,
+ DLD_CAPAB_PERIM, mphp, DLD_ENABLE) == 0);
+}
+
+static void
+vnd_mac_exit(vnd_str_t *vsp, mac_perim_handle_t mph)
+{
+ ASSERT(MUTEX_HELD(&vsp->vns_lock));
+ VERIFY(vsp->vns_caps.vsc_capab_f(vsp->vns_caps.vsc_capab_hdl,
+ DLD_CAPAB_PERIM, mph, DLD_DISABLE) == 0);
+}
+
+static int
+vnd_dld_cap_enable(vnd_str_t *vsp, vnd_rx_t rxfunc)
+{
+ int ret;
+ dld_capab_direct_t d;
+ mac_perim_handle_t mph;
+ vnd_str_capab_t *c = &vsp->vns_caps;
+
+ bzero(&d, sizeof (d));
+ d.di_rx_cf = (uintptr_t)rxfunc;
+ d.di_rx_ch = vsp;
+ d.di_flags = DI_DIRECT_RAW;
+
+ vnd_mac_enter(vsp, &mph);
+
+ /*
+ * If we're coming in here for a second pass, we need to make sure that
+ * we remove an existing flow control notification callback, otherwise
+ * we'll create a duplicate that will remain with garbage data.
+ */
+ if (c->vsc_tx_fc_hdl != NULL) {
+ ASSERT(c->vsc_set_fcb_hdl != NULL);
+ (void) c->vsc_set_fcb_f(c->vsc_set_fcb_hdl, NULL,
+ c->vsc_tx_fc_hdl);
+ c->vsc_tx_fc_hdl = NULL;
+ }
+
+ if (vsp->vns_caps.vsc_capab_f(c->vsc_capab_hdl,
+ DLD_CAPAB_DIRECT, &d, DLD_ENABLE) == 0) {
+ c->vsc_tx_f = (vnd_dld_tx_t)d.di_tx_df;
+ c->vsc_tx_hdl = d.di_tx_dh;
+ c->vsc_set_fcb_f = (vnd_dld_set_fcb_t)d.di_tx_cb_df;
+ c->vsc_set_fcb_hdl = d.di_tx_cb_dh;
+ c->vsc_is_fc_f = (vnd_dld_is_fc_t)d.di_tx_fctl_df;
+ c->vsc_is_fc_hdl = d.di_tx_fctl_dh;
+ c->vsc_tx_fc_hdl = c->vsc_set_fcb_f(c->vsc_set_fcb_hdl,
+ vnd_mac_flow_control, vsp);
+ c->vsc_flags |= VNS_C_DIRECT;
+ ret = 0;
+ } else {
+ vsp->vns_errno = VND_E_DIRECTFAIL;
+ ret = 1;
+ }
+ vnd_mac_exit(vsp, mph);
+ return (ret);
+}
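+
+/*
+ * Once the direct capability is enabled, a transmit path can invoke the saved
+ * entry points along these lines (a hedged sketch; the actual transmit code
+ * lives in the squeue paths below). A non-NULL cookie indicates that mac has
+ * flow controlled us and that the cookie must be latched:
+ *
+ *     vnd_str_capab_t *c = &vsp->vns_caps;
+ *     vnd_mac_cookie_t cookie;
+ *
+ *     cookie = c->vsc_tx_f(c->vsc_tx_hdl, mp, 0, MAC_DROP_ON_NO_DESC);
+ *     if (cookie != (vnd_mac_cookie_t)NULL) {
+ *             ... set VNS_F_FLOW_CONTROLLED, record vsc_fc_cookie ...
+ *     }
+ */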
+
+static int
+vnd_st_capabq(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+ dl_capability_ack_t *cap;
+ dl_capability_sub_t *subp;
+ dl_capab_hcksum_t *hck;
+ dl_capab_dld_t *dld;
+ unsigned char *rp;
+ int ret = 0;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = vnd_dlpi_inc_pop(vsp);
+
+ rp = mp->b_rptr;
+ cap = (dl_capability_ack_t *)rp;
+ if (cap->dl_sub_length == 0)
+ goto done;
+
+ /* Don't try to process something too big */
+ if (sizeof (dl_capability_ack_t) + cap->dl_sub_length > MBLKL(mp)) {
+ VND_STAT_INC(vsp, vks_ndlpidrops, 1);
+ VND_STAT_INC(vsp, vks_tdrops, 1);
+ vsp->vns_errno = VND_E_CAPACKINVAL;
+ ret = 1;
+ goto done;
+ }
+
+ rp += cap->dl_sub_offset;
+
+ while (cap->dl_sub_length > 0) {
+ subp = (dl_capability_sub_t *)rp;
+ /* Sanity check something crazy from down below */
+ if (subp->dl_length + sizeof (dl_capability_sub_t) >
+ cap->dl_sub_length) {
+ VND_STAT_INC(vsp, vks_ndlpidrops, 1);
+ VND_STAT_INC(vsp, vks_tdrops, 1);
+ vsp->vns_errno = VND_E_SUBCAPINVAL;
+ ret = 1;
+ goto done;
+ }
+
+ switch (subp->dl_cap) {
+ case DL_CAPAB_HCKSUM:
+ hck = (dl_capab_hcksum_t *)(rp +
+ sizeof (dl_capability_sub_t));
+ if (hck->hcksum_version != HCKSUM_CURRENT_VERSION) {
+ vsp->vns_caps.vsc_flags |= VNS_C_HCKSUM_BADVERS;
+ break;
+ }
+ if (dlcapabcheckqid(&hck->hcksum_mid, vsp->vns_lrq) !=
+ B_TRUE) {
+ vsp->vns_errno = VND_E_CAPABPASS;
+ ret = 1;
+ goto done;
+ }
+ vsp->vns_caps.vsc_flags |= VNS_C_HCKSUM;
+ vsp->vns_caps.vsc_hcksum_opts = hck->hcksum_txflags;
+ break;
+ case DL_CAPAB_DLD:
+ dld = (dl_capab_dld_t *)(rp +
+ sizeof (dl_capability_sub_t));
+ if (dld->dld_version != DLD_CURRENT_VERSION) {
+ vsp->vns_errno = VND_E_DLDBADVERS;
+ ret = 1;
+ goto done;
+ }
+ if (dlcapabcheckqid(&dld->dld_mid, vsp->vns_lrq) !=
+ B_TRUE) {
+ vsp->vns_errno = VND_E_CAPABPASS;
+ ret = 1;
+ goto done;
+ }
+ vsp->vns_caps.vsc_flags |= VNS_C_DLD;
+ vsp->vns_caps.vsc_capab_f =
+ (vnd_dld_cap_t)dld->dld_capab;
+ vsp->vns_caps.vsc_capab_hdl =
+ (void *)dld->dld_capab_handle;
+ /*
+ * At this point in time, we have to set up a direct
+ * function that drops all input. This validates that
+ * we'll be able to set up direct input and that we can
+ * easily switch over to the real data function once
+ * we've plumbed everything up.
+ */
+ if (vnd_dld_cap_enable(vsp, vnd_mac_drop_input) != 0) {
+ /* vns_errno set by vnd_dld_cap_enable */
+ ret = 1;
+ goto done;
+ }
+ break;
+ default:
+ /* Ignore unsupported cap */
+ break;
+ }
+
+ rp += sizeof (dl_capability_sub_t) + subp->dl_length;
+ cap->dl_sub_length -= sizeof (dl_capability_sub_t) +
+ subp->dl_length;
+ }
+
+done:
+ /* Make sure we enabled direct callbacks */
+ if (ret == 0 && !(vsp->vns_caps.vsc_flags & VNS_C_DIRECT)) {
+ vsp->vns_errno = VND_E_DIRECTNOTSUP;
+ ret = 1;
+ }
+
+ freemsg(mp);
+ return (ret);
+}
+
+static void
+vnd_st_sonline(vnd_str_t *vsp)
+{
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ vsp->vns_state = VNS_S_ONLINE;
+ cv_broadcast(&vsp->vns_stcv);
+}
+
+static void
+vnd_st_shutdown(vnd_str_t *vsp)
+{
+ mac_perim_handle_t mph;
+ vnd_str_capab_t *vsc = &vsp->vns_caps;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+
+ /*
+ * At this point in time we know that there is no one transmitting as
+ * our final reference has been torn down and that vnd_s_close inserted
+ * a barrier to validate that everything is flushed.
+ */
+ if (vsc->vsc_flags & VNS_C_DIRECT) {
+ vnd_mac_enter(vsp, &mph);
+ vsc->vsc_flags &= ~VNS_C_DIRECT;
+ (void) vsc->vsc_set_fcb_f(vsc->vsc_set_fcb_hdl, NULL,
+ vsc->vsc_tx_fc_hdl);
+ vsc->vsc_tx_fc_hdl = NULL;
+ (void) vsc->vsc_capab_f(vsc->vsc_capab_hdl, DLD_CAPAB_DIRECT,
+ NULL, DLD_DISABLE);
+ vnd_mac_exit(vsp, mph);
+ }
+}
+
+static boolean_t
+vnd_st_spromiscoff(vnd_str_t *vsp, int type, vnd_str_state_t next)
+{
+ boolean_t ret = B_TRUE;
+ mblk_t *mp;
+ dl_promiscoff_req_t *dprp;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+ mp = vnd_dlpi_alloc(sizeof (dl_promiscoff_req_t), DL_PROMISCOFF_REQ);
+ if (mp == NULL) {
+ cmn_err(CE_NOTE, "!vnd failed to allocate mblk_t for "
+ "promiscoff request");
+ ret = B_FALSE;
+ goto next;
+ }
+
+ dprp = (dl_promiscoff_req_t *)mp->b_rptr;
+ dprp->dl_level = type;
+
+ putnext(vsp->vns_wq, mp);
+next:
+ vsp->vns_state = next;
+ cv_broadcast(&vsp->vns_stcv);
+ return (ret);
+}
+
+static void
+vnd_st_promiscoff(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+ t_uscalar_t prim, cprim;
+
+ VERIFY(MUTEX_HELD(&vsp->vns_lock));
+
+ /*
+ * Unlike other states, where an incoming message is guaranteed, during
+ * tear down we keep driving the state machine forward even after an
+ * earlier failure. We may therefore have gotten here without a
+ * message, in which case there's nothing to do.
+ */
+ mp = vnd_dlpi_inc_pop(vsp);
+ if (mp == NULL)
+ return;
+
+ prim = ((dl_ok_ack_t *)mp->b_rptr)->dl_primitive;
+ cprim = ((dl_ok_ack_t *)mp->b_rptr)->dl_correct_primitive;
+
+ if (prim != DL_OK_ACK && prim != DL_ERROR_ACK) {
+ vnd_drop_ctl(vsp, mp,
+ "wrong dlpi primitive for vnd_st_promiscoff");
+ return;
+ }
+
+ if (cprim != DL_PROMISCOFF_REQ) {
+ vnd_drop_ctl(vsp, mp,
+ "vnd_st_promiscoff: Got ack/nack for wrong primitive");
+ return;
+ }
+
+ if (prim == DL_ERROR_ACK) {
+ cmn_err(CE_WARN, "!failed to disable promiscuos mode during "
+ "vnd teardown");
+ }
+}
+
+static boolean_t
+vnd_st_sunbind(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+ boolean_t ret = B_TRUE;
+
+ mp = vnd_dlpi_alloc(sizeof (dl_unbind_req_t), DL_UNBIND_REQ);
+ if (mp == NULL) {
+ cmn_err(CE_NOTE, "!vnd failed to allocate mblk_t for "
+ "unbind request");
+ ret = B_FALSE;
+ goto next;
+ }
+
+ putnext(vsp->vns_wq, mp);
+next:
+ vsp->vns_state = VNS_S_UNBIND_SENT;
+ cv_broadcast(&vsp->vns_stcv);
+ return (ret);
+}
+
+static void
+vnd_st_unbind(vnd_str_t *vsp)
+{
+ mblk_t *mp;
+ t_uscalar_t prim, cprim;
+
+ /*
+ * Unlike other states, where an incoming message is guaranteed, during
+ * tear down we keep driving the state machine forward even after an
+ * earlier failure. We may therefore have gotten here without a
+ * message, in which case there's nothing to do.
+ */
+ mp = vnd_dlpi_inc_pop(vsp);
+ if (mp == NULL)
+ goto next;
+
+ prim = ((dl_ok_ack_t *)mp->b_rptr)->dl_primitive;
+ cprim = ((dl_ok_ack_t *)mp->b_rptr)->dl_correct_primitive;
+
+ if (prim != DL_OK_ACK && prim != DL_ERROR_ACK) {
+ vnd_drop_ctl(vsp, mp,
+ "wrong dlpi primitive for vnd_st_unbind");
+ goto next;
+ }
+
+ if (cprim != DL_UNBIND_REQ) {
+ vnd_drop_ctl(vsp, mp,
+ "vnd_st_unbind: Got ack/nack for wrong primitive");
+ goto next;
+ }
+
+ if (prim == DL_ERROR_ACK) {
+ cmn_err(CE_WARN, "!failed to unbind stream during vnd "
+ "teardown");
+ }
+
+next:
+ vsp->vns_state = VNS_S_ZOMBIE;
+ cv_broadcast(&vsp->vns_stcv);
+}
+
+/*
+ * Perform state transitions. This is a one way shot down the flow chart
+ * described in the big theory statement.
+ */
+static void
+vnd_str_state_transition(void *arg)
+{
+ boolean_t died = B_FALSE;
+ vnd_str_t *vsp = arg;
+ mblk_t *mp;
+
+ mutex_enter(&vsp->vns_lock);
+ if (vsp->vns_dlpi_inc == NULL && (vsp->vns_state != VNS_S_INITIAL &&
+ vsp->vns_state != VNS_S_SHUTTING_DOWN)) {
+ mutex_exit(&vsp->vns_lock);
+ return;
+ }
+
+ /*
+ * When trying to shut down, or unwinding from a failed enabling, rather
+ * than immediately entering the ZOMBIE state, we may instead opt to try
+ * and enter the next state in the progression. This is especially
+ * important when trying to tear everything down.
+ */
+loop:
+ DTRACE_PROBE2(vnd__state__transition, uintptr_t, vsp,
+ vnd_str_state_t, vsp->vns_state);
+ switch (vsp->vns_state) {
+ case VNS_S_INITIAL:
+ VERIFY(vsp->vns_dlpi_inc == NULL);
+ if (vnd_st_sinfo(vsp) != 0)
+ died = B_TRUE;
+ break;
+ case VNS_S_INFO_SENT:
+ VERIFY(vsp->vns_dlpi_inc != NULL);
+ if (vnd_st_info(vsp) == 0) {
+ if (vnd_st_sexclusive(vsp) != 0)
+ died = B_TRUE;
+ } else {
+ died = B_TRUE;
+ }
+ break;
+ case VNS_S_EXCLUSIVE_SENT:
+ VERIFY(vsp->vns_dlpi_inc != NULL);
+ if (vnd_st_exclusive(vsp) == 0) {
+ if (vsp->vns_dlpi_style == DL_STYLE2) {
+ if (vnd_st_sattach(vsp) != 0)
+ died = B_TRUE;
+ } else {
+ if (vnd_st_sbind(vsp) != 0)
+ died = B_TRUE;
+ }
+ } else {
+ died = B_TRUE;
+ }
+ break;
+ case VNS_S_ATTACH_SENT:
+ VERIFY(vsp->vns_dlpi_inc != NULL);
+ if (vnd_st_attach(vsp) == 0) {
+ if (vnd_st_sbind(vsp) != 0)
+ died = B_TRUE;
+ } else {
+ died = B_TRUE;
+ }
+ break;
+ case VNS_S_BIND_SENT:
+ VERIFY(vsp->vns_dlpi_inc != NULL);
+ if (vnd_st_bind(vsp) == 0) {
+ if (vnd_st_spromisc(vsp, DL_PROMISC_SAP,
+ VNS_S_SAP_PROMISC_SENT) != 0)
+ died = B_TRUE;
+ } else {
+ died = B_TRUE;
+ }
+ break;
+ case VNS_S_SAP_PROMISC_SENT:
+ VERIFY(vsp->vns_dlpi_inc != NULL);
+ if (vnd_st_promisc(vsp) == 0) {
+ if (vnd_st_spromisc(vsp, DL_PROMISC_MULTI,
+ VNS_S_MULTI_PROMISC_SENT) != 0)
+ died = B_TRUE;
+ } else {
+ died = B_TRUE;
+ }
+ break;
+ case VNS_S_MULTI_PROMISC_SENT:
+ VERIFY(vsp->vns_dlpi_inc != NULL);
+ if (vnd_st_promisc(vsp) == 0) {
+ if (vnd_st_spromisc(vsp, DL_PROMISC_RX_ONLY,
+ VNS_S_RX_ONLY_PROMISC_SENT) != 0)
+ died = B_TRUE;
+ } else {
+ died = B_TRUE;
+ }
+ break;
+ case VNS_S_RX_ONLY_PROMISC_SENT:
+ VERIFY(vsp->vns_dlpi_inc != NULL);
+ if (vnd_st_promisc(vsp) == 0) {
+ if (vnd_st_spromisc(vsp, DL_PROMISC_FIXUPS,
+ VNS_S_FIXUP_PROMISC_SENT) != 0)
+ died = B_TRUE;
+ } else {
+ died = B_TRUE;
+ }
+ break;
+ case VNS_S_FIXUP_PROMISC_SENT:
+ VERIFY(vsp->vns_dlpi_inc != NULL);
+ if (vnd_st_promisc(vsp) == 0) {
+ if (vnd_st_scapabq(vsp) != 0)
+ died = B_TRUE;
+ } else {
+ died = B_TRUE;
+ }
+ break;
+ case VNS_S_CAPAB_Q_SENT:
+ if (vnd_st_capabq(vsp) != 0)
+ died = B_TRUE;
+ else
+ vnd_st_sonline(vsp);
+ break;
+ case VNS_S_SHUTTING_DOWN:
+ vnd_st_shutdown(vsp);
+ if (vnd_st_spromiscoff(vsp, DL_PROMISC_MULTI,
+ VNS_S_MULTICAST_PROMISCOFF_SENT) == B_FALSE)
+ goto loop;
+ break;
+ case VNS_S_MULTICAST_PROMISCOFF_SENT:
+ vnd_st_promiscoff(vsp);
+ if (vnd_st_spromiscoff(vsp, DL_PROMISC_SAP,
+ VNS_S_SAP_PROMISCOFF_SENT) == B_FALSE)
+ goto loop;
+ break;
+ case VNS_S_SAP_PROMISCOFF_SENT:
+ vnd_st_promiscoff(vsp);
+ if (vnd_st_sunbind(vsp) == B_FALSE)
+ goto loop;
+ break;
+ case VNS_S_UNBIND_SENT:
+ vnd_st_unbind(vsp);
+ break;
+ case VNS_S_ZOMBIE:
+ while ((mp = vnd_dlpi_inc_pop(vsp)) != NULL)
+ vnd_drop_ctl(vsp, mp, "vsp received data as a zombie");
+ break;
+ default:
+ panic("vnd_str_t entered an unknown state");
+ }
+
+ if (died == B_TRUE) {
+ ASSERT(vsp->vns_errno != VND_E_SUCCESS);
+ vsp->vns_laststate = vsp->vns_state;
+ vsp->vns_state = VNS_S_ZOMBIE;
+ cv_broadcast(&vsp->vns_stcv);
+ }
+
+ mutex_exit(&vsp->vns_lock);
+}
+
+static void
+vnd_dlpi_taskq_dispatch(void *arg)
+{
+ vnd_str_t *vsp = arg;
+ int run = 1;
+
+ while (run != 0) {
+ vnd_str_state_transition(vsp);
+ mutex_enter(&vsp->vns_lock);
+ if (vsp->vns_flags & VNS_F_CONDEMNED ||
+ vsp->vns_dlpi_inc == NULL) {
+ run = 0;
+ vsp->vns_flags &= ~VNS_F_TASKQ_DISPATCHED;
+ }
+ if (vsp->vns_flags & VNS_F_CONDEMNED)
+ cv_signal(&vsp->vns_cancelcv);
+ mutex_exit(&vsp->vns_lock);
+ }
+}
+
+static int
+vnd_neti_getifname(net_handle_t neti, phy_if_t phy, char *buf, const size_t len)
+{
+ return (-1);
+}
+
+static int
+vnd_neti_getmtu(net_handle_t neti, phy_if_t phy, lif_if_t ifdata)
+{
+ return (-1);
+}
+
+static int
+vnd_neti_getptmue(net_handle_t neti)
+{
+ return (-1);
+}
+
+static int
+vnd_neti_getlifaddr(net_handle_t neti, phy_if_t phy, lif_if_t ifdata,
+ size_t nelem, net_ifaddr_t type[], void *storage)
+{
+ return (-1);
+}
+
+static int
+vnd_neti_getlifzone(net_handle_t neti, phy_if_t phy, lif_if_t ifdata,
+ zoneid_t *zid)
+{
+ return (-1);
+}
+
+static int
+vnd_neti_getlifflags(net_handle_t neti, phy_if_t phy, lif_if_t ifdata,
+ uint64_t *flags)
+{
+ return (-1);
+}
+
+static phy_if_t
+vnd_neti_phygetnext(net_handle_t neti, phy_if_t phy)
+{
+ return (-1);
+}
+
+static phy_if_t
+vnd_neti_phylookup(net_handle_t neti, const char *name)
+{
+ return (-1);
+}
+
+static lif_if_t
+vnd_neti_lifgetnext(net_handle_t neti, phy_if_t phy, lif_if_t ifdata)
+{
+ return (-1);
+}
+
+static int
+vnd_neti_inject(net_handle_t neti, inject_t style, net_inject_t *packet)
+{
+ return (-1);
+}
+
+static phy_if_t
+vnd_neti_route(net_handle_t neti, struct sockaddr *address,
+ struct sockaddr *next)
+{
+ return ((phy_if_t)-1);
+}
+
+static int
+vnd_neti_ispchksum(net_handle_t neti, mblk_t *mp)
+{
+ return (-1);
+}
+
+static int
+vnd_neti_isvchksum(net_handle_t neti, mblk_t *mp)
+{
+ return (-1);
+}
+
+static net_protocol_t vnd_neti_info_v4 = {
+ NETINFO_VERSION,
+ NHF_VND_INET,
+ vnd_neti_getifname,
+ vnd_neti_getmtu,
+ vnd_neti_getptmue,
+ vnd_neti_getlifaddr,
+ vnd_neti_getlifzone,
+ vnd_neti_getlifflags,
+ vnd_neti_phygetnext,
+ vnd_neti_phylookup,
+ vnd_neti_lifgetnext,
+ vnd_neti_inject,
+ vnd_neti_route,
+ vnd_neti_ispchksum,
+ vnd_neti_isvchksum
+};
+
+static net_protocol_t vnd_neti_info_v6 = {
+ NETINFO_VERSION,
+ NHF_VND_INET6,
+ vnd_neti_getifname,
+ vnd_neti_getmtu,
+ vnd_neti_getptmue,
+ vnd_neti_getlifaddr,
+ vnd_neti_getlifzone,
+ vnd_neti_getlifflags,
+ vnd_neti_phygetnext,
+ vnd_neti_phylookup,
+ vnd_neti_lifgetnext,
+ vnd_neti_inject,
+ vnd_neti_route,
+ vnd_neti_ispchksum,
+ vnd_neti_isvchksum
+};
+
+
+static int
+vnd_netinfo_init(vnd_pnsd_t *nsp)
+{
+ nsp->vpnd_neti_v4 = net_protocol_register(nsp->vpnd_nsid,
+ &vnd_neti_info_v4);
+ ASSERT(nsp->vpnd_neti_v4 != NULL);
+
+ nsp->vpnd_neti_v6 = net_protocol_register(nsp->vpnd_nsid,
+ &vnd_neti_info_v6);
+ ASSERT(nsp->vpnd_neti_v6 != NULL);
+
+ nsp->vpnd_family_v4.hf_version = HOOK_VERSION;
+ nsp->vpnd_family_v4.hf_name = "vnd_inet";
+
+ if (net_family_register(nsp->vpnd_neti_v4, &nsp->vpnd_family_v4) != 0) {
+ net_protocol_unregister(nsp->vpnd_neti_v4);
+ net_protocol_unregister(nsp->vpnd_neti_v6);
+ cmn_err(CE_NOTE, "vnd_netinfo_init: net_family_register "
+ "failed for stack %d", nsp->vpnd_nsid);
+ return (1);
+ }
+
+ nsp->vpnd_family_v6.hf_version = HOOK_VERSION;
+ nsp->vpnd_family_v6.hf_name = "vnd_inet6";
+
+ if (net_family_register(nsp->vpnd_neti_v6, &nsp->vpnd_family_v6) != 0) {
+ net_family_unregister(nsp->vpnd_neti_v4, &nsp->vpnd_family_v4);
+ net_protocol_unregister(nsp->vpnd_neti_v4);
+ net_protocol_unregister(nsp->vpnd_neti_v6);
+ cmn_err(CE_NOTE, "vnd_netinfo_init: net_family_register "
+ "failed for stack %d", nsp->vpnd_nsid);
+ return (1);
+ }
+
+ nsp->vpnd_event_in_v4.he_version = HOOK_VERSION;
+ nsp->vpnd_event_in_v4.he_name = NH_PHYSICAL_IN;
+ nsp->vpnd_event_in_v4.he_flags = 0;
+ nsp->vpnd_event_in_v4.he_interested = B_FALSE;
+
+ nsp->vpnd_token_in_v4 = net_event_register(nsp->vpnd_neti_v4,
+ &nsp->vpnd_event_in_v4);
+ if (nsp->vpnd_token_in_v4 == NULL) {
+ net_family_unregister(nsp->vpnd_neti_v4, &nsp->vpnd_family_v4);
+ net_family_unregister(nsp->vpnd_neti_v6, &nsp->vpnd_family_v6);
+ net_protocol_unregister(nsp->vpnd_neti_v4);
+ net_protocol_unregister(nsp->vpnd_neti_v6);
+ cmn_err(CE_NOTE, "vnd_netinfo_init: net_event_register "
+ "failed for stack %d", nsp->vpnd_nsid);
+ return (1);
+ }
+
+ nsp->vpnd_event_in_v6.he_version = HOOK_VERSION;
+ nsp->vpnd_event_in_v6.he_name = NH_PHYSICAL_IN;
+ nsp->vpnd_event_in_v6.he_flags = 0;
+ nsp->vpnd_event_in_v6.he_interested = B_FALSE;
+
+ nsp->vpnd_token_in_v6 = net_event_register(nsp->vpnd_neti_v6,
+ &nsp->vpnd_event_in_v6);
+ if (nsp->vpnd_token_in_v6 == NULL) {
+ net_event_shutdown(nsp->vpnd_neti_v4, &nsp->vpnd_event_in_v4);
+ net_event_unregister(nsp->vpnd_neti_v4, &nsp->vpnd_event_in_v4);
+ net_family_unregister(nsp->vpnd_neti_v4, &nsp->vpnd_family_v4);
+ net_family_unregister(nsp->vpnd_neti_v6, &nsp->vpnd_family_v6);
+ net_protocol_unregister(nsp->vpnd_neti_v4);
+ net_protocol_unregister(nsp->vpnd_neti_v6);
+ cmn_err(CE_NOTE, "vnd_netinfo_init: net_event_register "
+ "failed for stack %d", nsp->vpnd_nsid);
+ return (1);
+ }
+
+ nsp->vpnd_event_out_v4.he_version = HOOK_VERSION;
+ nsp->vpnd_event_out_v4.he_name = NH_PHYSICAL_OUT;
+ nsp->vpnd_event_out_v4.he_flags = 0;
+ nsp->vpnd_event_out_v4.he_interested = B_FALSE;
+
+ nsp->vpnd_token_out_v4 = net_event_register(nsp->vpnd_neti_v4,
+ &nsp->vpnd_event_out_v4);
+ if (nsp->vpnd_token_out_v4 == NULL) {
+ net_event_shutdown(nsp->vpnd_neti_v6, &nsp->vpnd_event_in_v6);
+ net_event_unregister(nsp->vpnd_neti_v6, &nsp->vpnd_event_in_v6);
+ net_event_shutdown(nsp->vpnd_neti_v4, &nsp->vpnd_event_in_v4);
+ net_event_unregister(nsp->vpnd_neti_v4, &nsp->vpnd_event_in_v4);
+ net_family_unregister(nsp->vpnd_neti_v4, &nsp->vpnd_family_v4);
+ net_family_unregister(nsp->vpnd_neti_v6, &nsp->vpnd_family_v6);
+ net_protocol_unregister(nsp->vpnd_neti_v4);
+ net_protocol_unregister(nsp->vpnd_neti_v6);
+ cmn_err(CE_NOTE, "vnd_netinfo_init: net_event_register "
+ "failed for stack %d", nsp->vpnd_nsid);
+ return (1);
+ }
+
+ nsp->vpnd_event_out_v6.he_version = HOOK_VERSION;
+ nsp->vpnd_event_out_v6.he_name = NH_PHYSICAL_OUT;
+ nsp->vpnd_event_out_v6.he_flags = 0;
+ nsp->vpnd_event_out_v6.he_interested = B_FALSE;
+
+ nsp->vpnd_token_out_v6 = net_event_register(nsp->vpnd_neti_v6,
+ &nsp->vpnd_event_out_v6);
+ if (nsp->vpnd_token_out_v6 == NULL) {
+ net_event_shutdown(nsp->vpnd_neti_v6, &nsp->vpnd_event_in_v6);
+ net_event_unregister(nsp->vpnd_neti_v6, &nsp->vpnd_event_in_v6);
+ net_event_shutdown(nsp->vpnd_neti_v4, &nsp->vpnd_event_out_v4);
+ net_event_unregister(nsp->vpnd_neti_v4, &nsp->vpnd_event_out_v4);
+ net_event_shutdown(nsp->vpnd_neti_v4, &nsp->vpnd_event_in_v4);
+ net_event_unregister(nsp->vpnd_neti_v4, &nsp->vpnd_event_in_v4);
+ net_family_unregister(nsp->vpnd_neti_v4, &nsp->vpnd_family_v4);
+ net_family_unregister(nsp->vpnd_neti_v6, &nsp->vpnd_family_v6);
+ net_protocol_unregister(nsp->vpnd_neti_v4);
+ net_protocol_unregister(nsp->vpnd_neti_v6);
+ cmn_err(CE_NOTE, "vnd_netinfo_init: net_event_register "
+ "failed for stack %d", nsp->vpnd_nsid);
+ return (1);
+ }
+
+ return (0);
+}
+
+static void
+vnd_netinfo_shutdown(vnd_pnsd_t *nsp)
+{
+ int ret;
+
+ ret = net_event_shutdown(nsp->vpnd_neti_v4, &nsp->vpnd_event_in_v4);
+ VERIFY(ret == 0);
+ ret = net_event_shutdown(nsp->vpnd_neti_v4, &nsp->vpnd_event_out_v4);
+ VERIFY(ret == 0);
+ ret = net_event_shutdown(nsp->vpnd_neti_v6, &nsp->vpnd_event_in_v6);
+ VERIFY(ret == 0);
+ ret = net_event_shutdown(nsp->vpnd_neti_v6, &nsp->vpnd_event_out_v6);
+ VERIFY(ret == 0);
+}
+
+static void
+vnd_netinfo_fini(vnd_pnsd_t *nsp)
+{
+ int ret;
+
+ ret = net_event_unregister(nsp->vpnd_neti_v4, &nsp->vpnd_event_in_v4);
+ VERIFY(ret == 0);
+ ret = net_event_unregister(nsp->vpnd_neti_v4, &nsp->vpnd_event_out_v4);
+ VERIFY(ret == 0);
+ ret = net_event_unregister(nsp->vpnd_neti_v6, &nsp->vpnd_event_in_v6);
+ VERIFY(ret == 0);
+ ret = net_event_unregister(nsp->vpnd_neti_v6, &nsp->vpnd_event_out_v6);
+ VERIFY(ret == 0);
+ ret = net_family_unregister(nsp->vpnd_neti_v4, &nsp->vpnd_family_v4);
+ VERIFY(ret == 0);
+ ret = net_family_unregister(nsp->vpnd_neti_v6, &nsp->vpnd_family_v6);
+ VERIFY(ret == 0);
+ ret = net_protocol_unregister(nsp->vpnd_neti_v4);
+ VERIFY(ret == 0);
+ ret = net_protocol_unregister(nsp->vpnd_neti_v6);
+ VERIFY(ret == 0);
+}
+
+static void
+vnd_strbarrier_cb(void *arg, mblk_t *bmp, gsqueue_t *gsp, void *dummy)
+{
+ vnd_str_t *vsp = arg;
+
+ VERIFY(bmp == &vsp->vns_barrierblk);
+ mutex_enter(&vsp->vns_lock);
+ VERIFY(vsp->vns_flags & VNS_F_BARRIER);
+ VERIFY(!(vsp->vns_flags & VNS_F_BARRIER_DONE));
+ vsp->vns_flags |= VNS_F_BARRIER_DONE;
+ mutex_exit(&vsp->vns_lock);
+
+ /*
+ * For better or worse, we have to broadcast here as we could have a
+ * thread that's blocked for completion as well as one that's blocked
+ * waiting to do a barrier itself.
+ */
+ cv_broadcast(&vsp->vns_barriercv);
+}
+
+/*
+ * This is a data barrier for the stream while it is in fastpath mode. It blocks
+ * and ensures that there is nothing else in the squeue.
+ */
+static void
+vnd_strbarrier(vnd_str_t *vsp)
+{
+ mutex_enter(&vsp->vns_lock);
+ while (vsp->vns_flags & VNS_F_BARRIER)
+ cv_wait(&vsp->vns_barriercv, &vsp->vns_lock);
+ vsp->vns_flags |= VNS_F_BARRIER;
+ mutex_exit(&vsp->vns_lock);
+
+ gsqueue_enter_one(vsp->vns_squeue, &vsp->vns_barrierblk,
+ vnd_strbarrier_cb, vsp, GSQUEUE_PROCESS, VND_SQUEUE_TAG_STRBARRIER);
+
+ mutex_enter(&vsp->vns_lock);
+ while (!(vsp->vns_flags & VNS_F_BARRIER_DONE))
+ cv_wait(&vsp->vns_barriercv, &vsp->vns_lock);
+ vsp->vns_flags &= ~VNS_F_BARRIER;
+ vsp->vns_flags &= ~VNS_F_BARRIER_DONE;
+ mutex_exit(&vsp->vns_lock);
+
+ /*
+ * We have to broadcast in case anyone is waiting for the barrier
+ * themselves.
+ */
+ cv_broadcast(&vsp->vns_barriercv);
+}
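+
+/*
+ * As an illustration of how the barrier is meant to be used, teardown paths
+ * follow roughly this pattern (see vnd_s_close() below for the real
+ * sequence):
+ *
+ *	vnd_dq_flush(&vsp->vns_dq_read, vnd_drop_in);
+ *	vnd_strbarrier(vsp);
+ *	... now safe to tear down the fast path ...
+ *
+ * Once vnd_strbarrier() returns, everything that was in the squeue ahead of
+ * the barrier block has been processed.
+ */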
+
+/*
+ * Based on the type of message that we're dealing with, we're going to want
+ * to do one of several things. If it looks like something we know about, we
+ * handle it in one of our transition threads. Otherwise, we simply putnext.
+ */
+static int
+vnd_s_rput(queue_t *q, mblk_t *mp)
+{
+ t_uscalar_t prim;
+ int dispatch = 0;
+ vnd_str_t *vsp = q->q_ptr;
+
+ switch (DB_TYPE(mp)) {
+ case M_PROTO:
+ case M_PCPROTO:
+ if (MBLKL(mp) < sizeof (t_uscalar_t)) {
+ vnd_drop_ctl(vsp, mp, "PROTO message too short");
+ break;
+ }
+
+ prim = ((union DL_primitives *)mp->b_rptr)->dl_primitive;
+ if (prim == DL_UNITDATA_REQ || prim == DL_UNITDATA_IND) {
+ vnd_drop_ctl(vsp, mp,
+			    "received an unsupported dlpi DATA req");
+ break;
+ }
+
+ /*
+ * Enqueue the entry and fire off a taskq dispatch.
+ */
+ mutex_enter(&vsp->vns_lock);
+ vnd_dlpi_inc_push(vsp, mp);
+ if (!(vsp->vns_flags & VNS_F_TASKQ_DISPATCHED)) {
+ dispatch = 1;
+ vsp->vns_flags |= VNS_F_TASKQ_DISPATCHED;
+ }
+ mutex_exit(&vsp->vns_lock);
+ if (dispatch != 0)
+ taskq_dispatch_ent(vnd_taskq, vnd_dlpi_taskq_dispatch,
+ vsp, 0, &vsp->vns_tqe);
+ break;
+ case M_DATA:
+ vnd_drop_in(vsp, mp, "M_DATA via put(9E)");
+ break;
+ default:
+ putnext(vsp->vns_rq, mp);
+ }
+ return (0);
+}
+
+static void
+vnd_strioctl(queue_t *q, vnd_str_t *vsp, mblk_t *mp, struct iocblk *iocp)
+{
+ int error;
+ vnd_strioc_t *visp;
+
+ if (iocp->ioc_cmd != VND_STRIOC_ASSOCIATE ||
+ iocp->ioc_count != TRANSPARENT) {
+ error = EINVAL;
+ goto nak;
+ }
+
+ /*
+ * All streams ioctls that we support must use kcred as a means to
+ * distinguish that this is a layered open by the kernel as opposed to
+ * one by a user who has done an I_PUSH of the module.
+ */
+ if (iocp->ioc_cr != kcred) {
+ error = EPERM;
+ goto nak;
+ }
+
+ if (mp->b_cont == NULL) {
+ error = EAGAIN;
+ goto nak;
+ }
+
+ visp = kmem_alloc(sizeof (vnd_strioc_t), KM_SLEEP);
+ ASSERT(MBLKL(mp->b_cont) == sizeof (caddr_t));
+ visp->vs_addr = *(caddr_t *)mp->b_cont->b_rptr;
+ visp->vs_state = VSS_COPYIN;
+
+ mcopyin(mp, (void *)visp, sizeof (vnd_strioc_associate_t), NULL);
+ qreply(q, mp);
+
+ return;
+
+nak:
+ if (mp->b_cont != NULL) {
+ freemsg(mp->b_cont);
+ mp->b_cont = NULL;
+ }
+
+ iocp->ioc_error = error;
+ mp->b_datap->db_type = M_IOCNAK;
+ iocp->ioc_count = 0;
+ qreply(q, mp);
+}
+
+static void
+vnd_striocdata(queue_t *q, vnd_str_t *vsp, mblk_t *mp, struct copyresp *csp)
+{
+ int error;
+ vnd_str_state_t state;
+ struct copyreq *crp;
+ vnd_strioc_associate_t *vss;
+ vnd_dev_t *vdp = NULL;
+ vnd_pnsd_t *nsp = NULL;
+ char iname[2*VND_NAMELEN];
+ zone_t *zone;
+ vnd_strioc_t *visp;
+
+ visp = (vnd_strioc_t *)csp->cp_private;
+
+ /* If it's not ours, it's not our problem */
+ if (csp->cp_cmd != VND_STRIOC_ASSOCIATE) {
+ if (q->q_next != NULL) {
+ putnext(q, mp);
+ } else {
+ VND_STAT_INC(vsp, vks_ndlpidrops, 1);
+ VND_STAT_INC(vsp, vks_tdrops, 1);
+			vnd_drop_ctl(vsp, mp, "unknown cmd for M_IOCDATA");
+ }
+ kmem_free(visp, sizeof (vnd_strioc_t));
+ return;
+ }
+
+ /* The nak is already sent for us */
+ if (csp->cp_rval != 0) {
+ vnd_drop_ctl(vsp, mp, "M_COPYIN failed");
+ kmem_free(visp, sizeof (vnd_strioc_t));
+ return;
+ }
+
+ /* Data is sitting for us in b_cont */
+ if (mp->b_cont == NULL ||
+ MBLKL(mp->b_cont) != sizeof (vnd_strioc_associate_t)) {
+ kmem_free(visp, sizeof (vnd_strioc_t));
+ miocnak(q, mp, 0, EINVAL);
+ return;
+ }
+
+ vss = (vnd_strioc_associate_t *)mp->b_cont->b_rptr;
+ vdp = vnd_dev_lookup(vss->vsa_minor);
+ if (vdp == NULL) {
+ error = EIO;
+ vss->vsa_errno = VND_E_NODEV;
+ goto nak;
+ }
+
+ nsp = vnd_nsd_lookup(vss->vsa_nsid);
+ if (nsp == NULL) {
+ error = EIO;
+ vss->vsa_errno = VND_E_NONETSTACK;
+ goto nak;
+ }
+
+ mutex_enter(&vsp->vns_lock);
+ if (!(vsp->vns_flags & VNS_F_NEED_ZONE)) {
+ mutex_exit(&vsp->vns_lock);
+ error = EEXIST;
+ vss->vsa_errno = VND_E_ASSOCIATED;
+ goto nak;
+ }
+
+ vsp->vns_nsd = nsp;
+ vsp->vns_flags &= ~VNS_F_NEED_ZONE;
+ vsp->vns_flags |= VNS_F_TASKQ_DISPATCHED;
+ mutex_exit(&vsp->vns_lock);
+
+ taskq_dispatch_ent(vnd_taskq, vnd_dlpi_taskq_dispatch, vsp, 0,
+ &vsp->vns_tqe);
+
+ /* At this point we need to wait until we have transitioned to ONLINE */
+ mutex_enter(&vsp->vns_lock);
+ while (vsp->vns_state != VNS_S_ONLINE && vsp->vns_state != VNS_S_ZOMBIE)
+ cv_wait(&vsp->vns_stcv, &vsp->vns_lock);
+ state = vsp->vns_state;
+ mutex_exit(&vsp->vns_lock);
+
+ if (state == VNS_S_ZOMBIE) {
+ vss->vsa_errno = vsp->vns_errno;
+ error = EIO;
+ goto nak;
+ }
+
+ mutex_enter(&vdp->vdd_lock);
+ mutex_enter(&vsp->vns_lock);
+ VERIFY(vdp->vdd_str == NULL);
+ /*
+ * Now initialize the remaining kstat properties and let's go ahead and
+ * create it.
+ */
+ (void) snprintf(iname, sizeof (iname), "z%d_%d",
+ vdp->vdd_nsd->vpnd_zid, vdp->vdd_minor);
+ vsp->vns_kstat = kstat_create_zone("vnd", vdp->vdd_minor, iname, "net",
+ KSTAT_TYPE_NAMED, sizeof (vnd_str_stat_t) / sizeof (kstat_named_t),
+ KSTAT_FLAG_VIRTUAL, GLOBAL_ZONEID);
+ if (vsp->vns_kstat == NULL) {
+ error = EIO;
+ vss->vsa_errno = VND_E_KSTATCREATE;
+ mutex_exit(&vsp->vns_lock);
+ mutex_exit(&vdp->vdd_lock);
+ goto nak;
+ }
+ vdp->vdd_str = vsp;
+ vsp->vns_dev = vdp;
+
+ /*
+	 * Now, it's time to do the last thing that can fail, changing out the
+ * input function. After this we know that we can receive data, so we
+ * should make sure that we're ready.
+ */
+ if (vnd_dld_cap_enable(vsp, vnd_mac_input) != 0) {
+ error = EIO;
+ vss->vsa_errno = VND_E_DIRECTFAIL;
+ vdp->vdd_str = NULL;
+ vsp->vns_dev = NULL;
+ mutex_exit(&vsp->vns_lock);
+ mutex_exit(&vdp->vdd_lock);
+ goto nak;
+ }
+
+ zone = zone_find_by_id(vdp->vdd_nsd->vpnd_zid);
+ ASSERT(zone != NULL);
+ vsp->vns_kstat->ks_data = &vsp->vns_ksdata;
+ /* Account for zone name */
+ vsp->vns_kstat->ks_data_size += strlen(zone->zone_name) + 1;
+ /* Account for eventual link name */
+ vsp->vns_kstat->ks_data_size += VND_NAMELEN;
+ kstat_named_setstr(&vsp->vns_ksdata.vks_zonename, zone->zone_name);
+ kstat_named_setstr(&vdp->vdd_str->vns_ksdata.vks_linkname,
+ vdp->vdd_lname);
+ zone_rele(zone);
+ kstat_install(vsp->vns_kstat);
+
+ mutex_exit(&vsp->vns_lock);
+ mutex_exit(&vdp->vdd_lock);
+
+ /*
+ * Note that the vnd_str_t does not keep a permanent hold on the
+ * vnd_pnsd_t. We leave that up to the vnd_dev_t as that's also what
+	 * the netstack goes through to take care of everything.
+ */
+ vss->vsa_errno = VND_E_SUCCESS;
+nak:
+ if (vdp != NULL)
+ vnd_dev_rele(vdp);
+ if (nsp != NULL)
+ vnd_nsd_rele(nsp);
+ /*
+ * Change the copyin request to a copyout. Note that we can't use
+ * mcopyout here as it only works when the DB_TYPE is M_IOCTL. That's
+ * okay, as the copyin vs. copyout is basically the same.
+ */
+ DB_TYPE(mp) = M_COPYOUT;
+ visp->vs_state = VSS_COPYOUT;
+ crp = (struct copyreq *)mp->b_rptr;
+ crp->cq_private = (void *)visp;
+ crp->cq_addr = visp->vs_addr;
+ crp->cq_size = sizeof (vnd_strioc_associate_t);
+ qreply(q, mp);
+}
+
+static void
+vnd_stroutdata(queue_t *q, vnd_str_t *vsp, mblk_t *mp, struct copyresp *csp)
+{
+ ASSERT(csp->cp_private != NULL);
+ kmem_free(csp->cp_private, sizeof (vnd_strioc_t));
+ if (csp->cp_cmd != VND_STRIOC_ASSOCIATE) {
+ if (q->q_next != NULL) {
+ putnext(q, mp);
+ } else {
+ VND_STAT_INC(vsp, vks_ndlpidrops, 1);
+ VND_STAT_INC(vsp, vks_tdrops, 1);
+			vnd_drop_ctl(vsp, mp, "unknown cmd for M_IOCDATA");
+ }
+ return;
+ }
+
+ /* The nak is already sent for us */
+ if (csp->cp_rval != 0) {
+ vnd_drop_ctl(vsp, mp, "M_COPYOUT failed");
+ return;
+ }
+
+ /* Ack and let's be done with it all */
+ miocack(q, mp, 0, 0);
+}
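+
+/*
+ * Taken together, the functions above implement the standard transparent
+ * ioctl message dance for VND_STRIOC_ASSOCIATE. As a sketch of the flow
+ * between the stream head and this module:
+ *
+ *	M_IOCTL   -> vnd_strioctl()    replies with M_COPYIN (mcopyin)
+ *	M_IOCDATA -> vnd_striocdata()  consumes the copied-in
+ *	                               vnd_strioc_associate_t, performs the
+ *	                               association, replies with M_COPYOUT
+ *	M_IOCDATA -> vnd_stroutdata()  acks with M_IOCACK (miocack)
+ *
+ * The vnd_strioc_t hung off cq_private/cp_private carries our state
+ * (VSS_COPYIN, then VSS_COPYOUT) across the steps.
+ */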
+
+static int
+vnd_s_wput(queue_t *q, mblk_t *mp)
+{
+ vnd_str_t *vsp = q->q_ptr;
+ struct copyresp *crp;
+ vnd_strioc_state_t vstate;
+ vnd_strioc_t *visp;
+
+ switch (DB_TYPE(mp)) {
+ case M_IOCTL:
+ vnd_strioctl(q, vsp, mp, (struct iocblk *)mp->b_rptr);
+ return (0);
+ case M_IOCDATA:
+ crp = (struct copyresp *)mp->b_rptr;
+ ASSERT(crp->cp_private != NULL);
+ visp = (vnd_strioc_t *)crp->cp_private;
+ vstate = visp->vs_state;
+ ASSERT(vstate == VSS_COPYIN || vstate == VSS_COPYOUT);
+ if (vstate == VSS_COPYIN)
+ vnd_striocdata(q, vsp, mp,
+ (struct copyresp *)mp->b_rptr);
+ else
+ vnd_stroutdata(q, vsp, mp,
+ (struct copyresp *)mp->b_rptr);
+ return (0);
+ default:
+ break;
+ }
+ if (q->q_next != NULL)
+ putnext(q, mp);
+ else
+ vnd_drop_ctl(vsp, mp, "!M_IOCTL in wput");
+
+ return (0);
+}
+
+static int
+vnd_s_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)
+{
+ vnd_str_t *vsp;
+ uint_t rand;
+
+ if (q->q_ptr != NULL)
+ return (EINVAL);
+
+ if (!(sflag & MODOPEN))
+ return (ENXIO);
+
+ if (credp != kcred)
+ return (EPERM);
+
+ vsp = kmem_cache_alloc(vnd_str_cache, KM_SLEEP);
+ bzero(vsp, sizeof (*vsp));
+ mutex_init(&vsp->vns_lock, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&vsp->vns_cancelcv, NULL, CV_DRIVER, NULL);
+ cv_init(&vsp->vns_barriercv, NULL, CV_DRIVER, NULL);
+ cv_init(&vsp->vns_stcv, NULL, CV_DRIVER, NULL);
+ vsp->vns_state = VNS_S_INITIAL;
+
+ mutex_init(&vsp->vns_dq_read.vdq_lock, NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&vsp->vns_dq_write.vdq_lock, NULL, MUTEX_DRIVER, NULL);
+ mutex_enter(&vnd_dev_lock);
+ vsp->vns_dq_read.vdq_max = vnd_vdq_default_size;
+ vsp->vns_dq_read.vdq_vns = vsp;
+ vsp->vns_dq_write.vdq_max = vnd_vdq_default_size;
+ vsp->vns_dq_write.vdq_vns = vsp;
+ mutex_exit(&vnd_dev_lock);
+ vsp->vns_rq = q;
+ vsp->vns_wq = WR(q);
+ q->q_ptr = WR(q)->q_ptr = vsp;
+ vsp->vns_flags = VNS_F_NEED_ZONE;
+ vsp->vns_nflush = vnd_flush_nburst;
+ vsp->vns_bsize = vnd_flush_burst_size;
+
+ (void) random_get_pseudo_bytes((uint8_t *)&rand, sizeof (rand));
+ vsp->vns_squeue = gsqueue_set_get(vnd_sqset, rand);
+
+ /*
+ * We create our kstat and initialize all of its fields now, but we
+ * don't install it until we actually do the zone association so we can
+ * get everything.
+ */
+ kstat_named_init(&vsp->vns_ksdata.vks_rbytes, "rbytes",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_rpackets, "rpackets",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_obytes, "obytes",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_opackets, "opackets",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_nhookindrops, "nhookindrops",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_nhookoutdrops, "nhookoutdrops",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_ndlpidrops, "ndlpidrops",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_ndataindrops, "ndataindrops",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_ndataoutdrops, "ndataoutdrops",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_tdrops, "total_drops",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_linkname, "linkname",
+ KSTAT_DATA_STRING);
+ kstat_named_init(&vsp->vns_ksdata.vks_zonename, "zonename",
+ KSTAT_DATA_STRING);
+ kstat_named_init(&vsp->vns_ksdata.vks_nmacflow, "flowcontrol_events",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_tmacflow, "flowcontrol_time",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_mac_flow_1ms, "flowcontrol_1ms",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_mac_flow_10ms, "flowcontrol_10ms",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_mac_flow_100ms,
+ "flowcontrol_100ms", KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_mac_flow_1s, "flowcontrol_1s",
+ KSTAT_DATA_UINT64);
+ kstat_named_init(&vsp->vns_ksdata.vks_mac_flow_10s, "flowcontrol_10s",
+ KSTAT_DATA_UINT64);
+ qprocson(q);
+ /*
+ * Now that we've called qprocson, grab the lower module for making sure
+ * that we don't have any pass through modules.
+ */
+ vsp->vns_lrq = RD(vsp->vns_wq->q_next);
+
+ return (0);
+}
+
+static int
+vnd_s_close(queue_t *q, int flag, cred_t *credp)
+{
+ vnd_str_t *vsp;
+ mblk_t *mp;
+
+ VERIFY(WR(q)->q_next != NULL);
+
+ vsp = q->q_ptr;
+ ASSERT(vsp != NULL);
+
+ /*
+ * We need to transition ourselves down. This means that we have a few
+ * important different things to do in the process of tearing down our
+ * input and output buffers, making sure we've drained the current
+ * squeue, and disabling the fast path. Before we disable the fast path,
+ * we should make sure the squeue is drained. Because we're in streams
+ * close, we know that no packets can come into us from userland, but we
+ * can receive more. As such, the following is the exact order of things
+ * that we do:
+ *
+ * 1) flush the vns_dq_read
+ * 2) Insert the drain mblk
+ * 3) When it's been received, tear down the fast path by kicking
+ * off the state machine.
+ * 4) One final flush of both the vns_dq_read,vns_dq_write
+ */
+
+ vnd_dq_flush(&vsp->vns_dq_read, vnd_drop_in);
+ vnd_strbarrier(vsp);
+ mutex_enter(&vsp->vns_lock);
+ vsp->vns_state = VNS_S_SHUTTING_DOWN;
+ if (!(vsp->vns_flags & VNS_F_TASKQ_DISPATCHED)) {
+ vsp->vns_flags |= VNS_F_TASKQ_DISPATCHED;
+ taskq_dispatch_ent(vnd_taskq, vnd_dlpi_taskq_dispatch, vsp,
+ 0, &vsp->vns_tqe);
+ }
+ while (vsp->vns_state != VNS_S_ZOMBIE)
+ cv_wait(&vsp->vns_stcv, &vsp->vns_lock);
+ mutex_exit(&vsp->vns_lock);
+
+ qprocsoff(q);
+ mutex_enter(&vsp->vns_lock);
+ vsp->vns_flags |= VNS_F_CONDEMNED;
+ while (vsp->vns_flags & VNS_F_TASKQ_DISPATCHED)
+ cv_wait(&vsp->vns_cancelcv, &vsp->vns_lock);
+
+ while ((mp = vnd_dlpi_inc_pop(vsp)) != NULL)
+ vnd_drop_ctl(vsp, mp, "vnd_s_close");
+ mutex_exit(&vsp->vns_lock);
+
+ q->q_ptr = NULL;
+ vnd_dq_flush(&vsp->vns_dq_read, vnd_drop_in);
+ vnd_dq_flush(&vsp->vns_dq_write, vnd_drop_out);
+ mutex_destroy(&vsp->vns_dq_read.vdq_lock);
+ mutex_destroy(&vsp->vns_dq_write.vdq_lock);
+
+ if (vsp->vns_kstat != NULL)
+ kstat_delete(vsp->vns_kstat);
+ mutex_destroy(&vsp->vns_lock);
+ cv_destroy(&vsp->vns_stcv);
+ cv_destroy(&vsp->vns_barriercv);
+ cv_destroy(&vsp->vns_cancelcv);
+ kmem_cache_free(vnd_str_cache, vsp);
+
+ return (0);
+}
+
+static vnd_mac_cookie_t
+vnd_squeue_tx_one(vnd_str_t *vsp, mblk_t *mp)
+{
+ hrtime_t txtime;
+ vnd_mac_cookie_t vc;
+
+ VND_STAT_INC(vsp, vks_opackets, 1);
+ VND_STAT_INC(vsp, vks_obytes, msgsize(mp));
+ DTRACE_VND5(send, mblk_t *, mp, void *, NULL, void *, NULL,
+ vnd_str_t *, vsp, mblk_t *, mp);
+ /* Actually tx now */
+ txtime = gethrtime();
+ vc = vsp->vns_caps.vsc_tx_f(vsp->vns_caps.vsc_tx_hdl,
+ mp, 0, MAC_DROP_ON_NO_DESC);
+
+ /*
+ * We need to check two different conditions before we immediately set
+ * the flow control lock. The first thing that we need to do is verify
+	 * that this is an instance of hard flow control, so to speak. The flow
+ * control callbacks won't always fire in cases where we still get a
+ * cookie returned. The explicit check for flow control will guarantee
+ * us that we'll get a subsequent notification callback.
+ *
+	 * The second case comes about because we do not hold the
+	 * vnd_str_t`vns_lock across calls to tx, so we need to determine
+	 * whether a flow control notification already came across for us in a
+	 * different thread calling vnd_mac_flow_control(). To deal with this,
+	 * we record a
+ * timestamp every time that we change the flow control state. We grab
+ * txtime here before we transmit because that guarantees that the
+ * hrtime_t of the call to vnd_mac_flow_control() will be after txtime.
+ *
+ * If the flow control notification beat us to the punch, the value of
+ * vns_fcupdate will be larger than the value of txtime, and we should
+ * just record the statistics. However, if we didn't beat it to the
+ * punch (txtime > vns_fcupdate), then we know that it's safe to wait
+ * for a notification.
+ */
+ if (vc != NULL) {
+ hrtime_t diff;
+
+ if (vsp->vns_caps.vsc_is_fc_f(vsp->vns_caps.vsc_is_fc_hdl,
+ vc) == 0)
+ return (NULL);
+ mutex_enter(&vsp->vns_lock);
+ diff = vsp->vns_fcupdate - txtime;
+ if (diff > 0) {
+ mutex_exit(&vsp->vns_lock);
+ vnd_mac_flow_control_stat(vsp, diff);
+ return (NULL);
+ }
+ vsp->vns_flags |= VNS_F_FLOW_CONTROLLED;
+ vsp->vns_caps.vsc_fc_cookie = vc;
+ vsp->vns_fclatch = txtime;
+ vsp->vns_fcupdate = txtime;
+ DTRACE_VND3(flow__blocked, vnd_str_t *, vsp,
+ uint64_t, vsp->vns_dq_write.vdq_cur, uintptr_t, vc);
+ mutex_exit(&vsp->vns_lock);
+ }
+
+ return (vc);
+}
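+
+/*
+ * To make the race described above concrete, consider two threads with
+ * hypothetical timestamps:
+ *
+ *	tx thread				mac thread
+ *	txtime = gethrtime()	(t=5)
+ *	tx, cookie returned
+ *						vnd_mac_flow_control():
+ *						vns_fcupdate = gethrtime() (t=7)
+ *	diff = 7 - 5 > 0
+ *
+ * Here the notification has already fired, so we only record statistics. If
+ * instead vns_fcupdate were older than txtime (diff <= 0), the notification
+ * for this cookie is still coming and it is safe to set
+ * VNS_F_FLOW_CONTROLLED and wait for it.
+ */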
+
+static void
+vnd_squeue_tx_drain(void *arg, mblk_t *drain_mp, gsqueue_t *gsp, void *dummy)
+{
+ mblk_t *mp;
+ int nmps;
+ size_t mptot, nflush, bsize;
+ boolean_t blocked, empty;
+ vnd_data_queue_t *vqp;
+ vnd_str_t *vsp = arg;
+
+ mutex_enter(&vsp->vns_lock);
+ /*
+ * We either enter here via an squeue or via vnd_squeue_tx_append(). In
+ * the former case we need to mark that there is no longer an active
+ * user of the drain block.
+ */
+ if (drain_mp != NULL) {
+ VERIFY(drain_mp == &vsp->vns_drainblk);
+ VERIFY(vsp->vns_flags & VNS_F_DRAIN_SCHEDULED);
+ vsp->vns_flags &= ~VNS_F_DRAIN_SCHEDULED;
+ }
+
+ /*
+ * If we're still flow controlled or under a flush barrier, nothing to
+ * do.
+ */
+ if (vsp->vns_flags & (VNS_F_FLOW_CONTROLLED | VNS_F_BARRIER)) {
+ mutex_exit(&vsp->vns_lock);
+ return;
+ }
+
+ nflush = vsp->vns_nflush;
+ bsize = vsp->vns_bsize;
+ mutex_exit(&vsp->vns_lock);
+
+ nmps = 0;
+ mptot = 0;
+ blocked = B_FALSE;
+ vqp = &vsp->vns_dq_write;
+ while (nmps < nflush && mptot <= bsize) {
+ mutex_enter(&vqp->vdq_lock);
+ if (vnd_dq_pop(vqp, &mp) == 0) {
+ mutex_exit(&vqp->vdq_lock);
+ break;
+ }
+ mutex_exit(&vqp->vdq_lock);
+
+ nmps++;
+ mptot += msgsize(mp);
+ if (vnd_squeue_tx_one(vsp, mp) != NULL) {
+ blocked = B_TRUE;
+ break;
+ }
+ }
+
+ empty = vnd_dq_is_empty(&vsp->vns_dq_write);
+
+ /*
+ * If the queue is not empty, we're not blocked, and there isn't a drain
+ * scheduled, put it into the squeue with the drain block and
+ * GSQUEUE_FILL.
+ */
+ if (blocked == B_FALSE && empty == B_FALSE) {
+ mutex_enter(&vsp->vns_lock);
+ if (!(vsp->vns_flags & VNS_F_DRAIN_SCHEDULED)) {
+ mblk_t *mp = &vsp->vns_drainblk;
+ vsp->vns_flags |= VNS_F_DRAIN_SCHEDULED;
+ gsqueue_enter_one(vsp->vns_squeue,
+ mp, vnd_squeue_tx_drain, vsp,
+ GSQUEUE_FILL, VND_SQUEUE_TAG_TX_DRAIN);
+ }
+ mutex_exit(&vsp->vns_lock);
+ }
+
+ /*
+ * If we drained some amount of data, we need to signal the data queue.
+ */
+ if (nmps > 0) {
+ cv_broadcast(&vsp->vns_dq_write.vdq_ready);
+ pollwakeup(&vsp->vns_dev->vdd_ph, POLLOUT);
+ }
+}
+
+static void
+vnd_squeue_tx_append(void *arg, mblk_t *mp, gsqueue_t *gsp, void *dummy)
+{
+ vnd_str_t *vsp = arg;
+ vnd_data_queue_t *vqp = &vsp->vns_dq_write;
+ vnd_pnsd_t *nsp = vsp->vns_nsd;
+ size_t len = msgsize(mp);
+
+ /*
+ * Before we append this packet, we should run it through the firewall
+ * rules.
+ */
+ if (nsp->vpnd_hooked && vnd_hook(vsp, &mp, nsp->vpnd_neti_v4,
+ nsp->vpnd_event_out_v4, nsp->vpnd_token_out_v4, nsp->vpnd_neti_v6,
+ nsp->vpnd_event_out_v6, nsp->vpnd_token_out_v6, vnd_drop_hook_out,
+ vnd_drop_out) != 0) {
+ /*
+ * Because we earlier reserved space for this packet and it's
+ * not making the cut, we need to go through and unreserve that
+ * space. Also note that the message block will likely be freed
+ * by the time we return from vnd_hook so we cannot rely on it.
+ */
+ mutex_enter(&vqp->vdq_lock);
+ vnd_dq_unreserve(vqp, len);
+ mutex_exit(&vqp->vdq_lock);
+ return;
+ }
+
+ /*
+ * We earlier reserved space for this packet. So for now simply append
+ * it and call drain. We know that no other drain can be going on right
+ * now thanks to the squeue.
+ */
+ mutex_enter(&vqp->vdq_lock);
+ (void) vnd_dq_push(&vsp->vns_dq_write, mp, B_TRUE, vnd_drop_panic);
+ mutex_exit(&vqp->vdq_lock);
+ vnd_squeue_tx_drain(vsp, NULL, NULL, NULL);
+}
+
+/*
+ * We need to see if this is a valid name of sorts for us. That means a few
+ * been null terminated. More importantly, it must be a valid name as far as
+ * been null terminated. More importantly, that it's a valid name as far as
+ * ddi_create_minor_node is concerned (that means no '@', '/', or ' '). We
+ * further constrain ourselves to simply alphanumeric characters and a few
+ * additional ones, ':', '-', and '_'.
+ */
+static int
+vnd_validate_name(const char *buf, size_t buflen)
+{
+ int i, len;
+
+ /* First make sure a null terminator exists */
+ for (i = 0; i < buflen; i++)
+ if (buf[i] == '\0')
+ break;
+ len = i;
+ if (i == 0 || i == buflen)
+ return (0);
+
+ for (i = 0; i < len; i++)
+ if (!isalnum(buf[i]) && buf[i] != ':' && buf[i] != '-' &&
+ buf[i] != '_')
+ return (0);
+
+ return (1);
+}
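+
+/*
+ * By way of example (the names here are hypothetical), vnd_validate_name()
+ * accepts strings such as "net0", "vnd_red:1", and "a-b", while rejecting
+ * "net@0" and "a b" (forbidden characters), "" (empty), and any buffer
+ * lacking a null terminator within buflen bytes.
+ */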
+
+static int
+vnd_ioctl_attach(vnd_dev_t *vdp, uintptr_t arg, cred_t *credp, int cpflag)
+{
+ vnd_ioc_attach_t via;
+ vnd_strioc_associate_t vss;
+ vnd_pnsd_t *nsp;
+ zone_t *zonep;
+ zoneid_t zid;
+ char buf[2*VND_NAMELEN];
+ int ret, rp;
+
+ if (secpolicy_net_config(credp, B_FALSE) != 0)
+ return (EPERM);
+
+ if (secpolicy_net_rawaccess(credp) != 0)
+ return (EPERM);
+
+ if (ddi_copyin((void *)arg, &via, sizeof (via), cpflag) != 0)
+ return (EFAULT);
+ via.via_errno = VND_E_SUCCESS;
+
+ if (vnd_validate_name(via.via_name, VND_NAMELEN) == 0) {
+ via.via_errno = VND_E_BADNAME;
+ ret = EIO;
+ goto errcopyout;
+ }
+
+ /*
+ * Only the global zone can request to create a device in a different
+ * zone.
+ */
+ zid = crgetzoneid(credp);
+ if (zid != GLOBAL_ZONEID && via.via_zoneid != -1 &&
+ zid != via.via_zoneid) {
+ via.via_errno = VND_E_PERM;
+ ret = EIO;
+ goto errcopyout;
+ }
+
+ if (via.via_zoneid == -1)
+ via.via_zoneid = zid;
+
+ /*
+ * Establish the name we'll use now. We want to be extra paranoid about
+ * the device we're opening so check that now.
+ */
+ if (zid == GLOBAL_ZONEID && via.via_zoneid != zid) {
+ zonep = zone_find_by_id(via.via_zoneid);
+ if (zonep == NULL) {
+ via.via_errno = VND_E_NOZONE;
+ ret = EIO;
+ goto errcopyout;
+ }
+ if (snprintf(NULL, 0, "/dev/net/zone/%s/%s", zonep->zone_name,
+ via.via_name) >= sizeof (buf)) {
+ zone_rele(zonep);
+ via.via_errno = VND_E_BADNAME;
+ ret = EIO;
+ goto errcopyout;
+ }
+ (void) snprintf(buf, sizeof (buf), "/dev/net/zone/%s/%s",
+ zonep->zone_name, via.via_name);
+ zone_rele(zonep);
+ zonep = NULL;
+ } else {
+ if (snprintf(NULL, 0, "/dev/net/%s", via.via_name) >=
+ sizeof (buf)) {
+ via.via_errno = VND_E_BADNAME;
+ ret = EIO;
+ goto errcopyout;
+ }
+ (void) snprintf(buf, sizeof (buf), "/dev/net/%s", via.via_name);
+ }
+
+ /*
+ * If our zone is dying then the netstack will have been removed from
+ * this list.
+ */
+ nsp = vnd_nsd_lookup_by_zid(via.via_zoneid);
+ if (nsp == NULL) {
+ via.via_errno = VND_E_NOZONE;
+ ret = EIO;
+ goto errcopyout;
+ }
+
+ /*
+ * Note we set the attached handle even though we haven't actually
+ * finished the process of attaching the ldi handle.
+ */
+ mutex_enter(&vdp->vdd_lock);
+ if (vdp->vdd_flags & (VND_D_ATTACHED | VND_D_ATTACH_INFLIGHT)) {
+ mutex_exit(&vdp->vdd_lock);
+ vnd_nsd_rele(nsp);
+ via.via_errno = VND_E_ATTACHED;
+ ret = EIO;
+ goto errcopyout;
+ }
+ vdp->vdd_flags |= VND_D_ATTACH_INFLIGHT;
+ ASSERT(vdp->vdd_cr == NULL);
+ crhold(credp);
+ vdp->vdd_cr = credp;
+ ASSERT(vdp->vdd_nsd == NULL);
+ vdp->vdd_nsd = nsp;
+ mutex_exit(&vdp->vdd_lock);
+
+ /*
+ * Place an additional hold on the vnd_pnsd_t as we go through and do
+ * all of the rest of our work. This will be the hold that we keep for
+ * as long as this thing is attached.
+ */
+ vnd_nsd_ref(nsp);
+
+ ret = ldi_open_by_name(buf, FREAD | FWRITE, vdp->vdd_cr,
+ &vdp->vdd_ldih, vdp->vdd_ldiid);
+ if (ret != 0) {
+ if (ret == ENODEV)
+ via.via_errno = VND_E_NODATALINK;
+ goto err;
+ }
+
+ /*
+ * Unfortunately the I_PUSH interface doesn't allow us a way to detect
+ * whether or not we're coming in from a layered device. We really want
+ * to make sure that a normal user can't push on our streams module.
+ * Currently the only idea I have for this is to make sure that the
+ * credp is kcred which is really terrible.
+ */
+ ret = ldi_ioctl(vdp->vdd_ldih, I_PUSH, (intptr_t)"vnd", FKIOCTL,
+ kcred, &rp);
+ if (ret != 0) {
+ rp = ldi_close(vdp->vdd_ldih, FREAD | FWRITE, vdp->vdd_cr);
+ VERIFY(rp == 0);
+ via.via_errno = VND_E_STRINIT;
+ ret = EIO;
+ goto err;
+ }
+
+ vss.vsa_minor = vdp->vdd_minor;
+ vss.vsa_nsid = nsp->vpnd_nsid;
+
+ ret = ldi_ioctl(vdp->vdd_ldih, VND_STRIOC_ASSOCIATE, (intptr_t)&vss,
+ FKIOCTL, kcred, &rp);
+ if (ret != 0 || vss.vsa_errno != VND_E_SUCCESS) {
+ rp = ldi_close(vdp->vdd_ldih, FREAD | FWRITE, vdp->vdd_cr);
+ VERIFY(rp == 0);
+ if (ret == 0) {
+ via.via_errno = vss.vsa_errno;
+ ret = EIO;
+ }
+ goto err;
+ }
+
+ mutex_enter(&vdp->vdd_nsd->vpnd_lock);
+
+ /*
+ * There's a chance that our netstack was condemned while we've had a
+ * hold on it. As such we need to check and if so, error out.
+ */
+ if (vdp->vdd_nsd->vpnd_flags & VND_NS_CONDEMNED) {
+ mutex_exit(&vdp->vdd_nsd->vpnd_lock);
+ rp = ldi_close(vdp->vdd_ldih, FREAD | FWRITE, vdp->vdd_cr);
+ VERIFY(rp == 0);
+ ret = EIO;
+ via.via_errno = VND_E_NOZONE;
+ goto err;
+ }
+
+ mutex_enter(&vdp->vdd_lock);
+ VERIFY(vdp->vdd_str != NULL);
+ vdp->vdd_flags &= ~VND_D_ATTACH_INFLIGHT;
+ vdp->vdd_flags |= VND_D_ATTACHED;
+ (void) strlcpy(vdp->vdd_datalink, via.via_name,
+ sizeof (vdp->vdd_datalink));
+ list_insert_tail(&vdp->vdd_nsd->vpnd_dev_list, vdp);
+ mutex_exit(&vdp->vdd_lock);
+ mutex_exit(&vdp->vdd_nsd->vpnd_lock);
+ vnd_nsd_rele(nsp);
+
+ return (0);
+
+err:
+ mutex_enter(&vdp->vdd_lock);
+ vdp->vdd_flags &= ~VND_D_ATTACH_INFLIGHT;
+ crfree(vdp->vdd_cr);
+ vdp->vdd_cr = NULL;
+ vdp->vdd_nsd = NULL;
+ mutex_exit(&vdp->vdd_lock);
+
+ /*
+ * We have two holds to drop here. One for our original reference and
+ * one for the hold this operation would have represented.
+ */
+ vnd_nsd_rele(nsp);
+ vnd_nsd_rele(nsp);
+errcopyout:
+ if (ddi_copyout(&via, (void *)arg, sizeof (via), cpflag) != 0)
+ ret = EFAULT;
+
+ return (ret);
+}
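+
+/*
+ * For reference, a hedged sketch of what the userland side of an attach
+ * looks like (a library normally wraps this; the datalink name below is
+ * hypothetical):
+ *
+ *	vnd_ioc_attach_t via;
+ *	int fd = open("/dev/vnd/ctl", O_RDWR);
+ *
+ *	bzero(&via, sizeof (via));
+ *	(void) strlcpy(via.via_name, "net0", sizeof (via.via_name));
+ *	via.via_zoneid = -1;		(attach in the caller's zone)
+ *	if (ioctl(fd, VND_IOC_ATTACH, &via) != 0)
+ *		check via.via_errno, e.g. VND_E_BADNAME or VND_E_NODATALINK
+ */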
+
+static int
+vnd_ioctl_link(vnd_dev_t *vdp, intptr_t arg, cred_t *credp, int cpflag)
+{
+ int ret = 0;
+ vnd_ioc_link_t vil;
+ char mname[2*VND_NAMELEN];
+ char **c;
+ vnd_dev_t *v;
+ zoneid_t zid;
+
+ /* Not anyone can link something */
+ if (secpolicy_net_config(credp, B_FALSE) != 0)
+ return (EPERM);
+
+ if (ddi_copyin((void *)arg, &vil, sizeof (vil), cpflag) != 0)
+ return (EFAULT);
+
+ if (vnd_validate_name(vil.vil_name, VND_NAMELEN) == 0) {
+ ret = EIO;
+ vil.vil_errno = VND_E_BADNAME;
+ goto errcopyout;
+ }
+
+ c = vnd_reserved_names;
+ while (*c != NULL) {
+ if (strcmp(vil.vil_name, *c) == 0) {
+ ret = EIO;
+ vil.vil_errno = VND_E_BADNAME;
+ goto errcopyout;
+ }
+ c++;
+ }
+
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_ATTACHED)) {
+ mutex_exit(&vdp->vdd_lock);
+ ret = EIO;
+ vil.vil_errno = VND_E_NOTATTACHED;
+ goto errcopyout;
+ }
+
+ if (vdp->vdd_flags & VND_D_ZONE_DYING) {
+ mutex_exit(&vdp->vdd_lock);
+ ret = EIO;
+ vil.vil_errno = VND_E_NOZONE;
+ goto errcopyout;
+ }
+
+ if (vdp->vdd_flags & (VND_D_LINK_INFLIGHT | VND_D_LINKED)) {
+ mutex_exit(&vdp->vdd_lock);
+ ret = EIO;
+ vil.vil_errno = VND_E_LINKED;
+ goto errcopyout;
+ }
+ vdp->vdd_flags |= VND_D_LINK_INFLIGHT;
+ zid = vdp->vdd_nsd->vpnd_zid;
+ mutex_exit(&vdp->vdd_lock);
+
+ if (snprintf(NULL, 0, "z%d:%s", zid, vil.vil_name) >=
+ sizeof (mname)) {
+ ret = EIO;
+ vil.vil_errno = VND_E_BADNAME;
+ goto errcopyout;
+ }
+
+ mutex_enter(&vnd_dev_lock);
+ for (v = list_head(&vnd_dev_list); v != NULL;
+ v = list_next(&vnd_dev_list, v)) {
+ if (!(v->vdd_flags & VND_D_LINKED))
+ continue;
+
+ if (v->vdd_nsd->vpnd_zid == zid &&
+ strcmp(v->vdd_lname, vil.vil_name) == 0) {
+ mutex_exit(&vnd_dev_lock);
+ ret = EIO;
+ vil.vil_errno = VND_E_LINKEXISTS;
+ goto error;
+ }
+ }
+
+ /*
+ * We set the name and mark ourselves attached while holding the list
+	 * lock to ensure that no other user can mistakenly find our name.
+ */
+ (void) snprintf(mname, sizeof (mname), "z%d:%s", zid,
+ vil.vil_name);
+ mutex_enter(&vdp->vdd_lock);
+
+ /*
+ * Because we dropped our lock, we need to double check whether or not
+ * the zone was marked as dying while we were here. If it hasn't, then
+ * it's safe for us to link it in.
+ */
+ if (vdp->vdd_flags & VND_D_ZONE_DYING) {
+ mutex_exit(&vdp->vdd_lock);
+ mutex_exit(&vnd_dev_lock);
+ ret = EIO;
+ vil.vil_errno = VND_E_NOZONE;
+ goto error;
+ }
+
+ (void) strlcpy(vdp->vdd_lname, vil.vil_name, sizeof (vdp->vdd_lname));
+ if (ddi_create_minor_node(vnd_dip, mname, S_IFCHR, vdp->vdd_minor,
+ DDI_PSEUDO, 0) != DDI_SUCCESS) {
+ ret = EIO;
+ vil.vil_errno = VND_E_MINORNODE;
+ } else {
+ vdp->vdd_flags &= ~VND_D_LINK_INFLIGHT;
+ vdp->vdd_flags |= VND_D_LINKED;
+ kstat_named_setstr(&vdp->vdd_str->vns_ksdata.vks_linkname,
+ vdp->vdd_lname);
+ ret = 0;
+ }
+ mutex_exit(&vdp->vdd_lock);
+ mutex_exit(&vnd_dev_lock);
+
+ if (ret == 0) {
+ /*
+ * Add a reference to represent that this device is linked into
+ * the file system name space to ensure that it doesn't
+ * disappear.
+ */
+ vnd_dev_ref(vdp);
+ return (0);
+ }
+
+error:
+ mutex_enter(&vdp->vdd_lock);
+ vdp->vdd_flags &= ~VND_D_LINK_INFLIGHT;
+ vdp->vdd_lname[0] = '\0';
+ mutex_exit(&vdp->vdd_lock);
+
+errcopyout:
+ if (ddi_copyout(&vil, (void *)arg, sizeof (vil), cpflag) != 0)
+ ret = EFAULT;
+ return (ret);
+}
+
+/*
+ * Common unlink function. This is used both from the ioctl path and from the
+ * netstack shutdown path. The caller is required to hold the mutex on the
+ * vnd_dev_t, which will be released on their behalf. The only thing the
+ * caller is allowed to do afterward is to potentially rele the
+ * vnd_dev_t if they have their own hold. Note that only the ioctl path has its
+ * own hold.
+ */
+static void
+vnd_dev_unlink(vnd_dev_t *vdp)
+{
+ char mname[2*VND_NAMELEN];
+
+ ASSERT(MUTEX_HELD(&vdp->vdd_lock));
+
+ (void) snprintf(mname, sizeof (mname), "z%d:%s",
+ vdp->vdd_nsd->vpnd_zid, vdp->vdd_lname);
+ ddi_remove_minor_node(vnd_dip, mname);
+ vdp->vdd_lname[0] = '\0';
+ vdp->vdd_flags &= ~VND_D_LINKED;
+ kstat_named_setstr(&vdp->vdd_str->vns_ksdata.vks_linkname,
+ vdp->vdd_lname);
+ mutex_exit(&vdp->vdd_lock);
+
+ /*
+ * This rele corresponds to the reference that we took in
+ * vnd_ioctl_link.
+ */
+ vnd_dev_rele(vdp);
+}
+
+static int
+vnd_ioctl_unlink(vnd_dev_t *vdp, intptr_t arg, cred_t *credp, int cpflag)
+{
+ int ret;
+ zoneid_t zid;
+ vnd_ioc_unlink_t viu;
+
+ /* Not anyone can unlink something */
+ if (secpolicy_net_config(credp, B_FALSE) != 0)
+ return (EPERM);
+
+ zid = crgetzoneid(credp);
+
+ if (ddi_copyin((void *)arg, &viu, sizeof (viu), cpflag) != 0)
+ return (EFAULT);
+
+ viu.viu_errno = VND_E_SUCCESS;
+
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_LINKED)) {
+ mutex_exit(&vdp->vdd_lock);
+ ret = EIO;
+ viu.viu_errno = VND_E_NOTLINKED;
+ goto err;
+ }
+ VERIFY(vdp->vdd_flags & VND_D_ATTACHED);
+
+ if (zid != GLOBAL_ZONEID && zid != vdp->vdd_nsd->vpnd_zid) {
+ mutex_exit(&vdp->vdd_lock);
+ ret = EIO;
+ viu.viu_errno = VND_E_PERM;
+ goto err;
+ }
+
+ /* vnd_dev_unlink releases the vdp mutex for us */
+ vnd_dev_unlink(vdp);
+ ret = 0;
+err:
+ if (ddi_copyout(&viu, (void *)arg, sizeof (viu), cpflag) != 0)
+ return (EFAULT);
+
+ return (ret);
+}
+
+static int
+vnd_ioctl_setrxbuf(vnd_dev_t *vdp, intptr_t arg, int cpflag)
+{
+ int ret;
+ vnd_ioc_buf_t vib;
+
+ if (ddi_copyin((void *)arg, &vib, sizeof (vib), cpflag) != 0)
+ return (EFAULT);
+
+ mutex_enter(&vnd_dev_lock);
+ if (vib.vib_size > vnd_vdq_hard_max) {
+ mutex_exit(&vnd_dev_lock);
+ vib.vib_errno = VND_E_BUFTOOBIG;
+ ret = EIO;
+ goto err;
+ }
+ mutex_exit(&vnd_dev_lock);
+
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_ATTACHED)) {
+ mutex_exit(&vdp->vdd_lock);
+ vib.vib_errno = VND_E_NOTATTACHED;
+ ret = EIO;
+ goto err;
+ }
+
+ mutex_enter(&vdp->vdd_str->vns_lock);
+ if (vib.vib_size < vdp->vdd_str->vns_minwrite) {
+ mutex_exit(&vdp->vdd_str->vns_lock);
+ mutex_exit(&vdp->vdd_lock);
+ vib.vib_errno = VND_E_BUFTOOSMALL;
+ ret = EIO;
+ goto err;
+ }
+
+ mutex_exit(&vdp->vdd_str->vns_lock);
+ mutex_enter(&vdp->vdd_str->vns_dq_read.vdq_lock);
+ vdp->vdd_str->vns_dq_read.vdq_max = vib.vib_size;
+ mutex_exit(&vdp->vdd_str->vns_dq_read.vdq_lock);
+ mutex_exit(&vdp->vdd_lock);
+ ret = 0;
+
+err:
+ if (ddi_copyout(&vib, (void *)arg, sizeof (vib), cpflag) != 0)
+ return (EFAULT);
+
+ return (ret);
+}
+
+static int
+vnd_ioctl_getrxbuf(vnd_dev_t *vdp, intptr_t arg, int cpflag)
+{
+ int ret;
+ vnd_ioc_buf_t vib;
+
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_ATTACHED)) {
+ mutex_exit(&vdp->vdd_lock);
+ vib.vib_errno = VND_E_NOTATTACHED;
+ ret = EIO;
+ goto err;
+ }
+
+ mutex_enter(&vdp->vdd_str->vns_dq_read.vdq_lock);
+ vib.vib_size = vdp->vdd_str->vns_dq_read.vdq_max;
+ mutex_exit(&vdp->vdd_str->vns_dq_read.vdq_lock);
+ mutex_exit(&vdp->vdd_lock);
+ ret = 0;
+
+err:
+ if (ddi_copyout(&vib, (void *)arg, sizeof (vib), cpflag) != 0)
+ return (EFAULT);
+
+ return (ret);
+}
+
+static int
+vnd_ioctl_getmaxbuf(vnd_dev_t *vdp, intptr_t arg, int cpflag)
+{
+ vnd_ioc_buf_t vib;
+
+ mutex_enter(&vnd_dev_lock);
+ vib.vib_size = vnd_vdq_hard_max;
+ mutex_exit(&vnd_dev_lock);
+
+ if (ddi_copyout(&vib, (void *)arg, sizeof (vib), cpflag) != 0)
+ return (EFAULT);
+
+ return (0);
+}
+
+static int
+vnd_ioctl_gettxbuf(vnd_dev_t *vdp, intptr_t arg, int cpflag)
+{
+ int ret;
+ vnd_ioc_buf_t vib;
+
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_ATTACHED)) {
+ mutex_exit(&vdp->vdd_lock);
+ vib.vib_errno = VND_E_NOTATTACHED;
+ ret = EIO;
+ goto err;
+ }
+
+ mutex_enter(&vdp->vdd_str->vns_dq_write.vdq_lock);
+ vib.vib_size = vdp->vdd_str->vns_dq_write.vdq_max;
+ mutex_exit(&vdp->vdd_str->vns_dq_write.vdq_lock);
+ mutex_exit(&vdp->vdd_lock);
+ ret = 0;
+
+err:
+ if (ddi_copyout(&vib, (void *)arg, sizeof (vib), cpflag) != 0)
+ return (EFAULT);
+
+ return (ret);
+}
+
+static int
+vnd_ioctl_settxbuf(vnd_dev_t *vdp, intptr_t arg, int cpflag)
+{
+ int ret;
+ vnd_ioc_buf_t vib;
+
+ if (ddi_copyin((void *)arg, &vib, sizeof (vib), cpflag) != 0)
+ return (EFAULT);
+
+ mutex_enter(&vnd_dev_lock);
+ if (vib.vib_size > vnd_vdq_hard_max) {
+ mutex_exit(&vnd_dev_lock);
+ vib.vib_errno = VND_E_BUFTOOBIG;
+ ret = EIO;
+ goto err;
+ }
+ mutex_exit(&vnd_dev_lock);
+
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_ATTACHED)) {
+ mutex_exit(&vdp->vdd_lock);
+ vib.vib_errno = VND_E_NOTATTACHED;
+ ret = EIO;
+ goto err;
+ }
+
+ mutex_enter(&vdp->vdd_str->vns_lock);
+ if (vib.vib_size < vdp->vdd_str->vns_minwrite) {
+ mutex_exit(&vdp->vdd_str->vns_lock);
+ mutex_exit(&vdp->vdd_lock);
+ vib.vib_errno = VND_E_BUFTOOSMALL;
+ ret = EIO;
+ goto err;
+ }
+ mutex_exit(&vdp->vdd_str->vns_lock);
+
+ mutex_enter(&vdp->vdd_str->vns_dq_write.vdq_lock);
+ vdp->vdd_str->vns_dq_write.vdq_max = vib.vib_size;
+ mutex_exit(&vdp->vdd_str->vns_dq_write.vdq_lock);
+ mutex_exit(&vdp->vdd_lock);
+ ret = 0;
+
+err:
+ if (ddi_copyout(&vib, (void *)arg, sizeof (vib), cpflag) != 0)
+ return (EFAULT);
+
+ return (ret);
+}
+
+static int
+vnd_ioctl_gettu(vnd_dev_t *vdp, intptr_t arg, int mode, boolean_t min)
+{
+ vnd_ioc_buf_t vib;
+
+ vib.vib_errno = 0;
+ mutex_enter(&vdp->vdd_lock);
+ if (vdp->vdd_flags & VND_D_ATTACHED) {
+ mutex_enter(&vdp->vdd_str->vns_lock);
+ if (min == B_TRUE)
+ vib.vib_size = vdp->vdd_str->vns_minwrite;
+ else
+ vib.vib_size = vdp->vdd_str->vns_maxwrite;
+ mutex_exit(&vdp->vdd_str->vns_lock);
+ } else {
+ vib.vib_errno = VND_E_NOTATTACHED;
+ }
+ mutex_exit(&vdp->vdd_lock);
+
+ if (ddi_copyout(&vib, (void *)arg, sizeof (vib), mode & FKIOCTL) != 0)
+ return (EFAULT);
+
+ return (0);
+}
+
+static int
+vnd_frameio_read(vnd_dev_t *vdp, intptr_t addr, int mode)
+{
+ int ret, nonblock, nwrite;
+ frameio_t *fio;
+ vnd_data_queue_t *vqp;
+ mblk_t *mp;
+
+ fio = frameio_alloc(KM_NOSLEEP | KM_NORMALPRI);
+ if (fio == NULL)
+ return (EAGAIN);
+
+ ret = frameio_hdr_copyin(fio, FRAMEIO_NVECS_MAX, (const void *)addr,
+ mode);
+ if (ret != 0) {
+ frameio_free(fio);
+ return (ret);
+ }
+
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_ATTACHED)) {
+ mutex_exit(&vdp->vdd_lock);
+ frameio_free(fio);
+ return (ENXIO);
+ }
+ mutex_exit(&vdp->vdd_lock);
+
+ nonblock = mode & (FNONBLOCK | FNDELAY);
+
+ vqp = &vdp->vdd_str->vns_dq_read;
+ mutex_enter(&vqp->vdq_lock);
+
+ /* Check empty case */
+ if (vqp->vdq_cur == 0) {
+ if (nonblock != 0) {
+ mutex_exit(&vqp->vdq_lock);
+ frameio_free(fio);
+ return (EWOULDBLOCK);
+ }
+ while (vqp->vdq_cur == 0) {
+ if (cv_wait_sig(&vqp->vdq_ready, &vqp->vdq_lock) <= 0) {
+ mutex_exit(&vqp->vdq_lock);
+ frameio_free(fio);
+ return (EINTR);
+ }
+ }
+ }
+
+ ret = frameio_mblk_chain_write(fio, MAP_BLK_FRAME, vqp->vdq_head,
+ &nwrite, mode & FKIOCTL);
+ if (ret != 0) {
+ mutex_exit(&vqp->vdq_lock);
+ frameio_free(fio);
+ return (ret);
+ }
+
+ ret = frameio_hdr_copyout(fio, nwrite, (void *)addr, mode);
+ if (ret != 0) {
+ mutex_exit(&vqp->vdq_lock);
+ frameio_free(fio);
+ return (ret);
+ }
+
+ while (nwrite > 0) {
+ (void) vnd_dq_pop(vqp, &mp);
+ freemsg(mp);
+ nwrite--;
+ }
+ mutex_exit(&vqp->vdq_lock);
+ frameio_free(fio);
+
+ return (0);
+}
+
+static int
+vnd_frameio_write(vnd_dev_t *vdp, intptr_t addr, int mode)
+{
+ frameio_t *fio;
+ int ret, nonblock, nframes, i, nread;
+ size_t maxwrite, minwrite, total, flen;
+ mblk_t *mp_chain, *mp, *nmp;
+ vnd_data_queue_t *vqp;
+
+ fio = frameio_alloc(KM_NOSLEEP | KM_NORMALPRI);
+ if (fio == NULL)
+ return (EAGAIN);
+
+ ret = frameio_hdr_copyin(fio, FRAMEIO_NVECS_MAX, (void *)addr, mode);
+ if (ret != 0) {
+ frameio_free(fio);
+ return (ret);
+ }
+
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_ATTACHED)) {
+ mutex_exit(&vdp->vdd_lock);
+ frameio_free(fio);
+ return (ENXIO);
+ }
+ mutex_exit(&vdp->vdd_lock);
+
+ nonblock = mode & (FNONBLOCK | FNDELAY);
+
+ /*
+ * Make sure no single frame is larger than we can accept.
+ */
+ mutex_enter(&vdp->vdd_str->vns_lock);
+ minwrite = vdp->vdd_str->vns_minwrite;
+ maxwrite = vdp->vdd_str->vns_maxwrite;
+ mutex_exit(&vdp->vdd_str->vns_lock);
+
+	nframes = fio->fio_nvecs / fio->fio_nvpf;
+ total = 0;
+ for (i = 0; i < nframes; i++) {
+ flen = frameio_frame_length(fio,
+ &fio->fio_vecs[i*fio->fio_nvpf]);
+ if (flen < minwrite || flen > maxwrite) {
+ frameio_free(fio);
+ return (ERANGE);
+ }
+ total += flen;
+ }
+
+ vqp = &vdp->vdd_str->vns_dq_write;
+ mutex_enter(&vqp->vdq_lock);
+ while (vnd_dq_reserve(vqp, total) == 0) {
+ if (nonblock != 0) {
+ frameio_free(fio);
+ mutex_exit(&vqp->vdq_lock);
+ return (EAGAIN);
+ }
+ if (cv_wait_sig(&vqp->vdq_ready, &vqp->vdq_lock) <= 0) {
+ mutex_exit(&vqp->vdq_lock);
+ frameio_free(fio);
+ return (EINTR);
+ }
+ }
+ mutex_exit(&vqp->vdq_lock);
+
+ /*
+ * We've reserved our space, let's copyin and go from here.
+ */
+ ret = frameio_mblk_chain_read(fio, &mp_chain, &nread, mode & FKIOCTL);
+ if (ret != 0) {
+ frameio_free(fio);
+ vnd_dq_unreserve(vqp, total);
+ cv_broadcast(&vqp->vdq_ready);
+ pollwakeup(&vdp->vdd_ph, POLLOUT);
+ return (ret);
+ }
+
+ for (mp = mp_chain; mp != NULL; mp = nmp) {
+ nmp = mp->b_next;
+ mp->b_next = NULL;
+ gsqueue_enter_one(vdp->vdd_str->vns_squeue, mp,
+ vnd_squeue_tx_append, vdp->vdd_str, GSQUEUE_PROCESS,
+ VND_SQUEUE_TAG_VND_WRITE);
+ }
+
+ /*
+ * Update the frameio structure to indicate that we wrote those frames.
+ */
+ frameio_mark_consumed(fio, nread);
+ ret = frameio_hdr_copyout(fio, nread, (void *)addr, mode);
+ frameio_free(fio);
+
+ return (ret);
+}
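+
+/*
+ * A short sketch of the frameio layout that the read and write paths above
+ * consume: a frameio_t describes fio_nvecs total I/O vectors grouped
+ * fio_nvpf vectors per frame, so the number of frames is
+ * fio_nvecs / fio_nvpf. For example, with hypothetical values fio_nvecs = 4
+ * and fio_nvpf = 2:
+ *
+ *	fio_vecs[0], fio_vecs[1]	frame 0
+ *	fio_vecs[2], fio_vecs[3]	frame 1
+ *
+ * On the write path each frame's total length, per frameio_frame_length(),
+ * must fall within [vns_minwrite, vns_maxwrite] or the whole request fails
+ * with ERANGE.
+ */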
+
+static int
+vnd_ioctl_list_copy_info(vnd_dev_t *vdp, vnd_ioc_info_t *arg, int mode)
+{
+ const char *link;
+ uint32_t vers = 1;
+ ASSERT(MUTEX_HELD(&vdp->vdd_lock));
+
+ /*
+ * Copy all of the members out to userland.
+ */
+ if (ddi_copyout(&vers, &arg->vii_version, sizeof (uint32_t),
+ mode & FKIOCTL) != 0)
+ return (EFAULT);
+
+ if (vdp->vdd_flags & VND_D_LINKED)
+ link = vdp->vdd_lname;
+ else
+ link = "<anonymous>";
+ if (ddi_copyout(link, arg->vii_name, sizeof (arg->vii_name),
+ mode & FKIOCTL) != 0)
+ return (EFAULT);
+
+ if (ddi_copyout(vdp->vdd_datalink, arg->vii_datalink,
+ sizeof (arg->vii_datalink), mode & FKIOCTL) != 0)
+ return (EFAULT);
+
+ if (ddi_copyout(&vdp->vdd_nsd->vpnd_zid, &arg->vii_zone,
+ sizeof (zoneid_t), mode & FKIOCTL) != 0)
+ return (EFAULT);
+ return (0);
+}
+
+static int
+vnd_ioctl_list(intptr_t arg, cred_t *credp, int mode)
+{
+ vnd_ioc_list_t vl;
+ vnd_ioc_list32_t vl32;
+ zoneid_t zid;
+ vnd_dev_t *vdp;
+ vnd_ioc_info_t *vip;
+ int found, cancopy, ret;
+
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ if (ddi_copyin((void *)arg, &vl32, sizeof (vnd_ioc_list32_t),
+ mode & FKIOCTL) != 0)
+ return (EFAULT);
+ vl.vl_nents = vl32.vl_nents;
+ vl.vl_actents = vl32.vl_actents;
+ vl.vl_ents = (void *)(uintptr_t)vl32.vl_ents;
+ } else {
+ if (ddi_copyin((void *)arg, &vl, sizeof (vnd_ioc_list_t),
+ mode & FKIOCTL) != 0)
+ return (EFAULT);
+ }
+
+ cancopy = vl.vl_nents;
+ vip = vl.vl_ents;
+ found = 0;
+ zid = crgetzoneid(credp);
+ mutex_enter(&vnd_dev_lock);
+ for (vdp = list_head(&vnd_dev_list); vdp != NULL;
+ vdp = list_next(&vnd_dev_list, vdp)) {
+ mutex_enter(&vdp->vdd_lock);
+ if (vdp->vdd_flags & VND_D_ATTACHED &&
+ !(vdp->vdd_flags & (VND_D_CONDEMNED | VND_D_ZONE_DYING)) &&
+ (zid == GLOBAL_ZONEID || zid == vdp->vdd_nsd->vpnd_zid)) {
+ found++;
+ if (cancopy > 0) {
+ ret = vnd_ioctl_list_copy_info(vdp, vip, mode);
+ if (ret != 0) {
+ mutex_exit(&vdp->vdd_lock);
+ mutex_exit(&vnd_dev_lock);
+ return (ret);
+ }
+ cancopy--;
+ vip++;
+ }
+ }
+ mutex_exit(&vdp->vdd_lock);
+ }
+ mutex_exit(&vnd_dev_lock);
+
+ if (ddi_copyout(&found, &((vnd_ioc_list_t *)arg)->vl_actents,
+ sizeof (uint_t), mode & FKIOCTL) != 0)
+ return (EFAULT);
+
+ return (0);
+}
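+
+/*
+ * The list ioctl supports the usual two-pass pattern from userland (a
+ * hedged sketch; variable names are illustrative):
+ *
+ *	vnd_ioc_list_t vl;
+ *
+ *	vl.vl_nents = 0;
+ *	vl.vl_ents = NULL;
+ *	(void) ioctl(fd, VND_IOC_LIST, &vl);	(vl_actents has the count)
+ *	vl.vl_nents = vl.vl_actents;
+ *	vl.vl_ents = calloc(vl.vl_nents, sizeof (vnd_ioc_info_t));
+ *	(void) ioctl(fd, VND_IOC_LIST, &vl);	(entries filled in)
+ *
+ * Devices can come and go between the two calls, so the vl_actents returned
+ * by the second call is the authoritative count.
+ */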
+
+static int
+vnd_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
+ int *rvalp)
+{
+ int ret;
+ minor_t m;
+ vnd_dev_t *vdp;
+
+ m = getminor(dev);
+ ASSERT(m != 0);
+
+ /*
+ * Make sure no one has come in on an ioctl from the strioc case.
+ */
+ if ((cmd & VND_STRIOC) == VND_STRIOC)
+ return (ENOTTY);
+
+ /*
+	 * As with close, if this minor isn't found, it indicates a programmer
+	 * error somewhere.
+ */
+ vdp = vnd_dev_lookup(m);
+ if (vdp == NULL)
+ return (ENXIO);
+
+ switch (cmd) {
+ case VND_IOC_ATTACH:
+ if (!(mode & FWRITE)) {
+ ret = EBADF;
+ break;
+ }
+ ret = vnd_ioctl_attach(vdp, arg, credp, mode);
+ break;
+ case VND_IOC_LINK:
+ if (!(mode & FWRITE)) {
+ ret = EBADF;
+ break;
+ }
+ ret = vnd_ioctl_link(vdp, arg, credp, mode);
+ break;
+ case VND_IOC_UNLINK:
+ if (!(mode & FWRITE)) {
+ ret = EBADF;
+ break;
+ }
+ ret = vnd_ioctl_unlink(vdp, arg, credp, mode);
+ break;
+ case VND_IOC_GETRXBUF:
+ if (!(mode & FREAD)) {
+ ret = EBADF;
+ break;
+ }
+ ret = vnd_ioctl_getrxbuf(vdp, arg, mode);
+ break;
+ case VND_IOC_SETRXBUF:
+ if (!(mode & FWRITE)) {
+ ret = EBADF;
+ break;
+ }
+ ret = vnd_ioctl_setrxbuf(vdp, arg, mode);
+ break;
+ case VND_IOC_GETTXBUF:
+ if (!(mode & FREAD)) {
+ ret = EBADF;
+ break;
+ }
+ ret = vnd_ioctl_gettxbuf(vdp, arg, mode);
+ break;
+ case VND_IOC_SETTXBUF:
+ if (!(mode & FWRITE)) {
+ ret = EBADF;
+ break;
+ }
+ ret = vnd_ioctl_settxbuf(vdp, arg, mode);
+ break;
+ case VND_IOC_GETMAXBUF:
+ if (!(mode & FREAD)) {
+ ret = EBADF;
+ break;
+ }
+ if (crgetzoneid(credp) != GLOBAL_ZONEID) {
+ ret = EPERM;
+ break;
+ }
+ ret = vnd_ioctl_getmaxbuf(vdp, arg, mode);
+ break;
+ case VND_IOC_GETMINTU:
+ if (!(mode & FREAD)) {
+ ret = EBADF;
+ break;
+ }
+ ret = vnd_ioctl_gettu(vdp, arg, mode, B_TRUE);
+ break;
+ case VND_IOC_GETMAXTU:
+ if (!(mode & FREAD)) {
+ ret = EBADF;
+ break;
+ }
+ ret = vnd_ioctl_gettu(vdp, arg, mode, B_FALSE);
+ break;
+ case VND_IOC_FRAMEIO_READ:
+ if (!(mode & FREAD)) {
+ ret = EBADF;
+ break;
+ }
+ ret = vnd_frameio_read(vdp, arg, mode);
+ break;
+ case VND_IOC_FRAMEIO_WRITE:
+ if (!(mode & FWRITE)) {
+ ret = EBADF;
+ break;
+ }
+ ret = vnd_frameio_write(vdp, arg, mode);
+ break;
+ case VND_IOC_LIST:
+ if (!(mode & FREAD)) {
+ ret = EBADF;
+ break;
+ }
+ ret = vnd_ioctl_list(arg, credp, mode);
+ break;
+ default:
+ ret = ENOTTY;
+ break;
+ }
+
+ vnd_dev_rele(vdp);
+ return (ret);
+}
+
+static int
+vnd_open(dev_t *devp, int flag, int otyp, cred_t *credp)
+{
+ vnd_dev_t *vdp;
+ minor_t m;
+ zoneid_t zid;
+
+ if (flag & (FEXCL | FNDELAY))
+ return (ENOTSUP);
+
+ if (otyp & OTYP_BLK)
+ return (ENOTSUP);
+
+ zid = crgetzoneid(credp);
+ m = getminor(*devp);
+
+ /*
+ * If we have an open of a non-zero instance then we need to look that
+ * up in our list of entries.
+ */
+ if (m != 0) {
+
+ /*
+ * We don't check for rawaccess globally as a user could be
+ * doing a list ioctl on the control node which doesn't require
+ * this privilege.
+ */
+ if (secpolicy_net_rawaccess(credp) != 0)
+ return (EPERM);
+
+ vdp = vnd_dev_lookup(m);
+ if (vdp == NULL)
+ return (ENOENT);
+
+ /*
+ * We need to check to make sure that the user is allowed to
+ * open this node. At this point it should be an attached handle
+ * as that's all we're allowed to access.
+ */
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_LINKED)) {
+ mutex_exit(&vdp->vdd_lock);
+ vnd_dev_rele(vdp);
+ return (ENOENT);
+ }
+
+ if (vdp->vdd_flags & VND_D_ZONE_DYING) {
+ mutex_exit(&vdp->vdd_lock);
+ vnd_dev_rele(vdp);
+ return (ENOENT);
+ }
+
+ if (zid != GLOBAL_ZONEID && zid != vdp->vdd_nsd->vpnd_zid) {
+ mutex_exit(&vdp->vdd_lock);
+ vnd_dev_rele(vdp);
+ return (ENOENT);
+ }
+
+ if ((flag & FEXCL) && (vdp->vdd_flags & VND_D_OPENED)) {
+ mutex_exit(&vdp->vdd_lock);
+ vnd_dev_rele(vdp);
+ return (EBUSY);
+ }
+
+ if (!(vdp->vdd_flags & VND_D_OPENED)) {
+ vdp->vdd_flags |= VND_D_OPENED;
+ vdp->vdd_ref++;
+ DTRACE_VND_REFINC(vdp);
+ }
+ mutex_exit(&vdp->vdd_lock);
+ vnd_dev_rele(vdp);
+
+ return (0);
+ }
+
+ if (flag & FEXCL)
+ return (ENOTSUP);
+
+ /*
+	 * We need to clone ourselves and set up a new state.
+ */
+ vdp = kmem_cache_alloc(vnd_dev_cache, KM_SLEEP);
+ bzero(vdp, sizeof (vnd_dev_t));
+
+ if (ldi_ident_from_dev(*devp, &vdp->vdd_ldiid) != 0) {
+ kmem_cache_free(vnd_dev_cache, vdp);
+ return (EINVAL);
+ }
+
+ vdp->vdd_minor = id_alloc(vnd_minors);
+ mutex_init(&vdp->vdd_lock, NULL, MUTEX_DRIVER, NULL);
+ list_link_init(&vdp->vdd_link);
+ vdp->vdd_ref = 1;
+ *devp = makedevice(getmajor(*devp), vdp->vdd_minor);
+ vdp->vdd_devid = *devp;
+ DTRACE_VND_REFINC(vdp);
+ vdp->vdd_flags |= VND_D_OPENED;
+
+ mutex_enter(&vnd_dev_lock);
+ list_insert_head(&vnd_dev_list, vdp);
+ mutex_exit(&vnd_dev_lock);
+
+ return (0);
+}
+
+static int
+vnd_close(dev_t dev, int flag, int otyp, cred_t *credp)
+{
+ minor_t m;
+ vnd_dev_t *vdp;
+
+ m = getminor(dev);
+ if (m == 0)
+ return (ENXIO);
+
+ vdp = vnd_dev_lookup(m);
+ if (vdp == NULL)
+ return (ENXIO);
+
+ mutex_enter(&vdp->vdd_lock);
+ VERIFY(vdp->vdd_flags & VND_D_OPENED);
+ vdp->vdd_flags &= ~VND_D_OPENED;
+ mutex_exit(&vdp->vdd_lock);
+
+ /* Remove the hold from the previous open. */
+ vnd_dev_rele(vdp);
+
+ /* And now from lookup */
+ vnd_dev_rele(vdp);
+ return (0);
+}
+
+static int
+vnd_read(dev_t dev, struct uio *uiop, cred_t *credp)
+{
+ int nonblock, error = 0;
+ size_t mpsize;
+ vnd_dev_t *vdp;
+ vnd_data_queue_t *vqp;
+ mblk_t *mp = NULL;
+ offset_t u_loffset;
+
+ /*
+ * If we have more than one uio we refuse to do anything. That's for
+ * frameio.
+ */
+ if (uiop->uio_iovcnt > 1)
+ return (EINVAL);
+
+ vdp = vnd_dev_lookup(getminor(dev));
+ if (vdp == NULL)
+ return (ENXIO);
+
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_ATTACHED)) {
+ mutex_exit(&vdp->vdd_lock);
+ vnd_dev_rele(vdp);
+ return (ENXIO);
+ }
+ mutex_exit(&vdp->vdd_lock);
+ nonblock = uiop->uio_fmode & (FNONBLOCK | FNDELAY);
+
+ vqp = &vdp->vdd_str->vns_dq_read;
+ mutex_enter(&vqp->vdq_lock);
+
+ /* Check empty case */
+ if (vqp->vdq_cur == 0) {
+ if (nonblock != 0) {
+ error = EWOULDBLOCK;
+ goto err;
+ }
+ while (vqp->vdq_cur == 0) {
+ if (cv_wait_sig(&vqp->vdq_ready, &vqp->vdq_lock) <= 0) {
+ error = EINTR;
+ goto err;
+ }
+ }
+ }
+
+ /* Ensure our buffer is big enough */
+ mp = vqp->vdq_head;
+ ASSERT(mp != NULL);
+ mpsize = msgsize(mp);
+ if (mpsize > uiop->uio_resid) {
+ error = EOVERFLOW;
+ goto err;
+ }
+
+ u_loffset = uiop->uio_loffset;
+ while (mp != NULL) {
+ if (uiomove(mp->b_rptr, MBLKL(mp), UIO_READ, uiop) != 0) {
+ error = EFAULT;
+ uiop->uio_loffset = u_loffset;
+ mp = NULL;
+ goto err;
+ }
+ mpsize -= MBLKL(mp);
+ mp = mp->b_cont;
+ }
+ ASSERT(mpsize == 0);
+ (void) vnd_dq_pop(vqp, &mp);
+ freemsg(mp);
+err:
+ mutex_exit(&vqp->vdq_lock);
+ vnd_dev_rele(vdp);
+
+ return (error);
+}
+
+static int
+vnd_write(dev_t dev, struct uio *uiop, cred_t *credp)
+{
+ int nonblock, error;
+ vnd_dev_t *vdp;
+ mblk_t *mp;
+ ssize_t iosize, origsize;
+ vnd_data_queue_t *vqp;
+
+ if (uiop->uio_iovcnt > 1)
+ return (EINVAL);
+
+ vdp = vnd_dev_lookup(getminor(dev));
+ if (vdp == NULL)
+ return (ENXIO);
+
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_ATTACHED)) {
+ mutex_exit(&vdp->vdd_lock);
+ vnd_dev_rele(vdp);
+ return (ENXIO);
+ }
+ mutex_exit(&vdp->vdd_lock);
+ nonblock = uiop->uio_fmode & (FNONBLOCK | FNDELAY);
+
+ mutex_enter(&vdp->vdd_str->vns_lock);
+ if (uiop->uio_resid > vdp->vdd_str->vns_maxwrite ||
+ uiop->uio_resid < vdp->vdd_str->vns_minwrite) {
+ mutex_exit(&vdp->vdd_str->vns_lock);
+ vnd_dev_rele(vdp);
+ return (ERANGE);
+ }
+ mutex_exit(&vdp->vdd_str->vns_lock);
+ VERIFY(vdp->vdd_str != NULL);
+
+ /*
+ * Reserve space in the data queue if we can. If we can't, block or
+ * return EAGAIN. If we can, go and squeue_enter.
+ */
+ vqp = &vdp->vdd_str->vns_dq_write;
+ mutex_enter(&vqp->vdq_lock);
+ while (vnd_dq_reserve(vqp, uiop->uio_resid) == 0) {
+ if (nonblock != 0) {
+ mutex_exit(&vqp->vdq_lock);
+ vnd_dev_rele(vdp);
+ return (EAGAIN);
+ }
+ if (cv_wait_sig(&vqp->vdq_ready, &vqp->vdq_lock) <= 0) {
+ mutex_exit(&vqp->vdq_lock);
+ vnd_dev_rele(vdp);
+ return (EINTR);
+ }
+ }
+ mutex_exit(&vqp->vdq_lock);
+
+ /*
+ * Now that we've reserved the space, try to allocate kernel space for
+ * and copy in the block. To take care of all this we use the
+ * strmakedata subroutine for now.
+ */
+ origsize = iosize = uiop->uio_resid;
+ error = strmakedata(&iosize, uiop, vdp->vdd_str->vns_wq->q_stream, 0,
+ &mp);
+
+ /*
+ * strmakedata() will return an error or it may only consume a portion
+ * of the data.
+ */
+ if (error != 0 || uiop->uio_resid != 0) {
+ vnd_dq_unreserve(vqp, origsize);
+ cv_broadcast(&vqp->vdq_ready);
+ pollwakeup(&vdp->vdd_ph, POLLOUT);
+ vnd_dev_rele(vdp);
+ return (ENOSR);
+ }
+
+ gsqueue_enter_one(vdp->vdd_str->vns_squeue, mp,
+ vnd_squeue_tx_append, vdp->vdd_str, GSQUEUE_PROCESS,
+ VND_SQUEUE_TAG_VND_WRITE);
+
+ vnd_dev_rele(vdp);
+ return (0);
+}
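+
+/*
+ * To summarize the read(2)/write(2) semantics implemented above: each read
+ * returns exactly one frame, failing with EOVERFLOW if the caller's buffer
+ * is smaller than that frame, and each write must be exactly one frame
+ * whose size falls within [vns_minwrite, vns_maxwrite], failing with ERANGE
+ * otherwise. With FNONBLOCK set, an empty read queue yields EWOULDBLOCK and
+ * a full write queue yields EAGAIN; otherwise we block interruptibly and
+ * may return EINTR.
+ */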
+
+static int
+vnd_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
+ struct pollhead **phpp)
+{
+ int ready = 0;
+ vnd_dev_t *vdp;
+ vnd_data_queue_t *vqp;
+
+ vdp = vnd_dev_lookup(getminor(dev));
+ if (vdp == NULL)
+ return (ENXIO);
+
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_ATTACHED)) {
+ mutex_exit(&vdp->vdd_lock);
+ vnd_dev_rele(vdp);
+ return (ENXIO);
+ }
+ mutex_exit(&vdp->vdd_lock);
+
+ if ((events & POLLIN) || (events & POLLRDNORM)) {
+ vqp = &vdp->vdd_str->vns_dq_read;
+ mutex_enter(&vqp->vdq_lock);
+ if (vqp->vdq_head != NULL)
+ ready |= events & (POLLIN | POLLRDNORM);
+ mutex_exit(&vqp->vdq_lock);
+ }
+
+ if (events & POLLOUT) {
+ vqp = &vdp->vdd_str->vns_dq_write;
+ mutex_enter(&vqp->vdq_lock);
+ if (vqp->vdq_cur != vqp->vdq_max)
+ ready |= POLLOUT;
+ mutex_exit(&vqp->vdq_lock);
+ }
+
+ if (ready != 0) {
+ *reventsp = ready;
+ vnd_dev_rele(vdp);
+ return (0);
+ }
+
+ *reventsp = 0;
+ if (!anyyet)
+ *phpp = &vdp->vdd_ph;
+
+ vnd_dev_rele(vdp);
+ return (0);
+}
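+
+/*
+ * A consumer polling on a vnd device therefore sees POLLIN/POLLRDNORM when
+ * at least one frame is waiting to be read and POLLOUT while the write
+ * queue has room. A hedged userland sketch:
+ *
+ *	struct pollfd pfd = { vndfd, POLLIN | POLLOUT, 0 };
+ *
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *		a frame can now be read without blocking
+ */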
+
+static void *
+vnd_stack_init(netstackid_t stackid, netstack_t *ns)
+{
+ vnd_pnsd_t *nsp;
+
+ nsp = kmem_cache_alloc(vnd_pnsd_cache, KM_SLEEP);
+ bzero(nsp, sizeof (*nsp));
+ nsp->vpnd_nsid = stackid;
+ nsp->vpnd_zid = netstackid_to_zoneid(stackid);
+ nsp->vpnd_flags = 0;
+ mutex_init(&nsp->vpnd_lock, NULL, MUTEX_DRIVER, NULL);
+ list_create(&nsp->vpnd_dev_list, sizeof (vnd_dev_t),
+ offsetof(vnd_dev_t, vdd_nslink));
+ if (vnd_netinfo_init(nsp) == 0)
+ nsp->vpnd_hooked = B_TRUE;
+
+ mutex_enter(&vnd_dev_lock);
+ list_insert_tail(&vnd_nsd_list, nsp);
+ mutex_exit(&vnd_dev_lock);
+
+ return (nsp);
+}
+
+static void
+vnd_stack_shutdown(netstackid_t stackid, void *arg)
+{
+ vnd_pnsd_t *nsp = arg;
+ vnd_dev_t *vdp;
+
+ ASSERT(nsp != NULL);
+ /*
+	 * After shutdown, no one should be able to find their way to this
+ * netstack again.
+ */
+ mutex_enter(&vnd_dev_lock);
+ list_remove(&vnd_nsd_list, nsp);
+ mutex_exit(&vnd_dev_lock);
+
+ /*
+ * Make sure hooks know that they're going away.
+ */
+ if (nsp->vpnd_hooked == B_TRUE)
+ vnd_netinfo_shutdown(nsp);
+
+ /*
+ * Now we need to go through and notify each zone that they are in
+ * teardown phase. See the big theory statement section on vnd, zones,
+ * netstacks, and sdev for more information about this.
+ */
+ mutex_enter(&nsp->vpnd_lock);
+ nsp->vpnd_flags |= VND_NS_CONDEMNED;
+ for (vdp = list_head(&nsp->vpnd_dev_list); vdp != NULL;
+ vdp = list_next(&nsp->vpnd_dev_list, vdp)) {
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_CONDEMNED))
+ vdp->vdd_flags |= VND_D_ZONE_DYING;
+ mutex_exit(&vdp->vdd_lock);
+ }
+ mutex_exit(&nsp->vpnd_lock);
+
+ /*
+ * Next we remove all the links as we know nothing new can be added to
+	 * the list and that none of the extant devices can obtain additional
+ * links.
+ */
+restart:
+ mutex_enter(&nsp->vpnd_lock);
+ for (vdp = list_head(&nsp->vpnd_dev_list); vdp != NULL;
+ vdp = list_next(&nsp->vpnd_dev_list, vdp)) {
+ mutex_enter(&vdp->vdd_lock);
+ if ((vdp->vdd_flags & VND_D_CONDEMNED) ||
+ !(vdp->vdd_flags & VND_D_LINKED)) {
+ mutex_exit(&vdp->vdd_lock);
+ continue;
+ }
+
+ /*
+ * We drop our lock here and restart afterwards. Note that as
+ * part of unlinking we end up doing a rele of the vnd_dev_t. If
+ * this is the final hold on the vnd_dev_t then it might try and
+ * remove itself. Our locking rules requires not to be holding
+ * any locks when we call any of the rele functions.
+ *
+ * Note that the unlink function requires holders to call into
+ * it with the vnd_dev_t->vdd_lock held and will take care of it
+ * for us. Because we don't have a hold on it, we're done at
+ * this point.
+ */
+ mutex_exit(&nsp->vpnd_lock);
+ /* Forcibly unlink */
+ vnd_dev_unlink(vdp);
+ goto restart;
+ }
+ mutex_exit(&nsp->vpnd_lock);
+}
+
+static void
+vnd_stack_destroy(netstackid_t stackid, void *arg)
+{
+ vnd_pnsd_t *nsp = arg;
+
+ ASSERT(nsp != NULL);
+
+ /*
+ * Now that we've unlinked everything we just have to hang out for
+ * it to finish exiting. Now that it's no longer the kernel itself
+ * that's doing this we just need to wait for our reference count to
+ * equal zero and then we're free. If the global zone is holding open a
+ * reference to a vnd device for another zone, that's bad, but there's
+ * nothing much we can do. See the section on 'vnd, zones, netstacks' in
+ * the big theory statement for more information.
+ */
+ mutex_enter(&nsp->vpnd_lock);
+ while (nsp->vpnd_ref != 0)
+ cv_wait(&nsp->vpnd_ref_change, &nsp->vpnd_lock);
+ mutex_exit(&nsp->vpnd_lock);
+
+ /*
+ * During shutdown we removed ourselves from the list and now we have no
+ * more references so we can safely say that there is nothing left and
+ * destroy everything that we had sitting around.
+ */
+ if (nsp->vpnd_hooked == B_TRUE)
+ vnd_netinfo_fini(nsp);
+
+ mutex_destroy(&nsp->vpnd_lock);
+ list_destroy(&nsp->vpnd_dev_list);
+ kmem_cache_free(vnd_pnsd_cache, nsp);
+}
+
+/*
+ * Convert a node with a name of the form /dev/vnd/zone/%zonename or
+ * /dev/vnd/zone/%zonename/%linkname to the corresponding vnd netstack.
+ */
+static vnd_pnsd_t *
+vnd_sdev_ctx_to_ns(sdev_ctx_t ctx)
+{
+ enum vtype vt;
+ const char *path = sdev_ctx_path(ctx);
+ char *zstart, *dup;
+ size_t duplen;
+ vnd_pnsd_t *nsp;
+
+ vt = sdev_ctx_vtype(ctx);
+ ASSERT(strncmp(path, VND_SDEV_ZROOT, strlen(VND_SDEV_ZROOT)) == 0);
+
+ if (vt == VDIR) {
+ zstart = strrchr(path, '/');
+ ASSERT(zstart != NULL);
+ zstart++;
+ return (vnd_nsd_lookup_by_zonename(zstart));
+ }
+
+ ASSERT(vt == VCHR);
+
+ dup = strdup(path);
+ duplen = strlen(dup) + 1;
+ zstart = strrchr(dup, '/');
+ *zstart = '\0';
+ zstart--;
+ zstart = strrchr(dup, '/');
+ zstart++;
+ nsp = vnd_nsd_lookup_by_zonename(zstart);
+ kmem_free(dup, duplen);
+
+ return (nsp);
+}
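+
+/*
+ * For example (zone and link names hypothetical): given the VDIR path
+ * /dev/vnd/zone/myzone we look up the netstack for "myzone" directly. Given
+ * the VCHR path /dev/vnd/zone/myzone/net0 we first strip the trailing
+ * "/net0" component from a private copy of the path and then look up
+ * "myzone" the same way.
+ */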
+
+static sdev_plugin_validate_t
+vnd_sdev_validate_dir(sdev_ctx_t ctx)
+{
+ vnd_pnsd_t *nsp;
+
+ if (strcmp(sdev_ctx_path(ctx), VND_SDEV_ROOT) == 0)
+ return (SDEV_VTOR_VALID);
+
+ if (strcmp(sdev_ctx_path(ctx), VND_SDEV_ZROOT) == 0) {
+ ASSERT(getzoneid() == GLOBAL_ZONEID);
+ ASSERT(sdev_ctx_flags(ctx) & SDEV_CTX_GLOBAL);
+ return (SDEV_VTOR_VALID);
+ }
+
+ nsp = vnd_sdev_ctx_to_ns(ctx);
+ if (nsp == NULL)
+ return (SDEV_VTOR_INVALID);
+ vnd_nsd_rele(nsp);
+
+ return (SDEV_VTOR_VALID);
+}
+
+static sdev_plugin_validate_t
+vnd_sdev_validate(sdev_ctx_t ctx)
+{
+ enum vtype vt;
+ dev_t dev;
+ vnd_dev_t *vdp;
+
+ vt = sdev_ctx_vtype(ctx);
+ if (vt == VDIR)
+ return (vnd_sdev_validate_dir(ctx));
+ ASSERT(vt == VCHR);
+
+ if (strcmp("ctl", sdev_ctx_name(ctx)) == 0)
+ return (SDEV_VTOR_VALID);
+
+ dev = (uintptr_t)sdev_ctx_vtype_data(ctx);
+ vdp = vnd_dev_lookup(getminor(dev));
+ if (vdp == NULL)
+ return (SDEV_VTOR_STALE);
+
+ mutex_enter(&vdp->vdd_lock);
+ if (!(vdp->vdd_flags & VND_D_LINKED) ||
+ (vdp->vdd_flags & (VND_D_CONDEMNED | VND_D_ZONE_DYING))) {
+ mutex_exit(&vdp->vdd_lock);
+ vnd_dev_rele(vdp);
+ return (SDEV_VTOR_STALE);
+ }
+
+ if (strcmp(sdev_ctx_name(ctx), vdp->vdd_lname) != 0) {
+ mutex_exit(&vdp->vdd_lock);
+ vnd_dev_rele(vdp);
+ return (SDEV_VTOR_STALE);
+ }
+
+ mutex_exit(&vdp->vdd_lock);
+ vnd_dev_rele(vdp);
+ return (SDEV_VTOR_VALID);
+}
+
+/*
+ * This function is a no-op. sdev never has holds on our devices as they can go
+ * away at any time and specfs has to deal with that fact.
+ */
+static void
+vnd_sdev_inactive(sdev_ctx_t ctx)
+{
+}
+
+static int
+vnd_sdev_fillzone(vnd_pnsd_t *nsp, sdev_ctx_t ctx)
+{
+ int ret;
+ vnd_dev_t *vdp;
+
+ mutex_enter(&nsp->vpnd_lock);
+ for (vdp = list_head(&nsp->vpnd_dev_list); vdp != NULL;
+ vdp = list_next(&nsp->vpnd_dev_list, vdp)) {
+ mutex_enter(&vdp->vdd_lock);
+ if ((vdp->vdd_flags & VND_D_LINKED) &&
+ !(vdp->vdd_flags & (VND_D_CONDEMNED | VND_D_ZONE_DYING))) {
+ ret = sdev_plugin_mknod(ctx, vdp->vdd_lname, S_IFCHR,
+ vdp->vdd_devid);
+ if (ret != 0 && ret != EEXIST) {
+ mutex_exit(&vdp->vdd_lock);
+ mutex_exit(&nsp->vpnd_lock);
+ /* Our caller owns the hold on nsp and will rele it. */
+ return (ret);
+ }
+ }
+ mutex_exit(&vdp->vdd_lock);
+ }
+ mutex_exit(&nsp->vpnd_lock);
+
+ return (0);
+}
+
+static int
+vnd_sdev_filldir_root(sdev_ctx_t ctx)
+{
+ zoneid_t zid;
+ vnd_pnsd_t *nsp;
+ int ret;
+
+ zid = getzoneid();
+ nsp = vnd_nsd_lookup(zoneid_to_netstackid(zid));
+ ASSERT(nsp != NULL);
+ ret = vnd_sdev_fillzone(nsp, ctx);
+ vnd_nsd_rele(nsp);
+ if (ret != 0)
+ return (ret);
+
+ /*
+ * Checking the zone id is not sufficient as the global zone could be
+ * reaching down into a non-global zone's mounted /dev.
+ */
+ if (zid == GLOBAL_ZONEID && (sdev_ctx_flags(ctx) & SDEV_CTX_GLOBAL)) {
+ ret = sdev_plugin_mkdir(ctx, "zone");
+ if (ret != 0 && ret != EEXIST)
+ return (ret);
+ }
+
+ /*
+ * Always add the control node. There's no need to hold a reference
+ * on it since it always exists and is always what we clone from.
+ */
+ ret = sdev_plugin_mknod(ctx, "ctl", S_IFCHR,
+ makedevice(ddi_driver_major(vnd_dip), 0));
+ if (ret != 0 && ret != EEXIST)
+ return (ret);
+
+ return (0);
+}
+
+static int
+vnd_sdev_filldir_zroot(sdev_ctx_t ctx)
+{
+ int ret;
+ vnd_pnsd_t *nsp;
+ zone_t *zonep;
+
+ ASSERT(getzoneid() == GLOBAL_ZONEID);
+ ASSERT(sdev_ctx_flags(ctx) & SDEV_CTX_GLOBAL);
+
+ mutex_enter(&vnd_dev_lock);
+ for (nsp = list_head(&vnd_nsd_list); nsp != NULL;
+ nsp = list_next(&vnd_nsd_list, nsp)) {
+ mutex_enter(&nsp->vpnd_lock);
+ if (list_is_empty(&nsp->vpnd_dev_list)) {
+ mutex_exit(&nsp->vpnd_lock);
+ continue;
+ }
+ mutex_exit(&nsp->vpnd_lock);
+ zonep = zone_find_by_id(nsp->vpnd_zid);
+ /*
+ * The zone must be in the midst of being torn down, so skip it.
+ */
+ if (zonep == NULL)
+ continue;
+ ret = sdev_plugin_mkdir(ctx, zonep->zone_name);
+ zone_rele(zonep);
+ if (ret != 0 && ret != EEXIST) {
+ mutex_exit(&vnd_dev_lock);
+ return (ret);
+ }
+ }
+ mutex_exit(&vnd_dev_lock);
+ return (0);
+}
+
+static int
+vnd_sdev_filldir(sdev_ctx_t ctx)
+{
+ int ret;
+ vnd_pnsd_t *nsp;
+
+ ASSERT(sdev_ctx_vtype(ctx) == VDIR);
+ if (strcmp(VND_SDEV_ROOT, sdev_ctx_path(ctx)) == 0)
+ return (vnd_sdev_filldir_root(ctx));
+
+ if (strcmp(VND_SDEV_ZROOT, sdev_ctx_path(ctx)) == 0)
+ return (vnd_sdev_filldir_zroot(ctx));
+
+ ASSERT(strncmp(VND_SDEV_ZROOT, sdev_ctx_path(ctx),
+ strlen(VND_SDEV_ZROOT)) == 0);
+ nsp = vnd_sdev_ctx_to_ns(ctx);
+ if (nsp == NULL)
+ return (0);
+
+ ret = vnd_sdev_fillzone(nsp, ctx);
+ vnd_nsd_rele(nsp);
+
+ return (ret);
+}
+
+static sdev_plugin_ops_t vnd_sdev_ops = {
+ SDEV_PLUGIN_VERSION,
+ SDEV_PLUGIN_SUBDIR,
+ vnd_sdev_validate,
+ vnd_sdev_filldir,
+ vnd_sdev_inactive
+};
+
+static int
+vnd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int errp = 0;
+
+ if (cmd != DDI_ATTACH)
+ return (DDI_FAILURE);
+
+ /*
+ * Only allow one instance.
+ */
+ if (vnd_dip != NULL)
+ return (DDI_FAILURE);
+
+ vnd_dip = dip;
+ if (ddi_create_minor_node(vnd_dip, "vnd", S_IFCHR, 0, DDI_PSEUDO, 0) !=
+ DDI_SUCCESS) {
+ vnd_dip = NULL;
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
+ DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) {
+ ddi_remove_minor_node(vnd_dip, NULL);
+ vnd_dip = NULL;
+ return (DDI_FAILURE);
+ }
+
+ vnd_sdev_hdl = sdev_plugin_register(VND_SDEV_NAME, &vnd_sdev_ops,
+ &errp);
+ if (vnd_sdev_hdl == NULL) {
+ ddi_remove_minor_node(vnd_dip, NULL);
+ ddi_prop_remove_all(vnd_dip);
+ vnd_dip = NULL;
+ return (DDI_FAILURE);
+ }
+
+ vnd_sqset = gsqueue_set_create(GSQUEUE_DEFAULT_WAIT,
+ GSQUEUE_DEFAULT_PRIORITY);
+
+ return (DDI_SUCCESS);
+}
+
+static int
+vnd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ if (cmd != DDI_DETACH)
+ return (DDI_FAILURE);
+
+ mutex_enter(&vnd_dev_lock);
+ if (!list_is_empty(&vnd_dev_list)) {
+ mutex_exit(&vnd_dev_lock);
+ return (DDI_FAILURE);
+ }
+ mutex_exit(&vnd_dev_lock);
+
+ return (DDI_FAILURE);
+}
+
+static int
+vnd_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
+{
+ int error;
+
+ switch (cmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ *result = (void *)vnd_dip;
+ error = DDI_SUCCESS;
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+ *result = (void *)0;
+ error = DDI_SUCCESS;
+ break;
+ default:
+ error = DDI_FAILURE;
+ break;
+ }
+ return (error);
+}
+
+static void
+vnd_ddi_fini(void)
+{
+ netstack_unregister(NS_VND);
+ if (vnd_taskq != NULL)
+ taskq_destroy(vnd_taskq);
+ if (vnd_str_cache != NULL)
+ kmem_cache_destroy(vnd_str_cache);
+ if (vnd_dev_cache != NULL)
+ kmem_cache_destroy(vnd_dev_cache);
+ if (vnd_pnsd_cache != NULL)
+ kmem_cache_destroy(vnd_pnsd_cache);
+ if (vnd_minors != NULL)
+ id_space_destroy(vnd_minors);
+ if (vnd_list_init != 0) {
+ list_destroy(&vnd_nsd_list);
+ list_destroy(&vnd_dev_list);
+ mutex_destroy(&vnd_dev_lock);
+ vnd_list_init = 0;
+ }
+ frameio_fini();
+}
+
+static int
+vnd_ddi_init(void)
+{
+ if (frameio_init() != 0)
+ return (DDI_FAILURE);
+
+ vnd_str_cache = kmem_cache_create("vnd_str_cache", sizeof (vnd_str_t),
+ 0, NULL, NULL, NULL, NULL, NULL, 0);
+ if (vnd_str_cache == NULL) {
+ frameio_fini();
+ return (DDI_FAILURE);
+ }
+ vnd_dev_cache = kmem_cache_create("vnd_dev_cache", sizeof (vnd_dev_t),
+ 0, NULL, NULL, NULL, NULL, NULL, 0);
+ if (vnd_dev_cache == NULL) {
+ kmem_cache_destroy(vnd_str_cache);
+ frameio_fini();
+ return (DDI_FAILURE);
+ }
+ vnd_pnsd_cache = kmem_cache_create("vnd_pnsd_cache",
+ sizeof (vnd_pnsd_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
+ if (vnd_pnsd_cache == NULL) {
+ kmem_cache_destroy(vnd_dev_cache);
+ kmem_cache_destroy(vnd_str_cache);
+ frameio_fini();
+ return (DDI_FAILURE);
+ }
+
+ vnd_taskq = taskq_create_instance("vnd", -1, 1, minclsyspri, 0, 0, 0);
+ if (vnd_taskq == NULL) {
+ kmem_cache_destroy(vnd_pnsd_cache);
+ kmem_cache_destroy(vnd_dev_cache);
+ kmem_cache_destroy(vnd_str_cache);
+ frameio_fini();
+ return (DDI_FAILURE);
+ }
+
+ vnd_minors = id_space_create("vnd_minors", 1, INT32_MAX);
+ if (vnd_minors == NULL) {
+ taskq_destroy(vnd_taskq);
+ kmem_cache_destroy(vnd_pnsd_cache);
+ kmem_cache_destroy(vnd_dev_cache);
+ kmem_cache_destroy(vnd_str_cache);
+ frameio_fini();
+ return (DDI_FAILURE);
+ }
+
+ mutex_init(&vnd_dev_lock, NULL, MUTEX_DRIVER, NULL);
+ list_create(&vnd_dev_list, sizeof (vnd_dev_t),
+ offsetof(vnd_dev_t, vdd_link));
+ list_create(&vnd_nsd_list, sizeof (vnd_pnsd_t),
+ offsetof(vnd_pnsd_t, vpnd_link));
+ vnd_list_init = 1;
+
+ netstack_register(NS_VND, vnd_stack_init, vnd_stack_shutdown,
+ vnd_stack_destroy);
+
+ return (DDI_SUCCESS);
+}
+
+static struct module_info vnd_minfo = {
+ 0, /* module id */
+ "vnd", /* module name */
+ 1, /* smallest packet size */
+ INFPSZ, /* largest packet size (infinite) */
+ 1, /* high watermark */
+ 0 /* low watermark */
+};
+
+static struct qinit vnd_r_qinit = {
+ vnd_s_rput,
+ NULL,
+ vnd_s_open,
+ vnd_s_close,
+ NULL,
+ &vnd_minfo,
+ NULL
+};
+
+static struct qinit vnd_w_qinit = {
+ vnd_s_wput,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ &vnd_minfo,
+ NULL
+};
+
+static struct streamtab vnd_strtab = {
+ &vnd_r_qinit,
+ &vnd_w_qinit,
+ NULL,
+ NULL
+};
+
+
+static struct cb_ops vnd_cb_ops = {
+ vnd_open, /* open */
+ vnd_close, /* close */
+ nulldev, /* strategy */
+ nulldev, /* print */
+ nodev, /* dump */
+ vnd_read, /* read */
+ vnd_write, /* write */
+ vnd_ioctl, /* ioctl */
+ nodev, /* devmap */
+ nodev, /* mmap */
+ nodev, /* segmap */
+ vnd_chpoll, /* poll */
+ ddi_prop_op, /* cb_prop_op */
+ NULL, /* streamtab */
+ D_MP /* Driver compatibility flag */
+};
+
+static struct dev_ops vnd_dev_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* refcnt */
+ vnd_info, /* get_dev_info */
+ nulldev, /* identify */
+ nulldev, /* probe */
+ vnd_attach, /* attach */
+ vnd_detach, /* detach */
+ nodev, /* reset */
+ &vnd_cb_ops, /* driver operations */
+ NULL, /* bus operations */
+ nodev, /* dev power */
+ ddi_quiesce_not_needed /* quiesce */
+};
+
+static struct modldrv vnd_modldrv = {
+ &mod_driverops,
+ "Virtual Networking Datapath Driver",
+ &vnd_dev_ops
+};
+
+static struct fmodsw vnd_fmodfsw = {
+ "vnd",
+ &vnd_strtab,
+ D_NEW | D_MP
+};
+
+static struct modlstrmod vnd_modlstrmod = {
+ &mod_strmodops,
+ "Virtual Networking Datapath Driver",
+ &vnd_fmodfsw
+};
+
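+/*
+ * vnd registers itself both as a device driver (vnd_modldrv, which backs
+ * the /dev/vnd character nodes) and as a STREAMS module (vnd_modlstrmod,
+ * which allows it to be pushed onto a DLPI stream); hence the two linkage
+ * structures below.
+ */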
+static struct modlinkage vnd_modlinkage = {
+ MODREV_1,
+ &vnd_modldrv,
+ &vnd_modlstrmod,
+ NULL
+};
+
+int
+_init(void)
+{
+ int error;
+
+ /*
+ * We need to do all of our global initialization in _init as opposed to
+ * attach and detach. The problem is that because vnd can be used from a
+ * stream context while being detached, we cannot rely on attach having
+ * run to create everything. Alas, it all goes in _init, just like our
+ * friend ip.
+ */
+ if ((error = vnd_ddi_init()) != DDI_SUCCESS)
+ return (error);
+ error = mod_install(&vnd_modlinkage);
+ if (error != 0)
+ vnd_ddi_fini();
+ return (error);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&vnd_modlinkage, modinfop));
+}
+
+int
+_fini(void)
+{
+ int error;
+
+ error = mod_remove(&vnd_modlinkage);
+ if (error == 0)
+ vnd_ddi_fini();
+ return (error);
+}
diff --git a/usr/src/uts/common/io/vnd/vnd.conf b/usr/src/uts/common/io/vnd/vnd.conf
new file mode 100644
index 0000000000..65872e1ddf
--- /dev/null
+++ b/usr/src/uts/common/io/vnd/vnd.conf
@@ -0,0 +1,16 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2014, Joyent, Inc. All rights reserved.
+#
+
+name="vnd" parent="pseudo" instance=0;
diff --git a/usr/src/uts/common/io/vnic/vnic_dev.c b/usr/src/uts/common/io/vnic/vnic_dev.c
index 3cacbe395b..3cb7e7660a 100644
--- a/usr/src/uts/common/io/vnic/vnic_dev.c
+++ b/usr/src/uts/common/io/vnic/vnic_dev.c
@@ -53,6 +53,7 @@
#include <sys/vlan.h>
#include <sys/vnic.h>
#include <sys/vnic_impl.h>
+#include <sys/mac_impl.h>
#include <sys/mac_flow_impl.h>
#include <inet/ip_impl.h>
@@ -369,6 +370,7 @@ vnic_dev_create(datalink_id_t vnic_id, datalink_id_t linkid,
bzero(vnic, sizeof (*vnic));
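+ /*
+ * vn_ls caches the last link state reported upward so that the notify
+ * callback and the _linkstate property paths see a consistent view; it
+ * starts out unknown.
+ */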
+ vnic->vn_ls = LINK_STATE_UNKNOWN;
vnic->vn_id = vnic_id;
vnic->vn_link_id = linkid;
vnic->vn_vrid = vrid;
@@ -579,11 +581,12 @@ vnic_dev_create(datalink_id_t vnic_id, datalink_id_t linkid,
vnic->vn_enabled = B_TRUE;
if (is_anchor) {
- mac_link_update(vnic->vn_mh, LINK_STATE_UP);
+ vnic->vn_ls = LINK_STATE_UP;
} else {
- mac_link_update(vnic->vn_mh,
- mac_client_stat_get(vnic->vn_mch, MAC_STAT_LINK_STATE));
+ vnic->vn_ls = mac_client_stat_get(vnic->vn_mch,
+ MAC_STAT_LINK_STATE);
}
+ mac_link_update(vnic->vn_mh, vnic->vn_ls);
rw_exit(&vnic_lock);
@@ -1072,6 +1075,18 @@ vnic_m_setprop(void *m_driver, const char *pr_name, mac_prop_id_t pr_num,
err = mac_maxsdu_update(vn->vn_mh, mtu);
break;
}
+ case MAC_PROP_VN_PROMISC_FILTERED: {
+ boolean_t filtered;
+
+ if (pr_valsize < sizeof (filtered)) {
+ err = EINVAL;
+ break;
+ }
+
+ bcopy(pr_val, &filtered, sizeof (filtered));
+ mac_set_promisc_filtered(vn->vn_mch, filtered);
+ break;
+ }
case MAC_PROP_SECONDARY_ADDRS: {
mac_secondary_addr_t msa;
@@ -1079,6 +1094,34 @@ vnic_m_setprop(void *m_driver, const char *pr_name, mac_prop_id_t pr_num,
err = vnic_set_secondary_macs(vn, &msa);
break;
}
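+ /*
+ * "_linkstate" is a private property that lets an administrator drive
+ * the link state of an anchor VNIC (one with no underlying link);
+ * e.g., hypothetically, dladm set-linkprop -p _linkstate=1 <vnic>.
+ */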
+ case MAC_PROP_PRIVATE: {
+ long val, i;
+ const char *v;
+
+ if (vn->vn_link_id != DATALINK_INVALID_LINKID ||
+ strcmp(pr_name, "_linkstate") != 0) {
+ err = ENOTSUP;
+ break;
+ }
+
+ for (v = pr_val, i = 0; i < pr_valsize; i++, v++) {
+ if (*v == '\0')
+ break;
+ }
+ if (i == pr_valsize) {
+ err = EINVAL;
+ break;
+ }
+
+ (void) ddi_strtol(pr_val, (char **)NULL, 0, &val);
+ if (val != LINK_STATE_UP && val != LINK_STATE_DOWN) {
+ err = EINVAL;
+ break;
+ }
+ vn->vn_ls = val;
+ mac_link_update(vn->vn_mh, vn->vn_ls);
+ break;
+ }
default:
err = ENOTSUP;
break;
@@ -1093,11 +1136,29 @@ vnic_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
{
vnic_t *vn = arg;
int ret = 0;
+ boolean_t out;
switch (pr_num) {
+ case MAC_PROP_VN_PROMISC_FILTERED:
+ out = mac_get_promisc_filtered(vn->vn_mch);
+ ASSERT(pr_valsize >= sizeof (boolean_t));
+ bcopy(&out, pr_val, sizeof (boolean_t));
+ break;
case MAC_PROP_SECONDARY_ADDRS:
ret = vnic_get_secondary_macs(vn, pr_valsize, pr_val);
break;
+ case MAC_PROP_PRIVATE:
+ if (vn->vn_link_id != DATALINK_INVALID_LINKID) {
+ ret = EINVAL;
+ break;
+ }
+
+ if (strcmp(pr_name, "_linkstate") != 0) {
+ ret = EINVAL;
+ break;
+ }
+ (void) snprintf(pr_val, pr_valsize, "%d", vn->vn_ls);
+ break;
default:
ret = ENOTSUP;
break;
@@ -1107,7 +1168,8 @@ vnic_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
}
/* ARGSUSED */
-static void vnic_m_propinfo(void *m_driver, const char *pr_name,
+static void
+vnic_m_propinfo(void *m_driver, const char *pr_name,
mac_prop_id_t pr_num, mac_prop_info_handle_t prh)
{
vnic_t *vn = m_driver;
@@ -1150,6 +1212,18 @@ static void vnic_m_propinfo(void *m_driver, const char *pr_name,
mac_perim_exit(mph);
}
break;
+ case MAC_PROP_PRIVATE:
+ if (vn->vn_link_id != DATALINK_INVALID_LINKID)
+ break;
+
+ if (strcmp(pr_name, "_linkstate") == 0) {
+ char buf[16];
+
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
+ (void) snprintf(buf, sizeof (buf), "%d", vn->vn_ls);
+ mac_prop_info_set_default_str(prh, buf);
+ }
+ break;
}
}
@@ -1222,8 +1296,9 @@ vnic_notify_cb(void *arg, mac_notify_type_t type)
break;
case MAC_NOTE_LINK:
- mac_link_update(vnic->vn_mh,
- mac_client_stat_get(vnic->vn_mch, MAC_STAT_LINK_STATE));
+ vnic->vn_ls = mac_client_stat_get(vnic->vn_mch,
+ MAC_STAT_LINK_STATE);
+ mac_link_update(vnic->vn_mh, vnic->vn_ls);
break;
default:
diff --git a/usr/src/uts/common/io/vscan/vscan_svc.c b/usr/src/uts/common/io/vscan/vscan_svc.c
index a9817f571f..92eb0901c2 100644
--- a/usr/src/uts/common/io/vscan/vscan_svc.c
+++ b/usr/src/uts/common/io/vscan/vscan_svc.c
@@ -22,6 +22,7 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright (c) 2015, Joyent, Inc.
*/
#include <sys/stat.h>
@@ -461,7 +462,7 @@ vscan_svc_scan_file(vnode_t *vp, cred_t *cr, int async)
boolean_t allow;
clock_t timeout, time_left;
- if ((vp == NULL) || (vp->v_path == NULL) || cr == NULL)
+ if ((vp == NULL) || (vp->v_path == vn_vpath_empty) || cr == NULL)
return (0);
DTRACE_PROBE2(vscan__scan__file, char *, vp->v_path, int, async);
@@ -1080,7 +1081,6 @@ vscan_svc_exempt_file(vnode_t *vp, boolean_t *allow)
struct vattr attr;
ASSERT(vp != NULL);
- ASSERT(vp->v_path != NULL);
attr.va_mask = AT_SIZE;
diff --git a/usr/src/uts/common/io/zfd.c b/usr/src/uts/common/io/zfd.c
new file mode 100644
index 0000000000..2da310ab8d
--- /dev/null
+++ b/usr/src/uts/common/io/zfd.c
@@ -0,0 +1,1154 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2016 Joyent, Inc. All rights reserved.
+ */
+
+/*
+ * Zone File Descriptor Driver.
+ *
+ * This driver is derived from the zcons driver which is in turn derived from
+ * the pts/ptm drivers. The purpose is to expose file descriptors within the
+ * zone which are connected to zoneadmd and used for logging or an interactive
+ * connection to a process within the zone.
+ *
+ * Its implementation is straightforward. Each instance of the driver
+ * represents a global-zone/local-zone pair. Unlike the zcons device, zoneadmd
+ * uses these devices unidirectionally to provide stdin, stdout and stderr to
+ * the process within the zone.
+ *
+ * Instances of zfd are onlined as children of /pseudo/zfdnex@2/ by zoneadmd,
+ * using the devctl framework; thus the driver does not need to maintain any
+ * sort of "admin" node.
+ *
+ * The driver shuttles I/O from master side to slave side and back. In a break
+ * from the pts/ptm semantics, if one side is not open, I/O directed towards
+ * it will simply be discarded. This is so that if zoneadmd is not holding the
+ * master side fd open (i.e. it has died somehow), processes in the zone do not
+ * experience any errors and I/O to the fd does not cause the process to hang.
+ *
+ * The driver can also act as a multiplexer so that data written to the
+ * slave side within the zone is also redirected back to another zfd device
+ * inside the zone for consumption (i.e. it can be read). The intention is
+ * that a logging process within the zone can consume data that is being
+ * written by an application onto the primary stream. This is essentially
+ * a tee off of the primary stream into a log stream. This tee can also be
+ * configured to be flow controlled via an ioctl. Flow control happens on the
+ * primary stream and is used to ensure that the log stream receives all of
+ * the messages from the primary stream when consumption of data from the
+ * log stream falls behind. Configuring for flow control implies that the
+ * application writing to the primary stream will be blocked when the log
+ * consumer gets behind. Note that closing the log stream (e.g. when the zone
+ * halts) will cause the loss of all messages queued in the stream.
+ *
+ * The zone's zfd device configuration is driven by zoneadmd and a zone mode.
+ * The mode, which is controlled by the zone attribute "zlog-mode", is somewhat
+ * of a misnomer since its purpose has evolved. The attribute can have a
+ * variety of values, but the lowest two positions are used to control how many
+ * zfd devices are created inside the zone and if the primary stream is a tty.
+ *
+ * Here is a summary of how the 4 modes control what zfd devices are created
+ * and how they're used:
+ *
+ * t-: 1 stdio zdev (0) configured as a tty
+ * --: 3 stdio zdevs (0, 1, 2), not configured as a tty
+ * tn: 1 stdio zdev (0) configured as a tty, 1 additional zdev (1)
+ * -n: 3 stdio zdevs (0, 1, 2), not tty, 2 additional zdevs (3, 4)
+ *
+ * With the 't' flag set, stdin/out/err is multiplexed onto a single full-duplex
+ * stream which is configured as a tty. That is, ptem, ldterm and ttycompat are
+ * autopushed onto the stream when the slave side is opened. There is only a
+ * single zfd dev (0) needed for the primary stream.
+ *
+ * When the 'n' flag is set, it is assumed that output logging will be done
+ * within the zone itself. In this configuration, 1 or 2 additional zfd
+ * devices, depending on the tty mode ('t' flag), are created within the zone.
+ * An application can then configure the zfd streams driver into a
+ * multiplexer. Output from the stdout/stderr zfd(s) will be teed into the
+ * corresponding logging zfd(s)
+ * within the zone.
+ *
+ * The following is a diagram of how this works for a '-n' configuration:
+ *
+ *
+ * zoneadmd (for zlogin -I stdout)
+ * GZ: ^
+ * |
+ * --------------------------
+ * ^
+ * NGZ: |
+ * app >1 -> zfd1 -> zfd3 -> logger (for logger to consume app's stdout)
+ *
+ * There would be a similar path for the app's stderr into zfd4 for the logger
+ * to consume stderr.
+ */
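+
+/*
+ * As an illustrative sketch (not part of this driver), an in-zone logger
+ * in the '-n' configuration might wire zfd3 up as the log stream for zfd1
+ * roughly as follows; the /dev/zfd paths and the lack of error handling
+ * are assumptions for brevity:
+ *
+ * struct stat st;
+ * int log_fd = open("/dev/zfd/3", O_RDONLY);
+ *
+ * if (log_fd >= 0 && stat("/dev/zfd/1", &st) == 0) {
+ * (void) ioctl(log_fd, ZFD_MUX, (int)minor(st.st_rdev));
+ * (void) ioctl(log_fd, ZFD_MUX_FLOWCON, 1);
+ * }
+ *
+ * ZFD_MUX is passed the minor number of the primary stream's slave side
+ * via a transparent ioctl; ZFD_MUX_FLOWCON then enables flow control on
+ * the primary stream. See the handlers in zfd_wput() below.
+ */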
+
+#include <sys/types.h>
+#include <sys/cmn_err.h>
+#include <sys/conf.h>
+#include <sys/cred.h>
+#include <sys/ddi.h>
+#include <sys/debug.h>
+#include <sys/devops.h>
+#include <sys/errno.h>
+#include <sys/file.h>
+#include <sys/kstr.h>
+#include <sys/modctl.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/stream.h>
+#include <sys/stropts.h>
+#include <sys/strsun.h>
+#include <sys/sunddi.h>
+#include <sys/sysmacros.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/zfd.h>
+#include <sys/vnode.h>
+#include <sys/fs/snode.h>
+#include <sys/zone.h>
+#include <sys/sdt.h>
+
+static kmutex_t zfd_mux_lock;
+
+static int zfd_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
+static int zfd_attach(dev_info_t *, ddi_attach_cmd_t);
+static int zfd_detach(dev_info_t *, ddi_detach_cmd_t);
+
+static int zfd_open(queue_t *, dev_t *, int, int, cred_t *);
+static int zfd_close(queue_t *, int, cred_t *);
+static void zfd_wput(queue_t *, mblk_t *);
+static void zfd_rsrv(queue_t *);
+static void zfd_wsrv(queue_t *);
+
+/*
+ * The instance number is encoded in the dev_t in the minor number; the lowest
+ * bit of the minor number is used to track the master vs. slave side of the
+ * fd. The rest of the bits in the minor number are the instance.
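+ * For example, instance 3 uses minor number 6 for its master node and
+ * minor number 7 for its slave node.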
+ */
+#define ZFD_MASTER_MINOR 0
+#define ZFD_SLAVE_MINOR 1
+
+#define ZFD_INSTANCE(x) (getminor((x)) >> 1)
+#define ZFD_NODE(x) (getminor((x)) & 0x01)
+
+/*
+ * This macro converts a zfd_state_t pointer to the associated slave minor
+ * node's dev_t.
+ */
+#define ZFD_STATE_TO_SLAVEDEV(x) \
+ (makedevice(ddi_driver_major((x)->zfd_devinfo), \
+ (minor_t)(ddi_get_instance((x)->zfd_devinfo) << 1 | ZFD_SLAVE_MINOR)))
+
+int zfd_debug = 0;
+#define DBG(a) if (zfd_debug) cmn_err(CE_NOTE, a)
+#define DBG1(a, b) if (zfd_debug) cmn_err(CE_NOTE, a, b)
+
+/*
+ * ZFD Pseudo Terminal Module: stream data structure definitions,
+ * based on zcons.
+ */
+static struct module_info zfd_info = {
+ 0x20FD, /* ZOFD - 8445 */
+ "zfd",
+ 0, /* min packet size */
+ INFPSZ, /* max packet size - infinity */
+ 2048, /* high water */
+ 128 /* low water */
+};
+
+static struct qinit zfd_rinit = {
+ NULL,
+ (int (*)()) zfd_rsrv,
+ zfd_open,
+ zfd_close,
+ NULL,
+ &zfd_info,
+ NULL
+};
+
+static struct qinit zfd_winit = {
+ (int (*)()) zfd_wput,
+ (int (*)()) zfd_wsrv,
+ NULL,
+ NULL,
+ NULL,
+ &zfd_info,
+ NULL
+};
+
+static struct streamtab zfd_tab_info = {
+ &zfd_rinit,
+ &zfd_winit,
+ NULL,
+ NULL
+};
+
+#define ZFD_CONF_FLAG (D_MP | D_MTQPAIR | D_MTOUTPERIM | D_MTOCEXCL)
+
+/*
+ * this will define (struct cb_ops cb_zfd_ops) and (struct dev_ops zfd_ops)
+ */
+DDI_DEFINE_STREAM_OPS(zfd_ops, nulldev, nulldev, zfd_attach, zfd_detach, \
+ nodev, zfd_getinfo, ZFD_CONF_FLAG, &zfd_tab_info, \
+ ddi_quiesce_not_needed);
+
+/*
+ * Module linkage information for the kernel.
+ */
+
+static struct modldrv modldrv = {
+ &mod_driverops, /* Type of module (this is a pseudo driver) */
+ "Zone FD driver", /* description of module */
+ &zfd_ops /* driver ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1,
+ &modldrv,
+ NULL
+};
+
+typedef enum {
+ ZFD_NO_MUX,
+ ZFD_PRIMARY_STREAM,
+ ZFD_LOG_STREAM
+} zfd_mux_type_t;
+
+typedef struct zfd_state {
+ dev_info_t *zfd_devinfo; /* instance info */
+ queue_t *zfd_master_rdq; /* GZ read queue */
+ queue_t *zfd_slave_rdq; /* in-zone read queue */
+ int zfd_state; /* ZFD_STATE_MOPEN, ZFD_STATE_SOPEN */
+ int zfd_tty; /* ZFD_MAKETTY - strm mods will push */
+ boolean_t zfd_is_flowcon; /* primary stream flow stopped */
+ boolean_t zfd_allow_flowcon; /* use flow control */
+ zfd_mux_type_t zfd_muxt; /* state type: none, primary, log */
+ struct zfd_state *zfd_inst_pri; /* log state's primary ptr */
+ struct zfd_state *zfd_inst_log; /* primary state's log ptr */
+} zfd_state_t;
+
+#define ZFD_STATE_MOPEN 0x01
+#define ZFD_STATE_SOPEN 0x02
+
+static void *zfd_soft_state;
+
+/*
+ * List of STREAMS modules that are autopushed onto a slave instance when it is
+ * opened, but only if the ZFD_MAKETTY ioctl has first been received by the
+ * master.
+ */
+static char *zfd_mods[] = {
+ "ptem",
+ "ldterm",
+ "ttcompat",
+ NULL
+};
+
+int
+_init(void)
+{
+ int err;
+
+ if ((err = ddi_soft_state_init(&zfd_soft_state, sizeof (zfd_state_t),
+ 0)) != 0) {
+ return (err);
+ }
+
+ mutex_init(&zfd_mux_lock, NULL, MUTEX_DEFAULT, NULL);
+
+ if ((err = mod_install(&modlinkage)) != 0) {
+ ddi_soft_state_fini(&zfd_soft_state);
+ mutex_destroy(&zfd_mux_lock);
+ }
+
+ return (err);
+}
+
+
+int
+_fini(void)
+{
+ int err;
+
+ if ((err = mod_remove(&modlinkage)) != 0) {
+ return (err);
+ }
+
+ ddi_soft_state_fini(&zfd_soft_state);
+ mutex_destroy(&zfd_mux_lock);
+ return (0);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+static int
+zfd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ zfd_state_t *zfds;
+ int instance;
+ char masternm[ZFD_NAME_LEN], slavenm[ZFD_NAME_LEN];
+
+ if (cmd != DDI_ATTACH)
+ return (DDI_FAILURE);
+
+ instance = ddi_get_instance(dip);
+ if (ddi_soft_state_zalloc(zfd_soft_state, instance) != DDI_SUCCESS)
+ return (DDI_FAILURE);
+
+ (void) snprintf(masternm, sizeof (masternm), "%s%d", ZFD_MASTER_NAME,
+ instance);
+ (void) snprintf(slavenm, sizeof (slavenm), "%s%d", ZFD_SLAVE_NAME,
+ instance);
+
+ /*
+ * Create the master and slave minor nodes.
+ */
+ if ((ddi_create_minor_node(dip, slavenm, S_IFCHR,
+ instance << 1 | ZFD_SLAVE_MINOR, DDI_PSEUDO, 0) == DDI_FAILURE) ||
+ (ddi_create_minor_node(dip, masternm, S_IFCHR,
+ instance << 1 | ZFD_MASTER_MINOR, DDI_PSEUDO, 0) == DDI_FAILURE)) {
+ ddi_remove_minor_node(dip, NULL);
+ ddi_soft_state_free(zfd_soft_state, instance);
+ return (DDI_FAILURE);
+ }
+
+ VERIFY((zfds = ddi_get_soft_state(zfd_soft_state, instance)) != NULL);
+ zfds->zfd_devinfo = dip;
+ zfds->zfd_tty = 0;
+ zfds->zfd_muxt = ZFD_NO_MUX;
+ zfds->zfd_inst_log = NULL;
+ return (DDI_SUCCESS);
+}
+
+static int
+zfd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ zfd_state_t *zfds;
+ int instance;
+
+ if (cmd != DDI_DETACH)
+ return (DDI_FAILURE);
+
+ instance = ddi_get_instance(dip);
+ if ((zfds = ddi_get_soft_state(zfd_soft_state, instance)) == NULL)
+ return (DDI_FAILURE);
+
+ if ((zfds->zfd_state & ZFD_STATE_MOPEN) ||
+ (zfds->zfd_state & ZFD_STATE_SOPEN)) {
+ DBG1("zfd_detach: device (dip=%p) still open\n", (void *)dip);
+ return (DDI_FAILURE);
+ }
+
+ ddi_remove_minor_node(dip, NULL);
+ ddi_soft_state_free(zfd_soft_state, instance);
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * zfd_getinfo()
+ * getinfo(9e) entrypoint.
+ */
+/*ARGSUSED*/
+static int
+zfd_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
+{
+ zfd_state_t *zfds;
+ int instance = ZFD_INSTANCE((dev_t)arg);
+
+ switch (infocmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ if ((zfds = ddi_get_soft_state(zfd_soft_state,
+ instance)) == NULL)
+ return (DDI_FAILURE);
+ *result = zfds->zfd_devinfo;
+ return (DDI_SUCCESS);
+ case DDI_INFO_DEVT2INSTANCE:
+ *result = (void *)(uintptr_t)instance;
+ return (DDI_SUCCESS);
+ }
+ return (DDI_FAILURE);
+}
+
+/*
+ * Return the equivalent queue from the other side of the relationship.
+ * e.g.: given the slave's write queue, return the master's write queue.
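+ * A NULL return means that the other side of the relationship is not
+ * currently open.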
+ */
+static queue_t *
+zfd_switch(queue_t *qp)
+{
+ zfd_state_t *zfds = qp->q_ptr;
+ ASSERT(zfds != NULL);
+
+ if (qp == zfds->zfd_master_rdq)
+ return (zfds->zfd_slave_rdq);
+ else if (OTHERQ(qp) == zfds->zfd_master_rdq && zfds->zfd_slave_rdq
+ != NULL)
+ return (OTHERQ(zfds->zfd_slave_rdq));
+ else if (qp == zfds->zfd_slave_rdq)
+ return (zfds->zfd_master_rdq);
+ else if (OTHERQ(qp) == zfds->zfd_slave_rdq && zfds->zfd_master_rdq
+ != NULL)
+ return (OTHERQ(zfds->zfd_master_rdq));
+ else
+ return (NULL);
+}
+
+/*
+ * For debugging and outputting messages. Returns the name of the side of
+ * the relationship associated with this queue.
+ */
+static const char *
+zfd_side(queue_t *qp)
+{
+ zfd_state_t *zfds = qp->q_ptr;
+ ASSERT(zfds != NULL);
+
+ if (qp == zfds->zfd_master_rdq ||
+ OTHERQ(qp) == zfds->zfd_master_rdq) {
+ return ("master");
+ }
+ ASSERT(qp == zfds->zfd_slave_rdq || OTHERQ(qp) == zfds->zfd_slave_rdq);
+ return ("slave");
+}
+
+/*ARGSUSED*/
+static int
+zfd_master_open(zfd_state_t *zfds,
+ queue_t *rqp, /* pointer to the read side queue */
+ dev_t *devp, /* pointer to stream tail's dev */
+ int oflag, /* the user open(2) supplied flags */
+ int sflag, /* open state flag */
+ cred_t *credp) /* credentials */
+{
+ mblk_t *mop;
+ struct stroptions *sop;
+
+ /*
+ * Enforce exclusivity on the master side; the only consumer should
+ * be the zoneadmd for the zone.
+ */
+ if ((zfds->zfd_state & ZFD_STATE_MOPEN) != 0)
+ return (EBUSY);
+
+ if ((mop = allocb(sizeof (struct stroptions), BPRI_MED)) == NULL) {
+ DBG("zfd_master_open(): mop allocation failed\n");
+ return (ENOMEM);
+ }
+
+ zfds->zfd_state |= ZFD_STATE_MOPEN;
+
+ /*
+ * q_ptr stores driver private data; stash the soft state data on both
+ * read and write sides of the queue.
+ */
+ WR(rqp)->q_ptr = rqp->q_ptr = zfds;
+ qprocson(rqp);
+
+ /*
+ * Following qprocson(), the master side is fully plumbed into the
+ * STREAM and may send/receive messages. Setting zfds->zfd_master_rdq
+ * will allow the slave to send messages to us (the master).
+ * This cannot occur before qprocson() because the master is not
+ * ready to process them until that point.
+ */
+ zfds->zfd_master_rdq = rqp;
+
+ /*
+ * set up hi/lo water marks on stream head read queue and add
+ * controlling tty as needed.
+ */
+ mop->b_datap->db_type = M_SETOPTS;
+ mop->b_wptr += sizeof (struct stroptions);
+ sop = (struct stroptions *)(void *)mop->b_rptr;
+ if (oflag & FNOCTTY)
+ sop->so_flags = SO_HIWAT | SO_LOWAT;
+ else
+ sop->so_flags = SO_HIWAT | SO_LOWAT | SO_ISTTY;
+ sop->so_hiwat = 512;
+ sop->so_lowat = 256;
+ putnext(rqp, mop);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+zfd_slave_open(zfd_state_t *zfds,
+ queue_t *rqp, /* pointer to the read side queue */
+ dev_t *devp, /* pointer to stream tail's dev */
+ int oflag, /* the user open(2) supplied flags */
+ int sflag, /* open state flag */
+ cred_t *credp) /* credentials */
+{
+ mblk_t *mop;
+ struct stroptions *sop;
+ /*
+ * The slave side can be opened as many times as needed.
+ */
+ if ((zfds->zfd_state & ZFD_STATE_SOPEN) != 0) {
+ ASSERT((rqp != NULL) && (WR(rqp)->q_ptr == zfds));
+ return (0);
+ }
+
+ /* A log stream is read-only */
+ if (zfds->zfd_muxt == ZFD_LOG_STREAM &&
+ (oflag & (FREAD | FWRITE)) != FREAD)
+ return (EINVAL);
+
+ if (zfds->zfd_tty == 1) {
+ major_t major;
+ minor_t minor;
+ minor_t lastminor;
+ uint_t anchorindex;
+
+ /*
+ * Set up sad(7D) so that the necessary STREAMS modules will
+ * be in place. A wrinkle is that 'ptem' must be anchored
+ * in place (see streamio(7i)) because we always want the
+ * fd to have terminal semantics.
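+ * The anchor also prevents code in the zone from simply popping the
+ * modules back off of the stream.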
+ */
+ minor =
+ ddi_get_instance(zfds->zfd_devinfo) << 1 | ZFD_SLAVE_MINOR;
+ major = ddi_driver_major(zfds->zfd_devinfo);
+ lastminor = 0;
+ anchorindex = 1;
+ if (kstr_autopush(SET_AUTOPUSH, &major, &minor, &lastminor,
+ &anchorindex, zfd_mods) != 0) {
+ DBG("zfd_slave_open(): kstr_autopush() failed\n");
+ return (EIO);
+ }
+ }
+
+ if ((mop = allocb(sizeof (struct stroptions), BPRI_MED)) == NULL) {
+ DBG("zfd_slave_open(): mop allocation failed\n");
+ return (ENOMEM);
+ }
+
+ zfds->zfd_state |= ZFD_STATE_SOPEN;
+
+ /*
+ * q_ptr stores driver private data; stash the soft state data on both
+ * read and write sides of the queue.
+ */
+ WR(rqp)->q_ptr = rqp->q_ptr = zfds;
+
+ qprocson(rqp);
+
+ /*
+ * Must follow qprocson(), since we aren't ready to process until then.
+ */
+ zfds->zfd_slave_rdq = rqp;
+
+ /*
+ * set up hi/lo water marks on stream head read queue and add
+ * controlling tty as needed.
+ */
+ mop->b_datap->db_type = M_SETOPTS;
+ mop->b_wptr += sizeof (struct stroptions);
+ sop = (struct stroptions *)(void *)mop->b_rptr;
+ sop->so_flags = SO_HIWAT | SO_LOWAT | SO_ISTTY;
+ sop->so_hiwat = 512;
+ sop->so_lowat = 256;
+ putnext(rqp, mop);
+
+ return (0);
+}
+
+/*
+ * open(9e) entrypoint; checks sflag, and rejects anything out of the ordinary.
+ */
+static int
+zfd_open(queue_t *rqp, /* pointer to the read side queue */
+ dev_t *devp, /* pointer to stream tail's dev */
+ int oflag, /* the user open(2) supplied flags */
+ int sflag, /* open state flag */
+ cred_t *credp) /* credentials */
+{
+ int instance = ZFD_INSTANCE(*devp);
+ int ret;
+ zfd_state_t *zfds;
+
+ if (sflag != 0)
+ return (EINVAL);
+
+ if ((zfds = ddi_get_soft_state(zfd_soft_state, instance)) == NULL)
+ return (ENXIO);
+
+ switch (ZFD_NODE(*devp)) {
+ case ZFD_MASTER_MINOR:
+ ret = zfd_master_open(zfds, rqp, devp, oflag, sflag, credp);
+ break;
+ case ZFD_SLAVE_MINOR:
+ ret = zfd_slave_open(zfds, rqp, devp, oflag, sflag, credp);
+ /*
+ * If we just opened the log stream and flow control has
+ * been enabled, we want to make sure the primary stream can
+ * start flowing.
+ */
+ if (ret == 0 && zfds->zfd_muxt == ZFD_LOG_STREAM &&
+ zfds->zfd_inst_pri->zfd_allow_flowcon) {
+ zfds->zfd_inst_pri->zfd_is_flowcon = B_FALSE;
+ if (zfds->zfd_inst_pri->zfd_master_rdq != NULL)
+ qenable(RD(zfds->zfd_inst_pri->zfd_master_rdq));
+ }
+ break;
+ default:
+ ret = ENXIO;
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * close(9e) entrypoint.
+ */
+/*ARGSUSED1*/
+static int
+zfd_close(queue_t *rqp, int flag, cred_t *credp)
+{
+ queue_t *wqp;
+ mblk_t *bp;
+ zfd_state_t *zfds;
+ major_t major;
+ minor_t minor;
+
+ zfds = (zfd_state_t *)rqp->q_ptr;
+
+ if (rqp == zfds->zfd_master_rdq) {
+ DBG("Closing master side");
+
+ zfds->zfd_master_rdq = NULL;
+ zfds->zfd_state &= ~ZFD_STATE_MOPEN;
+
+ /*
+ * Qenable the slave side write queue so that it can flush
+ * its messages as the master's read queue is going away.
+ */
+ if (zfds->zfd_slave_rdq != NULL) {
+ qenable(WR(zfds->zfd_slave_rdq));
+ }
+
+ qprocsoff(rqp);
+ WR(rqp)->q_ptr = rqp->q_ptr = NULL;
+
+ } else if (rqp == zfds->zfd_slave_rdq) {
+
+ DBG("Closing slave side");
+ zfds->zfd_state &= ~ZFD_STATE_SOPEN;
+ zfds->zfd_slave_rdq = NULL;
+
+ wqp = WR(rqp);
+ while ((bp = getq(wqp)) != NULL) {
+ if (zfds->zfd_master_rdq != NULL)
+ putnext(zfds->zfd_master_rdq, bp);
+ else if (bp->b_datap->db_type == M_IOCTL)
+ miocnak(wqp, bp, 0, 0);
+ else
+ freemsg(bp);
+ }
+
+ /*
+ * Qenable master side write queue so that it can flush its
+ * messages as the slave's read queue is going away.
+ */
+ if (zfds->zfd_master_rdq != NULL)
+ qenable(WR(zfds->zfd_master_rdq));
+
+ /*
+ * Qenable primary stream if necessary.
+ */
+ if (zfds->zfd_muxt == ZFD_LOG_STREAM &&
+ zfds->zfd_inst_pri->zfd_allow_flowcon) {
+ zfds->zfd_inst_pri->zfd_is_flowcon = B_FALSE;
+ if (zfds->zfd_inst_pri->zfd_master_rdq != NULL)
+ qenable(RD(zfds->zfd_inst_pri->zfd_master_rdq));
+ }
+
+ qprocsoff(rqp);
+ WR(rqp)->q_ptr = rqp->q_ptr = NULL;
+
+ if (zfds->zfd_tty == 1) {
+ /*
+ * Clear the sad configuration so that reopening
+ * doesn't fail to set up sad configuration.
+ */
+ major = ddi_driver_major(zfds->zfd_devinfo);
+ minor = ddi_get_instance(zfds->zfd_devinfo) << 1 |
+ ZFD_SLAVE_MINOR;
+ (void) kstr_autopush(CLR_AUTOPUSH, &major, &minor,
+ NULL, NULL, NULL);
+ }
+ }
+
+ return (0);
+}
+
+static void
+handle_mflush(queue_t *qp, mblk_t *mp)
+{
+ mblk_t *nmp;
+ DBG1("M_FLUSH on %s side", zfd_side(qp));
+
+ if (*mp->b_rptr & FLUSHW) {
+ DBG1("M_FLUSH, FLUSHW, %s side", zfd_side(qp));
+ flushq(qp, FLUSHDATA);
+ *mp->b_rptr &= ~FLUSHW;
+ if ((*mp->b_rptr & FLUSHR) == 0) {
+ /*
+ * FLUSHW only. Change to FLUSHR and putnext other side,
+ * then we are done.
+ */
+ *mp->b_rptr |= FLUSHR;
+ if (zfd_switch(RD(qp)) != NULL) {
+ putnext(zfd_switch(RD(qp)), mp);
+ return;
+ }
+ } else if ((zfd_switch(RD(qp)) != NULL) &&
+ (nmp = copyb(mp)) != NULL) {
+ /*
+ * It is a FLUSHRW; we copy the mblk and send
+ * it to the other side, since we still need to use
+ * the mblk in FLUSHR processing, below.
+ */
+ putnext(zfd_switch(RD(qp)), nmp);
+ }
+ }
+
+ if (*mp->b_rptr & FLUSHR) {
+ DBG("qreply(qp) turning FLUSHR around\n");
+ qreply(qp, mp);
+ return;
+ }
+ freemsg(mp);
+}
+
+/*
+ * Evaluate the various conditionals to determine if we're teeing into a log
+ * stream and if the primary stream should be flow controlled. This function
+ * can set the zfd_is_flowcon flag as a side effect.
+ *
+ * When teeing with flow control, we always queue the teed msg here and if
+ * the queue is getting full, we set zfd_is_flowcon. The primary stream will
+ * always queue when zfd_is_flowcon and will also not be served when
+ * zfd_is_flowcon is set. This causes backpressure on the primary stream
+ * until the teed queue can drain.
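+ *
+ * Concretely, the teed queue filling past its high water mark sets
+ * zfd_is_flowcon; once zfd_rsrv() drains it back below its low water
+ * mark, the flag is cleared and the primary stream is re-enabled.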
+ */
+static void
+zfd_tee_handler(zfd_state_t *zfds, unsigned char type, mblk_t *mp)
+{
+ queue_t *log_qp;
+ zfd_state_t *log_zfds;
+ mblk_t *lmp;
+
+ if (zfds->zfd_muxt != ZFD_PRIMARY_STREAM)
+ return;
+
+ if (type != M_DATA)
+ return;
+
+ log_zfds = zfds->zfd_inst_log;
+ if (log_zfds == NULL)
+ return;
+
+ ASSERT(log_zfds->zfd_muxt == ZFD_LOG_STREAM);
+
+ if ((log_zfds->zfd_state & ZFD_STATE_SOPEN) == 0) {
+ if (zfds->zfd_allow_flowcon)
+ zfds->zfd_is_flowcon = B_TRUE;
+ return;
+ }
+
+ /* The zfd_slave_rdq is null until the log dev is opened in the zone */
+ log_qp = RD(log_zfds->zfd_slave_rdq);
+ DTRACE_PROBE2(zfd__tee__check, void *, log_qp, void *, zfds);
+
+ if (!zfds->zfd_allow_flowcon) {
+ /*
+ * We're not supposed to tee with flow control, so if the tee
+ * is full we simply skip teeing into the log stream.
+ */
+ if ((log_qp->q_flag & QFULL) != 0)
+ return;
+ }
+
+ /*
+ * Tee the message into the log stream.
+ */
+ lmp = dupmsg(mp);
+ if (lmp == NULL) {
+ if (zfds->zfd_allow_flowcon)
+ zfds->zfd_is_flowcon = B_TRUE;
+ return;
+ }
+
+ if (log_qp->q_first == NULL && bcanputnext(log_qp, lmp->b_band)) {
+ putnext(log_qp, lmp);
+ } else {
+ if (putq(log_qp, lmp) == 0) {
+ /* The logger queue is full, free the msg. */
+ freemsg(lmp);
+ }
+ /*
+ * If we're supposed to tee with flow control and the tee is
+ * over the high water mark then we want the primary stream to
+ * stop flowing. We'll stop queueing the primary stream after
+ * the log stream has drained.
+ */
+ if (zfds->zfd_allow_flowcon &&
+ log_qp->q_count > log_qp->q_hiwat) {
+ zfds->zfd_is_flowcon = B_TRUE;
+ }
+ }
+}
+
+/*
+ * wput(9E) is symmetric for master and slave sides, so this handles both
+ * without splitting the codepath. (The only exception to this is the
+ * processing of zfd ioctls, which is restricted to the master side.)
+ *
+ * zfd_wput() looks at the other side; if there is no process holding that
+ * side open, it frees the message. This prevents processes from hanging
+ * if no one is holding open the fd. Otherwise, it putnext's high
+ * priority messages, putnext's normal messages if possible, and otherwise
+ * enqueues the messages; in the case that something is enqueued, wsrv(9E)
+ * will take care of eventually shuttling I/O to the other side.
+ *
+ * When configured as a multiplexer, then anything written to the stream
+ * from inside the zone is also teed off to the corresponding log stream
+ * for consumption within the zone (i.e. the log stream can be read, but never
+ * written to, by an application inside the zone).
+ */
+static void
+zfd_wput(queue_t *qp, mblk_t *mp)
+{
+ unsigned char type = mp->b_datap->db_type;
+ zfd_state_t *zfds;
+ struct iocblk *iocbp;
+ boolean_t must_queue = B_FALSE;
+
+ ASSERT(qp->q_ptr);
+
+ DBG1("entering zfd_wput, %s side", zfd_side(qp));
+
+ /*
+ * Process zfd ioctl messages if qp is the master side's write queue.
+ */
+ zfds = (zfd_state_t *)qp->q_ptr;
+
+ if (type == M_IOCTL) {
+ iocbp = (struct iocblk *)(void *)mp->b_rptr;
+
+ switch (iocbp->ioc_cmd) {
+ case ZFD_MAKETTY:
+ zfds->zfd_tty = 1;
+ miocack(qp, mp, 0, 0);
+ return;
+ case ZFD_EOF:
+ if (zfds->zfd_slave_rdq != NULL)
+ (void) putnextctl(zfds->zfd_slave_rdq,
+ M_HANGUP);
+ miocack(qp, mp, 0, 0);
+ return;
+ case ZFD_HAS_SLAVE:
+ if ((zfds->zfd_state & ZFD_STATE_SOPEN) != 0) {
+ miocack(qp, mp, 0, 0);
+ } else {
+ miocack(qp, mp, 0, ENOTTY);
+ }
+ return;
+ case ZFD_MUX: {
+ /*
+ * Set up the multiplexer configuration for the two
+ * streams.
+ *
+ * We expect to be called on the stream that will
+ * become the log stream and be passed one data block
+ * with the minor number of the slave side of the
+ * primary stream.
+ */
+ int to;
+ int instance;
+ zfd_state_t *prim_zfds;
+
+ if (iocbp->ioc_count != TRANSPARENT ||
+ mp->b_cont == NULL) {
+ miocack(qp, mp, 0, EINVAL);
+ return;
+ }
+
+ /* Get the primary slave minor device number */
+ to = *(int *)mp->b_cont->b_rptr;
+ instance = ZFD_INSTANCE(to);
+
+ if ((prim_zfds = ddi_get_soft_state(zfd_soft_state,
+ instance)) == NULL) {
+ miocack(qp, mp, 0, EINVAL);
+ return;
+ }
+
+ /* Disallow changing primary/log once set. */
+ mutex_enter(&zfd_mux_lock);
+ if (zfds->zfd_muxt != ZFD_NO_MUX ||
+ prim_zfds->zfd_muxt != ZFD_NO_MUX) {
+ mutex_exit(&zfd_mux_lock);
+ miocack(qp, mp, 0, EINVAL);
+ return;
+ }
+
+ zfds->zfd_muxt = ZFD_LOG_STREAM;
+ zfds->zfd_inst_pri = prim_zfds;
+ prim_zfds->zfd_muxt = ZFD_PRIMARY_STREAM;
+ prim_zfds->zfd_inst_log = zfds;
+ mutex_exit(&zfd_mux_lock);
+ DTRACE_PROBE2(zfd__mux__link, void *, prim_zfds,
+ void *, zfds);
+
+ miocack(qp, mp, 0, 0);
+ return;
+ }
+ case ZFD_MUX_FLOWCON: {
+ /*
+ * We expect this ioctl to be issued against the
+ * log stream. We don't use the primary stream since
+ * there can be other STREAMS modules pushed onto that
+ * stream which would interfere with the ioctl.
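+ * For example, a consumer might issue
+ * ioctl(log_fd, ZFD_MUX_FLOWCON, 1) (log_fd being a
+ * hypothetical log stream fd) to enable flow control, or an
+ * argument of 0 to disable it.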
+ */
+ int val;
+ zfd_state_t *prim_zfds;
+
+ if (iocbp->ioc_count != TRANSPARENT ||
+ mp->b_cont == NULL) {
+ miocack(qp, mp, 0, EINVAL);
+ return;
+ }
+
+ if (zfds->zfd_muxt != ZFD_LOG_STREAM) {
+ miocack(qp, mp, 0, EINVAL);
+ return;
+ }
+ prim_zfds = zfds->zfd_inst_pri;
+
+ /* Get the flow control setting */
+ val = *(int *)mp->b_cont->b_rptr;
+ if (val != 0 && val != 1) {
+ miocack(qp, mp, 0, EINVAL);
+ return;
+ }
+
+ prim_zfds->zfd_allow_flowcon = (boolean_t)val;
+ if (!prim_zfds->zfd_allow_flowcon)
+ prim_zfds->zfd_is_flowcon = B_FALSE;
+
+ DTRACE_PROBE1(zfd__mux__flowcon, void *, prim_zfds);
+ miocack(qp, mp, 0, 0);
+ return;
+ }
+ default:
+ break;
+ }
+ }
+
+ /* if on the write side, may need to tee */
+ if (zfds->zfd_slave_rdq != NULL && qp == WR(zfds->zfd_slave_rdq)) {
+ /* tee output to any attached log stream */
+ zfd_tee_handler(zfds, type, mp);
+
+ /* high-priority msgs are not subject to flow control */
+ if (zfds->zfd_is_flowcon && type == M_DATA)
+ must_queue = B_TRUE;
+ }
+
+ if (zfd_switch(RD(qp)) == NULL) {
+ DBG1("wput to %s side (no one listening)", zfd_side(qp));
+ switch (type) {
+ case M_FLUSH:
+ handle_mflush(qp, mp);
+ break;
+ case M_IOCTL:
+ miocnak(qp, mp, 0, 0);
+ break;
+ default:
+ freemsg(mp);
+ break;
+ }
+ return;
+ }
+
+ if (type >= QPCTL) {
+ DBG1("(hipri) wput, %s side", zfd_side(qp));
+ switch (type) {
+ case M_READ: /* supposedly from ldterm? */
+ DBG("zfd_wput: tossing M_READ\n");
+ freemsg(mp);
+ break;
+ case M_FLUSH:
+ handle_mflush(qp, mp);
+ break;
+ default:
+ /*
+ * Put this to the other side.
+ */
+ ASSERT(zfd_switch(RD(qp)) != NULL);
+ putnext(zfd_switch(RD(qp)), mp);
+ break;
+ }
+ DBG1("done (hipri) wput, %s side", zfd_side(qp));
+ return;
+ }
+
+ /*
+ * If the primary stream has been stopped for flow control then
+ * enqueue the msg, otherwise only putnext if there isn't already
+ * something in the queue. If we don't do this then things would wind
+ * up out of order.
+ */
+ if (!must_queue && qp->q_first == NULL &&
+ bcanputnext(RD(zfd_switch(qp)), mp->b_band)) {
+ putnext(RD(zfd_switch(qp)), mp);
+ } else {
+ /*
+ * Enqueue the msg; zfd_wsrv() will handle it after zfd_rsrv()
+ * performs the qenable on the proper queue.
+ */
+ (void) putq(qp, mp);
+ }
+
+ DBG1("done wput, %s side", zfd_side(qp));
+}
+
+/*
+ * Read server
+ *
+ * For primary stream:
+ * Under normal execution rsrv(9E) is symmetric for master and slave, so
+ * zfd_rsrv() can handle both without splitting up the codepath. We do this by
+ * enabling the write side of the partner. This triggers the partner to send
+ * messages queued on its write side to this queue's read side.
+ *
+ * For log stream:
+ * Internally we've queued up the msgs that we've teed off to the log stream
+ * so when we're invoked we need to pass these along.
+ */
+static void
+zfd_rsrv(queue_t *qp)
+{
+ zfd_state_t *zfds;
+ zfds = (zfd_state_t *)qp->q_ptr;
+
+ /*
+ * log stream server
+ */
+ if (zfds->zfd_muxt == ZFD_LOG_STREAM && zfds->zfd_slave_rdq != NULL) {
+ queue_t *log_qp;
+ mblk_t *mp;
+
+ log_qp = RD(zfds->zfd_slave_rdq);
+
+ if ((zfds->zfd_state & ZFD_STATE_SOPEN) != 0) {
+ zfd_state_t *pzfds = zfds->zfd_inst_pri;
+
+ while ((mp = getq(qp)) != NULL) {
+ if (bcanputnext(log_qp, mp->b_band)) {
+ putnext(log_qp, mp);
+ } else {
+ (void) putbq(log_qp, mp);
+ break;
+ }
+ }
+
+ if (log_qp->q_count < log_qp->q_lowat) {
+ DTRACE_PROBE(zfd__flow__on);
+ pzfds->zfd_is_flowcon = B_FALSE;
+ if (pzfds->zfd_master_rdq != NULL)
+ qenable(RD(pzfds->zfd_master_rdq));
+ }
+ } else {
+ /* No longer open, drain the queue */
+ while ((mp = getq(qp)) != NULL) {
+ freemsg(mp);
+ }
+ flushq(qp, FLUSHALL);
+ }
+ return;
+ }
+
+ /*
+ * Care must be taken here, as either the master or the slave side
+ * read queue pointer could be NULL.
+ */
+ ASSERT(qp == zfds->zfd_master_rdq || qp == zfds->zfd_slave_rdq);
+ if (zfd_switch(qp) == NULL) {
+ DBG("zfd_rsrv: other side isn't listening\n");
+ return;
+ }
+ qenable(WR(zfd_switch(qp)));
+}
+
+/*
+ * Write server
+ *
+ * This routine is symmetric for master and slave, so it handles both without
+ * splitting up the codepath.
+ *
+ * If there are messages on this queue that can be sent to the other, send
+ * them via putnext(). Else, if queued messages cannot be sent, leave them
+ * on this queue.
+ */
+static void
+zfd_wsrv(queue_t *qp)
+{
+ queue_t *swq;
+ mblk_t *mp;
+ zfd_state_t *zfds = (zfd_state_t *)qp->q_ptr;
+
+ ASSERT(zfds != NULL);
+
+ /*
+ * Partner has no read queue, so take the data, and throw it away.
+ */
+ if (zfd_switch(RD(qp)) == NULL) {
+ DBG("zfd_wsrv: other side isn't listening");
+ while ((mp = getq(qp)) != NULL) {
+ if (mp->b_datap->db_type == M_IOCTL)
+ miocnak(qp, mp, 0, 0);
+ else
+ freemsg(mp);
+ }
+ flushq(qp, FLUSHALL);
+ return;
+ }
+
+ swq = RD(zfd_switch(qp));
+
+ /*
+ * while there are messages on this write queue...
+ */
+ while (!zfds->zfd_is_flowcon && (mp = getq(qp)) != NULL) {
+ /*
+ * Due to the way zfd_wput is implemented, we should never
+ * see a high priority control message here.
+ */
+ ASSERT(mp->b_datap->db_type < QPCTL);
+
+ if (bcanputnext(swq, mp->b_band)) {
+ putnext(swq, mp);
+ } else {
+ (void) putbq(qp, mp);
+ break;
+ }
+ }
+}