author     Garrett D'Amore <garrett@lucera.com>  2016-03-10 20:35:38 +0000
committer  Garrett D'Amore <garrett@lucera.com>  2016-05-20 17:38:28 +0000
commit     49ef7e0638c8b771d8a136eae78b1c0f99acc8e0 (patch)
tree       83af20aafd932952907c5721dcc2919e8f1b90f8
parent     7dc9a163b382daee1ce43b6588dd1b507363dae5 (diff)
download   illumos-joyent-49ef7e0638c8b771d8a136eae78b1c0f99acc8e0.tar.gz
4057 Add sfxge support
Contributed by: Solarflare Communications Inc.
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Andrew Rybchenko <arybchenko@solarflare.com>
Approved by: Dan McDonald <danmcd@omniti.com>
-rw-r--r--  exception_lists/cstyle  58
-rw-r--r--  usr/src/man/man7d/Makefile  3
-rw-r--r--  usr/src/man/man7d/sfxge.7d  52
-rw-r--r--  usr/src/pkg/manifests/driver-network-sfxge.mf  48
-rw-r--r--  usr/src/uts/common/Makefile.files  22
-rw-r--r--  usr/src/uts/common/Makefile.rules  16
-rw-r--r--  usr/src/uts/common/io/sfxge/THIRDPARTYLICENSE  23
-rw-r--r--  usr/src/uts/common/io/sfxge/THIRDPARTYLICENSE.descrip  1
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_ev.c  985
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_filter.c  1481
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_impl.h  1080
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_intr.c  197
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_mac.c  749
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_mcdi.c  300
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_nic.c  1629
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_nvram.c  2331
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_phy.c  474
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_rx.c  827
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_tlv_layout.h  785
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_tx.c  710
-rw-r--r--  usr/src/uts/common/io/sfxge/common/ef10_vpd.c  460
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx.h  2411
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_bootcfg.c  345
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_check.h  336
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_crc32.c  122
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_ev.c  1419
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_filter.c  1398
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_hash.c  328
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_impl.h  1166
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_intr.c  572
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_lic.c  1742
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_mac.c  804
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_mcdi.c  2280
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_mcdi.h  406
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_mon.c  255
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_nic.c  963
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_nvram.c  980
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_phy.c  562
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_phy_ids.h  51
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_port.c  252
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_regs.h  3870
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_regs_ef10.h  547
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_regs_mcdi.h  14242
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_regs_pci.h  2356
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_rx.c  1226
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_sram.c  331
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_tx.c  1089
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_types.h  1653
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_vpd.c  1013
-rw-r--r--  usr/src/uts/common/io/sfxge/common/efx_wol.c  414
-rw-r--r--  usr/src/uts/common/io/sfxge/common/hunt_impl.h  107
-rw-r--r--  usr/src/uts/common/io/sfxge/common/hunt_nic.c  321
-rw-r--r--  usr/src/uts/common/io/sfxge/common/hunt_phy.c  188
-rw-r--r--  usr/src/uts/common/io/sfxge/common/mcdi_mon.c  562
-rw-r--r--  usr/src/uts/common/io/sfxge/common/mcdi_mon.h  74
-rw-r--r--  usr/src/uts/common/io/sfxge/common/medford_impl.h  67
-rw-r--r--  usr/src/uts/common/io/sfxge/common/medford_nic.c  305
-rw-r--r--  usr/src/uts/common/io/sfxge/common/siena_flash.h  212
-rw-r--r--  usr/src/uts/common/io/sfxge/common/siena_impl.h  414
-rw-r--r--  usr/src/uts/common/io/sfxge/common/siena_mac.c  441
-rw-r--r--  usr/src/uts/common/io/sfxge/common/siena_mcdi.c  248
-rw-r--r--  usr/src/uts/common/io/sfxge/common/siena_nic.c  559
-rw-r--r--  usr/src/uts/common/io/sfxge/common/siena_nvram.c  722
-rw-r--r--  usr/src/uts/common/io/sfxge/common/siena_phy.c  797
-rw-r--r--  usr/src/uts/common/io/sfxge/common/siena_sram.c  178
-rw-r--r--  usr/src/uts/common/io/sfxge/common/siena_vpd.c  614
-rw-r--r--  usr/src/uts/common/io/sfxge/efsys.h  686
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge.c  1194
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge.h  1033
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_bar.c  77
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_debug.h  83
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_dma.c  184
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_err.c  85
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_ev.c  1341
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_gld_v3.c  1331
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_hash.c  188
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_intr.c  812
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_ioc.h  157
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_mac.c  1056
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_mcdi.c  493
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_mon.c  331
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_nvram.c  210
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_pci.c  242
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_phy.c  378
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_rx.c  2828
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_sram.c  187
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_tcp.c  213
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_tx.c  3100
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_version.h  42
-rw-r--r--  usr/src/uts/common/io/sfxge/sfxge_vpd.c  186
-rw-r--r--  usr/src/uts/intel/Makefile.intel  2
-rw-r--r--  usr/src/uts/intel/sfxge/Makefile  79
92 files changed, 77689 insertions, 2 deletions
diff --git a/exception_lists/cstyle b/exception_lists/cstyle
index 8dca99dbba..784b580d6a 100644
--- a/exception_lists/cstyle
+++ b/exception_lists/cstyle
@@ -973,6 +973,64 @@ usr/src/uts/common/io/ixgbe/core/ixgbe_phy.h
usr/src/uts/common/io/ixgbe/core/ixgbe_type.h
usr/src/uts/common/io/ixgbe/core/ixgbe_x540.c
usr/src/uts/common/io/ixgbe/core/ixgbe_x540.h
+usr/src/uts/common/io/sfxge/common/ef10_ev.c
+usr/src/uts/common/io/sfxge/common/ef10_filter.c
+usr/src/uts/common/io/sfxge/common/ef10_impl.h
+usr/src/uts/common/io/sfxge/common/ef10_intr.c
+usr/src/uts/common/io/sfxge/common/ef10_mac.c
+usr/src/uts/common/io/sfxge/common/ef10_mcdi.c
+usr/src/uts/common/io/sfxge/common/ef10_nic.c
+usr/src/uts/common/io/sfxge/common/ef10_nvram.c
+usr/src/uts/common/io/sfxge/common/ef10_phy.c
+usr/src/uts/common/io/sfxge/common/ef10_rx.c
+usr/src/uts/common/io/sfxge/common/ef10_tlv_layout.h
+usr/src/uts/common/io/sfxge/common/ef10_tx.c
+usr/src/uts/common/io/sfxge/common/ef10_vpd.c
+usr/src/uts/common/io/sfxge/common/efx.h
+usr/src/uts/common/io/sfxge/common/efx_bootcfg.c
+usr/src/uts/common/io/sfxge/common/efx_check.h
+usr/src/uts/common/io/sfxge/common/efx_crc32.c
+usr/src/uts/common/io/sfxge/common/efx_ev.c
+usr/src/uts/common/io/sfxge/common/efx_filter.c
+usr/src/uts/common/io/sfxge/common/efx_hash.c
+usr/src/uts/common/io/sfxge/common/efx_impl.h
+usr/src/uts/common/io/sfxge/common/efx_intr.c
+usr/src/uts/common/io/sfxge/common/efx_lic.c
+usr/src/uts/common/io/sfxge/common/efx_mac.c
+usr/src/uts/common/io/sfxge/common/efx_mcdi.c
+usr/src/uts/common/io/sfxge/common/efx_mcdi.h
+usr/src/uts/common/io/sfxge/common/efx_mon.c
+usr/src/uts/common/io/sfxge/common/efx_nic.c
+usr/src/uts/common/io/sfxge/common/efx_nvram.c
+usr/src/uts/common/io/sfxge/common/efx_phy.c
+usr/src/uts/common/io/sfxge/common/efx_phy_ids.h
+usr/src/uts/common/io/sfxge/common/efx_port.c
+usr/src/uts/common/io/sfxge/common/efx_regs.h
+usr/src/uts/common/io/sfxge/common/efx_regs_ef10.h
+usr/src/uts/common/io/sfxge/common/efx_regs_mcdi.h
+usr/src/uts/common/io/sfxge/common/efx_regs_pci.h
+usr/src/uts/common/io/sfxge/common/efx_rx.c
+usr/src/uts/common/io/sfxge/common/efx_sram.c
+usr/src/uts/common/io/sfxge/common/efx_tx.c
+usr/src/uts/common/io/sfxge/common/efx_types.h
+usr/src/uts/common/io/sfxge/common/efx_vpd.c
+usr/src/uts/common/io/sfxge/common/efx_wol.c
+usr/src/uts/common/io/sfxge/common/hunt_impl.h
+usr/src/uts/common/io/sfxge/common/hunt_nic.c
+usr/src/uts/common/io/sfxge/common/hunt_phy.c
+usr/src/uts/common/io/sfxge/common/mcdi_mon.c
+usr/src/uts/common/io/sfxge/common/mcdi_mon.h
+usr/src/uts/common/io/sfxge/common/medford_impl.h
+usr/src/uts/common/io/sfxge/common/medford_nic.c
+usr/src/uts/common/io/sfxge/common/siena_flash.h
+usr/src/uts/common/io/sfxge/common/siena_impl.h
+usr/src/uts/common/io/sfxge/common/siena_mac.c
+usr/src/uts/common/io/sfxge/common/siena_mcdi.c
+usr/src/uts/common/io/sfxge/common/siena_nic.c
+usr/src/uts/common/io/sfxge/common/siena_nvram.c
+usr/src/uts/common/io/sfxge/common/siena_phy.c
+usr/src/uts/common/io/sfxge/common/siena_sram.c
+usr/src/uts/common/io/sfxge/common/siena_vpd.c
usr/src/uts/common/sys/scsi/adapters/mpt_sas/mpi/mpi2.h
usr/src/uts/common/sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h
usr/src/uts/common/sys/scsi/adapters/mpt_sas/mpi/mpi2_hbd.h
diff --git a/usr/src/man/man7d/Makefile b/usr/src/man/man7d/Makefile
index eb75db718a..e25db8b23b 100644
--- a/usr/src/man/man7d/Makefile
+++ b/usr/src/man/man7d/Makefile
@@ -12,7 +12,7 @@
#
# Copyright 2011, Richard Lowe
# Copyright 2015 Nexenta Systems, Inc. All rights reserved.
-# Copyright 2014 Garrett D'Amore <garrett@damore.org>
+# Copyright 2016 Garrett D'Amore <garrett@damore.org>
#
include $(SRC)/Makefile.master
@@ -116,6 +116,7 @@ _MANFILES= aac.7d \
sdt.7d \
ses.7d \
sfe.7d \
+ sfxge.7d \
sgen.7d \
srpt.7d \
st.7d \
diff --git a/usr/src/man/man7d/sfxge.7d b/usr/src/man/man7d/sfxge.7d
new file mode 100644
index 0000000000..ae375587f1
--- /dev/null
+++ b/usr/src/man/man7d/sfxge.7d
@@ -0,0 +1,52 @@
+.\"
+.\" This file and its contents are supplied under the terms of the
+.\" Common Development and Distribution License ("CDDL"), version 1.0.
+.\" You may only use this file in accordance with the terms of version
+.\" 1.0 of the CDDL.
+.\"
+.\" A full copy of the text of the CDDL should have accompanied this
+.\" source. A copy of the CDDL is also available via the Internet at
+.\" http://www.illumos.org/license/CDDL.
+.\"
+.\"
+.\" Copyright 2016 Garrett D'Amore <garrett@damore.org>
+.\"
+.Dd Apr 11, 2016
+.Dt SFXGE 7D
+.Os
+.Sh NAME
+.Nm sfxge
+.Nd Solarflare Ethernet device driver
+.Sh SYNOPSIS
+.Pa /dev/sfxge
+.Sh DESCRIPTION
+The
+.Nm
+driver provides support for 10 and 40 Gbps Ethernet adapters
+based on the Solarflare SFC9000 family of controllers.
+.Lp
+These devices generally support the standard Ethernet features, including
+autonegotiation, VLANs, and hardware based flow control.
+The maximum frame size supported by these devices is 9202 bytes, including
+the tag, if present.
+.Lp
+The device driver supports the
+.Xr ieee802.3 5
+properties, which can be configured with
+.Xr dladm 1M .
+.Sh FILES
+.Bl -tag -width /dev/sfxge
+.It Pa /dev/sfxge
+Special character device.
+.El
+.
+.Sh SEE ALSO
+.Xr dladm 1M ,
+.Xr ifconfig 1M ,
+.Xr pci 4 ,
+.Xr ieee802.3 5 ,
+.Xr dlpi 7P
+.Rs
+.%T IEEE 802.3: Ethernet
+.%Q IEEE Standards Association
+.Re
diff --git a/usr/src/pkg/manifests/driver-network-sfxge.mf b/usr/src/pkg/manifests/driver-network-sfxge.mf
new file mode 100644
index 0000000000..8c35ed3217
--- /dev/null
+++ b/usr/src/pkg/manifests/driver-network-sfxge.mf
@@ -0,0 +1,48 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2016 Garrett D'Amore <garrett@damore.org>
+#
+# Note: The driver has untested support for the following
+# PCI IDs. Once the tests are complete (and assuming no problems
+# are found), they can be added:
+#
+# pci1924,1903 - SFC9120 Virtual Function
+# pci1924,1923 - SFC9140 Virtual Function
+# pci1924,913 - Medford (PF, Uninitialized)
+# pci1924,a03 - SFC9240
+# pci1924,1a03 - SFC9240 Virtual Function
+
+<include global_zone_only_component>
+set name=pkg.fmri value=pkg:/driver/network/sfxge@$(PKGVERS)
+set name=pkg.description value="Solarflare Ethernet Network Adapter Driver"
+set name=pkg.summary value="Solarflare Ethernet Driver"
+set name=info.classification \
+ value=org.opensolaris.category.2008:Drivers/Networking
+set name=variant.arch value=$(ARCH)
+dir path=kernel group=sys
+dir path=kernel/drv group=sys
+dir path=kernel/drv/$(ARCH64) group=sys
+dir path=usr/share/man
+dir path=usr/share/man/man7d
+driver name=sfxge clone_perms="sfxge 0666 root sys" perms="* 0666 root sys" \
+ alias=pci1924,803 \
+ alias=pci1924,810 \
+ alias=pci1924,813 \
+ alias=pci1924,901 \
+ alias=pci1924,903 \
+ alias=pci1924,923
+file path=kernel/drv/$(ARCH64)/sfxge group=sys
+$(i386_ONLY)file path=kernel/drv/sfxge group=sys
+file path=usr/share/man/man7d/sfxge.7d
+license usr/src/uts/common/io/sfxge/THIRDPARTYLICENSE \
+ license=usr/src/uts/common/io/sfxge/THIRDPARTYLICENSE
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index 030e2df374..e46c461c54 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -25,6 +25,7 @@
# Copyright (c) 2011, 2014 by Delphix. All rights reserved.
# Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
# Copyright 2015 Nexenta Systems, Inc. All rights reserved.
+# Copyright 2016 Garrett D'Amore <garrett@damore.org>
#
#
@@ -2004,6 +2005,27 @@ IXGBE_OBJS = ixgbe_82598.o ixgbe_82599.o ixgbe_api.o \
ixgbe_tx.o ixgbe_x540.o ixgbe_mbx.o
#
+# Solarflare 1/10/40GbE NIC driver module
+#
+# NB: The illumos specific sources are listed first, with the
+# common (OS-independent) sources afterwards.
+#
+SFXGE_OBJS = sfxge_err.o sfxge_ev.o sfxge_hash.o sfxge_intr.o sfxge_mac.o \
+ sfxge_gld_v3.o sfxge_mon.o sfxge_phy.o \
+ sfxge_sram.o sfxge_bar.o sfxge_pci.o sfxge_nvram.o \
+ sfxge_rx.o sfxge_tcp.o sfxge_tx.o sfxge_mcdi.o sfxge_vpd.o \
+ sfxge.o sfxge_dma.o
+SFXGE_SF_OBJS = efx_bootcfg.o efx_crc32.o efx_ev.o efx_filter.o \
+ efx_hash.o efx_intr.o efx_mac.o efx_mcdi.o efx_mon.o \
+ efx_nic.o efx_nvram.o efx_phy.o efx_port.o efx_rx.o \
+ efx_sram.o efx_tx.o efx_vpd.o efx_wol.o mcdi_mon.o \
+ siena_mac.o siena_mcdi.o siena_nic.o siena_nvram.o \
+ siena_phy.o siena_sram.o siena_vpd.o \
+ ef10_ev.o ef10_filter.o ef10_intr.o ef10_mac.o ef10_mcdi.o \
+ ef10_nic.o ef10_nvram.o ef10_phy.o ef10_rx.o ef10_tx.o \
+ ef10_vpd.o hunt_nic.o hunt_phy.o
+
+#
# NIU 10G/1G driver module
#
NXGE_OBJS = nxge_mac.o nxge_ipp.o nxge_rxdma.o \
diff --git a/usr/src/uts/common/Makefile.rules b/usr/src/uts/common/Makefile.rules
index ec03abb448..fdbe9717f3 100644
--- a/usr/src/uts/common/Makefile.rules
+++ b/usr/src/uts/common/Makefile.rules
@@ -21,7 +21,7 @@
#
# Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
-# Copyright 2013 Garrett D'Amore <garrett@damore.org>
+# Copyright 2016 Garrett D'Amore <garrett@damore.org>
# Copyright 2015 Nexenta Systems, Inc. All rights reserved.
# Copyright 2013 Saso Kiselkov. All rights reserved.
#
@@ -1442,6 +1442,14 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/common/io/yge/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
+$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/sfxge/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/sfxge/common/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/skd/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
@@ -2756,6 +2764,12 @@ $(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/vr/%.c
$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/yge/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
+$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/sfxge/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/sfxge/common/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/skd/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
diff --git a/usr/src/uts/common/io/sfxge/THIRDPARTYLICENSE b/usr/src/uts/common/io/sfxge/THIRDPARTYLICENSE
new file mode 100644
index 0000000000..30acc59021
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/THIRDPARTYLICENSE
@@ -0,0 +1,23 @@
+ * Copyright (c) 2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/usr/src/uts/common/io/sfxge/THIRDPARTYLICENSE.descrip b/usr/src/uts/common/io/sfxge/THIRDPARTYLICENSE.descrip
new file mode 100644
index 0000000000..23a2279c4e
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/THIRDPARTYLICENSE.descrip
@@ -0,0 +1 @@
+SFXGE DRIVER
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_ev.c b/usr/src/uts/common/io/sfxge/common/ef10_ev.c
new file mode 100644
index 0000000000..c90ef5e4c6
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_ev.c
@@ -0,0 +1,985 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_STATS
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_QSTATS
+#define EFX_EV_QSTAT_INCR(_eep, _stat) \
+ do { \
+ (_eep)->ee_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_EV_QSTAT_INCR(_eep, _stat)
+#endif
+
+
+static __checkReturn boolean_t
+ef10_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_evq(
+ __in efx_nic_t *enp,
+ __in unsigned int instance,
+ __in efsys_mem_t *esmp,
+ __in size_t nevs,
+ __in uint32_t irq,
+ __out_opt uint32_t *irqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[
+ MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_OUT_LEN)];
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ int supports_rx_batching;
+ efx_rc_t rc;
+
+ npages = EFX_EVQ_NBUFS(nevs);
+ if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);
+
+ /*
+ * On Huntington RX and TX event batching can only be requested
+ * together (even if the datapath firmware doesn't actually support RX
+ * batching).
+ * Cut through is incompatible with RX batching and so enabling cut
+ * through disables RX batching (but it does not affect TX batching).
+ *
+ * So always enable RX and TX event batching, and enable cut through
+ * if RX event batching isn't supported (i.e. on low latency firmware).
+ */
+ supports_rx_batching = enp->en_nic_cfg.enc_rx_batching_enabled ? 1 : 0;
+ MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
+ INIT_EVQ_IN_FLAG_INT_ARMD, 0,
+ INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_batching,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1);
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ if (irqp != NULL)
+ *irqp = MCDI_OUT_DWORD(req, INIT_EVQ_OUT_IRQ);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_evq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
+ MC_CMD_FINI_EVQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+ __checkReturn efx_rc_t
+ef10_ev_init(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+}
+
+ void
+ef10_ev_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t irq;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
+
+ if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (index >= encp->enc_evq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /* Set up the handler table */
+ eep->ee_rx = ef10_ev_rx;
+ eep->ee_tx = ef10_ev_tx;
+ eep->ee_driver = ef10_ev_driver;
+ eep->ee_drv_gen = ef10_ev_drv_gen;
+ eep->ee_mcdi = ef10_ev_mcdi;
+
+ /*
+ * Set up the event queue
+ * NOTE: ignore the returned IRQ param as firmware does not set it.
+ */
+ irq = index; /* INIT_EVQ expects function-relative vector number */
+ if ((rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, NULL)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t rptr;
+ efx_dword_t dword;
+
+ rptr = count & eep->ee_mask;
+
+ if (enp->en_nic_cfg.enc_bug35388_workaround) {
+ EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
+ (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
+ EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
+ (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
+
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+ ERF_DD_EVQ_IND_RPTR,
+ (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
+ EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ &dword, B_FALSE);
+
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+ ERF_DD_EVQ_IND_RPTR,
+ rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+ EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ &dword, B_FALSE);
+ } else {
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
+ &dword, B_FALSE);
+ }
+
+ return (0);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_driver_event(
+ __in efx_nic_t *enp,
+ __in uint32_t evq,
+ __in efx_qword_t data)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
+ MC_CMD_DRIVER_EVENT_OUT_LEN)];
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_DRIVER_EVENT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);
+
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
+ EFX_QWORD_FIELD(data, EFX_DWORD_0));
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
+ EFX_QWORD_FIELD(data, EFX_DWORD_1));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_3(event,
+ ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
+ ESF_DZ_DRV_SUB_CODE, 0,
+ ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);
+
+ (void) efx_mcdi_driver_event(enp, eep->ee_index, event);
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_dword_t dword;
+ uint32_t timer_val, mode;
+ efx_rc_t rc;
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ timer_val = 0;
+ mode = FFE_CZ_TIMER_MODE_DIS;
+ } else {
+ /* Calculate the timer value in quanta */
+ timer_val = us * 1000 / encp->enc_evq_timer_quantum_ns;
+
+ /* Moderation value is base 0 so we need to deduct 1 */
+ if (timer_val > 0)
+ timer_val--;
+
+ mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
+ }
+
+ if (encp->enc_bug35388_workaround) {
+ EFX_POPULATE_DWORD_3(dword,
+ ERF_DD_EVQ_IND_TIMER_FLAGS,
+ EFE_DD_EVQ_IND_TIMER_FLAGS,
+ ERF_DD_EVQ_IND_TIMER_MODE, mode,
+ ERF_DD_EVQ_IND_TIMER_VAL, timer_val);
+ EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
+ eep->ee_index, &dword, 0);
+ } else {
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, timer_val);
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
+ eep->ee_index, &dword, 0);
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+ eep->ee_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+
+static __checkReturn boolean_t
+ef10_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t size;
+ uint32_t label;
+ uint32_t mac_class;
+ uint32_t eth_tag_class;
+ uint32_t l3_class;
+ uint32_t l4_class;
+ uint32_t next_read_lbits;
+ uint16_t flags;
+ boolean_t cont;
+ boolean_t should_abort;
+ efx_evq_rxq_state_t *eersp;
+ unsigned int desc_count;
+ unsigned int last_used_id;
+
+ EFX_EV_QSTAT_INCR(eep, EV_RX);
+
+ /* Discard events after RXQ/TXQ errors */
+ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+ return (B_FALSE);
+
+ /* Basic packet information */
+ size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
+ next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
+ eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
+ mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
+ l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
+ l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
+ cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
+ /* Drop this event */
+ return (B_FALSE);
+ }
+ flags = 0;
+
+ if (cont != 0) {
+ /*
+ * This may be part of a scattered frame, or it may be a
+ * truncated frame if scatter is disabled on this RXQ.
+ * Overlength frames can be received if e.g. a VF is configured
+ * for 1500 MTU but connected to a port set to 9000 MTU
+ * (see bug56567).
+ * FIXME: There is not yet any driver that supports scatter on
+ * Huntington. Scatter support is required for OSX.
+ */
+ flags |= EFX_PKT_CONT;
+ }
+
+ if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
+ flags |= EFX_PKT_UNICAST;
+
+ /* Increment the count of descriptors read */
+ eersp = &eep->ee_rxq_state[label];
+ desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ eersp->eers_rx_read_ptr += desc_count;
+
+ /*
+ * FIXME: add error checking to make sure this a batched event.
+ * This could also be an aborted scatter, see Bug36629.
+ */
+ if (desc_count > 1) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
+ flags |= EFX_PKT_PREFIX_LEN;
+ }
+
+	/* Calculate the index of the last descriptor consumed */
+ last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
+
+ /* Check for errors that invalidate checksum and L3/L4 fields */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
+ /* RX frame truncated (error flag is misnamed) */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
+ /* Bad Ethernet frame CRC */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
+ /*
+ * Hardware parse failed, due to malformed headers
+ * or headers that are too long for the parser.
+ * Headers and checksums must be validated by the host.
+ */
+ // TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
+ goto deliver;
+ }
+
+ if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
+ (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
+ flags |= EFX_PKT_VLAN_TAGGED;
+ }
+
+ switch (l3_class) {
+ case ESE_DZ_L3_CLASS_IP4:
+ case ESE_DZ_L3_CLASS_IP4_FRAG:
+ flags |= EFX_PKT_IPV4;
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+ } else {
+ flags |= EFX_CKSUM_IPV4;
+ }
+
+ if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
+ flags |= EFX_PKT_TCP;
+ } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
+ flags |= EFX_PKT_UDP;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
+ }
+ break;
+
+ case ESE_DZ_L3_CLASS_IP6:
+ case ESE_DZ_L3_CLASS_IP6_FRAG:
+ flags |= EFX_PKT_IPV6;
+
+ if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
+ flags |= EFX_PKT_TCP;
+ } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
+ flags |= EFX_PKT_UDP;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
+ }
+ break;
+
+ default:
+ EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
+ break;
+ }
+
+ if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+ } else {
+ flags |= EFX_CKSUM_TCPUDP;
+ }
+ }
+
+deliver:
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ EFSYS_ASSERT(eecp->eec_rx != NULL);
+ should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t id;
+ uint32_t label;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX);
+
+ /* Discard events after RXQ/TXQ errors */
+ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+ return (B_FALSE);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
+ /* Drop this event */
+ return (B_FALSE);
+ }
+
+ /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
+
+ EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
+
+ EFSYS_ASSERT(eecp->eec_tx != NULL);
+ should_abort = eecp->eec_tx(arg, label, id);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ unsigned int code;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
+ should_abort = B_FALSE;
+
+ code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
+ switch (code) {
+ case ESE_DZ_DRV_TIMER_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);
+
+ EFSYS_ASSERT(eecp->eec_timer != NULL);
+ should_abort = eecp->eec_timer(arg, id);
+ break;
+ }
+
+ case ESE_DZ_DRV_WAKE_UP_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);
+
+ EFSYS_ASSERT(eecp->eec_wake_up != NULL);
+ should_abort = eecp->eec_wake_up(arg, id);
+ break;
+ }
+
+ case ESE_DZ_DRV_START_UP_EV:
+ EFSYS_ASSERT(eecp->eec_initialized != NULL);
+ should_abort = eecp->eec_initialized(arg);
+ break;
+
+ default:
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ break;
+ }
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t data;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
+ should_abort = B_FALSE;
+
+ data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
+ if (data >= ((uint32_t)1 << 16)) {
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ return (B_TRUE);
+ }
+
+ EFSYS_ASSERT(eecp->eec_software != NULL);
+ should_abort = eecp->eec_software(arg, (uint16_t)data);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned code;
+ boolean_t should_abort = B_FALSE;
+
+ EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
+
+ code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ efx_mcdi_ev_death(enp, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(enp,
+ MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
+ MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
+ MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
+ break;
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ case MCDI_EVENT_CODE_PROXY_RESPONSE:
+ /*
+ * This event notifies a function that an authorization request
+ * has been processed. If the request was authorized then the
+ * function can now re-send the original MCDI request.
+ * See SF-113652-SW "SR-IOV Proxied Network Access Control".
+ */
+ efx_mcdi_ev_proxy_response(enp,
+ MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
+ MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
+ break;
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+ case MCDI_EVENT_CODE_LINKCHANGE: {
+ efx_link_mode_t link_mode;
+
+ ef10_phy_link_ev(enp, eqp, &link_mode);
+ should_abort = eecp->eec_link_change(arg, link_mode);
+ break;
+ }
+
+ case MCDI_EVENT_CODE_SENSOREVT: {
+#if EFSYS_OPT_MON_STATS
+ efx_mon_stat_t id;
+ efx_mon_stat_value_t value;
+ efx_rc_t rc;
+
+ /* Decode monitor stat for MCDI sensor (if supported) */
+ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
+ /* Report monitor stat change */
+ should_abort = eecp->eec_monitor(arg, id, value);
+ } else if (rc == ENOTSUP) {
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_SENSOREVT,
+ MCDI_EV_FIELD(eqp, DATA));
+ } else {
+ EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
+ }
+#endif
+ break;
+ }
+
+ case MCDI_EVENT_CODE_SCHEDERR:
+ /* Informational only */
+ break;
+
+ case MCDI_EVENT_CODE_REBOOT:
+		/* Falcon/Siena only (should not be seen with Huntington). */
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MC_REBOOT:
+ /* MC_REBOOT event is used for Huntington (EF10) and later. */
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+#if EFSYS_OPT_MAC_STATS
+ if (eecp->eec_mac_stats != NULL) {
+ eecp->eec_mac_stats(arg,
+ MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
+ }
+#endif
+ break;
+
+ case MCDI_EVENT_CODE_FWALERT: {
+ uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
+
+ if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_FWALERT_SRAM,
+ MCDI_EV_FIELD(eqp, FWALERT_DATA));
+ else
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_FWALERT,
+ MCDI_EV_FIELD(eqp, DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_TX_ERR: {
+ /*
+ * After a TXQ error is detected, firmware sends a TX_ERR event.
+ * This may be followed by TX completions (which we discard),
+ * and then finally by a TX_FLUSH event. Firmware destroys the
+ * TXQ automatically after sending the TX_FLUSH event.
+ */
+ enp->en_reset_flags |= EFX_RESET_TXQ_ERR;
+
+ EFSYS_PROBE1(tx_descq_err, uint32_t, MCDI_EV_FIELD(eqp, DATA));
+
+ /* Inform the driver that a reset is required. */
+ eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
+ MCDI_EV_FIELD(eqp, TX_ERR_DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_TX_FLUSH: {
+ uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);
+
+ /*
+ * EF10 firmware sends two TX_FLUSH events: one to the txq's
+ * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
+ * We want to wait for all completions, so ignore the events
+ * with TX_FLUSH_TO_DRIVER.
+ */
+ if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
+ should_abort = B_FALSE;
+ break;
+ }
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
+
+ EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
+ should_abort = eecp->eec_txq_flush_done(arg, txq_index);
+ break;
+ }
+
+ case MCDI_EVENT_CODE_RX_ERR: {
+ /*
+ * After an RXQ error is detected, firmware sends an RX_ERR
+ * event. This may be followed by RX events (which we discard),
+ * and then finally by an RX_FLUSH event. Firmware destroys the
+ * RXQ automatically after sending the RX_FLUSH event.
+ */
+ enp->en_reset_flags |= EFX_RESET_RXQ_ERR;
+
+ EFSYS_PROBE1(rx_descq_err, uint32_t, MCDI_EV_FIELD(eqp, DATA));
+
+ /* Inform the driver that a reset is required. */
+ eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
+ MCDI_EV_FIELD(eqp, RX_ERR_DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_RX_FLUSH: {
+ uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);
+
+ /*
+ * EF10 firmware sends two RX_FLUSH events: one to the rxq's
+ * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
+ * We want to wait for all completions, so ignore the events
+ * with RX_FLUSH_TO_DRIVER.
+ */
+ if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
+ should_abort = B_FALSE;
+ break;
+ }
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
+
+ EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
+ should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
+ break;
+ }
+
+ default:
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ break;
+ }
+
+ return (should_abort);
+}
+
+ void
+ef10_ev_rxlabel_init(
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp,
+ __in unsigned int label)
+{
+ efx_evq_rxq_state_t *eersp;
+
+ EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+ eersp = &eep->ee_rxq_state[label];
+
+ EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
+
+ eersp->eers_rx_read_ptr = 0;
+ eersp->eers_rx_mask = erp->er_mask;
+}
+
+ void
+ef10_ev_rxlabel_fini(
+ __in efx_evq_t *eep,
+ __in unsigned int label)
+{
+ efx_evq_rxq_state_t *eersp;
+
+ EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+ eersp = &eep->ee_rxq_state[label];
+
+ EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
+
+ eersp->eers_rx_read_ptr = 0;
+ eersp->eers_rx_mask = 0;
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
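
The interrupt moderation path in ef10_ev_qmoderate() above converts a microsecond
setting into hardware timer quanta before writing the EVQ timer register. The
following is a minimal standalone sketch of that conversion; the 6144 ns quantum
is an assumed, illustrative value, since the driver actually takes it from
enc_evq_timer_quantum_ns in the NIC configuration and it varies by controller
and firmware.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: the driver reads the quantum from enc_evq_timer_quantum_ns. */
#define	EXAMPLE_EVQ_TIMER_QUANTUM_NS	6144

static uint32_t
example_evq_timer_val(unsigned int us)
{
	uint32_t timer_val;

	if (us == 0)
		return (0);		/* zero disables the timer */

	/* Convert microseconds into hardware quanta. */
	timer_val = us * 1000 / EXAMPLE_EVQ_TIMER_QUANTUM_NS;

	/* The register value is zero-based, so deduct one. */
	if (timer_val > 0)
		timer_val--;

	return (timer_val);
}

int
main(void)
{
	/* 30us of moderation -> 4 quanta of 6144ns -> register value 3 */
	(void) printf("%u\n", example_evq_timer_val(30));
	return (0);
}
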
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_filter.c b/usr/src/uts/common/io/sfxge/common/ef10_filter.c
new file mode 100644
index 0000000000..b43cb85052
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_filter.c
@@ -0,0 +1,1481 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_FILTER
+
+#define EFE_SPEC(eftp, index) ((eftp)->eft_entry[(index)].efe_spec)
+
+static efx_filter_spec_t *
+ef10_filter_entry_spec(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ return ((efx_filter_spec_t *)(EFE_SPEC(eftp, index) &
+ ~(uintptr_t)EFX_EF10_FILTER_FLAGS));
+}
+
+static boolean_t
+ef10_filter_entry_is_busy(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_BUSY)
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+static boolean_t
+ef10_filter_entry_is_auto_old(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_AUTO_OLD)
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+static void
+ef10_filter_set_entry(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index,
+ __in_opt const efx_filter_spec_t *efsp)
+{
+ EFE_SPEC(eftp, index) = (uintptr_t)efsp;
+}
+
+static void
+ef10_filter_set_entry_busy(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
+}
+
+static void
+ef10_filter_set_entry_not_busy(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
+}
+
+static void
+ef10_filter_set_entry_auto_old(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
+ EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
+}
+
+static void
+ef10_filter_set_entry_not_auto_old(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_init(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *eftp;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+#define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match))
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_IP));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_IP));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_MAC));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_PORT));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO));
+#undef MATCH_MASK
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp);
+
+ if (!eftp) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_filter.ef_ef10_filter_table = eftp;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_filter_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_filter.ef_ef10_filter_table != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t),
+ enp->en_filter.ef_ef10_filter_table);
+ }
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_filter_op_add(
+ __in efx_nic_t *enp,
+ __in efx_filter_spec_t *spec,
+ __in unsigned int filter_op,
+ __inout ef10_filter_handle_t *handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
+ MC_CMD_FILTER_OP_OUT_LEN)];
+ uint32_t match_fields = 0;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FILTER_OP;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;
+
+ switch (filter_op) {
+ case MC_CMD_FILTER_OP_IN_OP_REPLACE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO,
+ handle->efh_lo);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI,
+ handle->efh_hi);
+ /* FALLTHROUGH */
+ case MC_CMD_FILTER_OP_IN_OP_INSERT:
+ case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, filter_op);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
+ /*
+ * The LOC_MAC_IG match flag can represent unknown unicast
+ * or multicast filters - use the MAC address to distinguish
+ * them.
+ */
+ if (EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac))
+ match_fields |= 1U <<
+ MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+ else
+ match_fields |= 1U <<
+ MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+ }
+
+ match_fields |= spec->efs_match_flags & (~EFX_FILTER_MATCH_LOC_MAC_IG);
+
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_MATCH_FIELDS,
+ match_fields);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_DEST,
+ MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_QUEUE,
+ spec->efs_dmaq_id);
+ if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_CONTEXT,
+ spec->efs_rss_context);
+ }
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_MODE,
+ spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ?
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_TX_DEST,
+ MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+
+ if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) {
+ /*
+ * NOTE: Unlike most MCDI requests, the filter fields
+ * are presented in network (big endian) byte order.
+ */
+ (void) memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_MAC),
+ spec->efs_rem_mac, EFX_MAC_ADDR_LEN);
+ (void) memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_MAC),
+ spec->efs_loc_mac, EFX_MAC_ADDR_LEN);
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_SRC_PORT,
+ __CPU_TO_BE_16(spec->efs_rem_port));
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_DST_PORT,
+ __CPU_TO_BE_16(spec->efs_loc_port));
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_ETHER_TYPE,
+ __CPU_TO_BE_16(spec->efs_ether_type));
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_INNER_VLAN,
+ __CPU_TO_BE_16(spec->efs_inner_vid));
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_OUTER_VLAN,
+ __CPU_TO_BE_16(spec->efs_outer_vid));
+
+ /* IP protocol (in low byte, high byte is zero) */
+ MCDI_IN_SET_BYTE(req, FILTER_OP_IN_IP_PROTO,
+ spec->efs_ip_proto);
+
+ EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) ==
+ MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
+ EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) ==
+ MC_CMD_FILTER_OP_IN_DST_IP_LEN);
+
+ (void) memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_IP),
+ &spec->efs_rem_host.eo_byte[0],
+ MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
+ (void) memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_IP),
+ &spec->efs_loc_host.eo_byte[0],
+ MC_CMD_FILTER_OP_IN_DST_IP_LEN);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_LO);
+ handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_HI);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_filter_op_delete(
+ __in efx_nic_t *enp,
+ __in unsigned int filter_op,
+ __inout ef10_filter_handle_t *handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
+ MC_CMD_FILTER_OP_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FILTER_OP;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;
+
+ switch (filter_op) {
+ case MC_CMD_FILTER_OP_IN_OP_REMOVE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE);
+ break;
+ case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO, handle->efh_lo);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI, handle->efh_hi);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn boolean_t
+ef10_filter_equal(
+ __in const efx_filter_spec_t *left,
+ __in const efx_filter_spec_t *right)
+{
+ /* FIXME: Consider rx vs tx filters (look at efs_flags) */
+ if (left->efs_match_flags != right->efs_match_flags)
+ return (B_FALSE);
+ if (!EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host))
+ return (B_FALSE);
+ if (!EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host))
+ return (B_FALSE);
+ if (memcmp(left->efs_rem_mac, right->efs_rem_mac, EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
+ if (memcmp(left->efs_loc_mac, right->efs_loc_mac, EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
+ if (left->efs_rem_port != right->efs_rem_port)
+ return (B_FALSE);
+ if (left->efs_loc_port != right->efs_loc_port)
+ return (B_FALSE);
+ if (left->efs_inner_vid != right->efs_inner_vid)
+ return (B_FALSE);
+ if (left->efs_outer_vid != right->efs_outer_vid)
+ return (B_FALSE);
+ if (left->efs_ether_type != right->efs_ether_type)
+ return (B_FALSE);
+ if (left->efs_ip_proto != right->efs_ip_proto)
+ return (B_FALSE);
+
+ return (B_TRUE);
+
+}
+
+static __checkReturn boolean_t
+ef10_filter_same_dest(
+ __in const efx_filter_spec_t *left,
+ __in const efx_filter_spec_t *right)
+{
+ if ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) &&
+ (right->efs_flags & EFX_FILTER_FLAG_RX_RSS)) {
+ if (left->efs_rss_context == right->efs_rss_context)
+ return (B_TRUE);
+ } else if ((~(left->efs_flags) & EFX_FILTER_FLAG_RX_RSS) &&
+ (~(right->efs_flags) & EFX_FILTER_FLAG_RX_RSS)) {
+ if (left->efs_dmaq_id == right->efs_dmaq_id)
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
+
+static __checkReturn uint32_t
+ef10_filter_hash(
+ __in efx_filter_spec_t *spec)
+{
+ EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t))
+ == 0);
+ EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) %
+ sizeof (uint32_t)) == 0);
+
+ /*
+ * As the area of the efx_filter_spec_t we need to hash is DWORD
+ * aligned and an exact number of DWORDs in size we can use the
+ * optimised efx_hash_dwords() rather than efx_hash_bytes()
+ */
+ return (efx_hash_dwords((const void *)&spec->efs_outer_vid,
+ (sizeof (efx_filter_spec_t) -
+ EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) /
+ sizeof (uint32_t), 0));
+}
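+
+/*
+ * Illustrative note: EFX_EF10_FILTER_TBL_ROWS is a power of two, so the hash
+ * value returned above is folded into a table row index by masking rather
+ * than by a modulo operation, as in the probe loops below, e.g.
+ *
+ *	row = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+ *
+ * ("row" is a placeholder name used only in this sketch.)
+ */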
+
+/*
+ * Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients. Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static __checkReturn boolean_t
+ef10_filter_is_exclusive(
+ __in efx_filter_spec_t *spec)
+{
+ if ((spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC) &&
+ !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac))
+ return (B_TRUE);
+
+ if ((spec->efs_match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) &&
+ ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe))
+ return (B_TRUE);
+ if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) &&
+ (spec->efs_loc_host.eo_u8[0] != 0xff))
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_restore(
+ __in efx_nic_t *enp)
+{
+ int tbl_id;
+ efx_filter_spec_t *spec;
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ boolean_t restoring;
+ int state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) {
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ spec = ef10_filter_entry_spec(eftp, tbl_id);
+ if (spec == NULL) {
+ restoring = B_FALSE;
+ } else if (ef10_filter_entry_is_busy(eftp, tbl_id)) {
+ /* Ignore busy entries. */
+ restoring = B_FALSE;
+ } else {
+ ef10_filter_set_entry_busy(eftp, tbl_id);
+ restoring = B_TRUE;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (restoring == B_FALSE)
+ continue;
+
+ if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_INSERT,
+ &eftp->eft_entry[tbl_id].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
+ &eftp->eft_entry[tbl_id].efe_handle);
+ }
+
+ if (rc != 0)
+ goto fail1;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ ef10_filter_set_entry_not_busy(eftp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * An arbitrary search limit for the software hash table, as used by the
+ * Linux net driver.
+ */
+#define EF10_FILTER_SEARCH_LIMIT 200
+
+static __checkReturn efx_rc_t
+ef10_filter_add_internal(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace,
+ __out_opt uint32_t *filter_id)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *saved_spec;
+ uint32_t hash;
+ unsigned int depth;
+ int ins_index;
+ boolean_t replacing = B_FALSE;
+ unsigned int i;
+ int state;
+ boolean_t locked = B_FALSE;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+#if EFSYS_OPT_RX_SCALE
+ spec->efs_rss_context = enp->en_rss_context;
+#endif
+
+ hash = ef10_filter_hash(spec);
+
+ /*
+ * FIXME: Add support for inserting filters of different priorities
+ * and removing lower priority multicast filters (bug 42378)
+ */
+
+ /*
+ * Find any existing filters with the same match tuple or
+ * else a free slot to insert at. If any of them are busy,
+ * we have to wait and retry.
+ */
+ for (;;) {
+ ins_index = -1;
+ depth = 1;
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ for (;;) {
+ i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+ saved_spec = ef10_filter_entry_spec(eftp, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0) {
+ ins_index = i;
+ }
+ } else if (ef10_filter_equal(spec, saved_spec)) {
+ if (ef10_filter_entry_is_busy(eftp, i))
+ break;
+ if (saved_spec->efs_priority
+ == EFX_FILTER_PRI_AUTO) {
+ ins_index = i;
+ goto found;
+ } else if (ef10_filter_is_exclusive(spec)) {
+ if (may_replace) {
+ ins_index = i;
+ goto found;
+ } else {
+ rc = EEXIST;
+ goto fail1;
+ }
+ }
+
+ /* Leave existing */
+ }
+
+ /*
+ * Once we reach the maximum search depth, use
+ * the first suitable slot or return EBUSY if
+ * there was none.
+ */
+ if (depth == EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = EBUSY;
+ goto fail2;
+ }
+ goto found;
+ }
+ depth++;
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+ }
+
+found:
+ /*
+ * Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = ef10_filter_entry_spec(eftp, ins_index);
+ if (saved_spec) {
+ if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) {
+ /* This is a filter we are refreshing */
+ ef10_filter_set_entry_not_auto_old(eftp, ins_index);
+ goto out_unlock;
+
+ }
+ replacing = B_TRUE;
+ } else {
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), saved_spec);
+ if (!saved_spec) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+ *saved_spec = *spec;
+ ef10_filter_set_entry(eftp, ins_index, saved_spec);
+ }
+ ef10_filter_set_entry_busy(eftp, ins_index);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+	/*
+	 * When replacing a filter, the handle may change after a successful
+	 * replace operation.
+	 */
+ if (replacing) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE,
+ &eftp->eft_entry[ins_index].efe_handle);
+ } else if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_INSERT,
+ &eftp->eft_entry[ins_index].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
+ &eftp->eft_entry[ins_index].efe_handle);
+ }
+
+ if (rc != 0)
+ goto fail4;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ if (replacing) {
+ /* Update the fields that may differ */
+ saved_spec->efs_priority = spec->efs_priority;
+ saved_spec->efs_flags = spec->efs_flags;
+ saved_spec->efs_rss_context = spec->efs_rss_context;
+ saved_spec->efs_dmaq_id = spec->efs_dmaq_id;
+ }
+
+ ef10_filter_set_entry_not_busy(eftp, ins_index);
+
+out_unlock:
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ if (filter_id)
+ *filter_id = ins_index;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+ if (!replacing) {
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), saved_spec);
+ saved_spec = NULL;
+ }
+ ef10_filter_set_entry_not_busy(eftp, ins_index);
+ ef10_filter_set_entry(eftp, ins_index, NULL);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ if (locked)
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace)
+{
+ efx_rc_t rc;
+
+ rc = ef10_filter_add_internal(enp, spec, may_replace, NULL);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+ef10_filter_delete_internal(
+ __in efx_nic_t *enp,
+ __in uint32_t filter_id)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *spec;
+ int state;
+ uint32_t filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS;
+
+ /*
+ * Find the software table entry and mark it busy. Don't
+ * remove it yet; any attempt to update while we're waiting
+ * for the firmware must find the busy entry.
+ *
+ * FIXME: What if the busy flag is never cleared?
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ while (ef10_filter_entry_is_busy(table, filter_idx)) {
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_SPIN(1);
+ EFSYS_LOCK(enp->en_eslp, state);
+ }
+ if ((spec = ef10_filter_entry_spec(table, filter_idx)) != NULL) {
+ ef10_filter_set_entry_busy(table, filter_idx);
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (spec == NULL) {
+ rc = ENOENT;
+ goto fail1;
+ }
+
+ /*
+ * Try to remove the hardware filter. This may fail if the MC has
+ * rebooted (which frees all hardware filter resources).
+ */
+ if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_delete(enp,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE,
+ &table->eft_entry[filter_idx].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_delete(enp,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE,
+ &table->eft_entry[filter_idx].efe_handle);
+ }
+
+ /* Free the software table entry */
+ EFSYS_LOCK(enp->en_eslp, state);
+ ef10_filter_set_entry_not_busy(table, filter_idx);
+ ef10_filter_set_entry(table, filter_idx, NULL);
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec);
+
+ /* Check result of hardware filter removal */
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *saved_spec;
+ unsigned int hash;
+ unsigned int depth;
+ unsigned int i;
+ int state;
+ boolean_t locked = B_FALSE;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ hash = ef10_filter_hash(spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ depth = 1;
+ for (;;) {
+ i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+ saved_spec = ef10_filter_entry_spec(table, i);
+ if (saved_spec && ef10_filter_equal(spec, saved_spec) &&
+ ef10_filter_same_dest(spec, saved_spec)) {
+ break;
+ }
+ if (depth == EF10_FILTER_SEARCH_LIMIT) {
+ rc = ENOENT;
+ goto fail1;
+ }
+ depth++;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ rc = ef10_filter_delete_internal(enp, i);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ if (locked)
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_parser_disp_info(
+ __in efx_nic_t *enp,
+ __out uint32_t *list,
+ __out size_t *length)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN,
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)];
+ efx_rc_t rc;
+ uint32_t i;
+ boolean_t support_unknown_ucast = B_FALSE;
+ boolean_t support_unknown_mcast = B_FALSE;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP,
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ *length = MCDI_OUT_DWORD(req,
+ GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES);
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(*length)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ (void) memcpy(list,
+ MCDI_OUT2(req,
+ void,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES),
+ (*length) * sizeof (uint32_t));
+ EFX_STATIC_ASSERT(sizeof (uint32_t) ==
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN);
+
+ /*
+ * Remove UNKNOWN UCAST and MCAST flags, and if both are present, change
+ * the lower priority one to LOC_MAC_IG.
+ */
+ for (i = 0; i < *length; i++) {
+ if (list[i] & MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN) {
+ list[i] &=
+ (~MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN);
+ support_unknown_ucast = B_TRUE;
+ }
+ if (list[i] & MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) {
+ list[i] &=
+ (~MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN);
+ support_unknown_mcast = B_TRUE;
+ }
+
+ if (support_unknown_ucast && support_unknown_mcast) {
+ list[i] &= EFX_FILTER_MATCH_LOC_MAC_IG;
+ break;
+ }
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out uint32_t *list,
+ __out size_t *length)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_parser_disp_info(enp, list, length)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_unicast(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *addr,
+ __in efx_filter_flag_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the filter for the local station address */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ (void) efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC,
+ addr);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_unicst_filter_count++;
+ EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_all_unicast(
+ __in efx_nic_t *enp,
+ __in efx_filter_flag_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the unknown unicast filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ (void) efx_filter_spec_set_uc_def(&spec);
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_unicst_filter_count++;
+ EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_multicast_list(
+ __in efx_nic_t *enp,
+ __in boolean_t mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count,
+ __in efx_filter_flag_t filter_flags,
+ __in boolean_t rollback)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ uint8_t addr[6];
+ uint32_t i;
+ uint32_t filter_index;
+ uint32_t filter_count;
+ efx_rc_t rc;
+
+ if (mulcst == B_FALSE)
+ count = 0;
+
+ if (count + (brdcst ? 1 : 0) >
+ EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) {
+ /* Too many MAC addresses */
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Insert/renew multicast address list filters */
+ filter_count = 0;
+ for (i = 0; i < count; i++) {
+ efx_filter_spec_init_rx(&spec,
+ EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+
+ (void) efx_filter_spec_set_eth_local(&spec,
+ EFX_FILTER_SPEC_VID_UNSPEC,
+ &addrs[i * EFX_MAC_ADDR_LEN]);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &filter_index);
+
+ if (rc == 0) {
+ eftp->eft_mulcst_filter_indexes[filter_count] =
+ filter_index;
+ filter_count++;
+ } else if (rollback == B_TRUE) {
+			/* Only stop upon failure if told to roll back */
+ goto rollback;
+ }
+
+ }
+
+ if (brdcst == B_TRUE) {
+ /* Insert/renew broadcast address filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+
+ EFX_MAC_BROADCAST_ADDR_SET(addr);
+ (void) efx_filter_spec_set_eth_local(&spec,
+ EFX_FILTER_SPEC_VID_UNSPEC,
+ addr);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &filter_index);
+
+ if (rc == 0) {
+ eftp->eft_mulcst_filter_indexes[filter_count] =
+ filter_index;
+ filter_count++;
+ } else if (rollback == B_TRUE) {
+			/* Only stop upon failure if told to roll back */
+ goto rollback;
+ }
+ }
+
+ eftp->eft_mulcst_filter_count = filter_count;
+ eftp->eft_using_all_mulcst = B_FALSE;
+
+ return (0);
+
+rollback:
+ /* Remove any filters we have inserted */
+ i = filter_count;
+ while (i--) {
+ (void) ef10_filter_delete_internal(enp,
+ eftp->eft_mulcst_filter_indexes[i]);
+ }
+ eftp->eft_mulcst_filter_count = 0;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_all_multicast(
+ __in efx_nic_t *enp,
+ __in efx_filter_flag_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the unknown multicast filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ (void) efx_filter_spec_set_mc_def(&spec);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_mulcst_filter_indexes[0]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_mulcst_filter_count = 1;
+ eftp->eft_using_all_mulcst = B_TRUE;
+
+ /*
+ * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic.
+ */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+ef10_filter_remove_old(
+ __in efx_nic_t *enp)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ uint32_t i;
+
+ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ if (ef10_filter_entry_is_auto_old(table, i)) {
+ (void) ef10_filter_delete_internal(enp, i);
+ }
+ }
+}
+
+
+static __checkReturn efx_rc_t
+ef10_filter_get_workarounds(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ uint32_t implemented = 0;
+ uint32_t enabled = 0;
+ efx_rc_t rc;
+
+ rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled);
+ if (rc == 0) {
+ /* Check if chained multicast filter support is enabled */
+ if (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807)
+ encp->enc_bug26807_workaround = B_TRUE;
+ else
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else if (rc == ENOTSUP) {
+ /*
+ * Firmware is too old to support GET_WORKAROUNDS, and support
+ * for this workaround was implemented later.
+ */
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Reconfigure all filters.
+ * If the all_unicst and/or all_mulcst filters cannot be applied, then return
+ * ENOTSUP (note that the filters for the specified addresses are still
+ * applied in this case).
+ */
+ __checkReturn efx_rc_t
+ef10_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_flag_t filter_flags;
+ unsigned i;
+ efx_rc_t all_unicst_rc = 0;
+ efx_rc_t all_mulcst_rc = 0;
+ efx_rc_t rc;
+
+ if (table->eft_default_rxq == NULL) {
+ /*
+ * Filters direct traffic to the default RXQ, and so cannot be
+ * inserted until it is available. Any currently configured
+ * filters must be removed (ignore errors in case the MC
+ * has rebooted, which removes hardware filters).
+ */
+ for (i = 0; i < table->eft_unicst_filter_count; i++) {
+ (void) ef10_filter_delete_internal(enp,
+ table->eft_unicst_filter_indexes[i]);
+ }
+ table->eft_unicst_filter_count = 0;
+
+ for (i = 0; i < table->eft_mulcst_filter_count; i++) {
+ (void) ef10_filter_delete_internal(enp,
+ table->eft_mulcst_filter_indexes[i]);
+ }
+ table->eft_mulcst_filter_count = 0;
+
+ return (0);
+ }
+
+ if (table->eft_using_rss)
+ filter_flags = EFX_FILTER_FLAG_RX_RSS;
+ else
+ filter_flags = 0;
+
+ /* Mark old filters which may need to be removed */
+ for (i = 0; i < table->eft_unicst_filter_count; i++) {
+ ef10_filter_set_entry_auto_old(table,
+ table->eft_unicst_filter_indexes[i]);
+ }
+ for (i = 0; i < table->eft_mulcst_filter_count; i++) {
+ ef10_filter_set_entry_auto_old(table,
+ table->eft_mulcst_filter_indexes[i]);
+ }
+
+ /*
+ * Insert or renew unicast filters.
+ *
+	 * Firmware does not perform chaining on unicast filters. As traffic is
+ * therefore only delivered to the first matching filter, we should
+ * always insert the specific filter for our MAC address, to try and
+ * ensure we get that traffic.
+ *
+ * (If the filter for our MAC address has already been inserted by
+ * another function, we won't receive traffic sent to us, even if we
+ * insert a unicast mismatch filter. To prevent traffic stealing, this
+ * therefore relies on the privilege model only allowing functions to
+ * insert filters for their own MAC address unless explicitly given
+ * additional privileges by the user. This also means that, even on a
+	 * privileged function, inserting a unicast mismatch filter may not
+ * catch all traffic in multi PCI function scenarios.)
+ */
+ table->eft_unicst_filter_count = 0;
+ rc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags);
+ if (all_unicst || (rc != 0)) {
+ all_unicst_rc = ef10_filter_insert_all_unicast(enp,
+ filter_flags);
+ if ((rc != 0) && (all_unicst_rc != 0))
+ goto fail1;
+ }
+
+ /*
+ * WORKAROUND_BUG26807 controls firmware support for chained multicast
+ * filters, and can only be enabled or disabled when the hardware filter
+ * table is empty.
+ *
+ * Chained multicast filters require support from the datapath firmware,
+ * and may not be available (e.g. low-latency variants or old Huntington
+ * firmware).
+ *
+ * Firmware will reset (FLR) functions which have inserted filters in
+ * the hardware filter table when the workaround is enabled/disabled.
+ * Functions without any hardware filters are not reset.
+ *
+ * Re-check if the workaround is enabled after adding unicast hardware
+ * filters. This ensures that encp->enc_bug26807_workaround matches the
+ * firmware state, and that later changes to enable/disable the
+ * workaround will result in this function seeing a reset (FLR).
+ *
+ * In common-code drivers, we only support multiple PCI function
+ * scenarios with firmware that supports multicast chaining, so we can
+ * assume it is enabled for such cases and hence simplify the filter
+ * insertion logic. Firmware that does not support multicast chaining
+ * does not support multiple PCI function configurations either, so
+ * filter insertion is much simpler and the same strategies can still be
+ * used.
+ */
+ if ((rc = ef10_filter_get_workarounds(enp)) != 0)
+ goto fail2;
+
+ if ((table->eft_using_all_mulcst != all_mulcst) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is enabled, so traffic that matches
+ * more than one multicast filter will be replicated and
+ * delivered to multiple recipients. To avoid this duplicate
+ * delivery, remove old multicast filters before inserting new
+ * multicast filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
+
+ /* Insert or renew multicast filters */
+ if (all_mulcst == B_TRUE) {
+ /*
+ * Insert the all multicast filter. If that fails, try to insert
+ * all of our multicast filters (but without rollback on
+ * failure).
+ */
+ all_mulcst_rc = ef10_filter_insert_all_multicast(enp,
+ filter_flags);
+ if (all_mulcst_rc != 0) {
+ rc = ef10_filter_insert_multicast_list(enp, B_TRUE,
+ brdcst, addrs, count, filter_flags, B_FALSE);
+ if (rc != 0)
+ goto fail3;
+ }
+ } else {
+ /*
+ * Insert filters for multicast addresses.
+ * If any insertion fails, then rollback and try to insert the
+ * all multicast filter instead.
+ * If that also fails, try to insert all of the multicast
+ * filters (but without rollback on failure).
+ */
+ rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst,
+ addrs, count, filter_flags, B_TRUE);
+ if (rc != 0) {
+ if ((table->eft_using_all_mulcst == B_FALSE) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is on, so remove
+ * old filters before inserting the multicast
+ * all filter to avoid duplicate delivery caused
+ * by packets matching multiple filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
+
+ rc = ef10_filter_insert_all_multicast(enp,
+ filter_flags);
+ if (rc != 0) {
+ rc = ef10_filter_insert_multicast_list(enp,
+ mulcst, brdcst,
+ addrs, count, filter_flags, B_FALSE);
+ if (rc != 0)
+ goto fail4;
+ }
+ }
+ }
+
+ /* Remove old filters which were not renewed */
+ ef10_filter_remove_old(enp);
+
+	/* Report if any optional flags were rejected */
+ if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) ||
+ ((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) {
+ rc = ENOTSUP;
+ }
+
+ return (rc);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Clear auto old flags */
+ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ if (ef10_filter_entry_is_auto_old(table, i)) {
+ ef10_filter_set_entry_not_auto_old(table, i);
+ }
+ }
+
+ return (rc);
+}
+
+ void
+ef10_filter_get_default_rxq(
+ __in efx_nic_t *enp,
+ __out efx_rxq_t **erpp,
+ __out boolean_t *using_rss)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+ *erpp = table->eft_default_rxq;
+ *using_rss = table->eft_using_rss;
+}
+
+
+ void
+ef10_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+#if EFSYS_OPT_RX_SCALE
+ EFSYS_ASSERT((using_rss == B_FALSE) ||
+ (enp->en_rss_context != EF10_RSS_CONTEXT_INVALID));
+ table->eft_using_rss = using_rss;
+#else
+ EFSYS_ASSERT(using_rss == B_FALSE);
+ table->eft_using_rss = B_FALSE;
+#endif
+ table->eft_default_rxq = erp;
+}
+
+ void
+ef10_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+ table->eft_default_rxq = NULL;
+ table->eft_using_rss = B_FALSE;
+}
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_impl.h b/usr/src/uts/common/io/sfxge/common/ef10_impl.h
new file mode 100644
index 0000000000..5bfe59bebc
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_impl.h
@@ -0,0 +1,1080 @@
+/*
+ * Copyright (c) 2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EF10_IMPL_H
+#define _SYS_EF10_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (EFSYS_OPT_HUNTINGTON && EFSYS_OPT_MEDFORD)
+#define EF10_MAX_PIOBUF_NBUFS MAX(HUNT_PIOBUF_NBUFS, MEDFORD_PIOBUF_NBUFS)
+#elif EFSYS_OPT_HUNTINGTON
+#define EF10_MAX_PIOBUF_NBUFS HUNT_PIOBUF_NBUFS
+#elif EFSYS_OPT_MEDFORD
+#define EF10_MAX_PIOBUF_NBUFS MEDFORD_PIOBUF_NBUFS
+#endif
+
+/*
+ * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could
+ * possibly be increased, or the write size reported by newer firmware used
+ * instead.
+ */
+#define EF10_NVRAM_CHUNK 0x80
+
+/*
+ * Alignment requirement for the value written to the RX WPTR: the WPTR must
+ * be aligned to an 8-descriptor boundary.
+ */
+#define EF10_RX_WPTR_ALIGN 8
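+
+/*
+ * Illustrative sketch only (placeholder names, not the driver code): since
+ * the alignment is a power of two, a descriptor count can be rounded down to
+ * this boundary with a mask, e.g.
+ *
+ *	wptr = added & ~(EF10_RX_WPTR_ALIGN - 1);
+ */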
+
+/*
+ * Max byte offset into the packet the TCP header must start for the hardware
+ * to be able to parse the packet correctly.
+ */
+#define EF10_TCP_HEADER_OFFSET_LIMIT 208
+
+/* Invalid RSS context handle */
+#define EF10_RSS_CONTEXT_INVALID (0xffffffff)
+
+
+/* EV */
+
+ __checkReturn efx_rc_t
+ef10_ev_init(
+ __in efx_nic_t *enp);
+
+ void
+ef10_ev_fini(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep);
+
+ void
+ef10_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+ __checkReturn efx_rc_t
+ef10_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+ void
+ef10_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+ __checkReturn efx_rc_t
+ef10_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+#endif /* EFSYS_OPT_QSTATS */
+
+ void
+ef10_ev_rxlabel_init(
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp,
+ __in unsigned int label);
+
+ void
+ef10_ev_rxlabel_fini(
+ __in efx_evq_t *eep,
+ __in unsigned int label);
+
+/* INTR */
+
+ __checkReturn efx_rc_t
+ef10_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+ void
+ef10_intr_enable(
+ __in efx_nic_t *enp);
+
+ void
+ef10_intr_disable(
+ __in efx_nic_t *enp);
+
+ void
+ef10_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+ void
+ef10_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp);
+
+ void
+ef10_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+ void
+ef10_intr_fatal(
+ __in efx_nic_t *enp);
+ void
+ef10_intr_fini(
+ __in efx_nic_t *enp);
+
+/* NIC */
+
+extern __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+ef10_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp);
+
+
+/* MAC */
+
+extern __checkReturn efx_rc_t
+ef10_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+ef10_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_addr_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_pdu_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_multicast_list_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+ef10_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+extern __checkReturn efx_rc_t
+ef10_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type);
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+extern __checkReturn efx_rc_t
+ef10_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+
+/* MCDI */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+ef10_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern void
+ef10_mcdi_fini(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in void *hdrp,
+ __in size_t hdr_len,
+ __in void *sdup,
+ __in size_t sdu_len);
+
+extern __checkReturn boolean_t
+ef10_mcdi_poll_response(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length);
+
+extern efx_rc_t
+ef10_mcdi_poll_reboot(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* NVRAM */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buf_read_tlv(
+ __in efx_nic_t *enp,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buf_write_tlv(
+ __inout_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size,
+ __in uint32_t tag,
+ __in_bcount(tag_size) caddr_t tag_data,
+ __in size_t tag_size,
+ __out size_t *total_lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write_segment_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t all_segments);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern void
+ef10_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+ef10_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+ef10_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+ef10_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_validate(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_create(
+ __in efx_nic_t *enp,
+ __in uint16_t partn_type,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_find_item_start(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_find_end(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+ef10_nvram_buffer_find_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_get_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(item_max_size, *lengthp)
+ caddr_t itemp,
+ __in size_t item_max_size,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_insert_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_delete_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_finish(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_NVRAM */
+
+
+/* PHY */
+
+typedef struct ef10_link_state_s {
+ uint32_t els_adv_cap_mask;
+ uint32_t els_lp_cap_mask;
+ unsigned int els_fcntl;
+ efx_link_mode_t els_link_mode;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t els_loopback;
+#endif
+ boolean_t els_mac_up;
+} ef10_link_state_t;
+
+extern void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t on);
+
+extern __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+#if EFSYS_OPT_PHY_STATS
+
+extern __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+
+/* TX */
+
+extern __checkReturn efx_rc_t
+ef10_tx_init(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_tx_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp);
+
+extern void
+ef10_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+ef10_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qflush(
+ __in efx_txq_t *etp);
+
+extern void
+ef10_tx_qenable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_enable(
+ __in efx_txq_t *etp);
+
+extern void
+ef10_tx_qpio_disable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+ef10_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count);
+
+extern void
+ef10_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t vlan_tci,
+ __out efx_desc_t *edp);
+
+
+#if EFSYS_OPT_QSTATS
+
+extern void
+ef10_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+typedef uint32_t efx_piobuf_handle_t;
+
+#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t) -1)
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index);
+
+
+/* VPD */
+
+#if EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+ef10_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+ef10_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+
+/* RX */
+
+extern __checkReturn efx_rc_t
+ef10_rx_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+extern __checkReturn efx_rc_t
+ef10_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+
+#if EFSYS_OPT_RX_SCALE
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+extern __checkReturn uint32_t
+ef10_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+extern __checkReturn efx_rc_t
+ef10_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp);
+
+extern void
+ef10_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+extern void
+ef10_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_qflush(
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_qenable(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_FILTER
+
+typedef struct ef10_filter_handle_s {
+ uint32_t efh_lo;
+ uint32_t efh_hi;
+} ef10_filter_handle_t;
+
+typedef struct ef10_filter_entry_s {
+	uintptr_t		efe_spec; /* pointer to filter spec plus flag bits */
+ ef10_filter_handle_t efe_handle;
+} ef10_filter_entry_t;
+
+/*
+ * BUSY flag indicates that an update is in progress.
+ * AUTO_OLD flag is used to mark and sweep MAC packet filters.
+ */
+#define EFX_EF10_FILTER_FLAG_BUSY 1U
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2U
+#define EFX_EF10_FILTER_FLAGS 3U
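+
+/*
+ * Illustrative note: these flag bits are stored in the low bits of the
+ * efe_spec pointer word, relying on filter spec allocations being at least
+ * 4-byte aligned. A sketch of the assumed encoding ("entry" is a placeholder
+ * ef10_filter_entry_t pointer; the real accessors are in ef10_filter.c and
+ * may differ in detail):
+ *
+ *	spec = (efx_filter_spec_t *)
+ *	    (entry->efe_spec & ~(uintptr_t)EFX_EF10_FILTER_FLAGS);
+ *	busy = (entry->efe_spec & EFX_EF10_FILTER_FLAG_BUSY) != 0;
+ *	auto_old = (entry->efe_spec & EFX_EF10_FILTER_FLAG_AUTO_OLD) != 0;
+ */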
+
+/*
+ * Size of the hash table used by the driver. Doesn't need to be the
+ * same size as the hardware's table.
+ */
+#define EFX_EF10_FILTER_TBL_ROWS 8192
+
+/* Only need to allow for one directed and one unknown unicast filter */
+#define EFX_EF10_FILTER_UNICAST_FILTERS_MAX 2
+
+/* Allow for the broadcast address to be added to the multicast list */
+#define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1)
+
+typedef struct ef10_filter_table_s {
+ ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS];
+ efx_rxq_t * eft_default_rxq;
+ boolean_t eft_using_rss;
+ uint32_t eft_unicst_filter_indexes[
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX];
+	uint32_t		eft_unicst_filter_count;
+ uint32_t eft_mulcst_filter_indexes[
+ EFX_EF10_FILTER_MULTICAST_FILTERS_MAX];
+ uint32_t eft_mulcst_filter_count;
+ boolean_t eft_using_all_mulcst;
+} ef10_filter_table_t;
+
+ __checkReturn efx_rc_t
+ef10_filter_init(
+ __in efx_nic_t *enp);
+
+ void
+ef10_filter_fini(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_filter_restore(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace);
+
+ __checkReturn efx_rc_t
+ef10_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+ef10_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out uint32_t *list,
+ __out size_t *length);
+
+extern __checkReturn efx_rc_t
+ef10_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count);
+
+extern void
+ef10_filter_get_default_rxq(
+ __in efx_nic_t *enp,
+ __out efx_rxq_t **erpp,
+ __out boolean_t *using_rss);
+
+extern void
+ef10_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+ef10_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_function_info(
+ __in efx_nic_t *enp,
+ __out uint32_t *pfp,
+ __out_opt uint32_t *vfp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_privilege_mask(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __in uint32_t vf,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_port_assignment(
+ __in efx_nic_t *enp,
+ __out uint32_t *portp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_port_modes(
+ __in efx_nic_t *enp,
+ __out uint32_t *modesp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_pf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_vf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_clock(
+ __in efx_nic_t *enp,
+ __out uint32_t *sys_freqp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_vector_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *vec_basep,
+ __out_opt uint32_t *pf_nvecp,
+ __out_opt uint32_t *vf_nvecp);
+
+extern __checkReturn efx_rc_t
+ef10_get_datapath_caps(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_get_privilege_mask(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+ef10_external_port_mapping(
+ __in efx_nic_t *enp,
+ __in uint32_t port,
+ __out uint8_t *external_portp);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EF10_IMPL_H */
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_intr.c b/usr/src/uts/common/io/sfxge/common/ef10_intr.c
new file mode 100644
index 0000000000..c1bb4eae65
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_intr.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+ __checkReturn efx_rc_t
+ef10_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ _NOTE(ARGUNUSED(enp, type, esmp))
+ return (0);
+}
+
+
+ void
+ef10_intr_enable(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+ void
+ef10_intr_disable(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+ void
+ef10_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_trigger_interrupt(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_TRIGGER_INTERRUPT_IN_LEN,
+ MC_CMD_TRIGGER_INTERRUPT_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (level >= enp->en_nic_cfg.enc_intr_limit) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_TRIGGER_INTERRUPT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_TRIGGER_INTERRUPT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_TRIGGER_INTERRUPT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, TRIGGER_INTERRUPT_IN_INTR_LEVEL, level);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ if (encp->enc_bug41750_workaround) {
+ /*
+ * bug 41750: Test interrupts don't work on Greenport
+ * bug 50084: Test interrupts don't work on VFs
+ */
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_trigger_interrupt(enp, level)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_dword_t dword;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Read the queue mask and implicitly acknowledge the interrupt. */
+ EFX_BAR_READD(enp, ER_DZ_BIU_INT_ISR_REG, &dword, B_FALSE);
+ *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
+
+ *fatalp = B_FALSE;
+}
+
+ void
+ef10_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ _NOTE(ARGUNUSED(enp, message))
+
+ /* EF10 fatal errors are reported via events */
+ *fatalp = B_FALSE;
+}
+
+ void
+ef10_intr_fatal(
+ __in efx_nic_t *enp)
+{
+ /* EF10 fatal errors are reported via events */
+ _NOTE(ARGUNUSED(enp))
+}
+
+ void
+ef10_intr_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_mac.c b/usr/src/uts/common/io/sfxge/common/ef10_mac.c
new file mode 100644
index 0000000000..501c033456
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_mac.c
@@ -0,0 +1,749 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+ __checkReturn efx_rc_t
+ef10_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ ef10_link_state_t els;
+ efx_rc_t rc;
+
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail1;
+
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_fcntl = els.els_fcntl;
+
+ *link_modep = els.els_link_mode;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ ef10_link_state_t els;
+ efx_rc_t rc;
+
+ /*
+ * Because EF10 doesn't *require* polling, we can't rely on
+ * ef10_mac_poll() being executed to populate epp->ep_mac_up.
+ */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail1;
+
+ *mac_upp = els.els_mac_up;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * EF10 adapters use MC_CMD_VADAPTOR_SET_MAC to set the
+ * MAC address; the address field in MC_CMD_SET_MAC has no
+ * effect.
+ * MC_CMD_VADAPTOR_SET_MAC requires mac-spoofing privilege and
+ * the port to have no filters or queues active.
+ */
+static __checkReturn efx_rc_t
+efx_mcdi_vadapter_set_mac(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_SET_MAC_IN_LEN,
+ MC_CMD_VADAPTOR_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
+ enp->en_vport_id);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, VADAPTOR_SET_MAC_IN_MACADDR),
+ epp->ep_mac_addr);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_addr_set(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_vadapter_set_mac(enp)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail1;
+
+ /*
+ * Fallback for older Huntington firmware without Vadapter
+ * support.
+ */
+ if ((rc = ef10_mac_reconfigure(enp)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_mtu_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mtu)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ /* Only configure the MTU in this call to MC_CMD_SET_MAC */
+ MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_MTU, mtu);
+ MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_EXT_IN_CONTROL,
+ SET_MAC_EXT_IN_CFG_MTU, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_pdu_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ if (encp->enc_enhanced_set_mac_supported) {
+ if ((rc = efx_mcdi_mtu_set(enp, epp->ep_mac_pdu)) != 0)
+ goto fail1;
+ } else {
+		/*
+		 * Fallback for older Huntington firmware, which always
+		 * configures all of the parameters to MC_CMD_SET_MAC.
+		 * This isn't suitable for setting the MTU on unprivileged
+		 * functions.
+		 */
+ if ((rc = ef10_mac_reconfigure(enp)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+	__checkReturn	efx_rc_t
+ef10_mac_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
+ epp->ep_mac_addr);
+
+ /*
+ * Note: The Huntington MAC does not support REJECT_BRDCST.
+ * The REJECT_UNCST flag will also prevent multicast traffic
+ * from reaching the filters. As Huntington filters drop any
+	 * traffic that does not match a filter, it is ok to leave the
+ * MAC running in promiscuous mode. See bug41141.
+ *
+ * FIXME: Does REJECT_UNCST behave the same way on Medford?
+ */
+ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, 0,
+ SET_MAC_IN_REJECT_BRDCST, 0);
+
+ /*
+ * Flow control, whether it is auto-negotiated or not,
+ * is set via the PHY advertised capabilities. When set to
+ * automatic the MAC will use the PHY settings to determine
+ * the flow control settings.
+ */
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, MC_CMD_FCNTL_AUTO);
+
+ /* Do not include the Ethernet frame checksum in RX packets */
+ MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_IN_FLAGS,
+ SET_MAC_IN_FLAG_INCLUDE_FCS, 0);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ /*
+ * Unprivileged functions cannot control link state,
+ * but still need to configure filters.
+ */
+ if (req.emr_rc != EACCES) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ }
+
+ /*
+ * Apply the filters for the MAC configuration.
+ * If the NIC isn't ready to accept filters this may
+ * return success without setting anything.
+ */
+ (void) efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_multicast_list_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_rxq_t *old_rxq;
+ boolean_t old_using_rss;
+ efx_rc_t rc;
+
+ ef10_filter_get_default_rxq(enp, &old_rxq, &old_using_rss);
+
+ ef10_filter_default_rxq_set(enp, erp, using_rss);
+
+ rc = efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
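+	/* Restore the previous default RXQ configuration on failure */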
+ ef10_filter_default_rxq_set(enp, old_rxq, old_using_rss);
+
+ return (rc);
+}
+
+ void
+ef10_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ ef10_filter_default_rxq_clear(enp);
+
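+	/* Reinstall the MAC filters now that there is no default RXQ */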
+ (void) efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+}
+
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+ef10_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_loopback_type_t old_loopback_type;
+ efx_link_mode_t old_loopback_link_mode;
+ efx_rc_t rc;
+
+ /* The PHY object handles this on EF10 */
+ old_loopback_type = epp->ep_loopback_type;
+ old_loopback_link_mode = epp->ep_loopback_link_mode;
+ epp->ep_loopback_type = loopback_type;
+ epp->ep_loopback_link_mode = link_mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_loopback_type = old_loopback_type;
+ epp->ep_loopback_link_mode = old_loopback_link_mode;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+#define EF10_MAC_STAT_READ(_esmp, _field, _eqp) \
+ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp)
+
+
+ __checkReturn efx_rc_t
+ef10_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp)
+{
+ efx_qword_t value;
+ efx_qword_t generation_start;
+ efx_qword_t generation_end;
+
+ _NOTE(ARGUNUSED(enp))
+
+ /* Read END first so we don't race with the MC */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
+ &generation_end);
+ EFSYS_MEM_READ_BARRIER();
+
+ /* TX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value);
+ EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
+	EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value);
+
+ /* RX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
+
+ /* Packet memory (EF10 only) */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_BB_OVERFLOW]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_BB_OVERFLOW]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_VFIFO_FULL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_VFIFO_FULL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_VFIFO_FULL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_VFIFO_FULL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_QBB, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_QBB]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_QBB, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_QBB]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_MAPPING, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_MAPPING]), &value);
+
+ /* RX datapath */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_Q_DISABLED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_Q_DISABLED_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_DI_DROPPED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_DI_DROPPED_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_STREAMING_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_STREAMING_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_FETCH]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_WAIT]), &value);
+
+
+ /* VADAPTER RX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_BYTES]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_OVERFLOW]), &value);
+
+ /* VADAPTER TX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_PACKETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_BYTES]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_OVERFLOW]), &value);
+
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EFSYS_MEM_READ_BARRIER();
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
+ &generation_start);
+
+	/* Check that we didn't read the stats during a DMA update */
+	/* FIXME: this check may not be sufficient */
+ if (memcmp(&generation_start, &generation_end,
+ sizeof (generation_start)))
+ return (EAGAIN);
+
+ if (generationp)
+ *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_mcdi.c b/usr/src/uts/common/io/sfxge/common/ef10_mcdi.c
new file mode 100644
index 0000000000..f58371ada4
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_mcdi.c
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_MCDI
+
+#ifndef WITH_MCDI_V2
+#error "WITH_MCDI_V2 required for EF10 MCDIv2 commands."
+#endif
+
+
+ __checkReturn efx_rc_t
+ef10_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *emtp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t dword;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA);
+
+ /*
+ * All EF10 firmware supports MCDIv2 and MCDIv1.
+ * Medford BootROM supports MCDIv2 and MCDIv1.
+ * Huntington BootROM supports MCDIv1 only.
+ */
+ emip->emi_max_version = 2;
+
+ /* A host DMA buffer is required for EF10 MCDI */
+ if (esmp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Ensure that the MC doorbell is in a known state before issuing MCDI
+ * commands. The recovery algorithm requires that the MC command buffer
+	 * be 256 byte aligned. See bug24769.
+ */
+ if ((EFSYS_MEM_ADDR(esmp) & 0xFF) != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 1);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
+
+ /* Save initial MC reboot status */
+ (void) ef10_mcdi_poll_reboot(enp);
+
+ /* Start a new epoch (allow fresh MCDI requests to succeed) */
+ efx_mcdi_new_epoch(enp);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+
+ emip->emi_new_epoch = B_FALSE;
+}
+
+ void
+ef10_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in void *hdrp,
+ __in size_t hdr_len,
+ __in void *sdup,
+ __in size_t sdu_len)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t dword;
+ unsigned int pos;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Write the header */
+ for (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)hdrp + pos);
+ EFSYS_MEM_WRITED(esmp, pos, &dword);
+ }
+
+ /* Write the payload */
+ for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)sdup + pos);
+ EFSYS_MEM_WRITED(esmp, hdr_len + pos, &dword);
+ }
+
+ /* Guarantee ordering of memory (MCDI request) and PIO (MC doorbell) */
+ EFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, hdr_len + sdu_len);
+ EFSYS_PIO_WRITE_BARRIER();
+
+ /* Ring the doorbell to post the command DMA address to the MC */
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_LWRD_REG, &dword, B_FALSE);
+
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
+}
+
+ __checkReturn boolean_t
+ef10_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t hdr;
+
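+	/*
+	 * The MC sets MCDI_HEADER_RESPONSE in the shared header dword
+	 * once a response is available in the DMA buffer.
+	 */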
+ EFSYS_MEM_READD(esmp, 0, &hdr);
+ return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE);
+}
+
+ void
+ef10_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ unsigned int pos;
+ efx_dword_t data;
+
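+	/*
+	 * Copy the response out of the DMA buffer a dword at a time,
+	 * truncating the final copy to the remaining length.
+	 */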
+ for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
+ EFSYS_MEM_READD(esmp, offset + pos, &data);
+ (void) memcpy((uint8_t *)bufferp + pos, &data,
+ MIN(sizeof (data), length - pos));
+ }
+}
+
+ efx_rc_t
+ef10_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t dword;
+ uint32_t old_status;
+ uint32_t new_status;
+ efx_rc_t rc;
+
+ old_status = emip->emi_mc_reboot_status;
+
+ /* Update MC reboot status word */
+ EFX_BAR_TBL_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, 0, &dword, B_FALSE);
+ new_status = dword.ed_u32[0];
+
+ /* MC has rebooted if the value has changed */
+ if (new_status != old_status) {
+ emip->emi_mc_reboot_status = new_status;
+
+ /*
+ * FIXME: Ignore detected MC REBOOT for now.
+ *
+ * The Siena support for checking for MC reboot from status
+ * flags is broken - see comments in siena_mcdi_poll_reboot().
+ * As the generic MCDI code is shared the EF10 reboot
+ * detection suffers similar problems.
+ *
+ * Do not report an error when the boot status changes until
+ * this can be handled by common code drivers (and reworked to
+ * support Siena too).
+ */
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = EIO;
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t privilege_mask = encp->enc_privilege_mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Use privilege mask state at MCDI attach.
+ */
+
+ switch (id) {
+ case EFX_MCDI_FEATURE_FW_UPDATE:
+ /*
+		 * Admin privilege must be used prior to the introduction
+		 * of a specific flag.
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_LINK_CONTROL:
+ /*
+		 * Admin privilege was used prior to the introduction
+		 * of a specific flag.
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, LINK) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_MACADDR_CHANGE:
+ /*
+ * Admin privilege must be used prior to introduction of
+ * mac spoofing privilege (at v4.6), which is used up to
+ * introduction of change mac spoofing privilege (at v4.7)
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, CHANGE_MAC) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_MAC_SPOOFING:
+ /*
+ * Admin privilege must be used prior to introduction of
+ * mac spoofing privilege (at v4.6), which is used up to
+ * introduction of mac spoofing TX privilege (at v4.7)
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING_TX) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MCDI */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_nic.c b/usr/src/uts/common/io/sfxge/common/ef10_nic.c
new file mode 100644
index 0000000000..8b4c5d7520
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_nic.c
@@ -0,0 +1,1629 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#include "ef10_tlv_layout.h"
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_assignment(
+ __in efx_nic_t *enp,
+ __out uint32_t *portp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
+ MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_modes(
+ __in efx_nic_t *enp,
+ __out uint32_t *modesp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
+ MC_CMD_GET_PORT_MODES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_MODES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /*
+ * Require only Modes and DefaultMode fields.
+ * (CurrentMode field was added for Medford)
+ */
+ if (req.emr_out_length_used <
+ MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_alloc(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
+ MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
+ VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
+ enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_free(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
+ MC_CMD_VADAPTOR_FREE_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_FREE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_pf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_vf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_clock(
+ __in efx_nic_t *enp,
+ __out uint32_t *sys_freqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
+ MC_CMD_GET_CLOCK_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CLOCK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
+ if (*sys_freqp == 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_vector_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *vec_basep,
+ __out_opt uint32_t *pf_nvecp,
+ __out_opt uint32_t *vf_nvecp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
+ MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (vec_basep != NULL)
+ *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
+ if (pf_nvecp != NULL)
+ *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
+ if (vf_nvecp != NULL)
+ *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_capabilities(
+ __in efx_nic_t *enp,
+ __out uint32_t *flagsp,
+ __out uint32_t *flags2p)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CAPABILITIES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ *flags2p = 0;
+ else
+ *flags2p = MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_vis(
+ __in efx_nic_t *enp,
+ __in uint32_t min_vi_count,
+ __in uint32_t max_vi_count,
+ __out uint32_t *vi_basep,
+ __out uint32_t *vi_countp,
+ __out uint32_t *vi_shiftp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
+ MC_CMD_ALLOC_VIS_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (vi_countp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_VIS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_VIS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
+ *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
+
+ /* Report VI_SHIFT if available (always zero for Huntington) */
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
+ *vi_shiftp = 0;
+ else
+ *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_vis(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_FREE_VIS;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+	/* Ignore EALREADY (no allocated VIs, so nothing to free) */
+ if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_piobuf(
+ __in efx_nic_t *enp,
+ __out efx_piobuf_handle_t *handlep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
+ MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (handlep == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_piobuf(
+ __in efx_nic_t *enp,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
+ MC_CMD_FREE_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FREE_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_link_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_LINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_unlink_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+ef10_nic_alloc_piobufs(
+ __in efx_nic_t *enp,
+ __in uint32_t max_piobuf_count)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ EFSYS_ASSERT3U(max_piobuf_count, <=,
+ EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
+
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+
+ for (i = 0; i < max_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
+ goto fail1;
+
+ enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
+ enp->en_arch.ef10.ena_piobuf_count++;
+ }
+
+ return;
+
+fail1:
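+	/* Free any piobufs allocated before the failure */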
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ (void) efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+
+static void
+ef10_nic_free_piobufs(
+ __in efx_nic_t *enp)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ (void) efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+/* Sub-allocate a block from a piobuf */
+ __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
+ uint32_t blk_per_buf;
+ uint32_t buf, blk;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT(bufnump);
+ EFSYS_ASSERT(handlep);
+ EFSYS_ASSERT(blknump);
+ EFSYS_ASSERT(offsetp);
+ EFSYS_ASSERT(sizep);
+
+ if ((edcp->edc_pio_alloc_size == 0) ||
+ (enp->en_arch.ef10.ena_piobuf_count == 0)) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+ blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
+
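+	/*
+	 * Scan each piobuf's allocation bitmap for a free block; a set bit
+	 * in ena_pio_alloc_map[] marks a block that is already in use.
+	 */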
+ for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
+ uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
+
+ if (~(*map) == 0)
+ continue;
+
+ EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
+ for (blk = 0; blk < blk_per_buf; blk++) {
+ if ((*map & (1u << blk)) == 0) {
+ *map |= (1u << blk);
+ goto done;
+ }
+ }
+ }
+ rc = ENOMEM;
+ goto fail2;
+
+done:
+ *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
+ *bufnump = buf;
+ *blknump = blk;
+ *sizep = edcp->edc_pio_alloc_size;
+ *offsetp = blk * (*sizep);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Free a piobuf sub-allocated block */
+ __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum)
+{
+ uint32_t *map;
+ efx_rc_t rc;
+
+ if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
+ (blknum >= (8 * sizeof (*map)))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
+ if ((*map & (1u << blknum)) == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+ *map &= ~(1u << blknum);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ return (efx_mcdi_link_piobuf(enp, vi_index, handle));
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ return (efx_mcdi_unlink_piobuf(enp, vi_index));
+}
+
+ __checkReturn efx_rc_t
+ef10_get_datapath_caps(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t flags;
+ uint32_t flags2;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_capabilities(enp, &flags, &flags2)) != 0)
+ goto fail1;
+
+#define CAP_FLAG(flags1, field) \
+ ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+#define CAP_FLAG2(flags2, field) \
+ ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+ /*
+ * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
+ * We only support the 14 byte prefix here.
+ */
+ if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ encp->enc_rx_prefix_size = 14;
+
+ /* Check if the firmware supports TSO */
+ encp->enc_fw_assisted_tso_enabled =
+ CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports FATSOv2 */
+ encp->enc_fw_assisted_tso_v2_enabled =
+ CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware has vadapter/vport/vswitch support */
+ encp->enc_datapath_cap_evb =
+ CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports VLAN insertion */
+ encp->enc_hw_tx_insert_vlan_enabled =
+ CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports RX event batching */
+ encp->enc_rx_batching_enabled =
+ CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;
+
+ if (encp->enc_rx_batching_enabled)
+ encp->enc_rx_batch_max = 16;
+
+ /* Check if the firmware supports disabling scatter on RXQs */
+ encp->enc_rx_disable_scatter_supported =
+ CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports set mac with running filters */
+ encp->enc_allow_set_mac_with_installed_filters =
+ CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?
+ B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
+ * specifying which parameters to configure.
+ */
+ encp->enc_enhanced_set_mac_supported =
+ CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;
+
+#undef CAP_FLAG
+#undef CAP_FLAG2
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#define EF10_LEGACY_PF_PRIVILEGE_MASK \
+ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
+
+#define EF10_LEGACY_VF_PRIVILEGE_MASK 0
+
+
+ __checkReturn efx_rc_t
+ef10_get_privilege_mask(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t mask;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
+ &mask)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail1;
+
+ /* Fallback for old firmware without privilege mask support */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ /* Assume PF has admin privilege */
+ mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
+ } else {
+ /* VF is always unprivileged by default */
+ mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
+ }
+ }
+
+ *maskp = mask;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * The external port mapping is a one-based numbering of the external
+ * connectors on the board. It does not distinguish between outputs that
+ * are separated off-board, such as by multi-headed cables.
+ * The number of ports that map to each external port connector
+ * on the board is determined by the chip family and the port modes to
+ * which the NIC can be configured. The mapping table lists modes with
+ * port numbering requirements in increasing order.
+ */
+static struct {
+ efx_family_t family;
+ uint32_t modes_mask;
+ uint32_t stride;
+} __ef10_external_port_mappings[] = {
+ /* Supported modes requiring 1 output per port */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1 << TLV_PORT_MODE_10G) |
+ (1 << TLV_PORT_MODE_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G),
+ 1
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G) |
+ (1 << TLV_PORT_MODE_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G),
+ 1
+ },
+ /* Supported modes requiring 2 outputs per port */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1 << TLV_PORT_MODE_40G) |
+ (1 << TLV_PORT_MODE_40G_40G) |
+ (1 << TLV_PORT_MODE_40G_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_40G),
+ 2
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_40G) |
+ (1 << TLV_PORT_MODE_40G_40G) |
+ (1 << TLV_PORT_MODE_40G_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_40G),
+ 2
+ },
+ /* Supported modes requiring 4 outputs per port */
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),
+ 4
+ },
+};
+
+ __checkReturn efx_rc_t
+ef10_external_port_mapping(
+ __in efx_nic_t *enp,
+ __in uint32_t port,
+ __out uint8_t *external_portp)
+{
+ efx_rc_t rc;
+ int i;
+ uint32_t port_modes;
+ uint32_t matches;
+ uint32_t stride = 1; /* default 1-1 mapping */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes)) != 0) {
+ /* No port mode information available - use default mapping */
+ goto out;
+ }
+
+ /*
+ * Infer the internal port -> external port mapping from
+ * the possible port modes for this NIC.
+ */
+ for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
+ if (__ef10_external_port_mappings[i].family !=
+ enp->en_family)
+ continue;
+ matches = (__ef10_external_port_mappings[i].modes_mask &
+ port_modes);
+ if (matches != 0) {
+ stride = __ef10_external_port_mappings[i].stride;
+ port_modes &= ~matches;
+ }
+ }
+
+ if (port_modes != 0) {
+ /* Some advertised modes are not supported */
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+out:
+ /*
+ * Scale as required by last matched mode and then convert to
+ * one-based numbering
+ */
+ *external_portp = (uint8_t)(port / stride) + 1;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Read and clear any assertion state */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+
+ /* Exit the assertion handler */
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ if (rc != EACCES)
+ goto fail2;
+
+ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+ goto fail3;
+
+ if ((rc = enop->eno_board_cfg(enp)) != 0)
+ if (rc != EACCES)
+ goto fail4;
+
+ /*
+ * Set default driver config limits (based on board config).
+ *
+ * FIXME: For now allocate a fixed number of VIs which is likely to be
+ * sufficient and small enough to allow multiple functions on the same
+ * port.
+ */
+ edcp->edc_min_vi_count = edcp->edc_max_vi_count =
+ MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
+
+ /* The client driver must configure and enable PIO buffer support */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
+ goto fail5;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
+ goto fail6;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
+ /* Unprivileged functions do not have access to sensors */
+ if (rc != EACCES)
+ goto fail7;
+ }
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail7:
+ EFSYS_PROBE(fail7);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail6:
+ EFSYS_PROBE(fail6);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail5:
+ EFSYS_PROBE(fail5);
+#endif
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_evq_count, max_evq_count;
+ uint32_t min_rxq_count, max_rxq_count;
+ uint32_t min_txq_count, max_txq_count;
+ efx_rc_t rc;
+
+ if (edlp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Get minimum required and maximum usable VI limits */
+ min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
+ min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
+ min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_min_vi_count =
+ MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
+
+ max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
+ max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
+ max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_max_vi_count =
+ MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
+
+ /*
+ * Check limits for sub-allocated piobuf blocks.
+ * PIO is optional, so don't fail if the limits are incorrect.
+ */
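+	/*
+	 * Illustrative numbers only: with a 2KB piobuf and a 256 byte
+	 * minimum allocation size, each piobuf provides 8 blocks, so a
+	 * request for 20 blocks rounds up to 3 piobufs.
+	 */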
+ if ((encp->enc_piobuf_size == 0) ||
+ (encp->enc_piobuf_limit == 0) ||
+ (edlp->edl_min_pio_alloc_size == 0) ||
+ (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
+ /* Disable PIO */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+ } else {
+ uint32_t blk_size, blk_count, blks_per_piobuf;
+
+ blk_size =
+ MAX(edlp->edl_min_pio_alloc_size,
+ encp->enc_piobuf_min_alloc_size);
+
+ blks_per_piobuf = encp->enc_piobuf_size / blk_size;
+ EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
+
+ blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
+
+ /* A zero max pio alloc count means unlimited */
+ if ((edlp->edl_max_pio_alloc_count > 0) &&
+ (edlp->edl_max_pio_alloc_count < blk_count)) {
+ blk_count = edlp->edl_max_pio_alloc_count;
+ }
+
+ edcp->edc_pio_alloc_size = blk_size;
+ edcp->edc_max_piobuf_count =
+ (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
+ MC_CMD_ENTITY_RESET_OUT_LEN)];
+ efx_rc_t rc;
+
+ /* ef10_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
+
+ MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ /* Clear RX/TX DMA queue errors */
+ enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_vi_count, max_vi_count;
+ uint32_t vi_count, vi_base, vi_shift;
+ uint32_t i;
+ uint32_t retry;
+ uint32_t delay_us;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Enable reporting of some events (e.g. link change) */
+ if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+ goto fail1;
+
+ /* Allocate (optional) on-chip PIO buffers */
+ ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
+
+ /*
+ * For best performance, PIO writes should use a write-combined
+ * (WC) memory mapping. Using a separate WC mapping for the PIO
+ * aperture of each VI would be a burden to drivers (and not
+ * possible if the host page size is >4Kbyte).
+ *
+ * To avoid this we use a single uncached (UC) mapping for VI
+ * register access, and a single WC mapping for extra VIs used
+ * for PIO writes.
+ *
+ * Each piobuf must be linked to a VI in the WC mapping, and to
+ * each VI that is using a sub-allocated block from the piobuf.
+ */
+ min_vi_count = edcp->edc_min_vi_count;
+ max_vi_count =
+ edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
+
+ /* Ensure that the previously attached driver's VIs are freed */
+ if ((rc = efx_mcdi_free_vis(enp)) != 0)
+ goto fail2;
+
+ /*
+ * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
+ * fails then retrying the request for fewer VI resources may succeed.
+ */
+ vi_count = 0;
+ if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
+ &vi_base, &vi_count, &vi_shift)) != 0)
+ goto fail3;
+
+ EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
+
+ if (vi_count < min_vi_count) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+
+ enp->en_arch.ef10.ena_vi_base = vi_base;
+ enp->en_arch.ef10.ena_vi_count = vi_count;
+ enp->en_arch.ef10.ena_vi_shift = vi_shift;
+
+ if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
+ /* Not enough extra VIs to map piobufs */
+ ef10_nic_free_piobufs(enp);
+ }
+
+ enp->en_arch.ef10.ena_pio_write_vi_base =
+ vi_count - enp->en_arch.ef10.ena_piobuf_count;
+
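+	/*
+	 * The resulting BAR layout is a UC region at offset 0 covering the
+	 * ordinary VIs, followed immediately by a WC region covering the
+	 * extra VIs reserved for PIO writes (if any).
+	 */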
+ /* Save UC memory mapping details */
+ enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_pio_write_vi_base);
+ } else {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_vi_count);
+ }
+
+ /* Save WC memory mapping details */
+ enp->en_arch.ef10.ena_wc_mem_map_offset =
+ enp->en_arch.ef10.ena_uc_mem_map_offset +
+ enp->en_arch.ef10.ena_uc_mem_map_size;
+
+ enp->en_arch.ef10.ena_wc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_piobuf_count);
+
+ /* Link piobufs to extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_link_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i,
+ enp->en_arch.ef10.ena_piobuf_handle[i]);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ /*
+ * Allocate a vAdaptor attached to our upstream vPort/pPort.
+ *
+ * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
+ * driver has yet to bring up the EVB port. See bug 56147. In this case,
+ * retry the request several times after waiting a while. The wait time
+ * between retries starts small (10ms) and exponentially increases.
+ * Total wait time is a little over two seconds. Retry logic in the
+ * client driver may mean this whole loop is repeated if it continues to
+ * fail.
+ */
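+	/* Retry delays: 10ms, 40ms, 160ms, then 640ms per remaining retry */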
+ retry = 0;
+ delay_us = 10000;
+ while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
+ (rc != ENOENT)) {
+ /*
+ * Do not retry alloc for PF, or for other errors on
+ * a VF.
+ */
+ goto fail5;
+ }
+
+ /* VF startup before PF is ready. Retry allocation. */
+ if (retry > 5) {
+ /* Too many attempts */
+ rc = EINVAL;
+ goto fail6;
+ }
+ EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
+ EFSYS_SLEEP(delay_us);
+ retry++;
+ if (delay_us < 500000)
+ delay_us <<= 2;
+ }
+
+ enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
+ enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+
+ ef10_nic_free_piobufs(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Report VIs that the client driver can use.
+ * Do not include VIs used for PIO buffer writes.
+ */
+ *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
+
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * TODO: Specify host memory mapping alignment and granularity
+ * in efx_drv_limits_t so that they can be taken into account
+ * when allocating extra VIs for PIO writes.
+ */
+ switch (region) {
+ case EFX_REGION_VI:
+ /* UC mapped memory BAR region for VI registers */
+ *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
+ break;
+
+ case EFX_REGION_PIO_WRITE_VI:
+ /* WC mapped memory BAR region for piobuf writes */
+ *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_nic_fini(
+ __in efx_nic_t *enp)
+{
+ uint32_t i;
+ efx_rc_t rc;
+
+ (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
+ enp->en_vport_id = 0;
+
+ /* Unlink piobufs from extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_unlink_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ ef10_nic_free_piobufs(enp);
+
+ (void) efx_mcdi_free_vis(enp);
+ enp->en_arch.ef10.ena_vi_count = 0;
+}
+
+ void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_cfg_free(enp);
+#endif /* EFSYS_OPT_MON_STATS */
+ (void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ /* FIXME */
+ _NOTE(ARGUNUSED(enp))
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ /* FIXME */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_nvram.c b/usr/src/uts/common/io/sfxge/common/ef10_nvram.c
new file mode 100644
index 0000000000..d1f69427dd
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_nvram.c
@@ -0,0 +1,2331 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+#include "ef10_tlv_layout.h"
+
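+/*
+ * Each TLV item in the partition is laid out as (all fields little-endian):
+ *   dword 0:	tag
+ *   dword 1:	value length in bytes
+ *   dword 2+:	value, padded to a 32-bit boundary
+ * A chain of items is terminated by a TLV_TAG_END tag.
+ */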
+/* Cursor for TLV partition format */
+typedef struct tlv_cursor_s {
+ uint32_t *block; /* Base of data block */
+ uint32_t *current; /* Cursor position */
+ uint32_t *end; /* End tag position */
+ uint32_t *limit; /* Last dword of data block */
+} tlv_cursor_t;
+
+typedef struct nvram_partition_s {
+ uint16_t type;
+ uint8_t chip_select;
+ uint8_t flags;
+ /*
+ * The full length of the NVRAM partition.
+ * This is different from tlv_partition_header.total_length,
+ * which can be smaller.
+ */
+ uint32_t length;
+ uint32_t erase_size;
+ uint32_t *data;
+ tlv_cursor_t tlv_cursor;
+} nvram_partition_t;
+
+
+static __checkReturn efx_rc_t
+tlv_validate_state(
+ __inout tlv_cursor_t *cursor);
+
+
+static void
+tlv_init_block(
+ __out uint32_t *block)
+{
+ *block = __CPU_TO_LE_32(TLV_TAG_END);
+}
+
+static uint32_t
+tlv_tag(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t dword, tag;
+
+ dword = cursor->current[0];
+ tag = __LE_TO_CPU_32(dword);
+
+ return (tag);
+}
+
+static size_t
+tlv_length(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t dword, length;
+
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (0);
+
+ dword = cursor->current[1];
+ length = __LE_TO_CPU_32(dword);
+
+ return ((size_t)length);
+}
+
+static uint8_t *
+tlv_value(
+ __in tlv_cursor_t *cursor)
+{
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (NULL);
+
+ return ((uint8_t *)(&cursor->current[2]));
+}
+
+static uint8_t *
+tlv_item(
+ __in tlv_cursor_t *cursor)
+{
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (NULL);
+
+ return ((uint8_t *)cursor->current);
+}
+
+/*
+ * TLV item DWORD length is tag + length + value (rounded up to DWORD);
+ * equivalent to tlv_n_words_for_len in mc-comms tlv.c
+ */
+#define TLV_DWORD_COUNT(length) \
+ (1 + 1 + (((length) + sizeof (uint32_t) - 1) / sizeof (uint32_t)))
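+
+/*
+ * For example, a 5 byte value occupies 2 value dwords, so the whole item
+ * is 1 (tag) + 1 (length) + 2 (value) = 4 dwords.
+ */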
+
+
+static uint32_t *
+tlv_next_item_ptr(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t length;
+
+ length = tlv_length(cursor);
+
+ return (cursor->current + TLV_DWORD_COUNT(length));
+}
+
+static __checkReturn efx_rc_t
+tlv_advance(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (cursor->current == cursor->end) {
+ /* No more tags after END tag */
+ cursor->current = NULL;
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ /* Advance to next item and validate */
+ cursor->current = tlv_next_item_ptr(cursor);
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_rewind(
+ __in tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ cursor->current = cursor->block;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_find(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag)
+{
+ efx_rc_t rc;
+
+ rc = tlv_rewind(cursor);
+ while (rc == 0) {
+ if (tlv_tag(cursor) == tag)
+ break;
+
+ rc = tlv_advance(cursor);
+ }
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_validate_state(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ /* Check cursor position */
+ if (cursor->current < cursor->block) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (cursor->current > cursor->limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (tlv_tag(cursor) != TLV_TAG_END) {
+ /* Check current item has space for tag and length */
+ if (cursor->current > (cursor->limit - 2)) {
+ cursor->current = NULL;
+ rc = EFAULT;
+ goto fail3;
+ }
+
+ /* Check we have value data for current item and another tag */
+ if (tlv_next_item_ptr(cursor) > (cursor->limit - 1)) {
+ cursor->current = NULL;
+ rc = EFAULT;
+ goto fail4;
+ }
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_init_cursor(
+ __out tlv_cursor_t *cursor,
+ __in uint32_t *block,
+ __in uint32_t *limit,
+ __in uint32_t *current)
+{
+ cursor->block = block;
+ cursor->limit = limit;
+
+ cursor->current = current;
+ cursor->end = NULL;
+
+ return (tlv_validate_state(cursor));
+}
+
+static __checkReturn efx_rc_t
+tlv_init_cursor_from_size(
+ __out tlv_cursor_t *cursor,
+ __in_bcount(size)
+ uint8_t *block,
+ __in size_t size)
+{
+ uint32_t *limit;
+ limit = (void *)(block + size - sizeof (uint32_t));
+ return (tlv_init_cursor(cursor, (void *)block,
+ limit, (void *)block));
+}
+
+static __checkReturn efx_rc_t
+tlv_init_cursor_at_offset(
+ __out tlv_cursor_t *cursor,
+ __in_bcount(size)
+ uint8_t *block,
+ __in size_t size,
+ __in size_t offset)
+{
+ uint32_t *limit;
+ uint32_t *current;
+ limit = (void *)(block + size - sizeof (uint32_t));
+ current = (void *)(block + offset);
+ return (tlv_init_cursor(cursor, (void *)block, limit, current));
+}
+
+static __checkReturn efx_rc_t
+tlv_require_end(
+ __inout tlv_cursor_t *cursor)
+{
+ uint32_t *pos;
+ efx_rc_t rc;
+
+ if (cursor->end == NULL) {
+ pos = cursor->current;
+ if ((rc = tlv_find(cursor, TLV_TAG_END)) != 0)
+ goto fail1;
+
+ cursor->end = cursor->current;
+ cursor->current = pos;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static size_t
+tlv_block_length_used(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail2;
+
+ /* Return space used (including the END tag) */
+ return (cursor->end + 1 - cursor->block) * sizeof (uint32_t);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (0);
+}
+
+static uint32_t *
+tlv_last_segment_end(
+ __in tlv_cursor_t *cursor)
+{
+ tlv_cursor_t segment_cursor;
+ uint32_t *last_segment_end = cursor->block;
+ uint32_t *segment_start = cursor->block;
+
+ /*
+ * Go through each segment and check that it has an end tag. If there
+ * is no end tag then the previous segment was the last valid one,
+ * so return the pointer to its end tag.
+ */
+ for (;;) {
+ if (tlv_init_cursor(&segment_cursor, segment_start,
+ cursor->limit, segment_start) != 0)
+ break;
+ if (tlv_require_end(&segment_cursor) != 0)
+ break;
+ last_segment_end = segment_cursor.end;
+ segment_start = segment_cursor.end + 1;
+ }
+
+ return (last_segment_end);
+}
+
+
+static void
+tlv_write(
+ __in tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size) uint8_t *data,
+ __in size_t size)
+{
+ uint32_t len = (uint32_t)size;
+ uint32_t *ptr;
+
+ ptr = cursor->current;
+
+ *ptr++ = __CPU_TO_LE_32(tag);
+ *ptr++ = __CPU_TO_LE_32(len);
+
+ if (len > 0) {
+ ptr[(len - 1) / sizeof (uint32_t)] = 0;
+ (void) memcpy(ptr, data, len);
+ }
+}
+
+static __checkReturn efx_rc_t
+tlv_insert(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size)
+ uint8_t *data,
+ __in size_t size)
+{
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail2;
+
+ if (tag == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ delta = TLV_DWORD_COUNT(size);
+ if (last_segment_end + 1 + delta > cursor->limit) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ /* Move data up: new space at cursor->current */
+ (void) memmove(cursor->current + delta, cursor->current,
+ (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end += delta;
+
+ /* Write new TLV item */
+ tlv_write(cursor, tag, data, size);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_delete(
+ __inout tlv_cursor_t *cursor)
+{
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (tlv_tag(cursor) == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ delta = TLV_DWORD_COUNT(tlv_length(cursor));
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail3;
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ /* Shuffle things down, destroying the item at cursor->current */
+ (void) memmove(cursor->current, cursor->current + delta,
+ (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
+ /* Zero the new space at the end of the TLV chain */
+ (void) memset(last_segment_end + 1 - delta, 0,
+ delta * sizeof (uint32_t));
+ /* Adjust the end pointer */
+ cursor->end -= delta;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_modify(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size)
+ uint8_t *data,
+ __in size_t size)
+{
+ uint32_t *pos;
+ unsigned int old_ndwords;
+ unsigned int new_ndwords;
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (tlv_tag(cursor) == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ if (tlv_tag(cursor) != tag) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ old_ndwords = TLV_DWORD_COUNT(tlv_length(cursor));
+ new_ndwords = TLV_DWORD_COUNT(size);
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail4;
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ if (new_ndwords > old_ndwords) {
+ /* Expand space used for TLV item */
+ delta = new_ndwords - old_ndwords;
+ pos = cursor->current + old_ndwords;
+
+ if (last_segment_end + 1 + delta > cursor->limit) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Move up: new space at (cursor->current + old_ndwords) */
+ (void) memmove(pos + delta, pos,
+ (last_segment_end + 1 - pos) * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end += delta;
+
+ } else if (new_ndwords < old_ndwords) {
+ /* Shrink space used for TLV item */
+ delta = old_ndwords - new_ndwords;
+ pos = cursor->current + new_ndwords;
+
+ /* Move down: remove words at (cursor->current + new_ndwords) */
+ (void) memmove(pos, pos + delta,
+ (last_segment_end + 1 - pos) * sizeof (uint32_t));
+
+ /* Zero the new space at the end of the TLV chain */
+ (void) memset(last_segment_end + 1 - delta, 0,
+ delta * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end -= delta;
+ }
+
+ /* Write new data */
+ tlv_write(cursor, tag, data, size);
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint32_t
+checksum_tlv_partition(
+ __in nvram_partition_t *partition)
+{
+ tlv_cursor_t *cursor;
+ uint32_t *ptr;
+ uint32_t *end;
+ uint32_t csum;
+ size_t len;
+
+ cursor = &partition->tlv_cursor;
+ len = tlv_block_length_used(cursor);
+ EFSYS_ASSERT3U((len & 3), ==, 0);
+
+ csum = 0;
+ ptr = partition->data;
+ end = &ptr[len >> 2];
+
+ while (ptr < end)
+ csum += __LE_TO_CPU_32(*ptr++);
+
+ return (csum);
+}
+
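+/*
+ * The trailer checksum is maintained such that the 32-bit word sum over a
+ * complete segment is zero. After a modification, subtracting the current
+ * word sum from the stored checksum restores this invariant.
+ */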
+static __checkReturn efx_rc_t
+tlv_update_partition_len_and_cks(
+ __in tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+ nvram_partition_t partition;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t new_len;
+
+ /*
+ * We just modified the partition, so the total length may not be
+ * valid. Don't use tlv_find(), which performs some sanity checks
+ * that may fail here.
+ */
+ partition.data = cursor->block;
+ (void) memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor));
+ header = (struct tlv_partition_header *)partition.data;
+ /* Sanity check. */
+ if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ new_len = tlv_block_length_used(&partition.tlv_cursor);
+ if (new_len == 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ header->total_length = __CPU_TO_LE_32(new_len);
+ /* Ensure the modified partition always has a new generation count. */
+ header->generation = __CPU_TO_LE_32(
+ __LE_TO_CPU_32(header->generation) + 1);
+
+ trailer = (void *)((uint8_t *)header +
+ new_len - sizeof (*trailer) - sizeof (uint32_t));
+ trailer->generation = header->generation;
+ trailer->checksum = __CPU_TO_LE_32(
+ __LE_TO_CPU_32(trailer->checksum) -
+ checksum_tlv_partition(&partition));
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Validate buffer contents (before writing to flash) */
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_validate(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t total_length;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp, partn));
+ EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
+
+ if ((partn_data == NULL) || (partn_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* The partition header must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)partn_data,
+ partn_size)) != 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ header = (void *)tlv_item(&cursor);
+
+ /* Check TLV partition length (includes the END tag) */
+ total_length = __LE_TO_CPU_32(header->total_length);
+ if (total_length > partn_size) {
+ rc = EFBIG;
+ goto fail4;
+ }
+
+ /* Check partition ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ trailer = (void *)tlv_item(&cursor);
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ /* Check generation counts are consistent */
+ if (trailer->generation != header->generation) {
+ rc = EINVAL;
+ goto fail8;
+ }
+
+ /* Verify partition checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(void *)(partn_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_create(
+ __in efx_nic_t *enp,
+ __in uint16_t partn_type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ uint32_t *buf = (void *)partn_data;
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ struct tlv_partition_header header;
+ struct tlv_partition_trailer trailer;
+
+ unsigned min_buf_size = sizeof (struct tlv_partition_header) +
+ sizeof (struct tlv_partition_trailer);
+ if (partn_size < min_buf_size) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(buf, 0xff, partn_size);
+
+ tlv_init_block(buf);
+ if ((rc = tlv_init_cursor(&cursor, buf,
+ (void *)((uint8_t *)buf + partn_size),
+ buf)) != 0) {
+ goto fail2;
+ }
+
+ header.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_HEADER);
+ header.length = __CPU_TO_LE_32(sizeof (header) - 8);
+ header.type_id = __CPU_TO_LE_16(partn_type);
+ header.preset = 0;
+ header.generation = __CPU_TO_LE_32(1);
+ header.total_length = 0; /* This will be fixed below. */
+ if ((rc = tlv_insert(
+ &cursor, TLV_TAG_PARTITION_HEADER,
+ (uint8_t *)&header.type_id, sizeof (header) - 8)) != 0)
+ goto fail3;
+ if ((rc = tlv_advance(&cursor)) != 0)
+ goto fail4;
+
+ trailer.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_TRAILER);
+ trailer.length = __CPU_TO_LE_32(sizeof (trailer) - 8);
+ trailer.generation = header.generation;
+ trailer.checksum = 0; /* This will be fixed below. */
+ if ((rc = tlv_insert(&cursor, TLV_TAG_PARTITION_TRAILER,
+ (uint8_t *)&trailer.generation, sizeof (trailer) - 8)) != 0)
+ goto fail5;
+
+ if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
+ goto fail6;
+
+ /* Check that the partition is valid. */
+ if ((rc = ef10_nvram_buffer_validate(enp, partn_type,
+ partn_data, partn_size)) != 0)
+ goto fail7;
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint32_t
+byte_offset(
+ __in uint32_t *position,
+ __in uint32_t *base)
+{
+ return (uint32_t)((uintptr_t)position - (uintptr_t)base);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_find_item_start(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp)
+{
+	/* Read past partition header to find start address of the first key */
+ tlv_cursor_t cursor;
+ efx_rc_t rc;
+
+ /* A PARTITION_HEADER tag must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ *startp = byte_offset(cursor.current, cursor.block);
+
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_find_end(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp)
+{
+	/* Read to end of partition */
+ tlv_cursor_t cursor;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(offset));
+
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail2;
+
+ *endp = byte_offset(tlv_last_segment_end(&cursor)+1, cursor.block);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+ef10_nvram_buffer_find_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp)
+{
+	/* Find TLV at offset and return key start and length */
+ tlv_cursor_t cursor;
+ uint32_t tag;
+
+ if (tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset) != 0) {
+ return (B_FALSE);
+ }
+
+ while (tlv_item(&cursor) != NULL) {
+ tag = tlv_tag(&cursor);
+ if (tag == TLV_TAG_PARTITION_HEADER ||
+ tag == TLV_TAG_PARTITION_TRAILER) {
+ if (tlv_advance(&cursor) != 0) {
+ break;
+ }
+ continue;
+ }
+ *startp = byte_offset(cursor.current, cursor.block);
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_get_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(item_max_size, *lengthp)
+ caddr_t itemp,
+ __in size_t item_max_size,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ uint32_t item_length;
+
+ if (item_max_size < length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail2;
+ }
+
+ item_length = tlv_length(&cursor);
+ if (length < item_length) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+ (void) memcpy(itemp, tlv_value(&cursor), item_length);
+
+ *lengthp = item_length;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_insert_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ rc = tlv_insert(&cursor, TLV_TAG_LICENSE, (uint8_t *)keyp, length);
+
+ if (rc != 0) {
+ goto fail2;
+ }
+
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_delete_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ _NOTE(ARGUNUSED(length, end))
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ if ((rc = tlv_delete(&cursor)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_finish(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail2;
+
+ if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+/*
+ * Read and validate a segment from a partition. A segment is a complete
+ * TLV chain between PARTITION_HEADER and END tags. There may
+ * be multiple segments in a partition, so seg_offset allows segments
+ * beyond the first to be read.
+ */
+static __checkReturn efx_rc_t
+ef10_nvram_read_tlv_segment(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in size_t seg_offset,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t total_length;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
+
+ if ((seg_data == NULL) || (max_seg_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Read initial chunk of the segment, starting at offset */
+ if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset, seg_data,
+ EF10_NVRAM_CHUNK,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) {
+ goto fail2;
+ }
+
+ /* A PARTITION_HEADER tag must be the first item at the given offset */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ header = (void *)tlv_item(&cursor);
+
+ /* Check TLV segment length (includes the END tag) */
+ total_length = __LE_TO_CPU_32(header->total_length);
+ if (total_length > max_seg_size) {
+ rc = EFBIG;
+ goto fail5;
+ }
+
+ /* Read the remaining segment content */
+ if (total_length > EF10_NVRAM_CHUNK) {
+ if ((rc = ef10_nvram_partn_read_mode(enp, partn,
+ seg_offset + EF10_NVRAM_CHUNK,
+ seg_data + EF10_NVRAM_CHUNK,
+ total_length - EF10_NVRAM_CHUNK,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0)
+ goto fail6;
+ }
+
+ /* Check segment ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+ trailer = (void *)tlv_item(&cursor);
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail8;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ /* Check data read from segment is consistent */
+ if (trailer->generation != header->generation) {
+ /*
+ * The partition data may have been modified between successive
+ * MCDI NVRAM_READ requests by the MC or another PCI function.
+ *
+ * The caller must retry to obtain consistent partition data.
+ */
+ rc = EAGAIN;
+ goto fail10;
+ }
+
+ /* Verify segment checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(void *)(seg_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail11;
+ }
+
+ return (0);
+
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Read a single TLV item from a host memory
+ * buffer containing a TLV formatted segment.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buf_read_tlv(
+ __in efx_nic_t *enp,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep)
+{
+ tlv_cursor_t cursor;
+ caddr_t data;
+ size_t length;
+ caddr_t value;
+ efx_rc_t rc;
+
+ if ((seg_data == NULL) || (max_seg_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Find requested TLV tag in segment data */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ if ((rc = tlv_find(&cursor, tag)) != 0) {
+ rc = ENOENT;
+ goto fail3;
+ }
+ value = (caddr_t)tlv_value(&cursor);
+ length = tlv_length(&cursor);
+
+ if (length == 0)
+ data = NULL;
+ else {
+ /* Copy out data from TLV item */
+ EFSYS_KMEM_ALLOC(enp->en_esip, length, data);
+ if (data == NULL) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ (void) memcpy(data, value, length);
+ }
+
+ *datap = data;
+ *sizep = length;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Read a single TLV item from the first segment in a TLV formatted
+ * partition.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*seg_sizep) caddr_t *seg_datap,
+ __out size_t *seg_sizep)
+{
+ caddr_t seg_data = NULL;
+ size_t partn_size = 0;
+ size_t length;
+ caddr_t data;
+ int retry;
+ efx_rc_t rc;
+
+ /* Allocate sufficient memory for the entire partition */
+ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
+ goto fail1;
+
+ if (partn_size == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, seg_data);
+ if (seg_data == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ /*
+ * Read the first segment in a TLV partition. Retry until consistent
+ * segment contents are returned. Inconsistent data may be read if:
+ * a) the segment contents are invalid
+ * b) the MC has rebooted while we were reading the partition
+ * c) the partition has been modified while we were reading it
+ * Limit retry attempts to ensure forward progress.
+ */
+ retry = 10;
+ do {
+ rc = ef10_nvram_read_tlv_segment(enp, partn, 0,
+ seg_data, partn_size);
+ } while ((rc == EAGAIN) && (--retry > 0));
+
+ if (rc != 0) {
+ /* Failed to obtain consistent segment data */
+ goto fail4;
+ }
+
+ if ((rc = ef10_nvram_buf_read_tlv(enp, seg_data, partn_size,
+ tag, &data, &length)) != 0)
+ goto fail5;
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data);
+
+ *seg_datap = data;
+ *seg_sizep = length;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Compute the size of a segment. */
+ static __checkReturn efx_rc_t
+ef10_nvram_buf_segment_size(
+ __in caddr_t seg_data,
+ __in size_t max_seg_size,
+ __out size_t *seg_sizep)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ uint32_t cksum;
+ int pos;
+ uint32_t *end_tag_position;
+ uint32_t segment_length;
+
+ /* A PARTITION_HEADER tag must be the first item at the given offset */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ header = (void *)tlv_item(&cursor);
+
+ /* Check TLV segment length (includes the END tag) */
+ *seg_sizep = __LE_TO_CPU_32(header->total_length);
+ if (*seg_sizep > max_seg_size) {
+ rc = EFBIG;
+ goto fail3;
+ }
+
+ /* Check segment ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ end_tag_position = cursor.current;
+
+ /* Verify segment checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < *seg_sizep; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(void *)(seg_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ /*
+ * Calculate total length from HEADER to END tags and compare to
+ * max_seg_size and the total_length field in the HEADER tag.
+ */
+ segment_length = tlv_block_length_used(&cursor);
+
+ if (segment_length > max_seg_size) {
+ rc = EINVAL;
+ goto fail8;
+ }
+
+ if (segment_length != *seg_sizep) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ /* Skip over the first HEADER tag. */
+ rc = tlv_rewind(&cursor);
+ rc = tlv_advance(&cursor);
+
+ while (rc == 0) {
+ if (tlv_tag(&cursor) == TLV_TAG_END) {
+ /* Check that the END tag is the one found earlier. */
+ if (cursor.current != end_tag_position)
+ goto fail10;
+ break;
+ }
+ /* Check for duplicate HEADER tags before the END tag. */
+ if (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail11;
+ }
+
+ rc = tlv_advance(&cursor);
+ }
+ if (rc != 0)
+ goto fail12;
+
+ return (0);
+
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in a host memory buffer containing a TLV
+ * formatted segment. Historically partitions consisted of only one segment.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buf_write_tlv(
+ __inout_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __in_bcount(tag_size) caddr_t tag_data,
+ __in size_t tag_size,
+ __out size_t *total_lengthp)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ uint32_t generation;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ /* A PARTITION_HEADER tag must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ header = (void *)tlv_item(&cursor);
+
+ /* Update the TLV chain to contain the new data */
+ if ((rc = tlv_find(&cursor, tag)) == 0) {
+ /* Modify existing TLV item */
+ if ((rc = tlv_modify(&cursor, tag,
+ (uint8_t *)tag_data, tag_size)) != 0)
+ goto fail3;
+ } else {
+ /* Insert a new TLV item before the PARTITION_TRAILER */
+ rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER);
+ if (rc != 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ if ((rc = tlv_insert(&cursor, tag,
+ (uint8_t *)tag_data, tag_size)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ }
+
+ /* Find the trailer tag */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ trailer = (void *)tlv_item(&cursor);
+
+ /* Update PARTITION_HEADER and PARTITION_TRAILER fields */
+ *total_lengthp = tlv_block_length_used(&cursor);
+ if (*total_lengthp > max_seg_size) {
+ rc = ENOSPC;
+ goto fail7;
+ }
+ generation = __LE_TO_CPU_32(header->generation) + 1;
+
+ header->total_length = __CPU_TO_LE_32(*total_lengthp);
+ header->generation = __CPU_TO_LE_32(generation);
+ trailer->generation = __CPU_TO_LE_32(generation);
+
+ /* Recompute PARTITION_TRAILER checksum */
+ trailer->checksum = 0;
+ cksum = 0;
+ for (pos = 0; (size_t)pos < *total_lengthp; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(void *)(seg_data + pos));
+ }
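+	/* Negate so the word sum over the segment becomes zero */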
+ trailer->checksum = ~cksum + 1;
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in the first segment of a TLV formatted
+ * dynamic config partition. The first segment is the current active
+ * configuration.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ return ef10_nvram_partn_write_segment_tlv(enp, partn, tag, data,
+ size, B_FALSE);
+}
+
+/*
+ * Read a segment from nvram at the given offset into a buffer (segment_data)
+ * and optionally write a new tag to it.
+ */
+ static __checkReturn efx_rc_t
+ef10_nvram_segment_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout caddr_t *seg_datap,
+ __inout size_t *partn_offsetp,
+ __inout size_t *src_remain_lenp,
+ __inout size_t *dest_remain_lenp,
+ __in boolean_t write)
+{
+ efx_rc_t rc;
+ efx_rc_t status;
+ size_t original_segment_size;
+ size_t modified_segment_size;
+
+ /*
+ * Read the segment from NVRAM into the segment_data buffer and validate
+ * it, returning if it does not validate. This is not a failure unless
+ * this is the first segment in a partition. In this case the caller
+	 * must propagate the error.
+ */
+ status = ef10_nvram_read_tlv_segment(enp, partn, *partn_offsetp,
+ *seg_datap, *src_remain_lenp);
+ if (status != 0)
+ return (EINVAL);
+
+ status = ef10_nvram_buf_segment_size(*seg_datap,
+ *src_remain_lenp, &original_segment_size);
+ if (status != 0)
+ return (EINVAL);
+
+ if (write) {
+ /* Update the contents of the segment in the buffer */
+ if ((rc = ef10_nvram_buf_write_tlv(*seg_datap,
+ *dest_remain_lenp, tag, data, size,
+ &modified_segment_size)) != 0)
+ goto fail1;
+ *dest_remain_lenp -= modified_segment_size;
+ *seg_datap += modified_segment_size;
+ } else {
+ /*
+ * We won't modify this segment, but still need to update the
+ * remaining lengths and pointers.
+ */
+ *dest_remain_lenp -= original_segment_size;
+ *seg_datap += original_segment_size;
+ }
+
+ *partn_offsetp += original_segment_size;
+ *src_remain_lenp -= original_segment_size;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in either the first segment or in all
+ * segments in a TLV formatted dynamic config partition. Dynamic config
+ * partitions on boards that support RFID are divided into a number of segments,
+ * each formatted like a partition, with header, trailer and end tags. The first
+ * segment is the current active configuration.
+ *
+ * The segments are initialised by manftest and each contain a different
+ * configuration e.g. firmware variant. The firmware can be instructed
+ * via RFID to copy a segment to replace the first segment, hence changing the
+ * active configuration. This allows ops to change the configuration of a board
+ * prior to shipment using RFID.
+ *
+ * Changes to the dynamic config may need to be written to all segments (e.g.
+ * firmware versions) or just the first segment (changes to the active
+ * configuration). See SF-111324-SW "The use of RFID in Solarflare Products".
+ * If only the first segment is written the code still needs to be aware of the
+ * possible presence of subsequent segments as writing to a segment may cause
+ * its size to increase, which would overwrite the subsequent segments and
+ * invalidate them.
+ */
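+/*
+ * A multi-segment partition is laid out as back-to-back TLV chains:
+ *
+ *   segment 0 (active config): PARTITION_HEADER ... PARTITION_TRAILER END
+ *   segment 1:                 PARTITION_HEADER ... PARTITION_TRAILER END
+ *   ...
+ *   remaining unused space
+ */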
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write_segment_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t all_segments)
+{
+ size_t partn_size = 0;
+ caddr_t partn_data;
+ size_t total_length = 0;
+ efx_rc_t rc;
+ size_t current_offset = 0;
+ size_t remaining_original_length;
+ size_t remaining_modified_length;
+ caddr_t segment_data;
+
+ EFSYS_ASSERT3U(partn, ==, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG);
+
+ /* Allocate sufficient memory for the entire partition */
+ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, partn_data);
+ if (partn_data == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ remaining_original_length = partn_size;
+ remaining_modified_length = partn_size;
+ segment_data = partn_data;
+
+ /* Lock the partition */
+ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+ goto fail3;
+
+ /* Iterate over each (potential) segment to update it. */
+ do {
+ boolean_t write = all_segments || current_offset == 0;
+
+ rc = ef10_nvram_segment_write_tlv(enp, partn, tag, data, size,
+ &segment_data, &current_offset, &remaining_original_length,
+ &remaining_modified_length, write);
+ if (rc != 0) {
+ if (current_offset == 0) {
+ /*
+ * If no data has been read then the first
+ * segment is invalid, which is an error.
+ */
+ goto fail4;
+ }
+ break;
+ }
+ } while (current_offset < partn_size);
+
+ total_length = (uintptr_t)segment_data - (uintptr_t)partn_data;
+
+ /*
+ * We've run out of space. This should actually be dealt with by
+ * ef10_nvram_buf_write_tlv returning ENOSPC.
+ */
+ if (total_length > partn_size) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Erase the whole partition in NVRAM */
+ if ((rc = ef10_nvram_partn_erase(enp, partn, 0, partn_size)) != 0)
+ goto fail6;
+
+ /* Write new partition contents from the buffer to NVRAM */
+ if ((rc = ef10_nvram_partn_write(enp, partn, 0, partn_data,
+ total_length)) != 0)
+ goto fail7;
+
+ /* Unlock the partition */
+ ef10_nvram_partn_unlock(enp, partn);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ ef10_nvram_partn_unlock(enp, partn);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Get the size of an NVRAM partition. This is the total size allocated in
+ * NVRAM, not the amount of data used by the segments in the partition.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, sizep,
+ NULL, NULL, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, EF10_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_read(enp, partn, offset,
+ data, chunk, mode)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ /*
+ * Read requests which come in through the EFX API expect to
+ * read the current, active partition.
+ */
+ return ef10_nvram_partn_read_mode(enp, partn, offset, data, size,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size)
+{
+ efx_rc_t rc;
+ uint32_t erase_size;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+ &erase_size, NULL)) != 0)
+ goto fail1;
+
+ if (erase_size == 0) {
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0)
+ goto fail2;
+ } else {
+ if (size % erase_size != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ while (size > 0) {
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset,
+ erase_size)) != 0)
+ goto fail4;
+ offset += erase_size;
+ size -= erase_size;
+ }
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ uint32_t write_size;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+ NULL, &write_size)) != 0)
+ goto fail1;
+
+ if (write_size != 0) {
+ /*
+ * Check that the size is a multiple of the write chunk size if
+ * the write chunk size is available.
+ */
+ if (size % write_size != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ } else {
+ write_size = EF10_NVRAM_CHUNK;
+ }
+
+ while (size > 0) {
+ chunk = MIN(size, write_size);
+
+ if ((rc = efx_mcdi_nvram_write(enp, partn, offset,
+ data, chunk)) != 0) {
+ goto fail3;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ boolean_t reboot;
+ efx_rc_t rc;
+
+ reboot = B_FALSE;
+ if ((rc = efx_mcdi_nvram_update_finish(enp, partn, reboot)) != 0)
+ goto fail1;
+
+ return;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4])
+{
+ struct tlv_partition_version partn_version;
+ size_t size;
+ efx_rc_t rc;
+
+ /* Add or modify partition version TLV item */
+ partn_version.version_w = __CPU_TO_LE_16(version[0]);
+ partn_version.version_x = __CPU_TO_LE_16(version[1]);
+ partn_version.version_y = __CPU_TO_LE_16(version[2]);
+ partn_version.version_z = __CPU_TO_LE_16(version[3]);
+
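+	/* The TLV value excludes the tag and length words of the structure */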
+ size = sizeof (partn_version) - (2 * sizeof (uint32_t));
+
+ /* Write the version number to all segments in the partition */
+ if ((rc = ef10_nvram_partn_write_segment_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ TLV_TAG_PARTITION_VERSION(partn),
+ (caddr_t)&partn_version.version_w, size, B_TRUE)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef struct ef10_parttbl_entry_s {
+ unsigned int partn;
+ unsigned int port;
+ efx_nvram_type_t nvtype;
+} ef10_parttbl_entry_t;
+
+/* Translate EFX NVRAM types to firmware partition types */
+static ef10_parttbl_entry_t hunt_parttbl[] = {
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 3, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 4, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE}
+};
+
+static ef10_parttbl_entry_t medford_parttbl[] = {
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 2, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 3, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 4, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE}
+};
+
+static __checkReturn efx_rc_t
+ef10_parttbl_get(
+ __in efx_nic_t *enp,
+ __out ef10_parttbl_entry_t **parttblp,
+ __out size_t *parttbl_rowsp)
+{
+ switch (enp->en_family) {
+ case EFX_FAMILY_HUNTINGTON:
+ *parttblp = hunt_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(hunt_parttbl);
+ break;
+
+ case EFX_FAMILY_MEDFORD:
+ *parttblp = medford_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl);
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ return (EINVAL);
+ }
+ return (0);
+}
+
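+/*
+ * Map an EFX NVRAM type to the firmware partition used on the current port.
+ * For example, on a Huntington board MCDI port 2 maps EFX_NVRAM_BOOTROM_CFG
+ * to NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 (see hunt_parttbl above).
+ */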
+ __checkReturn efx_rc_t
+ef10_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ ef10_parttbl_entry_t *parttbl = NULL;
+ size_t parttbl_rows = 0;
+ unsigned int i;
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT(partnp != NULL);
+
+ if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) {
+ for (i = 0; i < parttbl_rows; i++) {
+ ef10_parttbl_entry_t *entry = &parttbl[i];
+
+ if (entry->nvtype == type &&
+ entry->port == emip->emi_port) {
+ *partnp = entry->partn;
+ return (0);
+ }
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_DIAG
+
+static __checkReturn efx_rc_t
+ef10_nvram_partn_to_type(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out efx_nvram_type_t *typep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ ef10_parttbl_entry_t *parttbl = NULL;
+ size_t parttbl_rows = 0;
+ unsigned int i;
+
+ EFSYS_ASSERT(typep != NULL);
+
+ if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) {
+ for (i = 0; i < parttbl_rows; i++) {
+ ef10_parttbl_entry_t *entry = &parttbl[i];
+
+ if (entry->partn == partn &&
+ entry->port == emip->emi_port) {
+ *typep = entry->nvtype;
+ return (0);
+ }
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_test(
+ __in efx_nic_t *enp)
+{
+ efx_nvram_type_t type;
+ unsigned int npartns = 0;
+ uint32_t *partns = NULL;
+ size_t size;
+ unsigned int i;
+ efx_rc_t rc;
+
+ /* Read available partitions from NVRAM partition map */
+ size = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM * sizeof (uint32_t);
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, partns);
+ if (partns == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_nvram_partitions(enp, (caddr_t)partns, size,
+ &npartns)) != 0) {
+ goto fail2;
+ }
+
+ for (i = 0; i < npartns; i++) {
+ /* Check if the partition is supported for this port */
+ if ((rc = ef10_nvram_partn_to_type(enp, partns[i], &type)) != 0)
+ continue;
+
+ if ((rc = efx_mcdi_nvram_test(enp, partns[i])) != 0)
+ goto fail3;
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, partns);
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+ EFSYS_KMEM_FREE(enp->en_esip, size, partns);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ efx_rc_t rc;
+
+ /* FIXME: get highest partn version from all ports */
+ /* FIXME: return partn description if available */
+
+ if ((rc = efx_mcdi_nvram_metadata(enp, partn, subtypep,
+ version, NULL, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+ goto fail1;
+
+ if (chunk_sizep != NULL)
+ *chunk_sizep = EF10_NVRAM_CHUNK;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ ef10_nvram_partn_unlock(enp, partn);
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_phy.c b/usr/src/uts/common/io/sfxge/common/ef10_phy.c
new file mode 100644
index 0000000000..2e374f44bb
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_phy.c
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static void
+mcdi_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_40000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ *maskp = mask;
+}
+
+static void
+mcdi_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 40000 && fd)
+ *link_modep = EFX_LINK_40000FDX;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_GENERATE)
+ *fcntlp = EFX_FCNTL_GENERATE;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+
+ void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+ /*
+ * Convert the LINKCHANGE speed enumeration into mbit/s, in the
+ * same way as GET_LINK encodes the speed
+ */
+ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_40G:
+ speed = 40000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+ mcdi_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ efx_rc_t rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = ef10_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &elsp->els_adv_cap_mask);
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &elsp->els_lp_cap_mask);
+
+ mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &elsp->els_link_mode, &elsp->els_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+	/* Assert that the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+
+ elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ elsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN)];
+ uint32_t cap_mask;
+ unsigned int led_mode;
+ unsigned int speed;
+ efx_rc_t rc;
+
+ if (~encp->enc_func_flags & EFX_NIC_FUNC_LINKCTRL)
+ goto out;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+	/* Too many fields for the POPULATE macros, so insert this afterwards */
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ case EFX_LINK_40000FDX:
+ speed = 40000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* And set the blink mode */
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ uint32_t state;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+ __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ /* TBD: no stats support in firmware yet */
+ _NOTE(ARGUNUSED(enp, esmp))
+ (void) memset(stat, 0, EFX_PHY_NSTATS * sizeof (*stat));
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_rx.c b/usr/src/uts/common/io/sfxge/common/ef10_rx.c
new file mode 100644
index 0000000000..26155125af
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_rx.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_rxq(
+ __in efx_nic_t *enp,
+ __in uint32_t size,
+ __in uint32_t target_evq,
+ __in uint32_t label,
+ __in uint32_t instance,
+ __in efsys_mem_t *esmp,
+ __in boolean_t disable_scatter)
+{
+ efx_mcdi_req_t req;
+
+ uint8_t payload[
+ MC_CMD_INIT_RXQ_IN_LEN(EFX_RXQ_NBUFS(EFX_RXQ_MAXNDESCS))];
+ int npages = EFX_RXQ_NBUFS(size);
+ int i;
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ efx_rc_t rc;
+
+ /* If this changes, then the payload size might need to change. */
+ EFSYS_ASSERT3U(MC_CMD_INIT_RXQ_OUT_LEN, ==, 0);
+ EFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_RXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_RXQ_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_RXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_SIZE, size);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_TARGET_EVQ, target_evq);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_LABEL, label);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_INSTANCE, instance);
+ MCDI_IN_POPULATE_DWORD_6(req, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_BUFF_MODE, 0,
+ INIT_RXQ_IN_FLAG_HDR_SPLIT, 0,
+ INIT_RXQ_IN_FLAG_TIMESTAMP, 0,
+ INIT_RXQ_IN_CRC_MODE, 0,
+ INIT_RXQ_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_IN_FLAG_DISABLE_SCATTER, disable_scatter);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_OWNER_ID, 0);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
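+	/*
+	 * Fill in the DMA address array: one 64-bit address per EFX_BUF_SIZE
+	 * buffer page backing the ring, split into low and high dwords as
+	 * the MCDI request layout expects.
+	 */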
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_rxq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN,
+ MC_CMD_FINI_RXQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_RXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute(enp, &req);
+
+ if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+efx_mcdi_rss_context_alloc(
+ __in efx_nic_t *enp,
+ __in efx_rx_scale_support_t scale_support,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
+ MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)];
+ uint32_t rss_context;
+ uint32_t context_type;
+ efx_rc_t rc;
+
+ if (num_queues > EFX_MAXRSS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (scale_support) {
+ case EFX_RX_SCALE_EXCLUSIVE:
+ context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE;
+ break;
+ case EFX_RX_SCALE_SHARED:
+ context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type);
+ /* NUM_QUEUES is only used to validate indirection table offsets */
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail4;
+ }
+
+ rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = ENOENT;
+ goto fail5;
+ }
+
+ *rss_contextp = rss_context;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_flags(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in efx_rx_hash_type_t type)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ MCDI_IN_POPULATE_DWORD_4(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
+ (type & (1U << EFX_RX_HASH_IPV4)) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN,
+ (type & (1U << EFX_RX_HASH_TCPIPV4)) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN,
+ (type & (1U << EFX_RX_HASH_IPV6)) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN,
+ (type & (1U << EFX_RX_HASH_TCPIPV6)) ? 1 : 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_key(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ (void) memcpy(MCDI_IN2(req, uint8_t,
+ RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY), key, n);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_table(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)];
+ uint8_t *req_table;
+	int i;
+	efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ req_table =
+ MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE);
+
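+	/*
+	 * Replicate the caller's table across the full MCDI indirection
+	 * table, e.g. a 4-entry table {0, 1, 2, 3} repeats as
+	 * 0,1,2,3,0,1,2,3,... An empty table maps every entry to queue 0.
+	 */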
+ for (i = 0;
+ i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN;
+ i++) {
+ req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+
+ __checkReturn efx_rc_t
+ef10_rx_init(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_RX_SCALE
+
+ if (efx_mcdi_rss_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE, EFX_MAXRSS,
+ &enp->en_rss_context) == 0) {
+ /*
+ * Allocated an exclusive RSS context, which allows both the
+ * indirection table and key to be modified.
+ */
+ enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE;
+ enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
+ } else {
+ /*
+ * Failed to allocate an exclusive RSS context. Continue
+ * operation without support for RSS. The pseudo-header in
+ * received packets will not contain a Toeplitz hash value.
+ */
+ enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE;
+ enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE;
+ }
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+ef10_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ _NOTE(ARGUNUSED(enp, buf_size))
+ return (0);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(alg, ==, EFX_RX_HASHALG_TOEPLITZ);
+ EFSYS_ASSERT3U(insert, ==, B_TRUE);
+
+ if ((alg != EFX_RX_HASHALG_TOEPLITZ) || (insert == B_FALSE)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_flags(enp,
+ enp->en_rss_context, type)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_rc_t rc;
+
+ if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_key(enp,
+ enp->en_rss_context, key, n)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_rc_t rc;
+
+ if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_table(enp,
+ enp->en_rss_context, table, n)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+
+/*
+ * EF10 RX pseudo-header
+ * ---------------------
+ *
+ * Receive packets are prefixed by an (optional) 14-byte pseudo-header:
+ *
+ * +00: Toeplitz hash value.
+ * (32bit little-endian)
+ * +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag.
+ * (16bit big-endian)
+ * +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag.
+ * (16bit big-endian)
+ * +08: Packet Length. Zero if the RX datapath was in cut-through mode.
+ * (16bit little-endian)
+ * +10: MAC timestamp. Zero if timestamping is not enabled.
+ * (32bit little-endian)
+ *
+ * See "The RX Pseudo-header" in SF-109306-TC.
+ */
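+
+/*
+ * Illustrative sketch only (not part of the driver): the big-endian outer
+ * VLAN tag at offset +04 of the pseudo-header described above would be
+ * recovered from a prefix buffer as
+ *
+ *	outer_vlan = (uint16_t)((buffer[4] << 8) | buffer[5]);
+ *
+ * The little-endian length and hash fields are extracted by
+ * ef10_rx_prefix_pktlen() and ef10_rx_prefix_hash() below.
+ */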
+
+ __checkReturn efx_rc_t
+ef10_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp))
+ /*
+ * The RX pseudo-header contains the packet length, excluding the
+ * pseudo-header. If the hardware receive datapath was operating in
+ * cut-through mode then the length in the RX pseudo-header will be
+ * zero, and the packet length must be obtained from the DMA length
+ * reported in the RX event.
+ */
+ *lengthp = buffer[8] | (buffer[9] << 8);
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn uint32_t
+ef10_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ _NOTE(ARGUNUSED(enp))
+ switch (func) {
+ case EFX_RX_HASHALG_TOEPLITZ:
+ return (buffer[0] |
+ (buffer[1] << 8) |
+ (buffer[2] << 16) |
+ (buffer[3] << 24));
+
+ default:
+ EFSYS_ASSERT(0);
+ return (0);
+ }
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ void
+ef10_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_qword_t qword;
+ unsigned int i;
+ unsigned int offset;
+ unsigned int id;
+
+ /* The client driver must not overfill the queue */
+ EFSYS_ASSERT3U(added - completed + n, <=,
+ EFX_RXQ_LIMIT(erp->er_mask + 1));
+
+ id = added & (erp->er_mask);
+ for (i = 0; i < n; i++) {
+ EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
+ unsigned int, id, efsys_dma_addr_t, addrp[i],
+ size_t, size);
+
+ EFX_POPULATE_QWORD_3(qword,
+ ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_RX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addrp[i] & 0xffffffff),
+ ESF_DZ_RX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addrp[i] >> 32));
+
+ offset = id * sizeof (efx_qword_t);
+ EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
+
+ id = (id + 1) & (erp->er_mask);
+ }
+}
+
+ void
+ef10_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ unsigned int pushed = *pushedp;
+ uint32_t wptr;
+ efx_dword_t dword;
+
+ /* Hardware has alignment restriction for WPTR */
+ wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN);
+ if (pushed == wptr)
+ return;
+
+ *pushedp = wptr;
+
+ /* Push the populated descriptors out */
+ wptr &= erp->er_mask;
+
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr);
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
+ wptr, pushed & erp->er_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ erp->er_index, &dword, B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+ef10_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(erp))
+ /* FIXME */
+}
+
+ __checkReturn efx_rc_t
+ef10_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+ boolean_t disable_scatter;
+
+ _NOTE(ARGUNUSED(erp, id))
+
+ EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
+ EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
+
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
+
+ if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_rxq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /* Scatter can only be disabled if the firmware supports doing so */
+ if ((type != EFX_RXQ_TYPE_SCATTER) &&
+ enp->en_nic_cfg.enc_rx_disable_scatter_supported) {
+ disable_scatter = B_TRUE;
+ } else {
+ disable_scatter = B_FALSE;
+ }
+
+ if ((rc = efx_mcdi_init_rxq(enp, n, eep->ee_index, label, index,
+ esmp, disable_scatter)) != 0)
+ goto fail3;
+
+ erp->er_eep = eep;
+ erp->er_label = label;
+
+ ef10_ev_rxlabel_init(eep, erp, label);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_evq_t *eep = erp->er_eep;
+ unsigned int label = erp->er_label;
+
+ ef10_ev_rxlabel_fini(eep, label);
+
+ EFSYS_ASSERT(enp->en_rx_qcount != 0);
+ --enp->en_rx_qcount;
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+}
+
+ void
+ef10_rx_fini(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_RX_SCALE
+ if (enp->en_rss_support != EFX_RX_SCALE_UNAVAILABLE) {
+ (void) efx_mcdi_rss_context_free(enp, enp->en_rss_context);
+ }
+ enp->en_rss_context = 0;
+ enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE;
+#else
+ _NOTE(ARGUNUSED(enp))
+#endif /* EFSYS_OPT_RX_SCALE */
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_tlv_layout.h b/usr/src/uts/common/io/sfxge/common/ef10_tlv_layout.h
new file mode 100644
index 0000000000..a6c189d686
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_tlv_layout.h
@@ -0,0 +1,785 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+/* These structures define the layouts for the TLV items stored in static and
+ * dynamic configuration partitions in NVRAM for EF10 (Huntington etc.).
+ *
+ * They contain the same sort of information that was kept in the
+ * siena_mc_static_config_hdr_t and siena_mc_dynamic_config_hdr_t structures
+ * (defined in <ci/mgmt/mc_flash_layout.h> and <ci/mgmt/mc_dynamic_cfg.h>) for
+ * Siena.
+ *
+ * These are used directly by the MC and should also be usable directly on host
+ * systems which are little-endian and do not do strange things with structure
+ * padding. (Big-endian host systems will require some byte-swapping.)
+ *
+ * -----
+ *
+ * Please refer to SF-108797-SW for a general overview of the TLV partition
+ * format.
+ *
+ * -----
+ *
+ * The current tag IDs have a general structure: with the exception of the
+ * special values defined in the document, they are of the form 0xLTTTNNNN,
+ * where:
+ *
+ * - L is a location, indicating where this tag is expected to be found:
+ * 0: static configuration
+ * 1: dynamic configuration
+ * 2: firmware internal use
+ * 3: license partition
+ *
+ * - TTT is a type, which is just a unique value. The same type value
+ * might appear in both locations, indicating a relationship between
+ * the items (e.g. static and dynamic VPD below).
+ *
+ * - NNNN is an index of some form. Some item types are per-port, some
+ * are per-PF, some are per-partition-type.
+ *
+ * -----
+ *
+ * As with the previous Siena structures, each structure here is laid out
+ * carefully: values are aligned to their natural boundary, with explicit
+ * padding fields added where necessary. (No, technically this does not
+ * absolutely guarantee portability. But, in practice, compilers are generally
+ * sensible enough not to introduce completely pointless padding, and it works
+ * well enough.)
+ */
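+
+/* Worked example (illustrative): TLV_TAG_PF_DYNAMIC_VPD(3), defined below as
+ * (0x10030000 + (pf)), expands to 0x10030003: location 1 (dynamic
+ * configuration), type 003 (VPD), index 0003 (PF 3). The static counterpart
+ * TLV_TAG_PF_STATIC_VPD(3) is 0x00030003, differing only in the location.
+ */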
+
+
+#ifndef CI_MGMT_TLV_LAYOUT_H
+#define CI_MGMT_TLV_LAYOUT_H
+
+
+/* ----------------------------------------------------------------------------
+ * General structure (defined by SF-108797-SW)
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* The "end" tag.
+ *
+ * (Note that this is *not* followed by length or value fields: anything after
+ * the tag itself is irrelevant.)
+ */
+
+#define TLV_TAG_END (0xEEEEEEEE)
+
+
+/* Other special reserved tag values.
+ */
+
+#define TLV_TAG_SKIP (0x00000000)
+#define TLV_TAG_INVALID (0xFFFFFFFF)
+
+
+/* TLV partition header.
+ *
+ * In a TLV partition, this must be the first item in the sequence, at offset
+ * 0.
+ */
+
+#define TLV_TAG_PARTITION_HEADER (0xEF10DA7A)
+
+struct tlv_partition_header {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t type_id;
+/* 0 indicates the default segment (always located at offset 0), while other values
+ * are for RFID-selectable presets that should immediately follow the default segment.
+ * The default segment may also have preset > 0, which means that it is a preset
+ * selected through an RFID command and copied by FW to the location at offset 0. */
+ uint16_t preset;
+ uint32_t generation;
+ uint32_t total_length;
+};
+
+
+/* TLV partition trailer.
+ *
+ * In a TLV partition, this must be the last item in the sequence, immediately
+ * preceding the TLV_TAG_END word.
+ */
+
+#define TLV_TAG_PARTITION_TRAILER (0xEF101A57)
+
+struct tlv_partition_trailer {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t generation;
+ uint32_t checksum;
+};
+
+
+/* Appendable TLV partition header.
+ *
+ * In an appendable TLV partition, this must be the first item in the sequence,
+ * at offset 0. (Note that, unlike the configuration partitions, there is no
+ * trailer before the TLV_TAG_END word.)
+ */
+
+#define TLV_TAG_APPENDABLE_PARTITION_HEADER (0xEF10ADA7)
+
+struct tlv_appendable_partition_header {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t type_id;
+ uint16_t reserved;
+};
+
+
+/* ----------------------------------------------------------------------------
+ * Configuration items
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* NIC global capabilities.
+ */
+
+#define TLV_TAG_GLOBAL_CAPABILITIES (0x00010000)
+
+struct tlv_global_capabilities {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t flags;
+};
+
+
+/* Siena-style per-port MAC address allocation.
+ *
+ * There are <count> addresses, starting at <base_address> and incrementing
+ * by adding <stride> to the low-order byte(s).
+ *
+ * (See also TLV_TAG_GLOBAL_MAC for an alternative, specifying a global pool
+ * of contiguous MAC addresses for the firmware to allocate as it sees fit.)
+ */
+
+#define TLV_TAG_PORT_MAC(port) (0x00020000 + (port))
+
+struct tlv_port_mac {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t base_address[6];
+ uint16_t reserved;
+ uint16_t count;
+ uint16_t stride;
+};
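+
+/* Worked example (illustrative): a tlv_port_mac item with a hypothetical
+ * base_address of 00:0F:53:00:00:00, count 4 and stride 2 describes the
+ * four addresses 00:0F:53:00:00:00, :02, :04 and :06 for that port.
+ */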
+
+
+/* Static VPD.
+ *
+ * This is the portion of VPD which is set at manufacturing time and not
+ * expected to change. It is formatted as a standard PCI VPD block. There are
+ * global and per-pf TLVs for this; the global TLV is new for Medford and is
+ * used in preference to the per-pf TLV.
+ */
+
+#define TLV_TAG_PF_STATIC_VPD(pf) (0x00030000 + (pf))
+
+struct tlv_pf_static_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+#define TLV_TAG_GLOBAL_STATIC_VPD (0x001f0000)
+
+struct tlv_global_static_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+
+/* Dynamic VPD.
+ *
+ * This is the portion of VPD which may be changed (e.g. by firmware updates).
+ * It is formatted as a standard PCI VPD block. There are global and per-pf TLVs
+ * for this; the global TLV is new for Medford and is used in preference to the
+ * per-pf TLV.
+ */
+
+#define TLV_TAG_PF_DYNAMIC_VPD(pf) (0x10030000 + (pf))
+
+struct tlv_pf_dynamic_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+#define TLV_TAG_GLOBAL_DYNAMIC_VPD (0x10200000)
+
+struct tlv_global_dynamic_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+
+/* "DBI" PCI config space changes.
+ *
+ * This is a set of edits made to the default PCI config space values before
+ * the device is allowed to enumerate. There are global and per-pf TLVs for
+ * this; the global TLV is new for Medford and is used in preference to the
+ * per-pf TLV.
+ */
+
+#define TLV_TAG_PF_DBI(pf) (0x00040000 + (pf))
+
+struct tlv_pf_dbi {
+ uint32_t tag;
+ uint32_t length;
+ struct {
+ uint16_t addr;
+ uint16_t byte_enables;
+ uint32_t value;
+ } items[];
+};
+
+
+#define TLV_TAG_GLOBAL_DBI (0x00210000)
+
+struct tlv_global_dbi {
+ uint32_t tag;
+ uint32_t length;
+ struct {
+ uint16_t addr;
+ uint16_t byte_enables;
+ uint32_t value;
+ } items[];
+};
+
+
+/* Partition subtype codes.
+ *
+ * A subtype may optionally be stored for each type of partition present in
+ * the NVRAM. For example, this may be used to allow a generic firmware update
+ * utility to select a specific variant of firmware for a specific variant of
+ * board.
+ *
+ * The description[] field is an optional string which is returned in the
+ * MC_CMD_NVRAM_METADATA response if present.
+ */
+
+#define TLV_TAG_PARTITION_SUBTYPE(type) (0x00050000 + (type))
+
+struct tlv_partition_subtype {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t subtype;
+ uint8_t description[];
+};
+
+
+/* Partition version codes.
+ *
+ * A version may optionally be stored for each type of partition present in
+ * the NVRAM. This provides a standard way of tracking the currently stored
+ * version of each of the various component images.
+ */
+
+#define TLV_TAG_PARTITION_VERSION(type) (0x10060000 + (type))
+
+struct tlv_partition_version {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t version_w;
+ uint16_t version_x;
+ uint16_t version_y;
+ uint16_t version_z;
+};
+
+/* Global PCIe configuration */
+
+#define TLV_TAG_GLOBAL_PCIE_CONFIG (0x10070000)
+
+struct tlv_pcie_config {
+ uint32_t tag;
+ uint32_t length;
+ int16_t max_pf_number; /**< Largest PF RID (lower PFs may be hidden) */
+ uint16_t pf_aper; /**< BIU aperture for PF BAR2 */
+ uint16_t vf_aper; /**< BIU aperture for VF BAR0 */
+ uint16_t int_aper; /**< BIU aperture for PF BAR4 and VF BAR2 */
+#define TLV_MAX_PF_DEFAULT (-1) /* Use FW default for largest PF RID */
+#define TLV_APER_DEFAULT (0xFFFF) /* Use FW default for a given aperture */
+};
+
+/* Per-PF configuration. Note that not all these fields are necessarily useful
+ * as the apertures are constrained by the BIU settings (the one case we do
+ * use is making BAR2 bigger than the BIU thinks, to reserve space), but we can
+ * tidy things up later */
+
+#define TLV_TAG_PF_PCIE_CONFIG(pf) (0x10080000 + (pf))
+
+struct tlv_per_pf_pcie_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t vfs_total;
+ uint8_t port_allocation;
+ uint16_t vectors_per_pf;
+ uint16_t vectors_per_vf;
+ uint8_t pf_bar0_aperture;
+ uint8_t pf_bar2_aperture;
+ uint8_t vf_bar0_aperture;
+ uint8_t vf_base;
+ uint16_t supp_pagesz;
+ uint16_t msix_vec_base;
+};
+
+
+/* Development ONLY. This is a single TLV tag for all the gubbins
+ * that can be set through the MC command-line other than the PCIe
+ * settings. This is a temporary measure. */
+#define TLV_TAG_TMP_GUBBINS (0x10090000) /* legacy symbol - do not use */
+#define TLV_TAG_TMP_GUBBINS_HUNT TLV_TAG_TMP_GUBBINS
+
+struct tlv_tmp_gubbins {
+ uint32_t tag;
+ uint32_t length;
+ /* Consumed by dpcpu.c */
+ uint64_t tx0_tags; /* Bitmap */
+ uint64_t tx1_tags; /* Bitmap */
+ uint64_t dl_tags; /* Bitmap */
+ uint32_t flags;
+#define TLV_DPCPU_TX_STRIPE (1) /* No longer used, has no effect */
+#define TLV_DPCPU_BIU_TAGS (2) /* Use BIU tag manager */
+#define TLV_DPCPU_TX0_TAGS (4) /* tx0_tags is valid */
+#define TLV_DPCPU_TX1_TAGS (8) /* tx1_tags is valid */
+#define TLV_DPCPU_DL_TAGS (16) /* dl_tags is valid */
+ /* Consumed by features.c */
+ uint32_t dut_features; /* All 1s -> leave alone */
+ int8_t with_rmon; /* 0 -> off, 1 -> on, -1 -> leave alone */
+ /* Consumed by clocks_hunt.c */
+ int8_t clk_mode; /* 0 -> off, 1 -> on, -1 -> leave alone */
+ /* No longer used, superseded by TLV_TAG_DESCRIPTOR_CACHE_CONFIG. */
+ int8_t rx_dc_size; /* -1 -> leave alone */
+ int8_t tx_dc_size;
+ int16_t num_q_allocs;
+};
+
+/* Global port configuration
+ *
+ * This is now deprecated in favour of a platform-provided default
+ * and dynamic config override via tlv_global_port_options.
+ */
+#define TLV_TAG_GLOBAL_PORT_CONFIG (0x000a0000)
+
+struct tlv_global_port_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t ports_per_core;
+ uint32_t max_port_speed;
+};
+
+
+/* Firmware options.
+ *
+ * This is intended for user-configurable selection of optional firmware
+ * features and variants.
+ *
+ * Initially, this consists only of the satellite CPU firmware variant
+ * selection, but this tag could be extended in the future (using the
+ * tag length to determine whether additional fields are present).
+ */
+
+#define TLV_TAG_FIRMWARE_OPTIONS (0x100b0000)
+
+struct tlv_firmware_options {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t firmware_variant;
+#define TLV_FIRMWARE_VARIANT_DRIVER_SELECTED (0xffffffff)
+
+/* These are the values for overriding the driver's choice; the definitions
+ * are taken from MCDI so that they don't get out of step. Include
+ * <ci/mgmt/mc_driver_pcol.h> or the equivalent from your driver's tree if
+ * you need to use these constants.
+ */
+#define TLV_FIRMWARE_VARIANT_FULL_FEATURED MC_CMD_FW_FULL_FEATURED
+#define TLV_FIRMWARE_VARIANT_LOW_LATENCY MC_CMD_FW_LOW_LATENCY
+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM MC_CMD_FW_PACKED_STREAM
+#define TLV_FIRMWARE_VARIANT_HIGH_TX_RATE MC_CMD_FW_HIGH_TX_RATE
+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM_HASH_MODE_1 \
+ MC_CMD_FW_PACKED_STREAM_HASH_MODE_1
+};
+
+/* Voltage settings
+ *
+ * Intended for boards with A0 silicon where the core voltage may
+ * need tweaking. Most likely set once when the pass voltage is
+ * determined. */
+
+#define TLV_TAG_0V9_SETTINGS (0x000c0000)
+
+struct tlv_0v9_settings {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t flags; /* Boards with high 0v9 settings may need active cooling */
+#define TLV_TAG_0V9_REQUIRES_FAN (1)
+ uint16_t target_voltage; /* In millivolts */
+ /* Since the limits are meant to be centred on the target (and must at least
+ * contain it), they need setting as well. */
+ uint16_t warn_low; /* In millivolts */
+ uint16_t warn_high; /* In millivolts */
+ uint16_t panic_low; /* In millivolts */
+ uint16_t panic_high; /* In millivolts */
+};
+
+
+/* Clock configuration */
+
+#define TLV_TAG_CLOCK_CONFIG (0x000d0000) /* legacy symbol - do not use */
+#define TLV_TAG_CLOCK_CONFIG_HUNT TLV_TAG_CLOCK_CONFIG
+
+struct tlv_clock_config {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t clk_sys; /* MHz */
+ uint16_t clk_dpcpu; /* MHz */
+ uint16_t clk_icore; /* MHz */
+ uint16_t clk_pcs; /* MHz */
+};
+
+#define TLV_TAG_CLOCK_CONFIG_MEDFORD (0x00100000)
+
+struct tlv_clock_config_medford {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t clk_sys; /* MHz */
+ uint16_t clk_mc; /* MHz */
+ uint16_t clk_rmon; /* MHz */
+ uint16_t clk_vswitch; /* MHz */
+ uint16_t clk_dpcpu; /* MHz */
+ uint16_t clk_pcs; /* MHz */
+};
+
+
+/* EF10-style global pool of MAC addresses.
+ *
+ * There are <count> addresses, starting at <base_address>, which are
+ * contiguous. Firmware is responsible for allocating addresses from this
+ * pool to ports / PFs as appropriate.
+ */
+
+#define TLV_TAG_GLOBAL_MAC (0x000e0000)
+
+struct tlv_global_mac {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t base_address[6];
+ uint16_t reserved1;
+ uint16_t count;
+ uint16_t reserved2;
+};
+
+#define TLV_TAG_ATB_0V9_TARGET (0x000f0000) /* legacy symbol - do not use */
+#define TLV_TAG_ATB_0V9_TARGET_HUNT TLV_TAG_ATB_0V9_TARGET
+
+/* The target value for the 0v9 power rail measured on-chip at the
+ * analogue test bus */
+struct tlv_0v9_atb_target {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t millivolts;
+ uint16_t reserved;
+};
+
+/* Global PCIe configuration, second revision. This represents the visible PFs
+ * as a bitmap rather than as the number of the highest visible PF. As such it
+ * can (for a 16-PF chip) represent a superset of what TLV_TAG_GLOBAL_PCIE_CONFIG
+ * can, and it should be used in place of that tag in future (but compatibility
+ * with the old tag will be left in the firmware indefinitely). See the
+ * illustrative example after the structure below. */
+
+#define TLV_TAG_GLOBAL_PCIE_CONFIG_R2 (0x10100000)
+
+struct tlv_pcie_config_r2 {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t visible_pfs; /**< Bitmap of visible PFs */
+ uint16_t pf_aper; /**< BIU aperture for PF BAR2 */
+ uint16_t vf_aper; /**< BIU aperture for VF BAR0 */
+ uint16_t int_aper; /**< BIU aperture for PF BAR4 and VF BAR2 */
+};
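+
+/* Illustrative reading of the bitmap (the values are arbitrary): assuming
+ * bit n corresponds to PF n, visible_pfs = 0x000f would expose PFs 0-3 and
+ * hide the rest, and a non-contiguous set such as 0x0005 (PFs 0 and 2) is
+ * something the max_pf_number field of TLV_TAG_GLOBAL_PCIE_CONFIG cannot
+ * express. */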
+
+/* Dynamic port mode.
+ *
+ * Allows selecting alternate port configuration for platforms that support it
+ * (e.g. 1x40G vs 2x10G on Milano, 1x40G vs 4x10G on Medford). This affects the
+ * number of externally visible ports (and, hence, PF to port mapping), so must
+ * be done at boot time.
+ *
+ * This tag supersedes tlv_global_port_config.
+ */
+
+#define TLV_TAG_GLOBAL_PORT_MODE (0x10110000)
+
+struct tlv_global_port_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t port_mode;
+#define TLV_PORT_MODE_DEFAULT (0xffffffff) /* Default for given platform */
+#define TLV_PORT_MODE_10G (0) /* 10G, single SFP/10G-KR */
+#define TLV_PORT_MODE_40G (1) /* 40G, single QSFP/40G-KR */
+#define TLV_PORT_MODE_10G_10G (2) /* 2x10G, dual SFP/10G-KR or single QSFP */
+#define TLV_PORT_MODE_40G_40G (3) /* 40G + 40G, dual QSFP/40G-KR (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G (4) /* 2x10G + 2x10G, quad SFP/10G-KR or dual QSFP (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5) /* 4x10G, single QSFP, cage 0 (Medford) */
+#define TLV_PORT_MODE_40G_10G_10G (6) /* 1x40G + 2x10G, dual QSFP (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_40G (7) /* 2x10G + 1x40G, dual QSFP (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8) /* 4x10G, single QSFP, cage 1 (Medford) */
+#define TLV_PORT_MODE_MAX TLV_PORT_MODE_10G_10G_10G_10G_Q2
+};
+
+/* Type of the v-switch created implicitly by the firmware */
+
+#define TLV_TAG_VSWITCH_TYPE(port) (0x10120000 + (port))
+
+struct tlv_vswitch_type {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t vswitch_type;
+#define TLV_VSWITCH_TYPE_DEFAULT (0xffffffff) /* Firmware default; equivalent to no TLV present for a given port */
+#define TLV_VSWITCH_TYPE_NONE (0)
+#define TLV_VSWITCH_TYPE_VLAN (1)
+#define TLV_VSWITCH_TYPE_VEB (2)
+#define TLV_VSWITCH_TYPE_VEPA (3)
+#define TLV_VSWITCH_TYPE_MUX (4)
+#define TLV_VSWITCH_TYPE_TEST (5)
+};
+
+/* A VLAN tag for the v-port created implicitly by the firmware */
+
+#define TLV_TAG_VPORT_VLAN_TAG(pf) (0x10130000 + (pf))
+
+struct tlv_vport_vlan_tag {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t vlan_tag;
+#define TLV_VPORT_NO_VLAN_TAG (0xFFFFFFFF) /* Default in the absence of TLV for a given PF */
+};
+
+/* Offset to be applied to the 0v9 setting, wherever it came from */
+
+#define TLV_TAG_ATB_0V9_OFFSET (0x10140000)
+
+struct tlv_0v9_atb_offset {
+ uint32_t tag;
+ uint32_t length;
+ int16_t offset_millivolts;
+ uint16_t reserved;
+};
+
+/* A privilege mask given on reset to all non-admin PCIe functions (that is, other than the first PF per port).
+ * The meaning of particular bits is defined in mcdi_ef10.yml under MC_CMD_PRIVILEGE_MASK; see also bug 44583.
+ * TLV_TAG_PRIVILEGE_MASK_ADD specifies bits that should be added (ORed) to the firmware default, while
+ * TLV_TAG_PRIVILEGE_MASK_REM specifies bits that should be removed (ANDed out) from the firmware default:
+ * Initial_privilege_mask = (firmware_default_mask | privilege_mask_add) & ~privilege_mask_rem */
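+
+/* Worked example with arbitrary mask values: if the firmware default mask is
+ * 0x13, privilege_mask_add is 0x04 and privilege_mask_rem is 0x02, then the
+ * initial mask becomes (0x13 | 0x04) & ~0x02 = 0x15. */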
+
+#define TLV_TAG_PRIVILEGE_MASK (0x10150000) /* legacy symbol - do not use */
+
+struct tlv_privilege_mask { /* legacy structure - do not use */
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask;
+};
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD (0x10150000)
+
+struct tlv_privilege_mask_add {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+#define TLV_TAG_PRIVILEGE_MASK_REM (0x10160000)
+
+struct tlv_privilege_mask_rem {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_rem;
+};
+
+/* Additional privileges given to all PFs.
+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD_ALL_PFS (0x10190000)
+
+struct tlv_privilege_mask_add_all_pfs {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+/* Additional privileges given to a selected PF.
+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD_SINGLE_PF(pf) (0x101A0000 + (pf))
+
+struct tlv_privilege_mask_add_single_pf {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+/* Turning on/off the PFIOV mode.
+ * This tag only takes effect if TLV_TAG_VSWITCH_TYPE is missing or set to DEFAULT. */
+
+#define TLV_TAG_PFIOV(port) (0x10170000 + (port))
+
+struct tlv_pfiov {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t pfiov;
+#define TLV_PFIOV_OFF (0) /* Default */
+#define TLV_PFIOV_ON (1)
+};
+
+/* Multicast filter chaining mode selection.
+ *
+ * When enabled, multicast packets are delivered to all recipients of all
+ * matching multicast filters, with the exception that IP multicast filters
+ * will steal traffic from MAC multicast filters on a per-function basis.
+ * (New behaviour.)
+ *
+ * When disabled, multicast packets will always be delivered only to the
+ * recipients of the highest priority matching multicast filter.
+ * (Legacy behaviour.)
+ *
+ * The DEFAULT mode (which is the same as the tag not being present at all)
+ * is equivalent to ENABLED in production builds, and DISABLED in eftest
+ * builds.
+ *
+ * This option is intended to provide run-time control over this feature
+ * while it is being stabilised and may be withdrawn at some point in the
+ * future; the new behaviour is intended to become the standard behaviour.
+ */
+
+#define TLV_TAG_MCAST_FILTER_CHAINING (0x10180000)
+
+struct tlv_mcast_filter_chaining {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+#define TLV_MCAST_FILTER_CHAINING_DEFAULT (0xffffffff)
+#define TLV_MCAST_FILTER_CHAINING_DISABLED (0)
+#define TLV_MCAST_FILTER_CHAINING_ENABLED (1)
+};
+
+/* Pacer rate limit per PF */
+#define TLV_TAG_RATE_LIMIT(pf) (0x101b0000 + (pf))
+
+struct tlv_rate_limit {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t rate_mbps;
+};
+
+/* OCSD Enable/Disable
+ *
+ * This setting allows OCSD to be disabled. This is a requirement for HP
+ * servers to support PCI passthrough for virtualization.
+ *
+ * The DEFAULT mode (which is the same as the tag not being present) is
+ * equivalent to ENABLED.
+ *
+ * This option is not used by the MCFW, and is entirely handled by the various
+ * drivers that support OCSD, by reading the setting before they attempt
+ * to enable OCSD.
+ *
+ * bit0: OCSD Disabled/Enabled
+ */
+
+#define TLV_TAG_OCSD (0x101C0000)
+
+struct tlv_ocsd {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+#define TLV_OCSD_DISABLED 0
+#define TLV_OCSD_ENABLED 1 /* Default */
+};
+
+/* Descriptor cache config.
+ *
+ * Sets the sizes of the TX and RX descriptor caches as a power of 2. It also
+ * sets the total number of VIs. When the number of VIs is reduced, VIs are taken
+ * away from the highest numbered port first, so a vi_count of 1024 means 1024
+ * VIs on the first port and 0 on the second (on a Torino).
+ */
+
+#define TLV_TAG_DESCRIPTOR_CACHE_CONFIG (0x101d0000)
+
+struct tlv_descriptor_cache_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t rx_desc_cache_size;
+ uint8_t tx_desc_cache_size;
+ uint16_t vi_count;
+};
+#define TLV_DESC_CACHE_DEFAULT (0xff)
+#define TLV_VI_COUNT_DEFAULT (0xffff)
+
+/* RX event merging config (read batching).
+ *
+ * Sets the global maximum number of events for the merging bins, and the
+ * global timeout configuration for the bins.
+ */
+
+#define TLV_TAG_RX_EVENT_MERGING_CONFIG (0x101e0000)
+
+struct tlv_rx_event_merging_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t max_events;
+#define TLV_RX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)
+ uint32_t timeout_ns;
+};
+#define TLV_RX_EVENT_MERGING_MAX_EVENTS_DEFAULT 7
+#define TLV_RX_EVENT_MERGING_TIMEOUT_NS_DEFAULT 8740
+
+#define TLV_TAG_PCIE_LINK_SETTINGS (0x101f0000)
+struct tlv_pcie_link_settings {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t gen; /* Target PCIe generation: 1, 2, 3 */
+ uint16_t width; /* Number of lanes */
+};
+
+#define TLV_TAG_LICENSE (0x30800000)
+
+typedef struct tlv_license {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t data[];
+} tlv_license_t;
+
+#endif /* CI_MGMT_TLV_LAYOUT_H */
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_tx.c b/usr/src/uts/common/io/sfxge/common/ef10_tx.c
new file mode 100644
index 0000000000..40b2e7ee0f
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_tx.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_QSTATS
+#define EFX_TX_QSTAT_INCR(_etp, _stat) \
+ do { \
+ (_etp)->et_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_TX_QSTAT_INCR(_etp, _stat)
+#endif
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_txq(
+ __in efx_nic_t *enp,
+ __in uint32_t size,
+ __in uint32_t target_evq,
+ __in uint32_t label,
+ __in uint32_t instance,
+ __in uint16_t flags,
+ __in efsys_mem_t *esmp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
+ MC_CMD_INIT_TXQ_OUT_LEN)];
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
+ EFX_TXQ_NBUFS(EFX_TXQ_MAXNDESCS(&enp->en_nic_cfg)));
+
+ npages = EFX_TXQ_NBUFS(size);
+ if (npages > MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_TXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
+
+ MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
+ INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
+ INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
+ INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
+ INIT_TXQ_IN_CRC_MODE, 0,
+ INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
+
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_txq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
+ MC_CMD_FINI_TXQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_TXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute(enp, &req);
+
+ if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_init(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+}
+
+ void
+ef10_tx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp)
+{
+ efx_qword_t desc;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(id));
+
+ if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
+ esmp)) != 0)
+ goto fail1;
+
+ /*
+ * A previous user of this TX queue may have written a descriptor to the
+ * TX push collector, but not pushed the doorbell (e.g. after a crash).
+ * The next doorbell write would then push the stale descriptor.
+ *
+ * Ensure the (per network port) TX push collector is cleared by writing
+ * a no-op TX option descriptor. See bug29981 for details.
+ */
+ *addedp = 1;
+ EFX_POPULATE_QWORD_4(desc,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+ ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
+ ESF_DZ_TX_OPTION_IP_CSUM,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
+ ef10_tx_qpush(etp, *addedp, 0);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp))
+ /* FIXME */
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_enable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_piobuf_handle_t handle;
+ efx_rc_t rc;
+
+ if (etp->et_pio_size != 0) {
+ rc = EALREADY;
+ goto fail1;
+ }
+
+ /* Sub-allocate a PIO block from a piobuf */
+ if ((rc = ef10_nic_pio_alloc(enp,
+ &etp->et_pio_bufnum,
+ &handle,
+ &etp->et_pio_blknum,
+ &etp->et_pio_offset,
+ &etp->et_pio_size)) != 0) {
+ goto fail2;
+ }
+ EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);
+
+ /* Link the piobuf to this TXQ */
+ if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
+ goto fail3;
+ }
+
+ /*
+ * et_pio_offset is the offset of the sub-allocated block within the
+ * hardware PIO buffer. It is used as the buffer address in the PIO
+ * option descriptor.
+ *
+ * et_pio_write_offset is the offset of the sub-allocated block from the
+ * start of the write-combined memory mapping, and is used for writing
+ * data into the PIO buffer.
+ */
+ etp->et_pio_write_offset =
+ (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
+ ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+ (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+ etp->et_pio_size = 0;
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qpio_disable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+
+ if (etp->et_pio_size != 0) {
+ /* Unlink the piobuf from this TXQ */
+ (void) ef10_nic_pio_unlink(enp, etp->et_index);
+
+ /* Free the sub-allocated PIO block */
+ (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum,
+ etp->et_pio_blknum);
+ etp->et_pio_size = 0;
+ etp->et_pio_write_offset = 0;
+ }
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(length) uint8_t *buffer,
+ __in size_t length,
+ __in size_t offset)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efsys_bar_t *esbp = enp->en_esbp;
+ uint32_t write_offset;
+ uint32_t write_offset_limit;
+ efx_qword_t *eqp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);
+
+ if (etp->et_pio_size == 0) {
+ rc = ENOENT;
+ goto fail1;
+ }
+ if (offset + length > etp->et_pio_size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ /*
+ * Writes to PIO buffers must be 64 bit aligned, and multiples of
+ * 64 bits.
+ */
+ write_offset = etp->et_pio_write_offset + offset;
+ write_offset_limit = write_offset + length;
+ eqp = (efx_qword_t *)buffer;
+ while (write_offset < write_offset_limit) {
+ EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
+ eqp++;
+ write_offset += sizeof (efx_qword_t);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_qword_t pio_desc;
+ unsigned int id;
+ size_t offset;
+ unsigned int added = *addedp;
+ efx_rc_t rc;
+
+
+ if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if (etp->et_pio_size == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
+ unsigned int, id, uint32_t, etp->et_pio_offset,
+ size_t, pkt_length);
+
+ EFX_POPULATE_QWORD_5(pio_desc,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, 1,
+ ESF_DZ_TX_PIO_CONT, 0,
+ ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
+ ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);
+
+ *addedp = added;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < n; i++) {
+ efx_buffer_t *ebp = &eb[i];
+ efsys_dma_addr_t addr = ebp->eb_addr;
+ size_t size = ebp->eb_size;
+ boolean_t eop = ebp->eb_eop;
+ unsigned int id;
+ size_t offset;
+ efx_qword_t qword;
+
+ /* Fragments must not span 4k boundaries. */
+ EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= (addr + size));
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
+ unsigned int, id, efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_5(qword,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
+ ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
+ ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
+ }
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * This improves performance by pushing a TX descriptor at the same time as the
+ * doorbell. The descriptor must also be added to the TXQ, so that it can be
+ * used if the hardware decides not to use the pushed descriptor.
+ */
+ void
+ef10_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ unsigned int wptr;
+ unsigned int id;
+ size_t offset;
+ efx_qword_t desc;
+ efx_oword_t oword;
+
+ wptr = added & etp->et_mask;
+ id = pushed & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
+ EFX_POPULATE_OWORD_3(oword,
+ ERF_DZ_TX_DESC_WPTR, wptr,
+ ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
+ ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index,
+ &oword);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < n; i++) {
+ efx_desc_t *edp = &ed[i];
+ unsigned int id;
+ size_t offset;
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_desc_t);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
+ }
+
+ EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
+ unsigned int, added, unsigned int, n);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ /* Fragments must not span 4k boundaries. */
+ EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= addr + size);
+
+ EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
+ efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_5(edp->ed_eq,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
+ ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
+ ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
+}
+
+ void
+ef10_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp)
+{
+ EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
+ uint16_t, ipv4_id, uint32_t, tcp_seq,
+ uint8_t, tcp_flags);
+
+ EFX_POPULATE_QWORD_5(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+ ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+}
+
+ void
+ef10_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count)
+{
+ EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
+ uint16_t, ipv4_id, uint32_t, tcp_seq,
+ uint16_t, tcp_mss);
+
+ EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
+
+ EFX_POPULATE_QWORD_5(edp[0].ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
+ ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+ EFX_POPULATE_QWORD_4(edp[1].ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
+ ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
+}
+
+ void
+ef10_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp)
+{
+ EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
+ uint16_t, tci);
+
+ EFX_POPULATE_QWORD_4(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_VLAN,
+ ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
+ ESF_DZ_TX_VLAN_TAG1, tci);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_rc_t rc;
+
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp, ns))
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ /* FIXME */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp))
+ /* FIXME */
+}
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < TX_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, etp->et_stat[id]);
+ etp->et_stat[id] = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_QSTATS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/usr/src/uts/common/io/sfxge/common/ef10_vpd.c b/usr/src/uts/common/io/sfxge/common/ef10_vpd.c
new file mode 100644
index 0000000000..2c54d18fbc
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/ef10_vpd.c
@@ -0,0 +1,460 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_VPD
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#include "ef10_tlv_layout.h"
+
+ __checkReturn efx_rc_t
+ef10_vpd_init(
+ __in efx_nic_t *enp)
+{
+ caddr_t svpd;
+ size_t svpd_size;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_STATIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_STATIC_VPD(pci_pf);
+ }
+
+ /*
+ * The VPD interface exposes VPD resources from the combined static and
+ * dynamic VPD storage. As the static VPD configuration should *never*
+ * change, we can cache it.
+ */
+ svpd = NULL;
+ svpd_size = 0;
+ rc = ef10_nvram_partn_read_tlv(enp,
+ NVRAM_PARTITION_TYPE_STATIC_CONFIG,
+ tag, &svpd, &svpd_size);
+ if (rc != 0) {
+ if (rc == EACCES) {
+ /* Unprivileged functions cannot access VPD */
+ goto out;
+ }
+ goto fail1;
+ }
+
+ if (svpd != NULL && svpd_size > 0) {
+ if ((rc = efx_vpd_hunk_verify(svpd, svpd_size, NULL)) != 0)
+ goto fail2;
+ }
+
+ enp->en_arch.ef10.ena_svpd = svpd;
+ enp->en_arch.ef10.ena_svpd_length = svpd_size;
+
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, svpd_size, svpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * This function returns the total size the user should allocate
+ * for all VPD operations. We've already cached the static vpd,
+ * so we just need to return an upper bound on the dynamic vpd,
+ * which is the size of the DYNAMIC_CONFIG partition.
+ */
+ if ((rc = efx_mcdi_nvram_info(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ sizep, NULL, NULL, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ caddr_t dvpd;
+ size_t dvpd_size;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_DYNAMIC_VPD(pci_pf);
+ }
+
+ if ((rc = ef10_nvram_partn_read_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ tag, &dvpd, &dvpd_size)) != 0)
+ goto fail1;
+
+ if (dvpd_size > size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+ (void) memcpy(data, dvpd, dvpd_size);
+
+ /* Pad data with all-1s, consistent with update operations */
+ (void) memset(data + dvpd_size, 0xff, size - dvpd_size);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_tag_t stag;
+ efx_vpd_tag_t dtag;
+ efx_vpd_keyword_t skey;
+ efx_vpd_keyword_t dkey;
+ unsigned int scont;
+ unsigned int dcont;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Strictly you could take the view that dynamic vpd is optional.
+ * Instead, to conform more closely to the read/verify/reinit()
+ * paradigm, we require dynamic vpd. ef10_vpd_reinit() will
+ * reinitialize it as required.
+ */
+ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
+ goto fail1;
+
+ /*
+ * Verify that there is no duplication between the static and
+ * dynamic cfg sectors.
+ */
+ if (enp->en_arch.ef10.ena_svpd_length == 0)
+ goto done;
+
+ dcont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(data, size, &dtag,
+ &dkey, NULL, NULL, &dcont)) != 0)
+ goto fail2;
+ if (dcont == 0)
+ break;
+
+ /*
+ * Skip the RV keyword. It should be present in both the static
+ * and dynamic cfg sectors.
+ */
+ if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V'))
+ continue;
+
+ scont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(
+ enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, &stag, &skey,
+ NULL, NULL, &scont)) != 0)
+ goto fail3;
+ if (scont == 0)
+ break;
+
+ if (stag == dtag && skey == dkey) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ boolean_t wantpid;
+ efx_rc_t rc;
+
+ /*
+ * Only create an ID string if the dynamic cfg doesn't have one
+ */
+ if (enp->en_arch.ef10.ena_svpd_length == 0)
+ wantpid = B_TRUE;
+ else {
+ unsigned int offset;
+ uint8_t length;
+
+ rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length,
+ EFX_VPD_ID, 0, &offset, &length);
+ if (rc == 0)
+ wantpid = B_FALSE;
+ else if (rc == ENOENT)
+ wantpid = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ unsigned int offset;
+ uint8_t length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Attempt to satisfy the request from svpd first */
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ if ((rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ evvp->evv_length = length;
+ (void) memcpy(evvp->evv_value,
+ enp->en_arch.ef10.ena_svpd + offset, length);
+ return (0);
+ } else if (rc != ENOENT)
+ goto fail1;
+ }
+
+ /* And then from the provided data buffer */
+ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) != 0)
+ goto fail2;
+
+ evvp->evv_length = length;
+ (void) memcpy(evvp->evv_value, data + offset, length);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* If the provided (tag,keyword) exists in svpd, then it is readonly */
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ unsigned int offset;
+ uint8_t length;
+
+ if ((rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ rc = EACCES;
+ goto fail1;
+ }
+ }
+
+ if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ _NOTE(ARGUNUSED(enp, data, size, evvp, contp))
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t vpd_length;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_DYNAMIC_VPD(pci_pf);
+ }
+
+ /* Determine total length of new dynamic VPD */
+ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
+ goto fail1;
+
+ /* Store new dynamic VPD in all segments in DYNAMIC_CONFIG partition */
+ if ((rc = ef10_nvram_partn_write_segment_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ tag, data, vpd_length, B_TRUE)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip, enp->en_arch.ef10.ena_svpd_length,
+ enp->en_arch.ef10.ena_svpd);
+
+ enp->en_arch.ef10.ena_svpd = NULL;
+ enp->en_arch.ef10.ena_svpd_length = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/usr/src/uts/common/io/sfxge/common/efx.h b/usr/src/uts/common/io/sfxge/common/efx.h
new file mode 100644
index 0000000000..c5129b483f
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx.h
@@ -0,0 +1,2411 @@
+/*
+ * Copyright (c) 2006-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_H
+#define _SYS_EFX_H
+
+#include "efsys.h"
+#include "efx_check.h"
+#include "efx_phy_ids.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFX_STATIC_ASSERT(_cond) \
+ ((void)sizeof(char[(_cond) ? 1 : -1]))
+
+#define EFX_ARRAY_SIZE(_array) \
+ (sizeof(_array) / sizeof((_array)[0]))
+
+#define EFX_FIELD_OFFSET(_type, _field) \
+ ((size_t) &(((_type *)0)->_field))
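+
+/* Usage sketch (illustrative only): EFX_STATIC_ASSERT(sizeof (uint32_t) == 4)
+ * evaluates to a harmless sizeof expression when the condition holds and to
+ * an invalid negative-size array type when it does not, so violations fail at
+ * compile time. For an array declared as "int vals[3]", EFX_ARRAY_SIZE(vals)
+ * evaluates to 3. */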
+
+/* Return codes */
+
+typedef __success(return == 0) int efx_rc_t;
+
+
+/* Chip families */
+
+typedef enum efx_family_e {
+ EFX_FAMILY_INVALID,
+ EFX_FAMILY_FALCON, /* Obsolete and not supported */
+ EFX_FAMILY_SIENA,
+ EFX_FAMILY_HUNTINGTON,
+ EFX_FAMILY_MEDFORD,
+ EFX_FAMILY_NTYPES
+} efx_family_t;
+
+extern __checkReturn efx_rc_t
+efx_family(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __out efx_family_t *efp);
+
+
+#define EFX_PCI_VENID_SFC 0x1924
+
+#define EFX_PCI_DEVID_FALCON 0x0710 /* SFC4000 */
+
+#define EFX_PCI_DEVID_BETHPAGE 0x0803 /* SFC9020 */
+#define EFX_PCI_DEVID_SIENA 0x0813 /* SFL9021 */
+#define EFX_PCI_DEVID_SIENA_F1_UNINIT 0x0810
+
+#define EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT 0x0901
+#define EFX_PCI_DEVID_FARMINGDALE 0x0903 /* SFC9120 PF */
+#define EFX_PCI_DEVID_GREENPORT 0x0923 /* SFC9140 PF */
+
+#define EFX_PCI_DEVID_FARMINGDALE_VF 0x1903 /* SFC9120 VF */
+#define EFX_PCI_DEVID_GREENPORT_VF 0x1923 /* SFC9140 VF */
+
+#define EFX_PCI_DEVID_MEDFORD_PF_UNINIT 0x0913
+#define EFX_PCI_DEVID_MEDFORD 0x0A03 /* SFC9240 PF */
+#define EFX_PCI_DEVID_MEDFORD_VF 0x1A03 /* SFC9240 VF */
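+
+/* Expected mapping (a sketch based on the controller names above, not a
+ * statement of the efx_family() implementation): SFC9120/SFC9140 devices
+ * such as EFX_PCI_DEVID_FARMINGDALE and EFX_PCI_DEVID_GREENPORT would be
+ * expected to report EFX_FAMILY_HUNTINGTON, while EFX_PCI_DEVID_MEDFORD
+ * would be expected to report EFX_FAMILY_MEDFORD. */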
+
+#define EFX_MEM_BAR 2
+
+/* Error codes */
+
+enum {
+ EFX_ERR_INVALID,
+ EFX_ERR_SRAM_OOB,
+ EFX_ERR_BUFID_DC_OOB,
+ EFX_ERR_MEM_PERR,
+ EFX_ERR_RBUF_OWN,
+ EFX_ERR_TBUF_OWN,
+ EFX_ERR_RDESQ_OWN,
+ EFX_ERR_TDESQ_OWN,
+ EFX_ERR_EVQ_OWN,
+ EFX_ERR_EVFF_OFLO,
+ EFX_ERR_ILL_ADDR,
+ EFX_ERR_SRAM_PERR,
+ EFX_ERR_NCODES
+};
+
+/* Calculate the IEEE 802.3 CRC32 of a MAC addr */
+extern __checkReturn uint32_t
+efx_crc32_calculate(
+ __in uint32_t crc_init,
+ __in_ecount(length) uint8_t const *input,
+ __in int length);
+
+
+/* Type prototypes */
+
+typedef struct efx_rxq_s efx_rxq_t;
+
+/* NIC */
+
+typedef struct efx_nic_s efx_nic_t;
+
+#define EFX_NIC_FUNC_PRIMARY 0x00000001
+#define EFX_NIC_FUNC_LINKCTRL 0x00000002
+#define EFX_NIC_FUNC_TRUSTED 0x00000004
+
+
+extern __checkReturn efx_rc_t
+efx_nic_create(
+ __in efx_family_t family,
+ __in efsys_identifier_t *esip,
+ __in efsys_bar_t *esbp,
+ __in efsys_lock_t *eslp,
+ __deref_out efx_nic_t **enpp);
+
+extern __checkReturn efx_rc_t
+efx_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_nic_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_nic_reset(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+efx_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+efx_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_unprobe(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_destroy(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_MCDI
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+/* Huntington and Medford require MCDIv2 commands */
+#define WITH_MCDI_V2 1
+#endif
+
+typedef struct efx_mcdi_req_s efx_mcdi_req_t;
+
+typedef enum efx_mcdi_exception_e {
+ EFX_MCDI_EXCEPTION_MC_REBOOT,
+ EFX_MCDI_EXCEPTION_MC_BADASSERT,
+} efx_mcdi_exception_t;
+
+#if EFSYS_OPT_MCDI_LOGGING
+typedef enum efx_log_msg_e
+{
+ EFX_LOG_INVALID,
+ EFX_LOG_MCDI_REQUEST,
+ EFX_LOG_MCDI_RESPONSE,
+} efx_log_msg_t;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+typedef struct efx_mcdi_transport_s {
+ void *emt_context;
+ efsys_mem_t *emt_dma_mem;
+ void (*emt_execute)(void *, efx_mcdi_req_t *);
+ void (*emt_ev_cpl)(void *);
+ void (*emt_exception)(void *, efx_mcdi_exception_t);
+#if EFSYS_OPT_MCDI_LOGGING
+ void (*emt_logger)(void *, efx_log_msg_t,
+ void *, size_t, void *, size_t);
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ void (*emt_ev_proxy_response)(void *, uint32_t, efx_rc_t);
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+} efx_mcdi_transport_t;
+
+extern __checkReturn efx_rc_t
+efx_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_reboot(
+ __in efx_nic_t *enp);
+
+ void
+efx_mcdi_new_epoch(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mcdi_request_start(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __in boolean_t ev_cpl);
+
+extern __checkReturn boolean_t
+efx_mcdi_request_poll(
+ __in efx_nic_t *enp);
+
+extern __checkReturn boolean_t
+efx_mcdi_request_abort(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mcdi_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* INTR */
+
+#define EFX_NINTR_SIENA 1024
+
+typedef enum efx_intr_type_e {
+ EFX_INTR_INVALID = 0,
+ EFX_INTR_LINE,
+ EFX_INTR_MESSAGE,
+ EFX_INTR_NTYPES
+} efx_intr_type_t;
+
+#define EFX_INTR_SIZE (sizeof (efx_oword_t))
+
+extern __checkReturn efx_rc_t
+efx_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+extern void
+efx_intr_enable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_disable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+#define EFX_INTR_NEVQS 32
+
+extern __checkReturn efx_rc_t
+efx_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+extern void
+efx_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *maskp);
+
+extern void
+efx_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+extern void
+efx_intr_fatal(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_fini(
+ __in efx_nic_t *enp);
+
+/* MAC */
+
+#if EFSYS_OPT_MAC_STATS
+
+/* START MKCONFIG GENERATED EfxHeaderMacBlock e323546097fd7c65 */
+typedef enum efx_mac_stat_e {
+ EFX_MAC_RX_OCTETS,
+ EFX_MAC_RX_PKTS,
+ EFX_MAC_RX_UNICST_PKTS,
+ EFX_MAC_RX_MULTICST_PKTS,
+ EFX_MAC_RX_BRDCST_PKTS,
+ EFX_MAC_RX_PAUSE_PKTS,
+ EFX_MAC_RX_LE_64_PKTS,
+ EFX_MAC_RX_65_TO_127_PKTS,
+ EFX_MAC_RX_128_TO_255_PKTS,
+ EFX_MAC_RX_256_TO_511_PKTS,
+ EFX_MAC_RX_512_TO_1023_PKTS,
+ EFX_MAC_RX_1024_TO_15XX_PKTS,
+ EFX_MAC_RX_GE_15XX_PKTS,
+ EFX_MAC_RX_ERRORS,
+ EFX_MAC_RX_FCS_ERRORS,
+ EFX_MAC_RX_DROP_EVENTS,
+ EFX_MAC_RX_FALSE_CARRIER_ERRORS,
+ EFX_MAC_RX_SYMBOL_ERRORS,
+ EFX_MAC_RX_ALIGN_ERRORS,
+ EFX_MAC_RX_INTERNAL_ERRORS,
+ EFX_MAC_RX_JABBER_PKTS,
+ EFX_MAC_RX_LANE0_CHAR_ERR,
+ EFX_MAC_RX_LANE1_CHAR_ERR,
+ EFX_MAC_RX_LANE2_CHAR_ERR,
+ EFX_MAC_RX_LANE3_CHAR_ERR,
+ EFX_MAC_RX_LANE0_DISP_ERR,
+ EFX_MAC_RX_LANE1_DISP_ERR,
+ EFX_MAC_RX_LANE2_DISP_ERR,
+ EFX_MAC_RX_LANE3_DISP_ERR,
+ EFX_MAC_RX_MATCH_FAULT,
+ EFX_MAC_RX_NODESC_DROP_CNT,
+ EFX_MAC_TX_OCTETS,
+ EFX_MAC_TX_PKTS,
+ EFX_MAC_TX_UNICST_PKTS,
+ EFX_MAC_TX_MULTICST_PKTS,
+ EFX_MAC_TX_BRDCST_PKTS,
+ EFX_MAC_TX_PAUSE_PKTS,
+ EFX_MAC_TX_LE_64_PKTS,
+ EFX_MAC_TX_65_TO_127_PKTS,
+ EFX_MAC_TX_128_TO_255_PKTS,
+ EFX_MAC_TX_256_TO_511_PKTS,
+ EFX_MAC_TX_512_TO_1023_PKTS,
+ EFX_MAC_TX_1024_TO_15XX_PKTS,
+ EFX_MAC_TX_GE_15XX_PKTS,
+ EFX_MAC_TX_ERRORS,
+ EFX_MAC_TX_SGL_COL_PKTS,
+ EFX_MAC_TX_MULT_COL_PKTS,
+ EFX_MAC_TX_EX_COL_PKTS,
+ EFX_MAC_TX_LATE_COL_PKTS,
+ EFX_MAC_TX_DEF_PKTS,
+ EFX_MAC_TX_EX_DEF_PKTS,
+ EFX_MAC_PM_TRUNC_BB_OVERFLOW,
+ EFX_MAC_PM_DISCARD_BB_OVERFLOW,
+ EFX_MAC_PM_TRUNC_VFIFO_FULL,
+ EFX_MAC_PM_DISCARD_VFIFO_FULL,
+ EFX_MAC_PM_TRUNC_QBB,
+ EFX_MAC_PM_DISCARD_QBB,
+ EFX_MAC_PM_DISCARD_MAPPING,
+ EFX_MAC_RXDP_Q_DISABLED_PKTS,
+ EFX_MAC_RXDP_DI_DROPPED_PKTS,
+ EFX_MAC_RXDP_STREAMING_PKTS,
+ EFX_MAC_RXDP_HLB_FETCH,
+ EFX_MAC_RXDP_HLB_WAIT,
+ EFX_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_UNICAST_BYTES,
+ EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_MULTICAST_BYTES,
+ EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_BROADCAST_BYTES,
+ EFX_MAC_VADAPTER_RX_BAD_PACKETS,
+ EFX_MAC_VADAPTER_RX_BAD_BYTES,
+ EFX_MAC_VADAPTER_RX_OVERFLOW,
+ EFX_MAC_VADAPTER_TX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_UNICAST_BYTES,
+ EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_MULTICAST_BYTES,
+ EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_BROADCAST_BYTES,
+ EFX_MAC_VADAPTER_TX_BAD_PACKETS,
+ EFX_MAC_VADAPTER_TX_BAD_BYTES,
+ EFX_MAC_VADAPTER_TX_OVERFLOW,
+ EFX_MAC_NSTATS
+} efx_mac_stat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderMacBlock */
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+typedef enum efx_link_mode_e {
+ EFX_LINK_UNKNOWN = 0,
+ EFX_LINK_DOWN,
+ EFX_LINK_10HDX,
+ EFX_LINK_10FDX,
+ EFX_LINK_100HDX,
+ EFX_LINK_100FDX,
+ EFX_LINK_1000HDX,
+ EFX_LINK_1000FDX,
+ EFX_LINK_10000FDX,
+ EFX_LINK_40000FDX,
+ EFX_LINK_NMODES
+} efx_link_mode_t;
+
+#define EFX_MAC_ADDR_LEN 6
+
+#define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t*)_address)[0] & 0x01)
+
+#define EFX_MAC_MULTICAST_LIST_MAX 256
+
+#define EFX_MAC_SDU_MAX 9202
+
+#define EFX_MAC_PDU(_sdu) \
+ P2ROUNDUP(((_sdu) \
+ + /* EtherII */ 14 \
+ + /* VLAN */ 4 \
+ + /* CRC */ 4 \
+ + /* bug16011 */ 16), \
+ (1 << 3))
+
+#define EFX_MAC_PDU_MIN 60
+#define EFX_MAC_PDU_MAX EFX_MAC_PDU(EFX_MAC_SDU_MAX)
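+
+/* Worked example: for a standard 1500 byte SDU, EFX_MAC_PDU(1500) adds
+ * 14 + 4 + 4 + 16 = 38 bytes of overhead to give 1538, which P2ROUNDUP then
+ * rounds up to the next multiple of 8, i.e. 1544. */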
+
+extern __checkReturn efx_rc_t
+efx_mac_pdu_set(
+ __in efx_nic_t *enp,
+ __in size_t pdu);
+
+extern __checkReturn efx_rc_t
+efx_mac_addr_set(
+ __in efx_nic_t *enp,
+ __in uint8_t *addr);
+
+extern __checkReturn efx_rc_t
+efx_mac_filter_set(
+ __in efx_nic_t *enp,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst);
+
+extern __checkReturn efx_rc_t
+efx_mac_multicast_list_set(
+ __in efx_nic_t *enp,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count);
+
+extern __checkReturn efx_rc_t
+efx_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+efx_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mac_drain(
+ __in efx_nic_t *enp,
+ __in boolean_t enabled);
+
+extern __checkReturn efx_rc_t
+efx_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+#define EFX_FCNTL_RESPOND 0x00000001
+#define EFX_FCNTL_GENERATE 0x00000002
+
+extern __checkReturn efx_rc_t
+efx_mac_fcntl_set(
+ __in efx_nic_t *enp,
+ __in unsigned int fcntl,
+ __in boolean_t autoneg);
+
+extern void
+efx_mac_fcntl_get(
+ __in efx_nic_t *enp,
+ __out unsigned int *fcntl_wantedp,
+ __out unsigned int *fcntl_linkp);
+
+
+#if EFSYS_OPT_MAC_STATS
+
+#if EFSYS_OPT_NAMES
+
+extern __checkReturn const char *
+efx_mac_stat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_MAC_STATS_SIZE 0x400
+
+/*
+ * Upload mac statistics supported by the hardware into the given buffer.
+ *
+ * The reference buffer must be at least %EFX_MAC_STATS_SIZE bytes,
+ * and page aligned.
+ *
+ * The hardware will only DMA statistics that it understands (of course).
+ * Drivers should not make any assumptions about which statistics are
+ * supported, especially when the statistics are generated by firmware.
+ *
+ * Thus, drivers should zero this buffer before use, so that not-understood
+ * statistics read back as zero.
+ */
+extern __checkReturn efx_rc_t
+efx_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp);
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events);
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
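+
+/*
+ * Typical polling sequence (a sketch; everything other than the efx_* calls
+ * and EFX_MAC_STATS_SIZE below is illustrative):
+ *
+ *	(void) memset(buffer, 0, EFX_MAC_STATS_SIZE);	zero the DMA buffer
+ *	rc = efx_mac_stats_upload(enp, esmp);		start the statistics DMA
+ *	wait for the DMA to complete (or use efx_mac_stats_periodic())
+ *	rc = efx_mac_stats_update(enp, esmp, stats, &generation);
+ */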
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+/* MON */
+
+typedef enum efx_mon_type_e {
+ EFX_MON_INVALID = 0,
+ EFX_MON_SFC90X0,
+ EFX_MON_SFC91X0,
+ EFX_MON_SFC92X0,
+ EFX_MON_NTYPES
+} efx_mon_type_t;
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_mon_name(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn efx_rc_t
+efx_mon_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_MON_STATS
+
+#define EFX_MON_STATS_PAGE_SIZE 0x100
+#define EFX_MON_MASK_ELEMENT_SIZE 32
+
+/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 5d4ee5185e419abe */
+typedef enum efx_mon_stat_e {
+ EFX_MON_STAT_2_5V,
+ EFX_MON_STAT_VCCP1,
+ EFX_MON_STAT_VCC,
+ EFX_MON_STAT_5V,
+ EFX_MON_STAT_12V,
+ EFX_MON_STAT_VCCP2,
+ EFX_MON_STAT_EXT_TEMP,
+ EFX_MON_STAT_INT_TEMP,
+ EFX_MON_STAT_AIN1,
+ EFX_MON_STAT_AIN2,
+ EFX_MON_STAT_INT_COOLING,
+ EFX_MON_STAT_EXT_COOLING,
+ EFX_MON_STAT_1V,
+ EFX_MON_STAT_1_2V,
+ EFX_MON_STAT_1_8V,
+ EFX_MON_STAT_3_3V,
+ EFX_MON_STAT_1_2VA,
+ EFX_MON_STAT_VREF,
+ EFX_MON_STAT_VAOE,
+ EFX_MON_STAT_AOE_TEMP,
+ EFX_MON_STAT_PSU_AOE_TEMP,
+ EFX_MON_STAT_PSU_TEMP,
+ EFX_MON_STAT_FAN0,
+ EFX_MON_STAT_FAN1,
+ EFX_MON_STAT_FAN2,
+ EFX_MON_STAT_FAN3,
+ EFX_MON_STAT_FAN4,
+ EFX_MON_STAT_VAOE_IN,
+ EFX_MON_STAT_IAOE,
+ EFX_MON_STAT_IAOE_IN,
+ EFX_MON_STAT_NIC_POWER,
+ EFX_MON_STAT_0_9V,
+ EFX_MON_STAT_I0_9V,
+ EFX_MON_STAT_I1_2V,
+ EFX_MON_STAT_0_9V_ADC,
+ EFX_MON_STAT_INT_TEMP2,
+ EFX_MON_STAT_VREG_TEMP,
+ EFX_MON_STAT_VREG_0_9V_TEMP,
+ EFX_MON_STAT_VREG_1_2V_TEMP,
+ EFX_MON_STAT_INT_VPTAT,
+ EFX_MON_STAT_INT_ADC_TEMP,
+ EFX_MON_STAT_EXT_VPTAT,
+ EFX_MON_STAT_EXT_ADC_TEMP,
+ EFX_MON_STAT_AMBIENT_TEMP,
+ EFX_MON_STAT_AIRFLOW,
+ EFX_MON_STAT_VDD08D_VSS08D_CSR,
+ EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC,
+ EFX_MON_STAT_HOTPOINT_TEMP,
+ EFX_MON_STAT_PHY_POWER_SWITCH_PORT0,
+ EFX_MON_STAT_PHY_POWER_SWITCH_PORT1,
+ EFX_MON_STAT_MUM_VCC,
+ EFX_MON_STAT_0V9_A,
+ EFX_MON_STAT_I0V9_A,
+ EFX_MON_STAT_0V9_A_TEMP,
+ EFX_MON_STAT_0V9_B,
+ EFX_MON_STAT_I0V9_B,
+ EFX_MON_STAT_0V9_B_TEMP,
+ EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY,
+ EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXT_ADC,
+ EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY,
+ EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_MASTER_VPTAT,
+ EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP,
+ EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT,
+ EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP,
+ EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC,
+ EFX_MON_STAT_SODIMM_VOUT,
+ EFX_MON_STAT_SODIMM_0_TEMP,
+ EFX_MON_STAT_SODIMM_1_TEMP,
+ EFX_MON_STAT_PHY0_VCC,
+ EFX_MON_STAT_PHY1_VCC,
+ EFX_MON_STAT_CONTROLLER_TDIODE_TEMP,
+ EFX_MON_STAT_BOARD_FRONT_TEMP,
+ EFX_MON_STAT_BOARD_BACK_TEMP,
+ EFX_MON_NSTATS
+} efx_mon_stat_t;
+
+/* END MKCONFIG GENERATED MonitorHeaderStatsBlock */
+
+typedef enum efx_mon_stat_state_e {
+ EFX_MON_STAT_STATE_OK = 0,
+ EFX_MON_STAT_STATE_WARNING = 1,
+ EFX_MON_STAT_STATE_FATAL = 2,
+ EFX_MON_STAT_STATE_BROKEN = 3,
+ EFX_MON_STAT_STATE_NO_READING = 4,
+} efx_mon_stat_state_t;
+
+typedef struct efx_mon_stat_value_s {
+ uint16_t emsv_value;
+ uint16_t emsv_state;
+} efx_mon_stat_value_t;
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_mon_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn efx_rc_t
+efx_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+extern void
+efx_mon_fini(
+ __in efx_nic_t *enp);
+
+/* PHY */
+
+extern __checkReturn efx_rc_t
+efx_phy_verify(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+
+typedef enum efx_phy_led_mode_e {
+ EFX_PHY_LED_DEFAULT = 0,
+ EFX_PHY_LED_OFF,
+ EFX_PHY_LED_ON,
+ EFX_PHY_LED_FLASH,
+ EFX_PHY_LED_NMODES
+} efx_phy_led_mode_t;
+
+extern __checkReturn efx_rc_t
+efx_phy_led_set(
+ __in efx_nic_t *enp,
+ __in efx_phy_led_mode_t mode);
+
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+extern __checkReturn efx_rc_t
+efx_port_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+typedef enum efx_loopback_type_e {
+ EFX_LOOPBACK_OFF = 0,
+ EFX_LOOPBACK_DATA = 1,
+ EFX_LOOPBACK_GMAC = 2,
+ EFX_LOOPBACK_XGMII = 3,
+ EFX_LOOPBACK_XGXS = 4,
+ EFX_LOOPBACK_XAUI = 5,
+ EFX_LOOPBACK_GMII = 6,
+ EFX_LOOPBACK_SGMII = 7,
+ EFX_LOOPBACK_XGBR = 8,
+ EFX_LOOPBACK_XFI = 9,
+ EFX_LOOPBACK_XAUI_FAR = 10,
+ EFX_LOOPBACK_GMII_FAR = 11,
+ EFX_LOOPBACK_SGMII_FAR = 12,
+ EFX_LOOPBACK_XFI_FAR = 13,
+ EFX_LOOPBACK_GPHY = 14,
+ EFX_LOOPBACK_PHY_XS = 15,
+ EFX_LOOPBACK_PCS = 16,
+ EFX_LOOPBACK_PMA_PMD = 17,
+ EFX_LOOPBACK_XPORT = 18,
+ EFX_LOOPBACK_XGMII_WS = 19,
+ EFX_LOOPBACK_XAUI_WS = 20,
+ EFX_LOOPBACK_XAUI_WS_FAR = 21,
+ EFX_LOOPBACK_XAUI_WS_NEAR = 22,
+ EFX_LOOPBACK_GMII_WS = 23,
+ EFX_LOOPBACK_XFI_WS = 24,
+ EFX_LOOPBACK_XFI_WS_FAR = 25,
+ EFX_LOOPBACK_PHYXS_WS = 26,
+ EFX_LOOPBACK_PMA_INT = 27,
+ EFX_LOOPBACK_SD_NEAR = 28,
+ EFX_LOOPBACK_SD_FAR = 29,
+ EFX_LOOPBACK_PMA_INT_WS = 30,
+ EFX_LOOPBACK_SD_FEP2_WS = 31,
+ EFX_LOOPBACK_SD_FEP1_5_WS = 32,
+ EFX_LOOPBACK_SD_FEP_WS = 33,
+ EFX_LOOPBACK_SD_FES_WS = 34,
+ EFX_LOOPBACK_NTYPES
+} efx_loopback_type_t;
+
+typedef enum efx_loopback_kind_e {
+ EFX_LOOPBACK_KIND_OFF = 0,
+ EFX_LOOPBACK_KIND_ALL,
+ EFX_LOOPBACK_KIND_MAC,
+ EFX_LOOPBACK_KIND_PHY,
+ EFX_LOOPBACK_NKINDS
+} efx_loopback_kind_t;
+
+extern void
+efx_loopback_mask(
+ __in efx_loopback_kind_t loopback_kind,
+ __out efx_qword_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_port_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t type);
+
+#if EFSYS_OPT_NAMES
+
+extern __checkReturn const char *
+efx_loopback_type_name(
+ __in efx_nic_t *enp,
+ __in efx_loopback_type_t type);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+extern __checkReturn efx_rc_t
+efx_port_poll(
+ __in efx_nic_t *enp,
+ __out_opt efx_link_mode_t *link_modep);
+
+extern void
+efx_port_fini(
+ __in efx_nic_t *enp);
+
+typedef enum efx_phy_cap_type_e {
+ EFX_PHY_CAP_INVALID = 0,
+ EFX_PHY_CAP_10HDX,
+ EFX_PHY_CAP_10FDX,
+ EFX_PHY_CAP_100HDX,
+ EFX_PHY_CAP_100FDX,
+ EFX_PHY_CAP_1000HDX,
+ EFX_PHY_CAP_1000FDX,
+ EFX_PHY_CAP_10000FDX,
+ EFX_PHY_CAP_PAUSE,
+ EFX_PHY_CAP_ASYM,
+ EFX_PHY_CAP_AN,
+ EFX_PHY_CAP_40000FDX,
+ EFX_PHY_CAP_NTYPES
+} efx_phy_cap_type_t;
+
+
+#define EFX_PHY_CAP_CURRENT 0x00000000
+#define EFX_PHY_CAP_DEFAULT 0x00000001
+#define EFX_PHY_CAP_PERM 0x00000002
+
+extern void
+efx_phy_adv_cap_get(
+ __in efx_nic_t *enp,
+ __in uint32_t flag,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_phy_adv_cap_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mask);
+
+extern void
+efx_phy_lp_cap_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+typedef enum efx_phy_media_type_e {
+ EFX_PHY_MEDIA_INVALID = 0,
+ EFX_PHY_MEDIA_XAUI,
+ EFX_PHY_MEDIA_CX4,
+ EFX_PHY_MEDIA_KX4,
+ EFX_PHY_MEDIA_XFP,
+ EFX_PHY_MEDIA_SFP_PLUS,
+ EFX_PHY_MEDIA_BASE_T,
+ EFX_PHY_MEDIA_QSFP_PLUS,
+ EFX_PHY_MEDIA_NTYPES
+} efx_phy_media_type_t;
+
+/* Get the type of medium currently used. If the board has ports for
+ * modules, a module is present, and we recognise the media type of
+ * the module, then this will be the media type of the module.
+ * Otherwise it will be the media type of the port.
+ */
+extern void
+efx_phy_media_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_media_type_t *typep);
+
+extern efx_rc_t
+efx_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
+
+#if EFSYS_OPT_PHY_STATS
+
+/* START MKCONFIG GENERATED PhyHeaderStatsBlock 30ed56ad501f8e36 */
+typedef enum efx_phy_stat_e {
+ EFX_PHY_STAT_OUI,
+ EFX_PHY_STAT_PMA_PMD_LINK_UP,
+ EFX_PHY_STAT_PMA_PMD_RX_FAULT,
+ EFX_PHY_STAT_PMA_PMD_TX_FAULT,
+ EFX_PHY_STAT_PMA_PMD_REV_A,
+ EFX_PHY_STAT_PMA_PMD_REV_B,
+ EFX_PHY_STAT_PMA_PMD_REV_C,
+ EFX_PHY_STAT_PMA_PMD_REV_D,
+ EFX_PHY_STAT_PCS_LINK_UP,
+ EFX_PHY_STAT_PCS_RX_FAULT,
+ EFX_PHY_STAT_PCS_TX_FAULT,
+ EFX_PHY_STAT_PCS_BER,
+ EFX_PHY_STAT_PCS_BLOCK_ERRORS,
+ EFX_PHY_STAT_PHY_XS_LINK_UP,
+ EFX_PHY_STAT_PHY_XS_RX_FAULT,
+ EFX_PHY_STAT_PHY_XS_TX_FAULT,
+ EFX_PHY_STAT_PHY_XS_ALIGN,
+ EFX_PHY_STAT_PHY_XS_SYNC_A,
+ EFX_PHY_STAT_PHY_XS_SYNC_B,
+ EFX_PHY_STAT_PHY_XS_SYNC_C,
+ EFX_PHY_STAT_PHY_XS_SYNC_D,
+ EFX_PHY_STAT_AN_LINK_UP,
+ EFX_PHY_STAT_AN_MASTER,
+ EFX_PHY_STAT_AN_LOCAL_RX_OK,
+ EFX_PHY_STAT_AN_REMOTE_RX_OK,
+ EFX_PHY_STAT_CL22EXT_LINK_UP,
+ EFX_PHY_STAT_SNR_A,
+ EFX_PHY_STAT_SNR_B,
+ EFX_PHY_STAT_SNR_C,
+ EFX_PHY_STAT_SNR_D,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_A,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_B,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_C,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_D,
+ EFX_PHY_STAT_AN_COMPLETE,
+ EFX_PHY_STAT_PMA_PMD_REV_MAJOR,
+ EFX_PHY_STAT_PMA_PMD_REV_MINOR,
+ EFX_PHY_STAT_PMA_PMD_REV_MICRO,
+ EFX_PHY_STAT_PCS_FW_VERSION_0,
+ EFX_PHY_STAT_PCS_FW_VERSION_1,
+ EFX_PHY_STAT_PCS_FW_VERSION_2,
+ EFX_PHY_STAT_PCS_FW_VERSION_3,
+ EFX_PHY_STAT_PCS_FW_BUILD_YY,
+ EFX_PHY_STAT_PCS_FW_BUILD_MM,
+ EFX_PHY_STAT_PCS_FW_BUILD_DD,
+ EFX_PHY_STAT_PCS_OP_MODE,
+ EFX_PHY_NSTATS
+} efx_phy_stat_t;
+
+/* END MKCONFIG GENERATED PhyHeaderStatsBlock */
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_phy_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_phy_stat_t stat);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_PHY_STATS_SIZE 0x100
+
+extern __checkReturn efx_rc_t
+efx_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+
+#if EFSYS_OPT_BIST
+
+typedef enum efx_bist_type_e {
+ EFX_BIST_TYPE_UNKNOWN,
+ EFX_BIST_TYPE_PHY_NORMAL,
+ EFX_BIST_TYPE_PHY_CABLE_SHORT,
+ EFX_BIST_TYPE_PHY_CABLE_LONG,
+ EFX_BIST_TYPE_MC_MEM, /* Test the MC DMEM and IMEM */
+ EFX_BIST_TYPE_SAT_MEM, /* Test the DMEM and IMEM of satellite CPUs */
+ EFX_BIST_TYPE_REG, /* Test the register memories */
+ EFX_BIST_TYPE_NTYPES,
+} efx_bist_type_t;
+
+typedef enum efx_bist_result_e {
+ EFX_BIST_RESULT_UNKNOWN,
+ EFX_BIST_RESULT_RUNNING,
+ EFX_BIST_RESULT_PASSED,
+ EFX_BIST_RESULT_FAILED,
+} efx_bist_result_t;
+
+typedef enum efx_phy_cable_status_e {
+ EFX_PHY_CABLE_STATUS_OK,
+ EFX_PHY_CABLE_STATUS_INVALID,
+ EFX_PHY_CABLE_STATUS_OPEN,
+ EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT,
+ EFX_PHY_CABLE_STATUS_INTERPAIRSHORT,
+ EFX_PHY_CABLE_STATUS_BUSY,
+} efx_phy_cable_status_t;
+
+typedef enum efx_bist_value_e {
+ EFX_BIST_PHY_CABLE_LENGTH_A,
+ EFX_BIST_PHY_CABLE_LENGTH_B,
+ EFX_BIST_PHY_CABLE_LENGTH_C,
+ EFX_BIST_PHY_CABLE_LENGTH_D,
+ EFX_BIST_PHY_CABLE_STATUS_A,
+ EFX_BIST_PHY_CABLE_STATUS_B,
+ EFX_BIST_PHY_CABLE_STATUS_C,
+ EFX_BIST_PHY_CABLE_STATUS_D,
+ EFX_BIST_FAULT_CODE,
+ /* Memory BIST specific values. These correspond to the
+ * MC_CMD_BIST_POLL response. */
+ EFX_BIST_MEM_TEST,
+ EFX_BIST_MEM_ADDR,
+ EFX_BIST_MEM_BUS,
+ EFX_BIST_MEM_EXPECT,
+ EFX_BIST_MEM_ACTUAL,
+ EFX_BIST_MEM_ECC,
+ EFX_BIST_MEM_ECC_PARITY,
+ EFX_BIST_MEM_ECC_FATAL,
+ EFX_BIST_NVALUES,
+} efx_bist_value_t;
+
+extern __checkReturn efx_rc_t
+efx_bist_enable_offline(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt uint32_t *value_maskp,
+ __out_ecount_opt(count) unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+efx_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+#define EFX_FEATURE_IPV6 0x00000001
+#define EFX_FEATURE_LFSR_HASH_INSERT 0x00000002
+#define EFX_FEATURE_LINK_EVENTS 0x00000004
+#define EFX_FEATURE_PERIODIC_MAC_STATS 0x00000008
+#define EFX_FEATURE_WOL 0x00000010
+#define EFX_FEATURE_MCDI 0x00000020
+#define EFX_FEATURE_LOOKAHEAD_SPLIT 0x00000040
+#define EFX_FEATURE_MAC_HEADER_FILTERS 0x00000080
+#define EFX_FEATURE_TURBO 0x00000100
+#define EFX_FEATURE_MCDI_DMA 0x00000200
+#define EFX_FEATURE_TX_SRC_FILTERS 0x00000400
+#define EFX_FEATURE_PIO_BUFFERS 0x00000800
+#define EFX_FEATURE_FW_ASSISTED_TSO 0x00001000
+#define EFX_FEATURE_FW_ASSISTED_TSO_V2 0x00002000
+
+typedef struct efx_nic_cfg_s {
+ uint32_t enc_board_type;
+ uint32_t enc_phy_type;
+#if EFSYS_OPT_NAMES
+ char enc_phy_name[21];
+#endif
+ char enc_phy_revision[21];
+ efx_mon_type_t enc_mon_type;
+#if EFSYS_OPT_MON_STATS
+ uint32_t enc_mon_stat_dma_buf_size;
+ uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32];
+#endif
+ unsigned int enc_features;
+ uint8_t enc_mac_addr[6];
+ uint8_t enc_port; /* PHY port number */
+ uint32_t enc_func_flags;
+ uint32_t enc_intr_vec_base;
+ uint32_t enc_intr_limit;
+ uint32_t enc_evq_limit;
+ uint32_t enc_txq_limit;
+ uint32_t enc_rxq_limit;
+ uint32_t enc_buftbl_limit;
+ uint32_t enc_piobuf_limit;
+ uint32_t enc_piobuf_size;
+ uint32_t enc_piobuf_min_alloc_size;
+ uint32_t enc_evq_timer_quantum_ns;
+ uint32_t enc_evq_timer_max_us;
+ uint32_t enc_clk_mult;
+ uint32_t enc_rx_prefix_size;
+ uint32_t enc_rx_buf_align_start;
+ uint32_t enc_rx_buf_align_end;
+#if EFSYS_OPT_LOOPBACK
+ efx_qword_t enc_loopback_types[EFX_LINK_NMODES];
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_PHY_FLAGS
+ uint32_t enc_phy_flags_mask;
+#endif /* EFSYS_OPT_PHY_FLAGS */
+#if EFSYS_OPT_PHY_LED_CONTROL
+ uint32_t enc_led_mask;
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+#if EFSYS_OPT_PHY_STATS
+ uint64_t enc_phy_stat_mask;
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_SIENA
+ uint8_t enc_mcdi_mdio_channel;
+#if EFSYS_OPT_PHY_STATS
+ uint32_t enc_mcdi_phy_stat_mask;
+#endif /* EFSYS_OPT_PHY_STATS */
+#endif /* EFSYS_OPT_SIENA */
+#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#if EFSYS_OPT_MON_STATS
+ uint32_t *enc_mcdi_sensor_maskp;
+ uint32_t enc_mcdi_sensor_mask_size;
+#endif /* EFSYS_OPT_MON_STATS */
+#endif /* (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+#if EFSYS_OPT_BIST
+ uint32_t enc_bist_mask;
+#endif /* EFSYS_OPT_BIST */
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ uint32_t enc_pf;
+ uint32_t enc_vf;
+ uint32_t enc_privilege_mask;
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+ boolean_t enc_bug26807_workaround;
+ boolean_t enc_bug35388_workaround;
+ boolean_t enc_bug41750_workaround;
+ boolean_t enc_rx_batching_enabled;
+ /* Maximum number of descriptors completed in an rx event. */
+ uint32_t enc_rx_batch_max;
+ /* Number of rx descriptors the hardware requires for a push. */
+ uint32_t enc_rx_push_align;
+ /*
+ * Maximum number of bytes into the packet the TCP header can start for
+ * the hardware to apply TSO packet edits.
+ */
+ uint32_t enc_tx_tso_tcp_header_offset_limit;
+ boolean_t enc_fw_assisted_tso_enabled;
+ boolean_t enc_fw_assisted_tso_v2_enabled;
+ boolean_t enc_hw_tx_insert_vlan_enabled;
+ /* Datapath firmware vadapter/vport/vswitch support */
+ boolean_t enc_datapath_cap_evb;
+ boolean_t enc_rx_disable_scatter_supported;
+ boolean_t enc_allow_set_mac_with_installed_filters;
+ boolean_t enc_enhanced_set_mac_supported;
+ /* External port identifier */
+ uint8_t enc_external_port;
+ uint32_t enc_mcdi_max_payload_length;
+ /* VPD may be per-PF or global */
+ boolean_t enc_vpd_is_global;
+} efx_nic_cfg_t;
+
+#define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff)
+#define EFX_PCI_FUNCTION_IS_VF(_encp) ((_encp)->enc_vf != 0xffff)
+
+#define EFX_PCI_FUNCTION(_encp) \
+ (EFX_PCI_FUNCTION_IS_PF(_encp) ? (_encp)->enc_pf : (_encp)->enc_vf)
+
+#define EFX_PCI_VF_PARENT(_encp) ((_encp)->enc_pf)
+
+extern const efx_nic_cfg_t *
+efx_nic_cfg_get(
+ __in efx_nic_t *enp);
+
+/* Driver resource limits (minimum required/maximum usable). */
+typedef struct efx_drv_limits_s
+{
+ uint32_t edl_min_evq_count;
+ uint32_t edl_max_evq_count;
+
+ uint32_t edl_min_rxq_count;
+ uint32_t edl_max_rxq_count;
+
+ uint32_t edl_min_txq_count;
+ uint32_t edl_max_txq_count;
+
+ /* PIO blocks (sub-allocated from piobuf) */
+ uint32_t edl_min_pio_alloc_size;
+ uint32_t edl_max_pio_alloc_count;
+} efx_drv_limits_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp);
+
+typedef enum efx_nic_region_e {
+ EFX_REGION_VI, /* Memory BAR UC mapping */
+ EFX_REGION_PIO_WRITE_VI, /* Memory BAR WC mapping */
+} efx_nic_region_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *evq_countp,
+ __out uint32_t *rxq_countp,
+ __out uint32_t *txq_countp);
+
+
+#if EFSYS_OPT_VPD
+
+typedef enum efx_vpd_tag_e {
+ EFX_VPD_ID = 0x02,
+ EFX_VPD_END = 0x0f,
+ EFX_VPD_RO = 0x10,
+ EFX_VPD_RW = 0x11,
+} efx_vpd_tag_t;
+
+typedef uint16_t efx_vpd_keyword_t;
+
+typedef struct efx_vpd_value_s {
+ efx_vpd_tag_t evv_tag;
+ efx_vpd_keyword_t evv_keyword;
+ uint8_t evv_length;
+ uint8_t evv_value[0x100];
+} efx_vpd_value_t;
+
+
+#define EFX_VPD_KEYWORD(x, y) ((x) | ((y) << 8))
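+
+/*
+ * For example (illustrative), the standard PCI VPD serial number keyword
+ * "SN" is expressed as EFX_VPD_KEYWORD('S', 'N'): 'S' occupies the low byte
+ * and 'N' the high byte of the efx_vpd_keyword_t.
+ */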
+
+extern __checkReturn efx_rc_t
+efx_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_set(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_next(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+efx_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+/* NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef enum efx_nvram_type_e {
+ EFX_NVRAM_INVALID = 0,
+ EFX_NVRAM_BOOTROM,
+ EFX_NVRAM_BOOTROM_CFG,
+ EFX_NVRAM_MC_FIRMWARE,
+ EFX_NVRAM_MC_GOLDEN,
+ EFX_NVRAM_PHY,
+ EFX_NVRAM_NULLPHY,
+ EFX_NVRAM_FPGA,
+ EFX_NVRAM_FCFW,
+ EFX_NVRAM_CPLD,
+ EFX_NVRAM_FPGA_BACKUP,
+ EFX_NVRAM_DYNAMIC_CFG,
+ EFX_NVRAM_LICENSE,
+ EFX_NVRAM_NTYPES,
+} efx_nvram_type_t;
+
+extern __checkReturn efx_rc_t
+efx_nvram_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+efx_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+efx_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt size_t *pref_chunkp);
+
+extern void
+efx_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+efx_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+efx_nvram_validate(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size);
+
+extern __checkReturn efx_rc_t
+efx_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+efx_nvram_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_BOOTCFG
+
+extern efx_rc_t
+efx_bootcfg_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern efx_rc_t
+efx_bootcfg_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+#endif /* EFSYS_OPT_BOOTCFG */
+
+#if EFSYS_OPT_WOL
+
+typedef enum efx_wol_type_e {
+ EFX_WOL_TYPE_INVALID,
+ EFX_WOL_TYPE_MAGIC,
+ EFX_WOL_TYPE_BITMAP,
+ EFX_WOL_TYPE_LINK,
+ EFX_WOL_NTYPES,
+} efx_wol_type_t;
+
+typedef enum efx_lightsout_offload_type_e {
+ EFX_LIGHTSOUT_OFFLOAD_TYPE_INVALID,
+ EFX_LIGHTSOUT_OFFLOAD_TYPE_ARP,
+ EFX_LIGHTSOUT_OFFLOAD_TYPE_NS,
+} efx_lightsout_offload_type_t;
+
+#define EFX_WOL_BITMAP_MASK_SIZE (48)
+#define EFX_WOL_BITMAP_VALUE_SIZE (128)
+
+typedef union efx_wol_param_u {
+ struct {
+ uint8_t mac_addr[6];
+ } ewp_magic;
+ struct {
+ uint8_t mask[EFX_WOL_BITMAP_MASK_SIZE]; /* 1 bit per byte */
+ uint8_t value[EFX_WOL_BITMAP_VALUE_SIZE]; /* value to match */
+ uint8_t value_len;
+ } ewp_bitmap;
+} efx_wol_param_t;
+
+typedef union efx_lightsout_offload_param_u {
+ struct {
+ uint8_t mac_addr[6];
+ uint32_t ip;
+ } elop_arp;
+ struct {
+ uint8_t mac_addr[6];
+ uint32_t solicited_node[4];
+ uint32_t ip[4];
+ } elop_ns;
+} efx_lightsout_offload_param_t;
+
+extern __checkReturn efx_rc_t
+efx_wol_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_wol_filter_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_wol_filter_add(
+ __in efx_nic_t *enp,
+ __in efx_wol_type_t type,
+ __in efx_wol_param_t *paramp,
+ __out uint32_t *filter_idp);
+
+extern __checkReturn efx_rc_t
+efx_wol_filter_remove(
+ __in efx_nic_t *enp,
+ __in uint32_t filter_id);
+
+extern __checkReturn efx_rc_t
+efx_lightsout_offload_add(
+ __in efx_nic_t *enp,
+ __in efx_lightsout_offload_type_t type,
+ __in efx_lightsout_offload_param_t *paramp,
+ __out uint32_t *filter_idp);
+
+extern __checkReturn efx_rc_t
+efx_lightsout_offload_remove(
+ __in efx_nic_t *enp,
+ __in efx_lightsout_offload_type_t type,
+ __in uint32_t filter_id);
+
+extern void
+efx_wol_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_WOL */
+
+#if EFSYS_OPT_DIAG
+
+typedef enum efx_pattern_type_t {
+ EFX_PATTERN_BYTE_INCREMENT = 0,
+ EFX_PATTERN_ALL_THE_SAME,
+ EFX_PATTERN_BIT_ALTERNATE,
+ EFX_PATTERN_BYTE_ALTERNATE,
+ EFX_PATTERN_BYTE_CHANGING,
+ EFX_PATTERN_BIT_SWEEP,
+ EFX_PATTERN_NTYPES
+} efx_pattern_type_t;
+
+typedef void
+(*efx_sram_pattern_fn_t)(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp);
+
+extern __checkReturn efx_rc_t
+efx_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_pattern_type_t type);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+efx_sram_buf_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in efsys_mem_t *esmp,
+ __in size_t n);
+
+extern void
+efx_sram_buf_tbl_clear(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in size_t n);
+
+#define EFX_BUF_TBL_SIZE 0x20000
+
+#define EFX_BUF_SIZE 4096
+
+/* EV */
+
+typedef struct efx_evq_s efx_evq_t;
+
+#if EFSYS_OPT_QSTATS
+
+/* START MKCONFIG GENERATED EfxHeaderEventQueueBlock 6f3843f5fe7cc843 */
+typedef enum efx_ev_qstat_e {
+ EV_ALL,
+ EV_RX,
+ EV_RX_OK,
+ EV_RX_FRM_TRUNC,
+ EV_RX_TOBE_DISC,
+ EV_RX_PAUSE_FRM_ERR,
+ EV_RX_BUF_OWNER_ID_ERR,
+ EV_RX_IPV4_HDR_CHKSUM_ERR,
+ EV_RX_TCP_UDP_CHKSUM_ERR,
+ EV_RX_ETH_CRC_ERR,
+ EV_RX_IP_FRAG_ERR,
+ EV_RX_MCAST_PKT,
+ EV_RX_MCAST_HASH_MATCH,
+ EV_RX_TCP_IPV4,
+ EV_RX_TCP_IPV6,
+ EV_RX_UDP_IPV4,
+ EV_RX_UDP_IPV6,
+ EV_RX_OTHER_IPV4,
+ EV_RX_OTHER_IPV6,
+ EV_RX_NON_IP,
+ EV_RX_BATCH,
+ EV_TX,
+ EV_TX_WQ_FF_FULL,
+ EV_TX_PKT_ERR,
+ EV_TX_PKT_TOO_BIG,
+ EV_TX_UNEXPECTED,
+ EV_GLOBAL,
+ EV_GLOBAL_MNT,
+ EV_DRIVER,
+ EV_DRIVER_SRM_UPD_DONE,
+ EV_DRIVER_TX_DESCQ_FLS_DONE,
+ EV_DRIVER_RX_DESCQ_FLS_DONE,
+ EV_DRIVER_RX_DESCQ_FLS_FAILED,
+ EV_DRIVER_RX_DSC_ERROR,
+ EV_DRIVER_TX_DSC_ERROR,
+ EV_DRV_GEN,
+ EV_MCDI_RESPONSE,
+ EV_NQSTATS
+} efx_ev_qstat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderEventQueueBlock */
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern __checkReturn efx_rc_t
+efx_ev_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_ev_fini(
+ __in efx_nic_t *enp);
+
+#define EFX_EVQ_MAXNEVS 32768
+#define EFX_EVQ_MINNEVS 512
+
+#define EFX_EVQ_SIZE(_nevs) ((_nevs) * sizeof (efx_qword_t))
+#define EFX_EVQ_NBUFS(_nevs) (EFX_EVQ_SIZE(_nevs) / EFX_BUF_SIZE)
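+
+/*
+ * Worked example (illustrative): a 512-entry event queue occupies
+ * EFX_EVQ_SIZE(512) = 512 * sizeof (efx_qword_t) = 4096 bytes, i.e.
+ * EFX_EVQ_NBUFS(512) = 1 buffer of EFX_BUF_SIZE (4096) bytes.
+ */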
+
+extern __checkReturn efx_rc_t
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __deref_out efx_evq_t **eepp);
+
+extern void
+efx_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+typedef __checkReturn boolean_t
+(*efx_initialized_ev_t)(
+ __in_opt void *arg);
+
+#define EFX_PKT_UNICAST 0x0004
+#define EFX_PKT_START 0x0008
+
+#define EFX_PKT_VLAN_TAGGED 0x0010
+#define EFX_CKSUM_TCPUDP 0x0020
+#define EFX_CKSUM_IPV4 0x0040
+#define EFX_PKT_CONT 0x0080
+
+#define EFX_CHECK_VLAN 0x0100
+#define EFX_PKT_TCP 0x0200
+#define EFX_PKT_UDP 0x0400
+#define EFX_PKT_IPV4 0x0800
+
+#define EFX_PKT_IPV6 0x1000
+#define EFX_PKT_PREFIX_LEN 0x2000
+#define EFX_ADDR_MISMATCH 0x4000
+#define EFX_DISCARD 0x8000
+
+#define EFX_EV_RX_NLABELS 32
+#define EFX_EV_TX_NLABELS 32
+
+typedef __checkReturn boolean_t
+(*efx_rx_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id,
+ __in uint32_t size,
+ __in uint16_t flags);
+
+typedef __checkReturn boolean_t
+(*efx_tx_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id);
+
+#define EFX_EXCEPTION_RX_RECOVERY 0x00000001
+#define EFX_EXCEPTION_RX_DSC_ERROR 0x00000002
+#define EFX_EXCEPTION_TX_DSC_ERROR 0x00000003
+#define EFX_EXCEPTION_UNKNOWN_SENSOREVT 0x00000004
+#define EFX_EXCEPTION_FWALERT_SRAM 0x00000005
+#define EFX_EXCEPTION_UNKNOWN_FWALERT 0x00000006
+#define EFX_EXCEPTION_RX_ERROR 0x00000007
+#define EFX_EXCEPTION_TX_ERROR 0x00000008
+#define EFX_EXCEPTION_EV_ERROR 0x00000009
+
+typedef __checkReturn boolean_t
+(*efx_exception_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t data);
+
+typedef __checkReturn boolean_t
+(*efx_rxq_flush_done_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t rxq_index);
+
+typedef __checkReturn boolean_t
+(*efx_rxq_flush_failed_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t rxq_index);
+
+typedef __checkReturn boolean_t
+(*efx_txq_flush_done_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t txq_index);
+
+typedef __checkReturn boolean_t
+(*efx_software_ev_t)(
+ __in_opt void *arg,
+ __in uint16_t magic);
+
+typedef __checkReturn boolean_t
+(*efx_sram_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t code);
+
+#define EFX_SRAM_CLEAR 0
+#define EFX_SRAM_UPDATE 1
+#define EFX_SRAM_ILLEGAL_CLEAR 2
+
+typedef __checkReturn boolean_t
+(*efx_wake_up_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_timer_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_link_change_ev_t)(
+ __in_opt void *arg,
+ __in efx_link_mode_t link_mode);
+
+#if EFSYS_OPT_MON_STATS
+
+typedef __checkReturn boolean_t
+(*efx_monitor_ev_t)(
+ __in_opt void *arg,
+ __in efx_mon_stat_t id,
+ __in efx_mon_stat_value_t value);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#if EFSYS_OPT_MAC_STATS
+
+typedef __checkReturn boolean_t
+(*efx_mac_stats_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t generation
+ );
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+typedef struct efx_ev_callbacks_s {
+ efx_initialized_ev_t eec_initialized;
+ efx_rx_ev_t eec_rx;
+ efx_tx_ev_t eec_tx;
+ efx_exception_ev_t eec_exception;
+ efx_rxq_flush_done_ev_t eec_rxq_flush_done;
+ efx_rxq_flush_failed_ev_t eec_rxq_flush_failed;
+ efx_txq_flush_done_ev_t eec_txq_flush_done;
+ efx_software_ev_t eec_software;
+ efx_sram_ev_t eec_sram;
+ efx_wake_up_ev_t eec_wake_up;
+ efx_timer_ev_t eec_timer;
+ efx_link_change_ev_t eec_link_change;
+#if EFSYS_OPT_MON_STATS
+ efx_monitor_ev_t eec_monitor;
+#endif /* EFSYS_OPT_MON_STATS */
+#if EFSYS_OPT_MAC_STATS
+ efx_mac_stats_ev_t eec_mac_stats;
+#endif /* EFSYS_OPT_MAC_STATS */
+} efx_ev_callbacks_t;
+
+extern __checkReturn boolean_t
+efx_ev_qpending(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#if EFSYS_OPT_EV_PREFETCH
+
+extern void
+efx_ev_qprefetch(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+extern void
+efx_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+extern __checkReturn efx_rc_t
+efx_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+extern __checkReturn efx_rc_t
+efx_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#if EFSYS_OPT_QSTATS
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_ev_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern void
+efx_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern void
+efx_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+/* RX */
+
+extern __checkReturn efx_rc_t
+efx_rx_init(
+ __inout efx_nic_t *enp);
+
+extern void
+efx_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+efx_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+
+typedef enum efx_rx_hash_alg_e {
+ EFX_RX_HASHALG_LFSR = 0,
+ EFX_RX_HASHALG_TOEPLITZ
+} efx_rx_hash_alg_t;
+
+typedef enum efx_rx_hash_type_e {
+ EFX_RX_HASH_IPV4 = 0,
+ EFX_RX_HASH_TCPIPV4,
+ EFX_RX_HASH_IPV6,
+ EFX_RX_HASH_TCPIPV6,
+} efx_rx_hash_type_t;
+
+typedef enum efx_rx_hash_support_e {
+ EFX_RX_HASH_UNAVAILABLE = 0, /* Hardware hash not inserted */
+ EFX_RX_HASH_AVAILABLE /* Insert hash with/without RSS */
+} efx_rx_hash_support_t;
+
+#define EFX_RSS_TBL_SIZE 128 /* Rows in RX indirection table */
+#define EFX_MAXRSS 64 /* RX indirection entry range */
+#define EFX_MAXRSS_LEGACY 16 /* See bug16611 and bug17213 */
+
+typedef enum efx_rx_scale_support_e {
+ EFX_RX_SCALE_UNAVAILABLE = 0, /* Not supported */
+ EFX_RX_SCALE_EXCLUSIVE, /* Writable key/indirection table */
+ EFX_RX_SCALE_SHARED /* Read-only key/indirection table */
+} efx_rx_scale_support_t;
+
+extern __checkReturn efx_rc_t
+efx_rx_hash_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_hash_support_t *supportp);
+
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_scale_support_t *supportp);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+extern __checkReturn uint32_t
+efx_psuedo_hdr_hash_get(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+extern __checkReturn efx_rc_t
+efx_psuedo_hdr_pkt_length_get(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *pkt_lengthp);
+
+#define EFX_RXQ_MAXNDESCS 4096
+#define EFX_RXQ_MINNDESCS 512
+
+#define EFX_RXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
+#define EFX_RXQ_NBUFS(_ndescs) (EFX_RXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
+#define EFX_RXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+#define EFX_RXQ_DC_NDESCS(_dcsize) (8 << _dcsize)
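+
+/*
+ * Worked example (illustrative): the minimum 512-descriptor receive queue
+ * occupies EFX_RXQ_SIZE(512) = 512 * sizeof (efx_qword_t) = 4096 bytes
+ * (EFX_RXQ_NBUFS(512) = 1 buffer), and EFX_RXQ_LIMIT(512) = 496, i.e. 16
+ * descriptors are held back from posting.
+ */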
+
+typedef enum efx_rxq_type_e {
+ EFX_RXQ_TYPE_DEFAULT,
+ EFX_RXQ_TYPE_SCATTER,
+ EFX_RXQ_NTYPES
+} efx_rxq_type_t;
+
+extern __checkReturn efx_rc_t
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp);
+
+typedef struct efx_buffer_s {
+ efsys_dma_addr_t eb_addr;
+ size_t eb_size;
+ boolean_t eb_eop;
+} efx_buffer_t;
+
+typedef struct efx_desc_s {
+ efx_qword_t ed_eq;
+} efx_desc_t;
+
+extern void
+efx_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+extern void
+efx_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+extern __checkReturn efx_rc_t
+efx_rx_qflush(
+ __in efx_rxq_t *erp);
+
+extern void
+efx_rx_qenable(
+ __in efx_rxq_t *erp);
+
+extern void
+efx_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+/* TX */
+
+typedef struct efx_txq_s efx_txq_t;
+
+#if EFSYS_OPT_QSTATS
+
+/* START MKCONFIG GENERATED EfxHeaderTransmitQueueBlock 12dff8778598b2db */
+typedef enum efx_tx_qstat_e {
+ TX_POST,
+ TX_POST_PIO,
+ TX_NQSTATS
+} efx_tx_qstat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderTransmitQueueBlock */
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern __checkReturn efx_rc_t
+efx_tx_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_tx_fini(
+ __in efx_nic_t *enp);
+
+#define EFX_BUG35388_WORKAROUND(_encp) \
+ (((_encp) == NULL) ? 1 : ((_encp)->enc_bug35388_workaround != 0))
+
+#define EFX_TXQ_MAXNDESCS(_encp) \
+ ((EFX_BUG35388_WORKAROUND(_encp)) ? 2048 : 4096)
+
+#define EFX_TXQ_MINNDESCS 512
+
+#define EFX_TXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
+#define EFX_TXQ_NBUFS(_ndescs) (EFX_TXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
+#define EFX_TXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+#define EFX_TXQ_DC_NDESCS(_dcsize) (8 << _dcsize)
+
+#define EFX_TXQ_MAX_BUFS 8 /* Maximum independent of EFX_BUG35388_WORKAROUND. */
+
+#define EFX_TXQ_CKSUM_IPV4 0x0001
+#define EFX_TXQ_CKSUM_TCPUDP 0x0002
+#define EFX_TXQ_FATSOV2 0x0004
+
+extern __checkReturn efx_rc_t
+efx_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_txq_t **etpp,
+ __out unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+extern void
+efx_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+extern __checkReturn efx_rc_t
+efx_tx_qflush(
+ __in efx_txq_t *etp);
+
+extern void
+efx_tx_qenable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_enable(
+ __in efx_txq_t *etp);
+
+extern void
+efx_tx_qpio_disable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+efx_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+extern void
+efx_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp);
+
+/* Number of FATSOv2 option descriptors */
+#define EFX_TX_FATSOV2_OPT_NDESCS 2
+
+/* Maximum number of DMA segments per TSO packet (not superframe) */
+#define EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX 24
+
+extern void
+efx_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count);
+
+extern void
+efx_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp);
+
+#if EFSYS_OPT_QSTATS
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_tx_qstat_name(
+ __in efx_nic_t *etp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern void
+efx_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern void
+efx_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+
+/* FILTER */
+
+#if EFSYS_OPT_FILTER
+
+#define EFX_ETHER_TYPE_IPV4 0x0800
+#define EFX_ETHER_TYPE_IPV6 0x86DD
+
+#define EFX_IPPROTO_TCP 6
+#define EFX_IPPROTO_UDP 17
+
+typedef enum efx_filter_flag_e {
+ EFX_FILTER_FLAG_RX_RSS = 0x01, /* use RSS to spread across
+ * multiple queues */
+ EFX_FILTER_FLAG_RX_SCATTER = 0x02, /* enable RX scatter */
+ EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04, /* Override an automatic filter
+ * (priority EFX_FILTER_PRI_AUTO).
+ * May only be set by the filter
+ * implementation for each type.
+ * A removal request will
+ * restore the automatic filter
+ * in its place. */
+ EFX_FILTER_FLAG_RX = 0x08, /* Filter is for RX */
+ EFX_FILTER_FLAG_TX = 0x10, /* Filter is for TX */
+} efx_filter_flag_t;
+
+typedef enum efx_filter_match_flags_e {
+ EFX_FILTER_MATCH_REM_HOST = 0x0001, /* Match by remote IP host
+ * address */
+ EFX_FILTER_MATCH_LOC_HOST = 0x0002, /* Match by local IP host
+ * address */
+ EFX_FILTER_MATCH_REM_MAC = 0x0004, /* Match by remote MAC address */
+ EFX_FILTER_MATCH_REM_PORT = 0x0008, /* Match by remote TCP/UDP port */
+ EFX_FILTER_MATCH_LOC_MAC = 0x0010, /* Match by local MAC address */
+ EFX_FILTER_MATCH_LOC_PORT = 0x0020, /* Match by local TCP/UDP port */
+ EFX_FILTER_MATCH_ETHER_TYPE = 0x0040, /* Match by Ether-type */
+ EFX_FILTER_MATCH_INNER_VID = 0x0080, /* Match by inner VLAN ID */
+ EFX_FILTER_MATCH_OUTER_VID = 0x0100, /* Match by outer VLAN ID */
+ EFX_FILTER_MATCH_IP_PROTO = 0x0200, /* Match by IP transport
+ * protocol */
+ EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400, /* Match by local MAC address
+ * I/G bit. Used for RX default
+ * unicast and multicast/
+ * broadcast filters. */
+} efx_filter_match_flags_t;
+
+typedef enum efx_filter_priority_s {
+ EFX_FILTER_PRI_HINT = 0, /* Performance hint */
+ EFX_FILTER_PRI_AUTO, /* Automatic filter based on device
+ * address list or hardware
+ * requirements. This may only be used
+ * by the filter implementation for
+ * each NIC type. */
+ EFX_FILTER_PRI_MANUAL, /* Manually configured filter */
+ EFX_FILTER_PRI_REQUIRED, /* Required for correct behaviour of the
+ * client (e.g. SR-IOV, HyperV VMQ etc.)
+ */
+} efx_filter_priority_t;
+
+/*
+ * FIXME: All these fields are assumed to be in little-endian byte order.
+ * It may be better for some to be big-endian. See bug42804.
+ */
+
+typedef struct efx_filter_spec_s {
+ uint32_t efs_match_flags:12;
+ uint32_t efs_priority:2;
+ uint32_t efs_flags:6;
+ uint32_t efs_dmaq_id:12;
+ uint32_t efs_rss_context;
+ uint16_t efs_outer_vid;
+ uint16_t efs_inner_vid;
+ uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN];
+ uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN];
+ uint16_t efs_ether_type;
+ uint8_t efs_ip_proto;
+ uint16_t efs_loc_port;
+ uint16_t efs_rem_port;
+ efx_oword_t efs_rem_host;
+ efx_oword_t efs_loc_host;
+} efx_filter_spec_t;
+
+
+/* Default values for use in filter specifications */
+#define EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT 0xffffffff
+#define EFX_FILTER_SPEC_RX_DMAQ_ID_DROP 0xfff
+#define EFX_FILTER_SPEC_VID_UNSPEC 0xffff
+
+extern __checkReturn efx_rc_t
+efx_filter_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_filter_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_filter_insert(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_remove(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_restore(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out uint32_t *list,
+ __out size_t *length);
+
+extern void
+efx_filter_spec_init_rx(
+ __out efx_filter_spec_t *spec,
+ __in efx_filter_priority_t priority,
+ __in efx_filter_flag_t flags,
+ __in efx_rxq_t *erp);
+
+extern void
+efx_filter_spec_init_tx(
+ __out efx_filter_spec_t *spec,
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t host,
+ __in uint16_t port);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t lhost,
+ __in uint16_t lport,
+ __in uint32_t rhost,
+ __in uint16_t rport);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_eth_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t vid,
+ __in const uint8_t *addr);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_uc_def(
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_mc_def(
+ __inout efx_filter_spec_t *spec);
+
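+/*
+ * Illustrative usage sketch (not part of the API above): a caller typically
+ * builds a specification with one of the efx_filter_spec_init_*() helpers,
+ * narrows it with the efx_filter_spec_set_*() helpers, and installs it with
+ * efx_filter_insert(). For example, to steer local TCP traffic to a receive
+ * queue (error handling omitted; enp, erp, local_host and local_port are
+ * caller-supplied):
+ *
+ *	efx_filter_spec_t spec;
+ *
+ *	efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, erp);
+ *	(void) efx_filter_spec_set_ipv4_local(&spec, EFX_IPPROTO_TCP,
+ *	    local_host, local_port);
+ *	(void) efx_filter_insert(enp, &spec);
+ *
+ * A later efx_filter_remove() with the same specification removes it.
+ */
+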
+#endif /* EFSYS_OPT_FILTER */
+
+/* HASH */
+
+extern __checkReturn uint32_t
+efx_hash_dwords(
+ __in_ecount(count) uint32_t const *input,
+ __in size_t count,
+ __in uint32_t init);
+
+extern __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init);
+
+#if EFSYS_OPT_LICENSING
+
+/* LICENSING */
+
+typedef struct efx_key_stats_s {
+ uint32_t eks_valid;
+ uint32_t eks_invalid;
+ uint32_t eks_blacklisted;
+ uint32_t eks_unverifiable;
+ uint32_t eks_wrong_node;
+ uint32_t eks_licensed_apps_lo;
+ uint32_t eks_licensed_apps_hi;
+ uint32_t eks_licensed_features_lo;
+ uint32_t eks_licensed_features_hi;
+} efx_key_stats_t;
+
+extern __checkReturn efx_rc_t
+efx_lic_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_lic_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_lic_update_licenses(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_lic_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *ksp);
+
+extern __checkReturn efx_rc_t
+efx_lic_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+extern __checkReturn efx_rc_t
+efx_lic_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_opt uint8_t *bufferp);
+
+
+extern __checkReturn efx_rc_t
+efx_lic_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_LICENSING */
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_H */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_bootcfg.c b/usr/src/uts/common/io/sfxge/common/efx_bootcfg.c
new file mode 100644
index 0000000000..9e0874115d
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_bootcfg.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_BOOTCFG
+
+/*
+ * Maximum size of BOOTCFG block across all NICs as understood by SFCgPXE.
+ * A multiple of 0x100 so trailing 0xff characters don't contribute to the
+ * checksum.
+ */
+#define BOOTCFG_MAX_SIZE 0x1000
+
+#define DHCP_END (uint8_t)0xff
+#define DHCP_PAD (uint8_t)0
+
+static __checkReturn uint8_t
+efx_bootcfg_csum(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ unsigned int pos;
+ uint8_t checksum = 0;
+
+ for (pos = 0; pos < size; pos++)
+ checksum += data[pos];
+ return (checksum);
+}
+
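+/*
+ * A well-formed bootcfg sector sums to zero modulo 256 over its full length,
+ * with the byte at offset zero acting as the checksum adjustment:
+ * efx_bootcfg_verify() rejects sectors whose byte sum is non-zero, and
+ * efx_bootcfg_read()/efx_bootcfg_write() below update the first byte
+ * whenever the payload is modified.
+ */
+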
+static __checkReturn efx_rc_t
+efx_bootcfg_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt size_t *usedp)
+{
+ size_t offset = 0;
+ size_t used = 0;
+ efx_rc_t rc;
+
+ /* Start parsing tags immediately after the checksum */
+ for (offset = 1; offset < size; ) {
+ uint8_t tag;
+ uint8_t length;
+
+ /* Consume tag */
+ tag = data[offset];
+ if (tag == DHCP_END) {
+ offset++;
+ used = offset;
+ break;
+ }
+ if (tag == DHCP_PAD) {
+ offset++;
+ continue;
+ }
+
+ /* Consume length */
+ if (offset + 1 >= size) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ length = data[offset + 1];
+
+ /* Consume *length */
+ if (offset + 1 + length >= size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ offset += 2 + length;
+ used = offset;
+ }
+
+ /* Checksum the entire sector, including bytes after any DHCP_END */
+ if (efx_bootcfg_csum(enp, data, size) != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ if (usedp != NULL)
+ *usedp = used;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ efx_rc_t
+efx_bootcfg_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ uint8_t *payload = NULL;
+ size_t used_bytes;
+ size_t sector_length;
+ efx_rc_t rc;
+
+ rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &sector_length);
+ if (rc != 0)
+ goto fail1;
+
+ /*
+ * We need to read the entire BOOTCFG area to ensure we read all the
+ * tags, because legacy bootcfg sectors are not guaranteed to end with
+ * a DHCP_END character. If the user hasn't supplied a sufficiently
+ * large buffer then use our own buffer.
+ */
+ if (sector_length > BOOTCFG_MAX_SIZE)
+ sector_length = BOOTCFG_MAX_SIZE;
+ if (sector_length > size) {
+ EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload);
+ if (payload == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+ } else
+ payload = (uint8_t *)data;
+
+ if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
+ goto fail3;
+
+ rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, 0,
+ (caddr_t)payload, sector_length);
+
+ efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+
+ if (rc != 0)
+ goto fail4;
+
+ /* Verify that the area is correctly formatted and checksummed */
+ rc = efx_bootcfg_verify(enp, (caddr_t)payload, sector_length,
+ &used_bytes);
+ if (rc != 0 || used_bytes == 0) {
+ payload[0] = (uint8_t)~DHCP_END;
+ payload[1] = DHCP_END;
+ used_bytes = 2;
+ }
+
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ EFSYS_ASSERT(used_bytes <= sector_length);
+
+ /*
+ * Legacy bootcfg sectors don't terminate with a DHCP_END character.
+ * Modify the returned payload so it does. BOOTCFG_MAX_SIZE is by
+ * definition large enough for any valid (per-port) bootcfg sector,
+ * so reinitialise the sector if there isn't room for the character.
+ */
+ if (payload[used_bytes - 1] != DHCP_END) {
+ if (used_bytes + 1 > sector_length) {
+ payload[0] = 0;
+ used_bytes = 1;
+ }
+
+ payload[used_bytes] = DHCP_END;
+ ++used_bytes;
+ }
+
+ /*
+ * Verify that the user supplied buffer is large enough for the
+ * entire used bootcfg area, then copy into the user supplied buffer.
+ */
+ if (used_bytes > size) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+ if (sector_length > size) {
+ (void) memcpy(data, payload, used_bytes);
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
+ }
+
+ /* Zero out the unused portion of the user buffer */
+ if (used_bytes < size)
+ (void) memset(data + used_bytes, 0, size - used_bytes);
+
+ /*
+ * The checksum includes trailing data after any DHCP_END character,
+ * which we've just modified (by truncation or appending DHCP_END).
+ */
+ data[0] -= efx_bootcfg_csum(enp, data, size);
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ if (sector_length > size)
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ efx_rc_t
+efx_bootcfg_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ uint8_t *chunk;
+ uint8_t checksum;
+ size_t sector_length;
+ size_t chunk_length;
+ size_t used_bytes;
+ size_t offset;
+ size_t remaining;
+ efx_rc_t rc;
+
+ rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &sector_length);
+ if (rc != 0)
+ goto fail1;
+
+ if (sector_length > BOOTCFG_MAX_SIZE)
+ sector_length = BOOTCFG_MAX_SIZE;
+
+ if ((rc = efx_bootcfg_verify(enp, data, size, &used_bytes)) != 0)
+ goto fail2;
+
+ /* The caller *must* terminate their block with a DHCP_END character */
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ if ((uint8_t)data[used_bytes - 1] != DHCP_END) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ /* Check that the hardware has support for this much data */
+ if (used_bytes > MIN(sector_length, BOOTCFG_MAX_SIZE)) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, &chunk_length);
+ if (rc != 0)
+ goto fail5;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, chunk_length, chunk);
+ if (chunk == NULL) {
+ rc = ENOMEM;
+ goto fail6;
+ }
+
+ if ((rc = efx_nvram_erase(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ goto fail7;
+
+ /*
+ * Write the entire sector_length bytes of data in chunks. Zero out
+ * all data following the DHCP_END, and adjust the checksum
+ */
+ checksum = efx_bootcfg_csum(enp, data, used_bytes);
+ for (offset = 0; offset < sector_length; offset += remaining) {
+ remaining = MIN(chunk_length, sector_length - offset);
+
+ /* Fill chunk */
+ (void) memset(chunk, 0x0, chunk_length);
+ if (offset < used_bytes)
+ (void) memcpy(chunk, data + offset,
+ MIN(remaining, used_bytes - offset));
+
+ /* Adjust checksum */
+ if (offset == 0)
+ chunk[0] -= checksum;
+
+ if ((rc = efx_nvram_write_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
+ offset, (caddr_t)chunk, remaining)) != 0)
+ goto fail8;
+ }
+
+ efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+
+ EFSYS_KMEM_FREE(enp->en_esip, chunk_length, chunk);
+
+ return (0);
+
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+
+ EFSYS_KMEM_FREE(enp->en_esip, chunk_length, chunk);
+fail6:
+ EFSYS_PROBE(fail6);
+
+ efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_BOOTCFG */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_check.h b/usr/src/uts/common/io/sfxge/common/efx_check.h
new file mode 100644
index 0000000000..5748ec8e1b
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_check.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_CHECK_H
+#define _SYS_EFX_CHECK_H
+
+#include "efsys.h"
+
+/*
+ * Check that the efsys.h header in client code has a valid combination of
+ * EFSYS_OPT_xxx options.
+ *
+ * NOTE: Keep checks for obsolete options here to ensure that they are removed
+ * from client code (and do not reappear in merges from other branches).
+ */
+
+#ifdef EFSYS_OPT_FALCON
+# error "FALCON is obsolete and is not supported."
+#endif
+
+/* Support NVRAM based boot config */
+#if EFSYS_OPT_BOOTCFG
+# if !EFSYS_OPT_NVRAM
+# error "BOOTCFG requires NVRAM"
+# endif
+#endif /* EFSYS_OPT_BOOTCFG */
+
+/* Verify chip implements accessed registers */
+#if EFSYS_OPT_CHECK_REG
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_CHECK_REG */
+
+/* Decode fatal errors */
+#if EFSYS_OPT_DECODE_INTR_FATAL
+# if !EFSYS_OPT_SIENA
+# error "INTR_FATAL requires SIENA"
+# endif
+#endif /* EFSYS_OPT_DECODE_INTR_FATAL */
+
+/* Support diagnostic hardware tests */
+#if EFSYS_OPT_DIAG
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "DIAG requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_DIAG */
+
+/* Support optimized EVQ data access */
+#if EFSYS_OPT_EV_PREFETCH
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+#ifdef EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE
+# error "FALCON_NIC_CFG_OVERRIDE is obsolete and is not supported."
+#endif
+
+/* Support hardware packet filters */
+#if EFSYS_OPT_FILTER
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "FILTER requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_FILTER */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# if !EFSYS_OPT_FILTER
+# error "HUNTINGTON or MEDFORD requires FILTER"
+# endif
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+/* Support hardware loopback modes */
+#if EFSYS_OPT_LOOPBACK
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#ifdef EFSYS_OPT_MAC_FALCON_GMAC
+# error "MAC_FALCON_GMAC is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MAC_FALCON_XMAC
+# error "MAC_FALCON_XMAC is obsolete and is not supported."
+#endif
+
+/* Support MAC statistics */
+#if EFSYS_OPT_MAC_STATS
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MAC_STATS */
+
+/* Support management controller messages */
+#if EFSYS_OPT_MCDI
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MCDI requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MCDI */
+
+#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# if !EFSYS_OPT_MCDI
+# error "SIENA or HUNTINGTON or MEDFORD requires MCDI"
+# endif
+#endif
+
+/* Support MCDI logging */
+#if EFSYS_OPT_MCDI_LOGGING
+# if !EFSYS_OPT_MCDI
+# error "MCDI_LOGGING requires MCDI"
+# endif
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+/* Support MCDI proxy authorization */
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+# if !EFSYS_OPT_MCDI
+# error "MCDI_PROXY_AUTH requires MCDI"
+# endif
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+#ifdef EFSYS_OPT_MON_LM87
+# error "MON_LM87 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_MAX6647
+# error "MON_MAX6647 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_NULL
+# error "MON_NULL is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_SIENA
+# error "MON_SIENA is obsolete (replaced by MON_MCDI)."
+#endif
+
+#ifdef EFSYS_OPT_MON_HUNTINGTON
+# error "MON_HUNTINGTON is obsolete (replaced by MON_MCDI)."
+#endif
+
+/* Support monitor statistics (voltage/temperature) */
+#if EFSYS_OPT_MON_STATS
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MON_STATS */
+
+/* Support monitor via MCDI */
+#if EFSYS_OPT_MON_MCDI
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MON_MCDI */
+
+/* Support printable names for statistics */
+#if EFSYS_OPT_NAMES
+# if !(EFSYS_OPT_LOOPBACK || EFSYS_OPT_MAC_STATS || EFSYS_OPT_MCDI || \
+ EFSYS_OPT_MON_STATS || EFSYS_OPT_PHY_STATS || EFSYS_OPT_QSTATS)
+# error "NAMES requires LOOPBACK or xxxSTATS or MCDI"
+# endif
+#endif /* EFSYS_OPT_NAMES */
+
+/* Support non volatile configuration */
+#if EFSYS_OPT_NVRAM
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_NVRAM */
+
+#ifdef EFSYS_OPT_NVRAM_FALCON_BOOTROM
+# error "NVRAM_FALCON_BOOTROM is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_NVRAM_SFT9001
+# error "NVRAM_SFT9001 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_NVRAM_SFX7101
+# error "NVRAM_SFX7101 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PCIE_TUNE
+# error "PCIE_TUNE is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_BIST
+# error "PHY_BIST is obsolete (replaced by BIST)."
+#endif
+
+/* Support PHY flags */
+#if EFSYS_OPT_PHY_FLAGS
+# if !EFSYS_OPT_SIENA
+# error "PHY_FLAGS requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+/* Support for PHY LED control */
+#if EFSYS_OPT_PHY_LED_CONTROL
+# if !EFSYS_OPT_SIENA
+# error "PHY_LED_CONTROL requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+#ifdef EFSYS_OPT_PHY_NULL
+# error "PHY_NULL is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_PM8358
+# error "PHY_PM8358 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_PROPS
+# error "PHY_PROPS is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_QT2022C2
+# error "PHY_QT2022C2 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_QT2025C
+# error "PHY_QT2025C is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_SFT9001
+# error "PHY_SFT9001 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_SFX7101
+# error "PHY_SFX7101 is obsolete and is not supported."
+#endif
+
+/* Support PHY statistics */
+#if EFSYS_OPT_PHY_STATS
+# if !EFSYS_OPT_SIENA
+# error "PHY_STATS requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#ifdef EFSYS_OPT_PHY_TXC43128
+# error "PHY_TXC43128 is obsolete and is not supported."
+#endif
+
+/* Support EVQ/RXQ/TXQ statistics */
+#if EFSYS_OPT_QSTATS
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_QSTATS */
+
+#ifdef EFSYS_OPT_RX_HDR_SPLIT
+# error "RX_HDR_SPLIT is obsolete and is not supported"
+#endif
+
+/* Support receive scaling (RSS) */
+#if EFSYS_OPT_RX_SCALE
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_RX_SCALE */
+
+/* Support receive scatter DMA */
+#if EFSYS_OPT_RX_SCATTER
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#ifdef EFSYS_OPT_STAT_NAME
+# error "STAT_NAME is obsolete (replaced by NAMES)."
+#endif
+
+/* Support PCI Vital Product Data (VPD) */
+#if EFSYS_OPT_VPD
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "VPD requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_VPD */
+
+/* Support Wake on LAN */
+#if EFSYS_OPT_WOL
+# if !EFSYS_OPT_SIENA
+# error "WOL requires SIENA"
+# endif
+#endif /* EFSYS_OPT_WOL */
+
+#ifdef EFSYS_OPT_MCAST_FILTER_LIST
+# error "MCAST_FILTER_LIST is obsolete and is not supported"
+#endif
+
+/* Support BIST */
+#if EFSYS_OPT_BIST
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "BIST requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_BIST */
+
+/* Support MCDI licensing API */
+#if EFSYS_OPT_LICENSING
+# if !EFSYS_OPT_MCDI
+# error "LICENSING requires MCDI"
+# endif
+# if !EFSYS_HAS_UINT64
+# error "LICENSING requires UINT64"
+# endif
+#endif /* EFSYS_OPT_LICENSING */
+
+
+#endif /* _SYS_EFX_CHECK_H */
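The checks above only constrain combinations of options; the actual selections live in each port's efsys.h, which is included before this header. Purely as an illustration (these names and values are an assumption about a hypothetical port, not part of this change), a Siena-only configuration that satisfies the dependency rules could look like:

/* Hypothetical efsys.h fragment: a Siena-only option set */
#define	EFSYS_OPT_SIENA		1
#define	EFSYS_OPT_HUNTINGTON	0
#define	EFSYS_OPT_MEDFORD	0

#define	EFSYS_OPT_MCDI		1	/* SIENA requires MCDI */
#define	EFSYS_OPT_FILTER	1	/* optional for Siena, mandatory for EF10 */
#define	EFSYS_OPT_MAC_STATS	1
/* remaining EFSYS_OPT_* values are 0 in this example */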
diff --git a/usr/src/uts/common/io/sfxge/common/efx_crc32.c b/usr/src/uts/common/io/sfxge/common/efx_crc32.c
new file mode 100644
index 0000000000..cc22a946ce
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_crc32.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2013-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+/*
+ * Precomputed table for computing IEEE 802.3 CRC32
+ * with polynomial 0x04c11db7 (bit-reversed 0xedb88320)
+ */
+
+static const uint32_t efx_crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+/* Calculate the IEEE 802.3 CRC32 of a MAC addr */
+ __checkReturn uint32_t
+efx_crc32_calculate(
+ __in uint32_t crc_init,
+ __in_ecount(length) uint8_t const *input,
+ __in int length)
+{
+ int index;
+ uint32_t crc = crc_init;
+
+ for (index = 0; index < length; index++) {
+ uint32_t data = *(input++);
+ crc = (crc >> 8) ^ efx_crc32_table[(crc ^ data) & 0xff];
+ }
+
+ return (crc);
+}
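A brief usage sketch of the table-driven CRC: the caller supplies the seed and the byte range, and efx_crc32_calculate() folds one byte per iteration through the table above. The all-ones seed and the bucket selection below are illustrative choices, not requirements of this file; EFX_MAC_ADDR_LEN is the 6-byte MAC length used elsewhere in this library.

/* Illustrative: CRC of a MAC address, then pick a hash bucket */
uint8_t mac[EFX_MAC_ADDR_LEN] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };
uint32_t crc, bucket;

crc = efx_crc32_calculate(0xffffffff, mac, EFX_MAC_ADDR_LEN);
bucket = crc & 0xff;	/* example: low 8 bits select a bucket */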
diff --git a/usr/src/uts/common/io/sfxge/common/efx_ev.c b/usr/src/uts/common/io/sfxge/common/efx_ev.c
new file mode 100644
index 0000000000..96082e7c49
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_ev.c
@@ -0,0 +1,1419 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_QSTATS
+#define EFX_EV_QSTAT_INCR(_eep, _stat) \
+ do { \
+ (_eep)->ee_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_EV_QSTAT_INCR(_eep, _stat)
+#endif
+
+#define EFX_EV_PRESENT(_qword) \
+ (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
+ EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
+
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_ev_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_ev_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep);
+
+static void
+siena_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+static __checkReturn efx_rc_t
+siena_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+static void
+siena_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static void
+siena_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+static __checkReturn efx_rc_t
+siena_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+
+#endif
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_ev_ops_t __efx_ev_siena_ops = {
+ siena_ev_init, /* eevo_init */
+ siena_ev_fini, /* eevo_fini */
+ siena_ev_qcreate, /* eevo_qcreate */
+ siena_ev_qdestroy, /* eevo_qdestroy */
+ siena_ev_qprime, /* eevo_qprime */
+ siena_ev_qpost, /* eevo_qpost */
+ siena_ev_qmoderate, /* eevo_qmoderate */
+#if EFSYS_OPT_QSTATS
+ siena_ev_qstats_update, /* eevo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_ev_ops_t __efx_ev_ef10_ops = {
+ ef10_ev_init, /* eevo_init */
+ ef10_ev_fini, /* eevo_fini */
+ ef10_ev_qcreate, /* eevo_qcreate */
+ ef10_ev_qdestroy, /* eevo_qdestroy */
+ ef10_ev_qprime, /* eevo_qprime */
+ ef10_ev_qpost, /* eevo_qpost */
+ ef10_ev_qmoderate, /* eevo_qmoderate */
+#if EFSYS_OPT_QSTATS
+ ef10_ev_qstats_update, /* eevo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_ev_init(
+ __in efx_nic_t *enp)
+{
+ const efx_ev_ops_t *eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ if (enp->en_mod_flags & EFX_MOD_EV) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ eevop = &__efx_ev_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
+
+ if ((rc = eevop->eevo_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_eevop = eevop;
+ enp->en_mod_flags |= EFX_MOD_EV;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_eevop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_EV;
+ return (rc);
+}
+
+ void
+efx_ev_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+ EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
+
+ eevop->eevo_fini(enp);
+
+ enp->en_eevop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_EV;
+}
+
+
+ __checkReturn efx_rc_t
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __deref_out efx_evq_t **eepp)
+{
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_evq_t *eep;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
+
+ EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit);
+
+ /* Allocate an EVQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
+ if (eep == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ eep->ee_magic = EFX_EVQ_MAGIC;
+ eep->ee_enp = enp;
+ eep->ee_index = index;
+ eep->ee_mask = n - 1;
+ eep->ee_esmp = esmp;
+
+ if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, eep)) != 0)
+ goto fail2;
+
+ enp->en_ev_qcount++;
+ *eepp = eep;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_ev_qcount != 0);
+ --enp->en_ev_qcount;
+
+ eevop->eevo_qdestroy(eep);
+
+ /* Free the EVQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
+}
+
+ __checkReturn efx_rc_t
+efx_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = eevop->eevo_qprime(eep, count)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn boolean_t
+efx_ev_qpending(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ size_t offset;
+ efx_qword_t qword;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
+
+ return (EFX_EV_PRESENT(qword));
+}
+
+#if EFSYS_OPT_EV_PREFETCH
+
+ void
+efx_ev_qprefetch(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned int offset;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+}
+
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ void
+efx_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ /*
+ * FIXME: Huntington will require support for hardware event batching
+ * and merging, which will need a different ev_qpoll implementation.
+ *
+ * Without those features the Falcon/Siena code can be used unchanged.
+ */
+ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
+ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
+
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
+ FSE_AZ_EV_CODE_DRV_GEN_EV);
+#if EFSYS_OPT_MCDI
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
+ FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
+#endif
+ siena_ev_qpoll(eep, countp, eecp, arg);
+}
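For context, a typical consumer of this API services an event queue by checking for pending events, processing them through its callback table, and then re-priming the queue so further interrupts are raised. The helper name and structure below are a hedged sketch of such a caller, not code from this driver.

/* Sketch of a driver's event-queue service routine (illustrative) */
static void
drv_evq_service(efx_evq_t *eep, const efx_ev_callbacks_t *eecp, void *arg,
    unsigned int *countp)
{
	if (efx_ev_qpending(eep, *countp)) {
		efx_ev_qpoll(eep, countp, eecp, arg);
		if (efx_ev_qprime(eep, *countp) != 0) {
			/* a real driver would log or retry here */
		}
	}
}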
+
+ void
+efx_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ EFSYS_ASSERT(eevop != NULL &&
+ eevop->eevo_qpost != NULL);
+
+ eevop->eevo_qpost(eep, data);
+}
+
+ __checkReturn efx_rc_t
+efx_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#if EFSYS_OPT_QSTATS
+ void
+efx_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ eevop->eevo_qstats_update(eep, stat);
+}
+
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_ev_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * Program the event queue for receive and transmit queue
+ * flush events.
+ */
+ EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
+
+ return (0);
+
+}
+
+static __checkReturn boolean_t
+siena_ev_rx_not_ok(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in uint32_t label,
+ __in uint32_t id,
+ __inout uint16_t *flagsp)
+{
+ boolean_t ignore = B_FALSE;
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
+ EFSYS_PROBE(tobe_disc);
+ /*
+ * Assume this is a unicast address mismatch, unless below
+ * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
+ * EV_RX_PAUSE_FRM_ERR is set.
+ */
+ (*flagsp) |= EFX_ADDR_MISMATCH;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
+ EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ (*flagsp) |= EFX_DISCARD;
+
+#if EFSYS_OPT_RX_SCATTER
+ /*
+ * Look out for payload queue "ran dry" errors and ignore them.
+ *
+ * Sadly for the header/data split cases, the descriptor
+ * pointer in this event refers to the header queue and
+ * therefore cannot be easily detected as duplicate.
+ * So we drop these and rely on the receive processing seeing
+ * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
+ * the partially received packet.
+ */
+ if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
+ (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
+ (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
+ ignore = B_TRUE;
+#endif /* EFSYS_OPT_RX_SCATTER */
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ EFSYS_PROBE(crc_err);
+ (*flagsp) &= ~EFX_ADDR_MISMATCH;
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
+ EFSYS_PROBE(pause_frm_err);
+ (*flagsp) &= ~EFX_ADDR_MISMATCH;
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
+ EFSYS_PROBE(owner_id_err);
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+ EFSYS_PROBE(ipv4_err);
+ (*flagsp) &= ~EFX_CKSUM_IPV4;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+ EFSYS_PROBE(udp_chk_err);
+ (*flagsp) &= ~EFX_CKSUM_TCPUDP;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
+
+ /*
+ * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
+ * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
+ * condition.
+ */
+ (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
+ }
+
+ return (ignore);
+}
+
+static __checkReturn boolean_t
+siena_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t id;
+ uint32_t size;
+ uint32_t label;
+ boolean_t ok;
+#if EFSYS_OPT_RX_SCATTER
+ boolean_t sop;
+ boolean_t jumbo_cont;
+#endif /* EFSYS_OPT_RX_SCATTER */
+ uint32_t hdr_type;
+ boolean_t is_v6;
+ uint16_t flags;
+ boolean_t ignore;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_RX);
+
+ /* Basic packet information */
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
+ size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
+ ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
+
+#if EFSYS_OPT_RX_SCATTER
+ sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
+ jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
+
+ is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
+
+ /*
+ * If packet is marked as OK and packet type is TCP/IP or
+ * UDP/IP or other IP, then we can rely on the hardware checksums.
+ */
+ switch (hdr_type) {
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+ flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
+ flags |= EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
+ flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+ flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
+ flags |= EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
+ flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
+ flags = EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
+ flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+ EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
+ flags = 0;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ flags = 0;
+ break;
+ }
+
+#if EFSYS_OPT_RX_SCATTER
+ /* Report scatter and header/lookahead split buffer flags */
+ if (sop)
+ flags |= EFX_PKT_START;
+ if (jumbo_cont)
+ flags |= EFX_PKT_CONT;
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
+ if (!ok) {
+ ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
+ if (ignore) {
+ EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
+ uint32_t, size, uint16_t, flags);
+
+ return (B_FALSE);
+ }
+ }
+
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ /* Detect multicast packets that didn't match the filter */
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
+ } else {
+ EFSYS_PROBE(mcast_mismatch);
+ flags |= EFX_ADDR_MISMATCH;
+ }
+ } else {
+ flags |= EFX_PKT_UNICAST;
+ }
+
+ /*
+ * The packet parser in Siena can abort parsing packets under
+ * certain error conditions, setting the PKT_NOT_PARSED bit
+ * (which clears PKT_OK). If this is set, then don't trust
+ * the PKT_TYPE field.
+ */
+ if (!ok) {
+ uint32_t parse_err;
+
+ parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
+ if (parse_err != 0)
+ flags |= EFX_CHECK_VLAN;
+ }
+
+ if (~flags & EFX_CHECK_VLAN) {
+ uint32_t pkt_type;
+
+ pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
+ if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
+ flags |= EFX_PKT_VLAN_TAGGED;
+ }
+
+ EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
+ uint32_t, size, uint16_t, flags);
+
+ EFSYS_ASSERT(eecp->eec_rx != NULL);
+ should_abort = eecp->eec_rx(arg, label, id, size, flags);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+siena_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t id;
+ uint32_t label;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
+
+ EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
+
+ EFSYS_ASSERT(eecp->eec_tx != NULL);
+ should_abort = eecp->eec_tx(arg, label, id);
+
+ return (should_abort);
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
+ return (B_FALSE);
+}
+
+static __checkReturn boolean_t
+siena_ev_global(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ _NOTE(ARGUNUSED(eqp, eecp, arg))
+
+ EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
+
+ return (B_FALSE);
+}
+
+static __checkReturn boolean_t
+siena_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
+ should_abort = B_FALSE;
+
+ switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
+ uint32_t txq_index;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
+
+ txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
+
+ EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
+ should_abort = eecp->eec_txq_flush_done(arg, txq_index);
+
+ break;
+ }
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
+ uint32_t rxq_index;
+ uint32_t failed;
+
+ rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+
+ EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
+ EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
+
+ if (failed) {
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
+
+ EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
+
+ should_abort = eecp->eec_rxq_flush_failed(arg,
+ rxq_index);
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
+
+ should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
+ }
+
+ break;
+ }
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ EFSYS_ASSERT(eecp->eec_initialized != NULL);
+ should_abort = eecp->eec_initialized(arg);
+
+ break;
+
+ case FSE_AZ_EVQ_NOT_EN_EV:
+ EFSYS_PROBE(evq_not_en);
+ break;
+
+ case FSE_AZ_SRM_UPD_DONE_EV: {
+ uint32_t code;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
+
+ code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_sram != NULL);
+ should_abort = eecp->eec_sram(arg, code);
+
+ break;
+ }
+ case FSE_AZ_WAKE_UP_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_wake_up != NULL);
+ should_abort = eecp->eec_wake_up(arg, id);
+
+ break;
+ }
+ case FSE_AZ_TX_PKT_NON_TCP_UDP:
+ EFSYS_PROBE(tx_pkt_non_tcp_udp);
+ break;
+
+ case FSE_AZ_TIMER_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_timer != NULL);
+ should_abort = eecp->eec_timer(arg, id);
+
+ break;
+ }
+ case FSE_AZ_RX_DSC_ERROR_EV:
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
+
+ EFSYS_PROBE(rx_dsc_error);
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_RX_DSC_ERROR, 0);
+
+ break;
+
+ case FSE_AZ_TX_DSC_ERROR_EV:
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
+
+ EFSYS_PROBE(tx_dsc_error);
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_TX_DSC_ERROR, 0);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+siena_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t data;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
+
+ data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
+ if (data >= ((uint32_t)1 << 16)) {
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ return (B_TRUE);
+ }
+
+ EFSYS_ASSERT(eecp->eec_software != NULL);
+ should_abort = eecp->eec_software(arg, (uint16_t)data);
+
+ return (should_abort);
+}
+
+#if EFSYS_OPT_MCDI
+
+static __checkReturn boolean_t
+siena_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned code;
+ boolean_t should_abort = B_FALSE;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ if (enp->en_family != EFX_FAMILY_SIENA)
+ goto out;
+
+ EFSYS_ASSERT(eecp->eec_link_change != NULL);
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+#if EFSYS_OPT_MON_STATS
+ EFSYS_ASSERT(eecp->eec_monitor != NULL);
+#endif
+
+ EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
+
+ code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ efx_mcdi_ev_death(enp, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(enp,
+ MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
+ MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
+ MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
+ break;
+
+ case MCDI_EVENT_CODE_LINKCHANGE: {
+ efx_link_mode_t link_mode;
+
+ siena_phy_link_ev(enp, eqp, &link_mode);
+ should_abort = eecp->eec_link_change(arg, link_mode);
+ break;
+ }
+ case MCDI_EVENT_CODE_SENSOREVT: {
+#if EFSYS_OPT_MON_STATS
+ efx_mon_stat_t id;
+ efx_mon_stat_value_t value;
+ efx_rc_t rc;
+
+ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
+ should_abort = eecp->eec_monitor(arg, id, value);
+ else if (rc == ENOTSUP) {
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_SENSOREVT,
+ MCDI_EV_FIELD(eqp, DATA));
+ } else
+ EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
+#else
+ should_abort = B_FALSE;
+#endif
+ break;
+ }
+ case MCDI_EVENT_CODE_SCHEDERR:
+ /* Informational only */
+ break;
+
+ case MCDI_EVENT_CODE_REBOOT:
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+#if EFSYS_OPT_MAC_STATS
+ if (eecp->eec_mac_stats != NULL) {
+ eecp->eec_mac_stats(arg,
+ MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
+ }
+#endif
+ break;
+
+ case MCDI_EVENT_CODE_FWALERT: {
+ uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
+
+ if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_FWALERT_SRAM,
+ MCDI_EV_FIELD(eqp, FWALERT_DATA));
+ else
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_FWALERT,
+ MCDI_EV_FIELD(eqp, DATA));
+ break;
+ }
+
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, code);
+ break;
+ }
+
+out:
+ return (should_abort);
+}
+
+#endif /* EFSYS_OPT_MCDI */
+
+static __checkReturn efx_rc_t
+siena_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t rptr;
+ efx_dword_t dword;
+
+ rptr = count & eep->ee_mask;
+
+ EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
+
+ EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
+ &dword, B_FALSE);
+
+ return (0);
+}
+
+#define EFX_EV_BATCH 8
+
+static void
+siena_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_qword_t ev[EFX_EV_BATCH];
+ unsigned int batch;
+ unsigned int total;
+ unsigned int count;
+ unsigned int index;
+ size_t offset;
+
+ EFSYS_ASSERT(countp != NULL);
+ EFSYS_ASSERT(eecp != NULL);
+
+ count = *countp;
+ do {
+ /* Read up until the end of the batch period */
+ batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (total = 0; total < batch; ++total) {
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
+
+ if (!EFX_EV_PRESENT(ev[total]))
+ break;
+
+ EFSYS_PROBE3(event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
+
+ offset += sizeof (efx_qword_t);
+ }
+
+#if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
+ /*
+ * Prefetch the next batch when we get within PREFETCH_PERIOD
+ * of a completed batch. If the batch is smaller, then prefetch
+ * immediately.
+ */
+ if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ /* Process the batch of events */
+ for (index = 0; index < total; ++index) {
+ boolean_t should_abort;
+ uint32_t code;
+
+#if EFSYS_OPT_EV_PREFETCH
+ /* Prefetch if we've now reached the batch period */
+ if (total == batch &&
+ index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
+ offset = (count + batch) & eep->ee_mask;
+ offset *= sizeof (efx_qword_t);
+
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+ }
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ EFX_EV_QSTAT_INCR(eep, EV_ALL);
+
+ code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
+ switch (code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ should_abort = eep->ee_rx(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ should_abort = eep->ee_tx(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ should_abort = eep->ee_driver(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ should_abort = eep->ee_drv_gen(eep,
+ &(ev[index]), eecp, arg);
+ break;
+#if EFSYS_OPT_MCDI
+ case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
+ should_abort = eep->ee_mcdi(eep,
+ &(ev[index]), eecp, arg);
+ break;
+#endif
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (eep->ee_global) {
+ should_abort = eep->ee_global(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ }
+ /* else fallthrough */
+ default:
+ EFSYS_PROBE3(bad_event,
+ unsigned int, eep->ee_index,
+ uint32_t,
+ EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
+ uint32_t,
+ EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ (void) eecp->eec_exception(arg,
+ EFX_EXCEPTION_EV_ERROR, code);
+ should_abort = B_TRUE;
+ }
+ if (should_abort) {
+ /* Ignore subsequent events */
+ total = index + 1;
+ break;
+ }
+ }
+
+ /*
+ * Now that the hardware has most likely moved on to DMAing
+ * into the next cache line, clear the processed events. Take
+ * care to clear out only the events that we have processed.
+ */
+ EFX_SET_QWORD(ev[0]);
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (index = 0; index < total; ++index) {
+ EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
+ offset += sizeof (efx_qword_t);
+ }
+
+ count += total;
+
+ } while (total == batch);
+
+ *countp = count;
+}
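The batching arithmetic in siena_ev_qpoll() is easier to see with numbers: reads always stop at an EFX_EV_BATCH boundary, and the outer loop runs until a read comes back short.

/*
 * Worked example (EFX_EV_BATCH == 8):
 *	count = 5:  batch = 8 - (5 & 7) = 3	(read up to the boundary)
 *	count = 8:  batch = 8 - (8 & 7) = 8	(a full batch)
 * The do/while exits once total < batch, i.e. the queue ran out of events.
 */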
+
+static void
+siena_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_qword_t ev;
+ efx_oword_t oword;
+
+ EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_EV_DATA_DW0, (uint32_t)data);
+
+ EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
+ EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
+ EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
+
+ EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
+}
+
+static __checkReturn efx_rc_t
+siena_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ unsigned int locked;
+ efx_dword_t dword;
+ efx_rc_t rc;
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
+ FRF_CZ_TC_TIMER_VAL, 0);
+ } else {
+ uint32_t timer_val;
+
+ /* Calculate the timer value in quanta */
+ timer_val = us * 1000 / encp->enc_evq_timer_quantum_ns;
+
+ /* Moderation value is base 0 so we need to deduct 1 */
+ if (timer_val > 0)
+ timer_val--;
+
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
+ FRF_CZ_TC_TIMER_VAL, timer_val);
+ }
+
+ locked = (eep->ee_index == 0) ? 1 : 0;
+
+ EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
+ eep->ee_index, &dword, locked);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
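The quanta conversion in siena_ev_qmoderate() can be checked with concrete numbers; the timer quantum below is an assumed figure for illustration only, since the real value is read from the NIC configuration into enc_evq_timer_quantum_ns.

/*
 * Worked example (quantum value assumed, not hardware-derived):
 *	us = 30, enc_evq_timer_quantum_ns = 6144
 *	timer_val = 30 * 1000 / 6144 = 4, then the base-0 adjustment gives 3
 */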
+
+static __checkReturn efx_rc_t
+siena_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t size;
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(esmp));
+
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
+
+ if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_evq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+#if EFSYS_OPT_RX_SCALE
+ if (enp->en_intr.ei_type == EFX_INTR_LINE &&
+ index >= EFX_MAXRSS_LEGACY) {
+ rc = EINVAL;
+ goto fail3;
+ }
+#endif
+ for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* Set up the handler table */
+ eep->ee_rx = siena_ev_rx;
+ eep->ee_tx = siena_ev_tx;
+ eep->ee_driver = siena_ev_driver;
+ eep->ee_global = siena_ev_global;
+ eep->ee_drv_gen = siena_ev_drv_gen;
+#if EFSYS_OPT_MCDI
+ eep->ee_mcdi = siena_ev_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+
+ /* Set up the new event queue */
+ EFX_POPULATE_OWORD_1(oword, FRF_CZ_TIMER_Q_EN, 1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
+ FRF_AZ_EVQ_BUF_BASE_ID, id);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+#if EFSYS_OPT_RX_SCALE
+fail3:
+ EFSYS_PROBE(fail3);
+#endif
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
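The size-encoding loop in siena_ev_qcreate() stores log2(n / EFX_EVQ_MINNEVS) into FRF_AZ_EVQ_SIZE. A worked example:

/*
 * Worked example: n = 8 * EFX_EVQ_MINNEVS
 *	n / EFX_EVQ_MINNEVS = 8 = 1 << 3, so the loop exits with size = 3,
 *	and the buffer-table check requires id + (1 << 3) to stay below
 *	enc_buftbl_limit.
 */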
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_QSTATS
+#if EFSYS_OPT_NAMES
+/* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock b693ddf85aee1bfd */
+static const char *__efx_ev_qstat_name[] = {
+ "all",
+ "rx",
+ "rx_ok",
+ "rx_frm_trunc",
+ "rx_tobe_disc",
+ "rx_pause_frm_err",
+ "rx_buf_owner_id_err",
+ "rx_ipv4_hdr_chksum_err",
+ "rx_tcp_udp_chksum_err",
+ "rx_eth_crc_err",
+ "rx_ip_frag_err",
+ "rx_mcast_pkt",
+ "rx_mcast_hash_match",
+ "rx_tcp_ipv4",
+ "rx_tcp_ipv6",
+ "rx_udp_ipv4",
+ "rx_udp_ipv6",
+ "rx_other_ipv4",
+ "rx_other_ipv6",
+ "rx_non_ip",
+ "rx_batch",
+ "tx",
+ "tx_wq_ff_full",
+ "tx_pkt_err",
+ "tx_pkt_too_big",
+ "tx_unexpected",
+ "global",
+ "global_mnt",
+ "driver",
+ "driver_srm_upd_done",
+ "driver_tx_descq_fls_done",
+ "driver_rx_descq_fls_done",
+ "driver_rx_descq_fls_failed",
+ "driver_rx_dsc_error",
+ "driver_tx_dsc_error",
+ "drv_gen",
+ "mcdi_response",
+};
+/* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
+
+ const char *
+efx_ev_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(id, <, EV_NQSTATS);
+
+ return (__efx_ev_qstat_name[id]);
+}
+#endif /* EFSYS_OPT_NAMES */
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+ eep->ee_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+static void
+siena_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_oword_t oword;
+
+ /* Purge event queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
+ eep->ee_index, &oword, B_TRUE);
+
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
+}
+
+static void
+siena_ev_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_filter.c b/usr/src/uts/common/io/sfxge/common/efx_filter.c
new file mode 100644
index 0000000000..bb722ab8e3
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_filter.c
@@ -0,0 +1,1398 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_FILTER
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_filter_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_filter_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_filter_restore(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace);
+
+static __checkReturn efx_rc_t
+siena_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+static __checkReturn efx_rc_t
+siena_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out uint32_t *list,
+ __out size_t *length);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_filter_ops_t __efx_filter_siena_ops = {
+ siena_filter_init, /* efo_init */
+ siena_filter_fini, /* efo_fini */
+ siena_filter_restore, /* efo_restore */
+ siena_filter_add, /* efo_add */
+ siena_filter_delete, /* efo_delete */
+ siena_filter_supported_filters, /* efo_supported_filters */
+ NULL, /* efo_reconfigure */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_filter_ops_t __efx_filter_ef10_ops = {
+ ef10_filter_init, /* efo_init */
+ ef10_filter_fini, /* efo_fini */
+ ef10_filter_restore, /* efo_restore */
+ ef10_filter_add, /* efo_add */
+ ef10_filter_delete, /* efo_delete */
+ ef10_filter_supported_filters, /* efo_supported_filters */
+ ef10_filter_reconfigure, /* efo_reconfigure */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_filter_insert(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ const efx_filter_ops_t *efop = enp->en_efop;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+ return (efop->efo_add(enp, spec, B_FALSE));
+}
+
+ __checkReturn efx_rc_t
+efx_filter_remove(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ const efx_filter_ops_t *efop = enp->en_efop;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+#if EFSYS_OPT_RX_SCALE
+ spec->efs_rss_context = enp->en_rss_context;
+#endif
+
+ return (efop->efo_delete(enp, spec));
+}
+
+ __checkReturn efx_rc_t
+efx_filter_restore(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ if ((rc = enp->en_efop->efo_restore(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_filter_init(
+ __in efx_nic_t *enp)
+{
+ const efx_filter_ops_t *efop;
+ efx_rc_t rc;
+
+ /* Check that efx_filter_spec_t is 64 bytes. */
+ EFX_STATIC_ASSERT(sizeof (efx_filter_spec_t) == 64);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_FILTER));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ efop = &__efx_filter_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efop->efo_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_efop = efop;
+ enp->en_mod_flags |= EFX_MOD_FILTER;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_efop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_FILTER;
+ return (rc);
+}
+
+ void
+efx_filter_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ enp->en_efop->efo_fini(enp);
+
+ enp->en_efop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_FILTER;
+}
+
+ __checkReturn efx_rc_t
+efx_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out uint32_t *list,
+ __out size_t *length)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT(enp->en_efop->efo_supported_filters != NULL);
+
+ if ((rc = enp->en_efop->efo_supported_filters(enp, list, length)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ if (enp->en_efop->efo_reconfigure != NULL) {
+ if ((rc = enp->en_efop->efo_reconfigure(enp, mac_addr,
+ all_unicst, mulcst,
+ all_mulcst, brdcst,
+ addrs, count)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_filter_spec_init_rx(
+ __out efx_filter_spec_t *spec,
+ __in efx_filter_priority_t priority,
+ __in efx_filter_flag_t flags,
+ __in efx_rxq_t *erp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(erp, !=, NULL);
+ EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_SCATTER)) == 0);
+
+ (void) memset(spec, 0, sizeof (*spec));
+ spec->efs_priority = priority;
+ spec->efs_flags = EFX_FILTER_FLAG_RX | flags;
+ spec->efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;
+ spec->efs_dmaq_id = (uint16_t)erp->er_index;
+}
+
+ void
+efx_filter_spec_init_tx(
+ __out efx_filter_spec_t *spec,
+ __in efx_txq_t *etp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(etp, !=, NULL);
+
+ (void) memset(spec, 0, sizeof (*spec));
+ spec->efs_priority = EFX_FILTER_PRI_REQUIRED;
+ spec->efs_flags = EFX_FILTER_FLAG_TX;
+ spec->efs_dmaq_id = (uint16_t)etp->et_index;
+}
+
+
+/*
+ * Specify IPv4 host, transport protocol and port in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t host,
+ __in uint16_t port)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->efs_ether_type = EFX_ETHER_TYPE_IPV4;
+ spec->efs_ip_proto = proto;
+ spec->efs_loc_host.eo_u32[0] = host;
+ spec->efs_loc_port = port;
+ return (0);
+}
+
+/*
+ * Specify IPv4 hosts, transport protocol and ports in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t lhost,
+ __in uint16_t lport,
+ __in uint32_t rhost,
+ __in uint16_t rport)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec->efs_ether_type = EFX_ETHER_TYPE_IPV4;
+ spec->efs_ip_proto = proto;
+ spec->efs_loc_host.eo_u32[0] = lhost;
+ spec->efs_loc_port = lport;
+ spec->efs_rem_host.eo_u32[0] = rhost;
+ spec->efs_rem_port = rport;
+ return (0);
+}
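Putting the spec helpers together: a caller that wants to steer one TCP flow to a particular receive queue initializes an RX spec, describes the match, and inserts the filter. The helper below is an illustrative sketch; the names drv_steer_tcp_flow, local_ip and local_port are invented for the example, and the byte ordering of the host and port arguments is the caller's responsibility.

/* Illustrative only: steer a local TCP host/port match to one RX queue */
static efx_rc_t
drv_steer_tcp_flow(efx_nic_t *enp, efx_rxq_t *erp,
    uint32_t local_ip, uint16_t local_port)
{
	efx_filter_spec_t spec;
	efx_rc_t rc;

	efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, 0, erp);
	rc = efx_filter_spec_set_ipv4_local(&spec, EFX_IPPROTO_TCP,
	    local_ip, local_port);
	if (rc == 0)
		rc = efx_filter_insert(enp, &spec);
	return (rc);
}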
+
+/*
+ * Specify local Ethernet address and/or VID in filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_eth_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t vid,
+ __in const uint8_t *addr)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(addr, !=, NULL);
+
+ if (vid == EFX_FILTER_SPEC_VID_UNSPEC && addr == NULL)
+ return (EINVAL);
+
+ if (vid != EFX_FILTER_SPEC_VID_UNSPEC) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec->efs_outer_vid = vid;
+ }
+ if (addr != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ (void) memcpy(spec->efs_loc_mac, addr, EFX_MAC_ADDR_LEN);
+ }
+ return (0);
+}
+
+/*
+ * Specify matching otherwise-unmatched unicast in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_uc_def(
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ return (0);
+}
+
+/*
+ * Specify matching otherwise-unmatched multicast in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_mc_def(
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ spec->efs_loc_mac[0] = 1;
+ return (0);
+}
+
+
+
+#if EFSYS_OPT_SIENA
+
+/*
+ * "Fudge factors" - difference between programmed value and actual depth.
+ * Due to pipelined implementation we need to program H/W with a value that
+ * is larger than the hop limit we want.
+ */
+#define FILTER_CTL_SRCH_FUDGE_WILD 3
+#define FILTER_CTL_SRCH_FUDGE_FULL 1
+
+/*
+ * Hard maximum hop limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in efx_filter_search() when the
+ * table is full.
+ */
+#define FILTER_CTL_SRCH_MAX 200
+
+static __checkReturn efx_rc_t
+siena_filter_spec_from_gen_spec(
+ __out siena_filter_spec_t *sf_spec,
+ __in efx_filter_spec_t *gen_spec)
+{
+ efx_rc_t rc;
+ boolean_t is_full = B_FALSE;
+
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX)
+ EFSYS_ASSERT3U(gen_spec->efs_flags, ==, EFX_FILTER_FLAG_TX);
+ else
+ EFSYS_ASSERT3U(gen_spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+ /* Falconsiena only has one RSS context */
+ if ((gen_spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) &&
+ gen_spec->efs_rss_context != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ sf_spec->sfs_flags = gen_spec->efs_flags;
+ sf_spec->sfs_dmaq_id = gen_spec->efs_dmaq_id;
+
+ switch (gen_spec->efs_match_flags) {
+ case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT:
+ is_full = B_TRUE;
+ /* FALLTHROUGH */
+ case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT: {
+ uint32_t rhost, host1, host2;
+ uint16_t rport, port1, port2;
+
+ if (gen_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ if (gen_spec->efs_loc_port == 0 ||
+ (is_full && gen_spec->efs_rem_port == 0)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ switch (gen_spec->efs_ip_proto) {
+ case EFX_IPPROTO_TCP:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_TCP_FULL :
+ EFX_SIENA_FILTER_TX_TCP_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_TCP_FULL :
+ EFX_SIENA_FILTER_RX_TCP_WILD);
+ }
+ break;
+ case EFX_IPPROTO_UDP:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_UDP_FULL :
+ EFX_SIENA_FILTER_TX_UDP_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_UDP_FULL :
+ EFX_SIENA_FILTER_RX_UDP_WILD);
+ }
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail4;
+ }
+ /*
+ * The filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * addresses (zero for a wildcard).
+ */
+ rhost = is_full ? gen_spec->efs_rem_host.eo_u32[0] : 0;
+ rport = is_full ? gen_spec->efs_rem_port : 0;
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ host1 = gen_spec->efs_loc_host.eo_u32[0];
+ host2 = rhost;
+ } else {
+ host1 = rhost;
+ host2 = gen_spec->efs_loc_host.eo_u32[0];
+ }
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ if (sf_spec->sfs_type ==
+ EFX_SIENA_FILTER_TX_UDP_WILD) {
+ port1 = rport;
+ port2 = gen_spec->efs_loc_port;
+ } else {
+ port1 = gen_spec->efs_loc_port;
+ port2 = rport;
+ }
+ } else {
+ if (sf_spec->sfs_type ==
+ EFX_SIENA_FILTER_RX_UDP_WILD) {
+ port1 = gen_spec->efs_loc_port;
+ port2 = rport;
+ } else {
+ port1 = rport;
+ port2 = gen_spec->efs_loc_port;
+ }
+ }
+ sf_spec->sfs_dword[0] = (host1 << 16) | port1;
+ sf_spec->sfs_dword[1] = (port2 << 16) | (host1 >> 16);
+ sf_spec->sfs_dword[2] = host2;
+ break;
+ }
+
+ case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
+ is_full = B_TRUE;
+ /* FALLTHROUGH */
+ case EFX_FILTER_MATCH_LOC_MAC:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_MAC_FULL :
+ EFX_SIENA_FILTER_TX_MAC_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_MAC_FULL :
+ EFX_SIENA_FILTER_RX_MAC_WILD);
+ }
+ sf_spec->sfs_dword[0] = is_full ? gen_spec->efs_outer_vid : 0;
+ sf_spec->sfs_dword[1] =
+ gen_spec->efs_loc_mac[2] << 24 |
+ gen_spec->efs_loc_mac[3] << 16 |
+ gen_spec->efs_loc_mac[4] << 8 |
+ gen_spec->efs_loc_mac[5];
+ sf_spec->sfs_dword[2] =
+ gen_spec->efs_loc_mac[0] << 8 |
+ gen_spec->efs_loc_mac[1];
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple.
+ */
+static uint16_t
+siena_filter_tbl_hash(
+ __in uint32_t key)
+{
+ uint16_t tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ (uint16_t)(key >> 16);
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ (uint16_t)(key & 0xffff);
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+
+ return (tmp);
+}
+
+/*
+ * To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash.
+ */
+static uint16_t
+siena_filter_tbl_increment(
+ __in uint32_t key)
+{
+ return ((uint16_t)(key * 2 - 1));
+}
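+
+/*
+ * The increment (key * 2 - 1) is always odd and hence coprime with the
+ * power-of-two table size, so the probe sequence visits every slot before
+ * repeating.  For illustration, an increment of 5 gives probes at hash,
+ * hash + 5, hash + 10, ... (all taken modulo the table size).
+ */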
+
+static __checkReturn boolean_t
+siena_filter_test_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ return ((sftp->sft_bitmap[index / 32] & (1 << (index % 32))) != 0);
+}
+
+static void
+siena_filter_set_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ sftp->sft_bitmap[index / 32] |= (1 << (index % 32));
+ ++sftp->sft_used;
+}
+
+static void
+siena_filter_clear_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ sftp->sft_bitmap[index / 32] &= ~(1 << (index % 32));
+
+ --sftp->sft_used;
+}
+
+
+static siena_filter_tbl_id_t
+siena_filter_tbl_id(
+ __in siena_filter_type_t type)
+{
+ siena_filter_tbl_id_t tbl_id;
+
+ switch (type) {
+ case EFX_SIENA_FILTER_RX_TCP_FULL:
+ case EFX_SIENA_FILTER_RX_TCP_WILD:
+ case EFX_SIENA_FILTER_RX_UDP_FULL:
+ case EFX_SIENA_FILTER_RX_UDP_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_RX_IP;
+ break;
+
+ case EFX_SIENA_FILTER_RX_MAC_FULL:
+ case EFX_SIENA_FILTER_RX_MAC_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_RX_MAC;
+ break;
+
+ case EFX_SIENA_FILTER_TX_TCP_FULL:
+ case EFX_SIENA_FILTER_TX_TCP_WILD:
+ case EFX_SIENA_FILTER_TX_UDP_FULL:
+ case EFX_SIENA_FILTER_TX_UDP_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_TX_IP;
+ break;
+
+ case EFX_SIENA_FILTER_TX_MAC_FULL:
+ case EFX_SIENA_FILTER_TX_MAC_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_TX_MAC;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ tbl_id = EFX_SIENA_FILTER_NTBLS;
+ break;
+ }
+ return (tbl_id);
+}
+
+static void
+siena_filter_reset_search_depth(
+ __inout siena_filter_t *sfp,
+ __in siena_filter_tbl_id_t tbl_id)
+{
+ switch (tbl_id) {
+ case EFX_SIENA_FILTER_TBL_RX_IP:
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_RX_MAC:
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_TX_IP:
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_TX_MAC:
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] = 0;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+}
+
+static void
+siena_filter_push_rx_limits(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_FULL_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_WILD_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_FULL_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_WILD_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC].sft_size) {
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+}
+
+static void
+siena_filter_push_tx_limits(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP].sft_size != 0) {
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC].sft_size != 0) {
+ EFX_SET_OWORD_FIELD(
+ oword, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ oword, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+}
+
+/* Build a filter entry and return its n-tuple key. */
+static __checkReturn uint32_t
+siena_filter_build(
+ __out efx_oword_t *filter,
+ __in siena_filter_spec_t *spec)
+{
+ uint32_t dword3;
+ uint32_t key;
+ uint8_t type = spec->sfs_type;
+ uint32_t flags = spec->sfs_flags;
+
+ switch (siena_filter_tbl_id(type)) {
+ case EFX_SIENA_FILTER_TBL_RX_IP: {
+ boolean_t is_udp = (type == EFX_SIENA_FILTER_RX_UDP_FULL ||
+ type == EFX_SIENA_FILTER_RX_UDP_WILD);
+ EFX_POPULATE_OWORD_7(*filter,
+ FRF_BZ_RSS_EN,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
+ FRF_BZ_SCATTER_EN,
+ (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
+ FRF_AZ_TCP_UDP, is_udp,
+ FRF_AZ_RXQ_ID, spec->sfs_dmaq_id,
+ EFX_DWORD_2, spec->sfs_dword[2],
+ EFX_DWORD_1, spec->sfs_dword[1],
+ EFX_DWORD_0, spec->sfs_dword[0]);
+ dword3 = is_udp;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_RX_MAC: {
+ boolean_t is_wild = (type == EFX_SIENA_FILTER_RX_MAC_WILD);
+ EFX_POPULATE_OWORD_7(*filter,
+ FRF_CZ_RMFT_RSS_EN,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
+ FRF_CZ_RMFT_SCATTER_EN,
+ (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
+ FRF_CZ_RMFT_RXQ_ID, spec->sfs_dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_DW1, spec->sfs_dword[2],
+ FRF_CZ_RMFT_DEST_MAC_DW0, spec->sfs_dword[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->sfs_dword[0]);
+ dword3 = is_wild;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_TX_IP: {
+ boolean_t is_udp = (type == EFX_SIENA_FILTER_TX_UDP_FULL ||
+ type == EFX_SIENA_FILTER_TX_UDP_WILD);
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TIFT_TCP_UDP, is_udp,
+ FRF_CZ_TIFT_TXQ_ID, spec->sfs_dmaq_id,
+ EFX_DWORD_2, spec->sfs_dword[2],
+ EFX_DWORD_1, spec->sfs_dword[1],
+ EFX_DWORD_0, spec->sfs_dword[0]);
+ dword3 = is_udp | spec->sfs_dmaq_id << 1;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_TX_MAC: {
+ boolean_t is_wild = (type == EFX_SIENA_FILTER_TX_MAC_WILD);
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->sfs_dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_DW1, spec->sfs_dword[2],
+ FRF_CZ_TMFT_SRC_MAC_DW0, spec->sfs_dword[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->sfs_dword[0]);
+ dword3 = is_wild | spec->sfs_dmaq_id << 1;
+ break;
+ }
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ return (0);
+ }
+
+ key =
+ spec->sfs_dword[0] ^
+ spec->sfs_dword[1] ^
+ spec->sfs_dword[2] ^
+ dword3;
+
+ return (key);
+}
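+
+/*
+ * The returned key is the XOR of the three spec dwords with a small
+ * discriminator (dword3) derived from the entry type and, for TX entries,
+ * the queue id; this helps otherwise-identical TCP/UDP or full/wildcard
+ * entries hash to different probe sequences.
+ */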
+
+static __checkReturn efx_rc_t
+siena_filter_push_entry(
+ __inout efx_nic_t *enp,
+ __in siena_filter_type_t type,
+ __in int index,
+ __in efx_oword_t *eop)
+{
+ efx_rc_t rc;
+
+ switch (type) {
+ case EFX_SIENA_FILTER_RX_TCP_FULL:
+ case EFX_SIENA_FILTER_RX_TCP_WILD:
+ case EFX_SIENA_FILTER_RX_UDP_FULL:
+ case EFX_SIENA_FILTER_RX_UDP_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_RX_MAC_FULL:
+ case EFX_SIENA_FILTER_RX_MAC_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_TX_TCP_FULL:
+ case EFX_SIENA_FILTER_TX_TCP_WILD:
+ case EFX_SIENA_FILTER_TX_UDP_FULL:
+ case EFX_SIENA_FILTER_TX_UDP_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_TX_MAC_FULL:
+ case EFX_SIENA_FILTER_TX_MAC_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ return (0);
+
+fail1:
+ return (rc);
+}
+
+
+static __checkReturn boolean_t
+siena_filter_equal(
+ __in const siena_filter_spec_t *left,
+ __in const siena_filter_spec_t *right)
+{
+ siena_filter_tbl_id_t tbl_id;
+
+ tbl_id = siena_filter_tbl_id(left->sfs_type);
+
+ if (left->sfs_type != right->sfs_type)
+ return (B_FALSE);
+
+ if (memcmp(left->sfs_dword, right->sfs_dword,
+ sizeof (left->sfs_dword)))
+ return (B_FALSE);
+
+ if ((tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC) &&
+ left->sfs_dmaq_id != right->sfs_dmaq_id)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_search(
+ __in siena_filter_tbl_t *sftp,
+ __in siena_filter_spec_t *spec,
+ __in uint32_t key,
+ __in boolean_t for_insert,
+ __out int *filter_index,
+ __out unsigned int *depth_required)
+{
+ unsigned hash, incr, filter_idx, depth;
+
+ hash = siena_filter_tbl_hash(key);
+ incr = siena_filter_tbl_increment(key);
+
+ filter_idx = hash & (sftp->sft_size - 1);
+ depth = 1;
+
+ for (;;) {
+ /*
+ * Return success if entry is used and matches this spec
+ * or entry is unused and we are trying to insert.
+ */
+ if (siena_filter_test_used(sftp, filter_idx) ?
+ siena_filter_equal(spec,
+ &sftp->sft_spec[filter_idx]) :
+ for_insert) {
+ *filter_index = filter_idx;
+ *depth_required = depth;
+ return (0);
+ }
+
+ /* Return failure if we reached the maximum search depth */
+ if (depth == FILTER_CTL_SRCH_MAX)
+ return (for_insert ? EBUSY : ENOENT);
+
+ filter_idx = (filter_idx + incr) & (sftp->sft_size - 1);
+ ++depth;
+ }
+}
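+
+/*
+ * Open-addressed search: start at the slot selected by the hash of the key
+ * and step by the per-key increment (both masked to the table size) until a
+ * matching entry, a free slot (insert only), or FILTER_CTL_SRCH_MAX probes.
+ * The depth reported to the caller is used by siena_filter_add() to update
+ * the hardware search limits.
+ */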
+
+static void
+siena_filter_clear_entry(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_t *sftp,
+ __in int index)
+{
+ efx_oword_t filter;
+
+ if (siena_filter_test_used(sftp, index)) {
+ siena_filter_clear_used(sftp, index);
+
+ EFX_ZERO_OWORD(filter);
+ (void) siena_filter_push_entry(enp,
+ sftp->sft_spec[index].sfs_type,
+ index, &filter);
+
+ (void) memset(&sftp->sft_spec[index],
+ 0, sizeof (sftp->sft_spec[0]));
+ }
+}
+
+ void
+siena_filter_tbl_clear(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_id_t tbl_id)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
+ int index;
+ int state;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ for (index = 0; index < sftp->sft_size; ++index) {
+ siena_filter_clear_entry(enp, sftp, index);
+ }
+
+ if (sftp->sft_used == 0)
+ siena_filter_reset_search_depth(sfp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_init(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp;
+ siena_filter_tbl_t *sftp;
+ int tbl_id;
+ efx_rc_t rc;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (siena_filter_t), sfp);
+
+ if (!sfp) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_filter.ef_siena_filter = sfp;
+
+ switch (enp->en_family) {
+ case EFX_FAMILY_SIENA:
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_IP];
+ sftp->sft_size = FR_AZ_RX_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC];
+ sftp->sft_size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP];
+ sftp->sft_size = FR_CZ_TX_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC];
+ sftp->sft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ break;
+
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ unsigned int bitmap_size;
+
+ sftp = &sfp->sf_tbl[tbl_id];
+ if (sftp->sft_size == 0)
+ continue;
+
+ EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
+ sizeof (uint32_t));
+ bitmap_size =
+ (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, bitmap_size, sftp->sft_bitmap);
+ if (!sftp->sft_bitmap) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ sftp->sft_size * sizeof (*sftp->sft_spec),
+ sftp->sft_spec);
+ if (!sftp->sft_spec) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ (void) memset(sftp->sft_spec, 0,
+ sftp->sft_size * sizeof (*sftp->sft_spec));
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ siena_filter_fini(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static void
+siena_filter_fini(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (sfp == NULL)
+ return;
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
+ unsigned int bitmap_size;
+
+ EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
+ sizeof (uint32_t));
+ bitmap_size =
+ (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;
+
+ if (sftp->sft_bitmap != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, bitmap_size,
+ sftp->sft_bitmap);
+ sftp->sft_bitmap = NULL;
+ }
+
+ if (sftp->sft_spec != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, sftp->sft_size *
+ sizeof (*sftp->sft_spec), sftp->sft_spec);
+ sftp->sft_spec = NULL;
+ }
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (siena_filter_t),
+ enp->en_filter.ef_siena_filter);
+}
+
+/* Restore filter state after a reset */
+static __checkReturn efx_rc_t
+siena_filter_restore(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ siena_filter_spec_t *spec;
+ efx_oword_t filter;
+ int filter_idx;
+ int state;
+ efx_rc_t rc;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ sftp = &sfp->sf_tbl[tbl_id];
+ for (filter_idx = 0;
+ filter_idx < sftp->sft_size;
+ filter_idx++) {
+ if (!siena_filter_test_used(sftp, filter_idx))
+ continue;
+
+ spec = &sftp->sft_spec[filter_idx];
+ if ((rc = siena_filter_build(&filter, spec)) != 0)
+ goto fail1;
+ if ((rc = siena_filter_push_entry(enp,
+ spec->sfs_type, filter_idx, &filter)) != 0)
+ goto fail2;
+ }
+ }
+
+ siena_filter_push_rx_limits(enp);
+ siena_filter_push_tx_limits(enp);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace)
+{
+ efx_rc_t rc;
+ siena_filter_spec_t sf_spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ siena_filter_spec_t *saved_sf_spec;
+ efx_oword_t filter;
+ int filter_idx;
+ unsigned int depth;
+ int state;
+ uint32_t key;
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
+ goto fail1;
+
+ tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
+ sftp = &sfp->sf_tbl[tbl_id];
+
+ if (sftp->sft_size == 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ key = siena_filter_build(&filter, &sf_spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = siena_filter_search(sftp, &sf_spec, key, B_TRUE,
+ &filter_idx, &depth);
+ if (rc != 0)
+ goto fail3;
+
+ EFSYS_ASSERT3U(filter_idx, <, sftp->sft_size);
+ saved_sf_spec = &sftp->sft_spec[filter_idx];
+
+ if (siena_filter_test_used(sftp, filter_idx)) {
+ if (may_replace == B_FALSE) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ siena_filter_set_used(sftp, filter_idx);
+ *saved_sf_spec = sf_spec;
+
+ if (sfp->sf_depth[sf_spec.sfs_type] < depth) {
+ sfp->sf_depth[sf_spec.sfs_type] = depth;
+ if (tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC)
+ siena_filter_push_tx_limits(enp);
+ else
+ siena_filter_push_rx_limits(enp);
+ }
+
+ (void) siena_filter_push_entry(enp, sf_spec.sfs_type,
+ filter_idx, &filter);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+fail3:
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ efx_rc_t rc;
+ siena_filter_spec_t sf_spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ efx_oword_t filter;
+ int filter_idx;
+ unsigned int depth;
+ int state;
+ uint32_t key;
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
+ goto fail1;
+
+ tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
+ sftp = &sfp->sf_tbl[tbl_id];
+
+ key = siena_filter_build(&filter, &sf_spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = siena_filter_search(sftp, &sf_spec, key, B_FALSE,
+ &filter_idx, &depth);
+ if (rc != 0)
+ goto fail2;
+
+ siena_filter_clear_entry(enp, sftp, filter_idx);
+ if (sftp->sft_used == 0)
+ siena_filter_reset_search_depth(sfp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (0);
+
+fail2:
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#define MAX_SUPPORTED 4
+
+static __checkReturn efx_rc_t
+siena_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out uint32_t *list,
+ __out size_t *length)
+{
+ int index = 0;
+ uint32_t rx_matches[MAX_SUPPORTED];
+ efx_rc_t rc;
+
+ if (list == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+
+ if (enp->en_features & EFX_FEATURE_MAC_HEADER_FILTERS) {
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC;
+
+ rx_matches[index++] = EFX_FILTER_MATCH_LOC_MAC;
+ }
+
+ EFSYS_ASSERT3U(index, <=, MAX_SUPPORTED);
+
+ *length = index;
+	(void) memcpy(list, rx_matches, *length * sizeof (rx_matches[0]));
+
+ return (0);
+
+fail1:
+
+ return (rc);
+}
+
+#undef MAX_SUPPORTED
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_FILTER */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_hash.c b/usr/src/uts/common/io/sfxge/common/efx_hash.c
new file mode 100644
index 0000000000..3d8fe2ea4e
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_hash.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2006 Bob Jenkins
+ *
+ * Derived from public domain source, see
+ * <http://burtleburtle.net/bob/c/lookup3.c>:
+ *
+ * "lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * These are functions for producing 32-bit hashes for hash table lookup...
+ * ...You can use this free for any purpose. It's in the public domain.
+ * It has no warranty."
+ *
+ * Copyright (c) 2014-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+/* Hash initial value */
+#define EFX_HASH_INITIAL_VALUE 0xdeadbeef
+
+/*
+ * Rotate a 32-bit value left
+ *
+ * Allow platform to provide an intrinsic or optimised routine and
+ * fall-back to a simple shift based implementation.
+ */
+#if EFSYS_HAS_ROTL_DWORD
+
+#define EFX_HASH_ROTATE(_value, _shift) \
+ EFSYS_ROTL_DWORD(_value, _shift)
+
+#else
+
+#define EFX_HASH_ROTATE(_value, _shift) \
+ (((_value) << (_shift)) | ((_value) >> (32 - (_shift))))
+
+#endif
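+
+/*
+ * Note: the fall-back rotate assumes 0 < _shift < 32 (a shift count of zero
+ * or 32 would be undefined behaviour here); all shift counts used by
+ * EFX_HASH_MIX() and EFX_HASH_FINALISE() below are within that range.
+ */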
+
+/* Mix three 32-bit values reversibly */
+#define EFX_HASH_MIX(_a, _b, _c) \
+ do { \
+ _a -= _c; \
+ _a ^= EFX_HASH_ROTATE(_c, 4); \
+ _c += _b; \
+ _b -= _a; \
+ _b ^= EFX_HASH_ROTATE(_a, 6); \
+ _a += _c; \
+ _c -= _b; \
+ _c ^= EFX_HASH_ROTATE(_b, 8); \
+ _b += _a; \
+ _a -= _c; \
+ _a ^= EFX_HASH_ROTATE(_c, 16); \
+ _c += _b; \
+ _b -= _a; \
+ _b ^= EFX_HASH_ROTATE(_a, 19); \
+ _a += _c; \
+ _c -= _b; \
+ _c ^= EFX_HASH_ROTATE(_b, 4); \
+ _b += _a; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Final mixing of three 32-bit values into one (_c) */
+#define EFX_HASH_FINALISE(_a, _b, _c) \
+ do { \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 14); \
+ _a ^= _c; \
+ _a -= EFX_HASH_ROTATE(_c, 11); \
+ _b ^= _a; \
+ _b -= EFX_HASH_ROTATE(_a, 25); \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 16); \
+ _a ^= _c; \
+ _a -= EFX_HASH_ROTATE(_c, 4); \
+ _b ^= _a; \
+ _b -= EFX_HASH_ROTATE(_a, 14); \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 24); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
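+
+/*
+ * lookup3 structure: EFX_HASH_MIX() is applied to each full block of three
+ * dwords (or twelve bytes) of input, and EFX_HASH_FINALISE() is applied once
+ * to the remaining partial block, leaving the result in _c.
+ */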
+
+
+/* Produce a 32-bit hash from 32-bit aligned input */
+ __checkReturn uint32_t
+efx_hash_dwords(
+ __in_ecount(count) uint32_t const *input,
+ __in size_t count,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE +
+ (((uint32_t)count) * sizeof (uint32_t)) + init;
+
+ /* Handle all but the last three dwords of the input */
+ while (count > 3) {
+ a += input[0];
+ b += input[1];
+ c += input[2];
+ EFX_HASH_MIX(a, b, c);
+
+ count -= 3;
+ input += 3;
+ }
+
+ /* Handle the left-overs */
+ switch (count) {
+ case 3:
+ c += input[2];
+ /* FALLTHROUGH */
+ case 2:
+ b += input[1];
+ /* FALLTHROUGH */
+ case 1:
+ a += input[0];
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if count parameter was zero */
+ break;
+ }
+
+ return (c);
+}
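+
+/*
+ * Illustrative use (the tuple field names are examples only): hashing an
+ * IPv4 4-tuple held in three dwords, optionally chaining from a previous
+ * hash value via the 'init' argument:
+ *
+ *	uint32_t tuple[3];
+ *
+ *	tuple[0] = ip_saddr;
+ *	tuple[1] = ip_daddr;
+ *	tuple[2] = ((uint32_t)sport << 16) | dport;
+ *	hash = efx_hash_dwords(tuple, 3, 0);
+ */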
+
+#if EFSYS_IS_BIG_ENDIAN
+
+/* Produce a 32-bit hash from arbitrarily aligned input */
+ __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE + (uint32_t)length + init;
+
+ /* Handle all but the last twelve bytes of the input */
+ while (length > 12) {
+ a += ((uint32_t)input[0]) << 24;
+ a += ((uint32_t)input[1]) << 16;
+ a += ((uint32_t)input[2]) << 8;
+ a += ((uint32_t)input[3]);
+ b += ((uint32_t)input[4]) << 24;
+ b += ((uint32_t)input[5]) << 16;
+ b += ((uint32_t)input[6]) << 8;
+ b += ((uint32_t)input[7]);
+ c += ((uint32_t)input[8]) << 24;
+ c += ((uint32_t)input[9]) << 16;
+ c += ((uint32_t)input[10]) << 8;
+ c += ((uint32_t)input[11]);
+ EFX_HASH_MIX(a, b, c);
+ length -= 12;
+ input += 12;
+ }
+
+ /* Handle the left-overs */
+ switch (length) {
+ case 12:
+ c += ((uint32_t)input[11]);
+ /* Fall-through */
+ case 11:
+ c += ((uint32_t)input[10]) << 8;
+ /* Fall-through */
+ case 10:
+ c += ((uint32_t)input[9]) << 16;
+ /* Fall-through */
+ case 9:
+ c += ((uint32_t)input[8]) << 24;
+ /* Fall-through */
+ case 8:
+ b += ((uint32_t)input[7]);
+ /* Fall-through */
+ case 7:
+ b += ((uint32_t)input[6]) << 8;
+ /* Fall-through */
+ case 6:
+ b += ((uint32_t)input[5]) << 16;
+ /* Fall-through */
+ case 5:
+ b += ((uint32_t)input[4]) << 24;
+ /* Fall-through */
+ case 4:
+ a += ((uint32_t)input[3]);
+ /* Fall-through */
+ case 3:
+ a += ((uint32_t)input[2]) << 8;
+ /* Fall-through */
+ case 2:
+ a += ((uint32_t)input[1]) << 16;
+ /* Fall-through */
+ case 1:
+ a += ((uint32_t)input[0]) << 24;
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if length parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
+#elif EFSYS_IS_LITTLE_ENDIAN
+
+/* Produce a 32-bit hash from arbitrarily aligned input */
+ __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE + (uint32_t)length + init;
+
+ /* Handle all but the last twelve bytes of the input */
+ while (length > 12) {
+ a += ((uint32_t)input[0]);
+ a += ((uint32_t)input[1]) << 8;
+ a += ((uint32_t)input[2]) << 16;
+ a += ((uint32_t)input[3]) << 24;
+ b += ((uint32_t)input[4]);
+ b += ((uint32_t)input[5]) << 8;
+ b += ((uint32_t)input[6]) << 16;
+ b += ((uint32_t)input[7]) << 24;
+ c += ((uint32_t)input[8]);
+ c += ((uint32_t)input[9]) << 8;
+ c += ((uint32_t)input[10]) << 16;
+ c += ((uint32_t)input[11]) << 24;
+ EFX_HASH_MIX(a, b, c);
+ length -= 12;
+ input += 12;
+ }
+
+ /* Handle the left-overs */
+ switch (length) {
+ case 12:
+ c += ((uint32_t)input[11]) << 24;
+ /* FALLTHROUGH */
+ case 11:
+ c += ((uint32_t)input[10]) << 16;
+ /* FALLTHROUGH */
+ case 10:
+ c += ((uint32_t)input[9]) << 8;
+ /* FALLTHROUGH */
+ case 9:
+ c += ((uint32_t)input[8]);
+ /* FALLTHROUGH */
+ case 8:
+ b += ((uint32_t)input[7]) << 24;
+ /* FALLTHROUGH */
+ case 7:
+ b += ((uint32_t)input[6]) << 16;
+ /* FALLTHROUGH */
+ case 6:
+ b += ((uint32_t)input[5]) << 8;
+ /* FALLTHROUGH */
+ case 5:
+ b += ((uint32_t)input[4]);
+ /* FALLTHROUGH */
+ case 4:
+ a += ((uint32_t)input[3]) << 24;
+ /* FALLTHROUGH */
+ case 3:
+ a += ((uint32_t)input[2]) << 16;
+ /* FALLTHROUGH */
+ case 2:
+ a += ((uint32_t)input[1]) << 8;
+ /* FALLTHROUGH */
+ case 1:
+ a += ((uint32_t)input[0]);
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if length parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
+#else
+
+#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set"
+
+#endif
diff --git a/usr/src/uts/common/io/sfxge/common/efx_impl.h b/usr/src/uts/common/io/sfxge/common/efx_impl.h
new file mode 100644
index 0000000000..a23d51182e
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_impl.h
@@ -0,0 +1,1166 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_IMPL_H
+#define _SYS_EFX_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+/* FIXME: Add definition for driver generated software events */
+#ifndef ESE_DZ_EV_CODE_DRV_GEN_EV
+#define ESE_DZ_EV_CODE_DRV_GEN_EV FSE_AZ_EV_CODE_DRV_GEN_EV
+#endif
+
+
+#if EFSYS_OPT_SIENA
+#include "siena_impl.h"
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+#include "hunt_impl.h"
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+#include "medford_impl.h"
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#include "ef10_impl.h"
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFX_MOD_MCDI 0x00000001
+#define EFX_MOD_PROBE 0x00000002
+#define EFX_MOD_NVRAM 0x00000004
+#define EFX_MOD_VPD 0x00000008
+#define EFX_MOD_NIC 0x00000010
+#define EFX_MOD_INTR 0x00000020
+#define EFX_MOD_EV 0x00000040
+#define EFX_MOD_RX 0x00000080
+#define EFX_MOD_TX 0x00000100
+#define EFX_MOD_PORT 0x00000200
+#define EFX_MOD_MON 0x00000400
+#define EFX_MOD_WOL 0x00000800
+#define EFX_MOD_FILTER 0x00001000
+#define EFX_MOD_LIC 0x00002000
+
+#define EFX_RESET_PHY 0x00000001
+#define EFX_RESET_RXQ_ERR 0x00000002
+#define EFX_RESET_TXQ_ERR 0x00000004
+
+typedef enum efx_mac_type_e {
+ EFX_MAC_INVALID = 0,
+ EFX_MAC_SIENA,
+ EFX_MAC_HUNTINGTON,
+ EFX_MAC_MEDFORD,
+ EFX_MAC_NTYPES
+} efx_mac_type_t;
+
+typedef struct efx_ev_ops_s {
+ efx_rc_t (*eevo_init)(efx_nic_t *);
+ void (*eevo_fini)(efx_nic_t *);
+ efx_rc_t (*eevo_qcreate)(efx_nic_t *, unsigned int,
+ efsys_mem_t *, size_t, uint32_t,
+ efx_evq_t *);
+ void (*eevo_qdestroy)(efx_evq_t *);
+ efx_rc_t (*eevo_qprime)(efx_evq_t *, unsigned int);
+ void (*eevo_qpost)(efx_evq_t *, uint16_t);
+ efx_rc_t (*eevo_qmoderate)(efx_evq_t *, unsigned int);
+#if EFSYS_OPT_QSTATS
+ void (*eevo_qstats_update)(efx_evq_t *, efsys_stat_t *);
+#endif
+} efx_ev_ops_t;
+
+typedef struct efx_tx_ops_s {
+ efx_rc_t (*etxo_init)(efx_nic_t *);
+ void (*etxo_fini)(efx_nic_t *);
+ efx_rc_t (*etxo_qcreate)(efx_nic_t *,
+ unsigned int, unsigned int,
+ efsys_mem_t *, size_t,
+ uint32_t, uint16_t,
+ efx_evq_t *, efx_txq_t *,
+ unsigned int *);
+ void (*etxo_qdestroy)(efx_txq_t *);
+ efx_rc_t (*etxo_qpost)(efx_txq_t *, efx_buffer_t *,
+ unsigned int, unsigned int,
+ unsigned int *);
+ void (*etxo_qpush)(efx_txq_t *, unsigned int, unsigned int);
+ efx_rc_t (*etxo_qpace)(efx_txq_t *, unsigned int);
+ efx_rc_t (*etxo_qflush)(efx_txq_t *);
+ void (*etxo_qenable)(efx_txq_t *);
+ efx_rc_t (*etxo_qpio_enable)(efx_txq_t *);
+ void (*etxo_qpio_disable)(efx_txq_t *);
+	efx_rc_t	(*etxo_qpio_write)(efx_txq_t *, uint8_t *, size_t,
+ size_t);
+ efx_rc_t (*etxo_qpio_post)(efx_txq_t *, size_t, unsigned int,
+ unsigned int *);
+ efx_rc_t (*etxo_qdesc_post)(efx_txq_t *, efx_desc_t *,
+ unsigned int, unsigned int,
+ unsigned int *);
+ void (*etxo_qdesc_dma_create)(efx_txq_t *, efsys_dma_addr_t,
+ size_t, boolean_t,
+ efx_desc_t *);
+ void (*etxo_qdesc_tso_create)(efx_txq_t *, uint16_t,
+ uint32_t, uint8_t,
+ efx_desc_t *);
+ void (*etxo_qdesc_tso2_create)(efx_txq_t *, uint16_t,
+ uint32_t, uint16_t,
+ efx_desc_t *, int);
+ void (*etxo_qdesc_vlantci_create)(efx_txq_t *, uint16_t,
+ efx_desc_t *);
+#if EFSYS_OPT_QSTATS
+ void (*etxo_qstats_update)(efx_txq_t *,
+ efsys_stat_t *);
+#endif
+} efx_tx_ops_t;
+
+typedef struct efx_rx_ops_s {
+ efx_rc_t (*erxo_init)(efx_nic_t *);
+ void (*erxo_fini)(efx_nic_t *);
+#if EFSYS_OPT_RX_SCATTER
+ efx_rc_t (*erxo_scatter_enable)(efx_nic_t *, unsigned int);
+#endif
+#if EFSYS_OPT_RX_SCALE
+ efx_rc_t (*erxo_scale_mode_set)(efx_nic_t *, efx_rx_hash_alg_t,
+ efx_rx_hash_type_t, boolean_t);
+ efx_rc_t (*erxo_scale_key_set)(efx_nic_t *, uint8_t *, size_t);
+ efx_rc_t (*erxo_scale_tbl_set)(efx_nic_t *, unsigned int *,
+ size_t);
+ uint32_t (*erxo_prefix_hash)(efx_nic_t *, efx_rx_hash_alg_t,
+ uint8_t *);
+#endif /* EFSYS_OPT_RX_SCALE */
+ efx_rc_t (*erxo_prefix_pktlen)(efx_nic_t *, uint8_t *,
+ uint16_t *);
+ void (*erxo_qpost)(efx_rxq_t *, efsys_dma_addr_t *, size_t,
+ unsigned int, unsigned int,
+ unsigned int);
+ void (*erxo_qpush)(efx_rxq_t *, unsigned int, unsigned int *);
+ efx_rc_t (*erxo_qflush)(efx_rxq_t *);
+ void (*erxo_qenable)(efx_rxq_t *);
+ efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int,
+ unsigned int, efx_rxq_type_t,
+ efsys_mem_t *, size_t, uint32_t,
+ efx_evq_t *, efx_rxq_t *);
+ void (*erxo_qdestroy)(efx_rxq_t *);
+} efx_rx_ops_t;
+
+typedef struct efx_mac_ops_s {
+ efx_rc_t (*emo_poll)(efx_nic_t *, efx_link_mode_t *);
+ efx_rc_t (*emo_up)(efx_nic_t *, boolean_t *);
+ efx_rc_t (*emo_addr_set)(efx_nic_t *);
+ efx_rc_t (*emo_pdu_set)(efx_nic_t *);
+ efx_rc_t (*emo_reconfigure)(efx_nic_t *);
+ efx_rc_t (*emo_multicast_list_set)(efx_nic_t *);
+ efx_rc_t (*emo_filter_default_rxq_set)(efx_nic_t *,
+ efx_rxq_t *, boolean_t);
+ void (*emo_filter_default_rxq_clear)(efx_nic_t *);
+#if EFSYS_OPT_LOOPBACK
+ efx_rc_t (*emo_loopback_set)(efx_nic_t *, efx_link_mode_t,
+ efx_loopback_type_t);
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ efx_rc_t (*emo_stats_upload)(efx_nic_t *, efsys_mem_t *);
+ efx_rc_t (*emo_stats_periodic)(efx_nic_t *, efsys_mem_t *,
+ uint16_t, boolean_t);
+ efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ efsys_stat_t *, uint32_t *);
+#endif /* EFSYS_OPT_MAC_STATS */
+} efx_mac_ops_t;
+
+typedef struct efx_phy_ops_s {
+ efx_rc_t (*epo_power)(efx_nic_t *, boolean_t); /* optional */
+ efx_rc_t (*epo_reset)(efx_nic_t *);
+ efx_rc_t (*epo_reconfigure)(efx_nic_t *);
+ efx_rc_t (*epo_verify)(efx_nic_t *);
+ efx_rc_t (*epo_oui_get)(efx_nic_t *, uint32_t *);
+#if EFSYS_OPT_PHY_STATS
+ efx_rc_t (*epo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ uint32_t *);
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ efx_rc_t (*epo_bist_enable_offline)(efx_nic_t *);
+ efx_rc_t (*epo_bist_start)(efx_nic_t *, efx_bist_type_t);
+ efx_rc_t (*epo_bist_poll)(efx_nic_t *, efx_bist_type_t,
+ efx_bist_result_t *, uint32_t *,
+ unsigned long *, size_t);
+ void (*epo_bist_stop)(efx_nic_t *, efx_bist_type_t);
+#endif /* EFSYS_OPT_BIST */
+} efx_phy_ops_t;
+
+#if EFSYS_OPT_FILTER
+typedef struct efx_filter_ops_s {
+ efx_rc_t (*efo_init)(efx_nic_t *);
+ void (*efo_fini)(efx_nic_t *);
+ efx_rc_t (*efo_restore)(efx_nic_t *);
+ efx_rc_t (*efo_add)(efx_nic_t *, efx_filter_spec_t *,
+ boolean_t may_replace);
+ efx_rc_t (*efo_delete)(efx_nic_t *, efx_filter_spec_t *);
+ efx_rc_t (*efo_supported_filters)(efx_nic_t *, uint32_t *, size_t *);
+ efx_rc_t (*efo_reconfigure)(efx_nic_t *, uint8_t const *, boolean_t,
+ boolean_t, boolean_t, boolean_t,
+ uint8_t const *, uint32_t);
+} efx_filter_ops_t;
+
+extern __checkReturn efx_rc_t
+efx_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count);
+
+#endif /* EFSYS_OPT_FILTER */
+
+
+typedef struct efx_port_s {
+ efx_mac_type_t ep_mac_type;
+ uint32_t ep_phy_type;
+ uint8_t ep_port;
+ uint32_t ep_mac_pdu;
+ uint8_t ep_mac_addr[6];
+ efx_link_mode_t ep_link_mode;
+ boolean_t ep_all_unicst;
+ boolean_t ep_mulcst;
+ boolean_t ep_all_mulcst;
+ boolean_t ep_brdcst;
+ unsigned int ep_fcntl;
+ boolean_t ep_fcntl_autoneg;
+ efx_oword_t ep_multicst_hash[2];
+ uint8_t ep_mulcst_addr_list[EFX_MAC_ADDR_LEN *
+ EFX_MAC_MULTICAST_LIST_MAX];
+ uint32_t ep_mulcst_addr_count;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t ep_loopback_type;
+ efx_link_mode_t ep_loopback_link_mode;
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_PHY_FLAGS
+ uint32_t ep_phy_flags;
+#endif /* EFSYS_OPT_PHY_FLAGS */
+#if EFSYS_OPT_PHY_LED_CONTROL
+ efx_phy_led_mode_t ep_phy_led_mode;
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+ efx_phy_media_type_t ep_fixed_port_type;
+ efx_phy_media_type_t ep_module_type;
+ uint32_t ep_adv_cap_mask;
+ uint32_t ep_lp_cap_mask;
+ uint32_t ep_default_adv_cap_mask;
+ uint32_t ep_phy_cap_mask;
+ boolean_t ep_mac_drain;
+ boolean_t ep_mac_stats_pending;
+#if EFSYS_OPT_BIST
+ efx_bist_type_t ep_current_bist;
+#endif
+ const efx_mac_ops_t *ep_emop;
+ const efx_phy_ops_t *ep_epop;
+} efx_port_t;
+
+typedef struct efx_mon_ops_s {
+#if EFSYS_OPT_MON_STATS
+ efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ efx_mon_stat_value_t *);
+#endif /* EFSYS_OPT_MON_STATS */
+} efx_mon_ops_t;
+
+typedef struct efx_mon_s {
+ efx_mon_type_t em_type;
+ const efx_mon_ops_t *em_emop;
+} efx_mon_t;
+
+typedef struct efx_intr_ops_s {
+ efx_rc_t (*eio_init)(efx_nic_t *, efx_intr_type_t, efsys_mem_t *);
+ void (*eio_enable)(efx_nic_t *);
+ void (*eio_disable)(efx_nic_t *);
+ void (*eio_disable_unlocked)(efx_nic_t *);
+ efx_rc_t (*eio_trigger)(efx_nic_t *, unsigned int);
+ void (*eio_status_line)(efx_nic_t *, boolean_t *, uint32_t *);
+ void (*eio_status_message)(efx_nic_t *, unsigned int,
+ boolean_t *);
+ void (*eio_fatal)(efx_nic_t *);
+ void (*eio_fini)(efx_nic_t *);
+} efx_intr_ops_t;
+
+typedef struct efx_intr_s {
+ const efx_intr_ops_t *ei_eiop;
+ efsys_mem_t *ei_esmp;
+ efx_intr_type_t ei_type;
+ unsigned int ei_level;
+} efx_intr_t;
+
+typedef struct efx_nic_ops_s {
+ efx_rc_t (*eno_probe)(efx_nic_t *);
+ efx_rc_t (*eno_board_cfg)(efx_nic_t *);
+ efx_rc_t (*eno_set_drv_limits)(efx_nic_t *, efx_drv_limits_t*);
+ efx_rc_t (*eno_reset)(efx_nic_t *);
+ efx_rc_t (*eno_init)(efx_nic_t *);
+ efx_rc_t (*eno_get_vi_pool)(efx_nic_t *, uint32_t *);
+ efx_rc_t (*eno_get_bar_region)(efx_nic_t *, efx_nic_region_t,
+ uint32_t *, size_t *);
+#if EFSYS_OPT_DIAG
+ efx_rc_t (*eno_register_test)(efx_nic_t *);
+#endif /* EFSYS_OPT_DIAG */
+ void (*eno_fini)(efx_nic_t *);
+ void (*eno_unprobe)(efx_nic_t *);
+} efx_nic_ops_t;
+
+#ifndef EFX_TXQ_LIMIT_TARGET
+#define EFX_TXQ_LIMIT_TARGET 259
+#endif
+#ifndef EFX_RXQ_LIMIT_TARGET
+#define EFX_RXQ_LIMIT_TARGET 512
+#endif
+#ifndef EFX_TXQ_DC_SIZE
+#define EFX_TXQ_DC_SIZE 1 /* 16 descriptors */
+#endif
+#ifndef EFX_RXQ_DC_SIZE
+#define EFX_RXQ_DC_SIZE 3 /* 64 descriptors */
+#endif
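+
+/*
+ * The descriptor cache sizes appear to be log2-encoded (a value of n gives
+ * 8 << n descriptors), which matches the annotated defaults of 16 and 64
+ * descriptors above.
+ */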
+
+#if EFSYS_OPT_FILTER
+
+typedef struct siena_filter_spec_s {
+ uint8_t sfs_type;
+ uint32_t sfs_flags;
+ uint32_t sfs_dmaq_id;
+ uint32_t sfs_dword[3];
+} siena_filter_spec_t;
+
+typedef enum siena_filter_type_e {
+ EFX_SIENA_FILTER_RX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_RX_TCP_WILD, /* TCP/IPv4 {dIP,dTCP, -, -} */
+ EFX_SIENA_FILTER_RX_UDP_FULL, /* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */
+ EFX_SIENA_FILTER_RX_UDP_WILD, /* UDP/IPv4 {dIP,dUDP, -, -} */
+ EFX_SIENA_FILTER_RX_MAC_FULL, /* Ethernet {dMAC,VLAN} */
+ EFX_SIENA_FILTER_RX_MAC_WILD, /* Ethernet {dMAC, -} */
+
+ EFX_SIENA_FILTER_TX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_TCP_WILD, /* TCP/IPv4 { -, -,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_UDP_FULL, /* UDP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_UDP_WILD, /* UDP/IPv4 { -, -,sIP,sUDP} */
+ EFX_SIENA_FILTER_TX_MAC_FULL, /* Ethernet {sMAC,VLAN} */
+ EFX_SIENA_FILTER_TX_MAC_WILD, /* Ethernet {sMAC, -} */
+
+ EFX_SIENA_FILTER_NTYPES
+} siena_filter_type_t;
+
+typedef enum siena_filter_tbl_id_e {
+ EFX_SIENA_FILTER_TBL_RX_IP = 0,
+ EFX_SIENA_FILTER_TBL_RX_MAC,
+ EFX_SIENA_FILTER_TBL_TX_IP,
+ EFX_SIENA_FILTER_TBL_TX_MAC,
+ EFX_SIENA_FILTER_NTBLS
+} siena_filter_tbl_id_t;
+
+typedef struct siena_filter_tbl_s {
+ int sft_size; /* number of entries */
+ int sft_used; /* active count */
+ uint32_t *sft_bitmap; /* active bitmap */
+ siena_filter_spec_t *sft_spec; /* array of saved specs */
+} siena_filter_tbl_t;
+
+typedef struct siena_filter_s {
+ siena_filter_tbl_t sf_tbl[EFX_SIENA_FILTER_NTBLS];
+ unsigned int sf_depth[EFX_SIENA_FILTER_NTYPES];
+} siena_filter_t;
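+
+/*
+ * sf_depth[] tracks the deepest probe position currently occupied for each
+ * filter type; siena_filter_push_rx_limits() and
+ * siena_filter_push_tx_limits() program these depths (plus the fudge
+ * factors) into the hardware search-limit registers.
+ */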
+
+typedef struct efx_filter_s {
+#if EFSYS_OPT_SIENA
+ siena_filter_t *ef_siena_filter;
+#endif /* EFSYS_OPT_SIENA */
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ ef10_filter_table_t *ef_ef10_filter_table;
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+} efx_filter_t;
+
+extern void
+siena_filter_tbl_clear(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_id_t tbl);
+
+#endif /* EFSYS_OPT_FILTER */
+
+#if EFSYS_OPT_MCDI
+
+typedef struct efx_mcdi_ops_s {
+ efx_rc_t (*emco_init)(efx_nic_t *, const efx_mcdi_transport_t *);
+ void (*emco_send_request)(efx_nic_t *, void *, size_t,
+ void *, size_t);
+ efx_rc_t (*emco_poll_reboot)(efx_nic_t *);
+ boolean_t (*emco_poll_response)(efx_nic_t *);
+ void (*emco_read_response)(efx_nic_t *, void *, size_t, size_t);
+ void (*emco_fini)(efx_nic_t *);
+ efx_rc_t (*emco_feature_supported)(efx_nic_t *, efx_mcdi_feature_id_t, boolean_t *);
+} efx_mcdi_ops_t;
+
+typedef struct efx_mcdi_s {
+ const efx_mcdi_ops_t *em_emcop;
+ const efx_mcdi_transport_t *em_emtp;
+ efx_mcdi_iface_t em_emip;
+} efx_mcdi_t;
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_NVRAM
+typedef struct efx_nvram_ops_s {
+#if EFSYS_OPT_DIAG
+ efx_rc_t (*envo_test)(efx_nic_t *);
+#endif /* EFSYS_OPT_DIAG */
+ efx_rc_t (*envo_type_to_partn)(efx_nic_t *, efx_nvram_type_t,
+ uint32_t *);
+ efx_rc_t (*envo_partn_size)(efx_nic_t *, uint32_t, size_t *);
+ efx_rc_t (*envo_partn_rw_start)(efx_nic_t *, uint32_t, size_t *);
+ efx_rc_t (*envo_partn_read)(efx_nic_t *, uint32_t,
+ unsigned int, caddr_t, size_t);
+ efx_rc_t (*envo_partn_erase)(efx_nic_t *, uint32_t,
+ unsigned int, size_t);
+ efx_rc_t (*envo_partn_write)(efx_nic_t *, uint32_t,
+ unsigned int, caddr_t, size_t);
+ void (*envo_partn_rw_finish)(efx_nic_t *, uint32_t);
+ efx_rc_t (*envo_partn_get_version)(efx_nic_t *, uint32_t,
+ uint32_t *, uint16_t *);
+ efx_rc_t (*envo_partn_set_version)(efx_nic_t *, uint32_t,
+ uint16_t *);
+ efx_rc_t (*envo_buffer_validate)(efx_nic_t *, uint32_t,
+ caddr_t, size_t);
+} efx_nvram_ops_t;
+#endif /* EFSYS_OPT_NVRAM */
+
+extern __checkReturn efx_rc_t
+efx_nvram_tlv_validate(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size);
+
+
+#if EFSYS_OPT_VPD
+typedef struct efx_vpd_ops_s {
+ efx_rc_t (*evpdo_init)(efx_nic_t *);
+ efx_rc_t (*evpdo_size)(efx_nic_t *, size_t *);
+ efx_rc_t (*evpdo_read)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_verify)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_reinit)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_get)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *);
+ efx_rc_t (*evpdo_set)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *);
+ efx_rc_t (*evpdo_next)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *, unsigned int *);
+ efx_rc_t (*evpdo_write)(efx_nic_t *, caddr_t, size_t);
+ void (*evpdo_fini)(efx_nic_t *);
+} efx_vpd_ops_t;
+#endif /* EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_partitions(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __out unsigned int *npartnp);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_metadata(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4],
+ __out_bcount_opt(size) char *descp,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_info(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt size_t *sizep,
+ __out_opt uint32_t *addressp,
+ __out_opt uint32_t *erase_sizep,
+ __out_opt uint32_t *write_sizep);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t reboot);
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_test(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_LICENSING
+
+typedef struct efx_lic_ops_s {
+ efx_rc_t (*elo_update_licenses)(efx_nic_t *);
+ efx_rc_t (*elo_get_key_stats)(efx_nic_t *, efx_key_stats_t *);
+ efx_rc_t (*elo_app_state)(efx_nic_t *, uint64_t, boolean_t *);
+ efx_rc_t (*elo_get_id)(efx_nic_t *, size_t, uint32_t *,
+ size_t *, uint8_t *);
+ efx_rc_t (*elo_find_start)
+ (efx_nic_t *, caddr_t, size_t, uint32_t *);
+ efx_rc_t (*elo_find_end)(efx_nic_t *, caddr_t, size_t,
+ uint32_t , uint32_t *);
+ boolean_t (*elo_find_key)(efx_nic_t *, caddr_t, size_t,
+ uint32_t, uint32_t *, uint32_t *);
+ boolean_t (*elo_validate_key)(efx_nic_t *,
+ caddr_t, uint32_t);
+ efx_rc_t (*elo_read_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t, uint32_t,
+ caddr_t, size_t, uint32_t *);
+ efx_rc_t (*elo_write_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t,
+ caddr_t, uint32_t, uint32_t *);
+ efx_rc_t (*elo_delete_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t,
+ uint32_t, uint32_t, uint32_t *);
+ efx_rc_t (*elo_create_partition)(efx_nic_t *,
+ caddr_t, size_t);
+ efx_rc_t (*elo_finish_partition)(efx_nic_t *,
+ caddr_t, size_t);
+} efx_lic_ops_t;
+
+#endif
+
+typedef struct efx_drv_cfg_s {
+ uint32_t edc_min_vi_count;
+ uint32_t edc_max_vi_count;
+
+ uint32_t edc_max_piobuf_count;
+ uint32_t edc_pio_alloc_size;
+} efx_drv_cfg_t;
+
+struct efx_nic_s {
+ uint32_t en_magic;
+ efx_family_t en_family;
+ uint32_t en_features;
+ efsys_identifier_t *en_esip;
+ efsys_lock_t *en_eslp;
+ efsys_bar_t *en_esbp;
+ unsigned int en_mod_flags;
+ unsigned int en_reset_flags;
+ efx_nic_cfg_t en_nic_cfg;
+ efx_drv_cfg_t en_drv_cfg;
+ efx_port_t en_port;
+ efx_mon_t en_mon;
+ efx_intr_t en_intr;
+ uint32_t en_ev_qcount;
+ uint32_t en_rx_qcount;
+ uint32_t en_tx_qcount;
+ const efx_nic_ops_t *en_enop;
+ const efx_ev_ops_t *en_eevop;
+ const efx_tx_ops_t *en_etxop;
+ const efx_rx_ops_t *en_erxop;
+#if EFSYS_OPT_FILTER
+ efx_filter_t en_filter;
+ const efx_filter_ops_t *en_efop;
+#endif /* EFSYS_OPT_FILTER */
+#if EFSYS_OPT_MCDI
+ efx_mcdi_t en_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+#if EFSYS_OPT_NVRAM
+ efx_nvram_type_t en_nvram_locked;
+ const efx_nvram_ops_t *en_envop;
+#endif /* EFSYS_OPT_NVRAM */
+#if EFSYS_OPT_VPD
+ const efx_vpd_ops_t *en_evpdop;
+#endif /* EFSYS_OPT_VPD */
+#if EFSYS_OPT_RX_SCALE
+ efx_rx_hash_support_t en_hash_support;
+ efx_rx_scale_support_t en_rss_support;
+ uint32_t en_rss_context;
+#endif /* EFSYS_OPT_RX_SCALE */
+ uint32_t en_vport_id;
+#if EFSYS_OPT_LICENSING
+ const efx_lic_ops_t *en_elop;
+#endif
+ union {
+#if EFSYS_OPT_SIENA
+ struct {
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+ unsigned int enu_partn_mask;
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
+#if EFSYS_OPT_VPD
+ caddr_t enu_svpd;
+ size_t enu_svpd_length;
+#endif /* EFSYS_OPT_VPD */
+ int enu_unused;
+ } siena;
+#endif /* EFSYS_OPT_SIENA */
+ int enu_unused;
+ } en_u;
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+ union en_arch {
+ struct {
+ int ena_vi_base;
+ int ena_vi_count;
+ int ena_vi_shift;
+#if EFSYS_OPT_VPD
+ caddr_t ena_svpd;
+ size_t ena_svpd_length;
+#endif /* EFSYS_OPT_VPD */
+ efx_piobuf_handle_t ena_piobuf_handle[EF10_MAX_PIOBUF_NBUFS];
+ uint32_t ena_piobuf_count;
+ uint32_t ena_pio_alloc_map[EF10_MAX_PIOBUF_NBUFS];
+ uint32_t ena_pio_write_vi_base;
+ /* Memory BAR mapping regions */
+ uint32_t ena_uc_mem_map_offset;
+ size_t ena_uc_mem_map_size;
+ uint32_t ena_wc_mem_map_offset;
+ size_t ena_wc_mem_map_size;
+ } ef10;
+ } en_arch;
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+};
+
+
+#define EFX_NIC_MAGIC 0x02121996
+
+typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *,
+ const efx_ev_callbacks_t *, void *);
+
+typedef struct efx_evq_rxq_state_s {
+ unsigned int eers_rx_read_ptr;
+ unsigned int eers_rx_mask;
+} efx_evq_rxq_state_t;
+
+struct efx_evq_s {
+ uint32_t ee_magic;
+ efx_nic_t *ee_enp;
+ unsigned int ee_index;
+ unsigned int ee_mask;
+ efsys_mem_t *ee_esmp;
+#if EFSYS_OPT_QSTATS
+ uint32_t ee_stat[EV_NQSTATS];
+#endif /* EFSYS_OPT_QSTATS */
+
+ efx_ev_handler_t ee_rx;
+ efx_ev_handler_t ee_tx;
+ efx_ev_handler_t ee_driver;
+ efx_ev_handler_t ee_global;
+ efx_ev_handler_t ee_drv_gen;
+#if EFSYS_OPT_MCDI
+ efx_ev_handler_t ee_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+
+ efx_evq_rxq_state_t ee_rxq_state[EFX_EV_RX_NLABELS];
+};
+
+#define EFX_EVQ_MAGIC 0x08081997
+
+#define EFX_EVQ_SIENA_TIMER_QUANTUM_NS 6144 /* 768 cycles */
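+
+/*
+ * 6144 ns over 768 cycles works out to an 8 ns timer tick, i.e. a 125 MHz
+ * event-timer clock on Siena.
+ */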
+
+struct efx_rxq_s {
+ uint32_t er_magic;
+ efx_nic_t *er_enp;
+ efx_evq_t *er_eep;
+ unsigned int er_index;
+ unsigned int er_label;
+ unsigned int er_mask;
+ efsys_mem_t *er_esmp;
+};
+
+#define EFX_RXQ_MAGIC 0x15022005
+
+struct efx_txq_s {
+ uint32_t et_magic;
+ efx_nic_t *et_enp;
+ unsigned int et_index;
+ unsigned int et_mask;
+ efsys_mem_t *et_esmp;
+#if EFSYS_OPT_HUNTINGTON
+ uint32_t et_pio_bufnum;
+ uint32_t et_pio_blknum;
+ uint32_t et_pio_write_offset;
+ uint32_t et_pio_offset;
+ size_t et_pio_size;
+#endif
+#if EFSYS_OPT_QSTATS
+ uint32_t et_stat[TX_NQSTATS];
+#endif /* EFSYS_OPT_QSTATS */
+};
+
+#define EFX_TXQ_MAGIC 0x05092005
+
+#define EFX_MAC_ADDR_COPY(_dst, _src) \
+ do { \
+ (_dst)[0] = (_src)[0]; \
+ (_dst)[1] = (_src)[1]; \
+ (_dst)[2] = (_src)[2]; \
+ (_dst)[3] = (_src)[3]; \
+ (_dst)[4] = (_src)[4]; \
+ (_dst)[5] = (_src)[5]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_MAC_BROADCAST_ADDR_SET(_dst) \
+ do { \
+ uint16_t *_d = (void *)(_dst); \
+ _d[0] = 0xffff; \
+ _d[1] = 0xffff; \
+ _d[2] = 0xffff; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
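+
+/*
+ * Note: EFX_MAC_BROADCAST_ADDR_SET() stores the address as three uint16_t
+ * writes, so the destination buffer must be at least 16-bit aligned.
+ */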
+
+#if EFSYS_OPT_CHECK_REG
+#define EFX_CHECK_REG(_enp, _reg) \
+ do { \
+ const char *name = #_reg; \
+ char min = name[4]; \
+ char max = name[5]; \
+ char rev; \
+ \
+ switch ((_enp)->en_family) { \
+ case EFX_FAMILY_SIENA: \
+ rev = 'C'; \
+ break; \
+ \
+ case EFX_FAMILY_HUNTINGTON: \
+ rev = 'D'; \
+ break; \
+ \
+ case EFX_FAMILY_MEDFORD: \
+ rev = 'E'; \
+ break; \
+ \
+ default: \
+ rev = '?'; \
+ break; \
+ } \
+ \
+ EFSYS_ASSERT3S(rev, >=, min); \
+ EFSYS_ASSERT3S(rev, <=, max); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_CHECK_REG(_enp, _reg) do { \
+ _NOTE(CONSTANTCONDITION) \
+	} while (B_FALSE)
+#endif
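+
+/*
+ * EFX_CHECK_REG() is intended to pick out the controller-revision range
+ * letters embedded in the register name and assert that the revision implied
+ * by en_family falls within that range; it compiles away entirely when
+ * EFSYS_OPT_CHECK_REG is not set.
+ */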
+
+#define EFX_BAR_READD(_enp, _reg, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, _reg ## _OFST, \
+ (_edp), (_lock)); \
+ EFSYS_PROBE3(efx_bar_readd, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITED(_enp, _reg, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE3(efx_bar_writed, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, _reg ## _OFST, \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_READQ(_enp, _reg, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READQ((_enp)->en_esbp, _reg ## _OFST, \
+ (_eqp)); \
+ EFSYS_PROBE4(efx_bar_readq, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITEQ(_enp, _reg, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_writeq, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ EFSYS_BAR_WRITEQ((_enp)->en_esbp, _reg ## _OFST, \
+ (_eqp)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_READO(_enp, _reg, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READO((_enp)->en_esbp, _reg ## _OFST, \
+ (_eop), B_TRUE); \
+ EFSYS_PROBE6(efx_bar_reado, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITEO(_enp, _reg, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE6(efx_bar_writeo, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_WRITEO((_enp)->en_esbp, _reg ## _OFST, \
+ (_eop), B_TRUE); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ EFSYS_PROBE4(efx_bar_tbl_readd, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
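+/*
+ * EFX_BAR_TBL_WRITED2()/EFX_BAR_TBL_WRITED3() write a dword at an offset of
+ * two or three dwords into the indexed table entry.
+ */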
+#define EFX_BAR_TBL_WRITED2(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + \
+ (2 * sizeof (efx_dword_t)) + \
+ ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + \
+ (3 * sizeof (efx_dword_t)) + \
+ ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READQ(_enp, _reg, _index, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READQ((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eqp)); \
+ EFSYS_PROBE5(efx_bar_tbl_readq, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITEQ(_enp, _reg, _index, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE5(efx_bar_tbl_writeq, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ EFSYS_BAR_WRITEQ((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eqp)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READO(_enp, _reg, _index, _eop, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop), (_lock)); \
+ EFSYS_PROBE7(efx_bar_tbl_reado, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITEO(_enp, _reg, _index, _eop, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE7(efx_bar_tbl_writeo, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_WRITEO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Allow drivers to perform optimised 128-bit doorbell writes.
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * special-cased in the BIU on the Falcon/Siena and EF10 architectures to avoid
+ * the need for locking in the host, and are the only ones known to be safe
+ * to use with 128-bit writes.
+ */
+#define EFX_BAR_TBL_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE7(efx_bar_tbl_doorbell_writeo, \
+ const char *, \
+ #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_DOORBELL_WRITEO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
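+/*
+ * Sync newly written descriptors for device access; if the write pointer has
+ * wrapped since the last sync, the whole ring is synced instead.
+ */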
+#define EFX_DMA_SYNC_QUEUE_FOR_DEVICE(_esmp, _entries, _wptr, _owptr) \
+ do { \
+ unsigned int _new = (_wptr); \
+ unsigned int _old = (_owptr); \
+ \
+ if ((_new) >= (_old)) \
+ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \
+ (_old) * sizeof (efx_desc_t), \
+ ((_new) - (_old)) * sizeof (efx_desc_t)); \
+ else \
+ /* \
+ * It is cheaper to sync entire map than sync \
+ * two parts especially when offset/size are \
+ * ignored and entire map is synced in any case.\
+ */ \
+ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \
+ 0, \
+ (_entries) * sizeof (efx_desc_t)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+extern __checkReturn efx_rc_t
+efx_nic_biu_test(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mac_select(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mac_multicast_hash_compute(
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count,
+ __out efx_oword_t *hash_low,
+ __out efx_oword_t *hash_high);
+
+extern __checkReturn efx_rc_t
+efx_phy_probe(
+ __in efx_nic_t *enp);
+
+extern void
+efx_phy_unprobe(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_VPD
+
+/* VPD utility functions */
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_length(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *lengthp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_verify(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt boolean_t *cksummedp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_reinit(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t wantpid);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_get(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_tag_t tag,
+ __in efx_vpd_keyword_t keyword,
+ __out unsigned int *payloadp,
+ __out uint8_t *paylenp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_next(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_tag_t *tagp,
+ __out efx_vpd_keyword_t *keyword,
+ __out_opt unsigned int *payloadp,
+ __out_opt uint8_t *paylenp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_set(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+#endif /* EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_DIAG
+
+extern efx_sram_pattern_fn_t __efx_sram_pattern_fns[];
+
+typedef struct efx_register_set_s {
+ unsigned int address;
+ unsigned int step;
+ unsigned int rows;
+ efx_oword_t mask;
+} efx_register_set_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_test_registers(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in size_t count);
+
+extern __checkReturn efx_rc_t
+efx_nic_test_tables(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in efx_pattern_type_t pattern,
+ __in size_t count);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+efx_mcdi_set_workaround(
+ __in efx_nic_t *enp,
+ __in uint32_t type,
+ __in boolean_t enabled,
+ __out_opt uint32_t *flagsp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_workarounds(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *implementedp,
+ __out_opt uint32_t *enabledp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_IMPL_H */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_intr.c b/usr/src/uts/common/io/sfxge/common/efx_intr.c
new file mode 100644
index 0000000000..0e5c6c020f
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_intr.c
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+static void
+siena_intr_enable(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_disable(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+static void
+siena_intr_fini(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp);
+
+static void
+siena_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+static void
+siena_intr_fatal(
+ __in efx_nic_t *enp);
+
+static __checkReturn boolean_t
+siena_intr_check_fatal(
+ __in efx_nic_t *enp);
+
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_intr_ops_t __efx_intr_siena_ops = {
+ siena_intr_init, /* eio_init */
+ siena_intr_enable, /* eio_enable */
+ siena_intr_disable, /* eio_disable */
+ siena_intr_disable_unlocked, /* eio_disable_unlocked */
+ siena_intr_trigger, /* eio_trigger */
+ siena_intr_status_line, /* eio_status_line */
+ siena_intr_status_message, /* eio_status_message */
+ siena_intr_fatal, /* eio_fatal */
+ siena_intr_fini, /* eio_fini */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_intr_ops_t __efx_intr_ef10_ops = {
+ ef10_intr_init, /* eio_init */
+ ef10_intr_enable, /* eio_enable */
+ ef10_intr_disable, /* eio_disable */
+ ef10_intr_disable_unlocked, /* eio_disable_unlocked */
+ ef10_intr_trigger, /* eio_trigger */
+ ef10_intr_status_line, /* eio_status_line */
+ ef10_intr_status_message, /* eio_status_message */
+ ef10_intr_fatal, /* eio_fatal */
+ ef10_intr_fini, /* eio_fini */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
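+/*
+ * Select the family-specific interrupt ops and perform common interrupt
+ * initialisation; the remaining efx_intr_*() entry points dispatch through
+ * the ops vector chosen here.
+ */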
+ __checkReturn efx_rc_t
+efx_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enp->en_mod_flags & EFX_MOD_INTR) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ eip->ei_esmp = esmp;
+ eip->ei_type = type;
+ eip->ei_level = 0;
+
+ enp->en_mod_flags |= EFX_MOD_INTR;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ eiop = &__efx_intr_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ if ((rc = eiop->eio_init(enp, type, esmp)) != 0)
+ goto fail3;
+
+ eip->ei_eiop = eiop;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_intr_fini(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_fini(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_INTR;
+}
+
+ void
+efx_intr_enable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_enable(enp);
+}
+
+ void
+efx_intr_disable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_disable(enp);
+}
+
+ void
+efx_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_disable_unlocked(enp);
+}
+
+
+ __checkReturn efx_rc_t
+efx_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ return (eiop->eio_trigger(enp, level));
+}
+
+ void
+efx_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_status_line(enp, fatalp, qmaskp);
+}
+
+ void
+efx_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_status_message(enp, message, fatalp);
+}
+
+ void
+efx_intr_fatal(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_fatal(enp);
+}
+
+
+/* ************************************************************************* */
+/* ************************************************************************* */
+/* ************************************************************************* */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+
+ /*
+ * bug17213 workaround.
+ *
+ * Under legacy interrupts, don't share a level between fatal
+ * interrupts and event queue interrupts. Under MSI-X, they
+ * must share, or we won't get an interrupt.
+ */
+ if (enp->en_family == EFX_FAMILY_SIENA &&
+ eip->ei_type == EFX_INTR_LINE)
+ eip->ei_level = 0x1f;
+ else
+ eip->ei_level = 0;
+
+ /* Enable all the genuinely fatal interrupts */
+ EFX_SET_OWORD(oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_ILL_ADR_INT_KER_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RBUF_OWN_INT_KER_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TBUF_OWN_INT_KER_EN, 0);
+ if (enp->en_family >= EFX_FAMILY_SIENA)
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_FATAL_INTR_REG_KER, &oword);
+
+ /* Set up the interrupt address register */
+ EFX_POPULATE_OWORD_3(oword,
+ FRF_AZ_NORM_INT_VEC_DIS_KER, (type == EFX_INTR_MESSAGE) ? 1 : 0,
+ FRF_AZ_INT_ADR_KER_DW0, EFSYS_MEM_ADDR(esmp) & 0xffffffff,
+ FRF_AZ_INT_ADR_KER_DW1, EFSYS_MEM_ADDR(esmp) >> 32);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
+
+ return (0);
+}
+
+static void
+siena_intr_enable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+}
+
+static void
+siena_intr_disable(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ EFSYS_SPIN(10);
+}
+
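+/* As siena_intr_disable(), but without taking the BAR access lock. */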
+static void
+siena_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFSYS_BAR_READO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
+ &oword, B_FALSE);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
+ EFSYS_BAR_WRITEO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
+ &oword, B_FALSE);
+}
+
+static __checkReturn efx_rc_t
+siena_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+ unsigned int count;
+ uint32_t sel;
+ efx_rc_t rc;
+
+ /* bug16757: No event queues can be initialized */
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+
+ if (level >= EFX_NINTR_SIENA) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (level > EFX_MASK32(FRF_AZ_KER_INT_LEVE_SEL))
+ return (ENOTSUP); /* avoid EFSYS_PROBE() */
+
+ sel = level;
+
+ /* Trigger a test interrupt */
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, sel);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ /*
+ * Wait up to 100ms for the interrupt to be raised before restoring
+ * KER_INT_LEVE_SEL. Ignore a failure to raise (the caller will
+ * observe this soon enough anyway), but always reset KER_INT_LEVE_SEL
+ */
+ count = 0;
+ do {
+ EFSYS_SPIN(100); /* 100us */
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ } while (EFX_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER) && ++count < 1000);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn boolean_t
+siena_intr_check_fatal(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efsys_mem_t *esmp = eip->ei_esmp;
+ efx_oword_t oword;
+
+ /* Read the syndrome */
+ EFSYS_MEM_READO(esmp, 0, &oword);
+
+ if (EFX_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT) != 0) {
+ EFSYS_PROBE(fatal);
+
+ /* Clear the fatal interrupt condition */
+ EFX_SET_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT, 0);
+ EFSYS_MEM_WRITEO(esmp, 0, &oword);
+
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+static void
+siena_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_dword_t dword;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ /*
+ * Read the queue mask and implicitly acknowledge the
+ * interrupt.
+ */
+ EFX_BAR_READD(enp, FR_BZ_INT_ISR0_REG, &dword, B_FALSE);
+ *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
+
+ if (*qmaskp & (1U << eip->ei_level))
+ *fatalp = siena_intr_check_fatal(enp);
+ else
+ *fatalp = B_FALSE;
+}
+
+static void
+siena_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ if (message == eip->ei_level)
+ *fatalp = siena_intr_check_fatal(enp);
+ else
+ *fatalp = B_FALSE;
+}
+
+
+static void
+siena_intr_fatal(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_DECODE_INTR_FATAL
+ efx_oword_t fatal;
+ efx_oword_t mem_per;
+
+ EFX_BAR_READO(enp, FR_AZ_FATAL_INTR_REG_KER, &fatal);
+ EFX_ZERO_OWORD(mem_per);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0 ||
+ EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
+ EFX_BAR_READO(enp, FR_AZ_MEM_STAT_REG, &mem_per);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRAM_OOB_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_OOB, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_BUFID_DC_OOB_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_BUFID_DC_OOB, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_MEM_PERR,
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_RBUF_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_RBUF_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_TBUF_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_TBUF_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_RDESCQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_RDESQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_TDESCQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_TDESQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_EVQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVF_OFLO_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_EVFF_OFLO, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_ILL_ADR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_ILL_ADDR, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_PERR,
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
+#else
+ EFSYS_ASSERT(0);
+#endif
+}
+
+static void
+siena_intr_fini(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /* Clear the interrupt address register */
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_lic.c b/usr/src/uts/common/io/sfxge/common/efx_lic.c
new file mode 100644
index 0000000000..beba5b2348
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_lic.c
@@ -0,0 +1,1742 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_LICENSING
+
+#include "ef10_tlv_layout.h"
+
+#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_update_license(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static const efx_lic_ops_t __efx_lic_v1_ops = {
+ efx_mcdi_fc_license_update_license, /* elo_update_licenses */
+ efx_mcdi_fc_license_get_key_stats, /* elo_get_key_stats */
+ NULL, /* elo_app_state */
+ NULL, /* elo_get_id */
+ efx_lic_v1v2_find_start, /* elo_find_start */
+ efx_lic_v1v2_find_end, /* elo_find_end */
+ efx_lic_v1v2_find_key, /* elo_find_key */
+ efx_lic_v1v2_validate_key, /* elo_validate_key */
+ efx_lic_v1v2_read_key, /* elo_read_key */
+ efx_lic_v1v2_write_key, /* elo_write_key */
+ efx_lic_v1v2_delete_key, /* elo_delete_key */
+ efx_lic_v1v2_create_partition, /* elo_create_partition */
+ efx_lic_v1v2_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_update_licenses(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensed_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+static const efx_lic_ops_t __efx_lic_v2_ops = {
+ efx_mcdi_licensing_update_licenses, /* elo_update_licenses */
+ efx_mcdi_licensing_get_key_stats, /* elo_get_key_stats */
+ efx_mcdi_licensed_app_state, /* elo_app_state */
+ NULL, /* elo_get_id */
+ efx_lic_v1v2_find_start, /* elo_find_start */
+ efx_lic_v1v2_find_end, /* elo_find_end */
+ efx_lic_v1v2_find_key, /* elo_find_key */
+ efx_lic_v1v2_validate_key, /* elo_validate_key */
+ efx_lic_v1v2_read_key, /* elo_read_key */
+ efx_lic_v1v2_write_key, /* elo_write_key */
+ efx_lic_v1v2_delete_key, /* elo_delete_key */
+ efx_lic_v1v2_create_partition, /* elo_create_partition */
+ efx_lic_v1v2_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_update_licenses(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_report_license(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_bcount_part_opt(buffer_size, *lengthp)
+ uint8_t *bufferp);
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+static const efx_lic_ops_t __efx_lic_v3_ops = {
+ efx_mcdi_licensing_v3_update_licenses, /* elo_update_licenses */
+ efx_mcdi_licensing_v3_report_license, /* elo_get_key_stats */
+ efx_mcdi_licensing_v3_app_state, /* elo_app_state */
+ efx_mcdi_licensing_v3_get_id, /* elo_get_id */
+ efx_lic_v3_find_start, /* elo_find_start*/
+ efx_lic_v3_find_end, /* elo_find_end */
+ efx_lic_v3_find_key, /* elo_find_key */
+ efx_lic_v3_validate_key, /* elo_validate_key */
+ efx_lic_v3_read_key, /* elo_read_key */
+ efx_lic_v3_write_key, /* elo_write_key */
+ efx_lic_v3_delete_key, /* elo_delete_key */
+ efx_lic_v3_create_partition, /* elo_create_partition */
+ efx_lic_v3_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+
+/* V1 Licensing - used in Siena Modena only */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_update_license(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_FC_IN_LICENSE_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FC_OP_LICENSE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
+ MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != 0) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FC_IN_LICENSE_LEN,
+ MC_CMD_FC_OUT_LICENSE_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FC_OP_LICENSE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FC_OUT_LICENSE_LEN;
+
+ MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
+ MC_CMD_FC_IN_LICENSE_GET_KEY_STATS);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FC_OUT_LICENSE_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_VALID_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_INVALID_KEYS);
+ eksp->eks_blacklisted =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_BLACKLISTED_KEYS);
+ eksp->eks_unverifiable = 0;
+ eksp->eks_wrong_node = 0;
+ eksp->eks_licensed_apps_lo = 0;
+ eksp->eks_licensed_apps_hi = 0;
+ eksp->eks_licensed_features_lo = 0;
+ eksp->eks_licensed_features_hi = 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+/* V1 and V2 Partition format - based on a 16-bit TLV format */
+
+#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
+
+/*
+ * V1/V2 format - defined in SF-108542-TC section 4.2:
+ * Type (T): 16bit - revision/HMAC algorithm
+ * Length (L): 16bit - value length in bytes
+ * Value (V): L bytes - payload
+ */
+#define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX (256)
+#define EFX_LICENSE_V1V2_HEADER_LENGTH (2*sizeof(uint16_t))
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ *startp = 0;
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ *endp = offset + EFX_LICENSE_V1V2_HEADER_LENGTH;
+ return (0);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ boolean_t found;
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ _NOTE(ARGUNUSED(enp))
+
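+ /*
+ * Stop if no TLV header fits in the remaining buffer, at the all-zero
+ * terminator TLV, or at an implausibly long value.
+ */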
+ if ((size_t)buffer_size - offset < EFX_LICENSE_V1V2_HEADER_LENGTH)
+ goto fail1;
+
+ tlv_type = __LE_TO_CPU_16(((uint16_t*)&bufferp[offset])[0]);
+ tlv_length = __LE_TO_CPU_16(((uint16_t*)&bufferp[offset])[1]);
+ if ((tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) ||
+ (tlv_type == 0 && tlv_length == 0)) {
+ found = B_FALSE;
+ } else {
+ *startp = offset;
+ *lengthp = tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH;
+ found = B_TRUE;
+ }
+ return (found);
+
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (length < EFX_LICENSE_V1V2_HEADER_LENGTH) {
+ goto fail1;
+ }
+
+ tlv_type = __LE_TO_CPU_16(((uint16_t*)keyp)[0]);
+ tlv_length = __LE_TO_CPU_16(((uint16_t*)keyp)[1]);
+
+ if (tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) {
+ goto fail2;
+ }
+ if (tlv_type == 0) {
+ goto fail3;
+ }
+ if ((tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH) != length) {
+ goto fail4;
+ }
+
+ return (B_TRUE);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
+ EFX_LICENSE_V1V2_HEADER_LENGTH));
+
+ if (key_max_size < length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ (void) memcpy(keyp, &bufferp[offset], length);
+
+ *lengthp = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
+ EFX_LICENSE_V1V2_HEADER_LENGTH));
+
+ /* Ensure space for the terminator remains */
+ if ((offset + length) >
+ (buffer_size - EFX_LICENSE_V1V2_HEADER_LENGTH) ) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ (void) memcpy(bufferp + offset, keyp, length);
+
+ *lengthp = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ uint32_t move_start = offset + length;
+ uint32_t move_length = end - move_start;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(end <= buffer_size);
+
+ /* Shift everything after the key down */
+ memmove(bufferp + offset, bufferp + move_start, move_length);
+
+ *deltap = length;
+
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(EFX_LICENSE_V1V2_HEADER_LENGTH <= buffer_size);
+
+ /* Write the terminator */
+ (void) memset(bufferp, '\0', EFX_LICENSE_V1V2_HEADER_LENGTH);
+ return (0);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
+
+
+/* V2 Licensing - used by Huntington family only. See SF-113611-TC */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensed_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LICENSED_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)];
+ uint32_t app_state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ /* V2 licensing supports 32bit app id only */
+ if ((app_id >> 32) != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LICENSED_APP_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LICENSED_APP_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_APP_STATE_IN_APP_ID,
+ app_id & 0xffffffff);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_APP_STATE_OUT_STATE));
+ if (app_state != MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED) {
+ *licensedp = B_TRUE;
+ } else {
+ *licensedp = B_FALSE;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_update_licenses(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_LICENSING_IN_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_IN_OP,
+ MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != 0) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_IN_LEN,
+ MC_CMD_LICENSING_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_IN_OP,
+ MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_VALID_APP_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_INVALID_APP_KEYS);
+ eksp->eks_blacklisted =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_BLACKLISTED_APP_KEYS);
+ eksp->eks_unverifiable =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_UNVERIFIABLE_APP_KEYS);
+ eksp->eks_wrong_node =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_WRONG_NODE_APP_KEYS);
+ eksp->eks_licensed_apps_lo = 0;
+ eksp->eks_licensed_apps_hi = 0;
+ eksp->eks_licensed_features_lo = 0;
+ eksp->eks_licensed_features_hi = 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+/* V3 Licensing - used starting from Medford family. See SF-114884-SW */
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_update_licenses(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING_V3;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_report_license(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_V3_IN_LEN,
+ MC_CMD_LICENSING_V3_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING_V3;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_V3_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_V3_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_VALID_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_INVALID_KEYS);
+ eksp->eks_blacklisted = 0;
+ eksp->eks_unverifiable =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_UNVERIFIABLE_KEYS);
+ eksp->eks_wrong_node =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_WRONG_NODE_KEYS);
+ eksp->eks_licensed_apps_lo =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_LO);
+ eksp->eks_licensed_apps_hi =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_HI);
+ eksp->eks_licensed_features_lo =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_LO);
+ eksp->eks_licensed_features_hi =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_HI);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN)];
+ uint32_t app_state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO,
+ app_id & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI,
+ app_id >> 32);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_V3_APP_STATE_OUT_STATE));
+ if (app_state != MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED) {
+ *licensedp = B_TRUE;
+ } else {
+ *licensedp = B_FALSE;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
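+/*
+ * Read the licensing ID. With bufferp == NULL only the ID type and the
+ * buffer size required for a full read are reported; otherwise the ID is
+ * copied to the start of bufferp.
+ */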
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_bcount_part_opt(buffer_size, *lengthp)
+ uint8_t *bufferp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_GET_ID_V3_IN_LEN,
+ MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)];
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_LICENSING_GET_ID_V3;
+
+ if (bufferp == NULL) {
+ /* Request id type and length only */
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
+ (void) memset(payload, 0, sizeof (payload));
+ } else {
+ /* Request full buffer */
+ req.emr_in_buf = bufferp;
+ req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
+ req.emr_out_buf = bufferp;
+ req.emr_out_length = MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
+ (void) memset(bufferp, 0, req.emr_out_length);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *typep = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_TYPE);
+ *lengthp = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
+
+ if (bufferp == NULL) {
+ /*
+ * Modify the length requirement to tell the caller how much extra
+ * buffering is needed to read the complete output.
+ */
+ *lengthp += MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
+ } else {
+ /* Shift ID down to start of buffer */
+ memmove(bufferp,
+ bufferp+MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST,
+ *lengthp);
+ (void) memset(bufferp+(*lengthp), 0,
+ MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* V3 format uses Huntington TLV format partition. See SF-108797-SW */
+#define EFX_LICENSE_V3_KEY_LENGTH_MIN (64)
+#define EFX_LICENSE_V3_KEY_LENGTH_MAX (128)
+#define EFX_LICENSE_V3_HASH_LENGTH (64)
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_item_start(bufferp, buffer_size, startp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_item(bufferp, buffer_size,
+ offset, startp, lengthp);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ /* Check that the key is a valid V3 key */
+ uint8_t key_type;
+ uint8_t key_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (length < EFX_LICENSE_V3_KEY_LENGTH_MIN) {
+ goto fail1;
+ }
+
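+ /*
+ * The encoded length excludes the trailing hash, so add the hash
+ * length back to obtain the full key size.
+ */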
+ key_type = ((uint8_t*)keyp)[0];
+ key_length = ((uint8_t*)keyp)[1] + EFX_LICENSE_V3_HASH_LENGTH;
+
+ if (key_length > EFX_LICENSE_V3_KEY_LENGTH_MAX) {
+ goto fail2;
+ }
+ if (key_type < 3) {
+ goto fail3;
+ }
+ if (key_length != length) {
+ goto fail4;
+ }
+ return (B_TRUE);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_get_item(bufferp, buffer_size,
+ offset, length, keyp, key_max_size, lengthp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= EFX_LICENSE_V3_KEY_LENGTH_MAX);
+
+ return ef10_nvram_buffer_insert_item(bufferp, buffer_size,
+ offset, keyp, length, lengthp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((rc = ef10_nvram_buffer_delete_item(bufferp,
+ buffer_size, offset, length, end)) != 0) {
+ goto fail1;
+ }
+
+ *deltap = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ efx_rc_t rc;
+
+ /* Construct an empty partition */
+ if ((rc = ef10_nvram_buffer_create(enp,
+ NVRAM_PARTITION_TYPE_LICENSE,
+ bufferp, buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_buffer_finish(bufferp,
+ buffer_size)) != 0) {
+ goto fail1;
+ }
+
+ /* Validate the completed partition */
+ if ((rc = ef10_nvram_buffer_validate(enp, NVRAM_PARTITION_TYPE_LICENSE,
+ bufferp, buffer_size)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_lic_init(
+ __in efx_nic_t *enp)
+{
+ const efx_lic_ops_t *elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_LIC));
+
+ switch (enp->en_family) {
+
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ elop = &__efx_lic_v1_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ elop = &__efx_lic_v2_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ elop = &__efx_lic_v3_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ enp->en_elop = elop;
+ enp->en_mod_flags |= EFX_MOD_LIC;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_lic_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ enp->en_elop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_LIC;
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_update_licenses(
+ __in efx_nic_t *enp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_update_licenses(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_get_key_stats(enp, eksp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if (elop->elo_app_state == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if ((rc = elop->elo_app_state(enp, app_id, licensedp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_opt uint8_t *bufferp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if (elop->elo_get_id == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = elop->elo_get_id(enp, buffer_size, typep,
+ lengthp, bufferp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Buffer management API - abstracts varying TLV format used for License partition */
+
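+/*
+ * Typical usage: efx_lic_find_start() locates the first key,
+ * efx_lic_find_key() walks the keys in turn, and efx_lic_read_key(),
+ * efx_lic_write_key() and efx_lic_delete_key() operate on a single key
+ * within the buffer.
+ */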
+ __checkReturn efx_rc_t
+efx_lic_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_find_start(enp, bufferp, buffer_size, startp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ boolean_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ EFSYS_ASSERT(bufferp);
+ EFSYS_ASSERT(startp);
+ EFSYS_ASSERT(lengthp);
+
+ return (elop->elo_find_key(enp, bufferp, buffer_size, offset,
+ startp, lengthp));
+}
+
+
+/*
+ * Validate that the buffer contains a single key in a recognised format.
+ * An empty or terminator buffer is not accepted as a valid key.
+ */
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ boolean_t rc;
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_validate_key(enp, keyp, length)) == B_FALSE)
+ goto fail1;
+
+ return (B_TRUE);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_read_key(enp, bufferp, buffer_size, offset,
+ length, keyp, key_max_size, lengthp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_write_key(enp, bufferp, buffer_size, offset,
+ keyp, length, lengthp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_delete_key(enp, bufferp, buffer_size, offset,
+ length, end, deltap)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_create_partition(enp, bufferp, buffer_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_finish_partition(enp, bufferp, buffer_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LICENSING */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_mac.c b/usr/src/uts/common/io/sfxge/common/efx_mac.c
new file mode 100644
index 0000000000..e9d3ec1d75
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_mac.c
@@ -0,0 +1,804 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_mac_multicast_list_set(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_mac_ops_t __efx_siena_mac_ops = {
+ siena_mac_poll, /* emo_poll */
+ siena_mac_up, /* emo_up */
+ siena_mac_reconfigure, /* emo_addr_set */
+ siena_mac_reconfigure, /* emo_pdu_set */
+ siena_mac_reconfigure, /* emo_reconfigure */
+ siena_mac_multicast_list_set, /* emo_multicast_list_set */
+	NULL,				/* emo_filter_default_rxq_set */
+ NULL, /* emo_filter_default_rxq_clear */
+#if EFSYS_OPT_LOOPBACK
+ siena_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ efx_mcdi_mac_stats_upload, /* emo_stats_upload */
+ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */
+ siena_mac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_mac_ops_t __efx_ef10_mac_ops = {
+ ef10_mac_poll, /* emo_poll */
+ ef10_mac_up, /* emo_up */
+ ef10_mac_addr_set, /* emo_addr_set */
+ ef10_mac_pdu_set, /* emo_pdu_set */
+ ef10_mac_reconfigure, /* emo_reconfigure */
+ ef10_mac_multicast_list_set, /* emo_multicast_list_set */
+ ef10_mac_filter_default_rxq_set, /* emo_filter_default_rxq_set */
+ ef10_mac_filter_default_rxq_clear,
+ /* emo_filter_default_rxq_clear */
+#if EFSYS_OPT_LOOPBACK
+ ef10_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ efx_mcdi_mac_stats_upload, /* emo_stats_upload */
+ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */
+ ef10_mac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_mac_pdu_set(
+ __in efx_nic_t *enp,
+ __in size_t pdu)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint32_t old_pdu;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if (pdu < EFX_MAC_PDU_MIN) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (pdu > EFX_MAC_PDU_MAX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ old_pdu = epp->ep_mac_pdu;
+ epp->ep_mac_pdu = (uint32_t)pdu;
+ if ((rc = emop->emo_pdu_set(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ epp->ep_mac_pdu = old_pdu;
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_addr_set(
+ __in efx_nic_t *enp,
+ __in uint8_t *addr)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint8_t old_addr[6];
+ uint32_t oui;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (EFX_MAC_ADDR_IS_MULTICAST(addr)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
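+	/* Reject MAC addresses with an all-zeroes OUI (vendor prefix) */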
+ oui = addr[0] << 16 | addr[1] << 8 | addr[2];
+ if (oui == 0x000000) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ EFX_MAC_ADDR_COPY(old_addr, epp->ep_mac_addr);
+ EFX_MAC_ADDR_COPY(epp->ep_mac_addr, addr);
+ if ((rc = emop->emo_addr_set(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFX_MAC_ADDR_COPY(epp->ep_mac_addr, old_addr);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_filter_set(
+ __in efx_nic_t *enp,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ boolean_t old_all_unicst;
+ boolean_t old_mulcst;
+ boolean_t old_all_mulcst;
+ boolean_t old_brdcst;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ old_all_unicst = epp->ep_all_unicst;
+ old_mulcst = epp->ep_mulcst;
+ old_all_mulcst = epp->ep_all_mulcst;
+ old_brdcst = epp->ep_brdcst;
+
+ epp->ep_all_unicst = all_unicst;
+ epp->ep_mulcst = mulcst;
+ epp->ep_all_mulcst = all_mulcst;
+ epp->ep_brdcst = brdcst;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_all_unicst = old_all_unicst;
+ epp->ep_mulcst = old_mulcst;
+ epp->ep_all_mulcst = old_all_mulcst;
+ epp->ep_brdcst = old_brdcst;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_drain(
+ __in efx_nic_t *enp,
+ __in boolean_t enabled)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if (epp->ep_mac_drain == enabled)
+ return (0);
+
+ epp->ep_mac_drain = enabled;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((rc = emop->emo_up(enp, mac_upp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_fcntl_set(
+ __in efx_nic_t *enp,
+ __in unsigned int fcntl,
+ __in boolean_t autoneg)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ unsigned int old_fcntl;
+ boolean_t old_autoneg;
+ unsigned int old_adv_cap;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((fcntl & ~(EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE)) != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Ignore a request to set flow control auto-negotiation
+ * if the PHY doesn't support it.
+ */
+ if (~epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
+ autoneg = B_FALSE;
+
+ old_fcntl = epp->ep_fcntl;
+ old_autoneg = epp->ep_fcntl_autoneg;
+ old_adv_cap = epp->ep_adv_cap_mask;
+
+ epp->ep_fcntl = fcntl;
+ epp->ep_fcntl_autoneg = autoneg;
+
+ /*
+ * Always encode the flow control settings in the advertised
+ * capabilities even if we are not trying to auto-negotiate
+	 * them, and reconfigure both the PHY and the MAC.
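+	 *
+	 * The resulting advertised PAUSE/ASYM encoding is:
+	 *   PAUSE        respond and generate (symmetric)
+	 *   PAUSE | ASYM respond only
+	 *   ASYM         generate only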
+ */
+ if (fcntl & EFX_FCNTL_RESPOND)
+ epp->ep_adv_cap_mask |= (1 << EFX_PHY_CAP_PAUSE |
+ 1 << EFX_PHY_CAP_ASYM);
+ else
+ epp->ep_adv_cap_mask &= ~(1 << EFX_PHY_CAP_PAUSE |
+ 1 << EFX_PHY_CAP_ASYM);
+
+ if (fcntl & EFX_FCNTL_GENERATE)
+ epp->ep_adv_cap_mask ^= (1 << EFX_PHY_CAP_ASYM);
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ epp->ep_fcntl = old_fcntl;
+ epp->ep_fcntl_autoneg = old_autoneg;
+ epp->ep_adv_cap_mask = old_adv_cap;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_mac_fcntl_get(
+ __in efx_nic_t *enp,
+ __out unsigned int *fcntl_wantedp,
+ __out unsigned int *fcntl_linkp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int wanted = 0;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ /*
+ * Decode the requested flow control settings from the PHY
+ * advertised capabilities.
+ */
+ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_PAUSE))
+ wanted = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_ASYM))
+ wanted ^= EFX_FCNTL_GENERATE;
+
+ *fcntl_linkp = epp->ep_fcntl;
+ *fcntl_wantedp = wanted;
+}
+
+ __checkReturn efx_rc_t
+efx_mac_multicast_list_set(
+ __in efx_nic_t *enp,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint8_t *old_mulcst_addr_list = NULL;
+ uint32_t old_mulcst_addr_count;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (count > EFX_MAC_MULTICAST_LIST_MAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ old_mulcst_addr_count = epp->ep_mulcst_addr_count;
+ if (old_mulcst_addr_count > 0) {
+ /* Allocate memory to store old list (instead of using stack) */
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ if (old_mulcst_addr_list == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ /* Save the old list in case we need to rollback */
+ (void) memcpy(old_mulcst_addr_list, epp->ep_mulcst_addr_list,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN);
+ }
+
+ /* Store the new list */
+ (void) memcpy(epp->ep_mulcst_addr_list, addrs,
+ count * EFX_MAC_ADDR_LEN);
+ epp->ep_mulcst_addr_count = count;
+
+ if ((rc = emop->emo_multicast_list_set(enp)) != 0)
+ goto fail3;
+
+ if (old_mulcst_addr_count > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ /* Restore original list on failure */
+ epp->ep_mulcst_addr_count = old_mulcst_addr_count;
+ if (old_mulcst_addr_count > 0) {
+ (void) memcpy(epp->ep_mulcst_addr_list, old_mulcst_addr_list,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN);
+
+ EFSYS_KMEM_FREE(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ }
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+ __checkReturn efx_rc_t
+efx_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (emop->emo_filter_default_rxq_set != NULL) {
+ rc = emop->emo_filter_default_rxq_set(enp, erp, using_rss);
+ if (rc != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (emop->emo_filter_default_rxq_clear != NULL)
+ emop->emo_filter_default_rxq_clear(enp);
+}
+
+
+#if EFSYS_OPT_MAC_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED EfxMacStatNamesBlock 054d43a31d2d7a45 */
+static const char *__efx_mac_stat_name[] = {
+ "rx_octets",
+ "rx_pkts",
+ "rx_unicst_pkts",
+ "rx_multicst_pkts",
+ "rx_brdcst_pkts",
+ "rx_pause_pkts",
+ "rx_le_64_pkts",
+ "rx_65_to_127_pkts",
+ "rx_128_to_255_pkts",
+ "rx_256_to_511_pkts",
+ "rx_512_to_1023_pkts",
+ "rx_1024_to_15xx_pkts",
+ "rx_ge_15xx_pkts",
+ "rx_errors",
+ "rx_fcs_errors",
+ "rx_drop_events",
+ "rx_false_carrier_errors",
+ "rx_symbol_errors",
+ "rx_align_errors",
+ "rx_internal_errors",
+ "rx_jabber_pkts",
+ "rx_lane0_char_err",
+ "rx_lane1_char_err",
+ "rx_lane2_char_err",
+ "rx_lane3_char_err",
+ "rx_lane0_disp_err",
+ "rx_lane1_disp_err",
+ "rx_lane2_disp_err",
+ "rx_lane3_disp_err",
+ "rx_match_fault",
+ "rx_nodesc_drop_cnt",
+ "tx_octets",
+ "tx_pkts",
+ "tx_unicst_pkts",
+ "tx_multicst_pkts",
+ "tx_brdcst_pkts",
+ "tx_pause_pkts",
+ "tx_le_64_pkts",
+ "tx_65_to_127_pkts",
+ "tx_128_to_255_pkts",
+ "tx_256_to_511_pkts",
+ "tx_512_to_1023_pkts",
+ "tx_1024_to_15xx_pkts",
+ "tx_ge_15xx_pkts",
+ "tx_errors",
+ "tx_sgl_col_pkts",
+ "tx_mult_col_pkts",
+ "tx_ex_col_pkts",
+ "tx_late_col_pkts",
+ "tx_def_pkts",
+ "tx_ex_def_pkts",
+ "pm_trunc_bb_overflow",
+ "pm_discard_bb_overflow",
+ "pm_trunc_vfifo_full",
+ "pm_discard_vfifo_full",
+ "pm_trunc_qbb",
+ "pm_discard_qbb",
+ "pm_discard_mapping",
+ "rxdp_q_disabled_pkts",
+ "rxdp_di_dropped_pkts",
+ "rxdp_streaming_pkts",
+ "rxdp_hlb_fetch",
+ "rxdp_hlb_wait",
+ "vadapter_rx_unicast_packets",
+ "vadapter_rx_unicast_bytes",
+ "vadapter_rx_multicast_packets",
+ "vadapter_rx_multicast_bytes",
+ "vadapter_rx_broadcast_packets",
+ "vadapter_rx_broadcast_bytes",
+ "vadapter_rx_bad_packets",
+ "vadapter_rx_bad_bytes",
+ "vadapter_rx_overflow",
+ "vadapter_tx_unicast_packets",
+ "vadapter_tx_unicast_bytes",
+ "vadapter_tx_multicast_packets",
+ "vadapter_tx_multicast_bytes",
+ "vadapter_tx_broadcast_packets",
+ "vadapter_tx_broadcast_bytes",
+ "vadapter_tx_bad_packets",
+ "vadapter_tx_bad_bytes",
+ "vadapter_tx_overflow",
+};
+/* END MKCONFIG GENERATED EfxMacStatNamesBlock */
+
+ __checkReturn const char *
+efx_mac_stat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MAC_NSTATS);
+ return (__efx_mac_stat_name[id]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn efx_rc_t
+efx_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ /*
+ * Don't assert !ep_mac_stats_pending, because the client might
+ * have failed to finalise statistics when previously stopping
+ * the port.
+ */
+ if ((rc = emop->emo_stats_upload(enp, esmp)) != 0)
+ goto fail1;
+
+ epp->ep_mac_stats_pending = B_TRUE;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(emop != NULL);
+
+ if (emop->emo_stats_periodic == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = emop->emo_stats_periodic(enp, esmp, period_ms, events)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *essp,
+ __inout_opt uint32_t *generationp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ rc = emop->emo_stats_update(enp, esmp, essp, generationp);
+ if (rc == 0)
+ epp->ep_mac_stats_pending = B_FALSE;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+ __checkReturn efx_rc_t
+efx_mac_select(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_type_t type = EFX_MAC_INVALID;
+ const efx_mac_ops_t *emop;
+ int rc = EINVAL;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ emop = &__efx_siena_mac_ops;
+ type = EFX_MAC_SIENA;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ emop = &__efx_ef10_mac_ops;
+ type = EFX_MAC_HUNTINGTON;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ emop = &__efx_ef10_mac_ops;
+ type = EFX_MAC_MEDFORD;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT(type != EFX_MAC_INVALID);
+ EFSYS_ASSERT3U(type, <, EFX_MAC_NTYPES);
+ EFSYS_ASSERT(emop != NULL);
+
+ epp->ep_emop = emop;
+ epp->ep_mac_type = type;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_SIENA
+
+#define EFX_MAC_HASH_BITS (1 << 8)
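+/* 256 hash buckets, split across two 128-bit owords */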
+
+/* Compute the multicast hash as used on Falcon and Siena. */
+static void
+siena_mac_multicast_hash_compute(
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count,
+ __out efx_oword_t *hash_low,
+ __out efx_oword_t *hash_high)
+{
+ uint32_t crc, index;
+ int i;
+
+ EFSYS_ASSERT(hash_low != NULL);
+ EFSYS_ASSERT(hash_high != NULL);
+
+ EFX_ZERO_OWORD(*hash_low);
+ EFX_ZERO_OWORD(*hash_high);
+
+ for (i = 0; i < count; i++) {
+ /* Calculate hash bucket (IEEE 802.3 CRC32 of the MAC addr) */
+ crc = efx_crc32_calculate(0xffffffff, addrs, EFX_MAC_ADDR_LEN);
+ index = crc % EFX_MAC_HASH_BITS;
+ if (index < 128) {
+ EFX_SET_OWORD_BIT(*hash_low, index);
+ } else {
+ EFX_SET_OWORD_BIT(*hash_high, index - 128);
+ }
+
+ addrs += EFX_MAC_ADDR_LEN;
+ }
+}
+
+static __checkReturn efx_rc_t
+siena_mac_multicast_list_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_oword_t old_hash[2];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
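+	/* Save the old hash so it can be restored on failure */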
+ (void) memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash));
+
+ siena_mac_multicast_hash_compute(
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count,
+ &epp->ep_multicst_hash[0],
+ &epp->ep_multicst_hash[1]);
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ (void) memcpy(epp->ep_multicst_hash, old_hash, sizeof (old_hash));
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_mcdi.c b/usr/src/uts/common/io/sfxge/common/efx_mcdi.c
new file mode 100644
index 0000000000..0c73afb388
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_mcdi.c
@@ -0,0 +1,2280 @@
+/*
+ * Copyright (c) 2008-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MCDI
+
+/*
+ * There are three versions of the MCDI interface:
+ * - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers.
+ * - MCDIv1: Siena firmware and Huntington BootROM.
+ * - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM.
+ * Transport uses MCDIv2 headers.
+ *
+ * MCDIv2 Header NOT_EPOCH flag
+ * ----------------------------
+ * A new epoch begins at initial startup or after an MC reboot, and defines when
+ * the MC should reject stale MCDI requests.
+ *
+ * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all
+ * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1.
+ *
+ * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a
+ * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0.
+ */
+
+
+
+#if EFSYS_OPT_SIENA
+
+static const efx_mcdi_ops_t __efx_mcdi_siena_ops = {
+ siena_mcdi_init, /* emco_init */
+ siena_mcdi_send_request, /* emco_send_request */
+ siena_mcdi_poll_reboot, /* emco_poll_reboot */
+ siena_mcdi_poll_response, /* emco_poll_response */
+ siena_mcdi_read_response, /* emco_read_response */
+ siena_mcdi_fini, /* emco_fini */
+ siena_mcdi_feature_supported, /* emco_feature_supported */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
+ ef10_mcdi_init, /* emco_init */
+ ef10_mcdi_send_request, /* emco_send_request */
+ ef10_mcdi_poll_reboot, /* emco_poll_reboot */
+ ef10_mcdi_poll_response, /* emco_poll_response */
+ ef10_mcdi_read_response, /* emco_read_response */
+ ef10_mcdi_fini, /* emco_fini */
+ ef10_mcdi_feature_supported, /* emco_feature_supported */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+
+ __checkReturn efx_rc_t
+efx_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *emtp)
+{
+ const efx_mcdi_ops_t *emcop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ emcop = &__efx_mcdi_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (enp->en_features & EFX_FEATURE_MCDI_DMA) {
+ /* MCDI requires a DMA buffer in host memory */
+ if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ }
+ enp->en_mcdi.em_emtp = emtp;
+
+ if (emcop != NULL && emcop->emco_init != NULL) {
+ if ((rc = emcop->emco_init(enp, emtp)) != 0)
+ goto fail3;
+ }
+
+ enp->en_mcdi.em_emcop = emcop;
+ enp->en_mod_flags |= EFX_MOD_MCDI;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_mcdi.em_emcop = NULL;
+ enp->en_mcdi.em_emtp = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_MCDI;
+
+ return (rc);
+}
+
+ void
+efx_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI);
+
+ if (emcop != NULL && emcop->emco_fini != NULL)
+ emcop->emco_fini(enp);
+
+ emip->emi_port = 0;
+ emip->emi_aborted = 0;
+
+ enp->en_mcdi.em_emcop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_MCDI;
+}
+
+ void
+efx_mcdi_new_epoch(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ int state;
+
+ /* Start a new epoch (allow fresh MCDI requests to succeed) */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emip->emi_new_epoch = B_TRUE;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+static void
+efx_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in void *hdrp,
+ __in size_t hdr_len,
+ __in void *sdup,
+ __in size_t sdu_len)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len);
+}
+
+static efx_rc_t
+efx_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ rc = emcop->emco_poll_reboot(enp);
+ return (rc);
+}
+
+static boolean_t
+efx_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ boolean_t available;
+
+ available = emcop->emco_poll_response(enp);
+ return (available);
+}
+
+static void
+efx_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_read_response(enp, bufferp, offset, length);
+}
+
+ void
+efx_mcdi_request_start(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __in boolean_t ev_cpl)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr[2];
+ size_t hdr_len;
+ unsigned int max_version;
+ unsigned int seq;
+ unsigned int xflags;
+ boolean_t new_epoch;
+ int state;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * efx_mcdi_request_start() is naturally serialised against both
+ * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
+ * by virtue of there only being one outstanding MCDI request.
+ * Unfortunately, upper layers may also call efx_mcdi_request_abort()
+	 * at any time, to time out a pending MCDI request. That request may
+ * then subsequently complete, meaning efx_mcdi_ev_cpl() or
+ * efx_mcdi_ev_death() may end up running in parallel with
+ * efx_mcdi_request_start(). This race is handled by ensuring that
+ * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
+ * en_eslp lock.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ EFSYS_ASSERT(emip->emi_pending_req == NULL);
+ emip->emi_pending_req = emrp;
+ emip->emi_ev_cpl = ev_cpl;
+ emip->emi_poll_cnt = 0;
+ seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ);
+ new_epoch = emip->emi_new_epoch;
+ max_version = emip->emi_max_version;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ xflags = 0;
+ if (ev_cpl)
+ xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+ /*
+ * Huntington firmware supports MCDIv2, but the Huntington BootROM only
+ * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where
+ * possible to support this.
+ */
+ if ((max_version >= 2) &&
+ ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
+ (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
+ /* Construct MCDI v2 header */
+ hdr_len = sizeof (hdr);
+ EFX_POPULATE_DWORD_8(hdr[0],
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, seq,
+ MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
+ MCDI_HEADER_ERROR, 0,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_XFLAGS, xflags);
+
+ EFX_POPULATE_DWORD_2(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length);
+ } else {
+ /* Construct MCDI v1 header */
+ hdr_len = sizeof (hdr[0]);
+ EFX_POPULATE_DWORD_8(hdr[0],
+ MCDI_HEADER_CODE, emrp->emr_cmd,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_DATALEN, emrp->emr_in_length,
+ MCDI_HEADER_SEQ, seq,
+ MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
+ MCDI_HEADER_ERROR, 0,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_XFLAGS, xflags);
+ }
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST,
+ &hdr[0], hdr_len,
+ emrp->emr_in_buf, emrp->emr_in_length);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+ efx_mcdi_send_request(enp, &hdr[0], hdr_len,
+ emrp->emr_in_buf, emrp->emr_in_length);
+}
+
+
+static void
+efx_mcdi_read_response_header(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr[2];
+ unsigned int hdr_len;
+ unsigned int data_len;
+ unsigned int seq;
+ unsigned int cmd;
+ unsigned int error;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(emrp != NULL);
+
+ efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0]));
+ hdr_len = sizeof (hdr[0]);
+
+ cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE);
+ seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ);
+ error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR);
+
+ if (cmd != MC_CMD_V2_EXTN) {
+ data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN);
+ } else {
+ efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
+ hdr_len += sizeof (hdr[1]);
+
+ cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
+ data_len =
+ EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ if (error && (data_len == 0)) {
+ /* The MC has rebooted since the request was sent. */
+ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
+ (void) efx_mcdi_poll_reboot(enp);
+ rc = EIO;
+ goto fail1;
+ }
+ if ((cmd != emrp->emr_cmd) ||
+ (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
+ /* Response is for a different request */
+ rc = EIO;
+ goto fail2;
+ }
+ if (error) {
+ efx_dword_t err[2];
+ unsigned int err_len = MIN(data_len, sizeof (err));
+ int err_code = MC_CMD_ERR_EPROTO;
+ int err_arg = 0;
+
+ /* Read error code (and arg num for MCDI v2 commands) */
+ efx_mcdi_read_response(enp, &err, hdr_len, err_len);
+
+ if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t)))
+ err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0);
+#ifdef WITH_MCDI_V2
+ if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t)))
+ err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0);
+#endif
+ emrp->emr_err_code = err_code;
+ emrp->emr_err_arg = err_arg;
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ if ((err_code == MC_CMD_ERR_PROXY_PENDING) &&
+ (err_len == sizeof (err))) {
+ /*
+ * The MCDI request would normally fail with EPERM, but
+ * firmware has forwarded it to an authorization agent
+ * attached to a privileged PF.
+ *
+ * Save the authorization request handle. The client
+ * must wait for a PROXY_RESPONSE event, or timeout.
+ */
+ emrp->emr_proxy_handle = err_arg;
+ }
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context,
+ EFX_LOG_MCDI_RESPONSE,
+ &hdr[0], hdr_len,
+ &err[0], err_len);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+ if (!emrp->emr_quiet) {
+ EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd,
+ int, err_code, int, err_arg);
+ }
+
+ rc = efx_mcdi_request_errcode(err_code);
+ goto fail3;
+ }
+
+ emrp->emr_rc = 0;
+ emrp->emr_out_length_used = data_len;
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ emrp->emr_proxy_handle = 0;
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+ return;
+
+fail3:
+fail2:
+fail1:
+ emrp->emr_rc = rc;
+ emrp->emr_out_length_used = 0;
+}
+
+static void
+efx_mcdi_finish_response(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+ efx_dword_t hdr[2];
+ unsigned int hdr_len;
+ size_t bytes;
+
+ if (emrp->emr_out_buf == NULL)
+ return;
+
+ /* Read the command header to detect MCDI response format */
+ hdr_len = sizeof (hdr[0]);
+ efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len);
+ if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) {
+ /*
+ * Read the actual payload length. The length given in the event
+ * is only correct for responses with the V1 format.
+ */
+ efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
+ hdr_len += sizeof (hdr[1]);
+
+ emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1],
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ /* Copy payload out into caller supplied buffer */
+ bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length);
+ efx_mcdi_read_response(enp, emrp->emr_out_buf, hdr_len, bytes);
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context,
+ EFX_LOG_MCDI_RESPONSE,
+ &hdr[0], hdr_len,
+ emrp->emr_out_buf, bytes);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+}
+
+
+ __checkReturn boolean_t
+efx_mcdi_request_poll(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t *emrp;
+ int state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /* Serialise against post-watchdog efx_mcdi_ev* */
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ EFSYS_ASSERT(emip->emi_pending_req != NULL);
+ EFSYS_ASSERT(!emip->emi_ev_cpl);
+ emrp = emip->emi_pending_req;
+
+ /* Check for reboot atomically w.r.t efx_mcdi_request_start */
+ if (emip->emi_poll_cnt++ == 0) {
+ if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
+ emip->emi_pending_req = NULL;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ goto fail1;
+ }
+ }
+
+ /* Check if a response is available */
+ if (efx_mcdi_poll_response(enp) == B_FALSE) {
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (B_FALSE);
+ }
+
+ /* Read the response header */
+ efx_mcdi_read_response_header(enp, emrp);
+
+ /* Request complete */
+ emip->emi_pending_req = NULL;
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if ((rc = emrp->emr_rc) != 0)
+ goto fail2;
+
+ efx_mcdi_finish_response(enp, emrp);
+ return (B_TRUE);
+
+fail2:
+ if (!emrp->emr_quiet)
+ EFSYS_PROBE(fail2);
+fail1:
+ if (!emrp->emr_quiet)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Reboot/Assertion */
+ if (rc == EIO || rc == EINTR)
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
+ return (B_TRUE);
+}
+
+ __checkReturn boolean_t
+efx_mcdi_request_abort(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t *emrp;
+ boolean_t aborted;
+ int state;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * efx_mcdi_ev_* may have already completed this event, and be
+ * spinning/blocked on the upper layer lock. So it *is* legitimate
+	 * for emi_pending_req to be NULL. If there is a pending
+	 * event-completed request, then provide a "credit" to allow
+ * efx_mcdi_ev_cpl() to accept a single spurious completion.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emrp = emip->emi_pending_req;
+ aborted = (emrp != NULL);
+ if (aborted) {
+ emip->emi_pending_req = NULL;
+
+ /* Error the request */
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = ETIMEDOUT;
+
+		/* Provide a credit for seqno/emi_pending_req mismatches */
+ if (emip->emi_ev_cpl)
+ ++emip->emi_aborted;
+
+ /*
+ * The upper layer has called us, so we don't
+ * need to complete the request.
+ */
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (aborted);
+}
+
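+/* Map MC firmware error codes (MC_CMD_ERR_*) onto host errno values */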
+ __checkReturn efx_rc_t
+efx_mcdi_request_errcode(
+ __in unsigned int err)
+{
+
+ switch (err) {
+ /* MCDI v1 */
+ case MC_CMD_ERR_EPERM:
+ return (EACCES);
+ case MC_CMD_ERR_ENOENT:
+ return (ENOENT);
+ case MC_CMD_ERR_EINTR:
+ return (EINTR);
+ case MC_CMD_ERR_EACCES:
+ return (EACCES);
+ case MC_CMD_ERR_EBUSY:
+ return (EBUSY);
+ case MC_CMD_ERR_EINVAL:
+ return (EINVAL);
+ case MC_CMD_ERR_EDEADLK:
+ return (EDEADLK);
+ case MC_CMD_ERR_ENOSYS:
+ return (ENOTSUP);
+ case MC_CMD_ERR_ETIME:
+ return (ETIMEDOUT);
+ case MC_CMD_ERR_ENOTSUP:
+ return (ENOTSUP);
+ case MC_CMD_ERR_EALREADY:
+ return (EALREADY);
+
+ /* MCDI v2 */
+#ifdef MC_CMD_ERR_EAGAIN
+ case MC_CMD_ERR_EAGAIN:
+ return (EAGAIN);
+#endif
+#ifdef MC_CMD_ERR_ENOSPC
+ case MC_CMD_ERR_ENOSPC:
+ return (ENOSPC);
+#endif
+
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return (ENOMEM);
+ case MC_CMD_ERR_NO_VADAPTOR:
+ return (ENOENT);
+ case MC_CMD_ERR_NO_EVB_PORT:
+ return (ENOENT);
+ case MC_CMD_ERR_NO_VSWITCH:
+ return (ENODEV);
+ case MC_CMD_ERR_VLAN_LIMIT:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_PCI_FUNC:
+ return (ENODEV);
+ case MC_CMD_ERR_BAD_VLAN_MODE:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_VSWITCH_TYPE:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_VPORT_TYPE:
+ return (EINVAL);
+ case MC_CMD_ERR_MAC_EXIST:
+ return (EEXIST);
+
+ case MC_CMD_ERR_PROXY_PENDING:
+ return (EAGAIN);
+
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, err);
+ return (EIO);
+ }
+}
+
+ void
+efx_mcdi_raise_exception(
+ __in efx_nic_t *enp,
+ __in_opt efx_mcdi_req_t *emrp,
+ __in int rc)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_exception_t exception;
+
+ /* Reboot or Assertion failure only */
+ EFSYS_ASSERT(rc == EIO || rc == EINTR);
+
+ /*
+ * If MC_CMD_REBOOT causes a reboot (dependent on parameters),
+ * then the EIO is not worthy of an exception.
+ */
+ if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO)
+ return;
+
+ exception = (rc == EIO)
+ ? EFX_MCDI_EXCEPTION_MC_REBOOT
+ : EFX_MCDI_EXCEPTION_MC_BADASSERT;
+
+ emtp->emt_exception(emtp->emt_context, exception);
+}
+
+ void
+efx_mcdi_execute(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ emrp->emr_quiet = B_FALSE;
+ emtp->emt_execute(emtp->emt_context, emrp);
+}
+
+ void
+efx_mcdi_execute_quiet(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ emrp->emr_quiet = B_TRUE;
+ emtp->emt_execute(emtp->emt_context, emrp);
+}
+
+ void
+efx_mcdi_ev_cpl(
+ __in efx_nic_t *enp,
+ __in unsigned int seq,
+ __in unsigned int outlen,
+ __in int errcode)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_req_t *emrp;
+ int state;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start()
+ * when we're completing an aborted request.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl ||
+ (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
+ EFSYS_ASSERT(emip->emi_aborted > 0);
+ if (emip->emi_aborted > 0)
+ --emip->emi_aborted;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return;
+ }
+
+ emrp = emip->emi_pending_req;
+ emip->emi_pending_req = NULL;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (emip->emi_max_version >= 2) {
+ /* MCDIv2 response details do not fit into an event. */
+ efx_mcdi_read_response_header(enp, emrp);
+ } else {
+ if (errcode != 0) {
+ if (!emrp->emr_quiet) {
+ EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd,
+ int, errcode);
+ }
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = efx_mcdi_request_errcode(errcode);
+ } else {
+ emrp->emr_out_length_used = outlen;
+ emrp->emr_rc = 0;
+ }
+ }
+ if (errcode == 0) {
+ efx_mcdi_finish_response(enp, emrp);
+ }
+
+ emtp->emt_ev_cpl(emtp->emt_context);
+}
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_proxy_handle(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *handlep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_rc_t rc;
+
+ /*
+ * Return proxy handle from MCDI request that returned with error
+	 * MC_CMD_ERR_PROXY_PENDING. This handle is used to wait for a matching
+ * PROXY_RESPONSE event.
+ */
+ if ((emrp == NULL) || (handlep == NULL)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if ((emrp->emr_rc != 0) &&
+ (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) {
+ *handlep = emrp->emr_proxy_handle;
+ rc = 0;
+ } else {
+ *handlep = 0;
+ rc = ENOENT;
+ }
+ return (rc);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_mcdi_ev_proxy_response(
+ __in efx_nic_t *enp,
+ __in unsigned int handle,
+ __in unsigned int status)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_rc_t rc;
+
+ /*
+ * Handle results of an authorization request for a privileged MCDI
+ * command. If authorization was granted then we must re-issue the
+ * original MCDI request. If authorization failed or timed out,
+ * then the original MCDI request should be completed with the
+ * result code from this event.
+ */
+ rc = (status == 0) ? 0 : efx_mcdi_request_errcode(status);
+
+ emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc);
+}
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+ void
+efx_mcdi_ev_death(
+ __in efx_nic_t *enp,
+ __in int rc)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_req_t *emrp = NULL;
+ boolean_t ev_cpl;
+ int state;
+
+ /*
+ * The MCDI request (if there is one) has been terminated, either
+ * by a BADASSERT or REBOOT event.
+ *
+ * If there is an outstanding event-completed MCDI operation, then we
+ * will never receive the completion event (because both MCDI
+ * completions and BADASSERT events are sent to the same evq). So
+ * complete this MCDI op.
+ *
+ * This function might run in parallel with efx_mcdi_request_poll()
+	 * for poll-completed MCDI requests, and also with
+ * efx_mcdi_request_start() for post-watchdog completions.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emrp = emip->emi_pending_req;
+ ev_cpl = emip->emi_ev_cpl;
+ if (emrp != NULL && emip->emi_ev_cpl) {
+ emip->emi_pending_req = NULL;
+
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = rc;
+ ++emip->emi_aborted;
+ }
+
+ /*
+ * Since we're running in parallel with a request, consume the
+ * status word before dropping the lock.
+ */
+ if (rc == EIO || rc == EINTR) {
+ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
+ (void) efx_mcdi_poll_reboot(enp);
+ emip->emi_new_epoch = B_TRUE;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
+ if (emrp != NULL && ev_cpl)
+ emtp->emt_ev_cpl(emtp->emt_context);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_version(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(4) uint16_t versionp[4],
+ __out_opt uint32_t *buildp,
+ __out_opt efx_mcdi_boot_t *statusp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_GET_VERSION_IN_LEN,
+ MC_CMD_GET_VERSION_OUT_LEN),
+ MAX(MC_CMD_GET_BOOT_STATUS_IN_LEN,
+ MC_CMD_GET_BOOT_STATUS_OUT_LEN))];
+ efx_word_t *ver_words;
+ uint16_t version[4];
+ uint32_t build;
+ efx_mcdi_boot_t status;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_VERSION;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* bootrom support */
+ if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) {
+ version[0] = version[1] = version[2] = version[3] = 0;
+ build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
+
+ goto version;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VERSION_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION);
+ version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0);
+ version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0);
+ version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0);
+ version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0);
+ build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
+
+version:
+ /* The bootrom doesn't understand BOOT_STATUS */
+ if (MC_FW_VERSION_IS_BOOTLOADER(build)) {
+ status = EFX_MCDI_BOOT_ROM;
+ goto out;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOOT_STATUS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot access BOOT_STATUS */
+ status = EFX_MCDI_BOOT_PRIMARY;
+ version[0] = version[1] = version[2] = version[3] = 0;
+ build = 0;
+ goto out;
+ }
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail4;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS,
+ GET_BOOT_STATUS_OUT_FLAGS_PRIMARY))
+ status = EFX_MCDI_BOOT_PRIMARY;
+ else
+ status = EFX_MCDI_BOOT_SECONDARY;
+
+out:
+ if (versionp != NULL)
+ (void) memcpy(versionp, version, sizeof (version));
+ if (buildp != NULL)
+ *buildp = build;
+ if (statusp != NULL)
+ *statusp = status;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_do_reboot(
+ __in efx_nic_t *enp,
+ __in boolean_t after_assertion)
+{
+ uint8_t payload[MAX(MC_CMD_REBOOT_IN_LEN, MC_CMD_REBOOT_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ /*
+ * We could require the caller to have caused en_mod_flags=0 to
+ * call this function. This doesn't help the other port though,
+ * who's about to get the MC ripped out from underneath them.
+ * Since they have to cope with the subsequent fallout of MCDI
+ * failures, we should as well.
+ */
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_REBOOT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_REBOOT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
+ (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0));
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot reboot the MC. */
+ goto out;
+ }
+
+ /* A successful reboot request returns EIO. */
+ if (req.emr_rc != 0 && req.emr_rc != EIO) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+out:
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_reboot(
+ __in efx_nic_t *enp)
+{
+ return (efx_mcdi_do_reboot(enp, B_FALSE));
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_exit_assertion_handler(
+ __in efx_nic_t *enp)
+{
+ return (efx_mcdi_do_reboot(enp, B_TRUE));
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_read_assertion(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_ASSERTS_IN_LEN,
+ MC_CMD_GET_ASSERTS_OUT_LEN)];
+ const char *reason;
+ unsigned int flags;
+ unsigned int index;
+ unsigned int ofst;
+ int retry;
+ efx_rc_t rc;
+
+ /*
+ * Before we attempt to chat to the MC, we should verify that the MC
+	 * isn't in its assertion handler, either due to a previous reboot,
+ * or because we're reinitializing due to an eec_exception().
+ *
+ * Use GET_ASSERTS to read any assertion state that may be present.
+ * Retry this command twice. Once because a boot-time assertion failure
+ * might cause the 1st MCDI request to fail. And once again because
+ * we might race with efx_mcdi_exit_assertion_handler() running on
+ * partner port(s) on the same NIC.
+ */
+ retry = 2;
+ do {
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_ASSERTS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
+ efx_mcdi_execute_quiet(enp, &req);
+
+ } while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);
+
+ if (req.emr_rc != 0) {
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot clear assertions. */
+ goto out;
+ }
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ /* Print out any assertion state recorded */
+ flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+ return (0);
+
+ reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
+ ? "system-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
+ ? "thread-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
+ ? "watchdog reset"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP)
+ ? "illegal address trap"
+ : "unknown assertion";
+ EFSYS_PROBE3(mcpu_assertion,
+ const char *, reason, unsigned int,
+ MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ unsigned int,
+ MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));
+
+ /* Print out the registers (r1 ... r31) */
+ ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
+ for (index = 1;
+ index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
+ index++) {
+ EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
+ EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
+ EFX_DWORD_0));
+ ofst += sizeof (efx_dword_t);
+ }
+ EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);
+
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Internal routines for specific MCDI requests.
+ */
+
+ __checkReturn efx_rc_t
+efx_mcdi_drv_attach(
+ __in efx_nic_t *enp,
+ __in boolean_t attach)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_DRV_ATTACH_IN_LEN,
+ MC_CMD_DRV_ATTACH_EXT_OUT_LEN)];
+ uint32_t flags;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_DRV_ATTACH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;
+
+ /*
+ * Use DONT_CARE for the datapath firmware type to ensure that the
+ * driver can attach to an unprivileged function. The datapath firmware
+ * type to use is controlled by the 'sfboot' utility.
+ */
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (attach == B_FALSE) {
+ flags = 0;
+ } else if (enp->en_family == EFX_FAMILY_SIENA) {
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+
+ /* Create synthetic privileges for Siena functions */
+ flags = EFX_NIC_FUNC_LINKCTRL | EFX_NIC_FUNC_TRUSTED;
+ if (emip->emi_port == 1)
+ flags |= EFX_NIC_FUNC_PRIMARY;
+ } else {
+ EFX_STATIC_ASSERT(EFX_NIC_FUNC_PRIMARY ==
+ (1u << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY));
+ EFX_STATIC_ASSERT(EFX_NIC_FUNC_LINKCTRL ==
+ (1u << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL));
+ EFX_STATIC_ASSERT(EFX_NIC_FUNC_TRUSTED ==
+ (1u << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED));
+
+ /* Save function privilege flags (EF10 and later) */
+ if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+ flags = MCDI_OUT_DWORD(req, DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
+ }
+ encp->enc_func_flags = flags;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
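+
+/*
+ * Illustrative sketch: after a successful attach the driver can test the
+ * flags saved in encp->enc_func_flags to decide what this function is
+ * permitted to do, e.g.
+ *
+ *	if (encp->enc_func_flags & EFX_NIC_FUNC_LINKCTRL)
+ *		... this function may control the physical link ...
+ *
+ * On Siena the flags are synthesised above; on EF10 they are reported by
+ * the MC in DRV_ATTACH_EXT_OUT_FUNC_FLAGS.
+ */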
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_board_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *board_typep,
+ __out_opt efx_dword_t *capabilitiesp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMIN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOARD_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ if (emip->emi_port == 1) {
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0);
+ } else if (emip->emi_port == 2) {
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1);
+ } else {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ if (capabilitiesp != NULL) {
+ if (emip->emi_port == 1) {
+ *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
+ } else if (emip->emi_port == 2) {
+ *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
+ } else {
+ rc = EINVAL;
+ goto fail4;
+ }
+ }
+
+ if (board_typep != NULL) {
+ *board_typep = MCDI_OUT_DWORD(req,
+ GET_BOARD_CFG_OUT_BOARD_TYPE);
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_resource_limits(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *nevqp,
+ __out_opt uint32_t *nrxqp,
+ __out_opt uint32_t *ntxqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
+ MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (nevqp != NULL)
+ *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ);
+ if (nrxqp != NULL)
+ *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ);
+ if (ntxqp != NULL)
+ *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_phy_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN,
+ MC_CMD_GET_PHY_CFG_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
+#if EFSYS_OPT_NAMES
+ (void) strncpy(encp->enc_phy_name,
+ MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME),
+ MIN(sizeof (encp->enc_phy_name) - 1,
+ MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
+#endif /* EFSYS_OPT_NAMES */
+ (void) memset(encp->enc_phy_revision, 0,
+ sizeof (encp->enc_phy_revision));
+ (void) memcpy(encp->enc_phy_revision,
+ MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION),
+ MIN(sizeof (encp->enc_phy_revision) - 1,
+ MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN));
+#if EFSYS_OPT_PHY_LED_CONTROL
+ encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) |
+ (1 << EFX_PHY_LED_OFF) |
+ (1 << EFX_PHY_LED_ON));
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ /* Get the media type of the fixed port, if recognised. */
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS);
+ epp->ep_fixed_port_type =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
+ epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;
+
+ epp->ep_phy_cap_mask =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+#if EFSYS_OPT_PHY_FLAGS
+ encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT);
+
+ /* Populate internal state */
+ encp->enc_mcdi_mdio_channel =
+ (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL);
+
+#if EFSYS_OPT_PHY_STATS
+ encp->enc_mcdi_phy_stat_mask =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK);
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+ encp->enc_bist_mask = 0;
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST_CABLE_SHORT))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT);
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST_CABLE_LONG))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG);
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL);
+#endif /* EFSYS_OPT_BIST */
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_firmware_update_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported updates */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_macaddr_change_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported MAC changes */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_link_control_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported link control */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_spoofing_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported MAC spoofing */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_BIST
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+/*
+ * Enter BIST offline mode. This is a firmware mode in which the NIC is
+ * quiesced so that memory BIST tests can run without interference.
+ * A reboot is required to exit this mode.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0);
+ EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_mcdi_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_START_BIST_IN_LEN,
+ MC_CMD_START_BIST_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_START_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_START_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_START_BIST_OUT_LEN;
+
+ switch (type) {
+ case EFX_BIST_TYPE_PHY_NORMAL:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
+ break;
+ case EFX_BIST_TYPE_PHY_CABLE_SHORT:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PHY_BIST_CABLE_SHORT);
+ break;
+ case EFX_BIST_TYPE_PHY_CABLE_LONG:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PHY_BIST_CABLE_LONG);
+ break;
+ case EFX_BIST_TYPE_MC_MEM:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_MC_MEM_BIST);
+ break;
+ case EFX_BIST_TYPE_SAT_MEM:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PORT_MEM_BIST);
+ break;
+ case EFX_BIST_TYPE_REG:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_REG_BIST);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+
+/* Enable logging of some events (e.g. link state changes) */
+ __checkReturn efx_rc_t
+efx_mcdi_log_ctrl(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LOG_CTRL_IN_LEN,
+ MC_CMD_LOG_CTRL_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LOG_CTRL;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST,
+ MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
+ MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_MAC_STATS
+
+typedef enum efx_stats_action_e
+{
+ EFX_STATS_CLEAR,
+ EFX_STATS_UPLOAD,
+ EFX_STATS_ENABLE_NOEVENTS,
+ EFX_STATS_ENABLE_EVENTS,
+ EFX_STATS_DISABLE,
+} efx_stats_action_t;
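+
+/*
+ * The actions map onto the MAC_STATS_IN_CMD fields as follows (see the
+ * MCDI_IN_POPULATE_DWORD_6() call below):
+ *
+ *	EFX_STATS_CLEAR			DMA=0 CLEAR=1 PERIODIC_CHANGE=0
+ *	EFX_STATS_UPLOAD		DMA=1 CLEAR=0 PERIODIC_CHANGE=0
+ *	EFX_STATS_ENABLE_NOEVENTS	PERIODIC_CHANGE=1 PERIODIC_ENABLE=1
+ *					PERIODIC_NOEVENT=1
+ *	EFX_STATS_ENABLE_EVENTS		PERIODIC_CHANGE=1 PERIODIC_ENABLE=1
+ *					PERIODIC_NOEVENT=0
+ *	EFX_STATS_DISABLE		PERIODIC_CHANGE=1 PERIODIC_ENABLE=0
+ */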
+
+static __checkReturn efx_rc_t
+efx_mcdi_mac_stats(
+ __in efx_nic_t *enp,
+ __in_opt efsys_mem_t *esmp,
+ __in efx_stats_action_t action)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN,
+ MC_CMD_MAC_STATS_OUT_DMA_LEN)];
+ int clear = (action == EFX_STATS_CLEAR);
+ int upload = (action == EFX_STATS_UPLOAD);
+ int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
+ int events = (action == EFX_STATS_ENABLE_EVENTS);
+ int disable = (action == EFX_STATS_DISABLE);
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_MAC_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_MAC_STATS_OUT_DMA_LEN;
+
+ MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, upload,
+ MAC_STATS_IN_CLEAR, clear,
+ MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable,
+ MAC_STATS_IN_PERIODIC_ENABLE, enable | events,
+ MAC_STATS_IN_PERIODIC_NOEVENT, !events,
+ MAC_STATS_IN_PERIOD_MS, (enable | events) ? 1000: 0);
+
+ if (esmp != NULL) {
+ int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);
+
+ EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <=
+ EFX_MAC_STATS_SIZE);
+
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
+ } else {
+ EFSYS_ASSERT(!upload && !enable && !events);
+ }
+
+ /*
+ * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats,
+ * as this may fail (and leave periodic DMA enabled) if the
+ * vadapter has already been deleted.
+ */
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID,
+ (disable ? EVB_PORT_ID_NULL : enp->en_vport_id));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ /* EF10: Expect ENOENT if no DMA queues are initialised */
+ if ((req.emr_rc != ENOENT) ||
+ (enp->en_rx_qcount + enp->en_tx_qcount != 0)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_clear(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp)
+{
+ efx_rc_t rc;
+
+ /*
+ * The MC DMAs aggregate statistics for our convenience, so we can
+ * avoid having to pull the statistics buffer into the cache to
+ * maintain cumulative statistics.
+ */
+ if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period,
+ __in boolean_t events)
+{
+ efx_rc_t rc;
+
+ /*
+ * The MC DMAs aggregate statistics for our convenience, so we can
+ * avoid having to pull the statistics buffer into the cache to
+ * maintain cumulative statistics.
+ * Huntington uses a fixed 1sec period, so use that on Siena too.
+ */
+ if (period == 0)
+ rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE);
+ else if (events)
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS);
+ else
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS);
+
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
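+
+/*
+ * Note that the requested period is not passed to the MC: any non-zero
+ * period enables periodic statistics with the fixed 1000ms interval set in
+ * efx_mcdi_mac_stats() above, and a period of zero disables collection.
+ */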
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+/*
+ * This function returns the PF and VF numbers of a function. For a PF the
+ * VF number is 0xffff. The VF number is the index of the VF on its parent
+ * PF. For example, with 3 VFs on PF 0 the VFs return (pf=0,vf=0),
+ * (pf=0,vf=1) and (pf=0,vf=2), and the PF returns (pf=0,vf=0xffff).
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_get_function_info(
+ __in efx_nic_t *enp,
+ __out uint32_t *pfp,
+ __out_opt uint32_t *vfp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_FUNCTION_INFO_IN_LEN,
+ MC_CMD_GET_FUNCTION_INFO_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_FUNCTION_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF);
+ if (vfp != NULL)
+ *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_privilege_mask(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __in uint32_t vf,
+ __out uint32_t *maskp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_PRIVILEGE_MASK_IN_LEN,
+ MC_CMD_PRIVILEGE_MASK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_PRIVILEGE_MASK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN;
+
+ MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION,
+ PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
+ PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_mcdi_set_workaround(
+ __in efx_nic_t *enp,
+ __in uint32_t type,
+ __in boolean_t enabled,
+ __out_opt uint32_t *flagsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_WORKAROUND_IN_LEN,
+ MC_CMD_WORKAROUND_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_WORKAROUND;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_WORKAROUND_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type);
+ MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 1 : 0);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (flagsp != NULL) {
+ if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+ *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS);
+ else
+ *flagsp = 0;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_workarounds(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *implementedp,
+ __out_opt uint32_t *enabledp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_GET_WORKAROUNDS_OUT_LEN];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_WORKAROUNDS;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (implementedp != NULL) {
+ *implementedp =
+ MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+ }
+
+ if (enabledp != NULL) {
+ *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED);
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Size of media information page in accordance with SFF-8472 and SFF-8436.
+ * The same page size is used by the MCDI interface.
+ */
+#define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80
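+
+/*
+ * The SFF-8472 and SFF-8436 2-wire address spaces are 256 bytes, split into
+ * a lower page (0x00 - 0x7f) and an upper page (0x80 - 0xff). Each MCDI page
+ * therefore covers one 128-byte half; the mapping from device address and
+ * offset to MCDI page is done in efx_mcdi_phy_module_get_info() below.
+ */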
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_phy_media_info(
+ __in efx_nic_t *enp,
+ __in uint32_t mcdi_page,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE))];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length =
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used !=
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) !=
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ (void) memcpy(data,
+ MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
+ len);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * 2-wire device address of the base information in accordance with SFF-8472
+ * Diagnostic Monitoring Interface for Optical Transceivers section
+ * 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE 0xA0
+
+/*
+ * 2-wire device address of the digital diagnostics monitoring interface
+ * in accordance with SFF-8472 Diagnostic Monitoring Interface for Optical
+ * Transceivers section 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM 0xA2
+
+/*
+ * Hard wired 2-wire device address for QSFP+ in accordance with SFF-8436
+ * QSFP+ 10 Gbs 4X PLUGGABLE TRANSCEIVER section 7.4 Device Addressing and
+ * Operation.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP 0xA0
+
+ __checkReturn efx_rc_t
+efx_mcdi_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_rc_t rc;
+ uint32_t mcdi_lower_page;
+ uint32_t mcdi_upper_page;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ /*
+ * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages.
+ * The offset-plus-length interface allows access to page 0 only,
+ * i.e. non-zero upper pages are not accessible.
+ * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6
+ * QSFP+ Memory Map for details on how information is structured
+ * and accessible.
+ */
+ switch (epp->ep_fixed_port_type) {
+ case EFX_PHY_MEDIA_SFP_PLUS:
+ /*
+ * In accordance with SFF-8472 Diagnostic Monitoring
+ * Interface for Optical Transceivers section 4 Memory
+ * Organization two 2-wire addresses are defined.
+ */
+ switch (dev_addr) {
+ /* Base information */
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE:
+ /*
+ * MCDI page 0 should be used to access lower
+ * page 0 (0x00 - 0x7f) at the device address 0xA0.
+ */
+ mcdi_lower_page = 0;
+ /*
+ * MCDI page 1 should be used to access upper
+ * page 0 (0x80 - 0xff) at the device address 0xA0.
+ */
+ mcdi_upper_page = 1;
+ break;
+ /* Diagnostics */
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM:
+ /*
+ * MCDI page 2 should be used to access lower
+ * page 0 (0x00 - 0x7f) at the device address 0xA2.
+ */
+ mcdi_lower_page = 2;
+ /*
+ * MCDI page 3 should be used to access upper
+ * page 0 (0x80 - 0xff) at the device address 0xA2.
+ */
+ mcdi_upper_page = 3;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ case EFX_PHY_MEDIA_QSFP_PLUS:
+ switch (dev_addr) {
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP:
+ /*
+ * MCDI page -1 should be used to access lower page 0
+ * (0x00 - 0x7f).
+ */
+ mcdi_lower_page = (uint32_t)-1;
+ /*
+ * MCDI page 0 should be used to access upper page 0
+ * (0x80 - 0xff).
+ */
+ mcdi_upper_page = 0;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
+ uint8_t read_len =
+ MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset);
+
+ rc = efx_mcdi_get_phy_media_info(enp,
+ mcdi_lower_page, offset, read_len, data);
+ if (rc != 0)
+ goto fail2;
+
+ data += read_len;
+ len -= read_len;
+
+ offset = 0;
+ } else {
+ offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE;
+ }
+
+ if (len > 0) {
+ EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+ EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ rc = efx_mcdi_get_phy_media_info(enp,
+ mcdi_upper_page, offset, len, data);
+ if (rc != 0)
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
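+
+/*
+ * Illustrative usage sketch: to read the SFP+ vendor name (bytes 20..35 of
+ * the 0xA0 address space, per SFF-8472) a caller could use:
+ *
+ *	uint8_t vendor[16];
+ *
+ *	rc = efx_mcdi_phy_module_get_info(enp,
+ *	    EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE, 20, sizeof (vendor),
+ *	    vendor);
+ */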
+
+#endif /* EFSYS_OPT_MCDI */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_mcdi.h b/usr/src/uts/common/io/sfxge/common/efx_mcdi.h
new file mode 100644
index 0000000000..b543c3be26
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_mcdi.h
@@ -0,0 +1,406 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_MCDI_H
+#define _SYS_EFX_MCDI_H
+
+#include "efx.h"
+#include "efx_regs_mcdi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * A reboot/assertion causes the MCDI status word to be set after the
+ * command word is set or a REBOOT event is sent. If we notice a reboot
+ * via these mechanisms then wait 10ms for the status word to be set.
+ */
+#define EFX_MCDI_STATUS_SLEEP_US 10000
+
+struct efx_mcdi_req_s {
+ boolean_t emr_quiet;
+ /* Inputs: Command #, input buffer and length */
+ unsigned int emr_cmd;
+ uint8_t *emr_in_buf;
+ size_t emr_in_length;
+ /* Outputs: retcode, buffer, length, and length used */
+ efx_rc_t emr_rc;
+ uint8_t *emr_out_buf;
+ size_t emr_out_length;
+ size_t emr_out_length_used;
+ /* Internals: low level transport details */
+ unsigned int emr_err_code;
+ unsigned int emr_err_arg;
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ uint32_t emr_proxy_handle;
+#endif
+};
+
+typedef struct efx_mcdi_iface_s {
+ unsigned int emi_port;
+ unsigned int emi_max_version;
+ unsigned int emi_seq;
+ efx_mcdi_req_t *emi_pending_req;
+ boolean_t emi_ev_cpl;
+ boolean_t emi_new_epoch;
+ int emi_aborted;
+ uint32_t emi_poll_cnt;
+ uint32_t emi_mc_reboot_status;
+} efx_mcdi_iface_t;
+
+extern void
+efx_mcdi_execute(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp);
+
+extern void
+efx_mcdi_execute_quiet(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp);
+
+extern void
+efx_mcdi_ev_cpl(
+ __in efx_nic_t *enp,
+ __in unsigned int seq,
+ __in unsigned int outlen,
+ __in int errcode);
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+extern __checkReturn efx_rc_t
+efx_mcdi_get_proxy_handle(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *handlep);
+
+extern void
+efx_mcdi_ev_proxy_response(
+ __in efx_nic_t *enp,
+ __in unsigned int handle,
+ __in unsigned int status);
+#endif
+
+extern void
+efx_mcdi_ev_death(
+ __in efx_nic_t *enp,
+ __in int rc);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_request_errcode(
+ __in unsigned int err);
+
+extern void
+efx_mcdi_raise_exception(
+ __in efx_nic_t *enp,
+ __in_opt efx_mcdi_req_t *emrp,
+ __in int rc);
+
+typedef enum efx_mcdi_boot_e {
+ EFX_MCDI_BOOT_PRIMARY,
+ EFX_MCDI_BOOT_SECONDARY,
+ EFX_MCDI_BOOT_ROM,
+} efx_mcdi_boot_t;
+
+extern __checkReturn efx_rc_t
+efx_mcdi_version(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(4) uint16_t versionp[4],
+ __out_opt uint32_t *buildp,
+ __out_opt efx_mcdi_boot_t *statusp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_read_assertion(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_exit_assertion_handler(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_drv_attach(
+ __in efx_nic_t *enp,
+ __in boolean_t attach);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_board_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *board_typep,
+ __out_opt efx_dword_t *capabilitiesp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_phy_cfg(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_firmware_update_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_macaddr_change_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_link_control_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_spoofing_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+
+#if EFSYS_OPT_BIST
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+extern __checkReturn efx_rc_t
+efx_mcdi_bist_enable_offline(
+ __in efx_nic_t *enp);
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+extern __checkReturn efx_rc_t
+efx_mcdi_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+#endif /* EFSYS_OPT_BIST */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_resource_limits(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *nevqp,
+ __out_opt uint32_t *nrxqp,
+ __out_opt uint32_t *ntxqp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_log_ctrl(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period,
+ __in boolean_t events);
+
+
+#if EFSYS_OPT_LOOPBACK
+extern __checkReturn efx_rc_t
+efx_mcdi_get_loopback_modes(
+ __in efx_nic_t *enp);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
+
+#define MCDI_IN(_emr, _type, _ofst) \
+ ((_type *)((_emr).emr_in_buf + (_ofst)))
+
+#define MCDI_IN2(_emr, _type, _ofst) \
+ MCDI_IN(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_IN_SET_BYTE(_emr, _ofst, _value) \
+ EFX_POPULATE_BYTE_1(*MCDI_IN2(_emr, efx_byte_t, _ofst), \
+ EFX_BYTE_0, _value)
+
+#define MCDI_IN_SET_WORD(_emr, _ofst, _value) \
+ EFX_POPULATE_WORD_1(*MCDI_IN2(_emr, efx_word_t, _ofst), \
+ EFX_WORD_0, _value)
+
+#define MCDI_IN_SET_DWORD(_emr, _ofst, _value) \
+ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ EFX_DWORD_0, _value)
+
+#define MCDI_IN_SET_DWORD_FIELD(_emr, _ofst, _field, _value) \
+ EFX_SET_DWORD_FIELD(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field, _value)
+
+#define MCDI_IN_POPULATE_DWORD_1(_emr, _ofst, _field1, _value1) \
+ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1)
+
+#define MCDI_IN_POPULATE_DWORD_2(_emr, _ofst, _field1, _value1, \
+ _field2, _value2) \
+ EFX_POPULATE_DWORD_2(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2)
+
+#define MCDI_IN_POPULATE_DWORD_3(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_DWORD_3(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3)
+
+#define MCDI_IN_POPULATE_DWORD_4(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4) \
+ EFX_POPULATE_DWORD_4(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4)
+
+#define MCDI_IN_POPULATE_DWORD_5(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5) \
+ EFX_POPULATE_DWORD_5(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5)
+
+#define MCDI_IN_POPULATE_DWORD_6(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_DWORD_6(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6)
+
+#define MCDI_IN_POPULATE_DWORD_7(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7) \
+ EFX_POPULATE_DWORD_7(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7)
+
+#define MCDI_IN_POPULATE_DWORD_8(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8) \
+ EFX_POPULATE_DWORD_8(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8)
+
+#define MCDI_IN_POPULATE_DWORD_9(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_DWORD_9(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8, \
+ MC_CMD_ ## _field9, _value9)
+
+#define MCDI_IN_POPULATE_DWORD_10(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8, _field9, _value9, _field10, _value10) \
+ EFX_POPULATE_DWORD_10(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8, \
+ MC_CMD_ ## _field9, _value9, \
+ MC_CMD_ ## _field10, _value10)
+
+#define MCDI_OUT(_emr, _type, _ofst) \
+ ((_type *)((_emr).emr_out_buf + (_ofst)))
+
+#define MCDI_OUT2(_emr, _type, _ofst) \
+ MCDI_OUT(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_OUT_BYTE(_emr, _ofst) \
+ EFX_BYTE_FIELD(*MCDI_OUT2(_emr, efx_byte_t, _ofst), \
+ EFX_BYTE_0)
+
+#define MCDI_OUT_WORD(_emr, _ofst) \
+ EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \
+ EFX_WORD_0)
+
+#define MCDI_OUT_DWORD(_emr, _ofst) \
+ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
+ EFX_DWORD_0)
+
+#define MCDI_OUT_DWORD_FIELD(_emr, _ofst, _field) \
+ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field)
+
+#define MCDI_EV_FIELD(_eqp, _field) \
+ EFX_QWORD_FIELD(*_eqp, MCDI_EVENT_ ## _field)
+
+#define MCDI_CMD_DWORD_FIELD(_edp, _field) \
+ EFX_DWORD_FIELD(*_edp, MC_CMD_ ## _field)
+
+#define EFX_MCDI_HAVE_PRIVILEGE(mask, priv) \
+ (((mask) & (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) == \
+ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv))
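+
+/*
+ * Illustrative sketch: a mask obtained from efx_mcdi_privilege_mask() can be
+ * tested with this macro, e.g. (assuming the GRP_LINK privilege group name
+ * defined in efx_regs_mcdi.h):
+ *
+ *	if (EFX_MCDI_HAVE_PRIVILEGE(mask, LINK))
+ *		... this function may control the link ...
+ */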
+
+typedef enum efx_mcdi_feature_id_e {
+ EFX_MCDI_FEATURE_FW_UPDATE = 0,
+ EFX_MCDI_FEATURE_LINK_CONTROL,
+ EFX_MCDI_FEATURE_MACADDR_CHANGE,
+ EFX_MCDI_FEATURE_MAC_SPOOFING,
+ EFX_MCDI_FEATURE_NIDS
+} efx_mcdi_feature_id_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_MCDI_H */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_mon.c b/usr/src/uts/common/io/sfxge/common/efx_mon.c
new file mode 100644
index 0000000000..6805d5c4ae
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_mon.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_NAMES
+
+static const char *__efx_mon_name[] = {
+ "",
+ "sfx90x0",
+ "sfx91x0",
+ "sfx92x0"
+};
+
+ const char *
+efx_mon_name(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
+ EFSYS_ASSERT3U(encp->enc_mon_type, <, EFX_MON_NTYPES);
+ return (__efx_mon_name[encp->enc_mon_type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+#if EFSYS_OPT_MON_MCDI
+static const efx_mon_ops_t __efx_mon_mcdi_ops = {
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MON_STATS */
+};
+#endif
+
+
+ __checkReturn efx_rc_t
+efx_mon_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mon_t *emp = &(enp->en_mon);
+ const efx_mon_ops_t *emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enp->en_mod_flags & EFX_MOD_MON) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_MON;
+
+ emp->em_type = encp->enc_mon_type;
+
+ EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
+ switch (emp->em_type) {
+#if EFSYS_OPT_MON_MCDI
+ case EFX_MON_SFC90X0:
+ case EFX_MON_SFC91X0:
+ case EFX_MON_SFC92X0:
+ emop = &__efx_mon_mcdi_ops;
+ break;
+#endif
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ emp->em_emop = emop;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ emp->em_type = EFX_MON_INVALID;
+
+ enp->en_mod_flags &= ~EFX_MOD_MON;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_MON_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED MonitorStatNamesBlock 31f437eafb0b0437 */
+static const char *__mon_stat_name[] = {
+ "value_2_5v",
+ "value_vccp1",
+ "value_vcc",
+ "value_5v",
+ "value_12v",
+ "value_vccp2",
+ "value_ext_temp",
+ "value_int_temp",
+ "value_ain1",
+ "value_ain2",
+ "controller_cooling",
+ "ext_cooling",
+ "1v",
+ "1_2v",
+ "1_8v",
+ "3_3v",
+ "1_2va",
+ "vref",
+ "vaoe",
+ "aoe_temperature",
+ "psu_aoe_temperature",
+ "psu_temperature",
+ "fan0",
+ "fan1",
+ "fan2",
+ "fan3",
+ "fan4",
+ "vaoe_in",
+ "iaoe",
+ "iaoe_in",
+ "nic_power",
+ "0_9v",
+ "i0_9v",
+ "i1_2v",
+ "0_9v_adc",
+ "controller_temperature2",
+ "vreg_temperature",
+ "vreg_0_9v_temperature",
+ "vreg_1_2v_temperature",
+ "int_vptat",
+ "controller_internal_adc_temperature",
+ "ext_vptat",
+ "controller_external_adc_temperature",
+ "ambient_temperature",
+ "airflow",
+ "vdd08d_vss08d_csr",
+ "vdd08d_vss08d_csr_extadc",
+ "hotpoint_temperature",
+ "phy_power_switch_port0",
+ "phy_power_switch_port1",
+ "mum_vcc",
+ "0v9_a",
+ "i0v9_a",
+ "0v9_a_temp",
+ "0v9_b",
+ "i0v9_b",
+ "0v9_b_temp",
+ "ccom_avreg_1v2_supply",
+ "ccom_avreg_1v2_supply_ext_adc",
+ "ccom_avreg_1v8_supply",
+ "ccom_avreg_1v8_supply_ext_adc",
+ "controller_master_vptat",
+ "controller_master_internal_temp",
+ "controller_master_vptat_ext_adc",
+ "controller_master_internal_temp_ext_adc",
+ "controller_slave_vptat",
+ "controller_slave_internal_temp",
+ "controller_slave_vptat_ext_adc",
+ "controller_slave_internal_temp_ext_adc",
+ "sodimm_vout",
+ "sodimm_0_temp",
+ "sodimm_1_temp",
+ "phy0_vcc",
+ "phy1_vcc",
+ "controller_tdiode_temp",
+ "board_front_temp",
+ "board_back_temp",
+};
+
+/* END MKCONFIG GENERATED MonitorStatNamesBlock */
+
+extern const char *
+efx_mon_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MON_NSTATS);
+ return (__mon_stat_name[id]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn efx_rc_t
+efx_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+ const efx_mon_ops_t *emop = emp->em_emop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ return (emop->emo_stats_update(enp, esmp, values));
+}
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+ void
+efx_mon_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ emp->em_emop = NULL;
+
+ emp->em_type = EFX_MON_INVALID;
+
+ enp->en_mod_flags &= ~EFX_MOD_MON;
+}
diff --git a/usr/src/uts/common/io/sfxge/common/efx_nic.c b/usr/src/uts/common/io/sfxge/common/efx_nic.c
new file mode 100644
index 0000000000..934a2533c0
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_nic.c
@@ -0,0 +1,963 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_family(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __out efx_family_t *efp)
+{
+ if (venid == EFX_PCI_VENID_SFC) {
+ switch (devid) {
+#if EFSYS_OPT_SIENA
+ case EFX_PCI_DEVID_SIENA_F1_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Siena.
+ * manftest must be able to cope with this device id.
+ */
+ *efp = EFX_FAMILY_SIENA;
+ return (0);
+
+ case EFX_PCI_DEVID_BETHPAGE:
+ case EFX_PCI_DEVID_SIENA:
+ *efp = EFX_FAMILY_SIENA;
+ return (0);
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Huntington.
+ * manftest must be able to cope with this device id.
+ */
+ *efp = EFX_FAMILY_HUNTINGTON;
+ return (0);
+
+ case EFX_PCI_DEVID_FARMINGDALE:
+ case EFX_PCI_DEVID_GREENPORT:
+ *efp = EFX_FAMILY_HUNTINGTON;
+ return (0);
+
+ case EFX_PCI_DEVID_FARMINGDALE_VF:
+ case EFX_PCI_DEVID_GREENPORT_VF:
+ *efp = EFX_FAMILY_HUNTINGTON;
+ return (0);
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_PCI_DEVID_MEDFORD_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Medford.
+ * manftest must be able to cope with this device id.
+ */
+ *efp = EFX_FAMILY_MEDFORD;
+ return (0);
+
+ case EFX_PCI_DEVID_MEDFORD:
+ *efp = EFX_FAMILY_MEDFORD;
+ return (0);
+
+ case EFX_PCI_DEVID_MEDFORD_VF:
+ *efp = EFX_FAMILY_MEDFORD;
+ return (0);
+#endif /* EFSYS_OPT_MEDFORD */
+
+ case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */
+ default:
+ break;
+ }
+ }
+
+ *efp = EFX_FAMILY_INVALID;
+ return (ENOTSUP);
+}
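+
+/*
+ * Illustrative usage sketch: a driver attach path typically maps the PCI IDs
+ * to a family and then creates the NIC object (venid/devid and the efsys
+ * handles below are the caller's), e.g.
+ *
+ *	efx_family_t family;
+ *	efx_nic_t *enp;
+ *
+ *	if (efx_family(venid, devid, &family) != 0)
+ *		return (ENOTSUP);
+ *	if ((rc = efx_nic_create(family, esip, esbp, eslp, &enp)) != 0)
+ *		... handle the error ...
+ */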
+
+
+#define EFX_BIU_MAGIC0 0x01234567
+#define EFX_BIU_MAGIC1 0xfedcba98
+
+ __checkReturn efx_rc_t
+efx_nic_biu_test(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ /*
+ * Write magic values to scratch registers 0 and 1, then
+ * verify that the values were written correctly. Interleave
+ * the accesses to ensure that the BIU is not just reading
+ * back the cached value that was last written.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ /*
+ * Perform the same test, with the values swapped. This
+ * ensures that subsequent tests don't start with the correct
+ * values already written into the scratch registers.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_SIENA
+
+static const efx_nic_ops_t __efx_nic_siena_ops = {
+ siena_nic_probe, /* eno_probe */
+ NULL, /* eno_board_cfg */
+ NULL, /* eno_set_drv_limits */
+ siena_nic_reset, /* eno_reset */
+ siena_nic_init, /* eno_init */
+ NULL, /* eno_get_vi_pool */
+ NULL, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ siena_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ siena_nic_fini, /* eno_fini */
+ siena_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static const efx_nic_ops_t __efx_nic_hunt_ops = {
+ ef10_nic_probe, /* eno_probe */
+ hunt_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+
+static const efx_nic_ops_t __efx_nic_medford_ops = {
+ ef10_nic_probe, /* eno_probe */
+ medford_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_nic_create(
+ __in efx_family_t family,
+ __in efsys_identifier_t *esip,
+ __in efsys_bar_t *esbp,
+ __in efsys_lock_t *eslp,
+ __deref_out efx_nic_t **enpp)
+{
+ efx_nic_t *enp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(family, >, EFX_FAMILY_INVALID);
+ EFSYS_ASSERT3U(family, <, EFX_FAMILY_NTYPES);
+
+ /* Allocate a NIC object */
+ EFSYS_KMEM_ALLOC(esip, sizeof (efx_nic_t), enp);
+
+ if (enp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_magic = EFX_NIC_MAGIC;
+
+ switch (family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ enp->en_enop = &__efx_nic_siena_ops;
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LFSR_HASH_INSERT |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_WOL |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_LOOKAHEAD_SPLIT |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_TX_SRC_FILTERS;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ enp->en_enop = &__efx_nic_hunt_ops;
+ /* FIXME: Add WOL support */
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ enp->en_enop = &__efx_nic_medford_ops;
+ /*
+ * FW_ASSISTED_TSO omitted as Medford only supports firmware
+ * assisted TSO version 2, not the v1 scheme used on Huntington.
+ */
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ enp->en_family = family;
+ enp->en_esip = esip;
+ enp->en_esbp = esbp;
+ enp->en_eslp = eslp;
+
+ *enpp = enp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ enp->en_magic = 0;
+
+ /* Free the NIC object */
+ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_probe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+#if EFSYS_OPT_MCDI
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+#endif /* EFSYS_OPT_MCDI */
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE));
+
+ enop = enp->en_enop;
+ if ((rc = enop->eno_probe(enp)) != 0)
+ goto fail1;
+
+ if ((rc = efx_phy_probe(enp)) != 0)
+ goto fail2;
+
+ enp->en_mod_flags |= EFX_MOD_PROBE;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ enop->eno_unprobe(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enop->eno_set_drv_limits != NULL) {
+ if ((rc = enop->eno_set_drv_limits(enp, edlp)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enop->eno_get_bar_region == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if ((rc = (enop->eno_get_bar_region)(enp,
+ region, offsetp, sizep)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *evq_countp,
+ __out uint32_t *rxq_countp,
+ __out uint32_t *txq_countp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enop->eno_get_vi_pool != NULL) {
+ uint32_t vi_count = 0;
+
+ if ((rc = (enop->eno_get_vi_pool)(enp, &vi_count)) != 0)
+ goto fail1;
+
+ *evq_countp = vi_count;
+ *rxq_countp = vi_count;
+ *txq_countp = vi_count;
+ } else {
+ /* Use NIC limits as default value */
+ *evq_countp = encp->enc_evq_limit;
+ *rxq_countp = encp->enc_rxq_limit;
+ *txq_countp = encp->enc_txq_limit;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_init(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enp->en_mod_flags & EFX_MOD_NIC) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = enop->eno_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_mod_flags |= EFX_MOD_NIC;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_nic_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_NIC);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+
+ enop->eno_fini(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_NIC;
+}
+
+ void
+efx_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+#if EFSYS_OPT_MCDI
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+#endif /* EFSYS_OPT_MCDI */
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+
+ efx_phy_unprobe(enp);
+
+ enop->eno_unprobe(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_PROBE;
+}
+
+ void
+efx_nic_destroy(
+ __in efx_nic_t *enp)
+{
+ efsys_identifier_t *esip = enp->en_esip;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
+
+ enp->en_family = 0;
+ enp->en_esip = NULL;
+ enp->en_esbp = NULL;
+ enp->en_eslp = NULL;
+
+ enp->en_enop = NULL;
+
+ enp->en_magic = 0;
+
+ /* Free the NIC object */
+ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
+}
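+
+/*
+ * Illustrative bring-up/tear-down order (a sketch, not part of the driver):
+ * the EFX_MOD_* assertions in the functions above imply the sequence below.
+ * Error handling is elided; the efx_nic_create() argument order is assumed
+ * from the fields it stores, drv_limits is populated by the driver, and the
+ * MCDI module must be initialized (not shown) between creation and
+ * efx_nic_probe(), per the EFX_MOD_MCDI assertion.
+ *
+ *	efx_nic_t *enp;
+ *	efx_drv_limits_t drv_limits;
+ *
+ *	(void) efx_nic_create(family, esip, esbp, eslp, &enp);
+ *	(void) efx_nic_probe(enp);
+ *	(void) efx_nic_set_drv_limits(enp, &drv_limits);
+ *	(void) efx_nic_init(enp);
+ *	...
+ *	efx_nic_fini(enp);
+ *	efx_nic_unprobe(enp);
+ *	efx_nic_destroy(enp);
+ */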
+
+ __checkReturn efx_rc_t
+efx_nic_reset(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ unsigned int mod_flags;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+ /*
+ * All modules except the MCDI, PROBE, NVRAM, VPD, MON, LIC
+ * (which we do not reset here) must have been shut down or never
+ * initialized.
+ *
+	 * A rule of thumb here is: if the controller or MC reboots, is *any*
+	 * state lost? If it is lost and needs reapplying, then the module
+	 * *must* not be initialized during the reset.
+ */
+ mod_flags = enp->en_mod_flags;
+ mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM |
+ EFX_MOD_VPD | EFX_MOD_MON | EFX_MOD_LIC);
+ EFSYS_ASSERT3U(mod_flags, ==, 0);
+ if (mod_flags != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = enop->eno_reset(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ const efx_nic_cfg_t *
+efx_nic_cfg_get(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ return (&(enp->en_nic_cfg));
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+
+ if ((rc = enop->eno_register_test(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_test_registers(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in size_t count)
+{
+ unsigned int bit;
+ efx_oword_t original;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ efx_rc_t rc;
+
+ while (count > 0) {
+ /* This function is only suitable for registers */
+ EFSYS_ASSERT(rsp->rows == 1);
+
+ /* bit sweep on and off */
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+ for (bit = 0; bit < 128; bit++) {
+ /* Is this bit in the mask? */
+			if (~(rsp->mask.eo_u32[bit >> 5]) &
+			    (1U << (bit & 0x1f)))
+				continue;
+
+ /* Test this bit can be set in isolation */
+ reg = original;
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFX_SET_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ /* Test this bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, rsp->mask);
+ EFX_CLEAR_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail2;
+ }
+ }
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+
+ --count;
+ ++rsp;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_test_tables(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in efx_pattern_type_t pattern,
+ __in size_t count)
+{
+ efx_sram_pattern_fn_t func;
+ unsigned int index;
+ unsigned int address;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[pattern];
+
+ while (count > 0) {
+ /* Write */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_WRITEO(enp->en_esbp, address, &reg, B_TRUE);
+
+ address += rsp->step;
+ }
+
+ /* Read */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ address += rsp->step;
+ }
+
+ ++rsp;
+ --count;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_LOOPBACK
+
+extern void
+efx_loopback_mask(
+ __in efx_loopback_kind_t loopback_kind,
+ __out efx_qword_t *maskp)
+{
+ efx_qword_t mask;
+
+ EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS);
+ EFSYS_ASSERT(maskp != NULL);
+
+ /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XPORT == EFX_LOOPBACK_XPORT);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII_WS == EFX_LOOPBACK_XGMII_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS == EFX_LOOPBACK_XAUI_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_FAR ==
+ EFX_LOOPBACK_XAUI_WS_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_NEAR ==
+ EFX_LOOPBACK_XAUI_WS_NEAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_WS == EFX_LOOPBACK_GMII_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS == EFX_LOOPBACK_XFI_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS_FAR ==
+ EFX_LOOPBACK_XFI_WS_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS_WS == EFX_LOOPBACK_PHYXS_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT == EFX_LOOPBACK_PMA_INT);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_NEAR == EFX_LOOPBACK_SD_NEAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FAR == EFX_LOOPBACK_SD_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT_WS ==
+ EFX_LOOPBACK_PMA_INT_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP2_WS ==
+ EFX_LOOPBACK_SD_FEP2_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP1_5_WS ==
+ EFX_LOOPBACK_SD_FEP1_5_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP_WS == EFX_LOOPBACK_SD_FEP_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FES_WS == EFX_LOOPBACK_SD_FES_WS);
+
+ /* Build bitmask of possible loopback types */
+ EFX_ZERO_QWORD(mask);
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_OFF) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_OFF);
+ }
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_MAC) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ /*
+ * The "MAC" grouping has historically been used by drivers to
+ * mean loopbacks supported by on-chip hardware. Keep that
+ * meaning here, and include on-chip PHY layer loopbacks.
+ */
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_DATA);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMAC);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGXS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGBR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_INT);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_NEAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_FAR);
+ }
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_PHY) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ /*
+ * The "PHY" grouping has historically been used by drivers to
+ * mean loopbacks supported by off-chip hardware. Keep that
+ * meaning here.
+ */
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GPHY);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PHY_XS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PCS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_PMD);
+ }
+
+ *maskp = mask;
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_loopback_modes(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN,
+ MC_CMD_GET_LOOPBACK_MODES_OUT_LEN)];
+ efx_qword_t mask;
+ efx_qword_t modes;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ /*
+ * We assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree
+ * in efx_loopback_mask() and in siena_phy.c:siena_phy_get_link().
+ */
+ efx_loopback_mask(EFX_LOOPBACK_KIND_ALL, &mask);
+
+ EFX_AND_QWORD(mask,
+ *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_SUGGESTED));
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_100M);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_100FDX] = modes;
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_1G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_1000FDX] = modes;
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_10G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_10000FDX] = modes;
+
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) {
+ /* Response includes 40G loopback modes */
+ modes =
+ *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_40G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_40000FDX] = modes;
+ }
+
+ EFX_ZERO_QWORD(modes);
+ EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]);
+ encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
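+
+/*
+ * Note: the enc_loopback_types[] masks built above are consumed by
+ * efx_port_loopback_set() (efx_port.c in this change), which checks the
+ * requested type against the mask for the requested link mode with
+ * EFX_TEST_QWORD_BIT() and fails the request with ENOTSUP if it is clear.
+ */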
+
+#endif /* EFSYS_OPT_LOOPBACK */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_nvram.c b/usr/src/uts/common/io/sfxge/common/efx_nvram.c
new file mode 100644
index 0000000000..0e02a6f5bc
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_nvram.c
@@ -0,0 +1,980 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_SIENA
+
+static const efx_nvram_ops_t __efx_nvram_siena_ops = {
+#if EFSYS_OPT_DIAG
+ siena_nvram_test, /* envo_test */
+#endif /* EFSYS_OPT_DIAG */
+ siena_nvram_type_to_partn, /* envo_type_to_partn */
+ siena_nvram_partn_size, /* envo_partn_size */
+ siena_nvram_partn_rw_start, /* envo_partn_rw_start */
+ siena_nvram_partn_read, /* envo_partn_read */
+ siena_nvram_partn_erase, /* envo_partn_erase */
+ siena_nvram_partn_write, /* envo_partn_write */
+ siena_nvram_partn_rw_finish, /* envo_partn_rw_finish */
+ siena_nvram_partn_get_version, /* envo_partn_get_version */
+ siena_nvram_partn_set_version, /* envo_partn_set_version */
+	NULL,				/* envo_buffer_validate */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_nvram_ops_t __efx_nvram_ef10_ops = {
+#if EFSYS_OPT_DIAG
+ ef10_nvram_test, /* envo_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nvram_type_to_partn, /* envo_type_to_partn */
+ ef10_nvram_partn_size, /* envo_partn_size */
+ ef10_nvram_partn_rw_start, /* envo_partn_rw_start */
+ ef10_nvram_partn_read, /* envo_partn_read */
+ ef10_nvram_partn_erase, /* envo_partn_erase */
+ ef10_nvram_partn_write, /* envo_partn_write */
+ ef10_nvram_partn_rw_finish, /* envo_partn_rw_finish */
+ ef10_nvram_partn_get_version, /* envo_partn_get_version */
+ ef10_nvram_partn_set_version, /* envo_partn_set_version */
+ ef10_nvram_buffer_validate, /* envo_buffer_validate */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_nvram_init(
+ __in efx_nic_t *enp)
+{
+ const efx_nvram_ops_t *envop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NVRAM));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ envop = &__efx_nvram_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ enp->en_envop = envop;
+ enp->en_mod_flags |= EFX_MOD_NVRAM;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_nvram_test(
+ __in efx_nic_t *enp)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_test(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+ __checkReturn efx_rc_t
+efx_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_size(enp, partn, sizep)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ *sizep = 0;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_get_version(enp, partn,
+ subtypep, version)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt size_t *chunk_sizep)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_rw_start(enp, partn, chunk_sizep)) != 0)
+ goto fail2;
+
+ enp->en_nvram_locked = type;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_read(enp, partn, offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ unsigned int offset = 0;
+ size_t size = 0;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_size(enp, partn, &size)) != 0)
+ goto fail2;
+
+ if ((rc = envop->envo_partn_erase(enp, partn, offset, size)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_write(enp, partn, offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if (envop->envo_type_to_partn(enp, type, &partn) == 0)
+ envop->envo_partn_rw_finish(enp, partn);
+
+ enp->en_nvram_locked = EFX_NVRAM_INVALID;
+}
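+
+/*
+ * Illustrative read sequence (a sketch; error handling elided): the
+ * en_nvram_locked assertions above mean only one NVRAM type may be open at
+ * a time, bracketed by efx_nvram_rw_start() and efx_nvram_rw_finish().
+ * "buf" is assumed to be a caller buffer of at least "size" bytes.
+ *
+ *	size_t size, chunk, off, len;
+ *
+ *	(void) efx_nvram_size(enp, type, &size);
+ *	(void) efx_nvram_rw_start(enp, type, &chunk);
+ *	for (off = 0; off < size; off += len) {
+ *		len = (size - off < chunk) ? size - off : chunk;
+ *		(void) efx_nvram_read_chunk(enp, type, off, buf + off, len);
+ *	}
+ *	efx_nvram_rw_finish(enp, type);
+ */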
+
+ __checkReturn efx_rc_t
+efx_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_ecount(4) uint16_t version[4])
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ /*
+ * The Siena implementation of envo_set_version() will attempt to
+ * acquire the NVRAM_UPDATE lock for the DYNAMIC_CONFIG sector.
+ * Therefore, you can't have already acquired the NVRAM_UPDATE lock.
+ */
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_set_version(enp, partn, version)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Validate buffer contents (before writing to flash) */
+ __checkReturn efx_rc_t
+efx_nvram_validate(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+	if (envop->envo_buffer_validate != NULL &&
+ ((rc = envop->envo_buffer_validate(enp, partn,
+ partn_data, partn_size)) != 0))
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+void
+efx_nvram_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ enp->en_envop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_NVRAM;
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+/*
+ * Internal MCDI request handling
+ */
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_partitions(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __out unsigned int *npartnp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_PARTITIONS_IN_LEN,
+ MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX)];
+ unsigned int npartn;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_PARTITIONS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_PARTITIONS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ npartn = MCDI_OUT_DWORD(req, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LEN(npartn)) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (size < npartn * sizeof (uint32_t)) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ *npartnp = npartn;
+
+ (void) memcpy(data,
+ MCDI_OUT2(req, void, NVRAM_PARTITIONS_OUT_TYPE_ID),
+ (npartn * sizeof (uint32_t)));
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_metadata(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4],
+ __out_bcount_opt(size) char *descp,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_METADATA_IN_LEN,
+ MC_CMD_NVRAM_METADATA_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_METADATA;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_METADATA_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_METADATA_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_METADATA_IN_TYPE, partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_METADATA_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_SUBTYPE_VALID)) {
+ *subtypep = MCDI_OUT_DWORD(req, NVRAM_METADATA_OUT_SUBTYPE);
+ } else {
+ *subtypep = 0;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_VERSION_VALID)) {
+ version[0] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_W);
+ version[1] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_X);
+ version[2] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Y);
+ version[3] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Z);
+ } else {
+ version[0] = version[1] = version[2] = version[3] = 0;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_DESCRIPTION_VALID)) {
+		/* Return the optional description string */
+ if ((descp != NULL) && (size > 0)) {
+ size_t desclen;
+
+ descp[0] = '\0';
+ desclen = (req.emr_out_length_used
+ - MC_CMD_NVRAM_METADATA_OUT_LEN(0));
+
+ EFSYS_ASSERT3U(desclen, <=,
+ MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM);
+
+ if (size < desclen) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ (void) memcpy(descp, MCDI_OUT2(req, char,
+ NVRAM_METADATA_OUT_DESCRIPTION),
+ desclen);
+
+ /* Ensure string is NUL terminated */
+ descp[desclen] = '\0';
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_info(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt size_t *sizep,
+ __out_opt uint32_t *addressp,
+ __out_opt uint32_t *erase_sizep,
+ __out_opt uint32_t *write_sizep)
+{
+ uint8_t payload[MAX(MC_CMD_NVRAM_INFO_IN_LEN,
+ MC_CMD_NVRAM_INFO_V2_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_INFO_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_INFO_IN_TYPE, partn);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (sizep)
+ *sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_SIZE);
+
+ if (addressp)
+ *addressp = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_PHYSADDR);
+
+ if (erase_sizep)
+ *erase_sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_ERASESIZE);
+
+ if (write_sizep) {
+ *write_sizep =
+ (req.emr_out_length_used <
+ MC_CMD_NVRAM_INFO_V2_OUT_LEN) ?
+ 0 : MCDI_OUT_DWORD(req, NVRAM_INFO_V2_OUT_WRITESIZE);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_START_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_START_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_UPDATE_START;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_UPDATE_START_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_UPDATE_START_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_START_IN_TYPE, partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_READ_IN_V2_LEN,
+ MC_CMD_NVRAM_READ_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ if (size > MC_CMD_NVRAM_READ_OUT_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_READ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_READ_IN_V2_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_READ_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_LENGTH, size);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_MODE, mode);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_READ_OUT_LEN(size)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ (void) memcpy(data,
+ MCDI_OUT2(req, uint8_t, NVRAM_READ_OUT_READ_BUFFER),
+ size);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_ERASE_IN_LEN,
+ MC_CMD_NVRAM_ERASE_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_ERASE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_ERASE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_ERASE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_LENGTH, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * The NVRAM_WRITE MCDI command is a V1 command and so is supported by both
+ * Siena and EF10-based boards. However, EF10-based boards support the use
+ * of this command with payloads up to the maximum MCDI V2 payload length.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+	__in_bcount(size)	caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MCDI_CTL_SDU_LEN_MAX_V1,
+ MCDI_CTL_SDU_LEN_MAX_V2)];
+ efx_rc_t rc;
+ size_t max_data_size;
+
+ max_data_size = enp->en_nic_cfg.enc_mcdi_max_payload_length
+ - MC_CMD_NVRAM_WRITE_IN_LEN(0);
+ EFSYS_ASSERT3U(enp->en_nic_cfg.enc_mcdi_max_payload_length, >, 0);
+ EFSYS_ASSERT3U(max_data_size, <,
+ enp->en_nic_cfg.enc_mcdi_max_payload_length);
+
+ if (size > max_data_size) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_WRITE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_WRITE_IN_LEN(size);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_WRITE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_LENGTH, size);
+
+ (void) memcpy(MCDI_IN2(req, uint8_t, NVRAM_WRITE_IN_WRITE_BUFFER),
+ data, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t reboot)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_UPDATE_FINISH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_IN_REBOOT, reboot);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
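+
+/*
+ * Illustrative MCDI-level update sequence (a sketch only; the efx_nvram_*
+ * wrappers above are the interface intended for drivers): a partition
+ * rewrite is bracketed by NVRAM_UPDATE_START/FINISH, with the old contents
+ * erased first. A real caller must also split the write into chunks no
+ * larger than the MCDI payload limit checked in efx_mcdi_nvram_write().
+ *
+ *	(void) efx_mcdi_nvram_update_start(enp, partn);
+ *	(void) efx_mcdi_nvram_erase(enp, partn, 0, partn_size);
+ *	(void) efx_mcdi_nvram_write(enp, partn, 0, data, partn_size);
+ *	(void) efx_mcdi_nvram_update_finish(enp, partn, B_FALSE);
+ */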
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_test(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_TEST_IN_LEN,
+ MC_CMD_NVRAM_TEST_OUT_LEN)];
+ int result;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_TEST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_TEST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_TEST_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_TEST_IN_TYPE, partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_TEST_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ result = MCDI_OUT_DWORD(req, NVRAM_TEST_OUT_RESULT);
+	if (result == MC_CMD_NVRAM_TEST_FAIL) {
+		EFSYS_PROBE1(nvram_test_failure, int, partn);
+
+		rc = EINVAL;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_phy.c b/usr/src/uts/common/io/sfxge/common/efx_phy.c
new file mode 100644
index 0000000000..b75b0abb3f
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_phy.c
@@ -0,0 +1,562 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+static const efx_phy_ops_t __efx_phy_siena_ops = {
+ siena_phy_power, /* epo_power */
+ NULL, /* epo_reset */
+ siena_phy_reconfigure, /* epo_reconfigure */
+ siena_phy_verify, /* epo_verify */
+ siena_phy_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ siena_phy_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ NULL, /* epo_bist_enable_offline */
+ siena_phy_bist_start, /* epo_bist_start */
+ siena_phy_bist_poll, /* epo_bist_poll */
+ siena_phy_bist_stop, /* epo_bist_stop */
+#endif /* EFSYS_OPT_BIST */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_phy_ops_t __efx_phy_ef10_ops = {
+ ef10_phy_power, /* epo_power */
+ NULL, /* epo_reset */
+ ef10_phy_reconfigure, /* epo_reconfigure */
+ ef10_phy_verify, /* epo_verify */
+ ef10_phy_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ ef10_phy_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ /* FIXME: Are these BIST methods appropriate for Medford? */
+ hunt_bist_enable_offline, /* epo_bist_enable_offline */
+ hunt_bist_start, /* epo_bist_start */
+ hunt_bist_poll, /* epo_bist_poll */
+ hunt_bist_stop, /* epo_bist_stop */
+#endif /* EFSYS_OPT_BIST */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_phy_probe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ const efx_phy_ops_t *epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ epp->ep_port = encp->enc_port;
+ epp->ep_phy_type = encp->enc_phy_type;
+
+ /* Hook in operations structure */
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ epop = &__efx_phy_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ epp->ep_epop = epop;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_port = 0;
+ epp->ep_phy_type = 0;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_verify(enp));
+}
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+
+ __checkReturn efx_rc_t
+efx_phy_led_set(
+ __in efx_nic_t *enp,
+ __in efx_phy_led_mode_t mode)
+{
+ efx_nic_cfg_t *encp = (&enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ uint32_t mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (epp->ep_phy_led_mode == mode)
+ goto done;
+
+ mask = (1 << EFX_PHY_LED_DEFAULT);
+ mask |= encp->enc_led_mask;
+
+ if (!((1 << mode) & mask)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(mode, <, EFX_PHY_LED_NMODES);
+ epp->ep_phy_led_mode = mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ void
+efx_phy_adv_cap_get(
+ __in efx_nic_t *enp,
+ __in uint32_t flag,
+ __out uint32_t *maskp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ switch (flag) {
+ case EFX_PHY_CAP_CURRENT:
+ *maskp = epp->ep_adv_cap_mask;
+ break;
+ case EFX_PHY_CAP_DEFAULT:
+ *maskp = epp->ep_default_adv_cap_mask;
+ break;
+ case EFX_PHY_CAP_PERM:
+ *maskp = epp->ep_phy_cap_mask;
+ break;
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+}
+
+ __checkReturn efx_rc_t
+efx_phy_adv_cap_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mask)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ uint32_t old_mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((mask & ~epp->ep_phy_cap_mask) != 0) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (epp->ep_adv_cap_mask == mask)
+ goto done;
+
+ old_mask = epp->ep_adv_cap_mask;
+ epp->ep_adv_cap_mask = mask;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ epp->ep_adv_cap_mask = old_mask;
+ /* Reconfigure for robustness */
+ if (epop->epo_reconfigure(enp) != 0) {
+ /*
+ * We may have an inconsistent view of our advertised speed
+ * capabilities.
+ */
+ EFSYS_ASSERT(0);
+ }
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
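+
+/*
+ * Illustrative sketch (error handling elided): a caller typically derives
+ * the new advertised mask from the permanent capability mask, since any bit
+ * outside ep_phy_cap_mask is rejected with ENOTSUP above. "wanted_mask" is
+ * the caller's own selection.
+ *
+ *	uint32_t cap_mask;
+ *
+ *	efx_phy_adv_cap_get(enp, EFX_PHY_CAP_PERM, &cap_mask);
+ *	cap_mask &= wanted_mask;
+ *	(void) efx_phy_adv_cap_set(enp, cap_mask);
+ */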
+
+ void
+efx_phy_lp_cap_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ *maskp = epp->ep_lp_cap_mask;
+}
+
+ __checkReturn efx_rc_t
+efx_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_oui_get(enp, ouip));
+}
+
+ void
+efx_phy_media_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_media_type_t *typep)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (epp->ep_module_type != EFX_PHY_MEDIA_INVALID)
+ *typep = epp->ep_module_type;
+ else
+ *typep = epp->ep_fixed_port_type;
+}
+
+ __checkReturn efx_rc_t
+efx_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(data != NULL);
+
+ if ((uint32_t)offset + len > 0xff) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_phy_module_get_info(enp, dev_addr,
+ offset, len, data)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED PhyStatNamesBlock d5f79b4bc2c050fe */
+static const char *__efx_phy_stat_name[] = {
+ "oui",
+ "pma_pmd_link_up",
+ "pma_pmd_rx_fault",
+ "pma_pmd_tx_fault",
+ "pma_pmd_rev_a",
+ "pma_pmd_rev_b",
+ "pma_pmd_rev_c",
+ "pma_pmd_rev_d",
+ "pcs_link_up",
+ "pcs_rx_fault",
+ "pcs_tx_fault",
+ "pcs_ber",
+ "pcs_block_errors",
+ "phy_xs_link_up",
+ "phy_xs_rx_fault",
+ "phy_xs_tx_fault",
+ "phy_xs_align",
+ "phy_xs_sync_a",
+ "phy_xs_sync_b",
+ "phy_xs_sync_c",
+ "phy_xs_sync_d",
+ "an_link_up",
+ "an_master",
+ "an_local_rx_ok",
+ "an_remote_rx_ok",
+ "cl22ext_link_up",
+ "snr_a",
+ "snr_b",
+ "snr_c",
+ "snr_d",
+ "pma_pmd_signal_a",
+ "pma_pmd_signal_b",
+ "pma_pmd_signal_c",
+ "pma_pmd_signal_d",
+ "an_complete",
+ "pma_pmd_rev_major",
+ "pma_pmd_rev_minor",
+ "pma_pmd_rev_micro",
+ "pcs_fw_version_0",
+ "pcs_fw_version_1",
+ "pcs_fw_version_2",
+ "pcs_fw_version_3",
+ "pcs_fw_build_yy",
+ "pcs_fw_build_mm",
+ "pcs_fw_build_dd",
+ "pcs_op_mode",
+};
+
+/* END MKCONFIG GENERATED PhyStatNamesBlock */
+
+ const char *
+efx_phy_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_phy_stat_t type)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(type, <, EFX_PHY_NSTATS);
+
+ return (__efx_phy_stat_name[type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn efx_rc_t
+efx_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_stats_update(enp, esmp, stat));
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+efx_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ if (epop->epo_bist_enable_offline == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_enable_offline(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+ __checkReturn efx_rc_t
+efx_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, EFX_BIST_TYPE_UNKNOWN);
+
+ if (epop->epo_bist_start == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_start(enp, type)) != 0)
+ goto fail2;
+
+ epp->ep_current_bist = type;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt uint32_t *value_maskp,
+ __out_ecount_opt(count) unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
+
+ EFSYS_ASSERT(epop->epo_bist_poll != NULL);
+ if (epop->epo_bist_poll == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_poll(enp, type, resultp, value_maskp,
+ valuesp, count)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
+
+ EFSYS_ASSERT(epop->epo_bist_stop != NULL);
+
+ if (epop->epo_bist_stop != NULL)
+ epop->epo_bist_stop(enp, type);
+
+ epp->ep_current_bist = EFX_BIST_TYPE_UNKNOWN;
+}
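+
+/*
+ * Illustrative BIST sequence (a sketch; error handling elided): the
+ * ep_current_bist assertions above require start, poll and stop to use the
+ * same test type, with only one test in flight. EFX_BIST_RESULT_RUNNING is
+ * assumed here to be the in-progress result value from efx.h.
+ *
+ *	efx_bist_result_t result;
+ *
+ *	(void) efx_bist_start(enp, type);
+ *	do {
+ *		(void) efx_bist_poll(enp, type, &result, NULL, NULL, 0);
+ *	} while (result == EFX_BIST_RESULT_RUNNING);
+ *	efx_bist_stop(enp, type);
+ */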
+
+#endif /* EFSYS_OPT_BIST */
+
+ void
+efx_phy_unprobe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ epp->ep_epop = NULL;
+
+ epp->ep_adv_cap_mask = 0;
+
+ epp->ep_port = 0;
+ epp->ep_phy_type = 0;
+}
diff --git a/usr/src/uts/common/io/sfxge/common/efx_phy_ids.h b/usr/src/uts/common/io/sfxge/common/efx_phy_ids.h
new file mode 100644
index 0000000000..fecda26342
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_phy_ids.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_PHY_IDS_H
+#define _SYS_EFX_PHY_IDS_H
+
+#define EFX_PHY_NULL 0
+
+typedef enum efx_phy_type_e { /* GENERATED BY scripts/genfwdef */
+ EFX_PHY_TXC43128 = 1,
+ EFX_PHY_SFX7101 = 3,
+ EFX_PHY_QT2022C2 = 4,
+ EFX_PHY_PM8358 = 6,
+ EFX_PHY_SFT9001A = 8,
+ EFX_PHY_QT2025C = 9,
+ EFX_PHY_SFT9001B = 10,
+ EFX_PHY_QLX111V = 12,
+ EFX_PHY_QT2025_KR = 17,
+ EFX_PHY_AEL3020 = 18,
+ EFX_PHY_XFI_FARMI = 19,
+} efx_phy_type_t;
+
+
+#endif /* _SYS_EFX_PHY_IDS_H */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_port.c b/usr/src/uts/common/io/sfxge/common/efx_port.c
new file mode 100644
index 0000000000..446fd56a2c
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_port.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_port_init(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enp->en_mod_flags & EFX_MOD_PORT) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_PORT;
+
+ epp->ep_mac_type = EFX_MAC_INVALID;
+ epp->ep_link_mode = EFX_LINK_UNKNOWN;
+ epp->ep_mac_drain = B_TRUE;
+
+ /* Configure the MAC */
+ if ((rc = efx_mac_select(enp)) != 0)
+ goto fail1;
+
+ epp->ep_emop->emo_reconfigure(enp);
+
+	/* Pick up the current PHY capabilities */
+ (void) efx_port_poll(enp, NULL);
+
+ /*
+ * Turn on the PHY if available, otherwise reset it, and
+ * reconfigure it with the current configuration.
+ */
+ if (epop->epo_power != NULL) {
+ if ((rc = epop->epo_power(enp, B_TRUE)) != 0)
+ goto fail2;
+ } else {
+ if ((rc = epop->epo_reset(enp)) != 0)
+ goto fail2;
+ }
+
+ EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_PHY);
+ enp->en_reset_flags &= ~EFX_RESET_PHY;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_mod_flags &= ~EFX_MOD_PORT;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_port_poll(
+ __in efx_nic_t *enp,
+ __out_opt efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_link_mode_t ignore_link_mode;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(emop != NULL);
+ EFSYS_ASSERT(!epp->ep_mac_stats_pending);
+
+ if (link_modep == NULL)
+ link_modep = &ignore_link_mode;
+
+ if ((rc = emop->emo_poll(enp, link_modep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+efx_port_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ EFSYS_ASSERT(link_mode < EFX_LINK_NMODES);
+
+ if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode],
+ loopback_type) == 0) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (epp->ep_loopback_type == loopback_type &&
+ epp->ep_loopback_link_mode == link_mode)
+ return (0);
+
+ if ((rc = emop->emo_loopback_set(enp, link_mode, loopback_type)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_NAMES
+
+static const char *__efx_loopback_type_name[] = {
+ "OFF",
+ "DATA",
+ "GMAC",
+ "XGMII",
+ "XGXS",
+ "XAUI",
+ "GMII",
+ "SGMII",
+ "XGBR",
+ "XFI",
+ "XAUI_FAR",
+ "GMII_FAR",
+ "SGMII_FAR",
+ "XFI_FAR",
+ "GPHY",
+ "PHY_XS",
+ "PCS",
+ "PMA_PMD",
+ "XPORT",
+ "XGMII_WS",
+ "XAUI_WS",
+ "XAUI_WS_FAR",
+ "XAUI_WS_NEAR",
+ "GMII_WS",
+ "XFI_WS",
+ "XFI_WS_FAR",
+ "PHYXS_WS",
+ "PMA_INT",
+ "SD_NEAR",
+ "SD_FAR",
+ "PMA_INT_WS",
+ "SD_FEP2_WS",
+ "SD_FEP1_5_WS",
+ "SD_FEP_WS",
+ "SD_FES_WS",
+};
+
+ __checkReturn const char *
+efx_loopback_type_name(
+ __in efx_nic_t *enp,
+ __in efx_loopback_type_t type)
+{
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__efx_loopback_type_name) ==
+ EFX_LOOPBACK_NTYPES);
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(type, <, EFX_LOOPBACK_NTYPES);
+
+ return (__efx_loopback_type_name[type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ void
+efx_port_fini(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(epp->ep_mac_drain);
+
+ epp->ep_emop = NULL;
+ epp->ep_mac_type = EFX_MAC_INVALID;
+ epp->ep_mac_drain = B_FALSE;
+
+ /* Turn off the PHY */
+ if (epop->epo_power != NULL)
+ (void) epop->epo_power(enp, B_FALSE);
+
+ enp->en_mod_flags &= ~EFX_MOD_PORT;
+}
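
For orientation, the following is a minimal, hypothetical sketch of how a consumer of this module might bring the port up and read back the initial link mode, using only the entry points defined above (efx_port_init(), efx_port_poll() and efx_port_fini()). Probing and initialising the efx_nic_t handle is assumed to have been done elsewhere, and example_port_bringup() itself is invented purely for illustration:

#include "efx.h"

/*
 * Illustrative helper only: initialise the port on an already-probed and
 * initialised NIC, then poll once to pick up the current link mode.
 */
static	__checkReturn	efx_rc_t
example_port_bringup(
	__in		efx_nic_t *enp,
	__out_opt	efx_link_mode_t *link_modep)
{
	efx_rc_t rc;

	/* Select and reconfigure the MAC, then power up or reset the PHY */
	if ((rc = efx_port_init(enp)) != 0)
		return (rc);

	/* Refresh the link state; link_modep may be NULL if not needed */
	if ((rc = efx_port_poll(enp, link_modep)) != 0) {
		efx_port_fini(enp);
		return (rc);
	}

	return (0);
}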
diff --git a/usr/src/uts/common/io/sfxge/common/efx_regs.h b/usr/src/uts/common/io/sfxge/common/efx_regs.h
new file mode 100644
index 0000000000..7b1a31b667
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_regs.h
@@ -0,0 +1,3870 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_REGS_H
+#define _SYS_EFX_REGS_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**************************************************************************
+ *
+ * Falcon/Siena registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/*
+ * FR_AB_EE_VPD_CFG0_REG_SF(128bit):
+ * SPI/VPD configuration register 0
+ */
+#define FR_AB_EE_VPD_CFG0_REG_SF_OFST 0x00000300
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_EE_VPD_CFG0_REG(128bit):
+ * SPI/VPD configuration register 0
+ */
+#define FR_AB_EE_VPD_CFG0_REG_OFST 0x00000140
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPDW_BASE_LBN 64
+#define FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define FRF_AB_EE_VPD_BASE_LBN 32
+#define FRF_AB_EE_VPD_BASE_WIDTH 24
+#define FRF_AB_EE_VPD_LENGTH_LBN 16
+#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define FRF_AB_EE_VPD_EN_LBN 0
+#define FRF_AB_EE_VPD_EN_WIDTH 1
+
+
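Each field in the tables that follow is described by an _LBN (least significant bit number) and _WIDTH pair rather than a mask. Purely as an illustration of that convention, and assuming the low 32 bits of a register have already been read into an ordinary unsigned integer (the driver itself goes through the typed accessors in efx_types.h, e.g. the EFX_OWORD_FIELD family), a field that lies wholly within that dword, such as FRF_AB_EE_VPD_LENGTH above, could be extracted as in the sketch below; example_extract_field() is invented for illustration:

#include <sys/types.h>

/*
 * Illustration only: extract a field narrower than 32 bits from the low
 * dword of a register, given its _LBN/_WIDTH definitions.
 */
static uint32_t
example_extract_field(uint32_t dword0, unsigned int lbn, unsigned int width)
{
	return ((dword0 >> lbn) & ((1u << width) - 1));
}

/*
 * e.g. example_extract_field(dword0,
 *	    FRF_AB_EE_VPD_LENGTH_LBN, FRF_AB_EE_VPD_LENGTH_WIDTH)
 * returns the 15-bit VPD length field (bits 30:16 of the low dword).
 */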
+/*
+ * FR_AB_PCIE_SD_CTL0123_REG_SF(128bit):
+ * PCIE SerDes control register 0 to 3
+ */
+#define FR_AB_PCIE_SD_CTL0123_REG_SF_OFST 0x00000320
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_SD_CTL0123_REG(128bit):
+ * PCIE SerDes control register 0 to 3
+ */
+#define FR_AB_PCIE_SD_CTL0123_REG_OFST 0x00000320
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define FRF_AB_PCIE_OFFSET_LBN 56
+#define FRF_AB_PCIE_OFFSET_WIDTH 8
+#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_H_LBN 51
+#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_L_LBN 50
+#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define FRF_AB_PCIE_LPBK_LBN 40
+#define FRF_AB_PCIE_LPBK_WIDTH 8
+#define FRF_AB_PCIE_PARLPBK_LBN 32
+#define FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define FFE_AB_PCIE_RXEQCTL_OFF 2
+#define FFE_AB_PCIE_RXEQCTL_MIN 1
+#define FFE_AB_PCIE_RXEQCTL_MAX 0
+#define FRF_AB_PCIE_HIDRV_LBN 8
+#define FRF_AB_PCIE_HIDRV_WIDTH 8
+#define FRF_AB_PCIE_LODRV_LBN 0
+#define FRF_AB_PCIE_LODRV_WIDTH 8
+
+
+/*
+ * FR_AB_PCIE_SD_CTL45_REG_SF(128bit):
+ * PCIE SerDes control register 4 and 5
+ */
+#define FR_AB_PCIE_SD_CTL45_REG_SF_OFST 0x00000330
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_SD_CTL45_REG(128bit):
+ * PCIE SerDes control register 4 and 5
+ */
+#define FR_AB_PCIE_SD_CTL45_REG_OFST 0x00000330
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_DTX7_LBN 60
+#define FRF_AB_PCIE_DTX7_WIDTH 4
+#define FRF_AB_PCIE_DTX6_LBN 56
+#define FRF_AB_PCIE_DTX6_WIDTH 4
+#define FRF_AB_PCIE_DTX5_LBN 52
+#define FRF_AB_PCIE_DTX5_WIDTH 4
+#define FRF_AB_PCIE_DTX4_LBN 48
+#define FRF_AB_PCIE_DTX4_WIDTH 4
+#define FRF_AB_PCIE_DTX3_LBN 44
+#define FRF_AB_PCIE_DTX3_WIDTH 4
+#define FRF_AB_PCIE_DTX2_LBN 40
+#define FRF_AB_PCIE_DTX2_WIDTH 4
+#define FRF_AB_PCIE_DTX1_LBN 36
+#define FRF_AB_PCIE_DTX1_WIDTH 4
+#define FRF_AB_PCIE_DTX0_LBN 32
+#define FRF_AB_PCIE_DTX0_WIDTH 4
+#define FRF_AB_PCIE_DEQ7_LBN 28
+#define FRF_AB_PCIE_DEQ7_WIDTH 4
+#define FRF_AB_PCIE_DEQ6_LBN 24
+#define FRF_AB_PCIE_DEQ6_WIDTH 4
+#define FRF_AB_PCIE_DEQ5_LBN 20
+#define FRF_AB_PCIE_DEQ5_WIDTH 4
+#define FRF_AB_PCIE_DEQ4_LBN 16
+#define FRF_AB_PCIE_DEQ4_WIDTH 4
+#define FRF_AB_PCIE_DEQ3_LBN 12
+#define FRF_AB_PCIE_DEQ3_WIDTH 4
+#define FRF_AB_PCIE_DEQ2_LBN 8
+#define FRF_AB_PCIE_DEQ2_WIDTH 4
+#define FRF_AB_PCIE_DEQ1_LBN 4
+#define FRF_AB_PCIE_DEQ1_WIDTH 4
+#define FRF_AB_PCIE_DEQ0_LBN 0
+#define FRF_AB_PCIE_DEQ0_WIDTH 4
+
+
+/*
+ * FR_AB_PCIE_PCS_CTL_STAT_REG_SF(128bit):
+ * PCIE PCS control and status register
+ */
+#define FR_AB_PCIE_PCS_CTL_STAT_REG_SF_OFST 0x00000340
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_PCS_CTL_STAT_REG(128bit):
+ * PCIE PCS control and status register
+ */
+#define FR_AB_PCIE_PCS_CTL_STAT_REG_OFST 0x00000340
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define FRF_AB_PCIE_PRBSERR_LBN 40
+#define FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSEL_LBN 0
+#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+
+/*
+ * FR_AB_HW_INIT_REG_SF(128bit):
+ * Hardware initialization register
+ */
+#define FR_AB_HW_INIT_REG_SF_OFST 0x00000350
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AZ_HW_INIT_REG(128bit):
+ * Hardware initialization register
+ */
+#define FR_AZ_HW_INIT_REG_OFST 0x000000c0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define FRF_CZ_TX_MRG_TAGS_LBN 120
+#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define FRF_AZ_TRGT_MASK_ALL_LBN 100
+#define FRF_AZ_TRGT_MASK_ALL_WIDTH 1
+#define FRF_AZ_DOORBELL_DROP_LBN 92
+#define FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define FRF_AB_PE_EIDLE_DIS_LBN 75
+#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define FRF_AZ_FC_BLOCKING_EN_LBN 45
+#define FRF_AZ_FC_BLOCKING_EN_WIDTH 1
+#define FRF_AZ_B2B_REQ_EN_LBN 44
+#define FRF_AZ_B2B_REQ_EN_WIDTH 1
+#define FRF_AZ_POST_WR_MASK_LBN 40
+#define FRF_AZ_POST_WR_MASK_WIDTH 4
+#define FRF_AZ_TLP_TC_LBN 34
+#define FRF_AZ_TLP_TC_WIDTH 3
+#define FRF_AZ_TLP_ATTR_LBN 32
+#define FRF_AZ_TLP_ATTR_WIDTH 2
+#define FRF_AB_INTB_VEC_LBN 24
+#define FRF_AB_INTB_VEC_WIDTH 5
+#define FRF_AB_INTA_VEC_LBN 16
+#define FRF_AB_INTA_VEC_WIDTH 5
+#define FRF_AZ_WD_TIMER_LBN 8
+#define FRF_AZ_WD_TIMER_WIDTH 8
+#define FRF_AZ_US_DISABLE_LBN 5
+#define FRF_AZ_US_DISABLE_WIDTH 1
+#define FRF_AZ_TLP_EP_LBN 4
+#define FRF_AZ_TLP_EP_WIDTH 1
+#define FRF_AZ_ATTR_SEL_LBN 3
+#define FRF_AZ_ATTR_SEL_WIDTH 1
+#define FRF_AZ_TD_SEL_LBN 1
+#define FRF_AZ_TD_SEL_WIDTH 1
+#define FRF_AZ_TLP_TD_LBN 0
+#define FRF_AZ_TLP_TD_WIDTH 1
+
+
+/*
+ * FR_AB_NIC_STAT_REG_SF(128bit):
+ * NIC status register
+ */
+#define FR_AB_NIC_STAT_REG_SF_OFST 0x00000360
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_NIC_STAT_REG(128bit):
+ * NIC status register
+ */
+#define FR_AB_NIC_STAT_REG_OFST 0x00000200
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_BB_AER_DIS_LBN 34
+#define FRF_BB_AER_DIS_WIDTH 1
+#define FRF_BB_EE_STRAP_EN_LBN 31
+#define FRF_BB_EE_STRAP_EN_WIDTH 1
+#define FRF_BB_EE_STRAP_LBN 24
+#define FRF_BB_EE_STRAP_WIDTH 4
+#define FRF_BB_REVISION_ID_LBN 17
+#define FRF_BB_REVISION_ID_WIDTH 7
+#define FRF_AB_ONCHIP_SRAM_LBN 16
+#define FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define FRF_AB_SF_PRST_LBN 9
+#define FRF_AB_SF_PRST_WIDTH 1
+#define FRF_AB_EE_PRST_LBN 8
+#define FRF_AB_EE_PRST_WIDTH 1
+#define FRF_AB_ATE_MODE_LBN 3
+#define FRF_AB_ATE_MODE_WIDTH 1
+#define FRF_AB_STRAP_PINS_LBN 0
+#define FRF_AB_STRAP_PINS_WIDTH 3
+
+
+/*
+ * FR_AB_GLB_CTL_REG_SF(128bit):
+ * Global control register
+ */
+#define FR_AB_GLB_CTL_REG_SF_OFST 0x00000370
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_GLB_CTL_REG(128bit):
+ * Global control register
+ */
+#define FR_AB_GLB_CTL_REG_OFST 0x00000220
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define FRF_AA_PCIX_RST_CTL_LBN 60
+#define FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define FRF_BB_BIU_RST_CTL_LBN 60
+#define FRF_BB_BIU_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define FRF_AB_XGRX_RST_CTL_LBN 56
+#define FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define FRF_AB_XGTX_RST_CTL_LBN 55
+#define FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define FRF_AB_EM_RST_CTL_LBN 54
+#define FRF_AB_EM_RST_CTL_WIDTH 1
+#define FRF_AB_EV_RST_CTL_LBN 53
+#define FRF_AB_EV_RST_CTL_WIDTH 1
+#define FRF_AB_SR_RST_CTL_LBN 52
+#define FRF_AB_SR_RST_CTL_WIDTH 1
+#define FRF_AB_RX_RST_CTL_LBN 51
+#define FRF_AB_RX_RST_CTL_WIDTH 1
+#define FRF_AB_TX_RST_CTL_LBN 50
+#define FRF_AB_TX_RST_CTL_WIDTH 1
+#define FRF_AB_EE_RST_CTL_LBN 49
+#define FRF_AB_EE_RST_CTL_WIDTH 1
+#define FRF_AB_CS_RST_CTL_LBN 48
+#define FRF_AB_CS_RST_CTL_WIDTH 1
+#define FRF_AB_HOT_RST_CTL_LBN 40
+#define FRF_AB_HOT_RST_CTL_WIDTH 2
+#define FRF_AB_RST_EXT_PHY_LBN 31
+#define FRF_AB_RST_EXT_PHY_WIDTH 1
+#define FRF_AB_RST_XAUI_SD_LBN 30
+#define FRF_AB_RST_XAUI_SD_WIDTH 1
+#define FRF_AB_RST_PCIE_SD_LBN 29
+#define FRF_AB_RST_PCIE_SD_WIDTH 1
+#define FRF_AA_RST_PCIX_LBN 28
+#define FRF_AA_RST_PCIX_WIDTH 1
+#define FRF_BB_RST_BIU_LBN 28
+#define FRF_BB_RST_BIU_WIDTH 1
+#define FRF_AB_RST_PCIE_STKY_LBN 27
+#define FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define FRF_AB_RST_PCIE_CORE_LBN 25
+#define FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define FRF_AB_RST_XGRX_LBN 24
+#define FRF_AB_RST_XGRX_WIDTH 1
+#define FRF_AB_RST_XGTX_LBN 23
+#define FRF_AB_RST_XGTX_WIDTH 1
+#define FRF_AB_RST_EM_LBN 22
+#define FRF_AB_RST_EM_WIDTH 1
+#define FRF_AB_RST_EV_LBN 21
+#define FRF_AB_RST_EV_WIDTH 1
+#define FRF_AB_RST_SR_LBN 20
+#define FRF_AB_RST_SR_WIDTH 1
+#define FRF_AB_RST_RX_LBN 19
+#define FRF_AB_RST_RX_WIDTH 1
+#define FRF_AB_RST_TX_LBN 18
+#define FRF_AB_RST_TX_WIDTH 1
+#define FRF_AB_RST_SF_LBN 17
+#define FRF_AB_RST_SF_WIDTH 1
+#define FRF_AB_RST_CS_LBN 16
+#define FRF_AB_RST_CS_WIDTH 1
+#define FRF_AB_INT_RST_DUR_LBN 4
+#define FRF_AB_INT_RST_DUR_WIDTH 3
+#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define FRF_AB_SWRST_LBN 0
+#define FRF_AB_SWRST_WIDTH 1
+
+
+/*
+ * FR_AZ_IOM_IND_ADR_REG(32bit):
+ * IO-mapped indirect access address register
+ */
+#define FR_AZ_IOM_IND_ADR_REG_OFST 0x00000000
+/* falcona0,falconb0,sienaa0=net_func_bar0 */
+
+#define FRF_AZ_IOM_AUTO_ADR_INC_EN_LBN 24
+#define FRF_AZ_IOM_AUTO_ADR_INC_EN_WIDTH 1
+#define FRF_AZ_IOM_IND_ADR_LBN 0
+#define FRF_AZ_IOM_IND_ADR_WIDTH 24
+
+
+/*
+ * FR_AZ_IOM_IND_DAT_REG(32bit):
+ * IO-mapped indirect access data register
+ */
+#define FR_AZ_IOM_IND_DAT_REG_OFST 0x00000004
+/* falcona0,falconb0,sienaa0=net_func_bar0 */
+
+#define FRF_AZ_IOM_IND_DAT_LBN 0
+#define FRF_AZ_IOM_IND_DAT_WIDTH 32
+
+
+/*
+ * FR_AZ_ADR_REGION_REG(128bit):
+ * Address region register
+ */
+#define FR_AZ_ADR_REGION_REG_OFST 0x00000000
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_ADR_REGION3_LBN 96
+#define FRF_AZ_ADR_REGION3_WIDTH 18
+#define FRF_AZ_ADR_REGION2_LBN 64
+#define FRF_AZ_ADR_REGION2_WIDTH 18
+#define FRF_AZ_ADR_REGION1_LBN 32
+#define FRF_AZ_ADR_REGION1_WIDTH 18
+#define FRF_AZ_ADR_REGION0_LBN 0
+#define FRF_AZ_ADR_REGION0_WIDTH 18
+
+
+/*
+ * FR_AZ_INT_EN_REG_KER(128bit):
+ * Kernel driver Interrupt enable register
+ */
+#define FR_AZ_INT_EN_REG_KER_OFST 0x00000010
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_KER_INT_CHAR_LBN 4
+#define FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define FRF_AZ_KER_INT_KER_LBN 3
+#define FRF_AZ_KER_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+
+/*
+ * FR_AZ_INT_EN_REG_CHAR(128bit):
+ * Char Driver interrupt enable register
+ */
+#define FR_AZ_INT_EN_REG_CHAR_OFST 0x00000020
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_CHAR_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_CHAR_INT_CHAR_LBN 4
+#define FRF_AZ_CHAR_INT_CHAR_WIDTH 1
+#define FRF_AZ_CHAR_INT_KER_LBN 3
+#define FRF_AZ_CHAR_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_CHAR_LBN 0
+#define FRF_AZ_DRV_INT_EN_CHAR_WIDTH 1
+
+
+/*
+ * FR_AZ_INT_ADR_REG_KER(128bit):
+ * Interrupt host address for Kernel driver
+ */
+#define FR_AZ_INT_ADR_REG_KER_OFST 0x00000030
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define FRF_AZ_INT_ADR_KER_LBN 0
+#define FRF_AZ_INT_ADR_KER_WIDTH 64
+#define FRF_AZ_INT_ADR_KER_DW0_LBN 0
+#define FRF_AZ_INT_ADR_KER_DW0_WIDTH 32
+#define FRF_AZ_INT_ADR_KER_DW1_LBN 32
+#define FRF_AZ_INT_ADR_KER_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_INT_ADR_REG_CHAR(128bit):
+ * Interrupt host address for Char driver
+ */
+#define FR_AZ_INT_ADR_REG_CHAR_OFST 0x00000040
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define FRF_AZ_INT_ADR_CHAR_LBN 0
+#define FRF_AZ_INT_ADR_CHAR_WIDTH 64
+#define FRF_AZ_INT_ADR_CHAR_DW0_LBN 0
+#define FRF_AZ_INT_ADR_CHAR_DW0_WIDTH 32
+#define FRF_AZ_INT_ADR_CHAR_DW1_LBN 32
+#define FRF_AZ_INT_ADR_CHAR_DW1_WIDTH 32
+
+
+/*
+ * FR_AA_INT_ACK_KER(32bit):
+ * Kernel interrupt acknowledge register
+ */
+#define FR_AA_INT_ACK_KER_OFST 0x00000050
+/* falcona0=net_func_bar2 */
+
+#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+
+/*
+ * FR_BZ_INT_ISR0_REG(128bit):
+ * Function 0 Interrupt Acknowledge Status register
+ */
+#define FR_BZ_INT_ISR0_REG_OFST 0x00000090
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_INT_ISR_REG_LBN 0
+#define FRF_BZ_INT_ISR_REG_WIDTH 64
+#define FRF_BZ_INT_ISR_REG_DW0_LBN 0
+#define FRF_BZ_INT_ISR_REG_DW0_WIDTH 32
+#define FRF_BZ_INT_ISR_REG_DW1_LBN 32
+#define FRF_BZ_INT_ISR_REG_DW1_WIDTH 32
+
+
+/*
+ * FR_AB_EE_SPI_HCMD_REG(128bit):
+ * SPI host command register
+ */
+#define FR_AB_EE_SPI_HCMD_REG_OFST 0x00000100
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+
+/*
+ * FR_CZ_USR_EV_CFG(32bit):
+ * User Level Event Configuration register
+ */
+#define FR_CZ_USR_EV_CFG_OFST 0x00000100
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_USREV_DIS_LBN 16
+#define FRF_CZ_USREV_DIS_WIDTH 1
+#define FRF_CZ_DFLT_EVQ_LBN 0
+#define FRF_CZ_DFLT_EVQ_WIDTH 10
+
+
+/*
+ * FR_AB_EE_SPI_HADR_REG(128bit):
+ * SPI host address register
+ */
+#define FR_AB_EE_SPI_HADR_REG_OFST 0x00000110
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+
+/*
+ * FR_AB_EE_SPI_HDATA_REG(128bit):
+ * SPI host data register
+ */
+#define FR_AB_EE_SPI_HDATA_REG_OFST 0x00000120
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HDATA3_LBN 96
+#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA2_LBN 64
+#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA1_LBN 32
+#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA0_LBN 0
+#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+
+/*
+ * FR_AB_EE_BASE_PAGE_REG(128bit):
+ * Expansion ROM base mirror register
+ */
+#define FR_AB_EE_BASE_PAGE_REG_OFST 0x00000130
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_EXPROM_MASK_LBN 16
+#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+
+/*
+ * FR_AB_EE_VPD_SW_CNTL_REG(128bit):
+ * VPD access SW control register
+ */
+#define FR_AB_EE_VPD_SW_CNTL_REG_OFST 0x00000150
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+
+/*
+ * FR_AB_EE_VPD_SW_DATA_REG(128bit):
+ * VPD access SW data register
+ */
+#define FR_AB_EE_VPD_SW_DATA_REG_OFST 0x00000160
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+
+/*
+ * FR_BB_PCIE_CORE_INDIRECT_REG(64bit):
+ * Indirect Access to PCIE Core registers
+ */
+#define FR_BB_PCIE_CORE_INDIRECT_REG_OFST 0x000001f0
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+
+/*
+ * FR_AB_GPIO_CTL_REG(128bit):
+ * GPIO control register
+ */
+#define FR_AB_GPIO_CTL_REG_OFST 0x00000210
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GPIO15_OEN_LBN 63
+#define FRF_AB_GPIO15_OEN_WIDTH 1
+#define FRF_AB_GPIO14_OEN_LBN 62
+#define FRF_AB_GPIO14_OEN_WIDTH 1
+#define FRF_AB_GPIO13_OEN_LBN 61
+#define FRF_AB_GPIO13_OEN_WIDTH 1
+#define FRF_AB_GPIO12_OEN_LBN 60
+#define FRF_AB_GPIO12_OEN_WIDTH 1
+#define FRF_AB_GPIO11_OEN_LBN 59
+#define FRF_AB_GPIO11_OEN_WIDTH 1
+#define FRF_AB_GPIO10_OEN_LBN 58
+#define FRF_AB_GPIO10_OEN_WIDTH 1
+#define FRF_AB_GPIO9_OEN_LBN 57
+#define FRF_AB_GPIO9_OEN_WIDTH 1
+#define FRF_AB_GPIO8_OEN_LBN 56
+#define FRF_AB_GPIO8_OEN_WIDTH 1
+#define FRF_AB_GPIO15_OUT_LBN 55
+#define FRF_AB_GPIO15_OUT_WIDTH 1
+#define FRF_AB_GPIO14_OUT_LBN 54
+#define FRF_AB_GPIO14_OUT_WIDTH 1
+#define FRF_AB_GPIO13_OUT_LBN 53
+#define FRF_AB_GPIO13_OUT_WIDTH 1
+#define FRF_AB_GPIO12_OUT_LBN 52
+#define FRF_AB_GPIO12_OUT_WIDTH 1
+#define FRF_AB_GPIO11_OUT_LBN 51
+#define FRF_AB_GPIO11_OUT_WIDTH 1
+#define FRF_AB_GPIO10_OUT_LBN 50
+#define FRF_AB_GPIO10_OUT_WIDTH 1
+#define FRF_AB_GPIO9_OUT_LBN 49
+#define FRF_AB_GPIO9_OUT_WIDTH 1
+#define FRF_AB_GPIO8_OUT_LBN 48
+#define FRF_AB_GPIO8_OUT_WIDTH 1
+#define FRF_AB_GPIO15_IN_LBN 47
+#define FRF_AB_GPIO15_IN_WIDTH 1
+#define FRF_AB_GPIO14_IN_LBN 46
+#define FRF_AB_GPIO14_IN_WIDTH 1
+#define FRF_AB_GPIO13_IN_LBN 45
+#define FRF_AB_GPIO13_IN_WIDTH 1
+#define FRF_AB_GPIO12_IN_LBN 44
+#define FRF_AB_GPIO12_IN_WIDTH 1
+#define FRF_AB_GPIO11_IN_LBN 43
+#define FRF_AB_GPIO11_IN_WIDTH 1
+#define FRF_AB_GPIO10_IN_LBN 42
+#define FRF_AB_GPIO10_IN_WIDTH 1
+#define FRF_AB_GPIO9_IN_LBN 41
+#define FRF_AB_GPIO9_IN_WIDTH 1
+#define FRF_AB_GPIO8_IN_LBN 40
+#define FRF_AB_GPIO8_IN_WIDTH 1
+#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define FRF_BB_CLK156_OUT_EN_LBN 31
+#define FRF_BB_CLK156_OUT_EN_WIDTH 1
+#define FRF_BB_USE_NIC_CLK_LBN 30
+#define FRF_BB_USE_NIC_CLK_WIDTH 1
+#define FRF_AB_GPIO5_OEN_LBN 29
+#define FRF_AB_GPIO5_OEN_WIDTH 1
+#define FRF_AB_GPIO4_OEN_LBN 28
+#define FRF_AB_GPIO4_OEN_WIDTH 1
+#define FRF_AB_GPIO3_OEN_LBN 27
+#define FRF_AB_GPIO3_OEN_WIDTH 1
+#define FRF_AB_GPIO2_OEN_LBN 26
+#define FRF_AB_GPIO2_OEN_WIDTH 1
+#define FRF_AB_GPIO1_OEN_LBN 25
+#define FRF_AB_GPIO1_OEN_WIDTH 1
+#define FRF_AB_GPIO0_OEN_LBN 24
+#define FRF_AB_GPIO0_OEN_WIDTH 1
+#define FRF_AB_GPIO5_OUT_LBN 21
+#define FRF_AB_GPIO5_OUT_WIDTH 1
+#define FRF_AB_GPIO4_OUT_LBN 20
+#define FRF_AB_GPIO4_OUT_WIDTH 1
+#define FRF_AB_GPIO3_OUT_LBN 19
+#define FRF_AB_GPIO3_OUT_WIDTH 1
+#define FRF_AB_GPIO2_OUT_LBN 18
+#define FRF_AB_GPIO2_OUT_WIDTH 1
+#define FRF_AB_GPIO1_OUT_LBN 17
+#define FRF_AB_GPIO1_OUT_WIDTH 1
+#define FRF_AB_GPIO0_OUT_LBN 16
+#define FRF_AB_GPIO0_OUT_WIDTH 1
+#define FRF_AB_GPIO5_IN_LBN 13
+#define FRF_AB_GPIO5_IN_WIDTH 1
+#define FRF_AB_GPIO4_IN_LBN 12
+#define FRF_AB_GPIO4_IN_WIDTH 1
+#define FRF_AB_GPIO3_IN_LBN 11
+#define FRF_AB_GPIO3_IN_WIDTH 1
+#define FRF_AB_GPIO2_IN_LBN 10
+#define FRF_AB_GPIO2_IN_WIDTH 1
+#define FRF_AB_GPIO1_IN_LBN 9
+#define FRF_AB_GPIO1_IN_WIDTH 1
+#define FRF_AB_GPIO0_IN_LBN 8
+#define FRF_AB_GPIO0_IN_WIDTH 1
+#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+
+/*
+ * FR_AZ_FATAL_INTR_REG_KER(128bit):
+ * Fatal interrupt register for Kernel
+ */
+#define FR_AZ_FATAL_INTR_REG_KER_OFST 0x00000230
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+
+/*
+ * FR_AZ_FATAL_INTR_REG_CHAR(128bit):
+ * Fatal interrupt register for Char
+ */
+#define FR_AZ_FATAL_INTR_REG_CHAR_OFST 0x00000240
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_CHAR_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_CHAR_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_CHAR_LBN 8
+#define FRF_AZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_CHAR_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_CHAR_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_CHAR_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_CHAR_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_LBN 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_CHAR_LBN 0
+#define FRF_AZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+
+/*
+ * FR_AZ_DP_CTRL_REG(128bit):
+ * Datapath control register
+ */
+#define FR_AZ_DP_CTRL_REG_OFST 0x00000250
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_FLS_EVQ_ID_LBN 0
+#define FRF_AZ_FLS_EVQ_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_MEM_STAT_REG(128bit):
+ * Memory status register
+ */
+#define FR_AZ_MEM_STAT_REG_OFST 0x00000260
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MEM_PERR_VEC_LBN 53
+#define FRF_AB_MEM_PERR_VEC_WIDTH 40
+#define FRF_AB_MEM_PERR_VEC_DW0_LBN 53
+#define FRF_AB_MEM_PERR_VEC_DW0_WIDTH 32
+#define FRF_AB_MEM_PERR_VEC_DW1_LBN 85
+#define FRF_AB_MEM_PERR_VEC_DW1_WIDTH 6
+#define FRF_AB_MBIST_CORR_LBN 38
+#define FRF_AB_MBIST_CORR_WIDTH 15
+#define FRF_AB_MBIST_ERR_LBN 0
+#define FRF_AB_MBIST_ERR_WIDTH 40
+#define FRF_AB_MBIST_ERR_DW0_LBN 0
+#define FRF_AB_MBIST_ERR_DW0_WIDTH 32
+#define FRF_AB_MBIST_ERR_DW1_LBN 32
+#define FRF_AB_MBIST_ERR_DW1_WIDTH 6
+#define FRF_CZ_MEM_PERR_VEC_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
+#define FRF_CZ_MEM_PERR_VEC_DW0_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_DW0_WIDTH 32
+#define FRF_CZ_MEM_PERR_VEC_DW1_LBN 32
+#define FRF_CZ_MEM_PERR_VEC_DW1_WIDTH 3
+
+
+/*
+ * FR_PORT0_CS_DEBUG_REG(128bit):
+ * Debug register
+ */
+
+#define FR_AZ_CS_DEBUG_REG_OFST 0x00000270
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define FRF_CZ_CS_PORT_NUM_LBN 40
+#define FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_RESERVED_LBN 36
+#define FRF_CZ_CS_RESERVED_WIDTH 4
+#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_PORT_FPE_DW0_LBN 1
+#define FRF_CZ_CS_PORT_FPE_DW0_WIDTH 32
+#define FRF_CZ_CS_PORT_FPE_DW1_LBN 33
+#define FRF_CZ_CS_PORT_FPE_DW1_WIDTH 3
+#define FRF_CZ_CS_PORT_FPE_LBN 1
+#define FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define FRF_AZ_CS_DEBUG_EN_LBN 0
+#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_DRIVER_REG(128bit):
+ * Driver scratch register [0-7]
+ */
+#define FR_AZ_DRIVER_REG_OFST 0x00000280
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_DRIVER_REG_STEP 16
+#define FR_AZ_DRIVER_REG_ROWS 8
+
+#define FRF_AZ_DRIVER_DW0_LBN 0
+#define FRF_AZ_DRIVER_DW0_WIDTH 32
+
+
+/*
+ * FR_AZ_ALTERA_BUILD_REG(128bit):
+ * Altera build register
+ */
+#define FR_AZ_ALTERA_BUILD_REG_OFST 0x00000300
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+
+/*
+ * FR_AZ_CSR_SPARE_REG(128bit):
+ * Spare register
+ */
+#define FR_AZ_CSR_SPARE_REG_OFST 0x00000310
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_MEM_PERR_EN_TX_DATA_LBN 72
+#define FRF_AZ_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define FRF_AZ_MEM_PERR_EN_LBN 64
+#define FRF_AZ_MEM_PERR_EN_WIDTH 38
+#define FRF_AZ_MEM_PERR_EN_DW0_LBN 64
+#define FRF_AZ_MEM_PERR_EN_DW0_WIDTH 32
+#define FRF_AZ_MEM_PERR_EN_DW1_LBN 96
+#define FRF_AZ_MEM_PERR_EN_DW1_WIDTH 6
+#define FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+
+/*
+ * FR_BZ_DEBUG_DATA_OUT_REG(128bit):
+ * Live Debug and Debug 2 out ports
+ */
+#define FR_BZ_DEBUG_DATA_OUT_REG_OFST 0x00000350
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_DEBUG2_PORT_LBN 25
+#define FRF_BZ_DEBUG2_PORT_WIDTH 15
+#define FRF_BZ_DEBUG1_PORT_LBN 0
+#define FRF_BZ_DEBUG1_PORT_WIDTH 25
+
+
+/*
+ * FR_BZ_EVQ_RPTR_REGP0(32bit):
+ * Event queue read pointer register
+ */
+#define FR_BZ_EVQ_RPTR_REGP0_OFST 0x00000400
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_EVQ_RPTR_REGP0_STEP 8192
+#define FR_BZ_EVQ_RPTR_REGP0_ROWS 1024
+/*
+ * FR_AA_EVQ_RPTR_REG_KER(32bit):
+ * Event queue read pointer register
+ */
+#define FR_AA_EVQ_RPTR_REG_KER_OFST 0x00011b00
+/* falcona0=net_func_bar2 */
+#define FR_AA_EVQ_RPTR_REG_KER_STEP 4
+#define FR_AA_EVQ_RPTR_REG_KER_ROWS 4
+/*
+ * FR_AZ_EVQ_RPTR_REG(32bit):
+ * Event queue read pointer register
+ */
+#define FR_AZ_EVQ_RPTR_REG_OFST 0x00fa0000
+/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_EVQ_RPTR_REG_STEP 16
+#define FR_AB_EVQ_RPTR_REG_ROWS 4096
+#define FR_CZ_EVQ_RPTR_REG_ROWS 1024
+/*
+ * FR_BB_EVQ_RPTR_REGP123(32bit):
+ * Event queue read pointer register
+ */
+#define FR_BB_EVQ_RPTR_REGP123_OFST 0x01000400
+/* falconb0=net_func_bar2 */
+#define FR_BB_EVQ_RPTR_REGP123_STEP 8192
+#define FR_BB_EVQ_RPTR_REGP123_ROWS 3072
+
+#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define FRF_AZ_EVQ_RPTR_LBN 0
+#define FRF_AZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * FR_BZ_TIMER_COMMAND_REGP0(128bit):
+ * Timer Command Registers
+ */
+#define FR_BZ_TIMER_COMMAND_REGP0_OFST 0x00000420
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_TIMER_COMMAND_REGP0_STEP 8192
+#define FR_BZ_TIMER_COMMAND_REGP0_ROWS 1024
+/*
+ * FR_AA_TIMER_COMMAND_REG_KER(128bit):
+ * Timer Command Registers
+ */
+#define FR_AA_TIMER_COMMAND_REG_KER_OFST 0x00000420
+/* falcona0=net_func_bar2 */
+#define FR_AA_TIMER_COMMAND_REG_KER_STEP 8192
+#define FR_AA_TIMER_COMMAND_REG_KER_ROWS 4
+/*
+ * FR_AB_TIMER_COMMAND_REGP123(128bit):
+ * Timer Command Registers
+ */
+#define FR_AB_TIMER_COMMAND_REGP123_OFST 0x01000420
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TIMER_COMMAND_REGP123_STEP 8192
+#define FR_AB_TIMER_COMMAND_REGP123_ROWS 3072
+/*
+ * FR_AA_TIMER_COMMAND_REGP0(128bit):
+ * Timer Command Registers
+ */
+#define FR_AA_TIMER_COMMAND_REGP0_OFST 0x00008420
+/* falcona0=char_func_bar0 */
+#define FR_AA_TIMER_COMMAND_REGP0_STEP 8192
+#define FR_AA_TIMER_COMMAND_REGP0_ROWS 1020
+
+#define FRF_CZ_TC_TIMER_MODE_LBN 14
+#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define FRF_AB_TC_TIMER_MODE_LBN 12
+#define FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define FRF_CZ_TC_TIMER_VAL_LBN 0
+#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define FRF_AB_TC_TIMER_VAL_LBN 0
+#define FRF_AB_TC_TIMER_VAL_WIDTH 12
+
+
+/*
+ * FR_AZ_DRV_EV_REG(128bit):
+ * Driver generated event register
+ */
+#define FR_AZ_DRV_EV_REG_OFST 0x00000440
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_DRV_EV_QID_LBN 64
+#define FRF_AZ_DRV_EV_QID_WIDTH 12
+#define FRF_AZ_DRV_EV_DATA_LBN 0
+#define FRF_AZ_DRV_EV_DATA_WIDTH 64
+#define FRF_AZ_DRV_EV_DATA_DW0_LBN 0
+#define FRF_AZ_DRV_EV_DATA_DW0_WIDTH 32
+#define FRF_AZ_DRV_EV_DATA_DW1_LBN 32
+#define FRF_AZ_DRV_EV_DATA_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_EVQ_CTL_REG(128bit):
+ * Event queue control register
+ */
+#define FR_AZ_EVQ_CTL_REG_OFST 0x00000450
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+
+/*
+ * FR_AZ_EVQ_CNT1_REG(128bit):
+ * Event counter 1 register
+ */
+#define FR_AZ_EVQ_CNT1_REG_OFST 0x00000460
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+
+/*
+ * FR_AZ_EVQ_CNT2_REG(128bit):
+ * Event counter 2 register
+ */
+#define FR_AZ_EVQ_CNT2_REG_OFST 0x00000470
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+
+/*
+ * FR_CZ_USR_EV_REG(32bit):
+ * Event mailbox register
+ */
+#define FR_CZ_USR_EV_REG_OFST 0x00000540
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_USR_EV_REG_STEP 8192
+#define FR_CZ_USR_EV_REG_ROWS 1024
+
+#define FRF_CZ_USR_EV_DATA_LBN 0
+#define FRF_CZ_USR_EV_DATA_WIDTH 32
+
+
+/*
+ * FR_AZ_BUF_TBL_CFG_REG(128bit):
+ * Buffer table configuration register
+ */
+#define FR_AZ_BUF_TBL_CFG_REG_OFST 0x00000600
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_BUF_TBL_MODE_LBN 3
+#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+
+/*
+ * FR_AZ_SRM_RX_DC_CFG_REG(128bit):
+ * SRAM receive descriptor cache configuration register
+ */
+#define FR_AZ_SRM_RX_DC_CFG_REG_OFST 0x00000610
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+
+/*
+ * FR_AZ_SRM_TX_DC_CFG_REG(128bit):
+ * SRAM transmit descriptor cache configuration register
+ */
+#define FR_AZ_SRM_TX_DC_CFG_REG_OFST 0x00000620
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+
+/*
+ * FR_AZ_SRM_CFG_REG(128bit):
+ * SRAM configuration register
+ */
+#define FR_AZ_SRM_CFG_REG_SF_OFST 0x00000380
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AZ_SRM_CFG_REG(128bit):
+ * SRAM configuration register
+ */
+#define FR_AZ_SRM_CFG_REG_OFST 0x00000630
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define FRF_AZ_SRM_INIT_EN_LBN 3
+#define FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define FRF_AZ_SRM_NUM_BANK_LBN 2
+#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+
+/*
+ * FR_AZ_BUF_TBL_UPD_REG(128bit):
+ * Buffer table update register
+ */
+#define FR_AZ_BUF_TBL_UPD_REG_OFST 0x00000650
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_BUF_UPD_CMD_LBN 63
+#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_CMD_LBN 62
+#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+
+/*
+ * FR_AZ_SRM_UPD_EVQ_REG(128bit):
+ * Buffer table update register
+ */
+#define FR_AZ_SRM_UPD_EVQ_REG_OFST 0x00000660
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_SRAM_PARITY_REG(128bit):
+ * SRAM parity register.
+ */
+#define FR_AZ_SRAM_PARITY_REG_OFST 0x00000670
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_BYPASS_ECC_LBN 3
+#define FRF_CZ_BYPASS_ECC_WIDTH 1
+#define FRF_CZ_SEC_INT_LBN 2
+#define FRF_CZ_SEC_INT_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+#define FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+
+
+/*
+ * FR_AZ_RX_CFG_REG(128bit):
+ * Receive configuration register
+ */
+#define FR_AZ_RX_CFG_REG_OFST 0x00000800
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define FRF_BZ_RX_TCP_SUP_LBN 48
+#define FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define FRF_BZ_RX_INGR_EN_LBN 47
+#define FRF_BZ_RX_INGR_EN_WIDTH 1
+#define FRF_BZ_RX_IP_HASH_LBN 46
+#define FRF_BZ_RX_IP_HASH_WIDTH 1
+#define FRF_BZ_RX_HASH_ALG_LBN 45
+#define FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define FRF_BZ_RX_XON_TX_TH_LBN 33
+#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_OWNERR_CTL_LBN 30
+#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_RX_XON_TX_TH_LBN 25
+#define FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XON_MAC_TH_LBN 6
+#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_RX_FILTER_CTL_REG(128bit):
+ * Receive filter control registers
+ */
+#define FR_AZ_RX_FILTER_CTL_REG_OFST 0x00000810
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define FRF_AZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define FRF_AZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_NUM_KER_LBN 24
+#define FRF_AZ_NUM_KER_WIDTH 2
+#define FRF_AZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define FRF_AZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define FRF_AZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define FRF_AZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+
+/*
+ * FR_AZ_RX_FLUSH_DESCQ_REG(128bit):
+ * Receive flush descriptor queue register
+ */
+#define FR_AZ_RX_FLUSH_DESCQ_REG_OFST 0x00000820
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+
+/*
+ * FR_BZ_RX_DESC_UPD_REGP0(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_BZ_RX_DESC_UPD_REGP0_OFST 0x00000830
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_RX_DESC_UPD_REGP0_STEP 8192
+#define FR_BZ_RX_DESC_UPD_REGP0_ROWS 1024
+/*
+ * FR_AA_RX_DESC_UPD_REG_KER(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AA_RX_DESC_UPD_REG_KER_OFST 0x00000830
+/* falcona0=net_func_bar2 */
+#define FR_AA_RX_DESC_UPD_REG_KER_STEP 8192
+#define FR_AA_RX_DESC_UPD_REG_KER_ROWS 4
+/*
+ * FR_AB_RX_DESC_UPD_REGP123(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AB_RX_DESC_UPD_REGP123_OFST 0x01000830
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_RX_DESC_UPD_REGP123_STEP 8192
+#define FR_AB_RX_DESC_UPD_REGP123_ROWS 3072
+/*
+ * FR_AA_RX_DESC_UPD_REGP0(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AA_RX_DESC_UPD_REGP0_OFST 0x00008830
+/* falcona0=char_func_bar0 */
+#define FR_AA_RX_DESC_UPD_REGP0_STEP 8192
+#define FR_AA_RX_DESC_UPD_REGP0_ROWS 1020
+
+#define FRF_AZ_RX_DESC_WPTR_LBN 96
+#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_RX_DESC_LBN 0
+#define FRF_AZ_RX_DESC_WIDTH 64
+#define FRF_AZ_RX_DESC_DW0_LBN 0
+#define FRF_AZ_RX_DESC_DW0_WIDTH 32
+#define FRF_AZ_RX_DESC_DW1_LBN 32
+#define FRF_AZ_RX_DESC_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_DC_CFG_REG(128bit):
+ * Receive descriptor cache configuration register
+ */
+#define FR_AZ_RX_DC_CFG_REG_OFST 0x00000840
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_MAX_PF_LBN 2
+#define FRF_AZ_RX_MAX_PF_WIDTH 2
+#define FRF_AZ_RX_DC_SIZE_LBN 0
+#define FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define FFE_AZ_RX_DC_SIZE_64 3
+#define FFE_AZ_RX_DC_SIZE_32 2
+#define FFE_AZ_RX_DC_SIZE_16 1
+#define FFE_AZ_RX_DC_SIZE_8 0
+
+
+/*
+ * FR_AZ_RX_DC_PF_WM_REG(128bit):
+ * Receive descriptor cache pre-fetch watermark register
+ */
+#define FR_AZ_RX_DC_PF_WM_REG_OFST 0x00000850
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+
+/*
+ * FR_BZ_RX_RSS_TKEY_REG(128bit):
+ * RSS Toeplitz hash key
+ */
+#define FR_BZ_RX_RSS_TKEY_REG_OFST 0x00000860
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_RX_RSS_TKEY_LBN 96
+#define FRF_BZ_RX_RSS_TKEY_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW3_LBN 96
+#define FRF_BZ_RX_RSS_TKEY_DW3_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW2_LBN 64
+#define FRF_BZ_RX_RSS_TKEY_DW2_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW1_LBN 32
+#define FRF_BZ_RX_RSS_TKEY_DW1_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW0_LBN 0
+#define FRF_BZ_RX_RSS_TKEY_DW0_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_NODESC_DROP_REG(128bit):
+ * Receive dropped packet counter register
+ */
+#define FR_AZ_RX_NODESC_DROP_REG_OFST 0x00000880
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_AZ_RX_NODESC_DROP_CNT_WIDTH 16
+
+
+/*
+ * FR_AZ_RX_SELF_RST_REG(128bit):
+ * Receive self reset register
+ */
+#define FR_AZ_RX_SELF_RST_REG_OFST 0x00000890
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_ISCSI_DIS_LBN 17
+#define FRF_AZ_RX_ISCSI_DIS_WIDTH 1
+#define FRF_AB_RX_SW_RST_REG_LBN 16
+#define FRF_AB_RX_SW_RST_REG_WIDTH 1
+#define FRF_AB_RX_SELF_RST_EN_LBN 8
+#define FRF_AB_RX_SELF_RST_EN_WIDTH 1
+#define FRF_AZ_RX_MAX_PF_LAT_LBN 4
+#define FRF_AZ_RX_MAX_PF_LAT_WIDTH 4
+#define FRF_AZ_RX_MAX_LU_LAT_LBN 0
+#define FRF_AZ_RX_MAX_LU_LAT_WIDTH 4
+
+
+/*
+ * FR_AZ_RX_DEBUG_REG(128bit):
+ * undocumented register
+ */
+#define FR_AZ_RX_DEBUG_REG_OFST 0x000008a0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_DEBUG_LBN 0
+#define FRF_AZ_RX_DEBUG_WIDTH 64
+#define FRF_AZ_RX_DEBUG_DW0_LBN 0
+#define FRF_AZ_RX_DEBUG_DW0_WIDTH 32
+#define FRF_AZ_RX_DEBUG_DW1_LBN 32
+#define FRF_AZ_RX_DEBUG_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_PUSH_DROP_REG(128bit):
+ * Receive descriptor push dropped counter register
+ */
+#define FR_AZ_RX_PUSH_DROP_REG_OFST 0x000008b0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG1(128bit):
+ * IPv6 RSS Toeplitz hash key low bytes
+ */
+#define FR_CZ_RX_RSS_IPV6_REG1_OFST 0x000008d0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_LBN 96
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG2(128bit):
+ * IPv6 RSS Toeplitz hash key middle bytes
+ */
+#define FR_CZ_RX_RSS_IPV6_REG2_OFST 0x000008e0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_LBN 96
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG3(128bit):
+ * IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings
+ */
+#define FR_CZ_RX_RSS_IPV6_REG3_OFST 0x000008f0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_FLUSH_DESCQ_REG(128bit):
+ * Transmit flush descriptor queue register
+ */
+#define FR_AZ_TX_FLUSH_DESCQ_REG_OFST 0x00000a00
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+
+/*
+ * FR_BZ_TX_DESC_UPD_REGP0(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_BZ_TX_DESC_UPD_REGP0_OFST 0x00000a10
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_TX_DESC_UPD_REGP0_STEP 8192
+#define FR_BZ_TX_DESC_UPD_REGP0_ROWS 1024
+/*
+ * FR_AA_TX_DESC_UPD_REG_KER(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AA_TX_DESC_UPD_REG_KER_OFST 0x00000a10
+/* falcona0=net_func_bar2 */
+#define FR_AA_TX_DESC_UPD_REG_KER_STEP 8192
+#define FR_AA_TX_DESC_UPD_REG_KER_ROWS 8
+/*
+ * FR_AB_TX_DESC_UPD_REGP123(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AB_TX_DESC_UPD_REGP123_OFST 0x01000a10
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TX_DESC_UPD_REGP123_STEP 8192
+#define FR_AB_TX_DESC_UPD_REGP123_ROWS 3072
+/*
+ * FR_AA_TX_DESC_UPD_REGP0(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AA_TX_DESC_UPD_REGP0_OFST 0x00008a10
+/* falcona0=char_func_bar0 */
+#define FR_AA_TX_DESC_UPD_REGP0_STEP 8192
+#define FR_AA_TX_DESC_UPD_REGP0_ROWS 1020
+
+#define FRF_AZ_TX_DESC_WPTR_LBN 96
+#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_TX_DESC_LBN 0
+#define FRF_AZ_TX_DESC_WIDTH 95
+#define FRF_AZ_TX_DESC_DW0_LBN 0
+#define FRF_AZ_TX_DESC_DW0_WIDTH 32
+#define FRF_AZ_TX_DESC_DW1_LBN 32
+#define FRF_AZ_TX_DESC_DW1_WIDTH 32
+#define FRF_AZ_TX_DESC_DW2_LBN 64
+#define FRF_AZ_TX_DESC_DW2_WIDTH 31
+
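+/*
+ * Editorial sketch, not part of the contributed driver: per-queue
+ * registers such as the descriptor update (doorbell) registers above are
+ * arrays described by an _OFST base, a _STEP between instances and a
+ * _ROWS count.  The helper below shows that addressing rule for the
+ * FR_BZ variant; the out-of-range return value is an assumption made
+ * for this sketch.
+ */
+static inline uint32_t
+sketch_tx_desc_upd_offset(unsigned int txq_index)
+{
+	if (txq_index >= FR_BZ_TX_DESC_UPD_REGP0_ROWS)
+		return (0);	/* no such queue for this variant */
+	return (FR_BZ_TX_DESC_UPD_REGP0_OFST +
+	    txq_index * FR_BZ_TX_DESC_UPD_REGP0_STEP);
+}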
+
+/*
+ * FR_AZ_TX_DC_CFG_REG(128bit):
+ * Transmit descriptor cache configuration register
+ */
+#define FR_AZ_TX_DC_CFG_REG_OFST 0x00000a20
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_DC_SIZE_LBN 0
+#define FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define FFE_AZ_TX_DC_SIZE_32 2
+#define FFE_AZ_TX_DC_SIZE_16 1
+#define FFE_AZ_TX_DC_SIZE_8 0
+
+
+/*
+ * FR_AA_TX_CHKSM_CFG_REG(128bit):
+ * Transmit checksum configuration register
+ */
+#define FR_AA_TX_CHKSM_CFG_REG_OFST 0x00000a30
+/* falcona0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_CFG_REG(128bit):
+ * Transmit configuration register
+ */
+#define FR_AZ_TX_CFG_REG_OFST 0x00000a50
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_TX_PUSH_DROP_REG(128bit):
+ * Transmit push dropped register
+ */
+#define FR_AZ_TX_PUSH_DROP_REG_OFST 0x00000a60
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_RESERVED_REG(128bit):
+ * Transmit configuration register
+ */
+#define FR_AZ_TX_RESERVED_REG_OFST 0x00000a80
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_EVT_CNT_LBN 121
+#define FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define FRF_AZ_TX_PUSH_EN_LBN 89
+#define FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define FRF_AZ_TX_DMAQ_ST_LBN 78
+#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_LBN 64
+#define FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define FRF_AZ_TX_XP_TIMER_LBN 52
+#define FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define FRF_AZ_TX_PREF_SPACER_LBN 44
+#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define FRF_AZ_TX_ONLY1TAG_LBN 21
+#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define FRF_AA_TX_DMA_FF_THR_LBN 16
+#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define FRF_AZ_TX_DMA_SPACER_LBN 8
+#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define FRF_AA_TX_TCP_DIS_LBN 7
+#define FRF_AA_TX_TCP_DIS_WIDTH 1
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define FRF_AA_TX_IP_DIS_LBN 6
+#define FRF_AA_TX_IP_DIS_WIDTH 1
+#define FRF_AZ_TX_MAX_CPL_LBN 2
+#define FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define FFE_AZ_TX_MAX_CPL_16 3
+#define FFE_AZ_TX_MAX_CPL_8 2
+#define FFE_AZ_TX_MAX_CPL_4 1
+#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define FRF_AZ_TX_MAX_PREF_LBN 0
+#define FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define FFE_AZ_TX_MAX_PREF_32 3
+#define FFE_AZ_TX_MAX_PREF_16 2
+#define FFE_AZ_TX_MAX_PREF_8 1
+#define FFE_AZ_TX_MAX_PREF_OFF 0
+
+
+/*
+ * FR_BZ_TX_PACE_REG(128bit):
+ * Transmit pace control register
+ */
+#define FR_BZ_TX_PACE_REG_OFST 0x00000a90
+/* falconb0,sienaa0=net_func_bar2 */
+/*
+ * FR_AA_TX_PACE_REG(128bit):
+ * Transmit pace control register
+ */
+#define FR_AA_TX_PACE_REG_OFST 0x00f80000
+/* falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PACE_SB_NOT_AF_LBN 19
+#define FRF_AZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define FRF_AZ_TX_PACE_SB_AF_LBN 9
+#define FRF_AZ_TX_PACE_SB_AF_WIDTH 10
+#define FRF_AZ_TX_PACE_FB_BASE_LBN 5
+#define FRF_AZ_TX_PACE_FB_BASE_WIDTH 4
+#define FRF_AZ_TX_PACE_BIN_TH_LBN 0
+#define FRF_AZ_TX_PACE_BIN_TH_WIDTH 5
+
+
+/*
+ * FR_AZ_TX_PACE_DROP_QID_REG(128bit):
+ * PACE Drop QID Counter
+ */
+#define FR_AZ_TX_PACE_DROP_QID_REG_OFST 0x00000aa0
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define FRF_AZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+
+/*
+ * FR_AB_TX_VLAN_REG(128bit):
+ * Transmit VLAN tag register
+ */
+#define FR_AB_TX_VLAN_REG_OFST 0x00000ae0
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_TX_VLAN_EN_LBN 127
+#define FRF_AB_TX_VLAN_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_PORT1_EN_LBN 125
+#define FRF_AB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_PORT0_EN_LBN 124
+#define FRF_AB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_LBN 112
+#define FRF_AB_TX_VLAN7_WIDTH 12
+#define FRF_AB_TX_VLAN6_PORT1_EN_LBN 109
+#define FRF_AB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN6_PORT0_EN_LBN 108
+#define FRF_AB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN6_LBN 96
+#define FRF_AB_TX_VLAN6_WIDTH 12
+#define FRF_AB_TX_VLAN5_PORT1_EN_LBN 93
+#define FRF_AB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN5_PORT0_EN_LBN 92
+#define FRF_AB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN5_LBN 80
+#define FRF_AB_TX_VLAN5_WIDTH 12
+#define FRF_AB_TX_VLAN4_PORT1_EN_LBN 77
+#define FRF_AB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN4_PORT0_EN_LBN 76
+#define FRF_AB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN4_LBN 64
+#define FRF_AB_TX_VLAN4_WIDTH 12
+#define FRF_AB_TX_VLAN3_PORT1_EN_LBN 61
+#define FRF_AB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN3_PORT0_EN_LBN 60
+#define FRF_AB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN3_LBN 48
+#define FRF_AB_TX_VLAN3_WIDTH 12
+#define FRF_AB_TX_VLAN2_PORT1_EN_LBN 45
+#define FRF_AB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN2_PORT0_EN_LBN 44
+#define FRF_AB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN2_LBN 32
+#define FRF_AB_TX_VLAN2_WIDTH 12
+#define FRF_AB_TX_VLAN1_PORT1_EN_LBN 29
+#define FRF_AB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN1_PORT0_EN_LBN 28
+#define FRF_AB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN1_LBN 16
+#define FRF_AB_TX_VLAN1_WIDTH 12
+#define FRF_AB_TX_VLAN0_PORT1_EN_LBN 13
+#define FRF_AB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN0_PORT0_EN_LBN 12
+#define FRF_AB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN0_LBN 0
+#define FRF_AB_TX_VLAN0_WIDTH 12
+
+
+/*
+ * FR_AZ_TX_IPFIL_PORTEN_REG(128bit):
+ * Transmit filter control register
+ */
+#define FR_AZ_TX_IPFIL_PORTEN_REG_OFST 0x00000af0
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_MADR0_FIL_EN_LBN 64
+#define FRF_AZ_TX_MADR0_FIL_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL31_PORT_EN_LBN 62
+#define FRF_AB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL30_PORT_EN_LBN 60
+#define FRF_AB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL29_PORT_EN_LBN 58
+#define FRF_AB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL28_PORT_EN_LBN 56
+#define FRF_AB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL27_PORT_EN_LBN 54
+#define FRF_AB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL26_PORT_EN_LBN 52
+#define FRF_AB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL25_PORT_EN_LBN 50
+#define FRF_AB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL24_PORT_EN_LBN 48
+#define FRF_AB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL23_PORT_EN_LBN 46
+#define FRF_AB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL22_PORT_EN_LBN 44
+#define FRF_AB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL21_PORT_EN_LBN 42
+#define FRF_AB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL20_PORT_EN_LBN 40
+#define FRF_AB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL19_PORT_EN_LBN 38
+#define FRF_AB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL18_PORT_EN_LBN 36
+#define FRF_AB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL17_PORT_EN_LBN 34
+#define FRF_AB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL16_PORT_EN_LBN 32
+#define FRF_AB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL15_PORT_EN_LBN 30
+#define FRF_AB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL14_PORT_EN_LBN 28
+#define FRF_AB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL13_PORT_EN_LBN 26
+#define FRF_AB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL12_PORT_EN_LBN 24
+#define FRF_AB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL11_PORT_EN_LBN 22
+#define FRF_AB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL10_PORT_EN_LBN 20
+#define FRF_AB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL9_PORT_EN_LBN 18
+#define FRF_AB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL8_PORT_EN_LBN 16
+#define FRF_AB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL7_PORT_EN_LBN 14
+#define FRF_AB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL6_PORT_EN_LBN 12
+#define FRF_AB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL5_PORT_EN_LBN 10
+#define FRF_AB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL4_PORT_EN_LBN 8
+#define FRF_AB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL3_PORT_EN_LBN 6
+#define FRF_AB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL2_PORT_EN_LBN 4
+#define FRF_AB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL1_PORT_EN_LBN 2
+#define FRF_AB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL0_PORT_EN_LBN 0
+#define FRF_AB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+
+/*
+ * FR_AB_TX_IPFIL_TBL(128bit):
+ * Transmit IP source address filter table
+ */
+#define FR_AB_TX_IPFIL_TBL_OFST 0x00000b00
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TX_IPFIL_TBL_STEP 16
+#define FR_AB_TX_IPFIL_TBL_ROWS 16
+
+#define FRF_AB_TX_IPFIL_MASK_1_LBN 96
+#define FRF_AB_TX_IPFIL_MASK_1_WIDTH 32
+#define FRF_AB_TX_IP_SRC_ADR_1_LBN 64
+#define FRF_AB_TX_IP_SRC_ADR_1_WIDTH 32
+#define FRF_AB_TX_IPFIL_MASK_0_LBN 32
+#define FRF_AB_TX_IPFIL_MASK_0_WIDTH 32
+#define FRF_AB_TX_IP_SRC_ADR_0_LBN 0
+#define FRF_AB_TX_IP_SRC_ADR_0_WIDTH 32
+
+
+/*
+ * FR_AB_MD_TXD_REG(128bit):
+ * PHY management transmit data register
+ */
+#define FR_AB_MD_TXD_REG_OFST 0x00000c00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_TXD_LBN 0
+#define FRF_AB_MD_TXD_WIDTH 16
+
+
+/*
+ * FR_AB_MD_RXD_REG(128bit):
+ * PHY management receive data register
+ */
+#define FR_AB_MD_RXD_REG_OFST 0x00000c10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_RXD_LBN 0
+#define FRF_AB_MD_RXD_WIDTH 16
+
+
+/*
+ * FR_AB_MD_CS_REG(128bit):
+ * PHY management configuration & status register
+ */
+#define FR_AB_MD_CS_REG_OFST 0x00000c20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_RD_EN_LBN 15
+#define FRF_AB_MD_RD_EN_WIDTH 1
+#define FRF_AB_MD_WR_EN_LBN 14
+#define FRF_AB_MD_WR_EN_WIDTH 1
+#define FRF_AB_MD_ADDR_CMD_LBN 13
+#define FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define FRF_AB_MD_PT_LBN 7
+#define FRF_AB_MD_PT_WIDTH 3
+#define FRF_AB_MD_PL_LBN 6
+#define FRF_AB_MD_PL_WIDTH 1
+#define FRF_AB_MD_INT_CLR_LBN 5
+#define FRF_AB_MD_INT_CLR_WIDTH 1
+#define FRF_AB_MD_GC_LBN 4
+#define FRF_AB_MD_GC_WIDTH 1
+#define FRF_AB_MD_PRSP_LBN 3
+#define FRF_AB_MD_PRSP_WIDTH 1
+#define FRF_AB_MD_RIC_LBN 2
+#define FRF_AB_MD_RIC_WIDTH 1
+#define FRF_AB_MD_RDC_LBN 1
+#define FRF_AB_MD_RDC_WIDTH 1
+#define FRF_AB_MD_WRC_LBN 0
+#define FRF_AB_MD_WRC_WIDTH 1
+
+
+/*
+ * FR_AB_MD_PHY_ADR_REG(128bit):
+ * PHY management PHY address register
+ */
+#define FR_AB_MD_PHY_ADR_REG_OFST 0x00000c30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PHY_ADR_LBN 0
+#define FRF_AB_MD_PHY_ADR_WIDTH 16
+
+
+/*
+ * FR_AB_MD_ID_REG(128bit):
+ * PHY management ID register
+ */
+#define FR_AB_MD_ID_REG_OFST 0x00000c40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PRT_ADR_LBN 11
+#define FRF_AB_MD_PRT_ADR_WIDTH 5
+#define FRF_AB_MD_DEV_ADR_LBN 6
+#define FRF_AB_MD_DEV_ADR_WIDTH 5
+
+
+/*
+ * FR_AB_MD_STAT_REG(128bit):
+ * PHY management status & mask register
+ */
+#define FR_AB_MD_STAT_REG_OFST 0x00000c50
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PINT_LBN 4
+#define FRF_AB_MD_PINT_WIDTH 1
+#define FRF_AB_MD_DONE_LBN 3
+#define FRF_AB_MD_DONE_WIDTH 1
+#define FRF_AB_MD_BSERR_LBN 2
+#define FRF_AB_MD_BSERR_WIDTH 1
+#define FRF_AB_MD_LNFL_LBN 1
+#define FRF_AB_MD_LNFL_WIDTH 1
+#define FRF_AB_MD_BSY_LBN 0
+#define FRF_AB_MD_BSY_WIDTH 1
+
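+/*
+ * Editorial sketch, not part of the contributed driver: the MD_* registers
+ * above form the MII/MDIO management interface.  The helpers below only
+ * show how the FR_AB_MD_STAT_REG bits would be tested in a 32-bit
+ * register image; the helper names are assumptions made for this sketch.
+ */
+static inline int
+sketch_md_busy(uint32_t md_stat)
+{
+	return ((md_stat >> FRF_AB_MD_BSY_LBN) & 1);
+}
+
+static inline int
+sketch_md_done(uint32_t md_stat)
+{
+	return ((md_stat >> FRF_AB_MD_DONE_LBN) & 1);
+}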
+
+/*
+ * FR_AB_MAC_STAT_DMA_REG(128bit):
+ * Port MAC statistical counter DMA register
+ */
+#define FR_AB_MAC_STAT_DMA_REG_OFST 0x00000c60
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+#define FRF_AB_MAC_STAT_DMA_ADR_DW0_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_DW0_WIDTH 32
+#define FRF_AB_MAC_STAT_DMA_ADR_DW1_LBN 32
+#define FRF_AB_MAC_STAT_DMA_ADR_DW1_WIDTH 16
+
+
+/*
+ * FR_AB_MAC_CTRL_REG(128bit):
+ * Port MAC control register
+ */
+#define FR_AB_MAC_CTRL_REG_OFST 0x00000c80
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_XOFF_VAL_LBN 16
+#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define FRF_AB_MAC_UC_PROM_LBN 3
+#define FRF_AB_MAC_UC_PROM_WIDTH 1
+#define FRF_AB_MAC_LINK_STATUS_LBN 2
+#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define FRF_AB_MAC_SPEED_LBN 0
+#define FRF_AB_MAC_SPEED_WIDTH 2
+#define FRF_AB_MAC_SPEED_10M 0
+#define FRF_AB_MAC_SPEED_100M 1
+#define FRF_AB_MAC_SPEED_1G 2
+#define FRF_AB_MAC_SPEED_10G 3
+
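+/*
+ * Editorial sketch, not part of the contributed driver: the 2-bit
+ * FRF_AB_MAC_SPEED field above encodes the link speed.  The mapping below
+ * follows the FRF_AB_MAC_SPEED_* values; expressing the result in Mb/s is
+ * an assumption made for this sketch.
+ */
+static inline unsigned int
+sketch_mac_speed_mbps(unsigned int speed_field)
+{
+	switch (speed_field) {
+	case FRF_AB_MAC_SPEED_10M:
+		return (10);
+	case FRF_AB_MAC_SPEED_100M:
+		return (100);
+	case FRF_AB_MAC_SPEED_1G:
+		return (1000);
+	case FRF_AB_MAC_SPEED_10G:
+		return (10000);
+	default:
+		return (0);
+	}
+}
+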
+/*
+ * FR_BB_GEN_MODE_REG(128bit):
+ * General Purpose mode register (external interrupt mask)
+ */
+#define FR_BB_GEN_MODE_REG_OFST 0x00000c90
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+
+/*
+ * FR_AB_MAC_MC_HASH_REG0(128bit):
+ * Multicast address hash table
+ */
+#define FR_AB_MAC_MC_HASH0_REG_OFST 0x00000ca0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+#define FRF_AB_MAC_MCAST_HASH0_DW0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_DW0_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW1_LBN 32
+#define FRF_AB_MAC_MCAST_HASH0_DW1_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW2_LBN 64
+#define FRF_AB_MAC_MCAST_HASH0_DW2_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW3_LBN 96
+#define FRF_AB_MAC_MCAST_HASH0_DW3_WIDTH 32
+
+
+/*
+ * FR_AB_MAC_MC_HASH_REG1(128bit):
+ * Multicast address hash table
+ */
+#define FR_AB_MAC_MC_HASH1_REG_OFST 0x00000cb0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+#define FRF_AB_MAC_MCAST_HASH1_DW0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_DW0_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW1_LBN 32
+#define FRF_AB_MAC_MCAST_HASH1_DW1_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW2_LBN 64
+#define FRF_AB_MAC_MCAST_HASH1_DW2_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW3_LBN 96
+#define FRF_AB_MAC_MCAST_HASH1_DW3_WIDTH 32
+
+
+/*
+ * FR_AB_GM_CFG1_REG(32bit):
+ * GMAC configuration register 1
+ */
+#define FR_AB_GM_CFG1_REG_OFST 0x00000e00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_SW_RST_LBN 31
+#define FRF_AB_GM_SW_RST_WIDTH 1
+#define FRF_AB_GM_SIM_RST_LBN 30
+#define FRF_AB_GM_SIM_RST_WIDTH 1
+#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define FRF_AB_GM_LOOP_LBN 8
+#define FRF_AB_GM_LOOP_WIDTH 1
+#define FRF_AB_GM_RX_FC_EN_LBN 5
+#define FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define FRF_AB_GM_TX_FC_EN_LBN 4
+#define FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_RXEN_LBN 3
+#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define FRF_AB_GM_RX_EN_LBN 2
+#define FRF_AB_GM_RX_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_TXEN_LBN 1
+#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define FRF_AB_GM_TX_EN_LBN 0
+#define FRF_AB_GM_TX_EN_WIDTH 1
+
+
+/*
+ * FR_AB_GM_CFG2_REG(32bit):
+ * GMAC configuration register 2
+ */
+#define FR_AB_GM_CFG2_REG_OFST 0x00000e10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_PAMBL_LEN_LBN 12
+#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define FRF_AB_GM_IF_MODE_LBN 8
+#define FRF_AB_GM_IF_MODE_WIDTH 2
+#define FRF_AB_GM_IF_MODE_BYTE_MODE 2
+#define FRF_AB_GM_IF_MODE_NIBBLE_MODE 1
+#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define FRF_AB_GM_LEN_CHK_LBN 4
+#define FRF_AB_GM_LEN_CHK_WIDTH 1
+#define FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define FRF_AB_GM_CRC_EN_LBN 1
+#define FRF_AB_GM_CRC_EN_WIDTH 1
+#define FRF_AB_GM_FD_LBN 0
+#define FRF_AB_GM_FD_WIDTH 1
+
+
+/*
+ * FR_AB_GM_IPG_REG(32bit):
+ * GMAC IPG register
+ */
+#define FR_AB_GM_IPG_REG_OFST 0x00000e20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define FRF_AB_GM_B2B_IPG_LBN 0
+#define FRF_AB_GM_B2B_IPG_WIDTH 7
+
+
+/*
+ * FR_AB_GM_HD_REG(32bit):
+ * GMAC half duplex register
+ */
+#define FR_AB_GM_HD_REG_OFST 0x00000e30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define FRF_AB_GM_DIS_BOFF_LBN 17
+#define FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define FRF_AB_GM_COL_WIN_LBN 0
+#define FRF_AB_GM_COL_WIN_WIDTH 10
+
+
+/*
+ * FR_AB_GM_MAX_FLEN_REG(32bit):
+ * GMAC maximum frame length register
+ */
+#define FR_AB_GM_MAX_FLEN_REG_OFST 0x00000e40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_MAX_FLEN_LBN 0
+#define FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+
+/*
+ * FR_AB_GM_TEST_REG(32bit):
+ * GMAC test register
+ */
+#define FR_AB_GM_TEST_REG_OFST 0x00000e70
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_MAX_BOFF_LBN 3
+#define FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define FRF_AB_GM_TEST_PAUSE_LBN 1
+#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define FRF_AB_GM_SHORT_SLOT_LBN 0
+#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+
+/*
+ * FR_AB_GM_ADR1_REG(32bit):
+ * GMAC station address register 1
+ */
+#define FR_AB_GM_ADR1_REG_OFST 0x00000f00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ADR_B0_LBN 24
+#define FRF_AB_GM_ADR_B0_WIDTH 8
+#define FRF_AB_GM_ADR_B1_LBN 16
+#define FRF_AB_GM_ADR_B1_WIDTH 8
+#define FRF_AB_GM_ADR_B2_LBN 8
+#define FRF_AB_GM_ADR_B2_WIDTH 8
+#define FRF_AB_GM_ADR_B3_LBN 0
+#define FRF_AB_GM_ADR_B3_WIDTH 8
+
+
+/*
+ * FR_AB_GM_ADR2_REG(32bit):
+ * GMAC station address register 2
+ */
+#define FR_AB_GM_ADR2_REG_OFST 0x00000f10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ADR_B4_LBN 24
+#define FRF_AB_GM_ADR_B4_WIDTH 8
+#define FRF_AB_GM_ADR_B5_LBN 16
+#define FRF_AB_GM_ADR_B5_WIDTH 8
+
+
+/*
+ * FR_AB_GMF_CFG0_REG(32bit):
+ * GMAC FIFO configuration register 0
+ */
+#define FR_AB_GMF_CFG0_REG_OFST 0x00000f20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_FTFENRPLY_LBN 20
+#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define FRF_AB_GMF_STFENRPLY_LBN 19
+#define FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define FRF_AB_GMF_FRFENRPLY_LBN 18
+#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_SRFENRPLY_LBN 17
+#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_WTMENRPLY_LBN 16
+#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define FRF_AB_GMF_FTFENREQ_LBN 12
+#define FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define FRF_AB_GMF_STFENREQ_LBN 11
+#define FRF_AB_GMF_STFENREQ_WIDTH 1
+#define FRF_AB_GMF_FRFENREQ_LBN 10
+#define FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define FRF_AB_GMF_SRFENREQ_LBN 9
+#define FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define FRF_AB_GMF_WTMENREQ_LBN 8
+#define FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFT_LBN 4
+#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define FRF_AB_GMF_HSTRSTST_LBN 3
+#define FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFR_LBN 2
+#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTSR_LBN 1
+#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTWT_LBN 0
+#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+
+/*
+ * FR_AB_GMF_CFG1_REG(32bit):
+ * GMAC FIFO configuration register 1
+ */
+#define FR_AB_GMF_CFG1_REG_OFST 0x00000f30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGFRTH_LBN 16
+#define FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+
+/*
+ * FR_AB_GMF_CFG2_REG(32bit):
+ * GMAC FIFO configuration register 2
+ */
+#define FR_AB_GMF_CFG2_REG_OFST 0x00000f40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHWM_LBN 16
+#define FRF_AB_GMF_CFGHWM_WIDTH 6
+#define FRF_AB_GMF_CFGLWM_LBN 0
+#define FRF_AB_GMF_CFGLWM_WIDTH 6
+
+
+/*
+ * FR_AB_GMF_CFG3_REG(32bit):
+ * GMAC FIFO configuration register 3
+ */
+#define FR_AB_GMF_CFG3_REG_OFST 0x00000f50
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHWMFT_LBN 16
+#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define FRF_AB_GMF_CFGFTTH_LBN 0
+#define FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+
+/*
+ * FR_AB_GMF_CFG4_REG(32bit):
+ * GMAC FIFO configuration register 4
+ */
+#define FR_AB_GMF_CFG4_REG_OFST 0x00000f60
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+
+/*
+ * FR_AB_GMF_CFG5_REG(32bit):
+ * GMAC FIFO configuration register 5
+ */
+#define FR_AB_GMF_CFG5_REG_OFST 0x00000f70
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHDPLX_LBN 22
+#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define FRF_AB_GMF_SRFULL_LBN 21
+#define FRF_AB_GMF_SRFULL_WIDTH 1
+#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+
+/*
+ * FR_BB_TX_SRC_MAC_TBL(128bit):
+ * Transmit MAC source address filter table
+ */
+#define FR_BB_TX_SRC_MAC_TBL_OFST 0x00001000
+/* falconb0=net_func_bar2 */
+#define FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
+
+#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_WIDTH 32
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_LBN 96
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_WIDTH 16
+#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_WIDTH 32
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_LBN 32
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_WIDTH 16
+
+
+/*
+ * FR_BB_TX_SRC_MAC_CTL_REG(128bit):
+ * Transmit MAC source address filter control
+ */
+#define FR_BB_TX_SRC_MAC_CTL_REG_OFST 0x00001100
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+
+/*
+ * FR_AB_XM_ADR_LO_REG(128bit):
+ * XGMAC address register low
+ */
+#define FR_AB_XM_ADR_LO_REG_OFST 0x00001200
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_ADR_LO_LBN 0
+#define FRF_AB_XM_ADR_LO_WIDTH 32
+
+
+/*
+ * FR_AB_XM_ADR_HI_REG(128bit):
+ * XGMAC address register high
+ */
+#define FR_AB_XM_ADR_HI_REG_OFST 0x00001210
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_ADR_HI_LBN 0
+#define FRF_AB_XM_ADR_HI_WIDTH 16
+
+
+/*
+ * FR_AB_XM_GLB_CFG_REG(128bit):
+ * XGMAC global configuration
+ */
+#define FR_AB_XM_GLB_CFG_REG_OFST 0x00001220
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define FRF_AB_XM_DEBUG_MODE_LBN 16
+#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define FRF_AB_XM_RX_STAT_EN_LBN 11
+#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_TX_STAT_EN_LBN 10
+#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_WAN_MODE_LBN 5
+#define FRF_AB_XM_WAN_MODE_WIDTH 1
+#define FRF_AB_XM_INTCLR_MODE_LBN 3
+#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define FRF_AB_XM_CORE_RST_LBN 0
+#define FRF_AB_XM_CORE_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_TX_CFG_REG(128bit):
+ * XGMAC transmit configuration
+ */
+#define FR_AB_XM_TX_CFG_REG_OFST 0x00001230
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_PROG_LBN 24
+#define FRF_AB_XM_TX_PROG_WIDTH 1
+#define FRF_AB_XM_IPG_LBN 16
+#define FRF_AB_XM_IPG_WIDTH 4
+#define FRF_AB_XM_FCNTL_LBN 10
+#define FRF_AB_XM_FCNTL_WIDTH 1
+#define FRF_AB_XM_TXCRC_LBN 8
+#define FRF_AB_XM_TXCRC_WIDTH 1
+#define FRF_AB_XM_EDRC_LBN 6
+#define FRF_AB_XM_EDRC_WIDTH 1
+#define FRF_AB_XM_AUTO_PAD_LBN 5
+#define FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define FRF_AB_XM_TX_PRMBL_LBN 2
+#define FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define FRF_AB_XM_TXEN_LBN 1
+#define FRF_AB_XM_TXEN_WIDTH 1
+#define FRF_AB_XM_TX_RST_LBN 0
+#define FRF_AB_XM_TX_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_RX_CFG_REG(128bit):
+ * XGMAC receive configuration
+ */
+#define FR_AB_XM_RX_CFG_REG_OFST 0x00001240
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_PASS_LENERR_LBN 26
+#define FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_REJ_BCAST_LBN 20
+#define FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define FRF_AB_XM_RXCRC_LBN 3
+#define FRF_AB_XM_RXCRC_WIDTH 1
+#define FRF_AB_XM_RX_PRMBL_LBN 2
+#define FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define FRF_AB_XM_RXEN_LBN 1
+#define FRF_AB_XM_RXEN_WIDTH 1
+#define FRF_AB_XM_RX_RST_LBN 0
+#define FRF_AB_XM_RX_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_MGT_INT_MASK(128bit):
+ * XGMAC management interrupt mask (documentation to be written)
+ */
+#define FR_AB_XM_MGT_INT_MASK_OFST 0x00001250
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+
+/*
+ * FR_AB_XM_FC_REG(128bit):
+ * XGMAC flow control register
+ */
+#define FR_AB_XM_FC_REG_OFST 0x00001270
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_PAUSE_TIME_LBN 16
+#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_MCNTL_PASS_LBN 8
+#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define FRF_AB_XM_ZPAUSE_LBN 2
+#define FRF_AB_XM_ZPAUSE_WIDTH 1
+#define FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define FRF_AB_XM_DIS_FCNTL_LBN 0
+#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+
+/*
+ * FR_AB_XM_PAUSE_TIME_REG(128bit):
+ * XGMAC pause time register
+ */
+#define FR_AB_XM_PAUSE_TIME_REG_OFST 0x00001290
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+
+/*
+ * FR_AB_XM_TX_PARAM_REG(128bit):
+ * XGMAC transmit parameter register
+ */
+#define FR_AB_XM_TX_PARAM_REG_OFST 0x000012d0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define FRF_AB_XM_PAD_CHAR_LBN 0
+#define FRF_AB_XM_PAD_CHAR_WIDTH 8
+
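+/*
+ * Editorial sketch, not part of the contributed driver: the maximum TX
+ * frame size above is split across the 3-bit .._LO and 11-bit .._HI
+ * fields.  The helper below assumes the 14-bit byte count is contiguous
+ * with its three least-significant bits in the .._LO field; that layout
+ * is an inference made for this sketch.
+ */
+static inline void
+sketch_xm_max_tx_frm_size_split(unsigned int frm_size,
+    unsigned int *hi, unsigned int *lo)
+{
+	*lo = frm_size & ((1u << FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH) - 1);
+	*hi = frm_size >> FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH;
+}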
+
+/*
+ * FR_AB_XM_RX_PARAM_REG(128bit):
+ * XGMAC receive parameter register
+ */
+#define FR_AB_XM_RX_PARAM_REG_OFST 0x000012e0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+
+/*
+ * FR_AB_XM_MGT_INT_MSK_REG(128bit):
+ * XGMAC management interrupt mask register
+ */
+#define FR_AB_XM_MGT_INT_REG_OFST 0x000012f0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_RMTFLT_LBN 1
+#define FRF_AB_XM_RMTFLT_WIDTH 1
+#define FRF_AB_XM_LCLFLT_LBN 0
+#define FRF_AB_XM_LCLFLT_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PWR_RST_REG(128bit):
+ * XGXS/XAUI powerdown/reset register
+ */
+#define FR_AB_XX_PWR_RST_REG_OFST 0x00001300
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_PWRDND_SIG_LBN 31
+#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define FRF_AB_XX_SIM_MODE_LBN 27
+#define FRF_AB_XX_SIM_MODE_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETD_SIG_LBN 23
+#define FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define FRF_AB_XX_RESETC_SIG_LBN 22
+#define FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define FRF_AB_XX_RESETB_SIG_LBN 21
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETA_SIG_LBN 20
+#define FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define FRF_AB_XX_SD_RST_ACT_LBN 16
+#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define FRF_AB_XX_PWRDND_EN_LBN 15
+#define FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNC_EN_LBN 14
+#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNB_EN_LBN 13
+#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNA_EN_LBN 12
+#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define FRF_AB_XX_RESETD_EN_LBN 7
+#define FRF_AB_XX_RESETD_EN_WIDTH 1
+#define FRF_AB_XX_RESETC_EN_LBN 6
+#define FRF_AB_XX_RESETC_EN_WIDTH 1
+#define FRF_AB_XX_RESETB_EN_LBN 5
+#define FRF_AB_XX_RESETB_EN_WIDTH 1
+#define FRF_AB_XX_RESETA_EN_LBN 4
+#define FRF_AB_XX_RESETA_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define FRF_AB_XX_RST_XX_EN_LBN 0
+#define FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+
+/*
+ * FR_AB_XX_SD_CTL_REG(128bit):
+ * XAUI SerDes control register
+ */
+#define FR_AB_XX_SD_CTL_REG_OFST 0x00001310
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_TERMADJ1_LBN 17
+#define FRF_AB_XX_TERMADJ1_WIDTH 1
+#define FRF_AB_XX_TERMADJ0_LBN 16
+#define FRF_AB_XX_TERMADJ0_WIDTH 1
+#define FRF_AB_XX_HIDRVD_LBN 15
+#define FRF_AB_XX_HIDRVD_WIDTH 1
+#define FRF_AB_XX_LODRVD_LBN 14
+#define FRF_AB_XX_LODRVD_WIDTH 1
+#define FRF_AB_XX_HIDRVC_LBN 13
+#define FRF_AB_XX_HIDRVC_WIDTH 1
+#define FRF_AB_XX_LODRVC_LBN 12
+#define FRF_AB_XX_LODRVC_WIDTH 1
+#define FRF_AB_XX_HIDRVB_LBN 11
+#define FRF_AB_XX_HIDRVB_WIDTH 1
+#define FRF_AB_XX_LODRVB_LBN 10
+#define FRF_AB_XX_LODRVB_WIDTH 1
+#define FRF_AB_XX_HIDRVA_LBN 9
+#define FRF_AB_XX_HIDRVA_WIDTH 1
+#define FRF_AB_XX_LODRVA_LBN 8
+#define FRF_AB_XX_LODRVA_WIDTH 1
+#define FRF_AB_XX_LPBKD_LBN 3
+#define FRF_AB_XX_LPBKD_WIDTH 1
+#define FRF_AB_XX_LPBKC_LBN 2
+#define FRF_AB_XX_LPBKC_WIDTH 1
+#define FRF_AB_XX_LPBKB_LBN 1
+#define FRF_AB_XX_LPBKB_WIDTH 1
+#define FRF_AB_XX_LPBKA_LBN 0
+#define FRF_AB_XX_LPBKA_WIDTH 1
+
+
+/*
+ * FR_AB_XX_TXDRV_CTL_REG(128bit):
+ * XAUI SerDes transmit drive control register
+ */
+#define FR_AB_XX_TXDRV_CTL_REG_OFST 0x00001320
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_DEQD_LBN 28
+#define FRF_AB_XX_DEQD_WIDTH 4
+#define FRF_AB_XX_DEQC_LBN 24
+#define FRF_AB_XX_DEQC_WIDTH 4
+#define FRF_AB_XX_DEQB_LBN 20
+#define FRF_AB_XX_DEQB_WIDTH 4
+#define FRF_AB_XX_DEQA_LBN 16
+#define FRF_AB_XX_DEQA_WIDTH 4
+#define FRF_AB_XX_DTXD_LBN 12
+#define FRF_AB_XX_DTXD_WIDTH 4
+#define FRF_AB_XX_DTXC_LBN 8
+#define FRF_AB_XX_DTXC_WIDTH 4
+#define FRF_AB_XX_DTXB_LBN 4
+#define FRF_AB_XX_DTXB_WIDTH 4
+#define FRF_AB_XX_DTXA_LBN 0
+#define FRF_AB_XX_DTXA_WIDTH 4
+
+
+/*
+ * FR_AB_XX_PRBS_CTL_REG(128bit):
+ * XAUI SerDes PRBS generator/checker control (documentation to be written)
+ */
+#define FR_AB_XX_PRBS_CTL_REG_OFST 0x00001330
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PRBS_CHK_REG(128bit):
+ * XAUI SerDes PRBS checker status (documentation to be written)
+ */
+#define FR_AB_XX_PRBS_CHK_REG_OFST 0x00001340
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_REV_LB_EN_LBN 16
+#define FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PRBS_ERR_REG(128bit):
+ * XAUI SerDes per-lane PRBS error counters (documentation to be written)
+ */
+#define FR_AB_XX_PRBS_ERR_REG_OFST 0x00001350
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+
+/*
+ * FR_AB_XX_CORE_STAT_REG(128bit):
+ * XAUI XGXS core status register
+ */
+#define FR_AB_XX_CORE_STAT_REG_OFST 0x00001360
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_FORCE_SIG3_LBN 31
+#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_LBN 29
+#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_LBN 27
+#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_LBN 25
+#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define FRF_AB_XX_MATCH_FAULT_LBN 21
+#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define FRF_AB_XX_ALIGN_DONE_LBN 20
+#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT3_LBN 19
+#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT2_LBN 18
+#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT1_LBN 17
+#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT0_LBN 16
+#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH3_LBN 3
+#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH2_LBN 2
+#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH1_LBN 1
+#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH0_LBN 0
+#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+
+/*
+ * FR_AA_RX_DESC_PTR_TBL_KER(128bit):
+ * Receive descriptor pointer table
+ */
+#define FR_AA_RX_DESC_PTR_TBL_KER_OFST 0x00011800
+/* falcona0=net_func_bar2 */
+#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/*
+ * FR_AZ_RX_DESC_PTR_TBL(128bit):
+ * Receive descriptor pointer table
+ */
+#define FR_AZ_RX_DESC_PTR_TBL_OFST 0x00f40000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_RX_DESC_PTR_TBL_STEP 16
+#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define FR_AB_RX_DESC_PTR_TBL_ROWS 4096
+
+#define FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define FRF_AZ_RX_RESET_LBN 89
+#define FRF_AZ_RX_RESET_WIDTH 1
+#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define FFE_AZ_RX_DESCQ_SIZE_512 0
+#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define FRF_AZ_RX_DESCQ_EN_LBN 0
+#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
+
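+/*
+ * Editorial sketch, not part of the contributed driver: the 2-bit
+ * FRF_AZ_RX_DESCQ_SIZE field above encodes the ring size, and the
+ * FFE_AZ_RX_DESCQ_SIZE_* values (512/1K/2K/4K for 0..3) imply a
+ * 512 << encoding descriptor count.  The helper below expresses that
+ * relation as an illustration only.
+ */
+static inline unsigned int
+sketch_rx_descq_entries(unsigned int size_field)
+{
+	return (512u << size_field);	/* FFE_AZ_RX_DESCQ_SIZE_512 is 0 */
+}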
+
+/*
+ * FR_AA_TX_DESC_PTR_TBL_KER(128bit):
+ * Transmit descriptor pointer table
+ */
+#define FR_AA_TX_DESC_PTR_TBL_KER_OFST 0x00011900
+/* falcona0=net_func_bar2 */
+#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/*
+ * FR_AZ_TX_DESC_PTR_TBL(128bit):
+ * Transmit descriptor pointer table
+ */
+#define FR_AZ_TX_DESC_PTR_TBL_OFST 0x00f50000
+/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_TX_DESC_PTR_TBL_STEP 16
+#define FR_AB_TX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define FRF_AZ_TX_DESCQ_EN_LBN 88
+#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define FFE_AZ_TX_DESCQ_SIZE_512 0
+#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+
+/*
+ * FR_AA_EVQ_PTR_TBL_KER(128bit):
+ * Event queue pointer table
+ */
+#define FR_AA_EVQ_PTR_TBL_KER_OFST 0x00011a00
+/* falcona0=net_func_bar2 */
+#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/*
+ * FR_AZ_EVQ_PTR_TBL(128bit):
+ * Event queue pointer table
+ */
+#define FR_AZ_EVQ_PTR_TBL_OFST 0x00f60000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_EVQ_PTR_TBL_STEP 16
+#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define FR_AB_EVQ_PTR_TBL_ROWS 4096
+
+#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define FRF_AZ_EVQ_WKUP_OR_INT_EN_LBN 39
+#define FRF_AZ_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define FRF_AZ_EVQ_EN_LBN 23
+#define FRF_AZ_EVQ_EN_WIDTH 1
+#define FRF_AZ_EVQ_SIZE_LBN 20
+#define FRF_AZ_EVQ_SIZE_WIDTH 3
+#define FFE_AZ_EVQ_SIZE_32K 6
+#define FFE_AZ_EVQ_SIZE_16K 5
+#define FFE_AZ_EVQ_SIZE_8K 4
+#define FFE_AZ_EVQ_SIZE_4K 3
+#define FFE_AZ_EVQ_SIZE_2K 2
+#define FFE_AZ_EVQ_SIZE_1K 1
+#define FFE_AZ_EVQ_SIZE_512 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
+
+
+/*
+ * FR_AA_BUF_HALF_TBL_KER(64bit):
+ * Buffer table in half buffer table mode direct access by driver
+ */
+#define FR_AA_BUF_HALF_TBL_KER_OFST 0x00018000
+/* falcona0=net_func_bar2 */
+#define FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/*
+ * FR_AZ_BUF_HALF_TBL(64bit):
+ * Buffer table in half buffer table mode direct access by driver
+ */
+#define FR_AZ_BUF_HALF_TBL_OFST 0x00800000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_BUF_HALF_TBL_STEP 8
+#define FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define FR_AB_BUF_HALF_TBL_ROWS 524288
+
+#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
+
+
+/*
+ * FR_AA_BUF_FULL_TBL_KER(64bit):
+ * Buffer table in full buffer table mode direct access by driver
+ */
+#define FR_AA_BUF_FULL_TBL_KER_OFST 0x00018000
+/* falcona0=net_func_bar2 */
+#define FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/*
+ * FR_AZ_BUF_FULL_TBL(64bit):
+ * Buffer table in full buffer table mode direct access by driver
+ */
+#define FR_AZ_BUF_FULL_TBL_OFST 0x00800000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_BUF_FULL_TBL_STEP 8
+#define FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define FR_AB_BUF_FULL_TBL_ROWS 917504
+
+#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define FRF_AZ_BUF_ADR_REGION_LBN 48
+#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define FFE_AZ_BUF_ADR_REGN3 3
+#define FFE_AZ_BUF_ADR_REGN2 2
+#define FFE_AZ_BUF_ADR_REGN1 1
+#define FFE_AZ_BUF_ADR_REGN0 0
+#define FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define FRF_AZ_BUF_ADR_FBUF_DW0_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_DW0_WIDTH 32
+#define FRF_AZ_BUF_ADR_FBUF_DW1_LBN 46
+#define FRF_AZ_BUF_ADR_FBUF_DW1_WIDTH 2
+#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+
+/*
+ * FR_AZ_RX_FILTER_TBL0(128bit):
+ * TCP/IPv4 Receive filter table
+ */
+#define FR_AZ_RX_FILTER_TBL0_OFST 0x00f00000
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_RX_FILTER_TBL0_STEP 32
+#define FR_AZ_RX_FILTER_TBL0_ROWS 8192
+/*
+ * FR_AB_RX_FILTER_TBL1(128bit):
+ * TCP/IPv4 Receive filter table
+ */
+#define FR_AB_RX_FILTER_TBL1_OFST 0x00f00010
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_RX_FILTER_TBL1_STEP 32
+#define FR_AB_RX_FILTER_TBL1_ROWS 8192
+
+#define FRF_BZ_RSS_EN_LBN 110
+#define FRF_BZ_RSS_EN_WIDTH 1
+#define FRF_BZ_SCATTER_EN_LBN 109
+#define FRF_BZ_SCATTER_EN_WIDTH 1
+#define FRF_AZ_TCP_UDP_LBN 108
+#define FRF_AZ_TCP_UDP_WIDTH 1
+#define FRF_AZ_RXQ_ID_LBN 96
+#define FRF_AZ_RXQ_ID_WIDTH 12
+#define FRF_AZ_DEST_IP_LBN 64
+#define FRF_AZ_DEST_IP_WIDTH 32
+#define FRF_AZ_DEST_PORT_TCP_LBN 48
+#define FRF_AZ_DEST_PORT_TCP_WIDTH 16
+#define FRF_AZ_SRC_IP_LBN 16
+#define FRF_AZ_SRC_IP_WIDTH 32
+#define FRF_AZ_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_AZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+
+/*
+ * FR_CZ_RX_MAC_FILTER_TBL0(128bit):
+ * Receive Ethernet filter table
+ */
+#define FR_CZ_RX_MAC_FILTER_TBL0_OFST 0x00f00010
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+
+#define FRF_CZ_RMFT_RSS_EN_LBN 75
+#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_RMFT_DEST_MAC_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
+#define FRF_CZ_RMFT_DEST_MAC_DW0_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_DW0_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_DW1_LBN 44
+#define FRF_CZ_RMFT_DEST_MAC_DW1_WIDTH 16
+#define FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_TIMER_TBL(128bit):
+ * Timer table
+ */
+#define FR_AZ_TIMER_TBL_OFST 0x00f70000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_TIMER_TBL_STEP 16
+#define FR_CZ_TIMER_TBL_ROWS 1024
+#define FR_AB_TIMER_TBL_ROWS 4096
+
+#define FRF_CZ_TIMER_Q_EN_LBN 33
+#define FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define FRF_CZ_INT_ARMD_LBN 32
+#define FRF_CZ_INT_ARMD_WIDTH 1
+#define FRF_CZ_INT_PEND_LBN 31
+#define FRF_CZ_INT_PEND_WIDTH 1
+#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define FRF_CZ_TIMER_MODE_LBN 14
+#define FRF_CZ_TIMER_MODE_WIDTH 2
+#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define FFE_CZ_TIMER_MODE_TRIG_START 2
+#define FFE_CZ_TIMER_MODE_IMMED_START 1
+#define FFE_CZ_TIMER_MODE_DIS 0
+#define FRF_AB_TIMER_MODE_LBN 12
+#define FRF_AB_TIMER_MODE_WIDTH 2
+#define FFE_AB_TIMER_MODE_INT_HLDOFF 2
+#define FFE_AB_TIMER_MODE_TRIG_START 2
+#define FFE_AB_TIMER_MODE_IMMED_START 1
+#define FFE_AB_TIMER_MODE_DIS 0
+#define FRF_CZ_TIMER_VAL_LBN 0
+#define FRF_CZ_TIMER_VAL_WIDTH 14
+#define FRF_AB_TIMER_VAL_LBN 0
+#define FRF_AB_TIMER_VAL_WIDTH 12
+
+
+/*
+ * FR_BZ_TX_PACE_TBL(128bit):
+ * Transmit pacing table
+ */
+#define FR_BZ_TX_PACE_TBL_OFST 0x00f80000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2 */
+#define FR_AZ_TX_PACE_TBL_STEP 16
+#define FR_CZ_TX_PACE_TBL_ROWS 1024
+#define FR_BB_TX_PACE_TBL_ROWS 4096
+/*
+ * FR_AA_TX_PACE_TBL(128bit):
+ * Transmit pacing table
+ */
+#define FR_AA_TX_PACE_TBL_OFST 0x00f80040
+/* falcona0=char_func_bar0 */
+/* FR_AZ_TX_PACE_TBL_STEP 16 */
+#define FR_AA_TX_PACE_TBL_ROWS 4092
+
+#define FRF_AZ_TX_PACE_LBN 0
+#define FRF_AZ_TX_PACE_WIDTH 5
+
+
+/*
+ * FR_BZ_RX_INDIRECTION_TBL(7bit):
+ * RX Indirection Table
+ */
+#define FR_BZ_RX_INDIRECTION_TBL_OFST 0x00fb0000
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+
+#define FRF_BZ_IT_QUEUE_LBN 0
+#define FRF_BZ_IT_QUEUE_WIDTH 6
+
+
+/*
+ * FR_CZ_TX_FILTER_TBL0(128bit):
+ * TCP/IPv4 Transmit filter table
+ */
+#define FR_CZ_TX_FILTER_TBL0_OFST 0x00fc0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_TX_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
+
+#define FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TIFT_DEST_IP_LBN 64
+#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define FRF_CZ_TIFT_SRC_IP_LBN 16
+#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+
+/*
+ * FR_CZ_TX_MAC_FILTER_TBL0(128bit):
+ * Transmit Ethernet filter table
+ */
+#define FR_CZ_TX_MAC_FILTER_TBL0_OFST 0x00fe0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+
+#define FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_TMFT_SRC_MAC_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
+#define FRF_CZ_TMFT_SRC_MAC_DW0_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_DW0_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_DW1_LBN 44
+#define FRF_CZ_TMFT_SRC_MAC_DW1_WIDTH 16
+#define FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+
+/*
+ * FR_CZ_MC_TREG_SMEM(32bit):
+ * MC Shared Memory
+ */
+#define FR_CZ_MC_TREG_SMEM_OFST 0x00ff0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_MC_TREG_SMEM_STEP 4
+#define FR_CZ_MC_TREG_SMEM_ROWS 512
+
+#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+
+/*
+ * FR_BB_MSIX_VECTOR_TABLE(128bit):
+ * MSIX Vector Table
+ */
+#define FR_BB_MSIX_VECTOR_TABLE_OFST 0x00ff0000
+/* falconb0=net_func_bar2 */
+#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/*
+ * FR_CZ_MSIX_VECTOR_TABLE(128bit):
+ * MSIX Vector Table
+ */
+#define FR_CZ_MSIX_VECTOR_TABLE_OFST 0x00000000
+/* sienaa0=pci_f0_bar4 */
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+
+#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+
+/*
+ * FR_BB_MSIX_PBA_TABLE(32bit):
+ * MSIX Pending Bit Array
+ */
+#define FR_BB_MSIX_PBA_TABLE_OFST 0x00ff2000
+/* falconb0=net_func_bar2 */
+#define FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define FR_BB_MSIX_PBA_TABLE_ROWS 2
+/*
+ * FR_CZ_MSIX_PBA_TABLE(32bit):
+ * MSIX Pending Bit Array
+ */
+#define FR_CZ_MSIX_PBA_TABLE_OFST 0x00008000
+/* sienaa0=pci_f0_bar4 */
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
+
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+
+/*
+ * FR_AZ_SRM_DBG_REG(64bit):
+ * SRAM debug access
+ */
+#define FR_AZ_SRM_DBG_REG_OFST 0x03000000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_SRM_DBG_REG_STEP 8
+
+#define FR_CZ_SRM_DBG_REG_ROWS 262144
+#define FR_AB_SRM_DBG_REG_ROWS 2097152
+
+#define FRF_AZ_SRM_DBG_LBN 0
+#define FRF_AZ_SRM_DBG_WIDTH 64
+#define FRF_AZ_SRM_DBG_DW0_LBN 0
+#define FRF_AZ_SRM_DBG_DW0_WIDTH 32
+#define FRF_AZ_SRM_DBG_DW1_LBN 32
+#define FRF_AZ_SRM_DBG_DW1_WIDTH 32
+
+
+/*
+ * FR_AA_INT_ACK_CHAR(32bit):
+ * CHAR interrupt acknowledge register
+ */
+#define FR_AA_INT_ACK_CHAR_OFST 0x00000060
+/* falcona0=char_func_bar0 */
+
+#define FRF_AA_INT_ACK_CHAR_FIELD_LBN 0
+#define FRF_AA_INT_ACK_CHAR_FIELD_WIDTH 32
+
+
+/* FS_DRIVER_EV */
+#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define FSE_AZ_TX_DSC_ERROR_EV 15
+#define FSE_AZ_RX_DSC_ERROR_EV 14
+#define FSE_AZ_RX_RECOVER_EV 11
+#define FSE_AZ_TIMER_EV 10
+#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define FSE_AZ_WAKE_UP_EV 6
+#define FSE_AZ_SRM_UPD_DONE_EV 5
+#define FSE_AZ_EVQ_NOT_EN_EV 3
+#define FSE_AZ_EVQ_INIT_DONE_EV 2
+#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+
+/* FS_EVENT_ENTRY */
+#define FSF_AZ_EV_CODE_LBN 60
+#define FSF_AZ_EV_CODE_WIDTH 4
+#define FSE_AZ_EV_CODE_USER_EV 8
+#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define FSE_AZ_EV_CODE_DRIVER_EV 5
+#define FSE_AZ_EV_CODE_TX_EV 2
+#define FSE_AZ_EV_CODE_RX_EV 0
+#define FSF_AZ_EV_DATA_LBN 0
+#define FSF_AZ_EV_DATA_WIDTH 60
+#define FSF_AZ_EV_DATA_DW0_LBN 0
+#define FSF_AZ_EV_DATA_DW0_WIDTH 32
+#define FSF_AZ_EV_DATA_DW1_LBN 32
+#define FSF_AZ_EV_DATA_DW1_WIDTH 28
+
+
+/* FS_GLOBAL_EV */
+#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 12
+#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_BZ_GLB_EV_XG_MNT_INTR_LBN 11
+#define FSF_BZ_GLB_EV_XG_MNT_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_XG_PHY0_INTR_LBN 9
+#define FSF_AZ_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_G_PHY0_INTR_LBN 7
+#define FSF_AZ_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+
+/* FS_RX_EV */
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define FSF_AZ_RX_EV_PORT_LBN 30
+#define FSF_AZ_RX_EV_PORT_WIDTH 1
+#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define FSF_AZ_RX_EV_SOP_LBN 15
+#define FSF_AZ_RX_EV_SOP_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+
+/* FS_RX_KER_DESC */
+#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+#define FSF_AZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define FSF_AZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define FSF_AZ_RX_KER_BUF_ADDR_DW1_WIDTH 14
+
+
+/* FS_RX_USER_DESC */
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+
+/* FS_TX_EV */
+#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_TX_EV_PORT_LBN 16
+#define FSF_AZ_TX_EV_PORT_WIDTH 1
+#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_COMP_LBN 12
+#define FSF_AZ_TX_EV_COMP_WIDTH 1
+#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+
+/* FS_TX_KER_DESC */
+#define FSF_AZ_TX_KER_CONT_LBN 62
+#define FSF_AZ_TX_KER_CONT_WIDTH 1
+#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+#define FSF_AZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define FSF_AZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define FSF_AZ_TX_KER_BUF_ADDR_DW1_WIDTH 14
+
+
+/* FS_TX_USER_DESC */
+#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define FSF_AZ_TX_USER_CONT_LBN 46
+#define FSF_AZ_TX_USER_CONT_WIDTH 1
+#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+
+/* FS_USER_EV */
+#define FSF_CZ_USER_QID_LBN 32
+#define FSF_CZ_USER_QID_WIDTH 10
+#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+
+/* FS_NET_IVEC */
+#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+
+
+/**************************************************************************
+ *
+ * Falcon non-volatile configuration
+ *
+ **************************************************************************
+ */
+
+
+#define FR_AZ_TX_PACE_TBL_OFST FR_BZ_TX_PACE_TBL_OFST
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+
+#endif /* _SYS_EFX_REGS_H */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_regs_ef10.h b/usr/src/uts/common/io/sfxge/common/efx_regs_ef10.h
new file mode 100644
index 0000000000..1e7007f58d
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_regs_ef10.h
@@ -0,0 +1,547 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_EF10_REGS_H
+#define _SYS_EFX_EF10_REGS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**************************************************************************
+ * NOTE: the line below marks the start of the autogenerated section
+ * EF10 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/*
+ * BIU_HW_REV_ID_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_HW_REV_ID_REG_OFST 0x00000000
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_BIU_HW_REV_ID_REG_RESET 0xeb14face
+
+
+#define ERF_DZ_HW_REV_ID_LBN 0
+#define ERF_DZ_HW_REV_ID_WIDTH 32
+
+
+/*
+ * BIU_MC_SFT_STATUS_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_OFST 0x00000010
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_RESET 0x1111face
+
+
+#define ERF_DZ_MC_SFT_STATUS_LBN 0
+#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
+
+
+/*
+ * BIU_INT_ISR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_INT_ISR_REG_OFST 0x00000090
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_BIU_INT_ISR_REG_RESET 0x0
+
+
+#define ERF_DZ_ISR_REG_LBN 0
+#define ERF_DZ_ISR_REG_WIDTH 32
+
+
+/*
+ * MC_DB_LWRD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_MC_DB_LWRD_REG_OFST 0x00000200
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_MC_DB_LWRD_REG_RESET 0x0
+
+
+#define ERF_DZ_MC_DOORBELL_L_LBN 0
+#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
+
+
+/*
+ * MC_DB_HWRD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_MC_DB_HWRD_REG_OFST 0x00000204
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_MC_DB_HWRD_REG_RESET 0x0
+
+
+#define ERF_DZ_MC_DOORBELL_H_LBN 0
+#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
+
+
+/*
+ * EVQ_RPTR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_EVQ_RPTR_REG_OFST 0x00000400
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_EVQ_RPTR_REG_STEP 8192
+#define ER_DZ_EVQ_RPTR_REG_ROWS 2048
+#define ER_DZ_EVQ_RPTR_REG_RESET 0x0
+
+
+#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_DZ_EVQ_RPTR_LBN 0
+#define ERF_DZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * EVQ_TMR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_EVQ_TMR_REG_OFST 0x00000420
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_EVQ_TMR_REG_STEP 8192
+#define ER_DZ_EVQ_TMR_REG_ROWS 2048
+#define ER_DZ_EVQ_TMR_REG_RESET 0x0
+
+
+#define ERF_DZ_TC_TIMER_MODE_LBN 14
+#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_DZ_TC_TIMER_VAL_LBN 0
+#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
+
+
+/*
+ * RX_DESC_UPD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_RX_DESC_UPD_REG_OFST 0x00000830
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_RX_DESC_UPD_REG_STEP 8192
+#define ER_DZ_RX_DESC_UPD_REG_ROWS 2048
+#define ER_DZ_RX_DESC_UPD_REG_RESET 0x0
+
+
+#define ERF_DZ_RX_DESC_WPTR_LBN 0
+#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+
+/*
+ * TX_DESC_UPD_REG(96bit):
+ *
+ */
+
+#define ER_DZ_TX_DESC_UPD_REG_OFST 0x00000a10
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_TX_DESC_UPD_REG_STEP 8192
+#define ER_DZ_TX_DESC_UPD_REG_ROWS 2048
+#define ER_DZ_TX_DESC_UPD_REG_RESET 0x0
+
+
+#define ERF_DZ_RSVD_LBN 76
+#define ERF_DZ_RSVD_WIDTH 20
+#define ERF_DZ_TX_DESC_WPTR_LBN 64
+#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_DZ_TX_DESC_HWORD_LBN 32
+#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_DZ_TX_DESC_LWORD_LBN 0
+#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
+
+
+/* ES_DRIVER_EV */
+#define ESF_DZ_DRV_CODE_LBN 60
+#define ESF_DZ_DRV_CODE_WIDTH 4
+#define ESF_DZ_DRV_SUB_CODE_LBN 56
+#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
+#define ESE_DZ_DRV_TIMER_EV 3
+#define ESE_DZ_DRV_START_UP_EV 2
+#define ESE_DZ_DRV_WAKE_UP_EV 1
+#define ESF_DZ_DRV_SUB_DATA_DW0_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_DW0_WIDTH 32
+#define ESF_DZ_DRV_SUB_DATA_DW1_LBN 32
+#define ESF_DZ_DRV_SUB_DATA_DW1_WIDTH 24
+#define ESF_DZ_DRV_SUB_DATA_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
+#define ESF_DZ_DRV_EVQ_ID_LBN 0
+#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
+#define ESF_DZ_DRV_TMR_ID_LBN 0
+#define ESF_DZ_DRV_TMR_ID_WIDTH 14
+
+
+/* ES_EVENT_ENTRY */
+#define ESF_DZ_EV_CODE_LBN 60
+#define ESF_DZ_EV_CODE_WIDTH 4
+#define ESE_DZ_EV_CODE_MCDI_EV 12
+#define ESE_DZ_EV_CODE_DRIVER_EV 5
+#define ESE_DZ_EV_CODE_TX_EV 2
+#define ESE_DZ_EV_CODE_RX_EV 0
+#define ESE_DZ_OTHER other
+#define ESF_DZ_EV_DATA_DW0_LBN 0
+#define ESF_DZ_EV_DATA_DW0_WIDTH 32
+#define ESF_DZ_EV_DATA_DW1_LBN 32
+#define ESF_DZ_EV_DATA_DW1_WIDTH 28
+#define ESF_DZ_EV_DATA_LBN 0
+#define ESF_DZ_EV_DATA_WIDTH 60
+
+
+/* ES_MC_EVENT */
+#define ESF_DZ_MC_CODE_LBN 60
+#define ESF_DZ_MC_CODE_WIDTH 4
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_MC_DROP_EVENT_LBN 58
+#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
+#define ESF_DZ_MC_SOFT_DW0_LBN 0
+#define ESF_DZ_MC_SOFT_DW0_WIDTH 32
+#define ESF_DZ_MC_SOFT_DW1_LBN 32
+#define ESF_DZ_MC_SOFT_DW1_WIDTH 26
+#define ESF_DZ_MC_SOFT_LBN 0
+#define ESF_DZ_MC_SOFT_WIDTH 58
+
+
+/* ES_RX_EVENT */
+#define ESF_DZ_RX_CODE_LBN 60
+#define ESF_DZ_RX_CODE_WIDTH 4
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_RX_DROP_EVENT_LBN 58
+#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
+#define ESF_DD_RX_EV_RSVD2_LBN 54
+#define ESF_DD_RX_EV_RSVD2_WIDTH 4
+#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN 56
+#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_RX_EV_RSVD2_LBN 54
+#define ESF_EZ_RX_EV_RSVD2_WIDTH 2
+#define ESF_DZ_RX_EV_SOFT2_LBN 52
+#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
+#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
+#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
+#define ESF_DZ_RX_L4_CLASS_LBN 45
+#define ESF_DZ_RX_L4_CLASS_WIDTH 3
+#define ESE_DZ_L4_CLASS_RSVD7 7
+#define ESE_DZ_L4_CLASS_RSVD6 6
+#define ESE_DZ_L4_CLASS_RSVD5 5
+#define ESE_DZ_L4_CLASS_RSVD4 4
+#define ESE_DZ_L4_CLASS_RSVD3 3
+#define ESE_DZ_L4_CLASS_UDP 2
+#define ESE_DZ_L4_CLASS_TCP 1
+#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_L3_CLASS_LBN 42
+#define ESF_DZ_RX_L3_CLASS_WIDTH 3
+#define ESE_DZ_L3_CLASS_RSVD7 7
+#define ESE_DZ_L3_CLASS_IP6_FRAG 6
+#define ESE_DZ_L3_CLASS_ARP 5
+#define ESE_DZ_L3_CLASS_IP4_FRAG 4
+#define ESE_DZ_L3_CLASS_FCOE 3
+#define ESE_DZ_L3_CLASS_IP6 2
+#define ESE_DZ_L3_CLASS_IP4 1
+#define ESE_DZ_L3_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
+#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
+#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
+#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
+#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
+#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
+#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
+#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
+#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
+#define ESE_DZ_ETH_TAG_CLASS_NONE 0
+#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
+#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
+#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
+#define ESE_DZ_ETH_BASE_CLASS_LLC 1
+#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
+#define ESF_DZ_RX_MAC_CLASS_LBN 35
+#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
+#define ESE_DZ_MAC_CLASS_MCAST 1
+#define ESE_DZ_MAC_CLASS_UCAST 0
+#define ESF_DD_RX_EV_SOFT1_LBN 32
+#define ESF_DD_RX_EV_SOFT1_WIDTH 3
+#define ESF_EZ_RX_EV_SOFT1_LBN 34
+#define ESF_EZ_RX_EV_SOFT1_WIDTH 1
+#define ESF_EZ_RX_ENCAP_HDR_LBN 32
+#define ESF_EZ_RX_ENCAP_HDR_WIDTH 2
+#define ESE_EZ_ENCAP_HDR_GRE 2
+#define ESE_EZ_ENCAP_HDR_VXLAN 1
+#define ESE_EZ_ENCAP_HDR_NONE 0
+#define ESF_DD_RX_EV_RSVD1_LBN 30
+#define ESF_DD_RX_EV_RSVD1_WIDTH 2
+#define ESF_EZ_RX_EV_RSVD1_LBN 31
+#define ESF_EZ_RX_EV_RSVD1_WIDTH 1
+#define ESF_EZ_RX_ABORT_LBN 30
+#define ESF_EZ_RX_ABORT_WIDTH 1
+#define ESF_DZ_RX_ECC_ERR_LBN 29
+#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC1_ERR_LBN 28
+#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC0_ERR_LBN 27
+#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
+#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_ECRC_ERR_LBN 24
+#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
+#define ESF_DZ_RX_QLABEL_LBN 16
+#define ESF_DZ_RX_QLABEL_WIDTH 5
+#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
+#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
+#define ESF_DZ_RX_CONT_LBN 14
+#define ESF_DZ_RX_CONT_WIDTH 1
+#define ESF_DZ_RX_BYTES_LBN 0
+#define ESF_DZ_RX_BYTES_WIDTH 14
+
+
+/* ES_RX_KER_DESC */
+#define ESF_DZ_RX_KER_RESERVED_LBN 62
+#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
+#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define ESF_DZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define ESF_DZ_RX_KER_BUF_ADDR_DW1_WIDTH 16
+#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
+
+
+/* ES_TX_CSUM_TSTAMP_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_OPTION_TS_AT_TXDP_LBN 8
+#define ESF_DZ_TX_OPTION_TS_AT_TXDP_WIDTH 1
+#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_LBN 7
+#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_LBN 6
+#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_WIDTH 1
+#define ESF_DZ_TX_TIMESTAMP_LBN 5
+#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
+#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
+#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
+#define ESE_DZ_TX_OPTION_CRC_FCOE 1
+#define ESE_DZ_TX_OPTION_CRC_OFF 0
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
+#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
+
+
+/* ES_TX_EVENT */
+#define ESF_DZ_TX_CODE_LBN 60
+#define ESF_DZ_TX_CODE_WIDTH 4
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_TX_DROP_EVENT_LBN 58
+#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
+#define ESF_DD_TX_EV_RSVD_LBN 48
+#define ESF_DD_TX_EV_RSVD_WIDTH 10
+#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_IP_INNER_CHKSUM_ERR_LBN 56
+#define ESF_EZ_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_TX_EV_RSVD_LBN 48
+#define ESF_EZ_TX_EV_RSVD_WIDTH 8
+#define ESF_DZ_TX_SOFT2_LBN 32
+#define ESF_DZ_TX_SOFT2_WIDTH 16
+#define ESF_DD_TX_SOFT1_LBN 24
+#define ESF_DD_TX_SOFT1_WIDTH 8
+#define ESF_EZ_TX_CAN_MERGE_LBN 31
+#define ESF_EZ_TX_CAN_MERGE_WIDTH 1
+#define ESF_EZ_TX_SOFT1_LBN 24
+#define ESF_EZ_TX_SOFT1_WIDTH 7
+#define ESF_DZ_TX_QLABEL_LBN 16
+#define ESF_DZ_TX_QLABEL_WIDTH 5
+#define ESF_DZ_TX_DESCR_INDX_LBN 0
+#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
+
+
+/* ES_TX_KER_DESC */
+#define ESF_DZ_TX_KER_TYPE_LBN 63
+#define ESF_DZ_TX_KER_TYPE_WIDTH 1
+#define ESF_DZ_TX_KER_CONT_LBN 62
+#define ESF_DZ_TX_KER_CONT_WIDTH 1
+#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define ESF_DZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define ESF_DZ_TX_KER_BUF_ADDR_DW1_WIDTH 16
+#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
+
+
+/* ES_TX_PIO_DESC */
+#define ESF_DZ_TX_PIO_TYPE_LBN 63
+#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
+#define ESF_DZ_TX_PIO_OPT_LBN 60
+#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESF_DZ_TX_PIO_CONT_LBN 59
+#define ESF_DZ_TX_PIO_CONT_WIDTH 1
+#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
+#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
+#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
+
+
+/* ES_TX_TSO_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
+#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* TX_TSO_FATSO2A_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* TX_TSO_FATSO2B_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 16
+#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
+#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
+#define ESF_DZ_TX_TSO_INNER_PE_CSUM_LBN 0
+#define ESF_DZ_TX_TSO_INNER_PE_CSUM_WIDTH 16
+
+
+/* ES_TX_VLAN_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_VLAN_OP_LBN 32
+#define ESF_DZ_TX_VLAN_OP_WIDTH 2
+#define ESF_DZ_TX_VLAN_TAG2_LBN 16
+#define ESF_DZ_TX_VLAN_TAG2_WIDTH 16
+#define ESF_DZ_TX_VLAN_TAG1_LBN 0
+#define ESF_DZ_TX_VLAN_TAG1_WIDTH 16
+
+
+/*************************************************************************
+ * NOTE: the comment line above marks the end of the autogenerated section
+ */
+
+/*
+ * The workaround for bug 35388 requires multiplexing writes through
+ * the ERF_DZ_TX_DESC_WPTR address.
+ * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
+ * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
+ * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
+ */
+#define ER_DD_EVQ_INDIRECT_OFST (ER_DZ_TX_DESC_UPD_REG_OFST + 2 * 4)
+#define ER_DD_EVQ_INDIRECT_STEP ER_DZ_TX_DESC_UPD_REG_STEP
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
+#define ERF_DD_EVQ_IND_RPTR_LBN 0
+#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
+#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
+#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
+#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
+#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
+#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
+
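+/*
+ * Illustrative sketch only (not part of the register definitions): under the
+ * bug 35388 workaround an event queue read pointer is posted as two dword
+ * writes to the indirect register, carrying the high and low bytes in turn
+ * (shown here high then low, following the comment above).  'write32' is a
+ * hypothetical stand-in for the driver's register-write primitive.
+ */
+#if 0	/* example only */
+static void
+evq_rptr_indirect_write(unsigned int evq, uint32_t rptr,
+    void (*write32)(uint32_t ofst, uint32_t val))
+{
+	uint32_t ofst = ER_DD_EVQ_INDIRECT_OFST +
+	    evq * ER_DD_EVQ_INDIRECT_STEP;
+
+	write32(ofst, (EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH <<
+	    ERF_DD_EVQ_IND_RPTR_FLAGS_LBN) | ((rptr >> 8) & 0xff));
+	write32(ofst, (EFE_DD_EVQ_IND_RPTR_FLAGS_LOW <<
+	    ERF_DD_EVQ_IND_RPTR_FLAGS_LBN) | (rptr & 0xff));
+}
+#endif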
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_EF10_REGS_H */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_regs_mcdi.h b/usr/src/uts/common/io/sfxge/common/efx_regs_mcdi.h
new file mode 100644
index 0000000000..fe325ec24b
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_regs_mcdi.h
@@ -0,0 +1,14242 @@
+/*
+ * Copyright 2008-2013 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*! \cidoxg_firmware_mc_cmd */
+
+#ifndef _SIENA_MC_DRIVER_PCOL_H
+#define _SIENA_MC_DRIVER_PCOL_H
+
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode. This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
+
+/* Siena MC shared memory offsets */
+/* The 'doorbell' addresses are hard-wired to alert the MC when written */
+#define MC_SMEM_P0_DOORBELL_OFST 0x000
+#define MC_SMEM_P1_DOORBELL_OFST 0x004
+/* The rest of these are firmware-defined */
+#define MC_SMEM_P0_PDU_OFST 0x008
+#define MC_SMEM_P1_PDU_OFST 0x108
+#define MC_SMEM_PDU_LEN 0x100
+#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
+#define MC_SMEM_P0_STATUS_OFST 0x7f8
+#define MC_SMEM_P1_STATUS_OFST 0x7fc
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
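+/*
+ * Illustrative sketch only (an assumption, not from this header): the
+ * per-port status dword can be polled to detect a reboot or assertion using
+ * the values above; MC_FW_VERSION_IS_BOOTLOADER() classifies a firmware
+ * version read back in host order.
+ */
+#if 0	/* example only */
+static int
+mc_status_indicates_reset(uint32_t status)
+{
+	return (status == MC_STATUS_DWORD_REBOOT ||
+	    status == MC_STATUS_DWORD_ASSERT);
+}
+#endif
+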
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#ifdef WITH_MCDI_V2
+#define MCDI_PCOL_VERSION 2
+#else
+#define MCDI_PCOL_VERSION 1
+#endif
+
+/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
+
+/* MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ * 0 7 8 16 20 22 23 24 31
+ * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ * | | |
+ * | | \--- Response
+ * | \------- Error
+ * \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing back
+ * into shared memory, or by writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request; a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+
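+/*
+ * Illustrative sketch only (not part of the MCDI protocol definition):
+ * composing a version 1 request header from the field defines above.
+ * RESYNC must always be set; XFLAGS_EVREQ asks for completion by event.
+ * Types are assumed from <stdint.h>/<stddef.h>.
+ */
+#if 0	/* example only */
+static uint32_t
+mcdi_v1_request_header(unsigned int code, size_t datalen, unsigned int seq,
+    int evreq)
+{
+	uint32_t hdr = 0;
+
+	hdr |= (code & ((1u << MCDI_HEADER_CODE_WIDTH) - 1))
+	    << MCDI_HEADER_CODE_LBN;
+	hdr |= 1u << MCDI_HEADER_RESYNC_LBN;	/* always set */
+	/* datalen in bytes; must fit the 8-bit field (0xfc max for v1) */
+	hdr |= ((uint32_t)datalen & ((1u << MCDI_HEADER_DATALEN_WIDTH) - 1))
+	    << MCDI_HEADER_DATALEN_LBN;
+	hdr |= (seq & ((1u << MCDI_HEADER_SEQ_WIDTH) - 1))
+	    << MCDI_HEADER_SEQ_LBN;
+	if (evreq)
+		hdr |= (uint32_t)MCDI_HEADER_XFLAGS_EVREQ
+		    << MCDI_HEADER_XFLAGS_LBN;
+	return (hdr);
+}
+#endif
+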
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#ifdef WITH_MCDI_V2
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+#else
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V1
+#endif
+
+
+/* The MC can generate events for two reasons:
+ * - To complete a shared memory request if XFLAGS_EVREQ was set
+ * - As a notification (link state, i2c event), controlled
+ * via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ * 0 32 33 36 44 52 60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ * |
+ * \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ * - LEVEL==INFO Command succeeded
+ * - LEVEL==ERR Command failed
+ *
+ * 0 8 16 24 32
+ * | Seq | Datalen | Errno | Rsvd |
+ *
+ * These fields are taken directly out of the standard MCDI header, i.e.,
+ * LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without an
+ * MCDI header. An event can be distinguished from an MCDI response by
+ * examining the first byte, which is 0xc0. This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ * 0 7 8
+ * | command | Resync | = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * providing bits 56-63 of the event are 0xc0.
+ *
+ * 56 60 63
+ * | Rsvd | Code | = 0xc0
+ *
+ * Which means for convenience the event code is 0xc for all MC
+ * generated events.
+ */
+#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
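+/*
+ * Illustrative sketch only (not from this header): testing whether a 64-bit
+ * event, already in host byte order, carries an MCDI response by comparing
+ * its code field (bits 60..63) against FSE_AZ_EV_CODE_MCDI_EVRESPONSE.
+ */
+#if 0	/* example only */
+static int
+event_is_mcdi_response(uint64_t ev)
+{
+	return (((ev >> 60) & 0xf) == FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
+}
+#endif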
+
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 13
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* The requested operation might require the
+ command to be passed between MCs, and the
+ transport doesn't support that. Should
+   only ever be seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps match it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set
+ * doesn't exist on this NIC */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+
+#define MC_CMD_ERR_CODE_OFST 0
+
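+/*
+ * Illustrative sketch only (not part of the MCDI definitions): when an error
+ * response carries MC_CMD_ERR_PROXY_PENDING, the 32-bit handle that matches
+ * the later PROXY_RESPONSE event sits at MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST
+ * in the response payload.  'payload' and 'len' are hypothetical names for
+ * the returned error SDU and its length, assumed already in host byte order;
+ * memcpy() is used to avoid alignment assumptions.
+ */
+#if 0	/* example only */
+static int
+mcdi_proxy_pending_handle(const uint8_t *payload, size_t len,
+    uint32_t *handlep)
+{
+	uint32_t err;
+
+	if (len < MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST + sizeof (uint32_t))
+		return (-1);
+	(void) memcpy(&err, payload + MC_CMD_ERR_CODE_OFST, sizeof (err));
+	if (err != MC_CMD_ERR_PROXY_PENDING)
+		return (-1);
+	(void) memcpy(handlep, payload + MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST,
+	    sizeof (*handlep));
+	return (0);
+}
+#endif
+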
+/* We define 8 "escape" commands to allow
+ for command number space extension */
+
+#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
+#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
+#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
+#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
+#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
+#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
+#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
+#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
+
+/* Vectors in the boot ROM */
+/* Point to the copycode entry point. */
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
+/* Points to the recovery mode entry point. */
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
+
+/* The command set exported by the boot ROM (MCDI v0) */
+#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
+ (1 << MC_CMD_READ32) | \
+ (1 << MC_CMD_WRITE32) | \
+ (1 << MC_CMD_COPYCODE) | \
+ (1 << MC_CMD_GET_VERSION), \
+ 0, 0, 0 }
+
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
+ (MC_CMD_SENSOR_ENTRY_OFST + (_x))
+
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n) (((n) & 0xff) << 16)
+
+
+#ifdef WITH_MCDI_V2
+
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
+#endif
+
+/* MCDI_EVENT structuredef */
+#define MCDI_EVENT_LEN 8
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MCDI_EVENT_LEVEL_FATAL 0x3
+#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: 100Mbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define MCDI_EVENT_FWALERT_DATA_LBN 8
+#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
+#define MCDI_EVENT_FWALERT_REASON_LBN 0
+#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
+/* enum: SRAM Access. */
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
+#define MCDI_EVENT_FLR_VF_LBN 0
+#define MCDI_EVENT_FLR_VF_WIDTH 8
+#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
+#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
+#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
+/* enum: Descriptor loader reported failure */
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
+#define MCDI_EVENT_TX_ERR_INFO_LBN 16
+#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
+#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
+#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
+#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
+/* enum: PLL lost lock */
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define MCDI_EVENT_AOE_PTP_STATUS 0xb
+#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define MCDI_EVENT_MUM_WATCHDOG 0x3
+#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DATA_LBN 0
+#define MCDI_EVENT_DATA_WIDTH 32
+#define MCDI_EVENT_SRC_LBN 36
+#define MCDI_EVENT_SRC_WIDTH 8
+#define MCDI_EVENT_EV_CODE_LBN 60
+#define MCDI_EVENT_EV_CODE_WIDTH 4
+#define MCDI_EVENT_CODE_LBN 44
+#define MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define MCDI_EVENT_SW_EVENT 0x0
+/* enum: Bad assert. */
+#define MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
+#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LBN 0
+#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
+#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
+#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
+#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LBN 0
+#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LBN 0
+#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
+#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
+#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
+#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LBN 0
+#define MCDI_EVENT_PTP_UUID_WIDTH 32
+#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
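+
+/*
+ * A minimal decoding sketch (illustrative only, assuming fixed-width integer
+ * types are in scope; the helper names are not existing driver interfaces):
+ * the LBN/WIDTH pairs above locate each field within the 64-bit event, so a
+ * PROXY_RESPONSE event can be decoded as below, where an RC of zero means
+ * the proxied request was authorized and should be resent.
+ */
+static uint32_t
+mcdi_ev_field(uint64_t ev, unsigned int lbn, unsigned int width)
+{
+	/* Extract a field of up to 32 bits starting at bit 'lbn'. */
+	return ((uint32_t)((ev >> lbn) & (((uint64_t)1 << width) - 1)));
+}
+
+static uint32_t
+mcdi_ev_proxy_response_rc(uint64_t ev)
+{
+	return (mcdi_ev_field(ev, MCDI_EVENT_PROXY_RESPONSE_RC_LBN,
+	    MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH));
+}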
+
+/* FCDI_EVENT structuredef */
+#define FCDI_EVENT_LEN 8
+#define FCDI_EVENT_CONT_LBN 32
+#define FCDI_EVENT_CONT_WIDTH 1
+#define FCDI_EVENT_LEVEL_LBN 33
+#define FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define FCDI_EVENT_LEVEL_FATAL 0x3
+#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define FCDI_EVENT_DATA_LBN 0
+#define FCDI_EVENT_DATA_WIDTH 32
+#define FCDI_EVENT_SRC_LBN 36
+#define FCDI_EVENT_SRC_WIDTH 8
+#define FCDI_EVENT_EV_CODE_LBN 60
+#define FCDI_EVENT_EV_CODE_WIDTH 4
+#define FCDI_EVENT_CODE_LBN 44
+#define FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid on a normal FCDI event such
+ * that bits 32-63 containing the event code, level, source etc. remain the
+ * same. In this case the data field of the header is defined to be the number
+ * of timestamps.
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
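+
+/*
+ * A layout sketch (illustrative only, assuming size_t is in scope): record n
+ * of the variable-length timestamp array starts at TIMESTAMPS_OFST plus n
+ * times TIMESTAMPS_LEN, with the seconds word followed by the nanoseconds
+ * word, and an event carrying 'count' records occupies
+ * FCDI_EXTENDED_EVENT_PPS_LEN(count) bytes in total.
+ */
+static size_t
+fcdi_pps_timestamp_offset(unsigned int n)
+{
+	return (FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST +
+	    (size_t)n * FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN);
+}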
+
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MUM_EVENT_LEVEL_FATAL 0x3
+#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_SENSOR_ID_LBN 0
+#define MUM_EVENT_SENSOR_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MUM_EVENT_SENSOR_STATE_LBN 8
+#define MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define MUM_EVENT_PORT_PHY_READY_LBN 0
+#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define MUM_EVENT_DATA_LBN 0
+#define MUM_EVENT_DATA_WIDTH 32
+#define MUM_EVENT_SRC_LBN 36
+#define MUM_EVENT_SRC_WIDTH 8
+#define MUM_EVENT_EV_CODE_LBN 60
+#define MUM_EVENT_EV_CODE_WIDTH 4
+#define MUM_EVENT_CODE_LBN 44
+#define MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LBN 0
+#define MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_READ32
+ * Read multiple 32-bit words from MC memory.
+ */
+#define MC_CMD_READ32 0x1
+#undef MC_CMD_0x1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ32_IN msgrequest */
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+
+/* MC_CMD_READ32_OUT msgresponse */
+#define MC_CMD_READ32_OUT_LENMIN 4
+#define MC_CMD_READ32_OUT_LENMAX 252
+#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
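+
+/*
+ * A request-building sketch (illustrative only; memcpy() and fixed-width
+ * types are assumed to be in scope, and MCDI payload words are little-endian,
+ * so a byte swap would be needed on a big-endian host): the 8-byte request
+ * carries the source address at offset 0 and the word count at offset 4, and
+ * the response returns up to MC_CMD_READ32_OUT_BUFFER_MAXNUM 32-bit words.
+ */
+static void
+mc_cmd_read32_build(uint8_t *req, uint32_t addr, uint32_t numwords)
+{
+	(void) memcpy(req + MC_CMD_READ32_IN_ADDR_OFST, &addr, sizeof (addr));
+	(void) memcpy(req + MC_CMD_READ32_IN_NUMWORDS_OFST, &numwords,
+	    sizeof (numwords));
+}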
+
+
+/***********************************/
+/* MC_CMD_WRITE32
+ * Write multiple 32-bit words to MC memory.
+ */
+#define MC_CMD_WRITE32 0x2
+#undef MC_CMD_0x2_PRIVILEGE_CTG
+
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WRITE32_IN msgrequest */
+#define MC_CMD_WRITE32_IN_LENMIN 8
+#define MC_CMD_WRITE32_IN_LENMAX 252
+#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
+#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
+
+/* MC_CMD_WRITE32_OUT msgresponse */
+#define MC_CMD_WRITE32_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_COPYCODE
+ * Copy MC code between two locations and jump.
+ */
+#define MC_CMD_COPYCODE 0x3
+#undef MC_CMD_0x3_PRIVILEGE_CTG
+
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_COPYCODE_IN msgrequest */
+#define MC_CMD_COPYCODE_IN_LEN 16
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
+ */
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
+ */
+#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
+/* Destination address */
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+/* Address of where to jump after copy. */
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
+
+/* MC_CMD_COPYCODE_OUT msgresponse */
+#define MC_CMD_COPYCODE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
+ */
+#define MC_CMD_SET_FUNC 0x4
+#undef MC_CMD_0x4_PRIVILEGE_CTG
+
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_FUNC_IN msgrequest */
+#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+
+/* MC_CMD_SET_FUNC_OUT msgresponse */
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x5
+#undef MC_CMD_0x5_PRIVILEGE_CTG
+
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* ?? */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_GET_ASSERTS
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
+ */
+#define MC_CMD_GET_ASSERTS 0x6
+#undef MC_CMD_0x6_PRIVILEGE_CTG
+
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_ASSERTS_IN msgrequest */
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+
+/* MC_CMD_GET_ASSERTS_OUT msgresponse */
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
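+
+/*
+ * A response-parsing sketch (illustrative only; memcpy() and fixed-width
+ * types are assumed to be in scope): GLOBAL_FLAGS is checked first, then the
+ * 31 saved general-purpose registers are read from offset 8, skipping any
+ * slot holding the MC_CMD_GET_ASSERTS_REG_NO_DATA marker.
+ */
+static void
+mc_cmd_get_asserts_parse(const uint8_t *resp)
+{
+	uint32_t flags, reg;
+	unsigned int i;
+
+	(void) memcpy(&flags, resp + MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST,
+	    sizeof (flags));
+	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+		return;
+	for (i = 0; i < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; i++) {
+		(void) memcpy(&reg, resp +
+		    MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST +
+		    i * MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN, sizeof (reg));
+		if (reg == MC_CMD_GET_ASSERTS_REG_NO_DATA)
+			continue;
+		/* reg holds the value of GP register i at assert time */
+	}
+}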
+
+
+/***********************************/
+/* MC_CMD_LOG_CTRL
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
+ */
+#define MC_CMD_LOG_CTRL 0x7
+#undef MC_CMD_0x7_PRIVILEGE_CTG
+
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LOG_CTRL_IN msgrequest */
+#define MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+/* enum: UART. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+
+/* MC_CMD_LOG_CTRL_OUT msgresponse */
+#define MC_CMD_LOG_CTRL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VERSION
+ * Get version information about the MC firmware.
+ */
+#define MC_CMD_GET_VERSION 0x8
+#undef MC_CMD_0x8_PRIVILEGE_CTG
+
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VERSION_IN msgrequest */
+#define MC_CMD_GET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
+#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+/* enum: Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+
+/* MC_CMD_GET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+/* 128-bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
+
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+/* 128-bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
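+
+/*
+ * A small sketch (illustrative only; memcpy() and fixed-width types are
+ * assumed to be in scope): the 64-bit VERSION field is split across the LO
+ * and HI dwords at offsets 24 and 28 and can be reassembled as shown.
+ */
+static uint64_t
+mc_cmd_get_version_ext_version(const uint8_t *resp)
+{
+	uint32_t lo, hi;
+
+	(void) memcpy(&lo, resp + MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST,
+	    sizeof (lo));
+	(void) memcpy(&hi, resp + MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST,
+	    sizeof (hi));
+	return (((uint64_t)hi << 32) | lo);
+}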
+
+
+/***********************************/
+/* MC_CMD_FC
+ * Perform an FC operation
+ */
+#define MC_CMD_FC 0x9
+
+/* MC_CMD_FC_IN msgrequest */
+#define MC_CMD_FC_IN_LEN 4
+#define MC_CMD_FC_IN_OP_HDR_OFST 0
+#define MC_CMD_FC_IN_OP_LBN 0
+#define MC_CMD_FC_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to FC. */
+#define MC_CMD_FC_OP_NULL 0x1
+/* enum: Unused opcode */
+#define MC_CMD_FC_OP_UNUSED 0x2
+/* enum: MAC driver commands */
+#define MC_CMD_FC_OP_MAC 0x3
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_READ32 0x4
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_WRITE32 0x5
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_READ 0x6
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_WRITE 0x7
+/* enum: FC firmware Version */
+#define MC_CMD_FC_OP_GET_VERSION 0x8
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_RX_READ 0x9
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa
+/* enum: SFP parameters */
+#define MC_CMD_FC_OP_SFP 0xb
+/* enum: DDR3 test */
+#define MC_CMD_FC_OP_DDR_TEST 0xc
+/* enum: Get Crash context from FC */
+#define MC_CMD_FC_OP_GET_ASSERT 0xd
+/* enum: Get FPGA Build registers */
+#define MC_CMD_FC_OP_FPGA_BUILD 0xe
+/* enum: Read map support commands */
+#define MC_CMD_FC_OP_READ_MAP 0xf
+/* enum: FC Capabilities */
+#define MC_CMD_FC_OP_CAPABILITIES 0x10
+/* enum: FC Global flags */
+#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11
+/* enum: FC IO using relative addressing modes */
+#define MC_CMD_FC_OP_IO_REL 0x12
+/* enum: FPGA link information */
+#define MC_CMD_FC_OP_UHLINK 0x13
+/* enum: Configure loopbacks and link on FPGA ports */
+#define MC_CMD_FC_OP_SET_LINK 0x14
+/* enum: Licensing operations relating to AOE */
+#define MC_CMD_FC_OP_LICENSE 0x15
+/* enum: Startup information to the FC */
+#define MC_CMD_FC_OP_STARTUP 0x16
+/* enum: Configure a DMA read */
+#define MC_CMD_FC_OP_DMA 0x17
+/* enum: Configure a timed read */
+#define MC_CMD_FC_OP_TIMED_READ 0x18
+/* enum: Control UART logging */
+#define MC_CMD_FC_OP_LOG 0x19
+/* enum: Get the value of a given clock_id */
+#define MC_CMD_FC_OP_CLOCK 0x1a
+/* enum: DDR3/QDR3 parameters */
+#define MC_CMD_FC_OP_DDR 0x1b
+/* enum: PTP and timestamp control */
+#define MC_CMD_FC_OP_TIMESTAMP 0x1c
+/* enum: Commands for SPI Flash interface */
+#define MC_CMD_FC_OP_SPI 0x1d
+/* enum: Commands for diagnostic components */
+#define MC_CMD_FC_OP_DIAG 0x1e
+/* enum: External AOE port. */
+#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0
+/* enum: Internal AOE port. */
+#define MC_CMD_FC_IN_PORT_INT_OFST 0x40
+
+/* MC_CMD_FC_IN_NULL msgrequest */
+#define MC_CMD_FC_IN_NULL_LEN 4
+#define MC_CMD_FC_IN_CMD_OFST 0
+
+/* MC_CMD_FC_IN_PHY msgrequest */
+#define MC_CMD_FC_IN_PHY_LEN 5
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* FC PHY driver operation code */
+#define MC_CMD_FC_IN_PHY_OP_OFST 4
+#define MC_CMD_FC_IN_PHY_OP_LEN 1
+/* enum: PHY init handler */
+#define MC_CMD_FC_OP_PHY_OP_INIT 0x1
+/* enum: PHY reconfigure handler */
+#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2
+/* enum: PHY reboot handler */
+#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3
+/* enum: PHY get_supported_cap handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4
+/* enum: PHY get_config handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5
+/* enum: PHY get_media_info handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6
+/* enum: PHY set_led handler */
+#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7
+/* enum: PHY lasi_interrupt handler */
+#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8
+/* enum: PHY check_link handler */
+#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9
+/* enum: PHY fill_stats handler */
+#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa
+/* enum: PHY bpx_link_state_changed handler */
+#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb
+/* enum: PHY get_state handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc
+/* enum: PHY start_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd
+/* enum: PHY poll_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe
+/* enum: PHY nvram_test handler */
+#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf
+/* enum: PHY relinquish handler */
+#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10
+/* enum: PHY read connection from FC - may not be required */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11
+/* enum: PHY read flags from FC - may not be required */
+#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12
+
+/* MC_CMD_FC_IN_PHY_INIT msgrequest */
+#define MC_CMD_FC_IN_PHY_INIT_LEN 4
+#define MC_CMD_FC_IN_PHY_CMD_OFST 0
+
+/* MC_CMD_FC_IN_MAC msgrequest */
+#define MC_CMD_FC_IN_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_MAC_HEADER_OFST 4
+#define MC_CMD_FC_IN_MAC_OP_LBN 0
+#define MC_CMD_FC_IN_MAC_OP_WIDTH 8
+/* enum: MAC reconfigure handler */
+#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1
+/* enum: MAC Set command - same as MC_CMD_SET_MAC */
+#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2
+/* enum: MAC statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3
+/* enum: MAC RX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6
+/* enum: MAC TX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7
+/* enum: MAC Read status */
+#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8
+/* enum: External FPGA port. */
+#define MC_CMD_FC_PORT_EXT 0x0
+/* enum: Internal Siena-facing FPGA ports. */
+#define MC_CMD_FC_PORT_INT 0x1
+#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1
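+
+/*
+ * A header-composition sketch (illustrative only; fixed-width types are
+ * assumed to be in scope): the MAC sub-operation header placed at
+ * MC_CMD_FC_IN_MAC_HEADER_OFST packs the opcode, port type, port index and
+ * command format into one 32-bit word using the LBN values above.
+ */
+static uint32_t
+mc_cmd_fc_mac_header(uint32_t op, uint32_t port_type, uint32_t port_idx,
+    uint32_t format)
+{
+	return ((op << MC_CMD_FC_IN_MAC_OP_LBN) |
+	    (port_type << MC_CMD_FC_IN_MAC_PORT_TYPE_LBN) |
+	    (port_idx << MC_CMD_FC_IN_MAC_PORT_IDX_LBN) |
+	    (format << MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN));
+}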
+
+/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */
+#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MTU size */
+#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8
+/* Drain Tx FIFO */
+#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28
+
+/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */
+#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC Statistics index */
+#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8
+#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1
+/* Number of statistics to read */
+#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16
+#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */
+#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */
+
+/* MC_CMD_FC_IN_READ32 msgrequest */
+#define MC_CMD_FC_IN_READ32_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12
+
+/* MC_CMD_FC_IN_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_WRITE32_LENMIN 16
+#define MC_CMD_FC_IN_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12
+#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60
+
+/* MC_CMD_FC_IN_TRC_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8
+
+/* MC_CMD_FC_IN_TRC_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_WRITE_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4
+
+/* MC_CMD_FC_IN_GET_VERSION msgrequest */
+#define MC_CMD_FC_IN_GET_VERSION_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+
+/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8
+
+/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2
+
+/* MC_CMD_FC_IN_SFP msgrequest */
+#define MC_CMD_FC_IN_SFP_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* Link speed is 100, 1000, 10000, 40000 */
+#define MC_CMD_FC_IN_SFP_SPEED_OFST 4
+/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */
+#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8
+/* Not relevant for cards with QSFP modules. For older cards, true if module is
+ * a dual speed SFP+ module.
+ */
+#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16
+/* The type of the SFP+ Module. For later cards with QSFP modules, this field
+ * is unused and the type is communicated by other means.
+ */
+#define MC_CMD_FC_IN_SFP_TYPE_OFST 20
+/* Capabilities corresponding to 1 bits. */
+#define MC_CMD_FC_IN_SFP_CAPS_OFST 24
+
+/* MC_CMD_FC_IN_DDR_TEST msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4
+#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8
+/* enum: DRAM Test Start */
+#define MC_CMD_FC_OP_DDR_TEST_START 0x1
+/* enum: DRAM Test Poll */
+#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2
+
+/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1
+
+/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12
+#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* Clear previous test result and prepare for restarting DDR test */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8
+
+/* MC_CMD_FC_IN_GET_ASSERT msgrequest */
+#define MC_CMD_FC_IN_GET_ASSERT_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+
+/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */
+#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* FPGA build info operation code */
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4
+/* enum: Get the build registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1
+/* enum: Get the services registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2
+/* enum: Get the BSP version */
+#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3
+/* enum: Get the build registers for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4
+/* enum: Get the services registers for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5
+
+/* MC_CMD_FC_IN_READ_MAP msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4
+#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0
+#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8
+/* enum: Get the number of map regions */
+#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1
+/* enum: Get the specified map */
+#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2
+
+/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_MAP_INDEX_OFST 8
+
+/* MC_CMD_FC_IN_CAPABILITIES msgrequest */
+#define MC_CMD_FC_IN_CAPABILITIES_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+
+/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1
+
+/* MC_CMD_FC_IN_IO_REL msgrequest */
+#define MC_CMD_FC_IN_IO_REL_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4
+#define MC_CMD_FC_IN_IO_REL_OP_LBN 0
+#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8
+/* enum: Get the base address that the FC applies to relative commands */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1
+/* enum: Read data */
+#define MC_CMD_FC_IN_IO_REL_READ32 0x2
+/* enum: Write data */
+#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8
+/* enum: Application address space */
+#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1
+/* enum: Flash address space */
+#define MC_CMD_FC_COMP_TYPE_FLASH 0x2
+
+/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16
+
+/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59
+
+/* MC_CMD_FC_IN_UHLINK msgrequest */
+#define MC_CMD_FC_IN_UHLINK_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4
+#define MC_CMD_FC_IN_UHLINK_OP_LBN 0
+#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8
+/* enum: Get PHY configuration info */
+#define MC_CMD_FC_OP_UHLINK_PHY 0x1
+/* enum: Get MAC configuration info */
+#define MC_CMD_FC_OP_UHLINK_MAC 0x2
+/* enum: Get Rx eye table */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3
+/* enum: Dump Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4
+/* enum: Read Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5
+/* enum: Retune Rx settings */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6
+/* enum: Set loopback mode on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7
+/* enum: Get loopback mode config state on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1
+
+/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */
+#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */
+#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8
+#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16
+#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8
+
+/* MC_CMD_FC_IN_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_SET_LINK_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4
+#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8
+#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1
+
+/* MC_CMD_FC_IN_LICENSE msgrequest */
+#define MC_CMD_FC_IN_LICENSE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_LICENSE_OP_OFST 4
+#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */
+#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */
+
+/* MC_CMD_FC_IN_STARTUP msgrequest */
+#define MC_CMD_FC_IN_STARTUP_LEN 40
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4
+#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8
+/* Length of identifier */
+#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12
+/* Identifier for AOE FPGA */
+#define MC_CMD_FC_IN_STARTUP_ID_OFST 16
+#define MC_CMD_FC_IN_STARTUP_ID_LEN 1
+#define MC_CMD_FC_IN_STARTUP_ID_NUM 24
+
+/* MC_CMD_FC_IN_DMA msgrequest */
+#define MC_CMD_FC_IN_DMA_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DMA_OP_OFST 4
+#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */
+#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DMA_STOP msgrequest */
+#define MC_CMD_FC_IN_DMA_STOP_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8
+
+/* MC_CMD_FC_IN_DMA_READ msgrequest */
+#define MC_CMD_FC_IN_DMA_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8
+#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12
+
+/* MC_CMD_FC_IN_TIMED_READ msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4
+#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */
+
+/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8
+/* Address into which to transfer data in host */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28
+/* Length of host transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32
+/* Offset back from aoe_address to apply operation to */
+#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36
+/* Data to apply at offset */
+#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40
+#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */
+/* Period at which reads are performed (100ms units) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48
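+
+/*
+ * A flags-composition sketch (illustrative only; fixed-width types are
+ * assumed to be in scope): the FLAGS word at offset 44 packs the single-bit
+ * INDIRECT, DOUBLE and EVENT flags together with the two-bit PREREAD field
+ * (NONE/READ/WRITE/READWRITE) using the LBN values above.
+ */
+static uint32_t
+mc_cmd_fc_timed_read_set_flags(uint32_t indirect, uint32_t dbl,
+    uint32_t event, uint32_t preread)
+{
+	return ((indirect << MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN) |
+	    (dbl << MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN) |
+	    (event << MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN) |
+	    (preread << MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN));
+}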
+
+/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8
+
+/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8
+
+/* MC_CMD_FC_IN_LOG msgrequest */
+#define MC_CMD_FC_IN_LOG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_LOG_OP_OFST 4
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */
+#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */
+
+/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* Partition offset into flash */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8
+/* Partition length */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12
+/* Partition erase size */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16
+
+/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* Enable/disable printing to JTAG UART */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8
+
+/* MC_CMD_FC_IN_CLOCK msgrequest */
+#define MC_CMD_FC_IN_CLOCK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_CLOCK_OP_OFST 4
+#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */
+/* Perform a clock operation */
+#define MC_CMD_FC_IN_CLOCK_ID_OFST 8
+#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */
+
+/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest */
+#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* Retrieve the clock value of the specified clock */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+
+/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16
+/* Set the clock value of the specified clock */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20
+
+/* MC_CMD_FC_IN_DDR msgrequest */
+#define MC_CMD_FC_IN_DDR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DDR_OP_OFST 4
+#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_OFST 8
+#define MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */
+#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* Flags */
+#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12
+#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */
+/* 128-byte page of serial presence detect data read from module's EEPROM */
+#define MC_CMD_FC_IN_DDR_SPD_OFST 16
+#define MC_CMD_FC_IN_DDR_SPD_LEN 1
+#define MC_CMD_FC_IN_DDR_SPD_NUM 128
+/* Page index of the spd data copied into MC_CMD_FC_IN_DDR_SPD */
+#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144
+
+/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* Size of DDR */
+#define MC_CMD_FC_IN_DDR_SIZE_OFST 12
+
+/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */
+#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+
+/* MC_CMD_FC_IN_TIMESTAMP msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* FC timestamp operation code */
+#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4
+/* enum: Read transmit timestamp(s) */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0
+/* enum: Read snapshot timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1
+/* enum: Clear all transmit timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4
+/* Control filtering of the returned timestamp and sequence number specified
+ * here
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8
+/* enum: Return most recent timestamp. No filtering */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0
+/* enum: Match timestamp against the PTP clock ID, port number and sequence
+ * number specified
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1
+/* Clock identity of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16
+/* Port number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20
+/* Sequence number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4
+
+/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4
+
+/* MC_CMD_FC_IN_SPI msgrequest */
+#define MC_CMD_FC_IN_SPI_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* Basic commands for SPI Flash. */
+#define MC_CMD_FC_IN_SPI_OP_OFST 4
+/* enum: SPI Flash read */
+#define MC_CMD_FC_IN_SPI_READ 0x0
+/* enum: SPI Flash write */
+#define MC_CMD_FC_IN_SPI_WRITE 0x1
+/* enum: SPI Flash erase */
+#define MC_CMD_FC_IN_SPI_ERASE 0x2
+
+/* MC_CMD_FC_IN_SPI_READ msgrequest */
+#define MC_CMD_FC_IN_SPI_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12
+
+/* MC_CMD_FC_IN_SPI_WRITE msgrequest */
+#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16
+#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252
+#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60
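+
+/*
+ * A length-calculation sketch (illustrative only; size_t is assumed to be in
+ * scope): a write of 'nwords' 32-bit words occupies
+ * MC_CMD_FC_IN_SPI_WRITE_LEN(nwords) bytes, and nwords must stay within the
+ * MINNUM/MAXNUM bounds for a single MCDI request.
+ */
+static int
+mc_cmd_fc_spi_write_len(unsigned int nwords, size_t *lenp)
+{
+	if (nwords < MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM ||
+	    nwords > MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM)
+		return (-1);
+	*lenp = MC_CMD_FC_IN_SPI_WRITE_LEN(nwords);
+	return (0);
+}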
+
+/* MC_CMD_FC_IN_SPI_ERASE msgrequest */
+#define MC_CMD_FC_IN_SPI_ERASE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12
+
+/* MC_CMD_FC_IN_DIAG msgrequest */
+#define MC_CMD_FC_IN_DIAG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* Operation code indicating component type */
+#define MC_CMD_FC_IN_DIAG_OP_OFST 4
+/* enum: Power noise generator. */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0
+/* enum: DDR soak test component. */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1
+/* enum: Diagnostics datapath control component. */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8
+/* enum: Read the configuration (the 32-bit values in each of the clock enable
+ * count and toggle count registers)
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0
+/* enum: Write a new configuration to the clock enable count and toggle count
+ * registers
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8
+/* The 32-bit value to be written to the toggle count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12
+/* The 32-bit value to be written to the clock enable count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8
+/* enum: Starts DDR soak test on selected banks */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0
+/* enum: Read status of DDR soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1
+/* enum: Stop test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2
+/* enum: Set or clear bit that triggers fake errors. These cause subsequent
+ * tests to fail until the bit is cleared.
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12
+/* Pattern to use in the soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */
+/* Either multiple automatic tests until a STOP command is issued, or one
+ * single test
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8
+/* DDR bank to read status from */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12
+#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */
+#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */
+#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */
+#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */
+#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8
+/* Mask of DDR banks to set/clear error flag on */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8
+/* enum: Set a known datapath configuration */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0
+/* enum: Apply raw config to datapath control registers */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8
+/* Datapath configuration identifier */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8
+/* Value to write into control register 1 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12
+/* Value to write into control register 2 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16
+/* Value to write into control register 3 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20
+
+/* MC_CMD_FC_OUT msgresponse */
+#define MC_CMD_FC_OUT_LEN 0
+
+/* MC_CMD_FC_OUT_NULL msgresponse */
+#define MC_CMD_FC_OUT_NULL_LEN 0
+
+/* MC_CMD_FC_OUT_READ32 msgresponse */
+#define MC_CMD_FC_OUT_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63
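+/*
+ * Variable-length responses such as MC_CMD_FC_OUT_READ32 are described by
+ * LENMIN/LENMAX, a LEN(num) macro giving the byte length for 'num' array
+ * elements, and per-element OFST/LEN and MINNUM/MAXNUM values.  A minimal,
+ * illustrative sketch of walking such a response; the 'rsp' buffer and
+ * 'rsp_len' are assumptions, not part of this interface:
+ *
+ *	const uint8_t *rsp;	// response payload returned by the FC
+ *	size_t rsp_len;		// its length in bytes
+ *	size_t i, num = rsp_len / MC_CMD_FC_OUT_READ32_BUFFER_LEN;
+ *	uint32_t word;
+ *
+ *	for (i = 0; i < num; i++) {
+ *		(void) memcpy(&word, rsp + MC_CMD_FC_OUT_READ32_BUFFER_OFST +
+ *		    i * MC_CMD_FC_OUT_READ32_BUFFER_LEN, sizeof (word));
+ *		// consume one 32-bit word read from the FC
+ *	}
+ */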
+
+/* MC_CMD_FC_OUT_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_TRC_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_READ_LEN 16
+#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4
+
+/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_GET_VERSION msgresponse */
+#define MC_CMD_FC_OUT_GET_VERSION_LEN 12
+#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2
+
+/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */
+#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0
+
+/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS
+#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */
+#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_RX_NSTATS 0x19
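+/*
+ * The _LEN expression above reduces to 8 * MC_CMD_FC_MAC_RX_NSTATS bytes:
+ * the response is an array of MC_CMD_FC_MAC_RX_NSTATS 64-bit counters, and
+ * ((bits - 1 + 1) >> 3) simply converts the bit count to bytes.  Each
+ * counter sits at byte offset 8 * <enum index>, low dword first.  A hedged
+ * sketch of fetching one counter; 'rsp' is an assumed response buffer:
+ *
+ *	const uint8_t *rsp;
+ *	uint32_t lo, hi;
+ *	uint64_t crc_errors;
+ *	size_t off = MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST +
+ *	    MC_CMD_FC_MAC_RX_CRC_ERRORS *
+ *	    MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN;
+ *
+ *	(void) memcpy(&lo, rsp + off, sizeof (lo));
+ *	(void) memcpy(&hi, rsp + off + 4, sizeof (hi));
+ *	crc_errors = ((uint64_t)hi << 32) | lo;
+ */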
+
+/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS
+#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */
+#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_TX_NSTATS 0x16
+
+/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3)
+/* MAC Statistics */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_MAC msgresponse */
+#define MC_CMD_FC_OUT_MAC_LEN 0
+
+/* MC_CMD_FC_OUT_SFP msgresponse */
+#define MC_CMD_FC_OUT_SFP_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8
+/* enum: Test not yet initiated */
+#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0
+/* enum: Test is in progress */
+#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1
+/* enum: Test completed successfully */
+#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2
+/* enum: Test did not complete in specified time */
+#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1
+/* Test result from FPGA */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */
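+/*
+ * The STATUS dword (offset 0) carries the overall CODE in bits 7:0 plus
+ * per-bank PRESENT flags in bits 11:8, while the RESULT dword (offset 4)
+ * packs a 5-bit result per bank, decoded against the TEST_ and CAL_ values
+ * above.  A minimal sketch of extracting bank T0's result; 'result' is
+ * assumed to be the dword read from the RESULT offset:
+ *
+ *	uint32_t result;
+ *	unsigned int t0 = (result >> MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN) &
+ *	    ((1u << MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH) - 1);
+ *
+ *	if (t0 == MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS) {
+ *		// DDR bank T0 passed
+ *	}
+ */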
+
+/* MC_CMD_FC_OUT_DDR_TEST msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_LEN 0
+
+/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */
+#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144
+/* Assertion status flag. */
+#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8
+/* enum: No crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0
+/* enum: New crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1
+/* enum: Crash data has been sent */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8
+/* enum: No crash has been recorded. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0
+/* enum: Crash due to exception. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1
+/* enum: Crash due to assertion. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2
+/* Failing PC value */
+#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31
+/* Exception Type */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132
+/* Instruction at which exception occurred */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136
+/* BAD Address that triggered address-based exception */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140
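+/*
+ * GLOBAL_FLAGS packs two sub-fields described by the _LBN (lowest bit
+ * number) and _WIDTH pairs above: TYPE in bits 7:0 and STATE in bits 15:8.
+ * A minimal decoding sketch; 'flags' is assumed to hold the dword read from
+ * MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST:
+ *
+ *	uint32_t flags;
+ *	unsigned int state, type;
+ *
+ *	state = (flags >> MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN) &
+ *	    ((1u << MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH) - 1);
+ *	type = (flags >> MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN) &
+ *	    ((1u << MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH) - 1);
+ *
+ *	if (state == MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW &&
+ *	    type == MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION) {
+ *		// new exception crash data is available
+ *	}
+ */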
+
+/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8
+#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */
+#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */
+#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4
+#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */
+#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4
+/* Qsys system ID */
+#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4
+
+/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4
+/* Number of maps */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0
+
+/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164
+/* Index of the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0
+/* Options for the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */
+/* Address of start of map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12
+/* Length of address map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20
+/* Component information field */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24
+/* License expiry data for map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32
+/* Name of the component */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128
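+/*
+ * OPTIONS is a flags word rather than a single enum: the two low bits give
+ * the access alignment (mask with ALIGN_MASK) and the remaining values are
+ * individual flag bits.  A hedged decoding sketch; 'options' is assumed to
+ * be the dword read from the OPTIONS offset:
+ *
+ *	uint32_t options;
+ *	unsigned int align =
+ *	    options & MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK;
+ *	boolean_t writable =
+ *	    (options & MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE) != 0;
+ *	boolean_t licensed =
+ *	    (options & MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED) != 0;
+ */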
+
+/* MC_CMD_FC_OUT_READ_MAP msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_LEN 0
+
+/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */
+#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8
+/* Number of internal ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0
+/* Number of external ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4
+
+/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0
+
+/* MC_CMD_FC_OUT_IO_REL msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_LEN 0
+
+/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4
+
+/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16
+/* Transceiver Transmit settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16
+/* Transceiver Receive settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16
+/* Rx eye opening */
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 16
+/* PCS status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16
+/* Link status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1
+/* Current SFP parameters applied */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20
+/* Link speed: 100, 1000 or 10000 */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24
+/* Length of copper cable - zero when not relevant */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28
+/* True if a dual speed SFP+ module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36
+/* The type of the SFP+ Module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40
+/* PHY config flags */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1
+
+/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20
+/* MAC configuration applied */
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0
+/* MTU size */
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4
+/* IF Mode status */
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8
+/* MAC address configured */
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16
+
+/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3)
+/* Rx Eye measurements */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3)
+/* Whether the eye plot dump has completed and the returned data is valid */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0
+/* Rx Eye binary plot */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0
+
+/* MC_CMD_FC_OUT_UHLINK msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LEN 0
+
+/* MC_CMD_FC_OUT_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_LICENSE msgresponse */
+#define MC_CMD_FC_OUT_LICENSE_LEN 12
+/* Count of valid keys */
+#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0
+/* Count of invalid keys */
+#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4
+/* Count of blacklisted keys */
+#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8
+
+/* MC_CMD_FC_OUT_STARTUP msgresponse */
+#define MC_CMD_FC_OUT_STARTUP_LEN 4
+/* Capabilities of the FPGA/FC */
+#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1
+
+/* MC_CMD_FC_OUT_DMA_READ msgresponse */
+#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1
+#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252
+#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num))
+/* The data read */
+#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252
+
+/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4
+/* Timer handle */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0
+
+/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0
+/* Host address into which to transfer data */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20
+/* Length of host transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24
+/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28
+#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32
+/* When active, start read time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40
+/* When active, end read time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48
+
+/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */
+#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0
+
+/* MC_CMD_FC_OUT_LOG msgresponse */
+#define MC_CMD_FC_OUT_LOG_LEN 0
+
+/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20
+
+/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num))
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31
+
+/* MC_CMD_FC_OUT_SPI_READ msgresponse */
+#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4
+#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252
+#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */
+#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */
+#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8
+/* The 32-bit value read from the toggle count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0
+/* The 32-bit value read from the clock enable count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8
+/* DDR soak test status word; bits [4:0] are relevant. */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1
+/* DDR soak test error count */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0
+
+
+/***********************************/
+/* MC_CMD_AOE
+ * AOE operations on MC
+ */
+#define MC_CMD_AOE 0xa
+
+/* MC_CMD_AOE_IN msgrequest */
+#define MC_CMD_AOE_IN_LEN 4
+#define MC_CMD_AOE_IN_OP_HDR_OFST 0
+#define MC_CMD_AOE_IN_OP_LBN 0
+#define MC_CMD_AOE_IN_OP_WIDTH 8
+/* enum: FPGA and CPLD information */
+#define MC_CMD_AOE_OP_INFO 0x1
+/* enum: Currents and voltages read from MCP3424s; DEBUG */
+#define MC_CMD_AOE_OP_CURRENTS 0x2
+/* enum: Temperatures at locations around the PCB; DEBUG */
+#define MC_CMD_AOE_OP_TEMPERATURES 0x3
+/* enum: Set CPLD to idle */
+#define MC_CMD_AOE_OP_CPLD_IDLE 0x4
+/* enum: Read from CPLD register */
+#define MC_CMD_AOE_OP_CPLD_READ 0x5
+/* enum: Write to CPLD register */
+#define MC_CMD_AOE_OP_CPLD_WRITE 0x6
+/* enum: Execute CPLD instruction */
+#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7
+/* enum: Reprogram the CPLD on the AOE device */
+#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8
+/* enum: AOE power control */
+#define MC_CMD_AOE_OP_POWER 0x9
+/* enum: AOE image loading */
+#define MC_CMD_AOE_OP_LOAD 0xa
+/* enum: Fan monitoring */
+#define MC_CMD_AOE_OP_FAN_CONTROL 0xb
+/* enum: Fan failures since last reset */
+#define MC_CMD_AOE_OP_FAN_FAILURES 0xc
+/* enum: Get generic AOE MAC statistics */
+#define MC_CMD_AOE_OP_MAC_STATS 0xd
+/* enum: Retrieve PHY specific information */
+#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe
+/* enum: Write a number of JTAG primitive commands; response carries data */
+#define MC_CMD_AOE_OP_JTAG_WRITE 0xf
+/* enum: Control access to the FPGA via the Siena JTAG Chain */
+#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10
+/* enum: Set the MTU offset between Siena and AOE MACs */
+#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11
+/* enum: How link state is handled */
+#define MC_CMD_AOE_OP_LINK_STATE 0x12
+/* enum: How Siena MAC statistics are reported (deprecated - use
+ * MC_CMD_AOE_OP_ASIC_STATS)
+ */
+#define MC_CMD_AOE_OP_SIENA_STATS 0x13
+/* enum: How native ASIC MAC statistics are reported - replaces the deprecated
+ * command MC_CMD_AOE_OP_SIENA_STATS
+ */
+#define MC_CMD_AOE_OP_ASIC_STATS 0x13
+/* enum: DDR memory information */
+#define MC_CMD_AOE_OP_DDR 0x14
+/* enum: FC control */
+#define MC_CMD_AOE_OP_FC 0x15
+/* enum: DDR ECC status reads */
+#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16
+/* enum: Commands for MC-SPI Master emulation */
+#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17
+/* enum: Commands for FC boot control */
+#define MC_CMD_AOE_OP_FC_BOOT 0x18
+
+/* MC_CMD_AOE_OUT msgresponse */
+#define MC_CMD_AOE_OUT_LEN 0
+
+/* MC_CMD_AOE_IN_INFO msgrequest */
+#define MC_CMD_AOE_IN_INFO_LEN 4
+#define MC_CMD_AOE_IN_CMD_OFST 0
+
+/* MC_CMD_AOE_IN_CURRENTS msgrequest */
+#define MC_CMD_AOE_IN_CURRENTS_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */
+#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_CPLD_READ msgrequest */
+#define MC_CMD_AOE_IN_CPLD_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8
+
+/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8
+#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12
+
+/* MC_CMD_AOE_IN_CPLD_INSTRUCTION msgrequest */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4
+
+/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4
+/* enum: Reprogram CPLD, poll for completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1
+/* enum: Reprogram CPLD, send event on completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3
+/* enum: Get status of reprogramming operation */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4
+
+/* MC_CMD_AOE_IN_POWER msgrequest */
+#define MC_CMD_AOE_IN_POWER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Turn on or off AOE power */
+#define MC_CMD_AOE_IN_POWER_OP_OFST 4
+/* enum: Turn off FPGA power */
+#define MC_CMD_AOE_IN_POWER_OFF 0x0
+/* enum: Turn on FPGA power */
+#define MC_CMD_AOE_IN_POWER_ON 0x1
+/* enum: Clear peak power measurement */
+#define MC_CMD_AOE_IN_POWER_CLEAR 0x2
+/* enum: Show current power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3
+/* enum: Show peak power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4
+/* enum: Show last DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5
+/* enum: Show peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6
+/* enum: Clear peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7
+
+/* MC_CMD_AOE_IN_LOAD msgrequest */
+#define MC_CMD_AOE_IN_LOAD_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Image to load (0 - main or 1 - diagnostic) in the normal sequence */
+#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4
+
+/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */
+#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* If non-zero, report measured fan RPM rather than nominal */
+#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4
+
+/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */
+#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_MAC_STATS msgrequest */
+#define MC_CMD_AOE_IN_MAC_STATS_LEN 24
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* AOE port */
+#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4
+/* Host memory address for statistics */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12
+#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16
+/* Length of DMA data (optional) */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20
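+/*
+ * The CMD dword at offset 16 is a bit-field: the low bits are the DMA,
+ * CLEAR and PERIODIC_* flags and PERIOD_MS occupies bits 31:16.  A minimal
+ * sketch of composing it for periodic DMA collection every 1000 ms (the
+ * period chosen is purely illustrative):
+ *
+ *	uint32_t cmd = 0;
+ *
+ *	cmd |= 1u << MC_CMD_AOE_IN_MAC_STATS_DMA_LBN;
+ *	cmd |= 1u << MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN;
+ *	cmd |= 1u << MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN;
+ *	cmd |= 1000u << MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN;
+ *	// store 'cmd' at MC_CMD_AOE_IN_MAC_STATS_CMD_OFST in the request
+ */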
+
+/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* AOE port */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8
+
+/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num))
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Enable or disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4
+/* enum: Enable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1
+/* enum: Disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2
+
+/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* AOE port, or ALL_EXTERNAL / ALL_INTERNAL to apply to a group of ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4
+/* enum: Apply to all external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000
+/* enum: Apply to all internal ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000
+/* The MTU offset to be applied to the external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8
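+/*
+ * The PORT field either names a single AOE port or takes one of the
+ * ALL_EXTERNAL / ALL_INTERNAL selectors above.  A hedged sketch of filling
+ * the request payload; 'req' and the offset value used are illustrative
+ * assumptions, not values defined by this interface:
+ *
+ *	uint8_t req[MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN];
+ *	uint32_t cmd = MC_CMD_AOE_OP_SET_MTU_OFFSET;
+ *	uint32_t port = MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL;
+ *	uint32_t offset = 16;
+ *
+ *	(void) memcpy(req + MC_CMD_AOE_IN_CMD_OFST, &cmd, sizeof (cmd));
+ *	(void) memcpy(req + MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST,
+ *	    &port, sizeof (port));
+ *	(void) memcpy(req + MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST,
+ *	    &offset, sizeof (offset));
+ */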
+
+/* MC_CMD_AOE_IN_LINK_STATE msgrequest */
+#define MC_CMD_AOE_IN_LINK_STATE_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8
+/* enum: AOE and associated external port */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0
+/* enum: AOE and OR of all external ports */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1
+/* enum: Individual ports */
+#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2
+/* enum: Configure link state mode on given AOE port */
+#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8
+/* enum: No-op */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0
+/* enum: Logical OR of all SFP ports' link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1
+/* enum: Logical AND of all SFP ports' link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16
+
+/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */
+#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4
+/* enum: Statistics from Siena (default) */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */
+#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4
+/* enum: Statistics from the ASIC (default) */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_DDR msgrequest */
+#define MC_CMD_AOE_IN_DDR_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_DDR_BANK_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+/* Page index of SPD data */
+#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8
+
+/* MC_CMD_AOE_IN_FC msgrequest */
+#define MC_CMD_AOE_IN_FC_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Basic commands for MC SPI Master emulation. */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4
+/* enum: MC SPI read */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0
+/* enum: MC SPI write */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12
+
+/* MC_CMD_AOE_IN_FC_BOOT msgrequest */
+#define MC_CMD_AOE_IN_FC_BOOT_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* FC boot control flags */
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1
+
+/* MC_CMD_AOE_OUT_INFO msgresponse */
+#define MC_CMD_AOE_OUT_INFO_LEN 44
+/* JTAG IDCODE of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0
+/* Version of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4
+/* JTAG IDCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8
+/* JTAG USERCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12
+/* FPGA type - read from CPLD straps */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */
+/* FPGA state (debug) */
+#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20
+/* FPGA image - partition from which loaded */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24
+/* FC state */
+#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28
+/* enum: Set if watchdog working */
+#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1
+/* enum: Set if MC-FC communications working */
+#define MC_CMD_AOE_OUT_INFO_COMMS 0x2
+/* Miscellaneous information flags */
+#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32
+/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */
+#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1
+/* enum: CPLD apparently good */
+#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2
+/* enum: FPGA working normally */
+#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4
+/* enum: FPGA is powered */
+#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8
+/* enum: Board has incompatible SODIMMs fitted */
+#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10
+/* enum: Board has ByteBlaster connected */
+#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20
+/* enum: FPGA Boot flash has an invalid header. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40
+/* enum: FPGA Application flash is accessible. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80
+/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. */
+#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36
+#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */
+/* Result of FC booting - not valid while a ByteBlaster is connected. */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40
+/* enum: No error */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0
+/* enum: Bad address set in CPLD */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1
+/* enum: Bad header */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2
+/* enum: Bad text section details */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3
+/* enum: Bad checksum */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4
+/* enum: Bad BSP */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5
+/* enum: Flash mode is invalid */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6
+/* enum: FC application loaded and execution attempted */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80
+/* enum: FC application Started */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81
+/* enum: No bootrom in FPGA */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff
+
+/* MC_CMD_AOE_OUT_CURRENTS msgresponse */
+#define MC_CMD_AOE_OUT_CURRENTS_LEN 68
+/* Set of currents and voltages (mA or mV as appropriate) */
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17
+#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */
+
+/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */
+#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40
+/* Set of temperatures */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10
+/* enum: The first set of enum values is for Modena code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */
+/* enum: The second set of enum values is for Sorrento code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */
+
+/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4
+/* The value read from the CPLD */
+#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0
+
+/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num))
+/* Failure counts for each fan */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63
+
+/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4
+/* Results of status command (only) */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0
+
+/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */
+#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0
+
+/* MC_CMD_AOE_OUT_POWER_ON msgresponse */
+#define MC_CMD_AOE_OUT_POWER_ON_LEN 0
+
+/* MC_CMD_AOE_OUT_LOAD msgresponse */
+#define MC_CMD_AOE_OUT_LOAD_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */
+#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA
+ * for details
+ */
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+
+/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num))
+/* Used to align the in and out data blocks so the MC can re-use the cmd */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0
+/* out bytes */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */
+#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0
+
+/* MC_CMD_AOE_OUT_DDR msgresponse */
+#define MC_CMD_AOE_OUT_DDR_LENMIN 17
+#define MC_CMD_AOE_OUT_DDR_LENMAX 252
+#define MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num))
+/* Information on the module. */
+#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0
+#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1
+/* Memory size, in MB. */
+#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4
+/* The memory type, as reported from SPD information */
+#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8
+/* Nominal voltage of the module (as applied) */
+#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12
+/* SPD data read from the module */
+#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16
+#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236
+
+/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */
+#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0
+
+/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */
+#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0
+
+/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */
+#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */
+#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_FC msgresponse */
+#define MC_CMD_AOE_OUT_FC_LEN 0
+
+/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8
+/* Flags describing status info on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1
+/* DDR ECC status on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8
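+
+/*
+ * Illustrative sketch, not part of the MCDI definitions: the STATUS word
+ * above carries single-bit SBE/DBE/CORDROP indicators in bits 0..2 and the
+ * corresponding 8-bit error counters at bits 8, 16 and 24.  The helper name
+ * is hypothetical and assumes the usual <sys/types.h> integer types.
+ */
+static uint32_t
+sfxge_ddr_ecc_sbe_count(uint32_t status)
+{
+	if ((status & (1U << MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN)) == 0)
+		return (0);	/* no single-bit errors recorded */
+	return ((status >> MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN) &
+	    ((1U << MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH) - 1));
+}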
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0
+
+/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */
+#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PTP
+ * Perform PTP operation
+ */
+#define MC_CMD_PTP 0xb
+#undef MC_CMD_0xb_PRIVILEGE_CTG
+
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PTP_IN msgrequest */
+#define MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
+#define MC_CMD_PTP_IN_OP_OFST 0
+#define MC_CMD_PTP_IN_OP_LEN 1
+/* enum: Enable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. */
+#define MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register */
+#define MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Adjust the frequency of the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE- extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
+/* enum: Values above this are reserved for future use. */
+#define MC_CMD_PTP_OP_MAX 0x1c
+
+/* MC_CMD_PTP_IN_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_ENABLE_LEN 16
+#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+/* Event queue for PTP events */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+/* PTP timestamping mode */
+#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+/* enum: PTP, version 1 */
+#define MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define MC_CMD_PTP_MODE_FCOE 0x5
+
+/* MC_CMD_PTP_IN_DISABLE msgrequest */
+#define MC_CMD_PTP_IN_DISABLE_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
+#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
+#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
+#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Transmit packet length */
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+/* Transmit packet data */
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_STATUS msgrequest */
+#define MC_CMD_PTP_IN_STATUS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
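+
+/*
+ * Illustrative sketch, not part of the MCDI definitions: the FREQ field
+ * above carries a dimensionless frequency offset (nanoseconds of adjustment
+ * per nanosecond of elapsed time) as a signed fixed-point value with
+ * MC_CMD_PTP_IN_ADJUST_BITS (40) fractional bits; the 64-bit result is
+ * split across the FREQ_LO/FREQ_HI dwords.  The helper name is hypothetical
+ * and scales a parts-per-billion adjustment into that format.
+ */
+static int64_t
+sfxge_ptp_ppb_to_fp40(int64_t ppb)
+{
+	/* (ppb / 1e9) expressed with 40 fractional bits */
+	return ((ppb * (1LL << MC_CMD_PTP_IN_ADJUST_BITS)) / 1000000000LL);
+}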
+
+/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of time readings to capture */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+/* Host address in which to write "synchronization started" indication (64
+ * bits)
+ */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+
+/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Enable or disable packet testing */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
+#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset PTP statistics */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_DEBUG msgrequest */
+#define MC_CMD_PTP_IN_DEBUG_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Debug operations */
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+/* Set of VLAN tags to filter against */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+/* UUID to filter against */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+/* Domain number to filter against */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Set the clock source. */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+/* enum: Internal. */
+#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest */
+#define MC_CMD_PTP_IN_RST_CLK_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset value of Timer Reg. */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Enable or disable */
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+/* enum: Enable */
+#define MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define MC_CMD_PTP_DISABLE_PPS 0x1
+/* Queue id to send events back */
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Original field containing queue ID. Now extended to include flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
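+
+/*
+ * Illustrative sketch, not part of the MCDI definitions: packing the
+ * extended QUEUE field above, which carries the event queue ID in bits
+ * 0..15 and the REPORT_SYNC_STATUS flag in bit 31.  The helper name is
+ * hypothetical.
+ */
+static uint32_t
+sfxge_ptp_subscribe_queue(uint16_t evq_id, boolean_t report_sync)
+{
+	uint32_t queue = evq_id;	/* QUEUE_ID, LBN 0, width 16 */
+
+	if (report_sync)
+		queue |= (1U << 31);	/* REPORT_SYNC_STATUS, LBN 31 */
+	return (queue);
+}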
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
+/* MC_CMD_PTP_OUT msgresponse */
+#define MC_CMD_PTP_OUT_LEN 0
+
+/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
+#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+/* Number of packets transmitted and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+/* Number of packets received and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+/* Number of packets timestamped by the FPGA */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+/* Number of packets filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+/* Number of packets not filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+/* Number of PPS overflows (noise on input?) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+/* Number of PPS bad periods */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+/* Minimum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+/* Maximum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+/* Last period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+/* Mean period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+/* Last offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+/* Mean offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+
+/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+/* A set of host and NIC times */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+/* Host time immediately before NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+/* Host time immediately after NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+/* Number of nanoseconds waited after reading NIC's hardware clock */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event nS reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
+/* Presence of external oscillator */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+/* Number of packets received by FPGA */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+/* Number of packets received by Siena filters */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
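+
+/*
+ * Illustrative sketch, not part of the MCDI definitions: converting the
+ * minor (sub-second) part of a timestamp to nanoseconds for each of the
+ * time formats above.  The helper name is hypothetical; in the
+ * SECONDS_27FRACTION format the minor value is in units of 2^-27 s and is
+ * rescaled to nanoseconds here.
+ */
+static uint64_t
+sfxge_ptp_minor_to_ns(uint32_t format, uint32_t minor)
+{
+	switch (format) {
+	case MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS:
+		return (minor);			/* already in nanoseconds */
+	case MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS:
+		return ((uint64_t)minor * 8);	/* 8 ns per tick */
+	case MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION:
+		return (((uint64_t)minor * 1000000000ULL) >> 27);
+	default:
+		return (0);
+	}
+}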
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
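+
+/*
+ * Illustrative sketch, not part of the MCDI definitions: computing the
+ * corrected synchronization window for one MC_CMD_PTP_OUT_SYNCHRONIZE
+ * TIMESET sample, as described for SYNC_WINDOW_MIN above.  The structure
+ * and helper are hypothetical; the members mirror the HOSTSTART, HOSTEND
+ * and WAITNS fields of a sample.
+ */
+typedef struct sfxge_ptp_timeset_s {
+	uint32_t	host_start;	/* HOSTSTART */
+	uint32_t	seconds;	/* MAJOR */
+	uint32_t	nanoseconds;	/* MINOR */
+	uint32_t	host_end;	/* HOSTEND */
+	uint32_t	wait_ns;	/* WAITNS */
+} sfxge_ptp_timeset_t;
+
+static uint32_t
+sfxge_ptp_sync_window(const sfxge_ptp_timeset_t *tsp)
+{
+	/* (host end - host start) minus the time the MC waited */
+	return ((tsp->host_end - tsp->host_start) - tsp->wait_ns);
+}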
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CSR_READ32
+ * Read 32bit words from the indirect memory map.
+ */
+#define MC_CMD_CSR_READ32 0xc
+#undef MC_CMD_0xc_PRIVILEGE_CTG
+
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CSR_READ32_IN msgrequest */
+#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+
+/* MC_CMD_CSR_READ32_OUT msgresponse */
+#define MC_CMD_CSR_READ32_OUT_LENMIN 4
+#define MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+/* The last dword is the status, not a value read */
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_CSR_WRITE32
+ * Write 32bit dwords to the indirect memory map.
+ */
+#define MC_CMD_CSR_WRITE32 0xd
+#undef MC_CMD_0xd_PRIVILEGE_CTG
+
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CSR_WRITE32_IN msgrequest */
+#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
+#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+/* Address */
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+
+/* MC_CMD_CSR_WRITE32_OUT msgresponse */
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+#undef MC_CMD_0x54_PRIVILEGE_CTG
+
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval. When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
+ * state / 2: (debug) Show temperature reported by one of the supported
+ * sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
+
+
+/***********************************/
+/* MC_CMD_STACKINFO
+ * Get stack information.
+ */
+#define MC_CMD_STACKINFO 0xf
+#undef MC_CMD_0xf_PRIVILEGE_CTG
+
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_STACKINFO_IN msgrequest */
+#define MC_CMD_STACKINFO_IN_LEN 0
+
+/* MC_CMD_STACKINFO_OUT msgresponse */
+#define MC_CMD_STACKINFO_OUT_LENMIN 12
+#define MC_CMD_STACKINFO_OUT_LENMAX 252
+#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+/* (thread ptr, stack size, free space) for each thread in system */
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_MDIO_READ
+ * MDIO register read.
+ */
+#define MC_CMD_MDIO_READ 0x10
+#undef MC_CMD_0x10_PRIVILEGE_CTG
+
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MDIO_READ_IN msgrequest */
+#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+/* enum: By default all MCDI MDIO operations use clause 45 addressing. To use
+ * clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+
+/* MC_CMD_MDIO_READ_OUT msgresponse */
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8
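+
+/*
+ * Illustrative sketch, not part of the MCDI definitions: the layout of an
+ * MDIO read request as given by the _OFST values above.  Passing
+ * MC_CMD_MDIO_CLAUSE22 as the device address selects clause 22 addressing;
+ * on completion, STATUS should equal MC_CMD_MDIO_STATUS_GOOD before VALUE
+ * is used.  The structure and helper are hypothetical.
+ */
+typedef struct sfxge_mdio_read_req_s {
+	uint32_t	bus;	/* MC_CMD_MDIO_BUS_INTERNAL or _EXTERNAL */
+	uint32_t	prtad;	/* port address */
+	uint32_t	devad;	/* MMD, or MC_CMD_MDIO_CLAUSE22 */
+	uint32_t	addr;	/* register address */
+} sfxge_mdio_read_req_t;
+
+static void
+sfxge_mdio_read_req_init(sfxge_mdio_read_req_t *reqp, uint32_t prtad,
+    uint32_t devad, uint32_t addr)
+{
+	reqp->bus = MC_CMD_MDIO_BUS_EXTERNAL;	/* external PHY bus */
+	reqp->prtad = prtad;
+	reqp->devad = devad;
+	reqp->addr = addr;
+}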
+
+
+/***********************************/
+/* MC_CMD_MDIO_WRITE
+ * MDIO register write.
+ */
+#define MC_CMD_MDIO_WRITE 0x11
+#undef MC_CMD_0x11_PRIVILEGE_CTG
+
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MDIO_WRITE_IN msgrequest */
+#define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+/* enum: Internal. */
+/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
+/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+/* enum: By default all MCDI MDIO operations use clause 45 addressing. To use
+ * clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+/* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
+#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+/* Value */
+#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+
+/* MC_CMD_MDIO_WRITE_OUT msgresponse */
+#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+/* enum: Good. */
+/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
+
+
+/***********************************/
+/* MC_CMD_DBI_WRITE
+ * Write DBI register(s).
+ */
+#define MC_CMD_DBI_WRITE 0x12
+#undef MC_CMD_0x12_PRIVILEGE_CTG
+
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DBI_WRITE_IN msgrequest */
+#define MC_CMD_DBI_WRITE_IN_LENMIN 12
+#define MC_CMD_DBI_WRITE_IN_LENMAX 252
+#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
+
+/* MC_CMD_DBI_WRITE_OUT msgresponse */
+#define MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBIWROP_TYPEDEF structuredef */
+#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
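+
+/*
+ * Illustrative sketch, not part of the MCDI definitions: packing one
+ * MC_CMD_DBIWROP_TYPEDEF entry.  In this sketch the VF_NUM, VF_ACTIVE and
+ * CS2 LBNs are read as bit positions within the PARMS dword at offset 4;
+ * the structure and helper are hypothetical.
+ */
+typedef struct sfxge_dbiwrop_s {
+	uint32_t	address;	/* ADDRESS */
+	uint32_t	parms;		/* VF_NUM/VF_ACTIVE/CS2 */
+	uint32_t	value;		/* VALUE */
+} sfxge_dbiwrop_t;
+
+static void
+sfxge_dbiwrop_pack(sfxge_dbiwrop_t *op, uint32_t addr, uint16_t vf_num,
+    boolean_t vf_active, boolean_t cs2, uint32_t value)
+{
+	op->address = addr;
+	op->parms = ((uint32_t)vf_num << 16) |		/* VF_NUM, LBN 16 */
+	    ((vf_active ? 1U : 0U) << 15) |		/* VF_ACTIVE, LBN 15 */
+	    ((cs2 ? 1U : 0U) << 14);			/* CS2, LBN 14 */
+	op->value = value;
+}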
+
+
+/***********************************/
+/* MC_CMD_PORT_READ32
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ32 0x14
+
+/* MC_CMD_PORT_READ32_IN msgrequest */
+#define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ32_OUT msgresponse */
+#define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
+#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+/* Status */
+#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE32
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE32 0x15
+
+/* MC_CMD_PORT_WRITE32_IN msgrequest */
+#define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
+#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+/* Value */
+#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+
+/* MC_CMD_PORT_WRITE32_OUT msgresponse */
+#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_PORT_READ128
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ128 0x16
+
+/* MC_CMD_PORT_READ128_IN msgrequest */
+#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ128_OUT msgresponse */
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE128
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+
+/* MC_CMD_PORT_WRITE128_IN msgrequest */
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+/* Value */
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
+
+/* MC_CMD_PORT_WRITE128_OUT msgresponse */
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* FC mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_CFG
+ * Returns the MC firmware configuration structure.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+#undef MC_CMD_0x18_PRIVILEGE_CTG
+
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+/* This field contains a 16-bit value for each of the types of NVRAM area. The
+ * values are defined in the firmware/mc/platform/.c file for a specific board
+ * type, but otherwise have no meaning to the MC; they are used by the driver
+ * to manage selection of appropriate firmware updates.
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
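+
+/*
+ * Illustrative sketch, not part of the MCDI definitions: reading one entry
+ * of the 16-bit FW_SUBTYPE_LIST described above from a response buffer laid
+ * out according to the _OFST/_LEN values.  The helper name is hypothetical
+ * and assumes a little-endian response.
+ */
+static uint16_t
+sfxge_board_cfg_fw_subtype(const uint8_t *outbuf, unsigned int idx)
+{
+	const uint8_t *p = outbuf +
+	    MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST +
+	    (idx * MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN);
+
+	return ((uint16_t)p[0] | ((uint16_t)p[1] << 8));
+}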
+
+
+/***********************************/
+/* MC_CMD_DBI_READX
+ * Read DBI register(s) -- extended functionality
+ */
+#define MC_CMD_DBI_READX 0x19
+#undef MC_CMD_0x19_PRIVILEGE_CTG
+
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DBI_READX_IN msgrequest */
+#define MC_CMD_DBI_READX_IN_LENMIN 8
+#define MC_CMD_DBI_READX_IN_LENMAX 248
+#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+/* Each read op consists of an address (offset 0) and VF/CS2 (offset 32). */
+#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+
+/* MC_CMD_DBI_READX_OUT msgresponse */
+#define MC_CMD_DBI_READX_OUT_LENMIN 4
+#define MC_CMD_DBI_READX_OUT_LENMAX 252
+#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+/* Value */
+#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
+#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
+#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_RAND_SEED
+ * Set the 16byte seed for the MC pseudo-random generator.
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+#undef MC_CMD_0x1a_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_RAND_SEED_IN msgrequest */
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
+
+/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LTSSM_HIST
+ * Retrieve the history of the LTSSM, if the build supports it.
+ */
+#define MC_CMD_LTSSM_HIST 0x1b
+
+/* MC_CMD_LTSSM_HIST_IN msgrequest */
+#define MC_CMD_LTSSM_HIST_IN_LEN 0
+
+/* MC_CMD_LTSSM_HIST_OUT msgresponse */
+#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
+#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
+#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
+#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_DRV_ATTACH
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
+ */
+#define MC_CMD_DRV_ATTACH 0x1c
+#undef MC_CMD_0x1c_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRV_ATTACH_IN msgrequest */
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state to set if UPDATE=1 */
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_WIDTH 1
+#define MC_CMD_DRV_PREBOOT_LBN 1
+#define MC_CMD_DRV_PREBOOT_WIDTH 1
+/* 1 to set new state, or 0 to just report the existing state */
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
+/* enum: Only this option is allowed for non-admin functions */
+#define MC_CMD_FW_DONT_CARE 0xffffffff
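+
+/*
+ * Illustrative sketch, not part of the MCDI definitions: a typical attach
+ * request sets the ATTACH bit in NEW_STATE, sets UPDATE to 1 so the new
+ * state is applied, and names a preferred datapath firmware variant
+ * (non-admin functions must use MC_CMD_FW_DONT_CARE).  The structure and
+ * helper are hypothetical.
+ */
+typedef struct sfxge_drv_attach_req_s {
+	uint32_t	new_state;	/* NEW_STATE bitmask */
+	uint32_t	update;		/* 1 to apply NEW_STATE */
+	uint32_t	firmware_id;	/* preferred datapath firmware */
+} sfxge_drv_attach_req_t;
+
+static void
+sfxge_drv_attach_req_init(sfxge_drv_attach_req_t *reqp)
+{
+	reqp->new_state = (1U << MC_CMD_DRV_ATTACH_LBN);
+	reqp->update = 1;
+	reqp->firmware_id = MC_CMD_FW_DONT_CARE;
+}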
+
+/* MC_CMD_DRV_ATTACH_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
+/* enum: The function does not have an active port associated with it. The port
+ * refers to the Sorrento external FPGA port.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
+
+
+/***********************************/
+/* MC_CMD_SHMUART
+ * Route UART output to circular buffer in shared memory instead.
+ */
+#define MC_CMD_SHMUART 0x1f
+
+/* MC_CMD_SHMUART_IN msgrequest */
+#define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
+#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+
+/* MC_CMD_SHMUART_OUT msgresponse */
+#define MC_CMD_SHMUART_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+#undef MC_CMD_0x20_PRIVILEGE_CTG
+
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ENTITY_RESET
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
+ */
+#define MC_CMD_ENTITY_RESET 0x20
+/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
+
+/* MC_CMD_ENTITY_RESET_IN msgrequest */
+#define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
+#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
+
+/* MC_CMD_ENTITY_RESET_OUT msgresponse */
+#define MC_CMD_ENTITY_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_CREDITS
+ * Read instantaneous and minimum flow control thresholds.
+ */
+#define MC_CMD_PCIE_CREDITS 0x21
+
+/* MC_CMD_PCIE_CREDITS_IN msgrequest */
+#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+/* wipe statistics */
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+
+/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
+#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
+
+
+/***********************************/
+/* MC_CMD_RXD_MONITOR
+ * Get histogram of RX queue fill level.
+ */
+#define MC_CMD_RXD_MONITOR 0x22
+
+/* MC_CMD_RXD_MONITOR_IN msgrequest */
+#define MC_CMD_RXD_MONITOR_IN_LEN 12
+#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+
+/* MC_CMD_RXD_MONITOR_OUT msgresponse */
+#define MC_CMD_RXD_MONITOR_OUT_LEN 80
+#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+
+
+/***********************************/
+/* MC_CMD_PUTS
+ * Copy the given ASCII string out onto UART and/or out of the network port.
+ */
+#define MC_CMD_PUTS 0x23
+#undef MC_CMD_0x23_PRIVILEGE_CTG
+
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PUTS_IN msgrequest */
+#define MC_CMD_PUTS_IN_LENMIN 13
+#define MC_CMD_PUTS_IN_LENMAX 252
+#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_UART_LBN 0
+#define MC_CMD_PUTS_IN_UART_WIDTH 1
+#define MC_CMD_PUTS_IN_PORT_LBN 1
+#define MC_CMD_PUTS_IN_PORT_WIDTH 1
+#define MC_CMD_PUTS_IN_DHOST_OFST 4
+#define MC_CMD_PUTS_IN_DHOST_LEN 6
+#define MC_CMD_PUTS_IN_STRING_OFST 12
+#define MC_CMD_PUTS_IN_STRING_LEN 1
+#define MC_CMD_PUTS_IN_STRING_MINNUM 1
+#define MC_CMD_PUTS_IN_STRING_MAXNUM 240
+
+/* MC_CMD_PUTS_OUT msgresponse */
+#define MC_CMD_PUTS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration. This is guaranteed to succeed even if the PHY is
+ * in a 'zombie' state. Locks required: None
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+#undef MC_CMD_0x24_PRIVILEGE_CTG
+
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+/* Bitmask of supported capabilities */
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
+#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
+
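+/*
+ * Illustrative sketch (not part of the Solarflare definitions): decoding a
+ * field from an MCDI response using the _OFST/_LBN/_WIDTH convention used
+ * throughout this file, e.g. the MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP
+ * bitmask above.  Assumes the payload is the usual little-endian MCDI byte
+ * buffer; the helper names below are hypothetical, not part of any API.
+ */
+#include <stdint.h>
+
+static inline uint32_t
+mcdi_read_dword(const uint8_t *payload, unsigned int ofst)
+{
+	/* Each field lives in a 32-bit little-endian dword at byte _OFST. */
+	return ((uint32_t)payload[ofst] |
+	    ((uint32_t)payload[ofst + 1] << 8) |
+	    ((uint32_t)payload[ofst + 2] << 16) |
+	    ((uint32_t)payload[ofst + 3] << 24));
+}
+
+static inline uint32_t
+mcdi_read_field(const uint8_t *payload, unsigned int ofst,
+    unsigned int lbn, unsigned int width)
+{
+	/* _LBN is the low bit number within that dword, _WIDTH its size. */
+	uint32_t mask = (width < 32) ? ((1u << width) - 1u) : 0xffffffffu;
+
+	return ((mcdi_read_dword(payload, ofst) >> lbn) & mask);
+}
+
+/* Example: does the PHY advertise 10G full duplex? */
+static inline int
+phy_supports_10g_fdx(const uint8_t *get_phy_cfg_out)
+{
+	return (mcdi_read_field(get_phy_cfg_out,
+	    MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST,
+	    MC_CMD_PHY_CAP_10000FDX_LBN,
+	    MC_CMD_PHY_CAP_10000FDX_WIDTH) != 0);
+}
+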
+
+/***********************************/
+/* MC_CMD_START_BIST
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST.
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+#undef MC_CMD_0x25_PRIVILEGE_CTG
+
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY-specific BIST output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail
+ * result in OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return
+ * code: 0, EACCES (if PHY_LOCK is not held).
+ */
+#define MC_CMD_POLL_BIST 0x26
+#undef MC_CMD_0x26_PRIVILEGE_CTG
+
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+
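+/*
+ * Illustrative sketch (not part of the Solarflare definitions): minimal
+ * consumption of the generic MC_CMD_POLL_BIST response.  Only the pass/fail
+ * result is used here; BIST-specific output (the variants below) should only
+ * be parsed after checking the response length and MC_CMD_GET_PHY_CFG TYPE,
+ * as described above.  Reuses the hypothetical mcdi_read_dword() helper
+ * sketched after MC_CMD_GET_PHY_CFG.
+ */
+static inline int
+poll_bist_is_finished(const uint8_t *poll_bist_out, int *passedp)
+{
+	uint32_t result = mcdi_read_dword(poll_bist_out,
+	    MC_CMD_POLL_BIST_OUT_RESULT_OFST);
+
+	if (result == MC_CMD_POLL_BIST_RUNNING)
+		return (0);	/* not finished yet; poll again later */
+
+	/* Anything other than an explicit pass is treated as a failure. */
+	*passedp = (result == MC_CMD_POLL_BIST_PASSED);
+	return (1);
+}
+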
+/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+/* Status of each channel A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of each channel B */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of each channel C */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of each channel D */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+
+/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+
+
+/***********************************/
+/* MC_CMD_FLUSH_RX_QUEUES
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation, rather than by writing
+ * FLUSH_CMD directly.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
+ */
+#define MC_CMD_FLUSH_RX_QUEUES 0x27
+
+/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
+
+/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
+#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LOOPBACK_MODES
+ * Returns a bitmask of loopback modes available at each speed.
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#undef MC_CMD_0x28_PRIVILEGE_CTG
+
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* enum: Medford Wireside datapath loopback */
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+
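+/*
+ * Illustrative sketch (not part of the Solarflare definitions): the loopback
+ * masks above are 64-bit quantities carried as two dwords, with _LO_OFST
+ * holding bits 31:0 and _HI_OFST bits 63:32.  A mode is taken to be
+ * available at a given speed when the bit numbered by its enum value is set
+ * (an assumption consistent with "bitmask of loopback modes").  Reuses the
+ * hypothetical mcdi_read_dword() helper sketched after MC_CMD_GET_PHY_CFG.
+ */
+static inline int
+loopback_mode_supported_10g(const uint8_t *loopback_modes_out,
+    unsigned int mode)
+{
+	uint64_t mask =
+	    (uint64_t)mcdi_read_dword(loopback_modes_out,
+	    MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST) |
+	    ((uint64_t)mcdi_read_dword(loopback_modes_out,
+	    MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST) << 32);
+
+	/* e.g. mode == MC_CMD_LOOPBACK_XFI */
+	return ((mask & (1ULL << mode)) != 0);
+}
+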
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state. Locks required: None. Return code: 0,
+ * ETIME.
+ */
+#define MC_CMD_GET_LINK 0x29
+#undef MC_CMD_0x29_PRIVILEGE_CTG
+
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_OUT msgresponse */
+#define MC_CMD_GET_LINK_OUT_LEN 28
+/* near-side advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+/* link-partner advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_SET_LINK
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME
+ */
+#define MC_CMD_SET_LINK 0x2a
+#undef MC_CMD_0x2a_PRIVILEGE_CTG
+
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_LINK_IN msgrequest */
+#define MC_CMD_SET_LINK_IN_LEN 16
+/* ??? */
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+/* Flags */
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means "choose any available
+ * speed".
+ */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+
+/* MC_CMD_SET_LINK_OUT msgresponse */
+#define MC_CMD_SET_LINK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_ID_LED
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+#undef MC_CMD_0x2b_PRIVILEGE_CTG
+
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_ID_LED_IN msgrequest */
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+
+/* MC_CMD_SET_ID_LED_OUT msgresponse */
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MAC
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_MAC 0x2c
+#undef MC_CMD_0x2c_PRIVILEGE_CTG
+
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_MAC_IN msgrequest */
+#define MC_CMD_SET_MAC_IN_LEN 28
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_EXT_IN msgrequest */
+#define MC_CMD_SET_MAC_EXT_IN_LEN 32
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_OUT msgresponse */
+#define MC_CMD_SET_MAC_OUT_LEN 0
+
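+/*
+ * Illustrative sketch (not part of the Solarflare definitions): building the
+ * MC_CMD_SET_MAC_EXT_IN_CONTROL dword so that only the MTU is changed.  With
+ * SET_MAC_ENHANCED present in capabilities, fields whose CFG_* flag is clear
+ * are left unmodified, as described above.  The little-endian write helper
+ * is hypothetical, and the request buffer is assumed to have been zeroed by
+ * the caller.
+ */
+static inline void
+mcdi_write_dword(uint8_t *payload, unsigned int ofst, uint32_t value)
+{
+	payload[ofst + 0] = (uint8_t)(value & 0xff);
+	payload[ofst + 1] = (uint8_t)((value >> 8) & 0xff);
+	payload[ofst + 2] = (uint8_t)((value >> 16) & 0xff);
+	payload[ofst + 3] = (uint8_t)((value >> 24) & 0xff);
+}
+
+static inline void
+set_mac_ext_mtu_only(uint8_t *set_mac_ext_in, uint32_t mtu)
+{
+	/* set_mac_ext_in is a zeroed MC_CMD_SET_MAC_EXT_IN_LEN byte buffer. */
+	mcdi_write_dword(set_mac_ext_in, MC_CMD_SET_MAC_EXT_IN_MTU_OFST, mtu);
+	/* Only CFG_MTU is set: DRAIN, REJECT, FCNTL and FCS stay untouched. */
+	mcdi_write_dword(set_mac_ext_in, MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST,
+	    1u << MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN);
+}
+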
+
+/***********************************/
+/* MC_CMD_PHY_STATS
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumeration). Each value is
+ * represented by a 32-bit number. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned)
+ * location. Locks required: None.
+ * Returns: 0, ETIME
+ */
+#define MC_CMD_PHY_STATS 0x2d
+#undef MC_CMD_0x2d_PRIVILEGE_CTG
+
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_PHY_STATS_IN msgrequest */
+#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
+
+
+/***********************************/
+/* MC_CMD_MAC_STATS
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero-initialise the buffer to
+ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned)
+ * location.
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
+ */
+#define MC_CMD_MAC_STATS 0x2e
+#undef MC_CMD_0x2e_PRIVILEGE_CTG
+
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATS_IN msgrequest */
+#define MC_CMD_MAC_STATS_IN_LEN 20
+/* ??? */
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+
+/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
+#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
+
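+/*
+ * Illustrative sketch (not part of the Solarflare definitions): reading one
+ * counter out of the MC_CMD_MAC_STATS no-DMA response.  Statistics form an
+ * array of 64-bit values indexed by the MC_CMD_MAC_* enumeration above; a
+ * DMAed buffer is assumed to use the same indexing.  Reuses the hypothetical
+ * mcdi_read_dword() helper sketched after MC_CMD_GET_PHY_CFG.
+ */
+static inline uint64_t
+mac_stats_no_dma_read(const uint8_t *mac_stats_out, unsigned int stat)
+{
+	unsigned int ofst = MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST +
+	    stat * MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN;
+
+	/* Each statistic is a little-endian 64-bit value. */
+	return ((uint64_t)mcdi_read_dword(mac_stats_out, ofst) |
+	    ((uint64_t)mcdi_read_dword(mac_stats_out, ofst + 4) << 32));
+}
+
+/* Example: mac_stats_no_dma_read(buf, MC_CMD_MAC_RX_BYTES) */
+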
+
+/***********************************/
+/* MC_CMD_SRIOV
+ * to be documented
+ */
+#define MC_CMD_SRIOV 0x30
+
+/* MC_CMD_SRIOV_IN msgrequest */
+#define MC_CMD_SRIOV_IN_LEN 12
+#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+
+/* MC_CMD_SRIOV_OUT msgresponse */
+#define MC_CMD_SRIOV_OUT_LEN 8
+#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+
+/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MEMCPY
+ * DMA write data into (Rid,Addr), either by DMA reading (Rid,Addr), or by
+ * data embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a DMA
+ * update of a data structure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * record count, which is ignored in all but the first structure).
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It is the caller's responsibility to ensure that the embedded data
+ * doesn't overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
+ */
+#define MC_CMD_MEMCPY 0x31
+
+/* MC_CMD_MEMCPY_IN msgrequest */
+#define MC_CMD_MEMCPY_IN_LENMIN 32
+#define MC_CMD_MEMCPY_IN_LENMAX 224
+#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
+#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
+#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
+#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
+#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
+
+/* MC_CMD_MEMCPY_OUT msgresponse */
+#define MC_CMD_MEMCPY_OUT_LEN 0
+
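+/*
+ * Illustrative sketch (not part of the Solarflare definitions): a single
+ * MC_CMD_MEMCPY record whose source data is embedded in the request rather
+ * than DMAed from the host, per the description above.  Placing the data
+ * immediately after the record (offset MC_CMD_MEMCPY_IN_LEN(1)) is one valid
+ * choice that cannot overlap the record.  This helper and its destination
+ * arguments are hypothetical; mcdi_write_dword() is sketched after
+ * MC_CMD_SET_MAC.
+ */
+static inline uint32_t
+memcpy_inline_request(uint8_t *payload, uint32_t to_rid, uint64_t to_addr,
+    const uint8_t *data, uint32_t len)
+{
+	/* Caller ensures MC_CMD_MEMCPY_IN_LEN(1) + len <= LENMAX. */
+	uint32_t data_ofst = MC_CMD_MEMCPY_IN_LEN(1);
+	uint8_t *rec = payload + MC_CMD_MEMCPY_IN_RECORD_OFST;
+	uint32_t i;
+
+	mcdi_write_dword(rec,
+	    MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST, 1);
+	mcdi_write_dword(rec, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST,
+	    to_rid);
+	mcdi_write_dword(rec, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST,
+	    (uint32_t)to_addr);
+	mcdi_write_dword(rec, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST,
+	    (uint32_t)(to_addr >> 32));
+	/* FROM_RID=RID_INLINE, ADDR_HI=0, ADDR_LO=offset of embedded data. */
+	mcdi_write_dword(rec, MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST,
+	    MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE);
+	mcdi_write_dword(rec, MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST,
+	    data_ofst);
+	mcdi_write_dword(rec, MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST,
+	    0);
+	mcdi_write_dword(rec, MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST, len);
+
+	for (i = 0; i < len; i++)
+		payload[data_ofst + i] = data[i];
+
+	return (data_ofst + len);	/* total request length */
+}
+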
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_SET
+ * Set a WoL filter.
+ */
+#define MC_CMD_WOL_FILTER_SET 0x32
+#undef MC_CMD_0x32_PRIVILEGE_CTG
+
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+/* enum: Magic */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define MC_CMD_WOL_TYPE_MAX 0x7
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
+
+/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
+
+/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
+/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_REMOVE
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#undef MC_CMD_0x33_PRIVILEGE_CTG
+
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
+#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+
+/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_RESET
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_RESET 0x34
+#undef MC_CMD_0x34_PRIVILEGE_CTG
+
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
+#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
+
+/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MCAST_HASH
+ * Set the MCAST hash value without otherwise reconfiguring the MAC
+ */
+#define MC_CMD_SET_MCAST_HASH 0x35
+
+/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
+#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
+
+/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
+#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TYPES
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
+ */
+#define MC_CMD_NVRAM_TYPES 0x36
+#undef MC_CMD_0x36_PRIVILEGE_CTG
+
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TYPES_IN msgrequest */
+#define MC_CMD_NVRAM_TYPES_IN_LEN 0
+
+/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
+#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+/* enum: Disabled callisto. */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
+
+
+/***********************************/
+/* MC_CMD_NVRAM_INFO
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_INFO 0x37
+#undef MC_CMD_0x37_PRIVILEGE_CTG
+
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_INFO_IN msgrequest */
+#define MC_CMD_NVRAM_INFO_IN_LEN 4
+#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_INFO_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+
+/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
+ */
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_START
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held).
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+#undef MC_CMD_0x38_PRIVILEGE_CTG
+
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_READ
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_READ 0x39
+#undef MC_CMD_0x39_PRIVILEGE_CTG
+
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_READ_IN msgrequest */
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
+#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+/* Optional control info. If a partition is stored with an A/B versioning
+ * scheme (i.e. in more than one physical partition in NVRAM) the host can set
+ * this to control which underlying physical partition is used to read data
+ * from. This allows it to perform a read-modify-write-verify with the write
+ * lock continuously held by calling NVRAM_UPDATE_START, reading the old
+ * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
+ * verifying by reading with MODE=TARGET_BACKUP.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+/* enum: Same as omitting MODE: caller sees data in current partition unless it
+ * holds the write lock in which case it sees data in the partition it is
+ * updating.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
+/* enum: Read from the current partition of an A/B pair, even if holding the
+ * write lock.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
+/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
+ * pair
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
+
+/* MC_CMD_NVRAM_READ_OUT msgresponse */
+#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
+#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
+#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
+
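+/*
+ * Illustrative sketch (not part of the Solarflare definitions): one way to
+ * drive the read-modify-write-verify sequence described for the MODE field
+ * above, with the write lock held throughout.  The nvram_* wrappers, their
+ * arguments and the erase step are hypothetical; only the MODE values and
+ * the UPDATE_START/UPDATE_FINISH bracketing come from the definitions in
+ * this file.
+ */
+#if 0	/* illustrative only */
+	nvram_update_start(type);			/* take the write lock */
+	nvram_read_v2(type, 0, old, len,
+	    MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT);	/* old contents */
+	modify(old, new);
+	nvram_erase(type, 0, len);
+	nvram_write(type, 0, new, len);			/* non-current copy */
+	nvram_read_v2(type, 0, check, len,
+	    MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP);	/* verify the write */
+	nvram_update_finish(type, 0);			/* release the lock */
+#endif
+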
+
+/***********************************/
+/* MC_CMD_NVRAM_WRITE
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_WRITE 0x3a
+#undef MC_CMD_0x3a_PRIVILEGE_CTG
+
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_WRITE_IN msgrequest */
+#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
+#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
+#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_ERASE
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_ERASE 0x3b
+#undef MC_CMD_0x3b_PRIVILEGE_CTG
+
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_ERASE_IN msgrequest */
+#define MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
+#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_FINISH
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
+ * type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#undef MC_CMD_0x3c_PRIVILEGE_CTG
+
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_REBOOT
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares with
+ * REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0
+ */
+#define MC_CMD_REBOOT 0x3d
+#undef MC_CMD_0x3d_PRIVILEGE_CTG
+
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REBOOT_IN msgrequest */
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define MC_CMD_REBOOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice, maximum overrun), one for each thread, in ascending order of
+ * thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+#undef MC_CMD_0x3e_PRIVILEGE_CTG
+
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+#undef MC_CMD_0x3f_PRIVILEGE_CTG
+
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 32 in the sensor mask was previously unused, and is now reserved for use
+ * as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+#undef MC_CMD_0x41_PRIVILEGE_CTG
+
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+
+/* MC_CMD_SENSOR_INFO_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to the QSFP #0 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to the QSFP #1 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+/* enum: Controller die temperature (TDIODE): degC */
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+#define MC_CMD_SENSOR_ENTRY_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LEN 8
+#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
+#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
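+
+/*
+ * Illustrative sketch only (not part of the MCDI definitions): the paged
+ * sensor enumeration described above can be driven by testing the NEXT_PAGE
+ * bit of the returned MASK; the helper name is hypothetical.
+ */
+#if 0
+static int
+example_sensor_mask_has_next_page(unsigned int mask)
+{
+	return (int)((mask >> MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN) &
+	    ((1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH) - 1));
+}
+
+/*
+ * A caller would issue MC_CMD_SENSOR_INFO_EXT_IN with PAGE=0, 1, 2, ...
+ * until example_sensor_mask_has_next_page() returns 0 for the last response.
+ */
+#endif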
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
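+
+/*
+ * Illustrative sketch only (not part of the MCDI definitions): extracting
+ * the two (min, max) ranges from one 8-byte sensor information entry, as
+ * described for MC_CMD_SENSOR_INFO above. Assumes the entry bytes are
+ * little-endian; the helper names are hypothetical.
+ */
+#if 0
+static unsigned int
+example_get_le16(const unsigned char *buf, unsigned int ofst)
+{
+	return ((unsigned int)buf[ofst] | ((unsigned int)buf[ofst + 1] << 8));
+}
+
+static void
+example_decode_sensor_info_entry(const unsigned char *entry,
+    unsigned int *min1, unsigned int *max1,
+    unsigned int *min2, unsigned int *max2)
+{
+	/* entry points at MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN (8) bytes */
+	*min1 = example_get_le16(entry, MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST);
+	*max1 = example_get_le16(entry, MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST);
+	*min2 = example_get_le16(entry, MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST);
+	*max2 = example_get_le16(entry, MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST);
+	/* An unused range is indicated by equal limit values (min == max). */
+}
+#endif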
+
+
+/***********************************/
+/* MC_CMD_READ_SENSORS
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor readings require more space than the LENGTH allows, then return
+ * EINVAL.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+#undef MC_CMD_0x42_PRIVILEGE_CTG
+
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_SENSORS_IN msgrequest */
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
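+
+/*
+ * Illustrative sketch only (not part of the MCDI definitions): filling in
+ * MC_CMD_READ_SENSORS_EXT_IN, showing how the 64-bit DMA address is split
+ * into LO/HI dwords. Reuses the hypothetical example_put_dword() helper
+ * sketched earlier; the 4Kbyte alignment of the buffer is the caller's
+ * responsibility.
+ */
+#if 0
+static void
+example_build_read_sensors_ext(unsigned char *req,
+    unsigned long long dma_addr, unsigned int buf_len)
+{
+	/* req must be at least MC_CMD_READ_SENSORS_EXT_IN_LEN (12) bytes */
+	example_put_dword(req, MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST,
+	    (unsigned int)(dma_addr & 0xffffffffu));
+	example_put_dword(req, MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST,
+	    (unsigned int)(dma_addr >> 32));
+	example_put_dword(req, MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST, buf_len);
+}
+#endif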
+
+/* MC_CMD_READ_SENSORS_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
+/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed. */
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
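+
+/*
+ * Illustrative sketch only (not part of the MCDI definitions): unpacking a
+ * single MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword (as DMAed by
+ * MC_CMD_READ_SENSORS) into its VALUE, STATE and TYPE fields; the helper
+ * name is hypothetical.
+ */
+#if 0
+static void
+example_decode_sensor_value(unsigned int dword, unsigned int *valuep,
+    unsigned int *statep, unsigned int *typep)
+{
+	*valuep = (dword >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN) &
+	    ((1u << MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH) - 1);
+	*statep = (dword >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN) &
+	    ((1u << MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH) - 1);
+	*typep = (dword >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN) &
+	    ((1u << MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH) - 1);
+	/* e.g. *statep == MC_CMD_SENSOR_STATE_WARNING means a threshold
+	 * was breached for the sensor identified by *typep. */
+}
+#endif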
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_STATE
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+#undef MC_CMD_0x43_PRIVILEGE_CTG
+
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_STATE_IN msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
+
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
+ */
+#define MC_CMD_SETUP_8021QBB 0x44
+
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
+
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+#undef MC_CMD_0x45_PRIVILEGE_CTG
+
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#undef MC_CMD_0x46_PRIVILEGE_CTG
+
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#undef MC_CMD_0x47_PRIVILEGE_CTG
+
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_RESET_RESTORE
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
+ */
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+
+/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+
+/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully). Locks
+ * required: None Returns: 0
+ */
+#define MC_CMD_TESTASSERT 0x49
+#undef MC_CMD_0x49_PRIVILEGE_CTG
+
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround; that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#undef MC_CMD_0x4a_PRIVILEGE_CTG
+
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+
+/* MC_CMD_WORKAROUND_OUT msgresponse */
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_MEDIA_INFO
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#undef MC_CMD_0x4b_PRIVILEGE_CTG
+
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
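+
+/*
+ * Illustrative sketch only (not part of the MCDI definitions): for SFP+
+ * media, a byte at offset 0..255 of the module's I2C address 0xA0 space is
+ * fetched by choosing PAGE 0 or 1 as described above; the helper name is
+ * hypothetical.
+ */
+#if 0
+static void
+example_sfp_a0_locate(unsigned int byte_ofst,
+    unsigned int *pagep, unsigned int *idxp)
+{
+	/* Each PAGE returns a 128-byte block (offset 0 or 0x80). */
+	*pagep = byte_ofst / 128;	/* value for ..._IN_PAGE_OFST */
+	*idxp = byte_ofst % 128;	/* index into the returned DATA array */
+}
+#endif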
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TEST
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#undef MC_CMD_0x4c_PRIVILEGE_CTG
+
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TEST_IN msgrequest */
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_TEST_OUT msgresponse */
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
+
+
+/***********************************/
+/* MC_CMD_MRSFP_TWEAK
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+
+/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+/* 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+/* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+/* direction */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#undef MC_CMD_0x4e_PRIVILEGE_CTG
+
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
+/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
+#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RESOURCE_LIMITS
+ */
+#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
+
+/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
+#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
+
+/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+#undef MC_CMD_0x51_PRIVILEGE_CTG
+
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+#undef MC_CMD_0x52_PRIVILEGE_CTG
+
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#undef MC_CMD_0x55_PRIVILEGE_CTG
+
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
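+
+/*
+ * Illustrative sketch only (not part of the MCDI definitions): deriving the
+ * Nth allocated unicast address from the base/count/stride triple returned
+ * above. Assumes the 6-byte base address is in transmission (big-endian)
+ * order and that MAC_STRIDE is added to its numeric value; the helper name
+ * is hypothetical.
+ */
+#if 0
+static int
+example_get_nth_mac(const unsigned char *base, unsigned int count,
+    unsigned int stride, unsigned int n, unsigned char *mac_out)
+{
+	unsigned long long addr = 0;
+	int i;
+
+	if (n >= count)
+		return (-1);
+	for (i = 0; i < 6; i++)
+		addr = (addr << 8) | base[i];
+	addr += (unsigned long long)n * stride;
+	for (i = 5; i >= 0; i--) {
+		mac_out[i] = (unsigned char)(addr & 0xff);
+		addr >>= 8;
+	}
+	return (0);
+}
+#endif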
+
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation
+ */
+#define MC_CMD_CLP 0x56
+#undef MC_CMD_0x56_PRIVILEGE_CTG
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define MC_CMD_CLP_IN_OP_OFST 0
+/* enum: Return to factory default settings */
+#define MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define MC_CMD_CLP_IN_DEFAULT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_LEN 12
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MAC address assigned to port */
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* Boot flag */
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+#undef MC_CMD_0x57_PRIVILEGE_CTG
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MUM_IN msgrequest */
+#define MC_CMD_MUM_IN_LEN 4
+#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_LBN 0
+#define MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define MC_CMD_MUM_OP_QSFP 0xc
+/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
+ * level) from MUM
+ */
+#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define MC_CMD_MUM_IN_CMD_OFST 0
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device (connected to the MUM) whose registers are to be read */
+#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+/* Number of words to read. */
+#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device (connected to the MUM) whose registers are to be written */
+#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/* MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+/* Words to write */
+#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MUM I2C cmd code */
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+/* Number of bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+/* Number of bytes to read */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+/* Bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* Enable/disable debug output to UART */
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
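+
+/*
+ * Illustrative sketch only (not part of the MCDI definitions): packing the
+ * GPIO_OP sub-command header, which carries the GPIO opcode, the bitwise
+ * operation and the GPIO line number in one dword; the helper name is
+ * hypothetical.
+ */
+#if 0
+static unsigned int
+example_mum_gpio_op_hdr(unsigned int bitwise_op, unsigned int gpio_number)
+{
+	return ((MC_CMD_MUM_IN_GPIO_OP << MC_CMD_MUM_IN_GPIO_OPCODE_LBN) |
+	    (bitwise_op << MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN) |
+	    (gpio_number << MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN));
+}
+/* e.g. example_mum_gpio_op_hdr(MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE, 3) selects
+ * a write of GPIO line 3; the WRITEBIT value then goes in bits 24-31. */
+#endif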
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Bit-mask of clocks to be programmed */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Enable/Disable FPGA config from flash */
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
+#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+/* returned data */
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_READ_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
+
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+
+/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+/* Discrete (soldered) DDR resistor strap info */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
+/* Number of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+/* Array of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
+/* enum: 1.8V (values 5-15 are reserved for future usage) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
+/* enum: No module present */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
+/* enum: Module present, supported and powered on */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
+/* enum: Module present but bad type */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
+/* enum: Module present but incompatible voltage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
+/* enum: Module present but unknown SPD */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
+/* enum: Module present but slot cannot support it */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
+/* enum: Modules may or may not be present, but cannot establish contact by I2C
+ */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
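The SODIMM info record above is a packed 64-bit value whose fields are described
by the _LBN/_WIDTH pairs. A minimal decoding sketch, assuming the record has
already been copied out of the response into a host-endian uint64_t; the
DDR_FIELD macro and function name are hypothetical helpers, not part of this header:

    #include <stdint.h>

    /* Hypothetical helper: extract one field from a 64-bit SODIMM info record */
    #define DDR_FIELD(_rec, _name) \
    	(((_rec) >> MC_CMD_MUM_OUT_READ_DDR_INFO_ ## _name ## _LBN) & \
    	    ((1ull << MC_CMD_MUM_OUT_READ_DDR_INFO_ ## _name ## _WIDTH) - 1))

    static void
    ddr_record_decode(uint64_t rec)
    {
    	unsigned int bank    = DDR_FIELD(rec, BANK_ID);	/* BANK1 or BANK2 */
    	unsigned int voltage = DDR_FIELD(rec, VOLTAGE);	/* NOT_POWERED .. 1V8 */
    	unsigned int state   = DDR_FIELD(rec, STATE);	/* ABSENT .. NOT_REACHABLE */
    	unsigned int speed   = DDR_FIELD(rec, SPEED);	/* 16-bit SPEED field */

    	(void) bank; (void) voltage; (void) state; (void) speed;
    }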
+
+/* MC_CMD_RESOURCE_SPECIFIER enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+/* enum: None */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
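The BUFTBL_ENTRY offsets above lay out a 12-byte entry: a 16-bit owner ID, a
16-bit page-size code and a raw 64-bit SMC address. A packing sketch, assuming
little-endian MCDI encoding; the put_le16/put_le64 helpers are hypothetical
stand-ins for whatever byte-order primitives the surrounding driver provides:

    #include <stdint.h>

    static void
    put_le16(uint8_t *p, uint16_t v)
    {
    	p[0] = v & 0xff;
    	p[1] = v >> 8;
    }

    static void
    put_le64(uint8_t *p, uint64_t v)
    {
    	unsigned int i;

    	for (i = 0; i < 8; i++)
    		p[i] = (uint8_t)(v >> (8 * i));
    }

    static void
    buftbl_entry_pack(uint8_t entry[BUFTBL_ENTRY_LEN],
        uint16_t oid, uint16_t pgsz, uint64_t rawaddr)
    {
    	put_le16(entry + BUFTBL_ENTRY_OID_OFST, oid);
    	put_le16(entry + BUFTBL_ENTRY_PGSZ_OFST, pgsz);
    	put_le64(entry + BUFTBL_ENTRY_RAWADDR_OFST, rawaddr);
    }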
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output of second core on dual-core device */
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Primary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+/* enum: Secondary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+/* enum: FC firmware partition */
+#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+/* enum: FC License partition */
+#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+/* enum: Non-volatile log output partition for FC */
+#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: MUM firmware partition */
+#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: MUM Application table partition. */
+#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+/* enum: MUM boot rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+/* enum: UEFI expansion ROM if separate from PXE */
+#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Spare partition 0 */
+#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
+/* enum: Spare partition 1 */
+#define NVRAM_PARTITION_TYPE_SPARE_1 0x1100
+/* enum: Spare partition 2 */
+#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+/* enum: Spare partition 3 */
+#define NVRAM_PARTITION_TYPE_SPARE_3 0x1300
+/* enum: Spare partition 4 */
+#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+/* enum: Spare partition 5 */
+#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+/* enum: SolarSecure filter engine */
+#define LICENSED_APP_ID_SOLARSECURE 0x8
+/* enum: Performance monitor */
+#define LICENSED_APP_ID_PERF_MONITOR 0x10
+/* enum: SolarCapture Live */
+#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+/* enum: Capture SolarSystem */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+/* enum: Network Access Control */
+#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
+
+/* LICENSED_FEATURES structuredef */
+#define LICENSED_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_FEATURES_MASK_OFST 0
+#define LICENSED_FEATURES_MASK_LEN 8
+#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_PIO_LBN 1
+#define LICENSED_FEATURES_PIO_WIDTH 1
+#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_FEATURES_CLOCK_LBN 3
+#define LICENSED_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_FEATURES_MASK_LBN 0
+#define LICENSED_FEATURES_MASK_WIDTH 64
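LICENSED_FEATURES is a 64-bit bitmask carried as LO/HI 32-bit halves; each
feature is a single-bit _LBN position within the combined mask. A test sketch,
assuming the two halves have already been combined into a host-endian uint64_t
(function name is illustrative only):

    #include <stdint.h>

    static int
    licensed_feature_present(uint64_t features_mask, unsigned int feature_lbn)
    {
    	return (((features_mask >> feature_lbn) & 1) != 0);
    }

    /* e.g. licensed_feature_present(mask, LICENSED_FEATURES_RX_TIMESTAMPS_LBN) */

The same bit-test pattern applies to the LICENSED_V3_APPS and
LICENSED_V3_FEATURES masks that follow.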
+
+/* LICENSED_V3_APPS structuredef */
+#define LICENSED_V3_APPS_LEN 8
+/* Bitmask of licensed applications */
+#define LICENSED_V3_APPS_MASK_OFST 0
+#define LICENSED_V3_APPS_MASK_LEN 8
+#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_ONLOAD_LBN 0
+#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_PTP_LBN 1
+#define LICENSED_V3_APPS_PTP_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
+#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
+#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
+#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
+#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define LICENSED_V3_APPS_MASK_LBN 0
+#define LICENSED_V3_APPS_MASK_WIDTH 64
+
+/* LICENSED_V3_FEATURES structuredef */
+#define LICENSED_V3_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_V3_FEATURES_MASK_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LEN 8
+#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_PIO_LBN 1
+#define LICENSED_V3_FEATURES_PIO_WIDTH 1
+#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_V3_FEATURES_CLOCK_LBN 3
+#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_V3_FEATURES_MASK_LBN 0
+#define LICENSED_V3_FEATURES_MASK_WIDTH 64
+
+/* TX_TIMESTAMP_EVENT structuredef */
+#define TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event: ordinary TX completion, or the low or high part of a TX
+ * timestamp
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is the low part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
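A TX_TIMESTAMP_EVENT carries 16 bits of timestamp data in each of the LO and HI
fields, with the event-type byte between them. A decoding sketch for the 6-byte
structure; little-endian byte order and the two halves concatenating into one
32-bit data value are assumptions, not something this header states:

    #include <stdint.h>

    static void
    tx_timestamp_event_decode(const uint8_t ev[TX_TIMESTAMP_EVENT_LEN],
        unsigned int *typep, uint32_t *datap)
    {
    	uint16_t lo = (uint16_t)(ev[TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST] |
    	    (ev[TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST + 1] << 8));
    	uint16_t hi = (uint16_t)(ev[TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST] |
    	    (ev[TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST + 1] << 8));

    	*typep = ev[TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST];	/* COMPLETION/TSTAMP_LO/TSTAMP_HI */
    	*datap = ((uint32_t)hi << 16) | lo;
    }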
+
+/* RSS_MODE structuredef */
+#define RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 - 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
+ */
+#define RSS_MODE_HASH_SELECTOR_OFST 0
+#define RSS_MODE_HASH_SELECTOR_LEN 1
+#define RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define RSS_MODE_HASH_DST_ADDR_LBN 1
+#define RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define RSS_MODE_HASH_SRC_PORT_LBN 2
+#define RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define RSS_MODE_HASH_DST_PORT_LBN 3
+#define RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define RSS_MODE_HASH_SELECTOR_LBN 0
+#define RSS_MODE_HASH_SELECTOR_WIDTH 8
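Since the RSS mode is just the bottom four bits of the selector byte, a value
can be assembled directly from the _LBN constants. A minimal sketch (helper
name is illustrative only):

    #include <stdint.h>

    static uint8_t
    rss_mode_build(int hash_src_addr, int hash_dst_addr,
        int hash_src_port, int hash_dst_port)
    {
    	uint8_t mode = 0;

    	if (hash_src_addr)
    		mode |= 1u << RSS_MODE_HASH_SRC_ADDR_LBN;
    	if (hash_dst_addr)
    		mode |= 1u << RSS_MODE_HASH_DST_ADDR_LBN;
    	if (hash_src_port)
    		mode |= 1u << RSS_MODE_HASH_SRC_PORT_LBN;
    	if (hash_dst_port)
    		mode |= 1u << RSS_MODE_HASH_DST_PORT_LBN;

    	return (mode);	/* 0xf hashes on all four fields; 0 disables spreading */
    }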
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+#undef MC_CMD_0x50_PRIVILEGE_CTG
+
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
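The response length is fully determined by the constants above: a 16-byte
validity mask followed by 73 4-byte register entries, i.e. 16 + 73 * 4 = 308
bytes. A compile-time consistency check, written as a sketch with a
hypothetical typedef name:

    /* Fails to compile if the READ_REGS response constants ever disagree */
    typedef char read_regs_out_len_check[
        (MC_CMD_READ_REGS_OUT_MASK_LEN +
         MC_CMD_READ_REGS_OUT_REGS_NUM * MC_CMD_READ_REGS_OUT_REGS_LEN ==
         MC_CMD_READ_REGS_OUT_LEN) ? 1 : -1];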
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+#undef MC_CMD_0x80_PRIVILEGE_CTG
+
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX only */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX only */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
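The INIT_EVQ request is variable-length: one 64-bit DMA address per 4 KiB page
of host memory backing the queue, with the total length given by
MC_CMD_INIT_EVQ_IN_LEN(). A sizing sketch; the 8-byte event size is an
assumption about the EF10 event format, not something defined here, and npages
must not exceed MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM:

    #include <stddef.h>

    static size_t
    init_evq_request_len(unsigned int nevents)
    {
    	/* assumes 8 bytes per event; one DMA address per 4 KiB page */
    	unsigned int npages = (nevents * 8 + 4095) / 4096;

    	return (MC_CMD_INIT_EVQ_IN_LEN(npages));
    }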
+
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fibre Channel over Ethernet. */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fibre Channel over IP over Ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+#undef MC_CMD_0x81_PRIVILEGE_CTG
+
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_LBN 10
+#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
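The FLAGS word of the legacy INIT_RXQ request is assembled from the _LBN/_WIDTH
pairs above. A sketch of building it for a few common options; the helper is
illustrative only, and the resulting dword would then be stored little-endian
at MC_CMD_INIT_RXQ_IN_FLAGS_OFST in the request buffer:

    #include <stdint.h>

    static uint32_t
    init_rxq_flags(int hdr_split, int timestamp, unsigned int crc_mode,
        int disable_scatter)
    {
    	uint32_t flags = 0;

    	if (hdr_split)
    		flags |= 1u << MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN;
    	if (timestamp)
    		flags |= 1u << MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN;
    	flags |= (crc_mode &
    	    ((1u << MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH) - 1)) <<
    	    MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN;
    	if (disable_scatter)
    		flags |= 1u << MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN;

    	return (flags);
    }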
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+#undef MC_CMD_0x82_PRIVILEGE_CTG
+
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+/* Flags related to Qbb flow control mode. */
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
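Each entry of the DMA_ADDR array is an 8-byte, 4 KiB-aligned host address stored
at MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST plus a multiple of
MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN. A sketch of writing the i-th address, assuming
little-endian MCDI encoding; i must stay below
MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM:

    #include <stdint.h>

    static void
    init_txq_set_dma_addr(uint8_t *req, unsigned int i, uint64_t addr)
    {
    	uint8_t *p = req + MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST +
    	    i * MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN;
    	unsigned int b;

    	for (b = 0; b < MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN; b++)
    		p[b] = (uint8_t)(addr >> (8 * b));	/* little-endian */
    }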
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY
+ */
+#define MC_CMD_FINI_EVQ 0x83
+#undef MC_CMD_0x83_PRIVILEGE_CTG
+
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+#undef MC_CMD_0x84_PRIVILEGE_CTG
+
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+#undef MC_CMD_0x85_PRIVILEGE_CTG
+
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+#undef MC_CMD_0x86_PRIVILEGE_CTG
+
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supersedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+#undef MC_CMD_0x5b_PRIVILEGE_CTG
+
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define MC_CMD_PROXY_CMD_OUT_LEN 0
+
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+/* enum: An invalid handle. */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
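MC_PROXY_STATUS_BUFFER is a 16-byte record laid out by the _OFST/_LEN constants
above. A decoding sketch, assuming little-endian encoding; the get_le16/get_le32
helpers are hypothetical stand-ins for the driver's own byte-order primitives:

    #include <stdint.h>

    static uint16_t
    get_le16(const uint8_t *p)
    {
    	return ((uint16_t)(p[0] | (p[1] << 8)));
    }

    static uint32_t
    get_le32(const uint8_t *p)
    {
    	return ((uint32_t)p[0] | ((uint32_t)p[1] << 8) |
    	    ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24));
    }

    static void
    proxy_status_decode(const uint8_t buf[MC_PROXY_STATUS_BUFFER_LEN],
        uint32_t *handlep, uint16_t *pfp, uint16_t *vfp, uint16_t *statusp)
    {
    	*handlep = get_le32(buf + MC_PROXY_STATUS_BUFFER_HANDLE_OFST);
    	*pfp = get_le16(buf + MC_PROXY_STATUS_BUFFER_PF_OFST);
    	*vfp = get_le16(buf + MC_PROXY_STATUS_BUFFER_VF_OFST);	/* VF_NULL for a PF */
    	*statusp = get_le16(buf + MC_PROXY_STATUS_BUFFER_STATUS_OFST);
    }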
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+#undef MC_CMD_0x58_PRIVILEGE_CTG
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
+ * the host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
+/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
+ * the host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+#undef MC_CMD_0x5f_PRIVILEGE_CTG
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
+ * is stored in the REPLY_BUFF.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#undef MC_CMD_0x87_PRIVILEGE_CTG
+
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#undef MC_CMD_0x88_PRIVILEGE_CTG
+
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
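Each PROGRAM_BUFTBL_ENTRIES request carries at most
MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM addresses, so programming a large
chunk has to be split into batches. A small arithmetic sketch (function name is
illustrative only):

    static unsigned int
    buftbl_program_batches(unsigned int nentries)
    {
    	/* ceiling division by the per-request ENTRY_MAXNUM limit */
    	return ((nentries + MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM - 1) /
    	    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM);
    }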
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#undef MC_CMD_0x89_PRIVILEGE_CTG
+
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+/* PORT_CONFIG_ENTRY structuredef */
+#define PORT_CONFIG_ENTRY_LEN 16
+/* External port number (label) */
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
+/* Port core location */
+#define PORT_CONFIG_ENTRY_CORE_OFST 1
+#define PORT_CONFIG_ENTRY_CORE_LEN 1
+#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */
+#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */
+#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */
+#define PORT_CONFIG_ENTRY_CORE_LBN 8
+#define PORT_CONFIG_ENTRY_CORE_WIDTH 8
+/* Internal number (HW resource) relative to the core */
+#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
+#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
+/* Reserved */
+#define PORT_CONFIG_ENTRY_RSVD_OFST 3
+#define PORT_CONFIG_ENTRY_RSVD_LEN 1
+#define PORT_CONFIG_ENTRY_RSVD_LBN 24
+#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8
+/* Bitmask of KR lanes used by the port */
+#define PORT_CONFIG_ENTRY_LANES_OFST 4
+#define PORT_CONFIG_ENTRY_LANES_LBN 32
+#define PORT_CONFIG_ENTRY_LANES_WIDTH 32
+/* Port capabilities (MC_CMD_PHY_CAP_*) */
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
+/* Reserved (align to 16 bytes) */
+#define PORT_CONFIG_ENTRY_RSVD2_OFST 12
+#define PORT_CONFIG_ENTRY_RSVD2_LBN 96
+#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+#undef MC_CMD_0x8a_PRIVILEGE_CTG
+
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
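The MATCH_FIELDS word selects which of the request's match fields are
significant; each selected field must then also be filled in at its _OFST. A
sketch of the mask for a filter matching destination MAC plus outer VLAN (the
macro name is illustrative only):

    /* Match on DST_MAC and OUTER_VLAN; the DST_MAC and OUTER_VLAN fields must be set */
    #define EXAMPLE_MATCH_DST_MAC_OUTER_VLAN \
    	((1u << MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN) | \
    	 (1u << MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN))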
+
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
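+/*
+ * Illustrative sketch (not part of the generated MCDI definitions): filling
+ * the 16-byte SRC_IP match field of a FILTER_OP_EXT request with an IPv4
+ * address, following the note above that the last 12 bytes are zero for an
+ * IPv4 address.  Only the _OFST/_LEN macros come from this file; the payload
+ * buffer and function name are hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static void
+example_filter_set_ipv4_src(uint8_t *payload, const uint8_t ip4[4])
+{
+	uint8_t *field = payload + MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST;
+	unsigned int i;
+
+	for (i = 0; i < MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN; i++)
+		field[i] = (i < 4) ? ip4[i] : 0;	/* bytes 4..15 are 0 */
+}
+#endif
+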
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
+
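+/*
+ * Illustrative sketch (not part of the generated definitions): the returned
+ * filter handle is opaque, but 0xFFFFFFFF_FFFFFFFF is documented above as
+ * never being a valid handle, so the LO/HI words can be compared against the
+ * _INVALID values.  The function name is hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static int
+example_filter_handle_is_valid(uint32_t handle_lo, uint32_t handle_hi)
+{
+	return (handle_lo != MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID ||
+	    handle_hi != MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID);
+}
+#endif
+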
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_OUT/HANDLE */
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#undef MC_CMD_0xe4_PRIVILEGE_CTG
+
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+
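+/*
+ * Illustrative sketch (not part of the generated definitions): for the
+ * variable-length GET_SUPPORTED_RX_MATCHES response, the array length can be
+ * derived from the response size by inverting OUT_LEN(num) = 8 + 4*num, and
+ * should agree with the NUM_SUPPORTED_MATCHES field.  The function name is
+ * hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static unsigned int
+example_parser_disp_num_matches(size_t resp_len)
+{
+	if (resp_len < MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN)
+		return (0);
+	return ((resp_len -
+	    MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST) /
+	    MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN);
+}
+#endif
+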
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
+ * Please note that this interface is only of use to debug tools which have
+ * knowledge of firmware and hardware data structures; nothing here is intended
+ * for use by normal driver code.
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+#undef MC_CMD_0xe5_PRIVILEGE_CTG
+
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+/* enum: RX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine (with original metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
+/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
+/* enum: RX1 dispatcher CPU (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
+/* enum: Miscellaneous other state (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
+/* identifies the type of operation requested */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: write a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address (DICPU targets) or LUE index (LUE targets) */
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* selector (for MISC_STATE target) */
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+/* enum: Port to datapath mapping */
+#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
+/* value to write (for DMEM writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+/* value to write (for LUE writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+/* value read (for LUE reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
+
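+/*
+ * Illustrative sketch (not part of the generated definitions): the DMEM
+ * read-modify-write operation documented above computes
+ * new = (old & mask) ^ value.  To rewrite a single sub-field, the AND mask
+ * clears that field's bits and the XOR value carries the new field contents.
+ * The names below are hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static void
+example_dmem_rmw_args(unsigned int lbn, unsigned int width,
+    uint32_t new_field, uint32_t *and_maskp, uint32_t *xor_valuep)
+{
+	uint32_t field_mask = ((width == 32) ? 0xffffffffu :
+	    ((1u << width) - 1)) << lbn;
+
+	*and_maskp = ~field_mask;			/* clear the field */
+	*xor_valuep = (new_field << lbn) & field_mask;	/* then set it */
+}
+#endif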
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+#undef MC_CMD_0xb6_PRIVILEGE_CTG
+
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#undef MC_CMD_0xb8_PRIVILEGE_CTG
+
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#undef MC_CMD_0xb9_PRIVILEGE_CTG
+
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+#undef MC_CMD_0x8b_PRIVILEGE_CTG
+
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: response to a Huntington-compatible
+ * VI_ALLOC request. Use the extended version in new code.
+ */

+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+
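+/*
+ * Illustrative sketch (not part of the generated definitions): one plausible
+ * use of VI_BASE is to convert the absolute VI number carried in a wakeup
+ * event back into a function-relative index.  VI_SHIFT is ignored here (it
+ * is documented above as always 0 on Huntington); the function name is
+ * hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static unsigned int
+example_abs_vi_to_index(unsigned int abs_vi, unsigned int vi_base)
+{
+	return (abs_vi - vi_base);	/* index within this function */
+}
+#endif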
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+#undef MC_CMD_0x8c_PRIVILEGE_CTG
+
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+#undef MC_CMD_0xba_PRIVILEGE_CTG
+
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+/* Max number of VFs before the SR-IOV stride and offset may need to be
+ * changed.
+ */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+
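+/*
+ * Illustrative sketch (not part of the generated definitions): VF_OFFSET and
+ * VF_STRIDE describe PCIe routing IDs in the usual SR-IOV way, so the RID of
+ * the n'th VF (counting from 0) can be derived from the PF's own RID.  The
+ * function name is hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static unsigned int
+example_vf_rid(unsigned int pf_rid, unsigned int vf_offset,
+    unsigned int vf_stride, unsigned int n)
+{
+	return (pf_rid + vf_offset + n * vf_stride);
+}
+#endif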
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+#undef MC_CMD_0xbb_PRIVILEGE_CTG
+
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+/* Max number of VFs before the SR-IOV stride and offset may need to be
+ * changed.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about the number of VIs and the base VI number allocated
+ * to this function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#undef MC_CMD_0x8d_PRIVILEGE_CTG
+
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+#undef MC_CMD_0x8e_PRIVILEGE_CTG
+
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+
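+/*
+ * Illustrative sketch (not part of the generated definitions): several of
+ * the DUMP_VI_STATE metadata fields are 64-bit quantities returned as LO/HI
+ * dword pairs, with sub-fields described by _LBN/_WIDTH values that may
+ * start above bit 31 (e.g. VI_TX_META_WAITCOUNT at bit 32).  A generic
+ * extractor, assuming the usual LO = bits 0..31 / HI = bits 32..63 split;
+ * the function name is hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static uint32_t
+example_qword_field(uint32_t lo, uint32_t hi, unsigned int lbn,
+    unsigned int width)
+{
+	uint64_t qword = ((uint64_t)hi << 32) | lo;
+
+	return ((uint32_t)((qword >> lbn) & ((1ull << width) - 1)));
+}
+#endif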
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+#undef MC_CMD_0x8f_PRIVILEGE_CTG
+
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+#undef MC_CMD_0x90_PRIVILEGE_CTG
+
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_TLP_PROCESSING
+ * Get TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+#undef MC_CMD_0xb0_PRIVILEGE_CTG
+
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
+/* VI number to get information for. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
+/* Set no snoop bit for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+
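+/*
+ * Illustrative sketch (not part of the generated definitions): the TLP
+ * ordering/snooping flags above are single-bit fields within the 32-bit DATA
+ * word at offset 0, so they can be tested with their _LBN/_WIDTH values.
+ * The function name is hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static int
+example_vi_tlp_relaxed_ordering(uint32_t tlp_data_word)
+{
+	return ((tlp_data_word >>
+	    MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN) &
+	    ((1u << MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH)
+	    - 1));
+}
+#endif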
+
+/***********************************/
+/* MC_CMD_SET_VI_TLP_PROCESSING
+ * Set TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+#undef MC_CMD_0xb1_PRIVILEGE_CTG
+
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
+/* VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
+/* Set the no snoop bit for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
+ * Get global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+#undef MC_CMD_0xbc_PRIVILEGE_CTG
+
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* enum: MISC. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+/* enum: IDO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+/* enum: RO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+/* enum: TPH Type. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
+
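+/*
+ * Illustrative sketch (not part of the generated definitions): the meaning
+ * of TLP_INFO_WORD depends on which TLP_GLOBAL_CATEGORY was requested, so a
+ * caller typically switches on the category before picking which bit fields
+ * to decode.  The function name is hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static uint32_t
+example_tlp_globals_decode(uint32_t category, uint32_t info_word)
+{
+	switch (category) {
+	case MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO:
+		/* e.g. the IDO_DL_EN bit */
+		return ((info_word >>
+		    MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN) &
+		    1);
+	case MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO:
+		/* e.g. the RO_RXDMA_EN bit */
+		return ((info_word >>
+		    MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN) &
+		    1);
+	default:
+		return (info_word);	/* caller interprets the raw word */
+	}
+}
+#endif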
+
+/***********************************/
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
+ * Set global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+#undef MC_CMD_0xbd_PRIVILEGE_CTG
+
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SATELLITE_DOWNLOAD
+ * Download a new set of images to the satellite CPUs from the host.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+#undef MC_CMD_0x91_PRIVILEGE_CTG
+
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
+ * are subtle, and so downloads must proceed in a number of phases.
+ *
+ * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
+ * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
+ * be a checksum (a simple 32-bit sum) of the transferred data. An individual
+ * download may be aborted using CHUNK_ID_ABORT.
+ *
+ * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
+ * similar to PHASE_IMEMS.
+ *
+ * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * After any error (a requested abort is not considered to be an error) the
+ * sequence must be restarted from PHASE_RESET.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+/* Download phase. (Note: the IDLE phase is used internally and is never valid
+ * in a command from the host.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+/* Target for download. (These match the blob numbers defined in
+ * mc_flash_layout.h.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+/* enum: Last chunk, containing checksum rather than data */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+/* enum: Abort download of this item */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+/* Length of this chunk in bytes */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+/* Data for this chunk */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+
+/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+/* enum: Code download OK, completed. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+/* enum: Code download aborted as requested. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+/* enum: Code download OK so far, send next chunk. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+/* enum: Download phases out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+/* enum: Bad target for this phase */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+/* enum: Chunk ID out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+/* enum: Chunk length zero or too large */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+/* enum: Checksum was incorrect */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
+
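+/*
+ * Illustrative sketch (not part of the generated definitions): the final
+ * CHUNK_ID_LAST chunk of each IMEM/vector download carries "a simple 32-bit
+ * sum" of the transferred data.  Assuming that means the modulo-2^32 sum of
+ * the 32-bit data words sent in the preceding chunks, it can be accumulated
+ * as below; the function name is hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static uint32_t
+example_satellite_checksum(const uint32_t *words, size_t nwords)
+{
+	uint32_t sum = 0;
+	size_t i;
+
+	for (i = 0; i < nwords; i++)
+		sum += words[i];	/* wraps naturally modulo 2^32 */
+	return (sum);
+}
+#endif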
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
+ * reference inherent device capabilities as opposed to current NVRAM config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+#undef MC_CMD_0xbe_PRIVILEGE_CTG
+
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+
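+/*
+ * Illustrative sketch (not part of the generated definitions): individual
+ * capabilities are single-bit fields within FLAGS1 and can be tested with
+ * their _LBN/_WIDTH values, e.g. for the packed-stream RX capability.  The
+ * function name is hypothetical.
+ */
+#if 0	/* example only, not compiled */
+static int
+example_cap_rx_packed_stream(uint32_t flags1)
+{
+	return ((flags1 >> MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN) &
+	    ((1u << MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH) - 1));
+}
+#endif
+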
+/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length;
+ * see the length-check sketch after this response definition).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but that it cannot be
+ * expressed in this field. It is intended for a possible future situation
+ * where a more complex scheme of PF-to-port mapping is in use. The future
+ * driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
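+
+/* Illustrative sketch (not part of the generated MCDI definitions): FLAGS2
+ * and the fields after it only exist on newer firmware, so the response
+ * length must be checked before they are read.  "resp"/"resp_len" stand for
+ * the raw MCDI response payload; a little-endian host is assumed.
+ */
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+static int
+get_caps_v2_flags2(const uint8_t *resp, size_t resp_len, uint32_t *flags2p)
+{
+	if (resp_len < MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST +
+	    sizeof (uint32_t))
+		return (-1);	/* older firmware: FLAGS2 not present */
+	(void) memcpy(flags2p,
+	    resp + MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST,
+	    sizeof (uint32_t));
+	return (0);
+}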
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
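+
+/* Illustrative sketch (not part of the generated MCDI definitions): packing
+ * the MC_CMD_V2_EXTN request dword from the bitfields above.  A real driver
+ * builds this through its MCDI request machinery; the helper name is a
+ * placeholder.
+ */
+#include <stdint.h>
+
+static uint32_t
+mcdi_v2_extn_dword(unsigned int extended_cmd, unsigned int actual_len)
+{
+	uint32_t dword = 0;
+
+	dword |= ((uint32_t)extended_cmd &
+	    ((1u << MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH) - 1)) <<
+	    MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN;
+	dword |= ((uint32_t)actual_len &
+	    ((1u << MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH) - 1)) <<
+	    MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN;
+	return (dword);
+}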
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_ALLOC
+ * Allocate a pacer bucket (for qau rp or a snapper test)
+ */
+#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+#undef MC_CMD_0xb2_PRIVILEGE_CTG
+
+#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
+
+/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_FREE
+ * Free a pacer bucket
+ */
+#define MC_CMD_TCM_BUCKET_FREE 0xb3
+#undef MC_CMD_0xb3_PRIVILEGE_CTG
+
+#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+
+/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_INIT
+ * Initialise pacer bucket with a given rate
+ */
+#define MC_CMD_TCM_BUCKET_INIT 0xb4
+#undef MC_CMD_0xb4_PRIVILEGE_CTG
+
+#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
+/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_TXQ_INIT
+ * Initialise txq in pacer with given options or set options
+ */
+#define MC_CMD_TCM_TXQ_INIT 0xb5
+#undef MC_CMD_0xb5_PRIVILEGE_CTG
+
+#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+/* bitmask of the priority queues this txq is inserted into when inserted. */
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq is inserted into when inserted. */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
+/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
+#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+#undef MC_CMD_0x92_PRIVILEGE_CTG
+
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+/* Function Local Instance (VI) number. */
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+#undef MC_CMD_0x93_PRIVILEGE_CTG
+
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+#undef MC_CMD_0x94_PRIVILEGE_CTG
+
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
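+
+/* Illustrative sketch (not part of the generated MCDI definitions): filling
+ * an MC_CMD_VSWITCH_ALLOC request for a VEB v-switch whose attached v-ports
+ * may carry at most one VLAN tag.  The buffer layout follows the offsets
+ * above; a little-endian host is assumed and the names are placeholders.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static void
+build_vswitch_alloc(uint8_t *buf, uint32_t upstream_port_id)
+{
+	uint32_t type = MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB;
+	uint32_t num_vlan_tags = 1;
+
+	(void) memset(buf, 0, MC_CMD_VSWITCH_ALLOC_IN_LEN);
+	(void) memcpy(buf + MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST,
+	    &upstream_port_id, sizeof (upstream_port_id));
+	(void) memcpy(buf + MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST,
+	    &type, sizeof (type));
+	(void) memcpy(buf + MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST,
+	    &num_vlan_tags, sizeof (num_vlan_tags));
+}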
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+#undef MC_CMD_0x95_PRIVILEGE_CTG
+
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of v-switch. For now this command is an empty placeholder.
+ * It may be used to check if a v-switch is connected to a given EVB port (if
+ * not, then the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+#undef MC_CMD_0x63_PRIVILEGE_CTG
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+#undef MC_CMD_0x96_PRIVILEGE_CTG
+
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives packets which don't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+#undef MC_CMD_0x97_PRIVILEGE_CTG
+
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+#undef MC_CMD_0x98_PRIVILEGE_CTG
+
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
+/* The port to connect to the v-adaptor's port. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+/* The number of VLAN tags to transparently insert/remove. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+#undef MC_CMD_0x99_PRIVILEGE_CTG
+
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+#undef MC_CMD_0x5d_PRIVILEGE_CTG
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The new MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+#undef MC_CMD_0x5e_PRIVILEGE_CTG
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some config of v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+#undef MC_CMD_0x61_PRIVILEGE_CTG
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+/* The number of VLAN tags that may still be added */
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#undef MC_CMD_0x9a_PRIVILEGE_CTG
+
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
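+
+/* Illustrative sketch (not part of the generated MCDI definitions): the
+ * FUNCTION dword above packs the PF number in its low 16 bits and the VF
+ * number in its high 16 bits.  The helper name is a placeholder.
+ */
+#include <stdint.h>
+
+static uint32_t
+evb_port_assign_function(unsigned int pf, unsigned int vf)
+{
+	return (((uint32_t)(pf & 0xffff) << MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN) |
+	    ((uint32_t)(vf & 0xffff) << MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN));
+}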
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#undef MC_CMD_0x9b_PRIVILEGE_CTG
+
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+/* Write enable bits 0-3, set to write, clear to read. */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
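+
+/* Illustrative sketch (not part of the generated MCDI definitions): a pure
+ * read leaves the write-mask byte clear; to update a single region, set the
+ * corresponding mask bit and supply the new value, as below for REGION2.
+ * Little-endian host assumed; names are placeholders.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static void
+build_a64_regions_write_region2(uint8_t *buf, uint32_t region2)
+{
+	(void) memset(buf, 0, MC_CMD_RDWR_A64_REGIONS_IN_LEN);
+	(void) memcpy(buf + MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST,
+	    &region2, sizeof (region2));
+	buf[MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST] = 1u << 2;
+}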
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#undef MC_CMD_0x9c_PRIVILEGE_CTG
+
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#undef MC_CMD_0x9d_PRIVILEGE_CTG
+
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#undef MC_CMD_0x9e_PRIVILEGE_CTG
+
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* Number of queues spanned by this context, in the range 1-64; valid offsets
+ * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+/* enum: guaranteed invalid RSS context handle value */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
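+
+/* Illustrative sketch (not part of the generated MCDI definitions): filling
+ * an MC_CMD_RSS_CONTEXT_ALLOC request for an exclusive context spanning
+ * "nqueues" queues (1-64); the key and indirection table must then be set
+ * with MC_CMD_RSS_CONTEXT_SET_KEY/SET_TABLE.  Little-endian host assumed;
+ * names are placeholders.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static void
+build_rss_context_alloc(uint8_t *buf, uint32_t upstream_port_id,
+    uint32_t nqueues)
+{
+	uint32_t type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE;
+
+	(void) memcpy(buf + MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST,
+	    &upstream_port_id, sizeof (upstream_port_id));
+	(void) memcpy(buf + MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST,
+	    &type, sizeof (type));
+	(void) memcpy(buf + MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST,
+	    &nqueues, sizeof (nqueues));
+}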
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#undef MC_CMD_0x9f_PRIVILEGE_CTG
+
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#undef MC_CMD_0xa0_PRIVILEGE_CTG
+
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#undef MC_CMD_0xa1_PRIVILEGE_CTG
+
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#undef MC_CMD_0xa2_PRIVILEGE_CTG
+
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
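+
+/* Illustrative sketch (not part of the generated MCDI definitions): spreading
+ * traffic evenly over "nqueues" queues by filling the 128-entry indirection
+ * table round-robin; every entry must lie in 0..NUM_QUEUES-1 as given at
+ * context allocation.  Little-endian host assumed; names are placeholders.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static void
+build_rss_set_table(uint8_t *buf, uint32_t rss_context_id,
+    unsigned int nqueues)
+{
+	unsigned int i;
+
+	(void) memcpy(buf + MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST,
+	    &rss_context_id, sizeof (rss_context_id));
+	for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
+		buf[MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST + i] =
+		    (uint8_t)(i % nqueues);
+}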
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#undef MC_CMD_0xa3_PRIVILEGE_CTG
+
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#undef MC_CMD_0xe1_PRIVILEGE_CTG
+
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+/* Hash control flags. The _EN bits are always supported, but new modes are
+ * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
+ * in this case, the MODE fields may be set to non-zero values, and will take
+ * effect regardless of the settings of the _EN flags. See the RSS_MODE
+ * structure for the meaning of the mode bits. Drivers must check the
+ * capability before trying to set any _MODE fields, as older firmware will
+ * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
+ * the case where all the _MODE flags are zero, the _EN flags take effect,
+ * providing backward compatibility for existing drivers. (Setting all _MODE
+ * *and* all _EN flags to zero is valid, to disable RSS spreading for that
+ * particular packet type.)
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
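+
+/* Illustrative sketch (not part of the generated MCDI definitions): building
+ * the FLAGS word above.  The _EN bits are always accepted; the _MODE fields
+ * may only be used once MC_CMD_GET_CAPABILITIES has reported
+ * ADDITIONAL_RSS_MODES, since older firmware rejects FLAGS > 0xff with
+ * EINVAL.  The mode value comes from the RSS_MODE structure (defined
+ * elsewhere in this header) and is passed in by the caller; names are
+ * placeholders.
+ */
+#include <stdint.h>
+
+static uint32_t
+build_rss_flags(int have_additional_rss_modes, uint32_t udp_ipv4_mode)
+{
+	uint32_t flags = 0;
+
+	/* Enable classic Toeplitz hashing for IPv4/IPv6 and TCP 4-tuples. */
+	flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN;
+	flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN;
+	flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN;
+	flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN;
+
+	if (have_additional_rss_modes) {
+		/* A non-zero _MODE field overrides the _EN bits. */
+		flags |= (udp_ipv4_mode & 0xf) <<
+		    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN;
+	}
+	return (flags);
+}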
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#undef MC_CMD_0xe2_PRIVILEGE_CTG
+
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
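+
+/* Illustrative sketch (not part of the generated MCDI definitions): deciding
+ * which bits of the FLAGS word above to trust.  If any _MODE field is
+ * non-zero the _MODE fields describe the state and the _EN bits should be
+ * ignored; otherwise (older firmware) only the _EN bits are meaningful.
+ */
+#include <stdint.h>
+
+static int
+rss_flags_use_modes(uint32_t flags)
+{
+	/* All _MODE fields occupy bits 8..31 of the FLAGS word. */
+	return ((flags >>
+	    MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN) != 0);
+}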
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_ALLOC
+ * Allocate a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+#undef MC_CMD_0xa4_PRIVILEGE_CTG
+
+#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
+/* The handle of the owning upstream port */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
+ * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
+ * referenced RSS contexts must span no more than this number.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+/* enum: guaranteed invalid .1p mapping handle value */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_FREE
+ * Free a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+#undef MC_CMD_0xa5_PRIVILEGE_CTG
+
+#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE
+ * Set the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+#undef MC_CMD_0xa6_PRIVILEGE_CTG
+
+#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE
+ * Get the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+#undef MC_CMD_0xa7_PRIVILEGE_CTG
+
+#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ * Get Interrupt Vector config for this PF.
+ */
+#define MC_CMD_GET_VECTOR_CFG 0xbf
+#undef MC_CMD_0xbf_PRIVILEGE_CTG
+
+#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+/* Base absolute interrupt vector number. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+#undef MC_CMD_0xc0_PRIVILEGE_CTG
+
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#undef MC_CMD_0xa8_PRIVILEGE_CTG
+
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#undef MC_CMD_0xa9_PRIVILEGE_CTG
+
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses assigned to a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#undef MC_CMD_0xaa_PRIVILEGE_CTG
+
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
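+
+/* Illustrative sketch (not part of the generated MCDI definitions): walking
+ * the variable-length MAC address array in the response above.  "resp" and
+ * "resp_len" stand for the raw MCDI response payload; a little-endian host
+ * is assumed and the names are placeholders.
+ */
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+static int
+vport_get_macaddr(const uint8_t *resp, size_t resp_len, uint32_t idx,
+    uint8_t mac[6])
+{
+	uint32_t count;
+
+	if (resp_len < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
+		return (-1);
+	(void) memcpy(&count,
+	    resp + MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST,
+	    sizeof (count));
+	if (idx >= count ||
+	    resp_len < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(count))
+		return (-1);
+	(void) memcpy(mac,
+	    resp + MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST +
+	    idx * MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN, 6);
+	return (0);
+}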
+
+
+/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+#undef MC_CMD_0xeb_PRIVILEGE_CTG
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * read some config of v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+#undef MC_CMD_0x62_PRIVILEGE_CTG
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries, and does not use chunk handles. All entries must be in
+ * range, and in use for queue page mapping, although the latter restriction
+ * may be lifted in future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#undef MC_CMD_0xab_PRIVILEGE_CTG
+
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+#undef MC_CMD_0xc1_PRIVILEGE_CTG
+
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
+/* enum: pad to 64 bytes */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+/* enum: pad to 128 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+/* enum: pad to 256 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+#undef MC_CMD_0xc2_PRIVILEGE_CTG
+
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+#undef MC_CMD_0xac_PRIVILEGE_CTG
+
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+
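+/* Usage sketch (illustrative only, not part of the MCDI definitions): fixed-
+ * size responses are decoded by reading a dword at each _OFST offset. Assuming
+ * <stdint.h> and little-endian MCDI dword encoding, a hypothetical helper such
+ * as the one below could extract the two frequencies from the 8-byte response:
+ *
+ *	static uint32_t
+ *	mcdi_dword(const uint8_t *buf, unsigned int ofst)
+ *	{
+ *		return ((uint32_t)buf[ofst] |
+ *		    ((uint32_t)buf[ofst + 1] << 8) |
+ *		    ((uint32_t)buf[ofst + 2] << 16) |
+ *		    ((uint32_t)buf[ofst + 3] << 24));
+ *	}
+ *
+ *	sys_mhz = mcdi_dword(resp, MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST);
+ *	dpcpu_mhz = mcdi_dword(resp, MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST);
+ *
+ * where resp points at the MC_CMD_GET_CLOCK_OUT_LEN (8) byte response and the
+ * two results are uint32_t values in MHz.
+ */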
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+#undef MC_CMD_0xad_PRIVILEGE_CTG
+
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+/* enum: Leave the system clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for inter-core clock domain */
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for DPCPU clock domain */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for MC clock domain */
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+/* enum: Leave the MC clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define MC_CMD_SET_CLOCK_OUT_LEN 28
+/* Resulting system frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* enum: The system clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting inter-core frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
+/* Resulting DPCPU frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+/* enum: The dpcpu clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
+/* Resulting PCS frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting MC frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
+/* Resulting rmon frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
+/* Resulting vswitch frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
+
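+/* Usage sketch (illustrative only, not part of the MCDI definitions): the
+ * request carries one dword per clock domain, and writing the 0x0
+ * *_DOMAIN_DONT_CHANGE value leaves that domain alone. Assuming <stdint.h>,
+ * <string.h> and little-endian MCDI dword encoding, a request that changes
+ * only the system clock might be built like this (zeroing the buffer sets
+ * every other domain to its DONT_CHANGE value):
+ *
+ *	static void
+ *	build_set_clock(uint8_t req[MC_CMD_SET_CLOCK_IN_LEN], uint32_t sys_mhz)
+ *	{
+ *		unsigned int ofst = MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST;
+ *
+ *		(void) memset(req, 0, MC_CMD_SET_CLOCK_IN_LEN);
+ *		req[ofst + 0] = sys_mhz & 0xff;
+ *		req[ofst + 1] = (sys_mhz >> 8) & 0xff;
+ *		req[ofst + 2] = (sys_mhz >> 16) & 0xff;
+ *		req[ofst + 3] = (sys_mhz >> 24) & 0xff;
+ *	}
+ */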
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */
+#define MC_CMD_DPCPU_RPC 0xae
+#undef MC_CMD_0xae_PRIVILEGE_CTG
+
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DPCPU_RPC_IN msgrequest */
+#define MC_CMD_DPCPU_RPC_IN_LEN 36
+#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+/* enum: RxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
+/* enum: TxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+/* enum: TxDPCPU1 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
+/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_RX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
+/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_TX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
+/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
+ * initialised to zero
+ */
+#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
+/* Register data to write. Only valid in write/write-read. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+/* Register address. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+
+/* MC_CMD_DPCPU_RPC_OUT msgresponse */
+#define MC_CMD_DPCPU_RPC_OUT_LEN 36
+#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+/* DATA */
+#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#undef MC_CMD_0xe3_PRIVILEGE_CTG
+
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+#undef MC_CMD_0xe6_PRIVILEGE_CTG
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+#undef MC_CMD_0xe7_PRIVILEGE_CTG
+
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+
+
+/***********************************/
+/* MC_CMD_DUMP_DO
+ * Take a dump of the DUT state
+ */
+#define MC_CMD_DUMP_DO 0xe8
+#undef MC_CMD_0xe8_PRIVILEGE_CTG
+
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DUMP_DO_IN msgrequest */
+#define MC_CMD_DUMP_DO_IN_LEN 52
+#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+/* MC_CMD_DUMP_DO_OUT msgresponse */
+#define MC_CMD_DUMP_DO_OUT_LEN 4
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
+ * Configure unsolicited dumps
+ */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+#undef MC_CMD_0xe9_PRIVILEGE_CTG
+
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+#undef MC_CMD_0xea_PRIVILEGE_CTG
+
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, eg voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information: PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+#undef MC_CMD_0xec_PRIVILEGE_CTG
+
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, chip enters quiescent
+ * mode, calling function gets exclusive MCDI ownership. The only way out is
+ * reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#undef MC_CMD_0xed_PRIVILEGE_CTG
+
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_SEND_DATA
+ * Send checksummed[sic] block of data over the uart. Response is a placeholder
+ * should we wish to make this reliable; currently requests are fire-and-
+ * forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+#undef MC_CMD_0xee_PRIVILEGE_CTG
+
+#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define MC_CMD_UART_SEND_DATA_IN_LEN 0
+
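+/* Usage sketch (illustrative only, not part of the MCDI definitions): the
+ * checksum at CHECKSUM_OFST covers every byte from OFFSET_OFST to the end of
+ * DATA, so it is filled in last. The CRC32 variant is defined by the firmware;
+ * it is represented below by a hypothetical crc32(buf, len) helper. Assuming
+ * <stdint.h>, <stddef.h> and little-endian MCDI dword encoding:
+ *
+ *	unsigned int co = MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST;
+ *	size_t len = MC_CMD_UART_SEND_DATA_OUT_LEN(ndata);
+ *	uint32_t crc;
+ *
+ *	crc = crc32(req + MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST,
+ *	    len - MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST);
+ *	req[co + 0] = crc & 0xff;
+ *	req[co + 1] = (crc >> 8) & 0xff;
+ *	req[co + 2] = (crc >> 16) & 0xff;
+ *	req[co + 3] = (crc >> 24) & 0xff;
+ *
+ * where req is the request buffer and ndata is the number of DATA bytes.
+ */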
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request checksummed[sic] block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+#undef MC_CMD_0xef_PRIVILEGE_CTG
+
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+/* Offset from which to read the data */
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device One-Time-Programmable (OTP) Fuses
+ */
+#define MC_CMD_READ_FUSES 0xf0
+#undef MC_CMD_0xf0_PRIVILEGE_CTG
+
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
+
+
+/***********************************/
+/* MC_CMD_KR_TUNE
+ * Get or set KR Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_KR_TUNE 0xf1
+#undef MC_CMD_0xf1_PRIVILEGE_CTG
+
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_KR_TUNE_IN msgrequest */
+#define MC_CMD_KR_TUNE_IN_LENMIN 4
+#define MC_CMD_KR_TUNE_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+/* enum: Force KR Serdes reset / recalibration */
+#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_KR_TUNE_OUT msgresponse */
+#define MC_CMD_KR_TUNE_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15, Huntington) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15, Huntington) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: Edge DFE Tap1 (Huntington: 0 = max negative, 64 = zero, 127 = max
+ * positive; Medford: 0-31)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+/* enum: Edge DFE Tap2 (Huntington: 0 = max negative, 32 = zero, 63 = max
+ * positive; Medford: 0-31)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+/* enum: Edge DFE Tap3 (Huntington: 0 = max negative, 32 = zero, 63 = max
+ * positive; Medford: 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+/* enum: Edge DFE Tap4 (Huntington: 0 = max negative, 32 = zero, 63 = max
+ * positive; Medford: 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+/* enum: Edge DFE Tap5 (Huntington: 0 = max negative, 32 = zero, 63 = max
+ * positive; Medford: 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+/* enum: Edge DFE DLEV (0-128 for Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
+/* enum: Variable Gain Amplifier (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
+/* enum: CTLE EQ Capacitor (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (0-7, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude (Huntington, Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+/* enum: De-Emphasis Tap2 Fine (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+/* enum: Pre-Emphasis Magnitude (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+/* enum: Pre-Emphasis Fine (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+/* enum: TX Slew Rate Coarse control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+/* enum: TX Slew Rate Fine control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+/* enum: TX Termination Impedance control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
+/* enum: TX Amplitude Fine control (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
+/* enum: Pre-shoot Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
+/* enum: De-emphasis Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
+#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
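+/* Usage sketch (illustrative only, not part of the MCDI definitions): after a
+ * START_EYE_PLOT request, POLL_EYE_PLOT is issued repeatedly and each response
+ * carries zero or more 16-bit SAMPLES; an empty response ends the plot. With a
+ * hypothetical mcdi_rpc(cmd, req, req_len, resp, resp_max) helper that issues
+ * an MCDI command and returns the response length in bytes, the polling loop
+ * might look like this:
+ *
+ *	uint8_t req[MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN] = { 0 };
+ *	uint8_t resp[MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX];
+ *	size_t resp_len;
+ *
+ *	req[MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST] =
+ *	    MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT;
+ *	do {
+ *		resp_len = mcdi_rpc(MC_CMD_KR_TUNE, req, sizeof (req),
+ *		    resp, sizeof (resp));
+ *	} while (resp_len >= MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN);
+ */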
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_TUNE
+ * Get or set PCIE Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_PCIE_TUNE 0xf2
+#undef MC_CMD_0xf2_PRIVILEGE_CTG
+
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PCIE_TUNE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
+#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_PCIE_TUNE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: DFE Tap1 (0 = max negative, 64 = zero, 127 = max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+/* enum: DFE Tap2 (0 = max negative, 32 = zero, 63 = max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+/* enum: DFE Tap3 (0 = max negative, 32 = zero, 63 = max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+/* enum: DFE Tap4 (0 = max negative, 32 = zero, 63 = max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+/* enum: DFE Tap5 (0 = max negative, 32 = zero, 63 = max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+/* enum: DFE DLev */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
+/* enum: Figure of Merit */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
+/* enum: CTLE EQ Capacitor (HF Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (DC Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TxMargin (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+/* enum: TxSwing (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+/* enum: De-emphasis coefficient C(-1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+/* enum: De-emphasis coefficient C(0) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+/* enum: De-emphasis coefficient C(+1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
+ */
+#define MC_CMD_LICENSING 0xf3
+#undef MC_CMD_0xf3_PRIVILEGE_CTG
+
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+#undef MC_CMD_0xd0_PRIVILEGE_CTG
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
+
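+/* Usage sketch (illustrative only, not part of the MCDI definitions): 64-bit
+ * fields such as LICENSED_APPS are split into LO/HI dwords at consecutive
+ * offsets. Assuming <stdint.h>, little-endian MCDI dword encoding and the
+ * hypothetical mcdi_dword() helper sketched above for MC_CMD_GET_CLOCK, the
+ * application mask can be reassembled like this:
+ *
+ *	uint64_t apps;
+ *
+ *	apps = (uint64_t)mcdi_dword(resp,
+ *	    MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST) |
+ *	    ((uint64_t)mcdi_dword(resp,
+ *	    MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST) << 32);
+ *
+ * where resp points at the MC_CMD_LICENSING_V3_OUT_LEN (88) byte response.
+ */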
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+#undef MC_CMD_0xd1_PRIVILEGE_CTG
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+/* type of license (eg 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
+#undef MC_CMD_0xf4_PRIVILEGE_CTG
+
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.) Not used for V3 licensing
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#undef MC_CMD_0xf5_PRIVILEGE_CTG
+
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+#undef MC_CMD_0xd2_PRIVILEGE_CTG
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+#undef MC_CMD_0xd3_PRIVILEGE_CTG
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+#undef MC_CMD_0xf6_PRIVILEGE_CTG
+
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+/* flag */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+#undef MC_CMD_0xd4_PRIVILEGE_CTG
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 72
+/* application ID expressed as a single bit mask */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 4
+/* challenge for validation */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 72
+/* application expiry time */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 0
+/* application expiry units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 4
+/* enum: expiry units are accounting units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* validation response to challenge */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 64
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_MASK_FEATURES
+ * Mask features - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+#undef MC_CMD_0xd5_PRIVILEGE_CTG
+
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
+/* mask to be applied to features to be changed */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+/* whether to turn on or turn off the masked features */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+/* enum: turn the features off */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+/* enum: turn the features back on */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure RX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+#undef MC_CMD_0xf7_PRIVILEGE_CTG
+
+#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
+
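+/*
+ * Illustrative sketch: the FLAGS dword is assembled from the ENABLE and
+ * PROMISCUOUS bit positions above, followed by the fixed-offset queue, mode
+ * and RSS-context dwords. Assumes the hypothetical mcdi_rpc() and
+ * mcdi_put_dword() helpers sketched under
+ * MC_CMD_GET_LICENSED_V3_FEATURE_STATES.
+ */
+#if 0	/* illustrative only, not built */
+#include <stdint.h>
+#include <stddef.h>
+extern int mcdi_rpc(unsigned int, const void *, size_t, void *, size_t);
+extern void mcdi_put_dword(uint8_t *, unsigned int, uint32_t);
+
+int
+set_port_sniff_simple(uint32_t rxq, int enable, int promiscuous)
+{
+	uint8_t in[MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN];
+	uint32_t flags = 0;
+
+	if (enable)
+		flags |= 1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN;
+	if (promiscuous)
+		flags |= 1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN;
+
+	mcdi_put_dword(in, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST, flags);
+	mcdi_put_dword(in, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST, rxq);
+	mcdi_put_dword(in, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST,
+	    MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
+	/* no RSS context in SIMPLE mode; 0xFFFFFFFF is never a valid handle */
+	mcdi_put_dword(in, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST,
+	    0xffffffff);
+
+	return (mcdi_rpc(MC_CMD_SET_PORT_SNIFF_CONFIG, in, sizeof (in),
+	    NULL, 0));
+}
+#endif
+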
+
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current RX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+#undef MC_CMD_0xf8_PRIVILEGE_CTG
+
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+#undef MC_CMD_0xf9_PRIVILEGE_CTG
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+/* the type of configuration setting to change */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
+
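+/*
+ * Illustrative sketch: a single-VALUE request uses the LEN(num) macro with
+ * num = 1, i.e. 12 bytes in total. For example, TYPE may be
+ * TXQ_MCAST_UDP_DST_LOOKUP_EN with ENTITY set to a TX queue handle and
+ * VALUE set to a boolean. Assumes the hypothetical mcdi_rpc() and
+ * mcdi_put_dword() helpers sketched earlier.
+ */
+#if 0	/* illustrative only, not built */
+#include <stdint.h>
+#include <stddef.h>
+extern int mcdi_rpc(unsigned int, const void *, size_t, void *, size_t);
+extern void mcdi_put_dword(uint8_t *, unsigned int, uint32_t);
+
+int
+set_parser_disp_config(uint32_t type, uint32_t entity, uint32_t value)
+{
+	uint8_t in[MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1)];
+
+	mcdi_put_dword(in, MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST, type);
+	mcdi_put_dword(in, MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST,
+	    entity);
+	mcdi_put_dword(in, MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST, value);
+
+	return (mcdi_rpc(MC_CMD_SET_PARSER_DISP_CONFIG, in, sizeof (in),
+	    NULL, 0));
+}
+#endif
+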
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+#undef MC_CMD_0xfa_PRIVILEGE_CTG
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+#undef MC_CMD_0xfb_PRIVILEGE_CTG
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+#undef MC_CMD_0xfc_PRIVILEGE_CTG
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+#undef MC_CMD_0xfe_PRIVILEGE_CTG
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+/* The maximum number of VFs the device can expose in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+
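+/*
+ * Illustrative sketch: the response is seven little-endian dwords at the
+ * fixed offsets above; only the first three are decoded here, the per-PF/VF
+ * vector counts are read the same way. Assumes the hypothetical mcdi_rpc()
+ * and mcdi_get_dword() helpers sketched earlier.
+ */
+#if 0	/* illustrative only, not built */
+#include <stdint.h>
+#include <stddef.h>
+extern int mcdi_rpc(unsigned int, const void *, size_t, void *, size_t);
+extern uint32_t mcdi_get_dword(const uint8_t *, unsigned int);
+
+int
+get_pcie_resource_info(uint32_t *max_pfs, uint32_t *max_vfs,
+    uint32_t *max_vectors)
+{
+	uint8_t out[MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN];
+	int rc;
+
+	rc = mcdi_rpc(MC_CMD_GET_PCIE_RESOURCE_INFO, NULL, 0,
+	    out, sizeof (out));
+	if (rc != 0)
+		return (rc);
+
+	*max_pfs = mcdi_get_dword(out,
+	    MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST);
+	*max_vfs = mcdi_get_dword(out,
+	    MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST);
+	*max_vectors = mcdi_get_dword(out,
+	    MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST);
+	return (0);
+}
+#endif
+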
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+#undef MC_CMD_0xff_PRIVILEGE_CTG
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+#undef MC_CMD_0x100_PRIVILEGE_CTG
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define MC_CMD_READ_ATB_IN_LEN 16
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define MC_CMD_READ_ATB_OUT_LEN 4
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+#undef MC_CMD_0x59_PRIVILEGE_CTG
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+
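+/*
+ * Illustrative sketch: each workaround is a single bit, so testing for, say,
+ * the BUG35388 workaround is a mask of the ENABLED dword. Assumes the
+ * hypothetical mcdi_rpc() and mcdi_get_dword() helpers sketched earlier.
+ */
+#if 0	/* illustrative only, not built */
+#include <stdint.h>
+#include <stddef.h>
+extern int mcdi_rpc(unsigned int, const void *, size_t, void *, size_t);
+extern uint32_t mcdi_get_dword(const uint8_t *, unsigned int);
+
+int
+workaround_bug35388_enabled(void)
+{
+	uint8_t out[MC_CMD_GET_WORKAROUNDS_OUT_LEN];
+
+	if (mcdi_rpc(MC_CMD_GET_WORKAROUNDS, NULL, 0, out, sizeof (out)) != 0)
+		return (0);
+
+	return ((mcdi_get_dword(out, MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST) &
+	    MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) != 0);
+}
+#endif
+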
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+#undef MC_CMD_0x5a_PRIVILEGE_CTG
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows the TX packets' source MAC address to be set to any arbitrary
+ * MAC address.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, always all the privileges are reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+
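+/*
+ * Illustrative sketch: reading a PF's privilege mask without changing it.
+ * The FUNCTION dword packs the PF index in its low 16 bits and VF_NULL in
+ * its high 16 bits (so PF 0 encodes as 0xFFFF0000), and DO_CHANGE is left
+ * clear so NEW_MASK is ignored. Assumes the hypothetical mcdi_rpc(),
+ * mcdi_put_dword() and mcdi_get_dword() helpers sketched earlier.
+ */
+#if 0	/* illustrative only, not built */
+#include <stdint.h>
+#include <stddef.h>
+extern int mcdi_rpc(unsigned int, const void *, size_t, void *, size_t);
+extern void mcdi_put_dword(uint8_t *, unsigned int, uint32_t);
+extern uint32_t mcdi_get_dword(const uint8_t *, unsigned int);
+
+int
+privilege_mask_read_pf(uint16_t pf, uint32_t *maskp)
+{
+	uint8_t in[MC_CMD_PRIVILEGE_MASK_IN_LEN];
+	uint8_t out[MC_CMD_PRIVILEGE_MASK_OUT_LEN];
+	int rc;
+
+	mcdi_put_dword(in, MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST,
+	    ((uint32_t)pf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN) |
+	    ((uint32_t)MC_CMD_PRIVILEGE_MASK_IN_VF_NULL <<
+	    MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN));
+	mcdi_put_dword(in, MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST, 0);
+
+	rc = mcdi_rpc(MC_CMD_PRIVILEGE_MASK, in, sizeof (in),
+	    out, sizeof (out));
+	if (rc == 0)
+		*maskp = mcdi_get_dword(out,
+		    MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST);
+	return (rc);
+}
+#endif
+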
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+#undef MC_CMD_0x5c_PRIVILEGE_CTG
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
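+/*
+ * Illustrative sketch: using DO_NOT_CHANGE to read a VF's current link state
+ * mode without modifying it. Assumes the hypothetical mcdi_rpc(),
+ * mcdi_put_dword() and mcdi_get_dword() helpers sketched earlier.
+ */
+#if 0	/* illustrative only, not built */
+#include <stdint.h>
+#include <stddef.h>
+extern int mcdi_rpc(unsigned int, const void *, size_t, void *, size_t);
+extern void mcdi_put_dword(uint8_t *, unsigned int, uint32_t);
+extern uint32_t mcdi_get_dword(const uint8_t *, unsigned int);
+
+int
+link_state_mode_read(uint16_t pf, uint16_t vf, uint32_t *modep)
+{
+	uint8_t in[MC_CMD_LINK_STATE_MODE_IN_LEN];
+	uint8_t out[MC_CMD_LINK_STATE_MODE_OUT_LEN];
+	int rc;
+
+	mcdi_put_dword(in, MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST,
+	    ((uint32_t)pf << MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN) |
+	    ((uint32_t)vf << MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN));
+	mcdi_put_dword(in, MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST,
+	    MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
+
+	rc = mcdi_rpc(MC_CMD_LINK_STATE_MODE, in, sizeof (in),
+	    out, sizeof (out));
+	if (rc == 0)
+		*modep = mcdi_get_dword(out,
+		    MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST);
+	return (rc);
+}
+#endif
+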
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+#undef MC_CMD_0x101_PRIVILEGE_CTG
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+/* Maximum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+#undef MC_CMD_0x102_PRIVILEGE_CTG
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+#undef MC_CMD_0x60_PRIVILEGE_CTG
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+#undef MC_CMD_0x103_PRIVILEGE_CTG
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+/* Data */
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
+
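+/*
+ * Illustrative sketch: reading an arbitrary region by issuing the command
+ * repeatedly, since at most DATA_MAXNUM (252) bytes fit in one MCDI
+ * response. Assumes the hypothetical mcdi_rpc() and mcdi_put_dword()
+ * helpers sketched earlier.
+ */
+#if 0	/* illustrative only, not built */
+#include <stdint.h>
+#include <stddef.h>
+#include <string.h>
+extern int mcdi_rpc(unsigned int, const void *, size_t, void *, size_t);
+extern void mcdi_put_dword(uint8_t *, unsigned int, uint32_t);
+
+int
+xpm_read(uint32_t addr, uint8_t *data, size_t len)
+{
+	uint8_t in[MC_CMD_XPM_READ_BYTES_IN_LEN];
+	uint8_t out[MC_CMD_XPM_READ_BYTES_OUT_LENMAX];
+	size_t done = 0;
+	int rc;
+
+	while (done < len) {
+		size_t chunk = len - done;
+
+		if (chunk > MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM)
+			chunk = MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM;
+
+		mcdi_put_dword(in, MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST,
+		    addr + (uint32_t)done);
+		mcdi_put_dword(in, MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST,
+		    (uint32_t)chunk);
+
+		rc = mcdi_rpc(MC_CMD_XPM_READ_BYTES, in, sizeof (in),
+		    out, MC_CMD_XPM_READ_BYTES_OUT_LEN(chunk));
+		if (rc != 0)
+			return (rc);
+
+		(void) memcpy(data + done,
+		    out + MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST, chunk);
+		done += chunk;
+	}
+	return (0);
+}
+#endif
+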
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+#undef MC_CMD_0x104_PRIVILEGE_CTG
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+/* Start address (byte) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+/* Data */
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+#undef MC_CMD_0x105_PRIVILEGE_CTG
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+/* Sector size */
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+/* Sector type */
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+/* Sector data */
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+#undef MC_CMD_0x106_PRIVILEGE_CTG
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space available). If 0, only one write attempt is
+ * made. Note that uncorrectable errors are unlikely, thanks to the XPM
+ * self-repair mechanism.
+ */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+/* Sector data */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+#undef MC_CMD_0x107_PRIVILEGE_CTG
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+#undef MC_CMD_0x108_PRIVILEGE_CTG
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+/* Total number of bad (non-blank) locations */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
+ * into the MCDI response)
+ */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
+
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+#undef MC_CMD_0x109_PRIVILEGE_CTG
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+#undef MC_CMD_0x10a_PRIVILEGE_CTG
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+#undef MC_CMD_0x10b_PRIVILEGE_CTG
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EXEC_SIGNED
+ * Check the CMAC of the contents of IMEM and DMEM against the value supplied
+ * and if correct begin execution from the start of IMEM. The caller supplies a
+ * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
+ * computation runs from the start of IMEM, and from the start of DMEM + 16k,
+ * to match flash booting. The command will respond with EINVAL if the CMAC
+ * does match, otherwise it will respond with success before it jumps to IMEM.
+ */
+#define MC_CMD_EXEC_SIGNED 0x10c
+#undef MC_CMD_0x10c_PRIVILEGE_CTG
+
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_EXEC_SIGNED_IN msgrequest */
+#define MC_CMD_EXEC_SIGNED_IN_LEN 28
+/* the length of code to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+/* the length of data to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+/* the XPM sector containing the key to use */
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+/* the expected CMAC value */
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
+
+/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
+#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PREPARE_SIGNED
+ * Prepare to upload a signed image. This will scrub the specified length of
+ * the data region, which must be at least as large as the DATALEN supplied to
+ * MC_CMD_EXEC_SIGNED.
+ */
+#define MC_CMD_PREPARE_SIGNED 0x10d
+#undef MC_CMD_0x10d_PRIVILEGE_CTG
+
+#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
+#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
+/* the length of data area to clear */
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+
+/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
+#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_SECURITY_RULE
+ * Set blacklist and/or whitelist action for a particular match criteria.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SET_SECURITY_RULE 0x10f
+#undef MC_CMD_0x10f_PRIVILEGE_CTG
+
+#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SECURITY_RULE_IN msgrequest */
+#define MC_CMD_SET_SECURITY_RULE_IN_LEN 92
+/* fields to include in match criteria */
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_LBN 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_LBN 2
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_LBN 3
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_LBN 4
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_LBN 5
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_LBN 10
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_LBN 11
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_LBN 12
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_LBN 13
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_LBN 14
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_WIDTH 1
+/* remote MAC address to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_OFST 4
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_LEN 6
+/* remote port to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_OFST 10
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_LEN 2
+/* local MAC address to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_OFST 12
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_LEN 6
+/* local port to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_OFST 18
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_OFST 20
+#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_OFST 22
+#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_OFST 24
+#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_OFST 26
+#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_LEN 2
+/* Physical port to match (as little-endian 32-bit value) */
+#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_OFST 28
+/* Reserved; set to 0 */
+#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_OFST 32
+/* remote IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_OFST 36
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_LEN 16
+/* local IP address to match (as bytes in network order; set last 12 bytes to 0
+ * for IPv4 address)
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_OFST 52
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_LEN 16
+/* remote subnet ID to match (as little-endian 32-bit value); note that remote
+ * subnets are matched by mapping the remote IP address to a "subnet ID" via a
+ * data structure which must already have been configured using
+ * MC_CMD_SUBNET_MAP_SET_NODE appropriately
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_OFST 68
+/* remote portrange ID to match (as little-endian 32-bit value); note that
+ * remote port ranges are matched by mapping the remote port to a "portrange
+ * ID" via a data structure which must already have been configured using
+ * MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_OFST 72
+/* local portrange ID to match (as little-endian 32-bit value); note that local
+ * port ranges are matched by mapping the local port to a "portrange ID" via a
+ * data structure which must already have been configured using
+ * MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_OFST 76
+/* set the action for transmitted packets matching this rule */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST 80
+/* enum: make no decision */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0
+/* enum: decide to accept the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1
+/* enum: decide to drop the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2
+/* enum: do not change the current TX action */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff
+/* set the action for received packets matching this rule */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST 84
+/* enum: make no decision */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0
+/* enum: decide to accept the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1
+/* enum: decide to drop the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2
+/* enum: do not change the current RX action */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff
+/* counter ID to associate with this rule; IDs are allocated using
+ * MC_CMD_SECURITY_RULE_COUNTER_ALLOC
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST 88
+/* enum: special value for the null counter ID */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0
+
+/* MC_CMD_SET_SECURITY_RULE_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 28
+/* new reference count for uses of counter ID */
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_OFST 0
+/* constructed match bits for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_OFST 4
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_LEN 12
+/* constructed discriminator bits for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_OFST 16
+/* base location for probes for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_OFST 20
+/* step for probes for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_OFST 24
+
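+/*
+ * Illustrative sketch of a minimal rule: blacklist received packets from a
+ * single remote IPv4 address. Only the REMOTE_IP bit is set in MATCH_FIELDS;
+ * all other match fields are left zero, the TX action is NONE and the rule
+ * uses the caller's counter ID (or COUNTER_ID_NONE). Assumes the
+ * hypothetical mcdi_rpc() and mcdi_put_dword() helpers sketched earlier;
+ * note that the command itself is provisional, as described above.
+ */
+#if 0	/* illustrative only, not built */
+#include <stdint.h>
+#include <stddef.h>
+#include <string.h>
+extern int mcdi_rpc(unsigned int, const void *, size_t, void *, size_t);
+extern void mcdi_put_dword(uint8_t *, unsigned int, uint32_t);
+
+int
+blacklist_remote_ipv4(const uint8_t ip4[4], uint32_t counter_id)
+{
+	uint8_t in[MC_CMD_SET_SECURITY_RULE_IN_LEN];
+	uint8_t out[MC_CMD_SET_SECURITY_RULE_OUT_LEN];
+
+	(void) memset(in, 0, sizeof (in));
+
+	mcdi_put_dword(in, MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST,
+	    1u << MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN);
+	/* IPv4 address in the first 4 bytes; the last 12 bytes stay zero */
+	(void) memcpy(in + MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_OFST, ip4, 4);
+
+	mcdi_put_dword(in, MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST,
+	    MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE);
+	mcdi_put_dword(in, MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST,
+	    MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST);
+	mcdi_put_dword(in, MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST,
+	    counter_id);
+
+	return (mcdi_rpc(MC_CMD_SET_SECURITY_RULE, in, sizeof (in),
+	    out, sizeof (out)));
+}
+#endif
+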
+
+/***********************************/
+/* MC_CMD_RESET_SECURITY_RULES
+ * Reset all blacklist and whitelist actions for a particular physical port, or
+ * all ports. (Medford-only; for use by SolarSecure apps, not directly by
+ * drivers. See SF-114946-SW.) NOTE - this message definition is provisional.
+ * It has not yet been used in any released code and may change during
+ * development. This note will be removed once it is regarded as stable.
+ */
+#define MC_CMD_RESET_SECURITY_RULES 0x110
+#undef MC_CMD_0x110_PRIVILEGE_CTG
+
+#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RESET_SECURITY_RULES_IN msgrequest */
+#define MC_CMD_RESET_SECURITY_RULES_IN_LEN 4
+/* index of physical port to reset (or ALL_PHYSICAL_PORTS to reset all) */
+#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_OFST 0
+/* enum: special value to reset all physical ports */
+#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff
+
+/* MC_CMD_RESET_SECURITY_RULES_OUT msgresponse */
+#define MC_CMD_RESET_SECURITY_RULES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SECURITY_RULESET_VERSION
+ * Return a large hash value representing a "version" of the complete set of
+ * currently active blacklist / whitelist rules and associated data structures.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION 0x111
+#undef MC_CMD_0x111_PRIVILEGE_CTG
+
+#define MC_CMD_0x111_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_SECURITY_RULESET_VERSION_IN msgrequest */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_SECURITY_RULESET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMIN 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMAX 252
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LEN(num) (0+1*(num))
+/* Opaque hash value; length may vary depending on the hash scheme used */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_LEN 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MINNUM 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC
+ * Allocate counters for use with blacklist / whitelist rules. (Medford-only;
+ * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ * NOTE - this message definition is provisional. It has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC 0x112
+#undef MC_CMD_0x112_PRIVILEGE_CTG
+
+#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN 4
+/* the number of new counter IDs to request */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_OFST 0
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LEN(num) (4+4*(num))
+/* the number of new counter IDs allocated (may be less than the number
+ * requested if resources are unavailable)
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_OFST 0
+/* new counter ID(s) */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE
+ * Free counters previously allocated for use with blacklist / whitelist
+ * rules. (Medford-only;
+ * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ * NOTE - this message definition is provisional. It has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE 0x113
+#undef MC_CMD_0x113_PRIVILEGE_CTG
+
+#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
+/* the number of counter IDs to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_OFST 0
+/* the counter ID(s) to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MAXNUM 62
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUBNET_MAP_SET_NODE
+ * Atomically update a trie node in the map of subnets to subnet IDs. The
+ * constants in the descriptions of the fields of this message may be retrieved
+ * by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO. (Medford-
+ * only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SUBNET_MAP_SET_NODE 0x114
+#undef MC_CMD_0x114_PRIVILEGE_CTG
+
+#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SUBNET_MAP_SET_NODE_IN msgrequest */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMIN 6
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMAX 252
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LEN(num) (4+2*(num))
+/* node to update in the range 0 .. SUBNET_MAP_NUM_NODES-1 */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_OFST 0
+/* SUBNET_MAP_NUM_ENTRIES_PER_NODE new entries; each entry is either a pointer
+ * to the next node, expressed as an offset in the trie memory (i.e. node ID
+ * multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE), or a leaf value in the range
+ * SUBNET_ID_MIN .. SUBNET_ID_MAX
+ */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_OFST 4
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_LEN 2
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MINNUM 1
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MAXNUM 124
+
+/* MC_CMD_SUBNET_MAP_SET_NODE_OUT msgresponse */
+#define MC_CMD_SUBNET_MAP_SET_NODE_OUT_LEN 0
+
+/* PORTRANGE_TREE_ENTRY structuredef */
+#define PORTRANGE_TREE_ENTRY_LEN 4
+/* key for branch nodes (<= key takes left branch, > key takes right branch),
+ * or magic value for leaf nodes
+ */
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_OFST 0
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LEN 2
+#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN 0
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_WIDTH 16
+/* final portrange ID for leaf nodes (don't care for branch nodes) */
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_OFST 2
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LEN 2
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LBN 16
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_WIDTH 16
+
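+/*
+ * Illustrative sketch: packing the two 16-bit sub-fields of a tree entry
+ * into the 32-bit word carried by the SET_TREE commands below, using the
+ * LBN values above. These helpers are hypothetical, not part of the
+ * protocol definition.
+ */
+#if 0	/* illustrative only, not built */
+#include <stdint.h>
+
+/* branch node: <= key takes the left branch, > key takes the right branch */
+static uint32_t
+portrange_branch_entry(uint16_t key)
+{
+	return ((uint32_t)key << PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN);
+}
+
+/* leaf node: the magic key plus the final portrange ID */
+static uint32_t
+portrange_leaf_entry(uint16_t portrange_id)
+{
+	return (((uint32_t)PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY <<
+	    PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN) |
+	    ((uint32_t)portrange_id <<
+	    PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LBN));
+}
+#endif
+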
+
+/***********************************/
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping remote port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE 0x115
+#undef MC_CMD_0x115_PRIVILEGE_CTG
+
+#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping local port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE 0x116
+#undef MC_CMD_0x116_PRIVILEGE_CTG
+
+#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+#endif /* _SIENA_MC_DRIVER_PCOL_H */
+/*! \cidoxg_end */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_regs_pci.h b/usr/src/uts/common/io/sfxge/common/efx_regs_pci.h
new file mode 100644
index 0000000000..7372efe62d
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_regs_pci.h
@@ -0,0 +1,2356 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_REGS_PCI_H
+#define _SYS_EFX_REGS_PCI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PC_VEND_ID_REG(16bit):
+ * Vendor ID register
+ */
+
+#define PCR_AZ_VEND_ID_REG 0x00000000
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_VEND_ID_LBN 0
+#define PCRF_AZ_VEND_ID_WIDTH 16
+
+
+/*
+ * PC_DEV_ID_REG(16bit):
+ * Device ID register
+ */
+
+#define PCR_AZ_DEV_ID_REG 0x00000002
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_DEV_ID_LBN 0
+#define PCRF_AZ_DEV_ID_WIDTH 16
+
+
+/*
+ * PC_CMD_REG(16bit):
+ * Command register
+ */
+
+#define PCR_AZ_CMD_REG 0x00000004
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INTX_DIS_LBN 10
+#define PCRF_AZ_INTX_DIS_WIDTH 1
+#define PCRF_AZ_FB2B_EN_LBN 9
+#define PCRF_AZ_FB2B_EN_WIDTH 1
+#define PCRF_AZ_SERR_EN_LBN 8
+#define PCRF_AZ_SERR_EN_WIDTH 1
+#define PCRF_AZ_IDSEL_CTL_LBN 7
+#define PCRF_AZ_IDSEL_CTL_WIDTH 1
+#define PCRF_AZ_PERR_EN_LBN 6
+#define PCRF_AZ_PERR_EN_WIDTH 1
+#define PCRF_AZ_VGA_PAL_SNP_LBN 5
+#define PCRF_AZ_VGA_PAL_SNP_WIDTH 1
+#define PCRF_AZ_MWI_EN_LBN 4
+#define PCRF_AZ_MWI_EN_WIDTH 1
+#define PCRF_AZ_SPEC_CYC_LBN 3
+#define PCRF_AZ_SPEC_CYC_WIDTH 1
+#define PCRF_AZ_MST_EN_LBN 2
+#define PCRF_AZ_MST_EN_WIDTH 1
+#define PCRF_AZ_MEM_EN_LBN 1
+#define PCRF_AZ_MEM_EN_WIDTH 1
+#define PCRF_AZ_IO_EN_LBN 0
+#define PCRF_AZ_IO_EN_WIDTH 1
+
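For orientation: every PCRF_* field in this header follows one convention, where _LBN is the least significant bit number of the field within its register and _WIDTH is the field size in bits. A minimal sketch of extracting a field from a raw configuration-space value under that convention follows; pci_cfg_field() is a hypothetical helper for illustration, not an API of this driver.

	static uint32_t
	pci_cfg_field(uint32_t raw, unsigned int lbn, unsigned int width)
	{
		/* Build a mask of `width' bits, guarding the full-word case. */
		uint32_t mask = (width < 32) ? ((1u << width) - 1) : 0xffffffffu;

		return ((raw >> lbn) & mask);
	}

	/* For example, testing bus-master enable in a PC_CMD_REG value: */
	/*	pci_cfg_field(cmd, PCRF_AZ_MST_EN_LBN, PCRF_AZ_MST_EN_WIDTH) != 0 */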
+
+/*
+ * PC_STAT_REG(16bit):
+ * Status register
+ */
+
+#define PCR_AZ_STAT_REG 0x00000006
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_DET_PERR_LBN 15
+#define PCRF_AZ_DET_PERR_WIDTH 1
+#define PCRF_AZ_SIG_SERR_LBN 14
+#define PCRF_AZ_SIG_SERR_WIDTH 1
+#define PCRF_AZ_GOT_MABRT_LBN 13
+#define PCRF_AZ_GOT_MABRT_WIDTH 1
+#define PCRF_AZ_GOT_TABRT_LBN 12
+#define PCRF_AZ_GOT_TABRT_WIDTH 1
+#define PCRF_AZ_SIG_TABRT_LBN 11
+#define PCRF_AZ_SIG_TABRT_WIDTH 1
+#define PCRF_AZ_DEVSEL_TIM_LBN 9
+#define PCRF_AZ_DEVSEL_TIM_WIDTH 2
+#define PCRF_AZ_MDAT_PERR_LBN 8
+#define PCRF_AZ_MDAT_PERR_WIDTH 1
+#define PCRF_AZ_FB2B_CAP_LBN 7
+#define PCRF_AZ_FB2B_CAP_WIDTH 1
+#define PCRF_AZ_66MHZ_CAP_LBN 5
+#define PCRF_AZ_66MHZ_CAP_WIDTH 1
+#define PCRF_AZ_CAP_LIST_LBN 4
+#define PCRF_AZ_CAP_LIST_WIDTH 1
+#define PCRF_AZ_INTX_STAT_LBN 3
+#define PCRF_AZ_INTX_STAT_WIDTH 1
+
+
+/*
+ * PC_REV_ID_REG(8bit):
+ * Class code & revision ID register
+ */
+
+#define PCR_AZ_REV_ID_REG 0x00000008
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_REV_ID_LBN 0
+#define PCRF_AZ_REV_ID_WIDTH 8
+
+
+/*
+ * PC_CC_REG(24bit):
+ * Class code register
+ */
+
+#define PCR_AZ_CC_REG 0x00000009
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BASE_CC_LBN 16
+#define PCRF_AZ_BASE_CC_WIDTH 8
+#define PCRF_AZ_SUB_CC_LBN 8
+#define PCRF_AZ_SUB_CC_WIDTH 8
+#define PCRF_AZ_PROG_IF_LBN 0
+#define PCRF_AZ_PROG_IF_WIDTH 8
+
+
+/*
+ * PC_CACHE_LSIZE_REG(8bit):
+ * Cache line size
+ */
+
+#define PCR_AZ_CACHE_LSIZE_REG 0x0000000c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_CACHE_LSIZE_LBN 0
+#define PCRF_AZ_CACHE_LSIZE_WIDTH 8
+
+
+/*
+ * PC_MST_LAT_REG(8bit):
+ * Master latency timer register
+ */
+
+#define PCR_AZ_MST_LAT_REG 0x0000000d
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MST_LAT_LBN 0
+#define PCRF_AZ_MST_LAT_WIDTH 8
+
+
+/*
+ * PC_HDR_TYPE_REG(8bit):
+ * Header type register
+ */
+
+#define PCR_AZ_HDR_TYPE_REG 0x0000000e
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MULT_FUNC_LBN 7
+#define PCRF_AZ_MULT_FUNC_WIDTH 1
+#define PCRF_AZ_TYPE_LBN 0
+#define PCRF_AZ_TYPE_WIDTH 7
+
+
+/*
+ * PC_BIST_REG(8bit):
+ * BIST register
+ */
+
+#define PCR_AZ_BIST_REG 0x0000000f
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BIST_LBN 0
+#define PCRF_AZ_BIST_WIDTH 8
+
+
+/*
+ * PC_BAR0_REG(32bit):
+ * Primary function base address register 0
+ */
+
+#define PCR_AZ_BAR0_REG 0x00000010
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR0_LBN 4
+#define PCRF_AZ_BAR0_WIDTH 28
+#define PCRF_AZ_BAR0_PREF_LBN 3
+#define PCRF_AZ_BAR0_PREF_WIDTH 1
+#define PCRF_AZ_BAR0_TYPE_LBN 1
+#define PCRF_AZ_BAR0_TYPE_WIDTH 2
+#define PCRF_AZ_BAR0_IOM_LBN 0
+#define PCRF_AZ_BAR0_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR1_REG(32bit):
+ * Primary function base address register 1. BAR1 is not implemented, so it is read-only.
+ */
+
+#define PCR_DZ_BAR1_REG 0x00000014
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_BAR1_LBN 0
+#define PCRF_DZ_BAR1_WIDTH 32
+
+
+/*
+ * PC_BAR2_LO_REG(32bit):
+ * Primary function base address register 2 low bits
+ */
+
+#define PCR_AZ_BAR2_LO_REG 0x00000018
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR2_LO_LBN 4
+#define PCRF_AZ_BAR2_LO_WIDTH 28
+#define PCRF_AZ_BAR2_PREF_LBN 3
+#define PCRF_AZ_BAR2_PREF_WIDTH 1
+#define PCRF_AZ_BAR2_TYPE_LBN 1
+#define PCRF_AZ_BAR2_TYPE_WIDTH 2
+#define PCRF_AZ_BAR2_IOM_LBN 0
+#define PCRF_AZ_BAR2_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR2_HI_REG(32bit):
+ * Primary function base address register 2 high bits
+ */
+
+#define PCR_AZ_BAR2_HI_REG 0x0000001c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR2_HI_LBN 0
+#define PCRF_AZ_BAR2_HI_WIDTH 32
+
+
+/*
+ * PC_BAR4_LO_REG(32bit):
+ * Primary function base address register 4 low bits
+ */
+
+#define PCR_CZ_BAR4_LO_REG 0x00000020
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_BAR4_LO_LBN 4
+#define PCRF_CZ_BAR4_LO_WIDTH 28
+#define PCRF_CZ_BAR4_PREF_LBN 3
+#define PCRF_CZ_BAR4_PREF_WIDTH 1
+#define PCRF_CZ_BAR4_TYPE_LBN 1
+#define PCRF_CZ_BAR4_TYPE_WIDTH 2
+#define PCRF_CZ_BAR4_IOM_LBN 0
+#define PCRF_CZ_BAR4_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR4_HI_REG(32bit):
+ * Primary function base address register 4 high bits
+ */
+
+#define PCR_CZ_BAR4_HI_REG 0x00000024
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_BAR4_HI_LBN 0
+#define PCRF_CZ_BAR4_HI_WIDTH 32
+
+
+/*
+ * PC_SS_VEND_ID_REG(16bit):
+ * Sub-system vendor ID register
+ */
+
+#define PCR_AZ_SS_VEND_ID_REG 0x0000002c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SS_VEND_ID_LBN 0
+#define PCRF_AZ_SS_VEND_ID_WIDTH 16
+
+
+/*
+ * PC_SS_ID_REG(16bit):
+ * Sub-system ID register
+ */
+
+#define PCR_AZ_SS_ID_REG 0x0000002e
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SS_ID_LBN 0
+#define PCRF_AZ_SS_ID_WIDTH 16
+
+
+/*
+ * PC_EXPROM_BAR_REG(32bit):
+ * Expansion ROM base address register
+ */
+
+#define PCR_AZ_EXPROM_BAR_REG 0x00000030
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_EXPROM_BAR_LBN 11
+#define PCRF_AZ_EXPROM_BAR_WIDTH 21
+#define PCRF_AB_EXPROM_MIN_SIZE_LBN 2
+#define PCRF_AB_EXPROM_MIN_SIZE_WIDTH 9
+#define PCRF_CZ_EXPROM_MIN_SIZE_LBN 1
+#define PCRF_CZ_EXPROM_MIN_SIZE_WIDTH 10
+#define PCRF_AB_EXPROM_FEATURE_ENABLE_LBN 1
+#define PCRF_AB_EXPROM_FEATURE_ENABLE_WIDTH 1
+#define PCRF_AZ_EXPROM_EN_LBN 0
+#define PCRF_AZ_EXPROM_EN_WIDTH 1
+
+
+/*
+ * PC_CAP_PTR_REG(8bit):
+ * Capability pointer register
+ */
+
+#define PCR_AZ_CAP_PTR_REG 0x00000034
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_CAP_PTR_LBN 0
+#define PCRF_AZ_CAP_PTR_WIDTH 8
+
+
+/*
+ * PC_INT_LINE_REG(8bit):
+ * Interrupt line register
+ */
+
+#define PCR_AZ_INT_LINE_REG 0x0000003c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INT_LINE_LBN 0
+#define PCRF_AZ_INT_LINE_WIDTH 8
+
+
+/*
+ * PC_INT_PIN_REG(8bit):
+ * Interrupt pin register
+ */
+
+#define PCR_AZ_INT_PIN_REG 0x0000003d
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INT_PIN_LBN 0
+#define PCRF_AZ_INT_PIN_WIDTH 8
+#define PCFE_DZ_INTPIN_INTD 4
+#define PCFE_DZ_INTPIN_INTC 3
+#define PCFE_DZ_INTPIN_INTB 2
+#define PCFE_DZ_INTPIN_INTA 1
+
+
+/*
+ * PC_PM_CAP_ID_REG(8bit):
+ * Power management capability ID
+ */
+
+#define PCR_AZ_PM_CAP_ID_REG 0x00000040
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_CAP_ID_LBN 0
+#define PCRF_AZ_PM_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_PM_NXT_PTR_REG(8bit):
+ * Power management next item pointer
+ */
+
+#define PCR_AZ_PM_NXT_PTR_REG 0x00000041
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_NXT_PTR_LBN 0
+#define PCRF_AZ_PM_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_PM_CAP_REG(16bit):
+ * Power management capabilities register
+ */
+
+#define PCR_AZ_PM_CAP_REG 0x00000042
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_PME_SUPT_LBN 11
+#define PCRF_AZ_PM_PME_SUPT_WIDTH 5
+#define PCRF_AZ_PM_D2_SUPT_LBN 10
+#define PCRF_AZ_PM_D2_SUPT_WIDTH 1
+#define PCRF_AZ_PM_D1_SUPT_LBN 9
+#define PCRF_AZ_PM_D1_SUPT_WIDTH 1
+#define PCRF_AZ_PM_AUX_CURR_LBN 6
+#define PCRF_AZ_PM_AUX_CURR_WIDTH 3
+#define PCRF_AZ_PM_DSI_LBN 5
+#define PCRF_AZ_PM_DSI_WIDTH 1
+#define PCRF_AZ_PM_PME_CLK_LBN 3
+#define PCRF_AZ_PM_PME_CLK_WIDTH 1
+#define PCRF_AZ_PM_PME_VER_LBN 0
+#define PCRF_AZ_PM_PME_VER_WIDTH 3
+
+
+/*
+ * PC_PM_CS_REG(16bit):
+ * Power management control & status register
+ */
+
+#define PCR_AZ_PM_CS_REG 0x00000044
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_PME_STAT_LBN 15
+#define PCRF_AZ_PM_PME_STAT_WIDTH 1
+#define PCRF_AZ_PM_DAT_SCALE_LBN 13
+#define PCRF_AZ_PM_DAT_SCALE_WIDTH 2
+#define PCRF_AZ_PM_DAT_SEL_LBN 9
+#define PCRF_AZ_PM_DAT_SEL_WIDTH 4
+#define PCRF_AZ_PM_PME_EN_LBN 8
+#define PCRF_AZ_PM_PME_EN_WIDTH 1
+#define PCRF_CZ_NO_SOFT_RESET_LBN 3
+#define PCRF_CZ_NO_SOFT_RESET_WIDTH 1
+#define PCRF_AZ_PM_PWR_ST_LBN 0
+#define PCRF_AZ_PM_PWR_ST_WIDTH 2
+
+
+/*
+ * PC_MSI_CAP_ID_REG(8bit):
+ * MSI capability ID
+ */
+
+#define PCR_AZ_MSI_CAP_ID_REG 0x00000050
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_CAP_ID_LBN 0
+#define PCRF_AZ_MSI_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_MSI_NXT_PTR_REG(8bit):
+ * MSI next item pointer
+ */
+
+#define PCR_AZ_MSI_NXT_PTR_REG 0x00000051
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_NXT_PTR_LBN 0
+#define PCRF_AZ_MSI_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_MSI_CTL_REG(16bit):
+ * MSI control register
+ */
+
+#define PCR_AZ_MSI_CTL_REG 0x00000052
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_64_EN_LBN 7
+#define PCRF_AZ_MSI_64_EN_WIDTH 1
+#define PCRF_AZ_MSI_MULT_MSG_EN_LBN 4
+#define PCRF_AZ_MSI_MULT_MSG_EN_WIDTH 3
+#define PCRF_AZ_MSI_MULT_MSG_CAP_LBN 1
+#define PCRF_AZ_MSI_MULT_MSG_CAP_WIDTH 3
+#define PCRF_AZ_MSI_EN_LBN 0
+#define PCRF_AZ_MSI_EN_WIDTH 1
+
+
+/*
+ * PC_MSI_ADR_LO_REG(32bit):
+ * MSI low 32 bits address register
+ */
+
+#define PCR_AZ_MSI_ADR_LO_REG 0x00000054
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_ADR_LO_LBN 2
+#define PCRF_AZ_MSI_ADR_LO_WIDTH 30
+
+
+/*
+ * PC_MSI_ADR_HI_REG(32bit):
+ * MSI high 32 bits address register
+ */
+
+#define PCR_AZ_MSI_ADR_HI_REG 0x00000058
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_ADR_HI_LBN 0
+#define PCRF_AZ_MSI_ADR_HI_WIDTH 32
+
+
+/*
+ * PC_MSI_DAT_REG(16bit):
+ * MSI data register
+ */
+
+#define PCR_AZ_MSI_DAT_REG 0x0000005c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_DAT_LBN 0
+#define PCRF_AZ_MSI_DAT_WIDTH 16
+
+
+/*
+ * PC_PCIE_CAP_LIST_REG(16bit):
+ * PCIe capability list register
+ */
+
+#define PCR_AB_PCIE_CAP_LIST_REG 0x00000060
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_PCIE_CAP_LIST_REG 0x00000070
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PCIE_NXT_PTR_LBN 8
+#define PCRF_AZ_PCIE_NXT_PTR_WIDTH 8
+#define PCRF_AZ_PCIE_CAP_ID_LBN 0
+#define PCRF_AZ_PCIE_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_PCIE_CAP_REG(16bit):
+ * PCIe capability register
+ */
+
+#define PCR_AB_PCIE_CAP_REG 0x00000062
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_PCIE_CAP_REG 0x00000072
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PCIE_INT_MSG_NUM_LBN 9
+#define PCRF_AZ_PCIE_INT_MSG_NUM_WIDTH 5
+#define PCRF_AZ_PCIE_SLOT_IMP_LBN 8
+#define PCRF_AZ_PCIE_SLOT_IMP_WIDTH 1
+#define PCRF_AZ_PCIE_DEV_PORT_TYPE_LBN 4
+#define PCRF_AZ_PCIE_DEV_PORT_TYPE_WIDTH 4
+#define PCRF_AZ_PCIE_CAP_VER_LBN 0
+#define PCRF_AZ_PCIE_CAP_VER_WIDTH 4
+
+
+/*
+ * PC_DEV_CAP_REG(32bit):
+ * PCIe device capabilities register
+ */
+
+#define PCR_AB_DEV_CAP_REG 0x00000064
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_CAP_REG 0x00000074
+/* sienaa0=pci_f0_config,hunta0=pci_f0_config */
+
+#define PCRF_CZ_CAP_FN_LEVEL_RESET_LBN 28
+#define PCRF_CZ_CAP_FN_LEVEL_RESET_WIDTH 1
+#define PCRF_AZ_CAP_SLOT_PWR_SCL_LBN 26
+#define PCRF_AZ_CAP_SLOT_PWR_SCL_WIDTH 2
+#define PCRF_AZ_CAP_SLOT_PWR_VAL_LBN 18
+#define PCRF_AZ_CAP_SLOT_PWR_VAL_WIDTH 8
+#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_LBN 15
+#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_WIDTH 1
+#define PCRF_AB_PWR_IND_LBN 14
+#define PCRF_AB_PWR_IND_WIDTH 1
+#define PCRF_AB_ATTN_IND_LBN 13
+#define PCRF_AB_ATTN_IND_WIDTH 1
+#define PCRF_AB_ATTN_BUTTON_LBN 12
+#define PCRF_AB_ATTN_BUTTON_WIDTH 1
+#define PCRF_AZ_ENDPT_L1_LAT_LBN 9
+#define PCRF_AZ_ENDPT_L1_LAT_WIDTH 3
+#define PCRF_AZ_ENDPT_L0_LAT_LBN 6
+#define PCRF_AZ_ENDPT_L0_LAT_WIDTH 3
+#define PCRF_AZ_TAG_FIELD_LBN 5
+#define PCRF_AZ_TAG_FIELD_WIDTH 1
+#define PCRF_AZ_PHAN_FUNC_LBN 3
+#define PCRF_AZ_PHAN_FUNC_WIDTH 2
+#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_LBN 0
+#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_WIDTH 3
+
+
+/*
+ * PC_DEV_CTL_REG(16bit):
+ * PCIe device control register
+ */
+
+#define PCR_AB_DEV_CTL_REG 0x00000068
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_CTL_REG 0x00000078
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_FN_LEVEL_RESET_LBN 15
+#define PCRF_CZ_FN_LEVEL_RESET_WIDTH 1
+#define PCRF_AZ_MAX_RD_REQ_SIZE_LBN 12
+#define PCRF_AZ_MAX_RD_REQ_SIZE_WIDTH 3
+#define PCFE_AZ_MAX_RD_REQ_SIZE_4096 5
+#define PCFE_AZ_MAX_RD_REQ_SIZE_2048 4
+#define PCFE_AZ_MAX_RD_REQ_SIZE_1024 3
+#define PCFE_AZ_MAX_RD_REQ_SIZE_512 2
+#define PCFE_AZ_MAX_RD_REQ_SIZE_256 1
+#define PCFE_AZ_MAX_RD_REQ_SIZE_128 0
+#define PCRF_AZ_EN_NO_SNOOP_LBN 11
+#define PCRF_AZ_EN_NO_SNOOP_WIDTH 1
+#define PCRF_AZ_AUX_PWR_PM_EN_LBN 10
+#define PCRF_AZ_AUX_PWR_PM_EN_WIDTH 1
+#define PCRF_AZ_PHAN_FUNC_EN_LBN 9
+#define PCRF_AZ_PHAN_FUNC_EN_WIDTH 1
+#define PCRF_AB_DEV_CAP_REG_RSVD0_LBN 8
+#define PCRF_AB_DEV_CAP_REG_RSVD0_WIDTH 1
+#define PCRF_CZ_EXTENDED_TAG_EN_LBN 8
+#define PCRF_CZ_EXTENDED_TAG_EN_WIDTH 1
+#define PCRF_AZ_MAX_PAYL_SIZE_LBN 5
+#define PCRF_AZ_MAX_PAYL_SIZE_WIDTH 3
+#define PCFE_AZ_MAX_PAYL_SIZE_4096 5
+#define PCFE_AZ_MAX_PAYL_SIZE_2048 4
+#define PCFE_AZ_MAX_PAYL_SIZE_1024 3
+#define PCFE_AZ_MAX_PAYL_SIZE_512 2
+#define PCFE_AZ_MAX_PAYL_SIZE_256 1
+#define PCFE_AZ_MAX_PAYL_SIZE_128 0
+#define PCRF_AZ_EN_RELAX_ORDER_LBN 4
+#define PCRF_AZ_EN_RELAX_ORDER_WIDTH 1
+#define PCRF_AZ_UNSUP_REQ_RPT_EN_LBN 3
+#define PCRF_AZ_UNSUP_REQ_RPT_EN_WIDTH 1
+#define PCRF_AZ_FATAL_ERR_RPT_EN_LBN 2
+#define PCRF_AZ_FATAL_ERR_RPT_EN_WIDTH 1
+#define PCRF_AZ_NONFATAL_ERR_RPT_EN_LBN 1
+#define PCRF_AZ_NONFATAL_ERR_RPT_EN_WIDTH 1
+#define PCRF_AZ_CORR_ERR_RPT_EN_LBN 0
+#define PCRF_AZ_CORR_ERR_RPT_EN_WIDTH 1
+
+
+/*
+ * PC_DEV_STAT_REG(16bit):
+ * PCIe device status register
+ */
+
+#define PCR_AB_DEV_STAT_REG 0x0000006a
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_STAT_REG 0x0000007a
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_TRNS_PEND_LBN 5
+#define PCRF_AZ_TRNS_PEND_WIDTH 1
+#define PCRF_AZ_AUX_PWR_DET_LBN 4
+#define PCRF_AZ_AUX_PWR_DET_WIDTH 1
+#define PCRF_AZ_UNSUP_REQ_DET_LBN 3
+#define PCRF_AZ_UNSUP_REQ_DET_WIDTH 1
+#define PCRF_AZ_FATAL_ERR_DET_LBN 2
+#define PCRF_AZ_FATAL_ERR_DET_WIDTH 1
+#define PCRF_AZ_NONFATAL_ERR_DET_LBN 1
+#define PCRF_AZ_NONFATAL_ERR_DET_WIDTH 1
+#define PCRF_AZ_CORR_ERR_DET_LBN 0
+#define PCRF_AZ_CORR_ERR_DET_WIDTH 1
+
+
+/*
+ * PC_LNK_CAP_REG(32bit):
+ * PCIe link capabilities register
+ */
+
+#define PCR_AB_LNK_CAP_REG 0x0000006c
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_CAP_REG 0x0000007c
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PORT_NUM_LBN 24
+#define PCRF_AZ_PORT_NUM_WIDTH 8
+#define PCRF_DZ_ASPM_OPTIONALITY_CAP_LBN 22
+#define PCRF_DZ_ASPM_OPTIONALITY_CAP_WIDTH 1
+#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_LBN 21
+#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_WIDTH 1
+#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_LBN 20
+#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_WIDTH 1
+#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_LBN 19
+#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_WIDTH 1
+#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_LBN 18
+#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_WIDTH 1
+#define PCRF_AZ_DEF_L1_EXIT_LAT_LBN 15
+#define PCRF_AZ_DEF_L1_EXIT_LAT_WIDTH 3
+#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_LBN 12
+#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_WIDTH 3
+#define PCRF_AZ_AS_LNK_PM_SUPT_LBN 10
+#define PCRF_AZ_AS_LNK_PM_SUPT_WIDTH 2
+#define PCRF_AZ_MAX_LNK_WIDTH_LBN 4
+#define PCRF_AZ_MAX_LNK_WIDTH_WIDTH 6
+#define PCRF_AZ_MAX_LNK_SP_LBN 0
+#define PCRF_AZ_MAX_LNK_SP_WIDTH 4
+
+
+/*
+ * PC_LNK_CTL_REG(16bit):
+ * PCIe link control register
+ */
+
+#define PCR_AB_LNK_CTL_REG 0x00000070
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_CTL_REG 0x00000080
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_EXT_SYNC_LBN 7
+#define PCRF_AZ_EXT_SYNC_WIDTH 1
+#define PCRF_AZ_COMM_CLK_CFG_LBN 6
+#define PCRF_AZ_COMM_CLK_CFG_WIDTH 1
+#define PCRF_AB_LNK_CTL_REG_RSVD0_LBN 5
+#define PCRF_AB_LNK_CTL_REG_RSVD0_WIDTH 1
+#define PCRF_CZ_LNK_RETRAIN_LBN 5
+#define PCRF_CZ_LNK_RETRAIN_WIDTH 1
+#define PCRF_AZ_LNK_DIS_LBN 4
+#define PCRF_AZ_LNK_DIS_WIDTH 1
+#define PCRF_AZ_RD_COM_BDRY_LBN 3
+#define PCRF_AZ_RD_COM_BDRY_WIDTH 1
+#define PCRF_AZ_ACT_ST_LNK_PM_CTL_LBN 0
+#define PCRF_AZ_ACT_ST_LNK_PM_CTL_WIDTH 2
+
+
+/*
+ * PC_LNK_STAT_REG(16bit):
+ * PCIe link status register
+ */
+
+#define PCR_AB_LNK_STAT_REG 0x00000072
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_STAT_REG 0x00000082
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SLOT_CLK_CFG_LBN 12
+#define PCRF_AZ_SLOT_CLK_CFG_WIDTH 1
+#define PCRF_AZ_LNK_TRAIN_LBN 11
+#define PCRF_AZ_LNK_TRAIN_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_LBN 10
+#define PCRF_AB_TRAIN_ERR_WIDTH 1
+#define PCRF_AZ_LNK_WIDTH_LBN 4
+#define PCRF_AZ_LNK_WIDTH_WIDTH 6
+#define PCRF_AZ_LNK_SP_LBN 0
+#define PCRF_AZ_LNK_SP_WIDTH 4
+
+
+/*
+ * PC_SLOT_CAP_REG(32bit):
+ * PCIe slot capabilities register
+ */
+
+#define PCR_AB_SLOT_CAP_REG 0x00000074
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_SLOT_NUM_LBN 19
+#define PCRF_AB_SLOT_NUM_WIDTH 13
+#define PCRF_AB_SLOT_PWR_LIM_SCL_LBN 15
+#define PCRF_AB_SLOT_PWR_LIM_SCL_WIDTH 2
+#define PCRF_AB_SLOT_PWR_LIM_VAL_LBN 7
+#define PCRF_AB_SLOT_PWR_LIM_VAL_WIDTH 8
+#define PCRF_AB_SLOT_HP_CAP_LBN 6
+#define PCRF_AB_SLOT_HP_CAP_WIDTH 1
+#define PCRF_AB_SLOT_HP_SURP_LBN 5
+#define PCRF_AB_SLOT_HP_SURP_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_PRST_LBN 4
+#define PCRF_AB_SLOT_PWR_IND_PRST_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_IND_PRST_LBN 3
+#define PCRF_AB_SLOT_ATTN_IND_PRST_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_PRST_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_PRST_WIDTH 1
+#define PCRF_AB_SLOT_PWR_CTL_PRST_LBN 1
+#define PCRF_AB_SLOT_PWR_CTL_PRST_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_BUT_PRST_LBN 0
+#define PCRF_AB_SLOT_ATTN_BUT_PRST_WIDTH 1
+
+
+/*
+ * PC_SLOT_CTL_REG(16bit):
+ * PCIe slot control register
+ */
+
+#define PCR_AB_SLOT_CTL_REG 0x00000078
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_SLOT_PWR_CTLR_CTL_LBN 10
+#define PCRF_AB_SLOT_PWR_CTLR_CTL_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_CTL_LBN 8
+#define PCRF_AB_SLOT_PWR_IND_CTL_WIDTH 2
+#define PCRF_AB_SLOT_ATT_IND_CTL_LBN 6
+#define PCRF_AB_SLOT_ATT_IND_CTL_WIDTH 2
+#define PCRF_AB_SLOT_HP_INT_EN_LBN 5
+#define PCRF_AB_SLOT_HP_INT_EN_WIDTH 1
+#define PCRF_AB_SLOT_CMD_COMP_INT_EN_LBN 4
+#define PCRF_AB_SLOT_CMD_COMP_INT_EN_WIDTH 1
+#define PCRF_AB_SLOT_PRES_DET_CHG_EN_LBN 3
+#define PCRF_AB_SLOT_PRES_DET_CHG_EN_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_WIDTH 1
+#define PCRF_AB_SLOT_PWR_FLTDET_EN_LBN 1
+#define PCRF_AB_SLOT_PWR_FLTDET_EN_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_BUT_EN_LBN 0
+#define PCRF_AB_SLOT_ATTN_BUT_EN_WIDTH 1
+
+
+/*
+ * PC_SLOT_STAT_REG(16bit):
+ * PCIe slot status register
+ */
+
+#define PCR_AB_SLOT_STAT_REG 0x0000007a
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_PRES_DET_ST_LBN 6
+#define PCRF_AB_PRES_DET_ST_WIDTH 1
+#define PCRF_AB_MRL_SENS_ST_LBN 5
+#define PCRF_AB_MRL_SENS_ST_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_LBN 4
+#define PCRF_AB_SLOT_PWR_IND_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_IND_LBN 3
+#define PCRF_AB_SLOT_ATTN_IND_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_WIDTH 1
+#define PCRF_AB_PWR_FLTDET_LBN 1
+#define PCRF_AB_PWR_FLTDET_WIDTH 1
+#define PCRF_AB_ATTN_BUTDET_LBN 0
+#define PCRF_AB_ATTN_BUTDET_WIDTH 1
+
+
+/*
+ * PC_MSIX_CAP_ID_REG(8bit):
+ * MSIX Capability ID
+ */
+
+#define PCR_BB_MSIX_CAP_ID_REG 0x00000090
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_CAP_ID_REG 0x000000b0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_CAP_ID_LBN 0
+#define PCRF_BZ_MSIX_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_MSIX_NXT_PTR_REG(8bit):
+ * MSIX Capability Next Capability Ptr
+ */
+
+#define PCR_BB_MSIX_NXT_PTR_REG 0x00000091
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_NXT_PTR_REG 0x000000b1
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_NXT_PTR_LBN 0
+#define PCRF_BZ_MSIX_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_MSIX_CTL_REG(16bit):
+ * MSIX control register
+ */
+
+#define PCR_BB_MSIX_CTL_REG 0x00000092
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_CTL_REG 0x000000b2
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_EN_LBN 15
+#define PCRF_BZ_MSIX_EN_WIDTH 1
+#define PCRF_BZ_MSIX_FUNC_MASK_LBN 14
+#define PCRF_BZ_MSIX_FUNC_MASK_WIDTH 1
+#define PCRF_BZ_MSIX_TBL_SIZE_LBN 0
+#define PCRF_BZ_MSIX_TBL_SIZE_WIDTH 11
+
+
+/*
+ * PC_MSIX_TBL_BASE_REG(32bit):
+ * MSIX Capability Vector Table Base
+ */
+
+#define PCR_BB_MSIX_TBL_BASE_REG 0x00000094
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_TBL_BASE_REG 0x000000b4
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_TBL_OFF_LBN 3
+#define PCRF_BZ_MSIX_TBL_OFF_WIDTH 29
+#define PCRF_BZ_MSIX_TBL_BIR_LBN 0
+#define PCRF_BZ_MSIX_TBL_BIR_WIDTH 3
+
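For orientation: as in the standard MSI-X capability, the low three bits of the table-base dword select the BAR holding the vector table (the BIR) and the remaining bits give an 8-byte-aligned byte offset into that BAR. A hedged sketch of the decode using the definitions above; the helper name is hypothetical.

	static void
	msix_tbl_base_decode(uint32_t tbl_base, unsigned int *birp, uint32_t *offp)
	{
		/* BIR occupies bits 2:0 of the dword. */
		*birp = tbl_base & ((1u << PCRF_BZ_MSIX_TBL_BIR_WIDTH) - 1);
		/* The byte offset is the value with the low 3 bits masked off. */
		*offp = tbl_base & ~((1u << PCRF_BZ_MSIX_TBL_OFF_LBN) - 1);
	}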
+
+/*
+ * PC_DEV_CAP2_REG(32bit):
+ * PCIe Device Capabilities 2
+ */
+
+#define PCR_CZ_DEV_CAP2_REG 0x00000094
+/* sienaa0=pci_f0_config,hunta0=pci_f0_config */
+
+#define PCRF_DZ_OBFF_SUPPORTED_LBN 18
+#define PCRF_DZ_OBFF_SUPPORTED_WIDTH 2
+#define PCRF_DZ_TPH_CMPL_SUPPORTED_LBN 12
+#define PCRF_DZ_TPH_CMPL_SUPPORTED_WIDTH 2
+#define PCRF_DZ_LTR_M_SUPPORTED_LBN 11
+#define PCRF_DZ_LTR_M_SUPPORTED_WIDTH 1
+#define PCRF_CC_CMPL_TIMEOUT_DIS_LBN 4
+#define PCRF_CC_CMPL_TIMEOUT_DIS_WIDTH 1
+#define PCRF_DZ_CMPL_TIMEOUT_DIS_SUPPORTED_LBN 4
+#define PCRF_DZ_CMPL_TIMEOUT_DIS_SUPPORTED_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_LBN 0
+#define PCRF_CZ_CMPL_TIMEOUT_WIDTH 4
+#define PCFE_CZ_CMPL_TIMEOUT_17000_TO_6400MS 14
+#define PCFE_CZ_CMPL_TIMEOUT_4000_TO_1300MS 13
+#define PCFE_CZ_CMPL_TIMEOUT_1000_TO_3500MS 10
+#define PCFE_CZ_CMPL_TIMEOUT_260_TO_900MS 9
+#define PCFE_CZ_CMPL_TIMEOUT_65_TO_210MS 6
+#define PCFE_CZ_CMPL_TIMEOUT_16_TO_55MS 5
+#define PCFE_CZ_CMPL_TIMEOUT_1_TO_10MS 2
+#define PCFE_CZ_CMPL_TIMEOUT_50_TO_100US 1
+#define PCFE_CZ_CMPL_TIMEOUT_DEFAULT 0
+
+
+/*
+ * PC_DEV_CTL2_REG(16bit):
+ * PCIe Device Control 2
+ */
+
+#define PCR_CZ_DEV_CTL2_REG 0x00000098
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_DZ_OBFF_ENABLE_LBN 13
+#define PCRF_DZ_OBFF_ENABLE_WIDTH 2
+#define PCRF_DZ_LTR_ENABLE_LBN 10
+#define PCRF_DZ_LTR_ENABLE_WIDTH 1
+#define PCRF_DZ_IDO_COMPLETION_ENABLE_LBN 9
+#define PCRF_DZ_IDO_COMPLETION_ENABLE_WIDTH 1
+#define PCRF_DZ_IDO_REQUEST_ENABLE_LBN 8
+#define PCRF_DZ_IDO_REQUEST_ENABLE_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_LBN 4
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_CTL_LBN 0
+#define PCRF_CZ_CMPL_TIMEOUT_CTL_WIDTH 4
+
+
+/*
+ * PC_MSIX_PBA_BASE_REG(32bit):
+ * MSIX Capability PBA Base
+ */
+
+#define PCR_BB_MSIX_PBA_BASE_REG 0x00000098
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_PBA_BASE_REG 0x000000b8
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_PBA_OFF_LBN 3
+#define PCRF_BZ_MSIX_PBA_OFF_WIDTH 29
+#define PCRF_BZ_MSIX_PBA_BIR_LBN 0
+#define PCRF_BZ_MSIX_PBA_BIR_WIDTH 3
+
+
+/*
+ * PC_LNK_CAP2_REG(32bit):
+ * PCIe Link Capability 2
+ */
+
+#define PCR_DZ_LNK_CAP2_REG 0x0000009c
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LNK_SPEED_SUP_LBN 1
+#define PCRF_DZ_LNK_SPEED_SUP_WIDTH 7
+
+
+/*
+ * PC_LNK_CTL2_REG(16bit):
+ * PCIe Link Control 2
+ */
+
+#define PCR_CZ_LNK_CTL2_REG 0x000000a0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_POLLING_DEEMPH_LVL_LBN 12
+#define PCRF_CZ_POLLING_DEEMPH_LVL_WIDTH 1
+#define PCRF_CZ_COMPLIANCE_SOS_CTL_LBN 11
+#define PCRF_CZ_COMPLIANCE_SOS_CTL_WIDTH 1
+#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_LBN 10
+#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_WIDTH 1
+#define PCRF_CZ_TRANSMIT_MARGIN_LBN 7
+#define PCRF_CZ_TRANSMIT_MARGIN_WIDTH 3
+#define PCRF_CZ_SELECT_DEEMPH_LBN 6
+#define PCRF_CZ_SELECT_DEEMPH_WIDTH 1
+#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_LBN 5
+#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_WIDTH 1
+#define PCRF_CZ_ENTER_COMPLIANCE_CTL_LBN 4
+#define PCRF_CZ_ENTER_COMPLIANCE_CTL_WIDTH 1
+#define PCRF_CZ_TGT_LNK_SPEED_CTL_LBN 0
+#define PCRF_CZ_TGT_LNK_SPEED_CTL_WIDTH 4
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN3 3
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN2 2
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN1 1
+
+
+/*
+ * PC_LNK_STAT2_REG(16bit):
+ * PCIe Link Status 2
+ */
+
+#define PCR_CZ_LNK_STAT2_REG 0x000000a2
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_CURRENT_DEEMPH_LBN 0
+#define PCRF_CZ_CURRENT_DEEMPH_WIDTH 1
+
+
+/*
+ * PC_VPD_CAP_ID_REG(8bit):
+ * VPD capability ID register
+ */
+
+#define PCR_AB_VPD_CAP_ID_REG 0x000000b0
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_CAP_ID_LBN 0
+#define PCRF_AB_VPD_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_VPD_NXT_PTR_REG(8bit):
+ * VPD next item pointer
+ */
+
+#define PCR_AB_VPD_NXT_PTR_REG 0x000000b1
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_NXT_PTR_LBN 0
+#define PCRF_AB_VPD_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_VPD_ADDR_REG(16bit):
+ * VPD address register
+ */
+
+#define PCR_AB_VPD_ADDR_REG 0x000000b2
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_FLAG_LBN 15
+#define PCRF_AB_VPD_FLAG_WIDTH 1
+#define PCRF_AB_VPD_ADDR_LBN 0
+#define PCRF_AB_VPD_ADDR_WIDTH 15
+
+
+/*
+ * PC_VPD_CAP_DATA_REG(32bit):
+ * documentation to be written for sum_PC_VPD_CAP_DATA_REG
+ */
+
+#define PCR_AB_VPD_CAP_DATA_REG 0x000000b4
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_VPD_CAP_DATA_REG 0x000000d4
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_VPD_DATA_LBN 0
+#define PCRF_AZ_VPD_DATA_WIDTH 32
+
+
+/*
+ * PC_VPD_CAP_CTL_REG(8bit):
+ * VPD control and capabilities register
+ */
+
+#define PCR_CZ_VPD_CAP_CTL_REG 0x000000d0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_VPD_FLAG_LBN 31
+#define PCRF_CZ_VPD_FLAG_WIDTH 1
+#define PCRF_CZ_VPD_ADDR_LBN 16
+#define PCRF_CZ_VPD_ADDR_WIDTH 15
+#define PCRF_CZ_VPD_NXT_PTR_LBN 8
+#define PCRF_CZ_VPD_NXT_PTR_WIDTH 8
+#define PCRF_CZ_VPD_CAP_ID_LBN 0
+#define PCRF_CZ_VPD_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_AER_CAP_HDR_REG(32bit):
+ * AER capability header register
+ */
+
+#define PCR_AZ_AER_CAP_HDR_REG 0x00000100
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_AERCAPHDR_NXT_PTR_LBN 20
+#define PCRF_AZ_AERCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_AZ_AERCAPHDR_VER_LBN 16
+#define PCRF_AZ_AERCAPHDR_VER_WIDTH 4
+#define PCRF_AZ_AERCAPHDR_ID_LBN 0
+#define PCRF_AZ_AERCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_AER_UNCORR_ERR_STAT_REG(32bit):
+ * AER Uncorrectable error status register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_STAT_REG 0x00000104
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_STAT_LBN 19
+#define PCRF_AZ_ECRC_ERR_STAT_WIDTH 1
+#define PCRF_AZ_MALF_TLP_STAT_LBN 18
+#define PCRF_AZ_MALF_TLP_STAT_WIDTH 1
+#define PCRF_AZ_RX_OVF_STAT_LBN 17
+#define PCRF_AZ_RX_OVF_STAT_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_STAT_LBN 16
+#define PCRF_AZ_UNEXP_COMP_STAT_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_STAT_LBN 15
+#define PCRF_AZ_COMP_ABRT_STAT_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_STAT_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_STAT_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_STAT_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_STAT_WIDTH 1
+#define PCRF_AZ_PSON_TLP_STAT_LBN 12
+#define PCRF_AZ_PSON_TLP_STAT_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_STAT_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_STAT_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_STAT_LBN 0
+#define PCRF_AB_TRAIN_ERR_STAT_WIDTH 1
+
+
+/*
+ * PC_AER_UNCORR_ERR_MASK_REG(32bit):
+ * AER Uncorrectable error mask register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_MASK_REG 0x00000108
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_DZ_ATOMIC_OP_EGR_BLOCKED_MASK_LBN 24
+#define PCRF_DZ_ATOMIC_OP_EGR_BLOCKED_MASK_WIDTH 1
+#define PCRF_DZ_UNCORR_INT_ERR_MASK_LBN 22
+#define PCRF_DZ_UNCORR_INT_ERR_MASK_WIDTH 1
+#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_MASK_LBN 19
+#define PCRF_AZ_ECRC_ERR_MASK_WIDTH 1
+#define PCRF_AZ_MALF_TLP_MASK_LBN 18
+#define PCRF_AZ_MALF_TLP_MASK_WIDTH 1
+#define PCRF_AZ_RX_OVF_MASK_LBN 17
+#define PCRF_AZ_RX_OVF_MASK_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_MASK_LBN 16
+#define PCRF_AZ_UNEXP_COMP_MASK_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_MASK_LBN 15
+#define PCRF_AZ_COMP_ABRT_MASK_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_MASK_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_MASK_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_MASK_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_MASK_WIDTH 1
+#define PCRF_AZ_PSON_TLP_MASK_LBN 12
+#define PCRF_AZ_PSON_TLP_MASK_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_MASK_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_MASK_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_MASK_LBN 0
+#define PCRF_AB_TRAIN_ERR_MASK_WIDTH 1
+
+
+/*
+ * PC_AER_UNCORR_ERR_SEV_REG(32bit):
+ * AER Uncorrectable error severity register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_SEV_REG 0x0000010c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_SEV_LBN 19
+#define PCRF_AZ_ECRC_ERR_SEV_WIDTH 1
+#define PCRF_AZ_MALF_TLP_SEV_LBN 18
+#define PCRF_AZ_MALF_TLP_SEV_WIDTH 1
+#define PCRF_AZ_RX_OVF_SEV_LBN 17
+#define PCRF_AZ_RX_OVF_SEV_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_SEV_LBN 16
+#define PCRF_AZ_UNEXP_COMP_SEV_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_SEV_LBN 15
+#define PCRF_AZ_COMP_ABRT_SEV_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_SEV_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_SEV_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_SEV_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_SEV_WIDTH 1
+#define PCRF_AZ_PSON_TLP_SEV_LBN 12
+#define PCRF_AZ_PSON_TLP_SEV_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_SEV_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_SEV_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_SEV_LBN 0
+#define PCRF_AB_TRAIN_ERR_SEV_WIDTH 1
+
+
+/*
+ * PC_AER_CORR_ERR_STAT_REG(32bit):
+ * AER Correctable error status register
+ */
+
+#define PCR_AZ_AER_CORR_ERR_STAT_REG 0x00000110
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ADVSY_NON_FATAL_STAT_LBN 13
+#define PCRF_CZ_ADVSY_NON_FATAL_STAT_WIDTH 1
+#define PCRF_AZ_RPLY_TMR_TOUT_STAT_LBN 12
+#define PCRF_AZ_RPLY_TMR_TOUT_STAT_WIDTH 1
+#define PCRF_AZ_RPLAY_NUM_RO_STAT_LBN 8
+#define PCRF_AZ_RPLAY_NUM_RO_STAT_WIDTH 1
+#define PCRF_AZ_BAD_DLLP_STAT_LBN 7
+#define PCRF_AZ_BAD_DLLP_STAT_WIDTH 1
+#define PCRF_AZ_BAD_TLP_STAT_LBN 6
+#define PCRF_AZ_BAD_TLP_STAT_WIDTH 1
+#define PCRF_AZ_RX_ERR_STAT_LBN 0
+#define PCRF_AZ_RX_ERR_STAT_WIDTH 1
+
+
+/*
+ * PC_AER_CORR_ERR_MASK_REG(32bit):
+ * AER Correctable error mask register
+ */
+
+#define PCR_AZ_AER_CORR_ERR_MASK_REG 0x00000114
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ADVSY_NON_FATAL_MASK_LBN 13
+#define PCRF_CZ_ADVSY_NON_FATAL_MASK_WIDTH 1
+#define PCRF_AZ_RPLY_TMR_TOUT_MASK_LBN 12
+#define PCRF_AZ_RPLY_TMR_TOUT_MASK_WIDTH 1
+#define PCRF_AZ_RPLAY_NUM_RO_MASK_LBN 8
+#define PCRF_AZ_RPLAY_NUM_RO_MASK_WIDTH 1
+#define PCRF_AZ_BAD_DLLP_MASK_LBN 7
+#define PCRF_AZ_BAD_DLLP_MASK_WIDTH 1
+#define PCRF_AZ_BAD_TLP_MASK_LBN 6
+#define PCRF_AZ_BAD_TLP_MASK_WIDTH 1
+#define PCRF_AZ_RX_ERR_MASK_LBN 0
+#define PCRF_AZ_RX_ERR_MASK_WIDTH 1
+
+
+/*
+ * PC_AER_CAP_CTL_REG(32bit):
+ * AER capability and control register
+ */
+
+#define PCR_AZ_AER_CAP_CTL_REG 0x00000118
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_ECRC_CHK_EN_LBN 8
+#define PCRF_AZ_ECRC_CHK_EN_WIDTH 1
+#define PCRF_AZ_ECRC_CHK_CAP_LBN 7
+#define PCRF_AZ_ECRC_CHK_CAP_WIDTH 1
+#define PCRF_AZ_ECRC_GEN_EN_LBN 6
+#define PCRF_AZ_ECRC_GEN_EN_WIDTH 1
+#define PCRF_AZ_ECRC_GEN_CAP_LBN 5
+#define PCRF_AZ_ECRC_GEN_CAP_WIDTH 1
+#define PCRF_AZ_1ST_ERR_PTR_LBN 0
+#define PCRF_AZ_1ST_ERR_PTR_WIDTH 5
+
+
+/*
+ * PC_AER_HDR_LOG_REG(128bit):
+ * AER Header log register
+ */
+
+#define PCR_AZ_AER_HDR_LOG_REG 0x0000011c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_HDR_LOG_LBN 0
+#define PCRF_AZ_HDR_LOG_WIDTH 128
+
+
+/*
+ * PC_DEVSN_CAP_HDR_REG(32bit):
+ * Device serial number capability header register
+ */
+
+#define PCR_CZ_DEVSN_CAP_HDR_REG 0x00000140
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_DEVSNCAPHDR_VER_LBN 16
+#define PCRF_CZ_DEVSNCAPHDR_VER_WIDTH 4
+#define PCRF_CZ_DEVSNCAPHDR_ID_LBN 0
+#define PCRF_CZ_DEVSNCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_DEVSN_DWORD0_REG(32bit):
+ * Device serial number DWORD0
+ */
+
+#define PCR_CZ_DEVSN_DWORD0_REG 0x00000144
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSN_DWORD0_LBN 0
+#define PCRF_CZ_DEVSN_DWORD0_WIDTH 32
+
+
+/*
+ * PC_DEVSN_DWORD1_REG(32bit):
+ * Device serial number DWORD1
+ */
+
+#define PCR_CZ_DEVSN_DWORD1_REG 0x00000148
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSN_DWORD1_LBN 0
+#define PCRF_CZ_DEVSN_DWORD1_WIDTH 32
+
+
+/*
+ * PC_ARI_CAP_HDR_REG(32bit):
+ * ARI capability header register
+ */
+
+#define PCR_CZ_ARI_CAP_HDR_REG 0x00000150
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARICAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_ARICAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_ARICAPHDR_VER_LBN 16
+#define PCRF_CZ_ARICAPHDR_VER_WIDTH 4
+#define PCRF_CZ_ARICAPHDR_ID_LBN 0
+#define PCRF_CZ_ARICAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_ARI_CAP_REG(16bit):
+ * ARI Capabilities
+ */
+
+#define PCR_CZ_ARI_CAP_REG 0x00000154
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARI_NXT_FN_NUM_LBN 8
+#define PCRF_CZ_ARI_NXT_FN_NUM_WIDTH 8
+#define PCRF_CZ_ARI_ACS_FNGRP_CAP_LBN 1
+#define PCRF_CZ_ARI_ACS_FNGRP_CAP_WIDTH 1
+#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_LBN 0
+#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_WIDTH 1
+
+
+/*
+ * PC_ARI_CTL_REG(16bit):
+ * ARI Control
+ */
+
+#define PCR_CZ_ARI_CTL_REG 0x00000156
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARI_FN_GRP_LBN 4
+#define PCRF_CZ_ARI_FN_GRP_WIDTH 3
+#define PCRF_CZ_ARI_ACS_FNGRP_EN_LBN 1
+#define PCRF_CZ_ARI_ACS_FNGRP_EN_WIDTH 1
+#define PCRF_CZ_ARI_MFVC_FNGRP_EN_LBN 0
+#define PCRF_CZ_ARI_MFVC_FNGRP_EN_WIDTH 1
+
+
+/*
+ * PC_SEC_PCIE_CAP_REG(32bit):
+ * Secondary PCIE Capability Register
+ */
+
+#define PCR_DZ_SEC_PCIE_CAP_REG 0x00000160
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_SEC_NXT_PTR_LBN 20
+#define PCRF_DZ_SEC_NXT_PTR_WIDTH 12
+#define PCRF_DZ_SEC_VERSION_LBN 16
+#define PCRF_DZ_SEC_VERSION_WIDTH 4
+#define PCRF_DZ_SEC_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_SEC_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_CAP_HDR_REG(32bit):
+ * SRIOV capability header register
+ */
+
+#define PCR_CC_SRIOV_CAP_HDR_REG 0x00000160
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CAP_HDR_REG 0x00000180
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_SRIOVCAPHDR_VER_LBN 16
+#define PCRF_CZ_SRIOVCAPHDR_VER_WIDTH 4
+#define PCRF_CZ_SRIOVCAPHDR_ID_LBN 0
+#define PCRF_CZ_SRIOVCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_CAP_REG(32bit):
+ * SRIOV Capabilities
+ */
+
+#define PCR_CC_SRIOV_CAP_REG 0x00000164
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CAP_REG 0x00000184
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_LBN 21
+#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_WIDTH 11
+#define PCRF_DZ_VF_ARI_CAP_PRESV_LBN 1
+#define PCRF_DZ_VF_ARI_CAP_PRESV_WIDTH 1
+#define PCRF_CZ_VF_MIGR_CAP_LBN 0
+#define PCRF_CZ_VF_MIGR_CAP_WIDTH 1
+
+
+/*
+ * PC_LINK_CONTROL3_REG(32bit):
+ * Link Control 3.
+ */
+
+#define PCR_DZ_LINK_CONTROL3_REG 0x00000164
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LINK_EQ_INT_EN_LBN 1
+#define PCRF_DZ_LINK_EQ_INT_EN_WIDTH 1
+#define PCRF_DZ_PERFORM_EQL_LBN 0
+#define PCRF_DZ_PERFORM_EQL_WIDTH 1
+
+
+/*
+ * PC_LANE_ERROR_STAT_REG(32bit):
+ * Lane Error Status Register.
+ */
+
+#define PCR_DZ_LANE_ERROR_STAT_REG 0x00000168
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE_STATUS_LBN 0
+#define PCRF_DZ_LANE_STATUS_WIDTH 8
+
+
+/*
+ * PC_SRIOV_CTL_REG(16bit):
+ * SRIOV Control
+ */
+
+#define PCR_CC_SRIOV_CTL_REG 0x00000168
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CTL_REG 0x00000188
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_ARI_CAP_HRCHY_LBN 4
+#define PCRF_CZ_VF_ARI_CAP_HRCHY_WIDTH 1
+#define PCRF_CZ_VF_MSE_LBN 3
+#define PCRF_CZ_VF_MSE_WIDTH 1
+#define PCRF_CZ_VF_MIGR_INT_EN_LBN 2
+#define PCRF_CZ_VF_MIGR_INT_EN_WIDTH 1
+#define PCRF_CZ_VF_MIGR_EN_LBN 1
+#define PCRF_CZ_VF_MIGR_EN_WIDTH 1
+#define PCRF_CZ_VF_EN_LBN 0
+#define PCRF_CZ_VF_EN_WIDTH 1
+
+
+/*
+ * PC_SRIOV_STAT_REG(16bit):
+ * SRIOV Status
+ */
+
+#define PCR_CC_SRIOV_STAT_REG 0x0000016a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_STAT_REG 0x0000018a
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_STAT_LBN 0
+#define PCRF_CZ_VF_MIGR_STAT_WIDTH 1
+
+
+/*
+ * PC_LANE01_EQU_CONTROL_REG(32bit):
+ * Lanes 0,1 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE01_EQU_CONTROL_REG 0x0000016c
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE1_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE1_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE0_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE0_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_INITIALVFS_REG(16bit):
+ * SRIOV Initial VFs
+ */
+
+#define PCR_CC_SRIOV_INITIALVFS_REG 0x0000016c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_INITIALVFS_REG 0x0000018c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_INITIALVFS_LBN 0
+#define PCRF_CZ_VF_INITIALVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_TOTALVFS_REG(10bit):
+ * SRIOV Total VFs
+ */
+
+#define PCR_CC_SRIOV_TOTALVFS_REG 0x0000016e
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_TOTALVFS_REG 0x0000018e
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_TOTALVFS_LBN 0
+#define PCRF_CZ_VF_TOTALVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_NUMVFS_REG(16bit):
+ * SRIOV Number of VFs
+ */
+
+#define PCR_CC_SRIOV_NUMVFS_REG 0x00000170
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_NUMVFS_REG 0x00000190
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_NUMVFS_LBN 0
+#define PCRF_CZ_VF_NUMVFS_WIDTH 16
+
+
+/*
+ * PC_LANE23_EQU_CONTROL_REG(32bit):
+ * Lanes 2,3 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE23_EQU_CONTROL_REG 0x00000170
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE3_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE3_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE2_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE2_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_FN_DPND_LNK_REG(16bit):
+ * SRIOV Function dependency link
+ */
+
+#define PCR_CC_SRIOV_FN_DPND_LNK_REG 0x00000172
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_FN_DPND_LNK_REG 0x00000192
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_SRIOV_FN_DPND_LNK_LBN 0
+#define PCRF_CZ_SRIOV_FN_DPND_LNK_WIDTH 8
+
+
+/*
+ * PC_SRIOV_1STVF_OFFSET_REG(16bit):
+ * SRIOV First VF Offset
+ */
+
+#define PCR_CC_SRIOV_1STVF_OFFSET_REG 0x00000174
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_1STVF_OFFSET_REG 0x00000194
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_1STVF_OFFSET_LBN 0
+#define PCRF_CZ_VF_1STVF_OFFSET_WIDTH 16
+
+
+/*
+ * PC_LANE45_EQU_CONTROL_REG(32bit):
+ * Lanes 4,5 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE45_EQU_CONTROL_REG 0x00000174
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE5_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE5_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE4_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE4_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_VFSTRIDE_REG(16bit):
+ * SRIOV VF Stride
+ */
+
+#define PCR_CC_SRIOV_VFSTRIDE_REG 0x00000176
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_VFSTRIDE_REG 0x00000196
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_VFSTRIDE_LBN 0
+#define PCRF_CZ_VF_VFSTRIDE_WIDTH 16
+
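For orientation: per the SR-IOV capability, the first-VF-offset and VF-stride values read from the two registers above combine with the PF's own routing ID to address each VF. A minimal sketch follows, with 1-based VF numbering as in the SR-IOV specification; the helper name is hypothetical.

	static uint16_t
	sriov_vf_routing_id(uint16_t pf_rid, uint16_t first_vf_offset,
	    uint16_t vf_stride, unsigned int n)
	{
		/* Routing ID of VF n: PF RID + FirstVFOffset + (n - 1) * VFStride. */
		return ((uint16_t)(pf_rid + first_vf_offset + (n - 1) * vf_stride));
	}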
+
+/*
+ * PC_LANE67_EQU_CONTROL_REG(32bit):
+ * Lanes 6,7 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE67_EQU_CONTROL_REG 0x00000178
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE7_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE7_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE6_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE6_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_DEVID_REG(16bit):
+ * SRIOV VF Device ID
+ */
+
+#define PCR_CC_SRIOV_DEVID_REG 0x0000017a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_DEVID_REG 0x0000019a
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_DEVID_LBN 0
+#define PCRF_CZ_VF_DEVID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_SUP_PAGESZ_REG(16bit):
+ * SRIOV Supported Page Sizes
+ */
+
+#define PCR_CC_SRIOV_SUP_PAGESZ_REG 0x0000017c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_SUP_PAGESZ_REG 0x0000019c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_SUP_PAGESZ_LBN 0
+#define PCRF_CZ_VF_SUP_PAGESZ_WIDTH 16
+
+
+/*
+ * PC_SRIOV_SYS_PAGESZ_REG(32bit):
+ * SRIOV System Page Size
+ */
+
+#define PCR_CC_SRIOV_SYS_PAGESZ_REG 0x00000180
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_SYS_PAGESZ_REG 0x000001a0
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_SYS_PAGESZ_LBN 0
+#define PCRF_CZ_VF_SYS_PAGESZ_WIDTH 16
+
+
+/*
+ * PC_SRIOV_BAR0_REG(32bit):
+ * SRIOV VF Bar0
+ */
+
+#define PCR_CC_SRIOV_BAR0_REG 0x00000184
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR0_REG 0x000001a4
+/* hunta0=pci_f0_config */
+
+#define PCRF_CC_VF_BAR_ADDRESS_LBN 0
+#define PCRF_CC_VF_BAR_ADDRESS_WIDTH 32
+#define PCRF_DZ_VF_BAR0_ADDRESS_LBN 4
+#define PCRF_DZ_VF_BAR0_ADDRESS_WIDTH 28
+#define PCRF_DZ_VF_BAR0_PREF_LBN 3
+#define PCRF_DZ_VF_BAR0_PREF_WIDTH 1
+#define PCRF_DZ_VF_BAR0_TYPE_LBN 1
+#define PCRF_DZ_VF_BAR0_TYPE_WIDTH 2
+#define PCRF_DZ_VF_BAR0_IOM_LBN 0
+#define PCRF_DZ_VF_BAR0_IOM_WIDTH 1
+
+
+/*
+ * PC_SRIOV_BAR1_REG(32bit):
+ * SRIOV Bar1
+ */
+
+#define PCR_CC_SRIOV_BAR1_REG 0x00000188
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR1_REG 0x000001a8
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR1_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR1_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR2_REG(32bit):
+ * SRIOV Bar2
+ */
+
+#define PCR_CC_SRIOV_BAR2_REG 0x0000018c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR2_REG 0x000001ac
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR2_ADDRESS_LBN 4
+#define PCRF_DZ_VF_BAR2_ADDRESS_WIDTH 28
+#define PCRF_DZ_VF_BAR2_PREF_LBN 3
+#define PCRF_DZ_VF_BAR2_PREF_WIDTH 1
+#define PCRF_DZ_VF_BAR2_TYPE_LBN 1
+#define PCRF_DZ_VF_BAR2_TYPE_WIDTH 2
+#define PCRF_DZ_VF_BAR2_IOM_LBN 0
+#define PCRF_DZ_VF_BAR2_IOM_WIDTH 1
+
+
+/*
+ * PC_SRIOV_BAR3_REG(32bit):
+ * SRIOV Bar3
+ */
+
+#define PCR_CC_SRIOV_BAR3_REG 0x00000190
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR3_REG 0x000001b0
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR3_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR3_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR4_REG(32bit):
+ * SRIOV Bar4
+ */
+
+#define PCR_CC_SRIOV_BAR4_REG 0x00000194
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR4_REG 0x000001b4
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR4_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR4_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR5_REG(32bit):
+ * SRIOV Bar5
+ */
+
+#define PCR_CC_SRIOV_BAR5_REG 0x00000198
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR5_REG 0x000001b8
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR5_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR5_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_RSVD_REG(16bit):
+ * Reserved register
+ */
+
+#define PCR_DZ_SRIOV_RSVD_REG 0x00000198
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_VF_RSVD_LBN 0
+#define PCRF_DZ_VF_RSVD_WIDTH 16
+
+
+/*
+ * PC_SRIOV_MIBR_SARRAY_OFFSET_REG(32bit):
+ * SRIOV VF Migration State Array Offset
+ */
+
+#define PCR_CC_SRIOV_MIBR_SARRAY_OFFSET_REG 0x0000019c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_MIBR_SARRAY_OFFSET_REG 0x000001bc
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_OFFSET_LBN 3
+#define PCRF_CZ_VF_MIGR_OFFSET_WIDTH 29
+#define PCRF_CZ_VF_MIGR_BIR_LBN 0
+#define PCRF_CZ_VF_MIGR_BIR_WIDTH 3
+
+
+/*
+ * PC_TPH_CAP_HDR_REG(32bit):
+ * TPH Capability Header Register
+ */
+
+#define PCR_DZ_TPH_CAP_HDR_REG 0x000001c0
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_TPH_NXT_PTR_LBN 20
+#define PCRF_DZ_TPH_NXT_PTR_WIDTH 12
+#define PCRF_DZ_TPH_VERSION_LBN 16
+#define PCRF_DZ_TPH_VERSION_WIDTH 4
+#define PCRF_DZ_TPH_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_TPH_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_TPH_REQ_CAP_REG(32bit):
+ * TPH Requester Capability Register
+ */
+
+#define PCR_DZ_TPH_REQ_CAP_REG 0x000001c4
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_ST_TBLE_SIZE_LBN 16
+#define PCRF_DZ_ST_TBLE_SIZE_WIDTH 11
+#define PCRF_DZ_ST_TBLE_LOC_LBN 9
+#define PCRF_DZ_ST_TBLE_LOC_WIDTH 2
+#define PCRF_DZ_EXT_TPH_MODE_SUP_LBN 8
+#define PCRF_DZ_EXT_TPH_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_DEV_MODE_SUP_LBN 2
+#define PCRF_DZ_TPH_DEV_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_INT_MODE_SUP_LBN 1
+#define PCRF_DZ_TPH_INT_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_NOST_MODE_SUP_LBN 0
+#define PCRF_DZ_TPH_NOST_MODE_SUP_WIDTH 1
+
+
+/*
+ * PC_TPH_REQ_CTL_REG(32bit):
+ * TPH Requester Control Register
+ */
+
+#define PCR_DZ_TPH_REQ_CTL_REG 0x000001c8
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_TPH_REQ_ENABLE_LBN 8
+#define PCRF_DZ_TPH_REQ_ENABLE_WIDTH 2
+#define PCRF_DZ_TPH_ST_MODE_LBN 0
+#define PCRF_DZ_TPH_ST_MODE_WIDTH 3
+
+
+/*
+ * PC_LTR_CAP_HDR_REG(32bit):
+ * Latency Tolerance Reporting Cap Header Reg
+ */
+
+#define PCR_DZ_LTR_CAP_HDR_REG 0x00000290
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LTR_NXT_PTR_LBN 20
+#define PCRF_DZ_LTR_NXT_PTR_WIDTH 12
+#define PCRF_DZ_LTR_VERSION_LBN 16
+#define PCRF_DZ_LTR_VERSION_WIDTH 4
+#define PCRF_DZ_LTR_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_LTR_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_LTR_MAX_SNOOP_REG(32bit):
+ * LTR Maximum Snoop/No Snoop Register
+ */
+
+#define PCR_DZ_LTR_MAX_SNOOP_REG 0x00000294
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_LBN 26
+#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_WIDTH 3
+#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_LBN 16
+#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_WIDTH 10
+#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_LBN 10
+#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_WIDTH 3
+#define PCRF_DZ_LTR_MAX_SNOOP_LAT_LBN 0
+#define PCRF_DZ_LTR_MAX_SNOOP_LAT_WIDTH 10
+
+
+/*
+ * PC_ACK_LAT_TMR_REG(32bit):
+ * ACK latency timer & replay timer register
+ */
+
+#define PCR_AC_ACK_LAT_TMR_REG 0x00000700
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_RT_LBN 16
+#define PCRF_AC_RT_WIDTH 16
+#define PCRF_AC_ALT_LBN 0
+#define PCRF_AC_ALT_WIDTH 16
+
+
+/*
+ * PC_OTHER_MSG_REG(32bit):
+ * Other message register
+ */
+
+#define PCR_AC_OTHER_MSG_REG 0x00000704
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_OM_CRPT3_LBN 24
+#define PCRF_AC_OM_CRPT3_WIDTH 8
+#define PCRF_AC_OM_CRPT2_LBN 16
+#define PCRF_AC_OM_CRPT2_WIDTH 8
+#define PCRF_AC_OM_CRPT1_LBN 8
+#define PCRF_AC_OM_CRPT1_WIDTH 8
+#define PCRF_AC_OM_CRPT0_LBN 0
+#define PCRF_AC_OM_CRPT0_WIDTH 8
+
+
+/*
+ * PC_FORCE_LNK_REG(24bit):
+ * Port force link register
+ */
+
+#define PCR_AC_FORCE_LNK_REG 0x00000708
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_LFS_LBN 16
+#define PCRF_AC_LFS_WIDTH 6
+#define PCRF_AC_FL_LBN 15
+#define PCRF_AC_FL_WIDTH 1
+#define PCRF_AC_LN_LBN 0
+#define PCRF_AC_LN_WIDTH 8
+
+
+/*
+ * PC_ACK_FREQ_REG(32bit):
+ * ACK frequency register
+ */
+
+#define PCR_AC_ACK_FREQ_REG 0x0000070c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_LBN 30
+#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_WIDTH 1
+#define PCRF_AC_L1_ENTR_LAT_LBN 27
+#define PCRF_AC_L1_ENTR_LAT_WIDTH 3
+#define PCRF_AC_L0_ENTR_LAT_LBN 24
+#define PCRF_AC_L0_ENTR_LAT_WIDTH 3
+#define PCRF_CC_COMM_NFTS_LBN 16
+#define PCRF_CC_COMM_NFTS_WIDTH 8
+#define PCRF_AB_ACK_FREQ_REG_RSVD0_LBN 16
+#define PCRF_AB_ACK_FREQ_REG_RSVD0_WIDTH 3
+#define PCRF_AC_MAX_FTS_LBN 8
+#define PCRF_AC_MAX_FTS_WIDTH 8
+#define PCRF_AC_ACK_FREQ_LBN 0
+#define PCRF_AC_ACK_FREQ_WIDTH 8
+
+
+/*
+ * PC_PORT_LNK_CTL_REG(32bit):
+ * Port link control register
+ */
+
+#define PCR_AC_PORT_LNK_CTL_REG 0x00000710
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AB_LRE_LBN 27
+#define PCRF_AB_LRE_WIDTH 1
+#define PCRF_AB_ESYNC_LBN 26
+#define PCRF_AB_ESYNC_WIDTH 1
+#define PCRF_AB_CRPT_LBN 25
+#define PCRF_AB_CRPT_WIDTH 1
+#define PCRF_AB_XB_LBN 24
+#define PCRF_AB_XB_WIDTH 1
+#define PCRF_AC_LC_LBN 16
+#define PCRF_AC_LC_WIDTH 6
+#define PCRF_AC_LDR_LBN 8
+#define PCRF_AC_LDR_WIDTH 4
+#define PCRF_AC_FLM_LBN 7
+#define PCRF_AC_FLM_WIDTH 1
+#define PCRF_AC_LKD_LBN 6
+#define PCRF_AC_LKD_WIDTH 1
+#define PCRF_AC_DLE_LBN 5
+#define PCRF_AC_DLE_WIDTH 1
+#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_LBN 4
+#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_WIDTH 1
+#define PCRF_AC_RA_LBN 3
+#define PCRF_AC_RA_WIDTH 1
+#define PCRF_AC_LE_LBN 2
+#define PCRF_AC_LE_WIDTH 1
+#define PCRF_AC_SD_LBN 1
+#define PCRF_AC_SD_WIDTH 1
+#define PCRF_AC_OMR_LBN 0
+#define PCRF_AC_OMR_WIDTH 1
+
+
+/*
+ * PC_LN_SKEW_REG(32bit):
+ * Lane skew register
+ */
+
+#define PCR_AC_LN_SKEW_REG 0x00000714
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_DIS_LBN 31
+#define PCRF_AC_DIS_WIDTH 1
+#define PCRF_AB_RST_LBN 30
+#define PCRF_AB_RST_WIDTH 1
+#define PCRF_AC_AD_LBN 25
+#define PCRF_AC_AD_WIDTH 1
+#define PCRF_AC_FCD_LBN 24
+#define PCRF_AC_FCD_WIDTH 1
+#define PCRF_AC_LS2_LBN 16
+#define PCRF_AC_LS2_WIDTH 8
+#define PCRF_AC_LS1_LBN 8
+#define PCRF_AC_LS1_WIDTH 8
+#define PCRF_AC_LS0_LBN 0
+#define PCRF_AC_LS0_WIDTH 8
+
+
+/*
+ * PC_SYM_NUM_REG(16bit):
+ * Symbol number register
+ */
+
+#define PCR_AC_SYM_NUM_REG 0x00000718
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_CC_MAX_FUNCTIONS_LBN 29
+#define PCRF_CC_MAX_FUNCTIONS_WIDTH 3
+#define PCRF_CC_FC_WATCHDOG_TMR_LBN 24
+#define PCRF_CC_FC_WATCHDOG_TMR_WIDTH 5
+#define PCRF_CC_ACK_NAK_TMR_MOD_LBN 19
+#define PCRF_CC_ACK_NAK_TMR_MOD_WIDTH 5
+#define PCRF_CC_REPLAY_TMR_MOD_LBN 14
+#define PCRF_CC_REPLAY_TMR_MOD_WIDTH 5
+#define PCRF_AB_ES_LBN 12
+#define PCRF_AB_ES_WIDTH 3
+#define PCRF_AB_SYM_NUM_REG_RSVD0_LBN 11
+#define PCRF_AB_SYM_NUM_REG_RSVD0_WIDTH 1
+#define PCRF_CC_NUM_SKP_SYMS_LBN 8
+#define PCRF_CC_NUM_SKP_SYMS_WIDTH 3
+#define PCRF_AB_TS2_LBN 4
+#define PCRF_AB_TS2_WIDTH 4
+#define PCRF_AC_TS1_LBN 0
+#define PCRF_AC_TS1_WIDTH 4
+
+
+/*
+ * PC_SYM_TMR_FLT_MSK_REG(16bit):
+ * Symbol timer and Filter Mask Register
+ */
+
+#define PCR_CC_SYM_TMR_FLT_MSK_REG 0x0000071c
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_DEFAULT_FLT_MSK1_LBN 16
+#define PCRF_CC_DEFAULT_FLT_MSK1_WIDTH 16
+#define PCRF_CC_FC_WDOG_TMR_DIS_LBN 15
+#define PCRF_CC_FC_WDOG_TMR_DIS_WIDTH 1
+#define PCRF_CC_SI1_LBN 8
+#define PCRF_CC_SI1_WIDTH 3
+#define PCRF_CC_SKIP_INT_VAL_LBN 0
+#define PCRF_CC_SKIP_INT_VAL_WIDTH 11
+#define PCRF_CC_SI0_LBN 0
+#define PCRF_CC_SI0_WIDTH 8
+
+
+/*
+ * PC_SYM_TMR_REG(16bit):
+ * Symbol timer register
+ */
+
+#define PCR_AB_SYM_TMR_REG 0x0000071c
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_ET_LBN 11
+#define PCRF_AB_ET_WIDTH 4
+#define PCRF_AB_SI1_LBN 8
+#define PCRF_AB_SI1_WIDTH 3
+#define PCRF_AB_SI0_LBN 0
+#define PCRF_AB_SI0_WIDTH 8
+
+
+/*
+ * PC_FLT_MSK_REG(32bit):
+ * Filter Mask Register 2
+ */
+
+#define PCR_CC_FLT_MSK_REG 0x00000720
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_DEFAULT_FLT_MSK2_LBN 0
+#define PCRF_CC_DEFAULT_FLT_MSK2_WIDTH 32
+
+
+/*
+ * PC_PHY_STAT_REG(32bit):
+ * PHY status register
+ */
+
+#define PCR_AB_PHY_STAT_REG 0x00000720
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PHY_STAT_REG 0x00000810
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AC_SSL_LBN 3
+#define PCRF_AC_SSL_WIDTH 1
+#define PCRF_AC_SSR_LBN 2
+#define PCRF_AC_SSR_WIDTH 1
+#define PCRF_AC_SSCL_LBN 1
+#define PCRF_AC_SSCL_WIDTH 1
+#define PCRF_AC_SSCD_LBN 0
+#define PCRF_AC_SSCD_WIDTH 1
+
+
+/*
+ * PC_PHY_CTL_REG(32bit):
+ * PHY control register
+ */
+
+#define PCR_AB_PHY_CTL_REG 0x00000724
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PHY_CTL_REG 0x00000814
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AC_BD_LBN 31
+#define PCRF_AC_BD_WIDTH 1
+#define PCRF_AC_CDS_LBN 30
+#define PCRF_AC_CDS_WIDTH 1
+#define PCRF_AC_DWRAP_LB_LBN 29
+#define PCRF_AC_DWRAP_LB_WIDTH 1
+#define PCRF_AC_EBD_LBN 28
+#define PCRF_AC_EBD_WIDTH 1
+#define PCRF_AC_SNR_LBN 27
+#define PCRF_AC_SNR_WIDTH 1
+#define PCRF_AC_RX_NOT_DET_LBN 2
+#define PCRF_AC_RX_NOT_DET_WIDTH 1
+#define PCRF_AC_FORCE_LOS_VAL_LBN 1
+#define PCRF_AC_FORCE_LOS_VAL_WIDTH 1
+#define PCRF_AC_FORCE_LOS_EN_LBN 0
+#define PCRF_AC_FORCE_LOS_EN_WIDTH 1
+
+
+/*
+ * PC_DEBUG0_REG(32bit):
+ * Debug register 0
+ */
+
+#define PCR_AC_DEBUG0_REG 0x00000728
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_CDI03_LBN 24
+#define PCRF_AC_CDI03_WIDTH 8
+#define PCRF_AC_CDI0_LBN 0
+#define PCRF_AC_CDI0_WIDTH 32
+#define PCRF_AC_CDI02_LBN 16
+#define PCRF_AC_CDI02_WIDTH 8
+#define PCRF_AC_CDI01_LBN 8
+#define PCRF_AC_CDI01_WIDTH 8
+#define PCRF_AC_CDI00_LBN 0
+#define PCRF_AC_CDI00_WIDTH 8
+
+
+/*
+ * PC_DEBUG1_REG(32bit):
+ * Debug register 1
+ */
+
+#define PCR_AC_DEBUG1_REG 0x0000072c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_CDI13_LBN 24
+#define PCRF_AC_CDI13_WIDTH 8
+#define PCRF_AC_CDI1_LBN 0
+#define PCRF_AC_CDI1_WIDTH 32
+#define PCRF_AC_CDI12_LBN 16
+#define PCRF_AC_CDI12_WIDTH 8
+#define PCRF_AC_CDI11_LBN 8
+#define PCRF_AC_CDI11_WIDTH 8
+#define PCRF_AC_CDI10_LBN 0
+#define PCRF_AC_CDI10_WIDTH 8
+
+
+/*
+ * PC_XPFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XPFCC_STAT_REG
+ */
+
+#define PCR_AC_XPFCC_STAT_REG 0x00000730
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XPDC_LBN 12
+#define PCRF_AC_XPDC_WIDTH 8
+#define PCRF_AC_XPHC_LBN 0
+#define PCRF_AC_XPHC_WIDTH 12
+
+
+/*
+ * PC_XNPFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XNPFCC_STAT_REG
+ */
+
+#define PCR_AC_XNPFCC_STAT_REG 0x00000734
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XNPDC_LBN 12
+#define PCRF_AC_XNPDC_WIDTH 8
+#define PCRF_AC_XNPHC_LBN 0
+#define PCRF_AC_XNPHC_WIDTH 12
+
+
+/*
+ * PC_XCFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XCFCC_STAT_REG
+ */
+
+#define PCR_AC_XCFCC_STAT_REG 0x00000738
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XCDC_LBN 12
+#define PCRF_AC_XCDC_WIDTH 8
+#define PCRF_AC_XCHC_LBN 0
+#define PCRF_AC_XCHC_WIDTH 12
+
+
+/*
+ * PC_Q_STAT_REG(8bit):
+ * documentation to be written for sum_PC_Q_STAT_REG
+ */
+
+#define PCR_AC_Q_STAT_REG 0x0000073c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_RQNE_LBN 2
+#define PCRF_AC_RQNE_WIDTH 1
+#define PCRF_AC_XRNE_LBN 1
+#define PCRF_AC_XRNE_WIDTH 1
+#define PCRF_AC_RCNR_LBN 0
+#define PCRF_AC_RCNR_WIDTH 1
+
+
+/*
+ * PC_VC_XMIT_ARB1_REG(32bit):
+ * VC Transmit Arbitration Register 1
+ */
+
+#define PCR_CC_VC_XMIT_ARB1_REG 0x00000740
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC_XMIT_ARB2_REG(32bit):
+ * VC Transmit Arbitration Register 2
+ */
+
+#define PCR_CC_VC_XMIT_ARB2_REG 0x00000744
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_P_RQ_CTL_REG(32bit):
+ * VC0 Posted Receive Queue Control
+ */
+
+#define PCR_CC_VC0_P_RQ_CTL_REG 0x00000748
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_NP_RQ_CTL_REG(32bit):
+ * VC0 Non-Posted Receive Queue Control
+ */
+
+#define PCR_CC_VC0_NP_RQ_CTL_REG 0x0000074c
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_C_RQ_CTL_REG(32bit):
+ * VC0 Completion Receive Queue Control
+ */
+
+#define PCR_CC_VC0_C_RQ_CTL_REG 0x00000750
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_GEN2_REG(32bit):
+ * Gen2 Register
+ */
+
+#define PCR_CC_GEN2_REG 0x0000080c
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_SET_DE_EMPHASIS_LBN 20
+#define PCRF_CC_SET_DE_EMPHASIS_WIDTH 1
+#define PCRF_CC_CFG_TX_COMPLIANCE_LBN 19
+#define PCRF_CC_CFG_TX_COMPLIANCE_WIDTH 1
+#define PCRF_CC_CFG_TX_SWING_LBN 18
+#define PCRF_CC_CFG_TX_SWING_WIDTH 1
+#define PCRF_CC_DIR_SPEED_CHANGE_LBN 17
+#define PCRF_CC_DIR_SPEED_CHANGE_WIDTH 1
+#define PCRF_CC_LANE_ENABLE_LBN 8
+#define PCRF_CC_LANE_ENABLE_WIDTH 9
+#define PCRF_CC_NUM_FTS_LBN 0
+#define PCRF_CC_NUM_FTS_WIDTH 8
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_REGS_PCI_H */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_rx.c b/usr/src/uts/common/io/sfxge/common/efx_rx.c
new file mode 100644
index 0000000000..54ada6aebb
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_rx.c
@@ -0,0 +1,1226 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_rx_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+static __checkReturn efx_rc_t
+siena_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+static __checkReturn efx_rc_t
+siena_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+static __checkReturn efx_rc_t
+siena_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+static __checkReturn uint32_t
+siena_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+static __checkReturn efx_rc_t
+siena_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp);
+
+static void
+siena_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+static void
+siena_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+static __checkReturn efx_rc_t
+siena_rx_qflush(
+ __in efx_rxq_t *erp);
+
+static void
+siena_rx_qenable(
+ __in efx_rxq_t *erp);
+
+static __checkReturn efx_rc_t
+siena_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp);
+
+static void
+siena_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_rx_ops_t __efx_rx_siena_ops = {
+ siena_rx_init, /* erxo_init */
+ siena_rx_fini, /* erxo_fini */
+#if EFSYS_OPT_RX_SCATTER
+ siena_rx_scatter_enable, /* erxo_scatter_enable */
+#endif
+#if EFSYS_OPT_RX_SCALE
+ siena_rx_scale_mode_set, /* erxo_scale_mode_set */
+ siena_rx_scale_key_set, /* erxo_scale_key_set */
+ siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
+ siena_rx_prefix_hash, /* erxo_prefix_hash */
+#endif
+ siena_rx_prefix_pktlen, /* erxo_prefix_pktlen */
+ siena_rx_qpost, /* erxo_qpost */
+ siena_rx_qpush, /* erxo_qpush */
+ siena_rx_qflush, /* erxo_qflush */
+ siena_rx_qenable, /* erxo_qenable */
+ siena_rx_qcreate, /* erxo_qcreate */
+ siena_rx_qdestroy, /* erxo_qdestroy */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_rx_ops_t __efx_rx_ef10_ops = {
+ ef10_rx_init, /* erxo_init */
+ ef10_rx_fini, /* erxo_fini */
+#if EFSYS_OPT_RX_SCATTER
+ ef10_rx_scatter_enable, /* erxo_scatter_enable */
+#endif
+#if EFSYS_OPT_RX_SCALE
+ ef10_rx_scale_mode_set, /* erxo_scale_mode_set */
+ ef10_rx_scale_key_set, /* erxo_scale_key_set */
+ ef10_rx_scale_tbl_set, /* erxo_scale_tbl_set */
+ ef10_rx_prefix_hash, /* erxo_prefix_hash */
+#endif
+ ef10_rx_prefix_pktlen, /* erxo_prefix_pktlen */
+ ef10_rx_qpost, /* erxo_qpost */
+ ef10_rx_qpush, /* erxo_qpush */
+ ef10_rx_qflush, /* erxo_qflush */
+ ef10_rx_qenable, /* erxo_qenable */
+ ef10_rx_qcreate, /* erxo_qcreate */
+ ef10_rx_qdestroy, /* erxo_qdestroy */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_rx_init(
+ __inout efx_nic_t *enp)
+{
+ const efx_rx_ops_t *erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_EV)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_mod_flags & EFX_MOD_RX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ erxop = &__efx_rx_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+ if ((rc = erxop->erxo_init(enp)) != 0)
+ goto fail4;
+
+ enp->en_erxop = erxop;
+ enp->en_mod_flags |= EFX_MOD_RX;
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_erxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_RX;
+ return (rc);
+}
+
+ void
+efx_rx_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+ EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0);
+
+ erxop->erxo_fini(enp);
+
+ enp->en_erxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_RX;
+}
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+efx_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scatter_enable(enp, buf_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_hash_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_hash_support_t *supportp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (supportp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Report if resources are available to insert RX hash value */
+ *supportp = enp->en_hash_support;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_scale_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_scale_support_t *supportp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (supportp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Report if resources are available to support RSS */
+ *supportp = enp->en_rss_support;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (erxop->erxo_scale_mode_set != NULL) {
+ if ((rc = erxop->erxo_scale_mode_set(enp, alg,
+ type, insert)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scale_key_set(enp, key, n)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scale_tbl_set(enp, table, n)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ void
+efx_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qpost(erp, addrp, size, n, completed, added);
+}
+
+ void
+efx_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qpush(erp, added, pushedp);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ if ((rc = erxop->erxo_qflush(erp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qenable(erp);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rxq_t *erp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ /* Allocate an RXQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp);
+
+ if (erp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ erp->er_magic = EFX_RXQ_MAGIC;
+ erp->er_enp = enp;
+ erp->er_index = index;
+ erp->er_mask = n - 1;
+ erp->er_esmp = esmp;
+
+ if ((rc = erxop->erxo_qcreate(enp, index, label, type, esmp, n, id,
+ eep, erp)) != 0)
+ goto fail2;
+
+ enp->en_rx_qcount++;
+ *erpp = erp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qdestroy(erp);
+}
+
+ __checkReturn efx_rc_t
+efx_psuedo_hdr_pkt_length_get(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ return (erxop->erxo_prefix_pktlen(enp, buffer, lengthp));
+}
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn uint32_t
+efx_psuedo_hdr_hash_get(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(enp->en_hash_support, ==, EFX_RX_HASH_AVAILABLE);
+ return (erxop->erxo_prefix_hash(enp, func, buffer));
+}
+#endif /* EFSYS_OPT_RX_SCALE */
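+
+/*
+ * Illustrative call order for a client driver (a sketch inferred from the
+ * assertions in the functions above, not taken verbatim from the sfxge
+ * driver itself; variable names are placeholders):
+ *
+ *	efx_rx_init(enp);
+ *	efx_rx_qcreate(enp, index, label, type, esmp, ndescs, id, eep, &erp);
+ *	efx_rx_qenable(erp);
+ *	efx_rx_qpost(erp, addrs, bufsize, nbufs, completed, added);
+ *	efx_rx_qpush(erp, added, &pushed);
+ *	...
+ *	efx_rx_qflush(erp);
+ *	efx_rx_qdestroy(erp);
+ *	efx_rx_fini(enp);
+ */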
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_rx_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ unsigned int index;
+
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Zero the RSS table */
+ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS;
+ index++) {
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+ }
+
+#if EFSYS_OPT_RX_SCALE
+ /* The RSS key and indirection table are writable. */
+ enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE;
+
+ /* Hardware can insert RX hash with/without RSS */
+ enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCATTER
+static __checkReturn efx_rc_t
+siena_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ unsigned int nbuf32;
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ nbuf32 = buf_size / 32;
+ if ((nbuf32 == 0) ||
+ (nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) ||
+ ((buf_size % 32) != 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_rx_qcount > 0) {
+ rc = EBUSY;
+ goto fail2;
+ }
+
+ /* Set scatter buffer size */
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Enable scatter for packets not matching a filter */
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+
+#define EFX_RX_LFSR_HASH(_enp, _insert) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
+ (_insert) ? 1 : 0); \
+ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ \
+ if ((_enp)->en_family == EFX_FAMILY_SIENA) { \
+ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
+ &oword); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0); \
+ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
+ &oword); \
+ } \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, \
+ (_ip) ? 1 : 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, \
+ (_tcp) ? 0 : 1); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
+ (_insert) ? 1 : 0); \
+ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 0 : 1); \
+ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
+ \
+ (_rc) = 0; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+
+#if EFSYS_OPT_RX_SCALE
+
+static __checkReturn efx_rc_t
+siena_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ efx_rc_t rc;
+
+ switch (alg) {
+ case EFX_RX_HASHALG_LFSR:
+ EFX_RX_LFSR_HASH(enp, insert);
+ break;
+
+ case EFX_RX_HASHALG_TOEPLITZ:
+ EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert,
+ type & (1 << EFX_RX_HASH_IPV4),
+ type & (1 << EFX_RX_HASH_TCPIPV4));
+
+ EFX_RX_TOEPLITZ_IPV6_HASH(enp,
+ type & (1 << EFX_RX_HASH_IPV6),
+ type & (1 << EFX_RX_HASH_TCPIPV6),
+ rc);
+ if (rc != 0)
+ goto fail1;
+
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ EFX_RX_LFSR_HASH(enp, B_FALSE);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ unsigned int byte;
+ unsigned int offset;
+ efx_rc_t rc;
+
+ byte = 0;
+
+ /* Write Toeplitz IPv4 hash key */
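+ /*
+ * Note: each loop below fills the key field from its highest byte offset
+ * downward, so key[0] ends up at the top of the field.
+ */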
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
+
+ byte = 0;
+
+ /* Verify Toeplitz IPv4 hash key */
+ EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
+ for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ }
+
+ if ((enp->en_features & EFX_FEATURE_IPV6) == 0)
+ goto done;
+
+ byte = 0;
+
+ /* Write Toeplitz IPv6 hash key 3 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+
+ /* Write Toeplitz IPv6 hash key 2 */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
+
+ /* Write Toeplitz IPv6 hash key 1 */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
+
+ byte = 0;
+
+ /* Verify Toeplitz IPv6 hash key 3 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+
+ /* Verify Toeplitz IPv6 hash key 2 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ /* Verify Toeplitz IPv6 hash key 1 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail4;
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ int index;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS);
+ EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH));
+
+ if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) {
+ uint32_t byte;
+
+ /* Calculate the entry to place in the table */
+ byte = (n > 0) ? (uint32_t)table[index % n] : 0;
+
+ EFSYS_PROBE2(table, int, index, uint32_t, byte);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte);
+
+ /* Write the table */
+ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+ }
+
+ for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) {
+ uint32_t byte;
+
+ /* Recompute the expected entry for verification */
+ byte = (n > 0) ? (uint32_t)table[index % n] : 0;
+
+ /* Read the table */
+ EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+
+ /* Verify the entry */
+ if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif
+
+/*
+ * Falcon/Siena pseudo-header
+ * --------------------------
+ *
+ * Receive packets are prefixed by an optional 16 byte pseudo-header.
+ * The pseudo-header is a byte array of one of the forms:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.TT.TT.TT.TT
+ * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.LL.LL
+ *
+ * where:
+ * TT.TT.TT.TT Toeplitz hash (32-bit big-endian)
+ * LL.LL LFSR hash (16-bit big-endian)
+ */
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn uint32_t
+siena_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ _NOTE(ARGUNUSED(enp));
+
+ switch (func) {
+ case EFX_RX_HASHALG_TOEPLITZ:
+ return ((buffer[12] << 24) |
+ (buffer[13] << 16) |
+ (buffer[14] << 8) |
+ buffer[15]);
+
+ case EFX_RX_HASHALG_LFSR:
+ return ((buffer[14] << 8) | buffer[15]);
+
+ default:
+ EFSYS_ASSERT(0);
+ return (0);
+ }
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+static __checkReturn efx_rc_t
+siena_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp));
+ _NOTE(ARGUNUSED(buffer));
+ _NOTE(ARGUNUSED(lengthp));
+ /* Not supported by Falcon/Siena hardware */
+ EFSYS_ASSERT(0);
+ return (ENOTSUP);
+}
+
+
+static void
+siena_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_qword_t qword;
+ unsigned int i;
+ unsigned int offset;
+ unsigned int id;
+
+ /* The client driver must not overfill the queue */
+ EFSYS_ASSERT3U(added - completed + n, <=,
+ EFX_RXQ_LIMIT(erp->er_mask + 1));
+
+ id = added & (erp->er_mask);
+ for (i = 0; i < n; i++) {
+ EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
+ unsigned int, id, efsys_dma_addr_t, addrp[i],
+ size_t, size);
+
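+ /*
+ * Build the RX descriptor: the buffer length plus the 64-bit DMA
+ * address split across two 32-bit fields.
+ */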
+ EFX_POPULATE_QWORD_3(qword,
+ FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size),
+ FSF_AZ_RX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addrp[i] & 0xffffffff),
+ FSF_AZ_RX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addrp[i] >> 32));
+
+ offset = id * sizeof (efx_qword_t);
+ EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
+
+ id = (id + 1) & (erp->er_mask);
+ }
+}
+
+static void
+siena_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ unsigned int pushed = *pushedp;
+ uint32_t wptr;
+ efx_oword_t oword;
+ efx_dword_t dword;
+
+ /* All descriptors are pushed */
+ *pushedp = added;
+
+ /* Push the populated descriptors out */
+ wptr = added & erp->er_mask;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr);
+
+ /* Only write the third DWORD */
+ EFX_POPULATE_DWORD_1(dword,
+ EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
+ wptr, pushed & erp->er_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0,
+ erp->er_index, &dword, B_FALSE);
+}
+
+static __checkReturn efx_rc_t
+siena_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+ uint32_t label;
+
+ label = erp->er_index;
+
+ /* Flush the queue */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ, label);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword);
+
+ return (0);
+}
+
+static void
+siena_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t size;
+ boolean_t jumbo;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(esmp));
+
+ EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS ==
+ (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
+ EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
+
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
+
+ if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_rxq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
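+ /*
+ * Select the descriptor queue size encoding: find size such that
+ * n == EFX_RXQ_MINNDESCS * (1 << size).
+ */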
+ for (size = 0; (1 << size) <= (EFX_RXQ_MAXNDESCS / EFX_RXQ_MINNDESCS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_RXQ_MINNDESCS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ switch (type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ jumbo = B_FALSE;
+ break;
+
+#if EFSYS_OPT_RX_SCATTER
+ case EFX_RXQ_TYPE_SCATTER:
+ if (enp->en_family < EFX_FAMILY_SIENA) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ jumbo = B_TRUE;
+ break;
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ default:
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* Set up the new descriptor queue */
+ EFX_POPULATE_OWORD_7(oword,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, id,
+ FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL, label,
+ FRF_AZ_RX_DESCQ_SIZE, size,
+ FRF_AZ_RX_DESCQ_TYPE, 0,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+siena_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT(enp->en_rx_qcount != 0);
+ --enp->en_rx_qcount;
+
+ /* Purge descriptor queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ /* Free the RXQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+}
+
+static void
+siena_rx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_sram.c b/usr/src/uts/common/io/sfxge/common/efx_sram.c
new file mode 100644
index 0000000000..e508dd01c4
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_sram.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_sram_buf_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in efsys_mem_t *esmp,
+ __in size_t n)
+{
+ efx_qword_t qword;
+ uint32_t start = id;
+ uint32_t stop = start + n;
+ efsys_dma_addr_t addr;
+ efx_oword_t oword;
+ unsigned int count;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD) {
+ /*
+ * FIXME: the efx_sram_buf_tbl_*() functionality needs to be
+ * pulled inside the Falcon/Siena queue create/destroy code,
+ * and then the original functions can be removed (see bug30834
+ * comment #1). But, for now, we just ensure that they are
+ * no-ops for EF10, to allow bringing up existing drivers
+ * without modification.
+ */
+
+ return (0);
+ }
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ if (stop >= EFX_BUF_TBL_SIZE) {
+ rc = EFBIG;
+ goto fail1;
+ }
+
+ /* Add the entries into the buffer table */
+ addr = EFSYS_MEM_ADDR(esmp);
+ for (id = start; id != stop; id++) {
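+ /*
+ * Each buffer table entry holds the buffer's DMA address as a
+ * 4KB-aligned page number (addr >> 12), split across two 32-bit
+ * fields.
+ */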
+ EFX_POPULATE_QWORD_5(qword,
+ FRF_AZ_IP_DAT_BUF_SIZE, 0, FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF_DW0,
+ (uint32_t)((addr >> 12) & 0xffffffff),
+ FRF_AZ_BUF_ADR_FBUF_DW1,
+ (uint32_t)((addr >> 12) >> 32),
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_FULL_TBL,
+ id, &qword);
+
+ addr += EFX_BUF_SIZE;
+ }
+
+ EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
+
+ /* Flush the write buffer */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_BUF_UPD_CMD, 1,
+ FRF_AZ_BUF_CLR_CMD, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+
+ /* Poll for the last entry being written to the buffer table */
+ EFSYS_ASSERT3U(id, ==, stop);
+ addr -= EFX_BUF_SIZE;
+
+ count = 0;
+ do {
+ EFSYS_PROBE1(wait, unsigned int, count);
+
+ /* Spin for 1 ms */
+ EFSYS_SPIN(1000);
+
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
+ id - 1, &qword);
+
+ if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) ==
+ (uint32_t)((addr >> 12) & 0xffffffff) &&
+ EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) ==
+ (uint32_t)((addr >> 12) >> 32))
+ goto verify;
+
+ } while (++count < 100);
+
+ rc = ETIMEDOUT;
+ goto fail2;
+
+verify:
+ /* Verify the rest of the entries in the buffer table */
+ while (--id != start) {
+ addr -= EFX_BUF_SIZE;
+
+ /* Read the buffer table entry */
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
+ id - 1, &qword);
+
+ if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) !=
+ (uint32_t)((addr >> 12) & 0xffffffff) ||
+ EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) !=
+ (uint32_t)((addr >> 12) >> 32)) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ id = stop;
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, id - 1,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_sram_buf_tbl_clear(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ uint32_t start = id;
+ uint32_t stop = start + n;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD) {
+ /*
+ * FIXME: the efx_sram_buf_tbl_*() functionality needs to be
+ * pulled inside the Falcon/Siena queue create/destroy code,
+ * and then the original functions can be removed (see bug30834
+ * comment #1). But, for now, we just ensure that they are
+ * no-ops for EF10, to allow bringing up existing drivers
+ * without modification.
+ */
+
+ return;
+ }
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE);
+
+ EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
+
+ EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, stop - 1,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+}
+
+
+#if EFSYS_OPT_DIAG
+
+static void
+efx_sram_byte_increment_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+ unsigned int index;
+
+ _NOTE(ARGUNUSED(negate))
+
+ for (index = 0; index < sizeof (efx_qword_t); index++)
+ eqp->eq_u8[index] = offset + index;
+}
+
+static void
+efx_sram_all_the_same_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ if (negate)
+ EFX_SET_QWORD(*eqp);
+ else
+ EFX_ZERO_QWORD(*eqp);
+}
+
+static void
+efx_sram_bit_alternate_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ EFX_POPULATE_QWORD_2(*eqp,
+ EFX_DWORD_0, (negate) ? 0x55555555 : 0xaaaaaaaa,
+ EFX_DWORD_1, (negate) ? 0x55555555 : 0xaaaaaaaa);
+}
+
+static void
+efx_sram_byte_alternate_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ EFX_POPULATE_QWORD_2(*eqp,
+ EFX_DWORD_0, (negate) ? 0x00ff00ff : 0xff00ff00,
+ EFX_DWORD_1, (negate) ? 0x00ff00ff : 0xff00ff00);
+}
+
+static void
+efx_sram_byte_changing_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+ unsigned int index;
+
+ for (index = 0; index < sizeof (efx_qword_t); index++) {
+ uint8_t byte;
+
+ if (offset / 256 == 0)
+ byte = (uint8_t)((offset % 257) % 256);
+ else
+ byte = (uint8_t)(~((offset - 8) % 257) % 256);
+
+ eqp->eq_u8[index] = (negate) ? ~byte : byte;
+ }
+}
+
+static void
+efx_sram_bit_sweep_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+
+ if (negate) {
+ EFX_SET_QWORD(*eqp);
+ EFX_CLEAR_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
+ } else {
+ EFX_ZERO_QWORD(*eqp);
+ EFX_SET_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
+ }
+}
+
+efx_sram_pattern_fn_t __efx_sram_pattern_fns[] = {
+ efx_sram_byte_increment_set,
+ efx_sram_all_the_same_set,
+ efx_sram_bit_alternate_set,
+ efx_sram_byte_alternate_set,
+ efx_sram_byte_changing_set,
+ efx_sram_bit_sweep_set
+};
+
+ __checkReturn efx_rc_t
+efx_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_pattern_type_t type)
+{
+ efx_sram_pattern_fn_t func;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+
+ /* SRAM testing is only available on Siena. */
+ if (enp->en_family != EFX_FAMILY_SIENA)
+ return (0);
+
+ /* Select pattern generator */
+ EFSYS_ASSERT3U(type, <, EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[type];
+
+ return (siena_sram_test(enp, func));
+}
+
+#endif /* EFSYS_OPT_DIAG */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_tx.c b/usr/src/uts/common/io/sfxge/common/efx_tx.c
new file mode 100644
index 0000000000..24019f5f00
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_tx.c
@@ -0,0 +1,1089 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_QSTATS
+#define EFX_TX_QSTAT_INCR(_etp, _stat) \
+ do { \
+ (_etp)->et_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_TX_QSTAT_INCR(_etp, _stat)
+#endif
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_tx_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_tx_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp);
+
+static void
+siena_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+static __checkReturn efx_rc_t
+siena_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+static void
+siena_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+static __checkReturn efx_rc_t
+siena_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+static __checkReturn efx_rc_t
+siena_tx_qflush(
+ __in efx_txq_t *etp);
+
+static void
+siena_tx_qenable(
+ __in efx_txq_t *etp);
+
+ __checkReturn efx_rc_t
+siena_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+ void
+siena_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+#endif
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_tx_ops_t __efx_tx_siena_ops = {
+ siena_tx_init, /* etxo_init */
+ siena_tx_fini, /* etxo_fini */
+ siena_tx_qcreate, /* etxo_qcreate */
+ siena_tx_qdestroy, /* etxo_qdestroy */
+ siena_tx_qpost, /* etxo_qpost */
+ siena_tx_qpush, /* etxo_qpush */
+ siena_tx_qpace, /* etxo_qpace */
+ siena_tx_qflush, /* etxo_qflush */
+ siena_tx_qenable, /* etxo_qenable */
+ NULL, /* etxo_qpio_enable */
+ NULL, /* etxo_qpio_disable */
+ NULL, /* etxo_qpio_write */
+ NULL, /* etxo_qpio_post */
+ siena_tx_qdesc_post, /* etxo_qdesc_post */
+ siena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ NULL, /* etxo_qdesc_tso2_create */
+ NULL, /* etxo_qdesc_vlantci_create */
+#if EFSYS_OPT_QSTATS
+ siena_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+static const efx_tx_ops_t __efx_tx_hunt_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+static const efx_tx_ops_t __efx_tx_medford_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_tx_init(
+ __in efx_nic_t *enp)
+{
+ const efx_tx_ops_t *etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_EV)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_mod_flags & EFX_MOD_TX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ etxop = &__efx_tx_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ etxop = &__efx_tx_hunt_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ etxop = &__efx_tx_medford_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+ EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);
+
+ if ((rc = etxop->etxo_init(enp)) != 0)
+ goto fail4;
+
+ enp->en_etxop = etxop;
+ enp->en_mod_flags |= EFX_MOD_TX;
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_etxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TX;
+ return (rc);
+}
+
+ void
+efx_tx_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
+ EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);
+
+ etxop->etxo_fini(enp);
+
+ enp->en_etxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TX;
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_txq_t **etpp,
+ __out unsigned int *addedp)
+{
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_txq_t *etp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
+
+ EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <, encp->enc_txq_limit);
+
+ /* Allocate a TXQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_txq_t), etp);
+
+ if (etp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ etp->et_magic = EFX_TXQ_MAGIC;
+ etp->et_enp = enp;
+ etp->et_index = index;
+ etp->et_mask = n - 1;
+ etp->et_esmp = esmp;
+
+ /* Initial descriptor index may be modified by etxo_qcreate */
+ *addedp = 0;
+
+ if ((rc = etxop->etxo_qcreate(enp, index, label, esmp,
+ n, id, flags, eep, etp, addedp)) != 0)
+ goto fail2;
+
+ enp->en_tx_qcount++;
+ *etpp = etp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_tx_qcount != 0);
+ --enp->en_tx_qcount;
+
+ etxop->etxo_qdestroy(etp);
+
+ /* Free the TXQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qpost(etp, eb,
+ n, completed, addedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qpush(etp, added, pushed);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qpace(etp, ns)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qflush(etp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qenable(etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_enable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (~enp->en_features & EFX_FEATURE_PIO_BUFFERS) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if (etxop->etxo_qpio_enable == NULL) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ if ((rc = etxop->etxo_qpio_enable(etp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qpio_disable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_disable != NULL)
+ etxop->etxo_qpio_disable(etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_write != NULL) {
+ if ((rc = etxop->etxo_qpio_write(etp, buffer, buf_length,
+ pio_buf_offset)) != 0)
+ goto fail1;
+ return (0);
+ }
+
+ return (ENOTSUP);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_post != NULL) {
+ if ((rc = etxop->etxo_qpio_post(etp, pkt_length, completed,
+ addedp)) != 0)
+ goto fail1;
+ return (0);
+ }
+
+ return (ENOTSUP);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qdesc_post(etp, ed,
+ n, completed, addedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_dma_create != NULL);
+
+ etxop->etxo_qdesc_dma_create(etp, addr, size, eop, edp);
+}
+
+ void
+efx_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_tso_create != NULL);
+
+ etxop->etxo_qdesc_tso_create(etp, ipv4_id, tcp_seq, tcp_flags, edp);
+}
+
+ void
+efx_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_tso2_create != NULL);
+
+ etxop->etxo_qdesc_tso2_create(etp, ipv4_id, tcp_seq, mss, edp, count);
+}
+
+ void
+efx_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_vlantci_create != NULL);
+
+ etxop->etxo_qdesc_vlantci_create(etp, tci, edp);
+}
+
+
+#if EFSYS_OPT_QSTATS
+ void
+efx_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qstats_update(etp, stat);
+}
+#endif
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_tx_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * Disable the timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level (although always allow a
+ * minimal trickle).
+ */
+ EFX_BAR_READO(enp, FR_AZ_TX_RESERVED_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+
+ /*
+ * Filter all packets less than 14 bytes to avoid parsing
+ * errors.
+ */
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_RESERVED_REG, &oword);
+
+ /*
+ * Do not set TX_NO_EOP_DISC_EN, since it limits packets to 16
+ * descriptors (which is bad).
+ */
+ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+
+ return (0);
+}
+
+#define EFX_TX_DESC(_etp, _addr, _size, _eop, _added) \
+ do { \
+ unsigned int id; \
+ size_t offset; \
+ efx_qword_t qword; \
+ \
+ id = (_added)++ & (_etp)->et_mask; \
+ offset = id * sizeof (efx_qword_t); \
+ \
+ EFSYS_PROBE5(tx_post, unsigned int, (_etp)->et_index, \
+ unsigned int, id, efsys_dma_addr_t, (_addr), \
+ size_t, (_size), boolean_t, (_eop)); \
+ \
+ EFX_POPULATE_QWORD_4(qword, \
+ FSF_AZ_TX_KER_CONT, (_eop) ? 0 : 1, \
+ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)(_size), \
+ FSF_AZ_TX_KER_BUF_ADDR_DW0, \
+ (uint32_t)((_addr) & 0xffffffff), \
+ FSF_AZ_TX_KER_BUF_ADDR_DW1, \
+ (uint32_t)((_addr) >> 32)); \
+ EFSYS_MEM_WRITEQ((_etp)->et_esmp, offset, &qword); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+static __checkReturn efx_rc_t
+siena_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ int rc = ENOSPC;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1))
+ goto fail1;
+
+ for (i = 0; i < n; i++) {
+ efx_buffer_t *ebp = &eb[i];
+ efsys_dma_addr_t start = ebp->eb_addr;
+ size_t size = ebp->eb_size;
+ efsys_dma_addr_t end = start + size;
+
+ /* Fragments must not span 4k boundaries. */
+ EFSYS_ASSERT(P2ROUNDUP(start + 1, 4096) >= end);
+
+ EFX_TX_DESC(etp, start, size, ebp->eb_eop, added);
+ }
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+siena_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ uint32_t wptr;
+ efx_dword_t dword;
+ efx_oword_t oword;
+
+ /* Push the populated descriptors out */
+ wptr = added & etp->et_mask;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DESC_WPTR, wptr);
+
+ /* Only write the third DWORD */
+ EFX_POPULATE_DWORD_1(dword,
+ EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
+ wptr, pushed & etp->et_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED3(enp, FR_BZ_TX_DESC_UPD_REGP0,
+ etp->et_index, &dword, B_FALSE);
+}
+
+#define EFX_MAX_PACE_VALUE 20
+
+static __checkReturn efx_rc_t
+siena_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ unsigned int pace_val;
+ unsigned int timer_period;
+ efx_rc_t rc;
+
+ if (ns == 0) {
+ pace_val = 0;
+ } else {
+		/*
+		 * The pace_val to write into the table is such that
+		 * ns <= timer_period * (2 ^ pace_val)
+		 */
+ timer_period = 104 / encp->enc_clk_mult;
+ for (pace_val = 1; pace_val <= EFX_MAX_PACE_VALUE; pace_val++) {
+ if ((timer_period << pace_val) >= ns)
+ break;
+ }
+ }
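+	/*
+	 * Worked example of the computation above (assuming enc_clk_mult == 1,
+	 * i.e. a 104ns timer period): ns == 1000 yields pace_val == 4, since
+	 * 104 << 3 == 832 is below 1000 but 104 << 4 == 1664 is not.
+	 */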
+ if (pace_val > EFX_MAX_PACE_VALUE) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Update the pacing table */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_PACE, pace_val);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_PACE_TBL, etp->et_index,
+ &oword, B_TRUE);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+ uint32_t label;
+
+ (void) efx_tx_qpace(etp, 0);
+
+ label = etp->et_index;
+
+ /* Flush the queue */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, label);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_FLUSH_DESCQ_REG, &oword);
+
+ return (0);
+}
+
+static void
+siena_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+
+ EFSYS_PROBE5(tx_descq_ptr, unsigned int, etp->et_index,
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_3),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_2),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_1),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_0));
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DC_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_EN, 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t size;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(esmp));
+
+ EFX_STATIC_ASSERT(EFX_EV_TX_NLABELS ==
+ (1 << FRF_AZ_TX_DESCQ_LABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_TX_NLABELS);
+
+ EFSYS_ASSERT(ISP2(EFX_TXQ_MAXNDESCS(encp)));
+ EFX_STATIC_ASSERT(ISP2(EFX_TXQ_MINNDESCS));
+
+ if (!ISP2(n) || (n < EFX_TXQ_MINNDESCS) || (n > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_txq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
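+	/*
+	 * Derive the FRF_AZ_TX_DESCQ_SIZE encoding: the value of size for
+	 * which (1 << size) * EFX_TXQ_MINNDESCS == n.
+	 */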
+ for (size = 0;
+ (1 << size) <= (EFX_TXQ_MAXNDESCS(encp) / EFX_TXQ_MINNDESCS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_TXQ_MINNDESCS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ /* Set up the new descriptor queue */
+ *addedp = 0;
+
+ EFX_POPULATE_OWORD_6(oword,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, id,
+ FRF_AZ_TX_DESCQ_EVQ_ID, eep->ee_index,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, label,
+ FRF_AZ_TX_DESCQ_SIZE, size,
+ FRF_AZ_TX_DESCQ_TYPE, 0);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_IP_CHKSM_DIS,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_TCP_CHKSM_DIS,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < n; i++) {
+ efx_desc_t *edp = &ed[i];
+ unsigned int id;
+ size_t offset;
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_desc_t);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
+ }
+
+ EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
+ unsigned int, added, unsigned int, n);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+siena_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ /* Fragments must not span 4k boundaries. */
+ EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= addr + size);
+
+ EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
+ efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_4(edp->ed_eq,
+ FSF_AZ_TX_KER_CONT, eop ? 0 : 1,
+ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)size,
+ FSF_AZ_TX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addr & 0xffffffff),
+ FSF_AZ_TX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addr >> 32));
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_QSTATS
+#if EFSYS_OPT_NAMES
+/* START MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock 9d8d26a0a5e2c453 */
+static const char *__efx_tx_qstat_name[] = {
+ "post",
+ "post_pio",
+};
+/* END MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock */
+
+ const char *
+efx_tx_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(id, <, TX_NQSTATS);
+
+ return (__efx_tx_qstat_name[id]);
+}
+#endif /* EFSYS_OPT_NAMES */
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < TX_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, etp->et_stat[id]);
+ etp->et_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+static void
+siena_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+
+ /* Purge descriptor queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+}
+
+static void
+siena_tx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_types.h b/usr/src/uts/common/io/sfxge/common/efx_types.h
new file mode 100644
index 0000000000..a9ae1e60e7
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_types.h
@@ -0,0 +1,1653 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ *
+ * Acknowledgement to Fen Systems Ltd.
+ */
+
+#ifndef _SYS_EFX_TYPES_H
+#define _SYS_EFX_TYPES_H
+
+#include "efsys.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Bitfield access
+ *
+ * Solarflare NICs make extensive use of bitfields up to 128 bits
+ * wide. Since there is no native 128-bit datatype on most systems,
+ * and since 64-bit datatypes are inefficient on 32-bit systems and
+ * vice versa, we wrap accesses in a way that uses the most efficient
+ * datatype.
+ *
+ * The NICs are PCI devices and therefore little-endian. Since most
+ * of the quantities that we deal with are DMAed to/from host memory,
+ * we define our datatypes (efx_oword_t, efx_qword_t and efx_dword_t)
+ * to be little-endian.
+ *
+ * In the less common case of using PIO for individual register
+ * writes, we construct the little-endian datatype in host memory and
+ * then use non-swapping register access primitives, rather than
+ * constructing a native-endian datatype and relying on implicit
+ * byte-swapping. (We use a similar strategy for register reads.)
+ */
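+
+/*
+ * Typical usage sketch (mirroring siena_tx_qpush() in efx_tx.c): the value
+ * is assembled in host memory first, e.g.
+ *
+ *	efx_oword_t oword;
+ *
+ *	EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DESC_WPTR, wptr);
+ *
+ * and only then handed to a register access primitive.
+ */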
+
+/*
+ * NOTE: Field definitions here and elsewhere are done in terms of a lowest
+ * bit number (LBN) and a width.
+ */
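+
+/*
+ * For example, a hypothetical field with LBN 8 and WIDTH 4 occupies bits
+ * 8..11 of its containing register: EFX_HIGH_BIT() of that field is 11
+ * and EFX_MASK32() of it is 0xf.
+ */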
+
+#define EFX_DUMMY_FIELD_LBN 0
+#define EFX_DUMMY_FIELD_WIDTH 0
+
+#define EFX_BYTE_0_LBN 0
+#define EFX_BYTE_0_WIDTH 8
+
+#define EFX_BYTE_1_LBN 8
+#define EFX_BYTE_1_WIDTH 8
+
+#define EFX_BYTE_2_LBN 16
+#define EFX_BYTE_2_WIDTH 8
+
+#define EFX_BYTE_3_LBN 24
+#define EFX_BYTE_3_WIDTH 8
+
+#define EFX_BYTE_4_LBN 32
+#define EFX_BYTE_4_WIDTH 8
+
+#define EFX_BYTE_5_LBN 40
+#define EFX_BYTE_5_WIDTH 8
+
+#define EFX_BYTE_6_LBN 48
+#define EFX_BYTE_6_WIDTH 8
+
+#define EFX_BYTE_7_LBN 56
+#define EFX_BYTE_7_WIDTH 8
+
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
+
+#define EFX_WORD_2_LBN 32
+#define EFX_WORD_2_WIDTH 16
+
+#define EFX_WORD_3_LBN 48
+#define EFX_WORD_3_WIDTH 16
+
+#define EFX_DWORD_0_LBN 0
+#define EFX_DWORD_0_WIDTH 32
+
+#define EFX_DWORD_1_LBN 32
+#define EFX_DWORD_1_WIDTH 32
+
+#define EFX_DWORD_2_LBN 64
+#define EFX_DWORD_2_WIDTH 32
+
+#define EFX_DWORD_3_LBN 96
+#define EFX_DWORD_3_WIDTH 32
+
+/* There are intentionally no EFX_QWORD_0 or EFX_QWORD_1 field definitions
+ * here as the implementation of EFX_QWORD_FIELD and EFX_OWORD_FIELD does
+ * not support field widths larger than 32 bits.
+ */
+
+/* Specified attribute (i.e. LBN or WIDTH) of the specified field */
+#define EFX_VAL(_field, _attribute) \
+ _field ## _ ## _attribute
+
+/* Lowest bit number of the specified field */
+#define EFX_LOW_BIT(_field) \
+ EFX_VAL(_field, LBN)
+
+/* Width of the specified field */
+#define EFX_WIDTH(_field) \
+ EFX_VAL(_field, WIDTH)
+
+/* Highest bit number of the specified field */
+#define EFX_HIGH_BIT(_field) \
+ (EFX_LOW_BIT(_field) + EFX_WIDTH(_field) - 1)
+
+/*
+ * 64-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x000000000000001f.
+ */
+#define EFX_MASK64(_field) \
+ ((EFX_WIDTH(_field) == 64) ? ~((uint64_t)0) : \
+ (((((uint64_t)1) << EFX_WIDTH(_field))) - 1))
+/*
+ * 32-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x0000001f.
+ */
+#define EFX_MASK32(_field) \
+ ((EFX_WIDTH(_field) == 32) ? ~((uint32_t)0) : \
+ (((((uint32_t)1) << EFX_WIDTH(_field))) - 1))
+
+/*
+ * 16-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x001f.
+ */
+#define EFX_MASK16(_field) \
+ ((EFX_WIDTH(_field) == 16) ? 0xffffu : \
+ (uint16_t)((1 << EFX_WIDTH(_field)) - 1))
+
+/*
+ * 8-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ */
+#define EFX_MASK8(_field) \
+ ((uint8_t)((1 << EFX_WIDTH(_field)) - 1))
+
+#pragma pack(1)
+
+/*
+ * A byte (i.e. 8-bit) datatype
+ */
+typedef union efx_byte_u {
+ uint8_t eb_u8[1];
+} efx_byte_t;
+
+/*
+ * A word (i.e. 16-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_word_u {
+ efx_byte_t ew_byte[2];
+ uint16_t ew_u16[1];
+ uint8_t ew_u8[2];
+} efx_word_t;
+
+/*
+ * A doubleword (i.e. 32-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_dword_u {
+ efx_byte_t ed_byte[4];
+ efx_word_t ed_word[2];
+ uint32_t ed_u32[1];
+ uint16_t ed_u16[2];
+ uint8_t ed_u8[4];
+} efx_dword_t;
+
+/*
+ * A quadword (i.e. 64-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_qword_u {
+ efx_byte_t eq_byte[8];
+ efx_word_t eq_word[4];
+ efx_dword_t eq_dword[2];
+#if EFSYS_HAS_UINT64
+ uint64_t eq_u64[1];
+#endif
+ uint32_t eq_u32[2];
+ uint16_t eq_u16[4];
+ uint8_t eq_u8[8];
+} efx_qword_t;
+
+/*
+ * An octword (i.e. 128-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_oword_u {
+ efx_byte_t eo_byte[16];
+ efx_word_t eo_word[8];
+ efx_dword_t eo_dword[4];
+ efx_qword_t eo_qword[2];
+#if EFSYS_HAS_SSE2_M128
+ __m128i eo_u128[1];
+#endif
+#if EFSYS_HAS_UINT64
+ uint64_t eo_u64[2];
+#endif
+ uint32_t eo_u32[4];
+ uint16_t eo_u16[8];
+ uint8_t eo_u8[16];
+} efx_oword_t;
+
+#pragma pack()
+
+#define __SWAP16(_x) \
+ ((((_x) & 0xff) << 8) | \
+ (((_x) >> 8) & 0xff))
+
+#define __SWAP32(_x) \
+ ((__SWAP16((_x) & 0xffff) << 16) | \
+ __SWAP16(((_x) >> 16) & 0xffff))
+
+#define __SWAP64(_x) \
+ ((__SWAP32((_x) & 0xffffffff) << 32) | \
+ __SWAP32(((_x) >> 32) & 0xffffffff))
+
+#define __NOSWAP16(_x) (_x)
+#define __NOSWAP32(_x) (_x)
+#define __NOSWAP64(_x) (_x)
+
+#if EFSYS_IS_BIG_ENDIAN
+
+#define __CPU_TO_LE_16(_x) (uint16_t)__SWAP16(_x)
+#define __LE_TO_CPU_16(_x) (uint16_t)__SWAP16(_x)
+#define __CPU_TO_BE_16(_x) (uint16_t)__NOSWAP16(_x)
+#define __BE_TO_CPU_16(_x) (uint16_t)__NOSWAP16(_x)
+
+#define __CPU_TO_LE_32(_x) (uint32_t)__SWAP32(_x)
+#define __LE_TO_CPU_32(_x) (uint32_t)__SWAP32(_x)
+#define __CPU_TO_BE_32(_x) (uint32_t)__NOSWAP32(_x)
+#define __BE_TO_CPU_32(_x) (uint32_t)__NOSWAP32(_x)
+
+#define __CPU_TO_LE_64(_x) (uint64_t)__SWAP64(_x)
+#define __LE_TO_CPU_64(_x) (uint64_t)__SWAP64(_x)
+#define __CPU_TO_BE_64(_x) (uint64_t)__NOSWAP64(_x)
+#define __BE_TO_CPU_64(_x) (uint64_t)__NOSWAP64(_x)
+
+#elif EFSYS_IS_LITTLE_ENDIAN
+
+#define __CPU_TO_LE_16(_x) (uint16_t)__NOSWAP16(_x)
+#define __LE_TO_CPU_16(_x) (uint16_t)__NOSWAP16(_x)
+#define __CPU_TO_BE_16(_x) (uint16_t)__SWAP16(_x)
+#define __BE_TO_CPU_16(_x) (uint16_t)__SWAP16(_x)
+
+#define __CPU_TO_LE_32(_x) (uint32_t)__NOSWAP32(_x)
+#define __LE_TO_CPU_32(_x) (uint32_t)__NOSWAP32(_x)
+#define __CPU_TO_BE_32(_x) (uint32_t)__SWAP32(_x)
+#define __BE_TO_CPU_32(_x) (uint32_t)__SWAP32(_x)
+
+#define __CPU_TO_LE_64(_x) (uint64_t)__NOSWAP64(_x)
+#define __LE_TO_CPU_64(_x) (uint64_t)__NOSWAP64(_x)
+#define __CPU_TO_BE_64(_x) (uint64_t)__SWAP64(_x)
+#define __BE_TO_CPU_64(_x) (uint64_t)__SWAP64(_x)
+
+#else
+
+#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set"
+
+#endif
+
+#define __NATIVE_8(_x) (uint8_t)(_x)
+
+/* Format string for printing an efx_byte_t */
+#define EFX_BYTE_FMT "0x%02x"
+
+/* Format string for printing an efx_word_t */
+#define EFX_WORD_FMT "0x%04x"
+
+/* Format string for printing an efx_dword_t */
+#define EFX_DWORD_FMT "0x%08x"
+
+/* Format string for printing an efx_qword_t */
+#define EFX_QWORD_FMT "0x%08x:%08x"
+
+/* Format string for printing an efx_oword_t */
+#define EFX_OWORD_FMT "0x%08x:%08x:%08x:%08x"
+
+/* Parameters for printing an efx_byte_t */
+#define EFX_BYTE_VAL(_byte) \
+ ((unsigned int)__NATIVE_8((_byte).eb_u8[0]))
+
+/* Parameters for printing an efx_word_t */
+#define EFX_WORD_VAL(_word) \
+ ((unsigned int)__LE_TO_CPU_16((_word).ew_u16[0]))
+
+/* Parameters for printing an efx_dword_t */
+#define EFX_DWORD_VAL(_dword) \
+ ((unsigned int)__LE_TO_CPU_32((_dword).ed_u32[0]))
+
+/* Parameters for printing an efx_qword_t */
+#define EFX_QWORD_VAL(_qword) \
+ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[1])), \
+ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[0]))
+
+/* Parameters for printing an efx_oword_t */
+#define EFX_OWORD_VAL(_oword) \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[3])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[2])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[1])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[0]))
+
+/*
+ * Stop lint complaining about some shifts.
+ */
+#ifdef __lint
+extern int fix_lint;
+#define FIX_LINT(_x) (_x + fix_lint)
+#else
+#define FIX_LINT(_x) (_x)
+#endif
+
+/*
+ * Extract bit field portion [low,high) from the native-endian element
+ * which contains bits [min,max).
+ *
+ * For example, suppose "element" represents the high 32 bits of a
+ * 64-bit value, and we wish to extract the bits belonging to the bit
+ * field occupying bits 28-45 of this 64-bit value.
+ *
+ * Then EFX_EXTRACT(_element, 32, 63, 28, 45) would give
+ *
+ * (_element) << 4
+ *
+ * The result will contain the relevant bits filled in over the range
+ * [0,high-low], with garbage in bits [high-low+1,...).
+ */
+#define EFX_EXTRACT_NATIVE(_element, _min, _max, _low, _high) \
+ ((FIX_LINT(_low > _max) || FIX_LINT(_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ ((_element) >> (_low - _min)) : \
+ ((_element) << (_min - _low))))
+
+/*
+ * Extract bit field portion [low,high) from the 64-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT64(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_64(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT32(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_32(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 16-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT16(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_16(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 8-bit
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT8(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__NATIVE_8(_element), _min, _max, _low, _high)
+
+#define EFX_EXTRACT_OWORD64(_oword, _low, _high) \
+ (EFX_EXTRACT64((_oword).eo_u64[0], FIX_LINT(0), FIX_LINT(63), \
+ _low, _high) | \
+ EFX_EXTRACT64((_oword).eo_u64[1], FIX_LINT(64), FIX_LINT(127), \
+ _low, _high))
+
+#define EFX_EXTRACT_OWORD32(_oword, _low, _high) \
+ (EFX_EXTRACT32((_oword).eo_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[1], FIX_LINT(32), FIX_LINT(63), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[2], FIX_LINT(64), FIX_LINT(95), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[3], FIX_LINT(96), FIX_LINT(127), \
+ _low, _high))
+
+#define EFX_EXTRACT_QWORD64(_qword, _low, _high) \
+ (EFX_EXTRACT64((_qword).eq_u64[0], FIX_LINT(0), FIX_LINT(63), \
+ _low, _high))
+
+#define EFX_EXTRACT_QWORD32(_qword, _low, _high) \
+ (EFX_EXTRACT32((_qword).eq_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high) | \
+ EFX_EXTRACT32((_qword).eq_u32[1], FIX_LINT(32), FIX_LINT(63), \
+ _low, _high))
+
+#define EFX_EXTRACT_DWORD(_dword, _low, _high) \
+ (EFX_EXTRACT32((_dword).ed_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high))
+
+#define EFX_EXTRACT_WORD(_word, _low, _high) \
+ (EFX_EXTRACT16((_word).ew_u16[0], FIX_LINT(0), FIX_LINT(15), \
+ _low, _high))
+
+#define EFX_EXTRACT_BYTE(_byte, _low, _high) \
+ (EFX_EXTRACT8((_byte).eb_u8[0], FIX_LINT(0), FIX_LINT(7), \
+ _low, _high))
+
+
+#define EFX_OWORD_FIELD64(_oword, _field) \
+ ((uint32_t)EFX_EXTRACT_OWORD64(_oword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_OWORD_FIELD32(_oword, _field) \
+ (EFX_EXTRACT_OWORD32(_oword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_QWORD_FIELD64(_qword, _field) \
+ ((uint32_t)EFX_EXTRACT_QWORD64(_qword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_QWORD_FIELD32(_qword, _field) \
+ (EFX_EXTRACT_QWORD32(_qword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_DWORD_FIELD(_dword, _field) \
+ (EFX_EXTRACT_DWORD(_dword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_WORD_FIELD(_word, _field) \
+ (EFX_EXTRACT_WORD(_word, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK16(_field))
+
+#define EFX_BYTE_FIELD(_byte, _field) \
+ (EFX_EXTRACT_BYTE(_byte, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK8(_field))
+
+
+#define EFX_OWORD_IS_EQUAL64(_oword_a, _oword_b) \
+ ((_oword_a).eo_u64[0] == (_oword_b).eo_u64[0] && \
+ (_oword_a).eo_u64[1] == (_oword_b).eo_u64[1])
+
+#define EFX_OWORD_IS_EQUAL32(_oword_a, _oword_b) \
+ ((_oword_a).eo_u32[0] == (_oword_b).eo_u32[0] && \
+ (_oword_a).eo_u32[1] == (_oword_b).eo_u32[1] && \
+ (_oword_a).eo_u32[2] == (_oword_b).eo_u32[2] && \
+ (_oword_a).eo_u32[3] == (_oword_b).eo_u32[3])
+
+#define EFX_QWORD_IS_EQUAL64(_qword_a, _qword_b) \
+ ((_qword_a).eq_u64[0] == (_qword_b).eq_u64[0])
+
+#define EFX_QWORD_IS_EQUAL32(_qword_a, _qword_b) \
+ ((_qword_a).eq_u32[0] == (_qword_b).eq_u32[0] && \
+ (_qword_a).eq_u32[1] == (_qword_b).eq_u32[1])
+
+#define EFX_DWORD_IS_EQUAL(_dword_a, _dword_b) \
+ ((_dword_a).ed_u32[0] == (_dword_b).ed_u32[0])
+
+#define EFX_WORD_IS_EQUAL(_word_a, _word_b) \
+ ((_word_a).ew_u16[0] == (_word_b).ew_u16[0])
+
+#define EFX_BYTE_IS_EQUAL(_byte_a, _byte_b) \
+ ((_byte_a).eb_u8[0] == (_byte_b).eb_u8[0])
+
+
+#define EFX_OWORD_IS_ZERO64(_oword) \
+ (((_oword).eo_u64[0] | \
+ (_oword).eo_u64[1]) == 0)
+
+#define EFX_OWORD_IS_ZERO32(_oword) \
+ (((_oword).eo_u32[0] | \
+ (_oword).eo_u32[1] | \
+ (_oword).eo_u32[2] | \
+ (_oword).eo_u32[3]) == 0)
+
+#define EFX_QWORD_IS_ZERO64(_qword) \
+ (((_qword).eq_u64[0]) == 0)
+
+#define EFX_QWORD_IS_ZERO32(_qword) \
+ (((_qword).eq_u32[0] | \
+ (_qword).eq_u32[1]) == 0)
+
+#define EFX_DWORD_IS_ZERO(_dword) \
+ (((_dword).ed_u32[0]) == 0)
+
+#define EFX_WORD_IS_ZERO(_word) \
+ (((_word).ew_u16[0]) == 0)
+
+#define EFX_BYTE_IS_ZERO(_byte) \
+ (((_byte).eb_u8[0]) == 0)
+
+
+#define EFX_OWORD_IS_SET64(_oword) \
+ (((_oword).eo_u64[0] & \
+ (_oword).eo_u64[1]) == ~((uint64_t)0))
+
+#define EFX_OWORD_IS_SET32(_oword) \
+ (((_oword).eo_u32[0] & \
+ (_oword).eo_u32[1] & \
+ (_oword).eo_u32[2] & \
+ (_oword).eo_u32[3]) == ~((uint32_t)0))
+
+#define EFX_QWORD_IS_SET64(_qword) \
+ (((_qword).eq_u64[0]) == ~((uint64_t)0))
+
+#define EFX_QWORD_IS_SET32(_qword) \
+ (((_qword).eq_u32[0] & \
+ (_qword).eq_u32[1]) == ~((uint32_t)0))
+
+#define EFX_DWORD_IS_SET(_dword) \
+ ((_dword).ed_u32[0] == ~((uint32_t)0))
+
+#define EFX_WORD_IS_SET(_word) \
+ ((_word).ew_u16[0] == ~((uint16_t)0))
+
+#define EFX_BYTE_IS_SET(_byte) \
+ ((_byte).eb_u8[0] == ~((uint8_t)0))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the bit field [low,high) that lies within
+ * the range [min,max).
+ */
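+
+/*
+ * Illustrative example: for a field occupying bits 28..45 of a 64-bit
+ * register assembled from two 32-bit elements, EFX_INSERT_NATIVE32(0, 31,
+ * 28, 45, _value) contributes ((_value) << 28) to the low element and
+ * EFX_INSERT_NATIVE32(32, 63, 28, 45, _value) contributes ((_value) >> 4)
+ * to the high element.
+ */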
+
+#define EFX_INSERT_NATIVE64(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ (((uint64_t)(_value)) << (_low - _min)) : \
+ (((uint64_t)(_value)) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE32(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ (((uint32_t)(_value)) << (_low - _min)) : \
+ (((uint32_t)(_value)) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE16(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ (uint16_t)((_low > _min) ? \
+ ((_value) << (_low - _min)) : \
+ ((_value) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE8(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ (uint8_t)((_low > _min) ? \
+ ((_value) << (_low - _min)) : \
+ ((_value) >> (_min - _low))))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE64(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE32(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE16(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE8(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+/*
+ * Construct bit field
+ *
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELDS64(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_64( \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS32(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_32( \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS16(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_16( \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS8(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __NATIVE_8( \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field10, _value10))
+
+#define EFX_POPULATE_OWORD64(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] = EFX_INSERT_FIELDS64(0, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[1] = EFX_INSERT_FIELDS64(64, 127, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_OWORD32(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[1] = EFX_INSERT_FIELDS32(32, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[2] = EFX_INSERT_FIELDS32(64, 95, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[3] = EFX_INSERT_FIELDS32(96, 127, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_QWORD64(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] = EFX_INSERT_FIELDS64(0, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_QWORD32(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[1] = EFX_INSERT_FIELDS32(32, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_DWORD(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_dword).ed_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_WORD(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_word).ew_u16[0] = EFX_INSERT_FIELDS16(0, 15, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_BYTE(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_byte).eb_u8[0] = EFX_INSERT_FIELDS8(0, 7, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Populate an octword field with various numbers of arguments */
+#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
+
+#define EFX_POPULATE_OWORD_9(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_OWORD_10(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_OWORD_8(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_OWORD_9(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_OWORD_7(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_OWORD_8(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_OWORD_6(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_OWORD_7(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_OWORD_5(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_OWORD_6(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_OWORD_4(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_OWORD_5(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_OWORD_3(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_OWORD_4(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_OWORD_2(_oword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_OWORD_3(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_OWORD_1(_oword, \
+ _field1, _value1) \
+ EFX_POPULATE_OWORD_2(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_OWORD(_oword) \
+ EFX_POPULATE_OWORD_1(_oword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_OWORD(_oword) \
+ EFX_POPULATE_OWORD_4(_oword, \
+ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff, \
+ EFX_DWORD_2, 0xffffffff, EFX_DWORD_3, 0xffffffff)
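+
+/*
+ * Usage sketch (mirroring siena_tx_qflush() in efx_tx.c); fields that are
+ * not named are left as zero:
+ *
+ *	efx_oword_t oword;
+ *
+ *	EFX_POPULATE_OWORD_2(oword, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ *	    FRF_AZ_TX_FLUSH_DESCQ, label);
+ */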
+
+/* Populate a quadword field with various numbers of arguments */
+#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
+
+#define EFX_POPULATE_QWORD_9(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_QWORD_10(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_QWORD_8(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_QWORD_9(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_QWORD_7(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_QWORD_8(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_QWORD_6(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_QWORD_7(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_QWORD_5(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_QWORD_6(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_QWORD_4(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_QWORD_5(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_QWORD_3(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_QWORD_4(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_QWORD_2(_qword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_QWORD_3(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_QWORD_1(_qword, \
+ _field1, _value1) \
+ EFX_POPULATE_QWORD_2(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_QWORD(_qword) \
+ EFX_POPULATE_QWORD_1(_qword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_QWORD(_qword) \
+ EFX_POPULATE_QWORD_2(_qword, \
+ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff)
+
+/* Populate a dword field with various numbers of arguments */
+#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
+
+#define EFX_POPULATE_DWORD_9(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_DWORD_10(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_DWORD_8(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_DWORD_9(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_DWORD_7(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_DWORD_8(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_DWORD_6(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_DWORD_7(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_DWORD_5(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_DWORD_6(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_DWORD_4(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_DWORD_5(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_DWORD_3(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_DWORD_4(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_DWORD_2(_dword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_DWORD_3(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_DWORD_1(_dword, \
+ _field1, _value1) \
+ EFX_POPULATE_DWORD_2(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_DWORD(_dword) \
+ EFX_POPULATE_DWORD_1(_dword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_DWORD(_dword) \
+ EFX_POPULATE_DWORD_1(_dword, \
+ EFX_DWORD_0, 0xffffffff)
+
+/* Populate a word field with various numbers of arguments */
+#define EFX_POPULATE_WORD_10 EFX_POPULATE_WORD
+
+#define EFX_POPULATE_WORD_9(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_WORD_10(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_WORD_8(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_WORD_9(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_WORD_7(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_WORD_8(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_WORD_6(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_WORD_7(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_WORD_5(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_WORD_6(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_WORD_4(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_WORD_5(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_WORD_3(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_WORD_4(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_WORD_2(_word, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_WORD_3(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_WORD_1(_word, \
+ _field1, _value1) \
+ EFX_POPULATE_WORD_2(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_WORD(_word) \
+ EFX_POPULATE_WORD_1(_word, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_WORD(_word) \
+ EFX_POPULATE_WORD_1(_word, \
+ EFX_WORD_0, 0xffff)
+
+/* Populate a byte field with various numbers of arguments */
+#define EFX_POPULATE_BYTE_10 EFX_POPULATE_BYTE
+
+#define EFX_POPULATE_BYTE_9(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_BYTE_10(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_BYTE_8(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_BYTE_9(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_BYTE_7(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_BYTE_8(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_BYTE_6(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_BYTE_7(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_BYTE_5(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_BYTE_6(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_BYTE_4(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_BYTE_5(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_BYTE_3(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_BYTE_4(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_BYTE_2(_byte, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_BYTE_3(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_BYTE_1(_byte, \
+ _field1, _value1) \
+ EFX_POPULATE_BYTE_2(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_BYTE(_byte) \
+ EFX_POPULATE_BYTE_1(_byte, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_BYTE(_byte) \
+ EFX_POPULATE_BYTE_1(_byte, \
+ EFX_BYTE_0, 0xff)
+
+/*
+ * Modify a named field within an already-populated structure. Used
+ * for read-modify-write operations.
+ */
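+
+/*
+ * For example (mirroring siena_tx_init() in efx_tx.c), a single register
+ * field can be updated without disturbing its neighbours:
+ *
+ *	EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+ *	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ *	EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+ */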
+
+#define EFX_INSERT_FIELD64(_min, _max, _field, _value) \
+ __CPU_TO_LE_64(EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD32(_min, _max, _field, _value) \
+ __CPU_TO_LE_32(EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD16(_min, _max, _field, _value) \
+ __CPU_TO_LE_16(EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD8(_min, _max, _field, _value) \
+ __NATIVE_8(EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value))
+
+#define EFX_INPLACE_MASK64(_min, _max, _field) \
+ EFX_INSERT_FIELD64(_min, _max, _field, EFX_MASK64(_field))
+
+#define EFX_INPLACE_MASK32(_min, _max, _field) \
+ EFX_INSERT_FIELD32(_min, _max, _field, EFX_MASK32(_field))
+
+#define EFX_INPLACE_MASK16(_min, _max, _field) \
+ EFX_INSERT_FIELD16(_min, _max, _field, EFX_MASK16(_field))
+
+#define EFX_INPLACE_MASK8(_min, _max, _field) \
+ EFX_INSERT_FIELD8(_min, _max, _field, EFX_MASK8(_field))
+
+#define EFX_SET_OWORD_FIELD64(_oword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] = (((_oword).eo_u64[0] & \
+ ~EFX_INPLACE_MASK64(0, 63, _field)) | \
+ EFX_INSERT_FIELD64(0, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[1] = (((_oword).eo_u64[1] & \
+ ~EFX_INPLACE_MASK64(64, 127, _field)) | \
+ EFX_INSERT_FIELD64(64, 127, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_OWORD_FIELD32(_oword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] = (((_oword).eo_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[1] = (((_oword).eo_u32[1] & \
+ ~EFX_INPLACE_MASK32(32, 63, _field)) | \
+ EFX_INSERT_FIELD32(32, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[2] = (((_oword).eo_u32[2] & \
+ ~EFX_INPLACE_MASK32(64, 95, _field)) | \
+ EFX_INSERT_FIELD32(64, 95, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[3] = (((_oword).eo_u32[3] & \
+ ~EFX_INPLACE_MASK32(96, 127, _field)) | \
+ EFX_INSERT_FIELD32(96, 127, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_FIELD64(_qword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] = (((_qword).eq_u64[0] & \
+ ~EFX_INPLACE_MASK64(0, 63, _field)) | \
+ EFX_INSERT_FIELD64(0, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_FIELD32(_qword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] = (((_qword).eq_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[1] = (((_qword).eq_u32[1] & \
+ ~EFX_INPLACE_MASK32(32, 63, _field)) | \
+ EFX_INSERT_FIELD32(32, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_DWORD_FIELD(_dword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_dword).ed_u32[0] = (((_dword).ed_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_WORD_FIELD(_word, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_word).ew_u16[0] = (((_word).ew_u16[0] & \
+ ~EFX_INPLACE_MASK16(0, 15, _field)) | \
+ EFX_INSERT_FIELD16(0, 15, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_BYTE_FIELD(_byte, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_byte).eb_u8[0] = (((_byte).eb_u8[0] & \
+ ~EFX_INPLACE_MASK8(0, 7, _field)) | \
+ EFX_INSERT_FIELD8(0, 7, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Set or clear a numbered bit within an octword.
+ */
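+
+/*
+ * Bit numbers run little-endian across the whole octword; for example,
+ * EFX_SET_OWORD_BIT32(_oword, 70) ORs __CPU_TO_LE_32(1 << 6) into
+ * eo_u32[2] and leaves the other three elements unchanged.
+ */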
+
+#define EFX_SHIFT64_0(_bit) \
+ (((_bit) < 64) ? ((uint64_t)1 << (_bit)) : 0U)
+
+#define EFX_SHIFT64(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 64) ? \
+ ((uint64_t)1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT32_0(_bit) \
+ ((_bit) < (32) ? ((uint32_t)1 << (_bit)) : 0U)
+
+#define EFX_SHIFT32(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 32) ? \
+ ((uint32_t)1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT16(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 16) ? \
+ (uint16_t)(1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT8(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 8) ? \
+ (uint8_t)(1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SET_OWORD_BIT64(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64_0(_bit)); \
+ (_oword).eo_u64[1] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_OWORD_BIT32(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32_0(_bit)); \
+ (_oword).eo_u32[1] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ (_oword).eo_u32[2] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64))); \
+ (_oword).eo_u32[3] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_OWORD_BIT64(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64_0(_bit)); \
+ (_oword).eo_u64[1] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(64))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_OWORD_BIT32(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32_0(_bit)); \
+ (_oword).eo_u32[1] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ (_oword).eo_u32[2] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(64))); \
+ (_oword).eo_u32[3] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(96))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_OWORD_BIT64(_oword, _bit) \
+ (((_oword).eo_u64[0] & \
+ __CPU_TO_LE_64(EFX_SHIFT64_0(_bit))) || \
+ ((_oword).eo_u64[1] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64)))))
+
+#define EFX_TEST_OWORD_BIT32(_oword, _bit) \
+ (((_oword).eo_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32_0(_bit))) || \
+ ((_oword).eo_u32[1] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32)))) || \
+ ((_oword).eo_u32[2] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64)))) || \
+ ((_oword).eo_u32[3] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96)))))
+
+
+#define EFX_SET_QWORD_BIT64(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64_0(_bit)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_BIT32(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32_0(_bit)); \
+ (_qword).eq_u32[1] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_QWORD_BIT64(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64_0(_bit)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_QWORD_BIT32(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32_0(_bit)); \
+ (_qword).eq_u32[1] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_QWORD_BIT64(_qword, _bit) \
+ (((_qword).eq_u64[0] & \
+ __CPU_TO_LE_64(EFX_SHIFT64_0(_bit))) != 0)
+
+#define EFX_TEST_QWORD_BIT32(_qword, _bit) \
+ (((_qword).eq_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32_0(_bit))) || \
+ ((_qword).eq_u32[1] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32)))))
+
+
+#define EFX_SET_DWORD_BIT(_dword, _bit) \
+ do { \
+ (_dword).ed_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32_0(_bit)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_DWORD_BIT(_dword, _bit) \
+ do { \
+ (_dword).ed_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32_0(_bit)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_DWORD_BIT(_dword, _bit) \
+ (((_dword).ed_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32_0(_bit))) != 0)
+
+
+#define EFX_SET_WORD_BIT(_word, _bit) \
+ do { \
+ (_word).ew_u16[0] |= \
+ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_WORD_BIT(_word, _bit) \
+ do { \
+		(_word).ew_u16[0] &=					\
+ __CPU_TO_LE_16(~EFX_SHIFT16(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_WORD_BIT(_word, _bit) \
+ (((_word).ew_u16[0] & \
+ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_SET_BYTE_BIT(_byte, _bit) \
+ do { \
+ (_byte).eb_u8[0] |= \
+ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_BYTE_BIT(_byte, _bit) \
+ do { \
+ (_byte).eb_u8[0] &= \
+ __NATIVE_8(~EFX_SHIFT8(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_BYTE_BIT(_byte, _bit) \
+ (((_byte).eb_u8[0] & \
+ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_OR_OWORD64(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u64[0] |= (_oword2).eo_u64[0]; \
+ (_oword1).eo_u64[1] |= (_oword2).eo_u64[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_OWORD32(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u32[0] |= (_oword2).eo_u32[0]; \
+ (_oword1).eo_u32[1] |= (_oword2).eo_u32[1]; \
+ (_oword1).eo_u32[2] |= (_oword2).eo_u32[2]; \
+ (_oword1).eo_u32[3] |= (_oword2).eo_u32[3]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_OWORD64(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u64[0] &= (_oword2).eo_u64[0]; \
+ (_oword1).eo_u64[1] &= (_oword2).eo_u64[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_OWORD32(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u32[0] &= (_oword2).eo_u32[0]; \
+ (_oword1).eo_u32[1] &= (_oword2).eo_u32[1]; \
+ (_oword1).eo_u32[2] &= (_oword2).eo_u32[2]; \
+ (_oword1).eo_u32[3] &= (_oword2).eo_u32[3]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_QWORD64(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u64[0] |= (_qword2).eq_u64[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_QWORD32(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u32[0] |= (_qword2).eq_u32[0]; \
+ (_qword1).eq_u32[1] |= (_qword2).eq_u32[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_QWORD64(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u64[0] &= (_qword2).eq_u64[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_QWORD32(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u32[0] &= (_qword2).eq_u32[0]; \
+ (_qword1).eq_u32[1] &= (_qword2).eq_u32[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_DWORD(_dword1, _dword2) \
+ do { \
+ (_dword1).ed_u32[0] |= (_dword2).ed_u32[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_DWORD(_dword1, _dword2) \
+ do { \
+ (_dword1).ed_u32[0] &= (_dword2).ed_u32[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_WORD(_word1, _word2) \
+ do { \
+ (_word1).ew_u16[0] |= (_word2).ew_u16[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_WORD(_word1, _word2) \
+ do { \
+ (_word1).ew_u16[0] &= (_word2).ew_u16[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_BYTE(_byte1, _byte2) \
+ do { \
+ (_byte1).eb_u8[0] |= (_byte2).eb_u8[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_BYTE(_byte1, _byte2) \
+ do { \
+ (_byte1).eb_u8[0] &= (_byte2).eb_u8[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#if EFSYS_USE_UINT64
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
+#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL64
+#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL64
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
+#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET64
+#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET64
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
+#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT64
+#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT64
+#define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT64
+#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT64
+#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT64
+#define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT64
+#define EFX_OR_OWORD EFX_OR_OWORD64
+#define EFX_AND_OWORD EFX_AND_OWORD64
+#define EFX_OR_QWORD EFX_OR_QWORD64
+#define EFX_AND_QWORD EFX_AND_QWORD64
+#else
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
+#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL32
+#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL32
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
+#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET32
+#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET32
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
+#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT32
+#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT32
+#define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT32
+#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT32
+#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT32
+#define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT32
+#define EFX_OR_OWORD EFX_OR_OWORD32
+#define EFX_AND_OWORD EFX_AND_OWORD32
+#define EFX_OR_QWORD EFX_OR_QWORD32
+#define EFX_AND_QWORD EFX_AND_QWORD32
+#endif
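+
+/*
+ * Illustrative usage sketch (editorial, not part of this header):
+ * assuming the efx_dword_t type declared earlier in this file, a caller
+ * can set, test and clear individual bits of a previously zeroed dword:
+ *
+ *	efx_dword_t dword;
+ *
+ *	EFX_SET_DWORD_BIT(dword, 3);
+ *	if (EFX_TEST_DWORD_BIT(dword, 3))
+ *		EFX_CLEAR_DWORD_BIT(dword, 3);
+ *
+ * The generic EFX_*_OWORD_BIT and EFX_*_QWORD_BIT names above map onto
+ * the 64-bit or 32-bit variants according to EFSYS_USE_UINT64.
+ */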
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_TYPES_H */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_vpd.c b/usr/src/uts/common/io/sfxge/common/efx_vpd.c
new file mode 100644
index 0000000000..344126b16d
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_vpd.c
@@ -0,0 +1,1013 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_VPD
+
+#define TAG_TYPE_LBN 7
+#define TAG_TYPE_WIDTH 1
+#define TAG_TYPE_LARGE_ITEM_DECODE 1
+#define TAG_TYPE_SMALL_ITEM_DECODE 0
+
+#define TAG_SMALL_ITEM_NAME_LBN 3
+#define TAG_SMALL_ITEM_NAME_WIDTH 4
+#define TAG_SMALL_ITEM_SIZE_LBN 0
+#define TAG_SMALL_ITEM_SIZE_WIDTH 3
+
+#define TAG_LARGE_ITEM_NAME_LBN 0
+#define TAG_LARGE_ITEM_NAME_WIDTH 7
+
+#define TAG_NAME_END_DECODE 0x0f
+#define TAG_NAME_ID_STRING_DECODE 0x02
+#define TAG_NAME_VPD_R_DECODE 0x10
+#define TAG_NAME_VPD_W_DECODE 0x11
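+
+/*
+ * These definitions follow the PCI VPD resource data tag format: a small
+ * item tag is a single byte with bit 7 clear, the item name in bits 6:3
+ * and the payload length in bits 2:0; a large item tag has bit 7 set,
+ * the item name in bits 6:0, and is followed by a two-byte little-endian
+ * payload length.
+ */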
+
+#if EFSYS_OPT_SIENA
+
+static const efx_vpd_ops_t __efx_vpd_siena_ops = {
+ siena_vpd_init, /* evpdo_init */
+ siena_vpd_size, /* evpdo_size */
+ siena_vpd_read, /* evpdo_read */
+ siena_vpd_verify, /* evpdo_verify */
+ siena_vpd_reinit, /* evpdo_reinit */
+ siena_vpd_get, /* evpdo_get */
+ siena_vpd_set, /* evpdo_set */
+ siena_vpd_next, /* evpdo_next */
+ siena_vpd_write, /* evpdo_write */
+ siena_vpd_fini, /* evpdo_fini */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_vpd_ops_t __efx_vpd_ef10_ops = {
+ ef10_vpd_init, /* evpdo_init */
+ ef10_vpd_size, /* evpdo_size */
+ ef10_vpd_read, /* evpdo_read */
+ ef10_vpd_verify, /* evpdo_verify */
+ ef10_vpd_reinit, /* evpdo_reinit */
+ ef10_vpd_get, /* evpdo_get */
+ ef10_vpd_set, /* evpdo_set */
+ ef10_vpd_next, /* evpdo_next */
+ ef10_vpd_write, /* evpdo_write */
+ ef10_vpd_fini, /* evpdo_fini */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_vpd_init(
+ __in efx_nic_t *enp)
+{
+ const efx_vpd_ops_t *evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_VPD));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ evpdop = &__efx_vpd_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (evpdop->evpdo_init != NULL) {
+ if ((rc = evpdop->evpdo_init(enp)) != 0)
+ goto fail2;
+ }
+
+ enp->en_evpdop = evpdop;
+ enp->en_mod_flags |= EFX_MOD_VPD;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_size(enp, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_read(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_verify(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if (evpdop->evpdo_reinit == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = evpdop->evpdo_reinit(enp, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_get(enp, data, size, evvp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_set(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_set(enp, data, size, evvp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_next(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_next(enp, data, size, evvp, contp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_write(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_vpd_next_tag(
+ __in caddr_t data,
+ __in size_t size,
+ __inout unsigned int *offsetp,
+ __out efx_vpd_tag_t *tagp,
+ __out uint16_t *lengthp)
+{
+ efx_byte_t byte;
+ efx_word_t word;
+ uint8_t name;
+ uint16_t length;
+ size_t headlen;
+ efx_rc_t rc;
+
+ if (*offsetp >= size) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ EFX_POPULATE_BYTE_1(byte, EFX_BYTE_0, data[*offsetp]);
+
+ switch (EFX_BYTE_FIELD(byte, TAG_TYPE)) {
+ case TAG_TYPE_SMALL_ITEM_DECODE:
+ headlen = 1;
+
+ name = EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_NAME);
+ length = (uint16_t)EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_SIZE);
+
+ break;
+
+ case TAG_TYPE_LARGE_ITEM_DECODE:
+ headlen = 3;
+
+ if (*offsetp + headlen > size) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ name = EFX_BYTE_FIELD(byte, TAG_LARGE_ITEM_NAME);
+ EFX_POPULATE_WORD_2(word,
+ EFX_BYTE_0, data[*offsetp + 1],
+ EFX_BYTE_1, data[*offsetp + 2]);
+ length = EFX_WORD_FIELD(word, EFX_WORD_0);
+
+ break;
+
+ default:
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ if (*offsetp + headlen + length > size) {
+ rc = EFAULT;
+ goto fail3;
+ }
+
+ EFX_STATIC_ASSERT(TAG_NAME_END_DECODE == EFX_VPD_END);
+ EFX_STATIC_ASSERT(TAG_NAME_ID_STRING_DECODE == EFX_VPD_ID);
+ EFX_STATIC_ASSERT(TAG_NAME_VPD_R_DECODE == EFX_VPD_RO);
+ EFX_STATIC_ASSERT(TAG_NAME_VPD_W_DECODE == EFX_VPD_RW);
+ if (name != EFX_VPD_END && name != EFX_VPD_ID &&
+ name != EFX_VPD_RO) {
+ rc = EFAULT;
+ goto fail4;
+ }
+
+ *tagp = name;
+ *lengthp = length;
+ *offsetp += headlen;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_vpd_next_keyword(
+ __in_bcount(size) caddr_t tag,
+ __in size_t size,
+ __in unsigned int pos,
+ __out efx_vpd_keyword_t *keywordp,
+ __out uint8_t *lengthp)
+{
+ efx_vpd_keyword_t keyword;
+ uint8_t length;
+ efx_rc_t rc;
+
+ if (pos + 3U > size) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ keyword = EFX_VPD_KEYWORD(tag[pos], tag[pos + 1]);
+ length = tag[pos + 2];
+
+ if (length == 0 || pos + 3U + length > size) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ *keywordp = keyword;
+ *lengthp = length;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_length(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *lengthp)
+{
+ efx_vpd_tag_t tag;
+ unsigned int offset;
+ uint16_t taglen;
+ efx_rc_t rc;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ offset += taglen;
+ if (tag == EFX_VPD_END)
+ break;
+ }
+
+ *lengthp = offset;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_verify(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt boolean_t *cksummedp)
+{
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int i;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t cksum;
+ boolean_t cksummed = B_FALSE;
+ efx_rc_t rc;
+
+	/*
+	 * Parse every tag and keyword in the existing VPD. If the RV checksum
+	 * is present, assert that it is correct (the bytes up to and including
+	 * the RV value must sum to zero) and that it is the final keyword in
+	 * the RO block.
+	 */
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ if (tag == EFX_VPD_END)
+ break;
+ else if (tag == EFX_VPD_ID)
+ goto done;
+
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ /* RV keyword must be the last in the block */
+ if (cksummed) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail3;
+
+ if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ cksum = 0;
+ for (i = 0; i < offset + pos + 4; i++)
+ cksum += data[i];
+
+ if (cksum != 0) {
+ rc = EFAULT;
+ goto fail4;
+ }
+
+ cksummed = B_TRUE;
+ }
+ }
+
+ done:
+ offset += taglen;
+ }
+
+ if (!cksummed) {
+ rc = EFAULT;
+ goto fail5;
+ }
+
+ if (cksummedp != NULL)
+ *cksummedp = cksummed;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint8_t __efx_vpd_blank_pid[] = {
+ /* Large resource type ID length 1 */
+ 0x82, 0x01, 0x00,
+ /* Product name ' ' */
+ 0x32,
+};
+
+static uint8_t __efx_vpd_blank_r[] = {
+ /* Large resource type VPD-R length 4 */
+ 0x90, 0x04, 0x00,
+ /* RV keyword length 1 */
+ 'R', 'V', 0x01,
+ /* RV payload checksum */
+ 0x00,
+};
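+
+/*
+ * efx_vpd_hunk_reinit() below rebuilds a minimal VPD image from these
+ * templates: an optional blank ID string tag, a VPD-R tag containing only
+ * the mandatory RV checksum keyword, and a trailing small-item END tag,
+ * with the RV byte adjusted so that the bytes up to and including the RV
+ * value sum to zero.
+ */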
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_reinit(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t wantpid)
+{
+ unsigned int offset = 0;
+ unsigned int pos;
+ efx_byte_t byte;
+ uint8_t cksum;
+ efx_rc_t rc;
+
+ if (size < 0x100) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if (wantpid) {
+ (void) memcpy(data + offset, __efx_vpd_blank_pid,
+ sizeof (__efx_vpd_blank_pid));
+ offset += sizeof (__efx_vpd_blank_pid);
+ }
+
+ (void) memcpy(data + offset, __efx_vpd_blank_r,
+ sizeof (__efx_vpd_blank_r));
+ offset += sizeof (__efx_vpd_blank_r);
+
+ /* Update checksum */
+ cksum = 0;
+ for (pos = 0; pos < offset; pos++)
+ cksum += data[pos];
+ data[offset - 1] -= cksum;
+
+ /* Append trailing tag */
+ EFX_POPULATE_BYTE_3(byte,
+ TAG_TYPE, TAG_TYPE_SMALL_ITEM_DECODE,
+ TAG_SMALL_ITEM_NAME, TAG_NAME_END_DECODE,
+ TAG_SMALL_ITEM_SIZE, 0);
+ data[offset] = EFX_BYTE_FIELD(byte, EFX_BYTE_0);
+ offset++;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_next(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_tag_t *tagp,
+ __out efx_vpd_keyword_t *keywordp,
+ __out_opt unsigned int *payloadp,
+ __out_opt uint8_t *paylenp,
+ __inout unsigned int *contp)
+{
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword = 0;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int index;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t paylen;
+ efx_rc_t rc;
+
+ offset = index = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+
+ if (tag == EFX_VPD_END) {
+ keyword = 0;
+ paylen = 0;
+ index = 0;
+ break;
+ }
+
+ if (tag == EFX_VPD_ID) {
+ if (index++ == *contp) {
+ EFSYS_ASSERT3U(taglen, <, 0x100);
+ keyword = 0;
+ paylen = (uint8_t)MIN(taglen, 0xff);
+
+ goto done;
+ }
+ } else {
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail2;
+
+ if (index++ == *contp) {
+ offset += pos + 3;
+ paylen = keylen;
+
+ goto done;
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+done:
+ *tagp = tag;
+ *keywordp = keyword;
+ if (payloadp != NULL)
+ *payloadp = offset;
+ if (paylenp != NULL)
+ *paylenp = paylen;
+
+ *contp = index;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_get(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_tag_t tag,
+ __in efx_vpd_keyword_t keyword,
+ __out unsigned int *payloadp,
+ __out uint8_t *paylenp)
+{
+ efx_vpd_tag_t itag;
+ efx_vpd_keyword_t ikeyword;
+ unsigned int offset;
+ unsigned int pos;
+ uint16_t taglen;
+ uint8_t keylen;
+ efx_rc_t rc;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &itag, &taglen)) != 0)
+ goto fail1;
+ if (itag == EFX_VPD_END)
+ break;
+
+ if (itag == tag) {
+ if (itag == EFX_VPD_ID) {
+ EFSYS_ASSERT3U(taglen, <, 0x100);
+
+ *paylenp = (uint8_t)MIN(taglen, 0xff);
+ *payloadp = offset;
+ return (0);
+ }
+
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &ikeyword, &keylen)) != 0)
+ goto fail2;
+
+ if (ikeyword == keyword) {
+ *paylenp = keylen;
+ *payloadp = offset + pos + 3;
+ return (0);
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+	/* Not an error: the requested tag/keyword is simply not present */
+ return (ENOENT);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_set(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_word_t word;
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int taghead;
+ unsigned int source;
+ unsigned int dest;
+ unsigned int i;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t cksum;
+ size_t used;
+ efx_rc_t rc;
+
+ switch (evvp->evv_tag) {
+ case EFX_VPD_ID:
+ if (evvp->evv_keyword != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Can't delete the ID keyword */
+ if (evvp->evv_length == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ case EFX_VPD_RO:
+ if (evvp->evv_keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Determine total size of all current tags */
+ if ((rc = efx_vpd_hunk_length(data, size, &used)) != 0)
+ goto fail2;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ taghead = offset;
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail3;
+ if (tag == EFX_VPD_END)
+ break;
+ else if (tag != evvp->evv_tag) {
+ offset += taglen;
+ continue;
+ }
+
+ /* We only support modifying large resource tags */
+ if (offset - taghead != 3) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+		/*
+		 * Work out the offset of the byte immediately after the
+		 * old (=source) and new (=dest) keyword/tag.
+		 */
+ pos = 0;
+ if (tag == EFX_VPD_ID) {
+ source = offset + taglen;
+ dest = offset + evvp->evv_length;
+ goto check_space;
+ }
+
+ EFSYS_ASSERT3U(tag, ==, EFX_VPD_RO);
+ source = dest = 0;
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail5;
+
+ if (keyword == evvp->evv_keyword &&
+ evvp->evv_length == 0) {
+ /* Deleting this keyword */
+ source = offset + pos + 3 + keylen;
+ dest = offset + pos;
+ break;
+
+ } else if (keyword == evvp->evv_keyword) {
+ /* Adjusting this keyword */
+ source = offset + pos + 3 + keylen;
+ dest = offset + pos + 3 + evvp->evv_length;
+ break;
+
+ } else if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ /* The RV keyword must be at the end */
+ EFSYS_ASSERT3U(pos + 3 + keylen, ==, taglen);
+
+				/*
+				 * The keyword doesn't already exist. If the
+				 * user is deleting a non-existent keyword
+				 * then this is a no-op.
+				 */
+ if (evvp->evv_length == 0)
+ return (0);
+
+ /* Insert this keyword before the RV keyword */
+ source = offset + pos;
+ dest = offset + pos + 3 + evvp->evv_length;
+ break;
+ }
+ }
+
+ check_space:
+ if (used + dest > size + source) {
+ rc = ENOSPC;
+ goto fail6;
+ }
+
+ /* Move trailing data */
+ (void) memmove(data + dest, data + source, used - source);
+
+ /* Copy contents */
+ (void) memcpy(data + dest - evvp->evv_length, evvp->evv_value,
+ evvp->evv_length);
+
+ /* Insert new keyword header if required */
+ if (tag != EFX_VPD_ID && evvp->evv_length > 0) {
+ EFX_POPULATE_WORD_1(word, EFX_WORD_0,
+ evvp->evv_keyword);
+ data[offset + pos + 0] =
+ EFX_WORD_FIELD(word, EFX_BYTE_0);
+ data[offset + pos + 1] =
+ EFX_WORD_FIELD(word, EFX_BYTE_1);
+ data[offset + pos + 2] = evvp->evv_length;
+ }
+
+ /* Modify tag length (large resource type) */
+ taglen += (dest - source);
+ EFX_POPULATE_WORD_1(word, EFX_WORD_0, taglen);
+ data[offset - 2] = EFX_WORD_FIELD(word, EFX_BYTE_0);
+ data[offset - 1] = EFX_WORD_FIELD(word, EFX_BYTE_1);
+
+ goto checksum;
+ }
+
+ /* Unable to find the matching tag */
+ rc = ENOENT;
+ goto fail7;
+
+checksum:
+	/* Find the RV keyword, and update the checksum */
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail8;
+ if (tag == EFX_VPD_END)
+ break;
+ if (tag == EFX_VPD_RO) {
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail9;
+
+ if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ cksum = 0;
+ for (i = 0; i < offset + pos + 3; i++)
+ cksum += data[i];
+ data[i] = -cksum;
+ break;
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+	/* Fill the unused portion with 0xff */
+ (void) memset(data + offset + taglen, 0xff, size - offset - taglen);
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if (evpdop->evpdo_fini != NULL)
+ evpdop->evpdo_fini(enp);
+
+ enp->en_evpdop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_VPD;
+}
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/usr/src/uts/common/io/sfxge/common/efx_wol.c b/usr/src/uts/common/io/sfxge/common/efx_wol.c
new file mode 100644
index 0000000000..c66133a0c7
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/efx_wol.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_WOL
+
+ __checkReturn efx_rc_t
+efx_wol_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_WOL));
+
+ if (~(encp->enc_features) & EFX_FEATURE_WOL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ /* Current implementation is Siena specific */
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ enp->en_mod_flags |= EFX_MOD_WOL;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_wol_filter_clear(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_WOL_FILTER_RESET_IN_LEN,
+ MC_CMD_WOL_FILTER_RESET_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_WOL_FILTER_RESET;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_WOL_FILTER_RESET_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_WOL_FILTER_RESET_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_RESET_IN_MASK,
+ MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS |
+ MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_wol_filter_add(
+ __in efx_nic_t *enp,
+ __in efx_wol_type_t type,
+ __in efx_wol_param_t *paramp,
+ __out uint32_t *filter_idp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_WOL_FILTER_SET_IN_LEN,
+ MC_CMD_WOL_FILTER_SET_OUT_LEN)];
+ efx_byte_t link_mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_WOL_FILTER_SET;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_WOL_FILTER_SET_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_WOL_FILTER_SET_OUT_LEN;
+
+ switch (type) {
+ case EFX_WOL_TYPE_MAGIC:
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_FILTER_MODE,
+ MC_CMD_FILTER_MODE_SIMPLE);
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_WOL_TYPE,
+ MC_CMD_WOL_TYPE_MAGIC);
+ EFX_MAC_ADDR_COPY(
+ MCDI_IN2(req, uint8_t, WOL_FILTER_SET_IN_MAGIC_MAC),
+ paramp->ewp_magic.mac_addr);
+ break;
+
+ case EFX_WOL_TYPE_BITMAP: {
+ uint32_t swapped = 0;
+ efx_dword_t *dwordp;
+ unsigned int pos, bit;
+
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_FILTER_MODE,
+ MC_CMD_FILTER_MODE_SIMPLE);
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_WOL_TYPE,
+ MC_CMD_WOL_TYPE_BITMAP);
+
+		/*
+		 * The MC expects the bitmap mask to be bit-swapped
+		 * within each 32-bit word(!)
+		 */
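+		/*
+		 * Illustrative example: the loop below reverses the bits of
+		 * each mask byte and the byte order within each group of
+		 * four bytes, so a mask with only bit 0 of byte 0 set
+		 * (mask[0] == 0x01) is passed to the MC as 0x80000000.
+		 */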
+
+ dwordp = MCDI_IN2(req, efx_dword_t,
+ WOL_FILTER_SET_IN_BITMAP_MASK);
+
+ EFSYS_ASSERT3U(EFX_WOL_BITMAP_MASK_SIZE % 4, ==, 0);
+
+ for (pos = 0; pos < EFX_WOL_BITMAP_MASK_SIZE; ++pos) {
+ uint8_t native = paramp->ewp_bitmap.mask[pos];
+
+ for (bit = 0; bit < 8; ++bit) {
+ swapped <<= 1;
+ swapped |= (native & 0x1);
+ native >>= 1;
+ }
+
+ if ((pos & 3) == 3) {
+ EFX_POPULATE_DWORD_1(dwordp[pos >> 2],
+ EFX_DWORD_0, swapped);
+ swapped = 0;
+ }
+ }
+
+ (void) memcpy(MCDI_IN2(req, uint8_t,
+ WOL_FILTER_SET_IN_BITMAP_BITMAP),
+ paramp->ewp_bitmap.value,
+ sizeof (paramp->ewp_bitmap.value));
+
+ EFSYS_ASSERT3U(paramp->ewp_bitmap.value_len, <=,
+ sizeof (paramp->ewp_bitmap.value));
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_BITMAP_LEN,
+ paramp->ewp_bitmap.value_len);
+ }
+ break;
+
+ case EFX_WOL_TYPE_LINK:
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_FILTER_MODE,
+ MC_CMD_FILTER_MODE_SIMPLE);
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_WOL_TYPE,
+ MC_CMD_WOL_TYPE_LINK);
+
+ EFX_ZERO_BYTE(link_mask);
+ EFX_SET_BYTE_FIELD(link_mask, MC_CMD_WOL_FILTER_SET_IN_LINK_UP,
+ 1);
+ MCDI_IN_SET_BYTE(req, WOL_FILTER_SET_IN_LINK_MASK,
+ link_mask.eb_u8[0]);
+ break;
+
+ default:
+ EFSYS_ASSERT3U(type, !=, type);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *filter_idp = MCDI_OUT_DWORD(req, WOL_FILTER_SET_OUT_FILTER_ID);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_wol_filter_remove(
+ __in efx_nic_t *enp,
+ __in uint32_t filter_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_WOL_FILTER_REMOVE_IN_LEN,
+ MC_CMD_WOL_FILTER_REMOVE_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_WOL_FILTER_REMOVE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_WOL_FILTER_REMOVE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_WOL_FILTER_REMOVE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, WOL_FILTER_REMOVE_IN_FILTER_ID, filter_id);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lightsout_offload_add(
+ __in efx_nic_t *enp,
+ __in efx_lightsout_offload_type_t type,
+ __in efx_lightsout_offload_param_t *paramp,
+ __out uint32_t *filter_idp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN,
+ MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN),
+ MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ADD_LIGHTSOUT_OFFLOAD;
+ req.emr_in_buf = payload;
+ req.emr_in_length = sizeof (type);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN;
+
+ switch (type) {
+ case EFX_LIGHTSOUT_OFFLOAD_TYPE_ARP:
+ req.emr_in_length = MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN;
+
+ MCDI_IN_SET_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
+ MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t,
+ ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC),
+ paramp->elop_arp.mac_addr);
+ MCDI_IN_SET_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP,
+ paramp->elop_arp.ip);
+ break;
+ case EFX_LIGHTSOUT_OFFLOAD_TYPE_NS:
+ req.emr_in_length = MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN;
+
+ MCDI_IN_SET_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
+ MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t,
+ ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC),
+ paramp->elop_ns.mac_addr);
+ (void) memcpy(MCDI_IN2(req, uint8_t,
+ ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6),
+ paramp->elop_ns.solicited_node,
+ sizeof (paramp->elop_ns.solicited_node));
+ (void) memcpy(MCDI_IN2(req, uint8_t,
+ ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6),
+ paramp->elop_ns.ip, sizeof (paramp->elop_ns.ip));
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *filter_idp = MCDI_OUT_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lightsout_offload_remove(
+ __in efx_nic_t *enp,
+ __in efx_lightsout_offload_type_t type,
+ __in uint32_t filter_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN,
+ MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN;
+
+ switch (type) {
+ case EFX_LIGHTSOUT_OFFLOAD_TYPE_ARP:
+ MCDI_IN_SET_DWORD(req, REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
+ MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP);
+ break;
+ case EFX_LIGHTSOUT_OFFLOAD_TYPE_NS:
+ MCDI_IN_SET_DWORD(req, REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
+ MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS);
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ MCDI_IN_SET_DWORD(req, REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID,
+ filter_id);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ void
+efx_wol_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);
+
+ enp->en_mod_flags &= ~EFX_MOD_WOL;
+}
+
+#endif /* EFSYS_OPT_WOL */
diff --git a/usr/src/uts/common/io/sfxge/common/hunt_impl.h b/usr/src/uts/common/io/sfxge/common/hunt_impl.h
new file mode 100644
index 0000000000..ac219d61a5
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/hunt_impl.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_HUNT_IMPL_H
+#define _SYS_HUNT_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+#include "efx_mcdi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Missing register definitions */
+#ifndef ER_DZ_TX_PIOBUF_OFST
+#define ER_DZ_TX_PIOBUF_OFST 0x00001000
+#endif
+#ifndef ER_DZ_TX_PIOBUF_STEP
+#define ER_DZ_TX_PIOBUF_STEP 8192
+#endif
+#ifndef ER_DZ_TX_PIOBUF_ROWS
+#define ER_DZ_TX_PIOBUF_ROWS 2048
+#endif
+
+#ifndef ER_DZ_TX_PIOBUF_SIZE
+#define ER_DZ_TX_PIOBUF_SIZE 2048
+#endif
+
+#define HUNT_PIOBUF_NBUFS (16)
+#define HUNT_PIOBUF_SIZE (ER_DZ_TX_PIOBUF_SIZE)
+
+#define HUNT_MIN_PIO_ALLOC_SIZE (HUNT_PIOBUF_SIZE / 32)
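+/*
+ * i.e. 16 PIO buffers of 2048 bytes each, with a minimum PIO allocation
+ * of 64 bytes (2048 / 32).
+ */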
+
+
+/* NIC */
+
+extern __checkReturn efx_rc_t
+hunt_board_cfg(
+ __in efx_nic_t *enp);
+
+
+/* PHY */
+
+#if EFSYS_OPT_BIST
+
+extern __checkReturn efx_rc_t
+hunt_bist_enable_offline(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+hunt_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+hunt_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+hunt_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HUNT_IMPL_H */
diff --git a/usr/src/uts/common/io/sfxge/common/hunt_nic.c b/usr/src/uts/common/io/sfxge/common/hunt_nic.c
new file mode 100644
index 0000000000..f5f6759ea6
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/hunt_nic.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON
+
+
+ __checkReturn efx_rc_t
+hunt_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6] = { 0 };
+ uint32_t board_type = 0;
+ ef10_link_state_t els;
+ efx_port_t *epp = &(enp->en_port);
+ uint32_t port;
+ uint32_t pf;
+ uint32_t vf;
+ uint32_t mask;
+ uint32_t flags;
+ uint32_t sysclk;
+ uint32_t base, nvec;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
+ goto fail1;
+
+ /*
+ * NOTE: The MCDI protocol numbers ports from zero.
+ * The common code MCDI interface numbers ports from one.
+ */
+ emip->emi_port = port + 1;
+
+ if ((rc = ef10_external_port_mapping(enp, port,
+ &encp->enc_external_port)) != 0)
+ goto fail2;
+
+ /*
+ * Get PCIe function number from firmware (used for
+ * per-function privilege and dynamic config info).
+ * - PCIe PF: pf = PF number, vf = 0xffff.
+ * - PCIe VF: pf = parent PF, vf = VF number.
+ */
+ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
+ goto fail3;
+
+ encp->enc_pf = pf;
+ encp->enc_vf = vf;
+
+ /* MAC address for this function */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+ if ((rc == 0) && (mac_addr[0] & 0x02)) {
+ /*
+ * If the static config does not include a global MAC
+ * address pool then the board may return a locally
+ * administered MAC address (this should only happen on
+ * incorrectly programmed boards).
+ */
+ rc = EINVAL;
+ }
+ } else {
+ rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
+ }
+ if (rc != 0)
+ goto fail4;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ /* Board configuration */
+ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
+ if (rc != 0) {
+ /* Unprivileged functions may not be able to read board cfg */
+ if (rc == EACCES)
+ board_type = 0;
+ else
+ goto fail5;
+ }
+
+ encp->enc_board_type = board_type;
+ encp->enc_clk_mult = 1; /* not used for Huntington */
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail7;
+ epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+
+ /*
+ * Enable firmware workarounds for hardware errata.
+ * Expected responses are:
+ * - 0 (zero):
+ * Success: workaround enabled or disabled as requested.
+ * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+ * Firmware does not support the MC_CMD_WORKAROUND request.
+ * (assume that the workaround is not supported).
+ * - MC_CMD_ERR_ENOENT (reported as ENOENT):
+ * Firmware does not support the requested workaround.
+ * - MC_CMD_ERR_EPERM (reported as EACCES):
+ * Unprivileged function cannot enable/disable workarounds.
+ *
+ * See efx_mcdi_request_errcode() for MCDI error translations.
+ */
+
+ /*
+ * If the bug35388 workaround is enabled, then use an indirect access
+ * method to avoid unsafe EVQ writes.
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG35388, B_TRUE,
+ NULL);
+ if ((rc == 0) || (rc == EACCES))
+ encp->enc_bug35388_workaround = B_TRUE;
+ else if ((rc == ENOTSUP) || (rc == ENOENT))
+ encp->enc_bug35388_workaround = B_FALSE;
+ else
+ goto fail8;
+
+ /*
+ * If the bug41750 workaround is enabled, then do not test interrupts,
+ * as the test will fail (seen with Greenport controllers).
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG41750, B_TRUE,
+ NULL);
+ if (rc == 0) {
+ encp->enc_bug41750_workaround = B_TRUE;
+ } else if (rc == EACCES) {
+ /* Assume a controller with 40G ports needs the workaround. */
+ if (epp->ep_default_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+ encp->enc_bug41750_workaround = B_TRUE;
+ else
+ encp->enc_bug41750_workaround = B_FALSE;
+ } else if ((rc == ENOTSUP) || (rc == ENOENT)) {
+ encp->enc_bug41750_workaround = B_FALSE;
+ } else {
+ goto fail9;
+ }
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /* Interrupt testing does not work for VFs. See bug50084. */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /*
+ * If the bug26807 workaround is enabled, then firmware has enabled
+ * support for chained multicast filters. Firmware will reset (FLR)
+ * functions which have filters in the hardware filter table when the
+ * workaround is enabled/disabled.
+ *
+ * We must recheck if the workaround is enabled after inserting the
+ * first hardware filter, in case it has been changed since this check.
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807,
+ B_TRUE, &flags);
+ if (rc == 0) {
+ encp->enc_bug26807_workaround = B_TRUE;
+ if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {
+ /*
+ * Other functions had installed filters before the
+ * workaround was enabled, and they have been reset
+ * by firmware.
+ */
+ EFSYS_PROBE(bug26807_workaround_flr_done);
+ /* FIXME: bump MC warm boot count ? */
+ }
+ } else if (rc == EACCES) {
+ /*
+ * Unprivileged functions cannot enable the workaround in older
+ * firmware.
+ */
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else if ((rc == ENOTSUP) || (rc == ENOENT)) {
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else {
+ goto fail10;
+ }
+
+ /* Get sysclk frequency (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk)) != 0)
+ goto fail11;
+
+ /*
+ * The timer quantum is 1536 sysclk cycles, documented for the
+ * EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
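+	/*
+	 * For example (illustrative), an 800 MHz sysclk gives a timer
+	 * quantum of 1536000 / 800 = 1920 ns.
+	 */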
+ encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */
+ if (encp->enc_bug35388_workaround) {
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ ERF_DD_EVQ_IND_TIMER_VAL_WIDTH) / 1000;
+ } else {
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+ }
+
+ /* Check capabilities of running datapath firmware */
+ if ((rc = ef10_get_datapath_caps(enp)) != 0)
+ goto fail12;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+ encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+
+ /*
+ * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
+ * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
+ * resources (allocated to this PCIe function), which is zero until
+ * after we have allocated VIs.
+ */
+ encp->enc_evq_limit = 1024;
+ encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+ encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+
+ encp->enc_buftbl_limit = 0xFFFFFFFF;
+
+ encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = HUNT_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE;
+
+ /*
+ * Get the current privilege mask. Note that this may be modified
+ * dynamically, so this value is informational only. DO NOT use
+ * the privilege mask to check for sufficient privileges, as that
+ * can result in time-of-check/time-of-use bugs.
+ */
+ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+ goto fail13;
+ encp->enc_privilege_mask = mask;
+
+ /* Get interrupt vector limits */
+ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(encp))
+ goto fail14;
+
+ /* Ignore error (cannot query vector limits from a VF). */
+ base = 0;
+ nvec = 1024;
+ }
+ encp->enc_intr_vec_base = base;
+ encp->enc_intr_limit = nvec;
+
+ /*
+	 * Maximum offset into the frame at which the TCP header can start
+	 * for firmware-assisted TSO to work.
+ */
+ encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+
+ return (0);
+
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#endif /* EFSYS_OPT_HUNTINGTON */
diff --git a/usr/src/uts/common/io/sfxge/common/hunt_phy.c b/usr/src/uts/common/io/sfxge/common/hunt_phy.c
new file mode 100644
index 0000000000..3b592f5f4e
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/hunt_phy.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+hunt_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_enable_offline(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+hunt_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_start(enp, type)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+hunt_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX)];
+ uint32_t value_mask = 0;
+ uint32_t result;
+ efx_rc_t rc;
+ _NOTE(ARGUNUSED(type))
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_POLL_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MCDI_CTL_SDU_LEN_MAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (count > 0)
+ (void) memset(valuesp, '\0', count * sizeof (unsigned long));
+
+ result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT);
+
+ if (result == MC_CMD_POLL_BIST_FAILED &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MEM_LEN &&
+ count > EFX_BIST_MEM_ECC_FATAL) {
+ if (valuesp != NULL) {
+ valuesp[EFX_BIST_MEM_TEST] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_TEST);
+ valuesp[EFX_BIST_MEM_ADDR] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ADDR);
+ valuesp[EFX_BIST_MEM_BUS] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_BUS);
+ valuesp[EFX_BIST_MEM_EXPECT] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_EXPECT);
+ valuesp[EFX_BIST_MEM_ACTUAL] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ACTUAL);
+ valuesp[EFX_BIST_MEM_ECC] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC);
+ valuesp[EFX_BIST_MEM_ECC_PARITY] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_PARITY);
+ valuesp[EFX_BIST_MEM_ECC_FATAL] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_FATAL);
+ }
+ value_mask |= (1 << EFX_BIST_MEM_TEST) |
+ (1 << EFX_BIST_MEM_ADDR) |
+ (1 << EFX_BIST_MEM_BUS) |
+ (1 << EFX_BIST_MEM_EXPECT) |
+ (1 << EFX_BIST_MEM_ACTUAL) |
+ (1 << EFX_BIST_MEM_ECC) |
+ (1 << EFX_BIST_MEM_ECC_PARITY) |
+ (1 << EFX_BIST_MEM_ECC_FATAL);
+ } else if (result == MC_CMD_POLL_BIST_FAILED &&
+ encp->enc_phy_type == EFX_PHY_XFI_FARMI &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN &&
+ count > EFX_BIST_FAULT_CODE) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_FAULT_CODE] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST);
+ value_mask |= 1 << EFX_BIST_FAULT_CODE;
+ }
+
+ if (value_maskp != NULL)
+ *value_maskp = value_mask;
+
+ EFSYS_ASSERT(resultp != NULL);
+ if (result == MC_CMD_POLL_BIST_RUNNING)
+ *resultp = EFX_BIST_RESULT_RUNNING;
+ else if (result == MC_CMD_POLL_BIST_PASSED)
+ *resultp = EFX_BIST_RESULT_PASSED;
+ else
+ *resultp = EFX_BIST_RESULT_FAILED;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+hunt_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+	/* There is no way to stop BIST on Huntington. */
+ _NOTE(ARGUNUSED(enp, type))
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+#endif /* EFSYS_OPT_HUNTINGTON */
diff --git a/usr/src/uts/common/io/sfxge/common/mcdi_mon.c b/usr/src/uts/common/io/sfxge/common/mcdi_mon.c
new file mode 100644
index 0000000000..4496bcc25b
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/mcdi_mon.c
@@ -0,0 +1,562 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MON_MCDI
+
+#if EFSYS_OPT_MON_STATS
+
+#define MCDI_MON_NEXT_PAGE (uint16_t)0xfffe
+#define MCDI_MON_INVALID_SENSOR (uint16_t)0xfffd
+#define MCDI_MON_PAGE_SIZE 0x20
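+
+/*
+ * Each sensor page covers 32 sensors, so the supported-sensor mask for a
+ * page fits in a single 32-bit word (word index sensor / MCDI_MON_PAGE_SIZE,
+ * bit index sensor % MCDI_MON_PAGE_SIZE).
+ */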
+
+/* Bitmasks of valid port(s) for each sensor */
+#define MCDI_MON_PORT_NONE (0x00)
+#define MCDI_MON_PORT_P1 (0x01)
+#define MCDI_MON_PORT_P2 (0x02)
+#define MCDI_MON_PORT_P3 (0x04)
+#define MCDI_MON_PORT_P4 (0x08)
+#define MCDI_MON_PORT_Px (0xFFFF)
+
+/* Entry for MCDI sensor in sensor map */
+#define STAT(portmask, stat) \
+ { (MCDI_MON_PORT_##portmask), (EFX_MON_STAT_##stat) }
+
+/* Entry for sensor next page flag in sensor map */
+#define STAT_NEXT_PAGE() \
+ { MCDI_MON_PORT_NONE, MCDI_MON_NEXT_PAGE }
+
+/* Placeholder for gaps in the array */
+#define STAT_NO_SENSOR() \
+ { MCDI_MON_PORT_NONE, MCDI_MON_INVALID_SENSOR }
+
+/* Map from MC sensors to monitor statistics */
+static const struct mcdi_sensor_map_s {
+ uint16_t msm_port_mask;
+ uint16_t msm_stat;
+} mcdi_sensor_map[] = {
+ /* Sensor page 0 MC_CMD_SENSOR_xxx */
+ STAT(Px, INT_TEMP), /* 0x00 CONTROLLER_TEMP */
+ STAT(Px, EXT_TEMP), /* 0x01 PHY_COMMON_TEMP */
+ STAT(Px, INT_COOLING), /* 0x02 CONTROLLER_COOLING */
+ STAT(P1, EXT_TEMP), /* 0x03 PHY0_TEMP */
+ STAT(P1, EXT_COOLING), /* 0x04 PHY0_COOLING */
+ STAT(P2, EXT_TEMP), /* 0x05 PHY1_TEMP */
+ STAT(P2, EXT_COOLING), /* 0x06 PHY1_COOLING */
+ STAT(Px, 1V), /* 0x07 IN_1V0 */
+ STAT(Px, 1_2V), /* 0x08 IN_1V2 */
+ STAT(Px, 1_8V), /* 0x09 IN_1V8 */
+ STAT(Px, 2_5V), /* 0x0a IN_2V5 */
+ STAT(Px, 3_3V), /* 0x0b IN_3V3 */
+ STAT(Px, 12V), /* 0x0c IN_12V0 */
+ STAT(Px, 1_2VA), /* 0x0d IN_1V2A */
+ STAT(Px, VREF), /* 0x0e IN_VREF */
+ STAT(Px, VAOE), /* 0x0f OUT_VAOE */
+ STAT(Px, AOE_TEMP), /* 0x10 AOE_TEMP */
+ STAT(Px, PSU_AOE_TEMP), /* 0x11 PSU_AOE_TEMP */
+ STAT(Px, PSU_TEMP), /* 0x12 PSU_TEMP */
+ STAT(Px, FAN0), /* 0x13 FAN_0 */
+ STAT(Px, FAN1), /* 0x14 FAN_1 */
+ STAT(Px, FAN2), /* 0x15 FAN_2 */
+ STAT(Px, FAN3), /* 0x16 FAN_3 */
+ STAT(Px, FAN4), /* 0x17 FAN_4 */
+ STAT(Px, VAOE_IN), /* 0x18 IN_VAOE */
+ STAT(Px, IAOE), /* 0x19 OUT_IAOE */
+ STAT(Px, IAOE_IN), /* 0x1a IN_IAOE */
+ STAT(Px, NIC_POWER), /* 0x1b NIC_POWER */
+ STAT(Px, 0_9V), /* 0x1c IN_0V9 */
+ STAT(Px, I0_9V), /* 0x1d IN_I0V9 */
+ STAT(Px, I1_2V), /* 0x1e IN_I1V2 */
+ STAT_NEXT_PAGE(), /* 0x1f Next page flag (not a sensor) */
+
+ /* Sensor page 1 MC_CMD_SENSOR_xxx */
+ STAT(Px, 0_9V_ADC), /* 0x20 IN_0V9_ADC */
+ STAT(Px, INT_TEMP2), /* 0x21 CONTROLLER_2_TEMP */
+ STAT(Px, VREG_TEMP), /* 0x22 VREG_INTERNAL_TEMP */
+ STAT(Px, VREG_0_9V_TEMP), /* 0x23 VREG_0V9_TEMP */
+ STAT(Px, VREG_1_2V_TEMP), /* 0x24 VREG_1V2_TEMP */
+ STAT(Px, INT_VPTAT), /* 0x25 CTRLR. VPTAT */
+ STAT(Px, INT_ADC_TEMP), /* 0x26 CTRLR. INTERNAL_TEMP */
+ STAT(Px, EXT_VPTAT), /* 0x27 CTRLR. VPTAT_EXTADC */
+ STAT(Px, EXT_ADC_TEMP), /* 0x28 CTRLR. INTERNAL_TEMP_EXTADC */
+ STAT(Px, AMBIENT_TEMP), /* 0x29 AMBIENT_TEMP */
+ STAT(Px, AIRFLOW), /* 0x2a AIRFLOW */
+ STAT(Px, VDD08D_VSS08D_CSR), /* 0x2b VDD08D_VSS08D_CSR */
+ STAT(Px, VDD08D_VSS08D_CSR_EXTADC), /* 0x2c VDD08D_VSS08D_CSR_EXTADC */
+ STAT(Px, HOTPOINT_TEMP), /* 0x2d HOTPOINT_TEMP */
+ STAT(P1, PHY_POWER_SWITCH_PORT0), /* 0x2e PHY_POWER_SWITCH_PORT0 */
+ STAT(P2, PHY_POWER_SWITCH_PORT1), /* 0x2f PHY_POWER_SWITCH_PORT1 */
+ STAT(Px, MUM_VCC), /* 0x30 MUM_VCC */
+ STAT(Px, 0V9_A), /* 0x31 0V9_A */
+ STAT(Px, I0V9_A), /* 0x32 I0V9_A */
+ STAT(Px, 0V9_A_TEMP), /* 0x33 0V9_A_TEMP */
+ STAT(Px, 0V9_B), /* 0x34 0V9_B */
+ STAT(Px, I0V9_B), /* 0x35 I0V9_B */
+ STAT(Px, 0V9_B_TEMP), /* 0x36 0V9_B_TEMP */
+ STAT(Px, CCOM_AVREG_1V2_SUPPLY), /* 0x37 CCOM_AVREG_1V2_SUPPLY */
+ STAT(Px, CCOM_AVREG_1V2_SUPPLY_EXT_ADC),
+ /* 0x38 CCOM_AVREG_1V2_SUPPLY_EXT_ADC */
+ STAT(Px, CCOM_AVREG_1V8_SUPPLY), /* 0x39 CCOM_AVREG_1V8_SUPPLY */
+ STAT(Px, CCOM_AVREG_1V8_SUPPLY_EXT_ADC),
+ /* 0x3a CCOM_AVREG_1V8_SUPPLY_EXT_ADC */
+ STAT_NO_SENSOR(), /* 0x3b (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3c (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3d (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3e (no sensor) */
+ STAT_NEXT_PAGE(), /* 0x3f Next page flag (not a sensor) */
+
+ /* Sensor page 2 MC_CMD_SENSOR_xxx */
+ STAT(Px, CONTROLLER_MASTER_VPTAT), /* 0x40 MASTER_VPTAT */
+ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP), /* 0x41 MASTER_INT_TEMP */
+ STAT(Px, CONTROLLER_MASTER_VPTAT_EXT_ADC), /* 0x42 MAST_VPTAT_EXT_ADC */
+ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC),
+ /* 0x43 MASTER_INTERNAL_TEMP_EXT_ADC */
+ STAT(Px, CONTROLLER_SLAVE_VPTAT), /* 0x44 SLAVE_VPTAT */
+ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP), /* 0x45 SLAVE_INTERNAL_TEMP */
+ STAT(Px, CONTROLLER_SLAVE_VPTAT_EXT_ADC), /* 0x46 SLAVE_VPTAT_EXT_ADC */
+ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC),
+ /* 0x47 SLAVE_INTERNAL_TEMP_EXT_ADC */
+ STAT_NO_SENSOR(), /* 0x48 (no sensor) */
+ STAT(Px, SODIMM_VOUT), /* 0x49 SODIMM_VOUT */
+ STAT(Px, SODIMM_0_TEMP), /* 0x4a SODIMM_0_TEMP */
+ STAT(Px, SODIMM_1_TEMP), /* 0x4b SODIMM_1_TEMP */
+ STAT(Px, PHY0_VCC), /* 0x4c PHY0_VCC */
+ STAT(Px, PHY1_VCC), /* 0x4d PHY1_VCC */
+ STAT(Px, CONTROLLER_TDIODE_TEMP), /* 0x4e CONTROLLER_TDIODE_TEMP */
+ STAT(Px, BOARD_FRONT_TEMP), /* 0x4f BOARD_FRONT_TEMP */
+ STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */
+};
+
+#define MCDI_STATIC_SENSOR_ASSERT(_field) \
+ EFX_STATIC_ASSERT(MC_CMD_SENSOR_STATE_ ## _field \
+ == EFX_MON_STAT_STATE_ ## _field)
+
+static void
+mcdi_mon_decode_stats(
+ __in efx_nic_t *enp,
+ __in_ecount(sensor_mask_size) uint32_t *sensor_mask,
+ __in size_t sensor_mask_size,
+ __in_opt efsys_mem_t *esmp,
+ __out_ecount_opt(sensor_mask_size) uint32_t *stat_maskp,
+ __inout_ecount_opt(EFX_MON_NSTATS) efx_mon_stat_value_t *stat)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ uint16_t port_mask;
+ uint16_t sensor;
+ size_t sensor_max;
+ uint32_t stat_mask[(EFX_ARRAY_SIZE(mcdi_sensor_map) + 31) / 32];
+ uint32_t idx = 0;
+ uint32_t page = 0;
+
+ /* Assert the MC_CMD_SENSOR and EFX_MON_STATE namespaces agree */
+ MCDI_STATIC_SENSOR_ASSERT(OK);
+ MCDI_STATIC_SENSOR_ASSERT(WARNING);
+ MCDI_STATIC_SENSOR_ASSERT(FATAL);
+ MCDI_STATIC_SENSOR_ASSERT(BROKEN);
+ MCDI_STATIC_SENSOR_ASSERT(NO_READING);
+
+ EFX_STATIC_ASSERT(sizeof (stat_mask[0]) * 8 ==
+ EFX_MON_MASK_ELEMENT_SIZE);
+ sensor_max =
+ MIN((8 * sensor_mask_size), EFX_ARRAY_SIZE(mcdi_sensor_map));
+
+ port_mask = 1U << emip->emi_port;
+
+ (void) memset(stat_mask, 0, sizeof (stat_mask));
+
+ /*
+ * The MCDI sensor readings in the DMA buffer are a packed array of
+ * MC_CMD_SENSOR_VALUE_ENTRY structures, which only includes entries for
+ * supported sensors (bit set in sensor_mask). The sensor_mask and
+ * sensor readings do not include entries for the per-page NEXT_PAGE
+ * flag.
+ *
+ * sensor_mask may legitimately contain MCDI sensors that the driver
+ * does not understand.
+ */
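+	/*
+	 * For example, if the MC reports support for sensors 0x00, 0x02 and
+	 * 0x07 on page 0, their readings occupy the first three dwords of
+	 * the DMA buffer (offsets 0, 4 and 8) despite the gaps in the sensor
+	 * numbering; idx below tracks this packed position.
+	 */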
+ for (sensor = 0; sensor < sensor_max; ++sensor) {
+ efx_mon_stat_t id = mcdi_sensor_map[sensor].msm_stat;
+
+ if ((sensor % MCDI_MON_PAGE_SIZE) == MC_CMD_SENSOR_PAGE0_NEXT) {
+ EFSYS_ASSERT3U(id, ==, MCDI_MON_NEXT_PAGE);
+ page++;
+ continue;
+ }
+ if (~(sensor_mask[page]) & (1U << sensor))
+ continue;
+ idx++;
+
+ if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0)
+ continue;
+ EFSYS_ASSERT(id < EFX_MON_NSTATS);
+
+ /*
+ * stat_mask is a bitmask indexed by EFX_MON_* monitor statistic
+ * identifiers from efx_mon_stat_t (without NEXT_PAGE bits).
+ *
+ * If there is an entry in the MCDI sensor to monitor statistic
+ * map then the sensor reading is used for the value of the
+ * monitor statistic.
+ */
+ stat_mask[id / EFX_MON_MASK_ELEMENT_SIZE] |=
+ (1U << (id % EFX_MON_MASK_ELEMENT_SIZE));
+
+ if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+
+ /* Get MCDI sensor reading from DMA buffer */
+ EFSYS_MEM_READD(esmp, 4 * (idx - 1), &dword);
+
+ /* Update EFX monitor stat from MCDI sensor reading */
+ stat[id].emsv_value = (uint16_t)EFX_DWORD_FIELD(dword,
+ MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
+
+ stat[id].emsv_state = (uint16_t)EFX_DWORD_FIELD(dword,
+ MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ }
+ }
+
+ if (stat_maskp != NULL) {
+ (void) memcpy(stat_maskp, stat_mask, sizeof (stat_mask));
+ }
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_mon_stat_t *idp,
+ __out efx_mon_stat_value_t *valuep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint16_t port_mask;
+ uint16_t sensor;
+ uint16_t state;
+ uint16_t value;
+ efx_mon_stat_t id;
+ efx_rc_t rc;
+
+ port_mask = (emip->emi_port == 1)
+ ? MCDI_MON_PORT_P1
+ : MCDI_MON_PORT_P2;
+
+ sensor = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_MONITOR);
+ state = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_STATE);
+ value = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_VALUE);
+
+ /* Hardware must support this MCDI sensor */
+ EFSYS_ASSERT3U(sensor, <, (8 * encp->enc_mcdi_sensor_mask_size));
+ EFSYS_ASSERT((sensor % MCDI_MON_PAGE_SIZE) != MC_CMD_SENSOR_PAGE0_NEXT);
+ EFSYS_ASSERT(encp->enc_mcdi_sensor_maskp != NULL);
+ EFSYS_ASSERT((encp->enc_mcdi_sensor_maskp[sensor / MCDI_MON_PAGE_SIZE] &
+ (1U << (sensor % MCDI_MON_PAGE_SIZE))) != 0);
+
+ /* But we don't have to understand it */
+ if (sensor >= EFX_ARRAY_SIZE(mcdi_sensor_map)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ id = mcdi_sensor_map[sensor].msm_stat;
+ if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0)
+ return (ENODEV);
+ EFSYS_ASSERT(id < EFX_MON_NSTATS);
+
+ *idp = id;
+ valuep->emsv_value = value;
+ valuep->emsv_state = state;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_read_sensors(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint32_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_READ_SENSORS_EXT_IN_LEN,
+ MC_CMD_READ_SENSORS_EXT_OUT_LEN)];
+ uint32_t addr_lo, addr_hi;
+
+ req.emr_cmd = MC_CMD_READ_SENSORS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_READ_SENSORS_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_READ_SENSORS_EXT_OUT_LEN;
+
+ addr_lo = (uint32_t)(EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ addr_hi = (uint32_t)(EFSYS_MEM_ADDR(esmp) >> 32);
+
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_LO, addr_lo);
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_HI, addr_hi);
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_LENGTH, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ return (req.emr_rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_sensor_info_npages(
+ __in efx_nic_t *enp,
+ __out uint32_t *npagesp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ int page;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(npagesp != NULL);
+
+ page = 0;
+ do {
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SENSOR_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page++);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ } while (MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK) &
+ (1U << MC_CMD_SENSOR_PAGE0_NEXT));
+
+ *npagesp = page;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_sensor_info(
+ __in efx_nic_t *enp,
+ __out_ecount(npages) uint32_t *sensor_maskp,
+ __in size_t npages)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ uint32_t page;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(sensor_maskp != NULL);
+
+ for (page = 0; page < npages; page++) {
+ uint32_t mask;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SENSOR_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ mask = MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK);
+
+ if ((page != (npages - 1)) &&
+ ((mask & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) == 0)) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ sensor_maskp[page] = mask;
+ }
+
+ if (sensor_maskp[npages - 1] & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t size = encp->enc_mon_stat_dma_buf_size;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_read_sensors(enp, esmp, size)) != 0)
+ goto fail1;
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, size);
+
+ mcdi_mon_decode_stats(enp,
+ encp->enc_mcdi_sensor_maskp,
+ encp->enc_mcdi_sensor_mask_size,
+ esmp, NULL, values);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_cfg_build(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t npages;
+ efx_rc_t rc;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ encp->enc_mon_type = EFX_MON_SFC90X0;
+ break;
+#endif
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ encp->enc_mon_type = EFX_MON_SFC91X0;
+ break;
+#endif
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ encp->enc_mon_type = EFX_MON_SFC92X0;
+ break;
+#endif
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Get mc sensor mask size */
+ npages = 0;
+ if ((rc = efx_mcdi_sensor_info_npages(enp, &npages)) != 0)
+ goto fail2;
+
+ encp->enc_mon_stat_dma_buf_size = npages * EFX_MON_STATS_PAGE_SIZE;
+ encp->enc_mcdi_sensor_mask_size = npages * sizeof (uint32_t);
+
+ /* Allocate mc sensor mask */
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+
+ if (encp->enc_mcdi_sensor_maskp == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ /* Read mc sensor mask */
+ if ((rc = efx_mcdi_sensor_info(enp,
+ encp->enc_mcdi_sensor_maskp,
+ npages)) != 0)
+ goto fail4;
+
+ /* Build monitor statistics mask */
+ mcdi_mon_decode_stats(enp,
+ encp->enc_mcdi_sensor_maskp,
+ encp->enc_mcdi_sensor_mask_size,
+ NULL, encp->enc_mon_stat_mask, NULL);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+ EFSYS_KMEM_FREE(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+mcdi_mon_cfg_free(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ if (encp->enc_mcdi_sensor_maskp != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+ }
+}
+
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#endif /* EFSYS_OPT_MON_MCDI */
diff --git a/usr/src/uts/common/io/sfxge/common/mcdi_mon.h b/usr/src/uts/common/io/sfxge/common/mcdi_mon.h
new file mode 100644
index 0000000000..f23cc93bf5
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/mcdi_mon.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_MCDI_MON_H
+#define _SYS_MCDI_MON_H
+
+#include "efx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if EFSYS_OPT_MON_MCDI
+
+#if EFSYS_OPT_MON_STATS
+
+ __checkReturn efx_rc_t
+mcdi_mon_cfg_build(
+ __in efx_nic_t *enp);
+
+ void
+mcdi_mon_cfg_free(
+ __in efx_nic_t *enp);
+
+
+extern __checkReturn efx_rc_t
+mcdi_mon_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_mon_stat_t *idp,
+ __out efx_mon_stat_value_t *valuep);
+
+extern __checkReturn efx_rc_t
+mcdi_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#endif /* EFSYS_OPT_MON_MCDI */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MCDI_MON_H */
diff --git a/usr/src/uts/common/io/sfxge/common/medford_impl.h b/usr/src/uts/common/io/sfxge/common/medford_impl.h
new file mode 100644
index 0000000000..4530804736
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/medford_impl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_MEDFORD_IMPL_H
+#define _SYS_MEDFORD_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Alignment requirement for value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary
+ *
+ * FIXME: Is this the same on Medford as Huntington?
+ */
+#define MEDFORD_RX_WPTR_ALIGN 8
+
+
+
+#ifndef ER_EZ_TX_PIOBUF_SIZE
+#define ER_EZ_TX_PIOBUF_SIZE 4096
+#endif
+
+
+#define MEDFORD_PIOBUF_NBUFS (16)
+#define MEDFORD_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE)
+
+#define MEDFORD_MIN_PIO_ALLOC_SIZE (MEDFORD_PIOBUF_SIZE / 32)
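+
+/*
+ * With the default ER_EZ_TX_PIOBUF_SIZE of 4096 bytes, the minimum PIO
+ * allocation above works out to 128 bytes, i.e. at most 32 allocations per
+ * PIO buffer.
+ */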
+
+
+extern __checkReturn efx_rc_t
+medford_board_cfg(
+ __in efx_nic_t *enp);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MEDFORD_IMPL_H */
diff --git a/usr/src/uts/common/io/sfxge/common/medford_nic.c b/usr/src/uts/common/io/sfxge/common/medford_nic.c
new file mode 100644
index 0000000000..9eb80213d4
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/medford_nic.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_rxdp_config(
+ __in efx_nic_t *enp,
+ __out uint32_t *end_paddingp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
+ MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
+ uint32_t end_padding;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
+ /* RX DMA end padding is disabled */
+ end_padding = 0;
+ } else {
+		switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
+ end_padding = 64;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
+ end_padding = 128;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
+ end_padding = 256;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ }
+
+ *end_paddingp = end_padding;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+medford_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6] = { 0 };
+ uint32_t board_type = 0;
+ ef10_link_state_t els;
+ efx_port_t *epp = &(enp->en_port);
+ uint32_t port;
+ uint32_t pf;
+ uint32_t vf;
+ uint32_t mask;
+ uint32_t sysclk;
+ uint32_t base, nvec;
+ uint32_t end_padding;
+ efx_rc_t rc;
+
+ /*
+ * FIXME: Likely to be incomplete and incorrect.
+ * Parts of this should be shared with Huntington.
+ */
+
+ if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
+ goto fail1;
+
+ /*
+ * NOTE: The MCDI protocol numbers ports from zero.
+ * The common code MCDI interface numbers ports from one.
+ */
+ emip->emi_port = port + 1;
+
+ if ((rc = ef10_external_port_mapping(enp, port,
+ &encp->enc_external_port)) != 0)
+ goto fail2;
+
+ /*
+ * Get PCIe function number from firmware (used for
+ * per-function privilege and dynamic config info).
+ * - PCIe PF: pf = PF number, vf = 0xffff.
+ * - PCIe VF: pf = parent PF, vf = VF number.
+ */
+ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
+ goto fail3;
+
+ encp->enc_pf = pf;
+ encp->enc_vf = vf;
+
+ /* MAC address for this function */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+ if ((rc == 0) && (mac_addr[0] & 0x02)) {
+ /*
+ * If the static config does not include a global MAC
+ * address pool then the board may return a locally
+ * administered MAC address (this should only happen on
+ * incorrectly programmed boards).
+ */
+ rc = EINVAL;
+ }
+ } else {
+ rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
+ }
+ if (rc != 0)
+ goto fail4;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ /* Board configuration */
+ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
+ if (rc != 0) {
+ /* Unprivileged functions may not be able to read board cfg */
+ if (rc == EACCES)
+ board_type = 0;
+ else
+ goto fail5;
+ }
+
+ encp->enc_board_type = board_type;
+ encp->enc_clk_mult = 1; /* not used for Medford */
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail7;
+ epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /*
+ * Interrupt testing does not work for VFs. See bug50084.
+ * FIXME: Does this still apply to Medford?
+ */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /* Chained multicast is always enabled on Medford */
+ encp->enc_bug26807_workaround = B_TRUE;
+
+ /* Get sysclk frequency (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk)) != 0)
+ goto fail8;
+
+ /*
+ * The timer quantum is 1536 sysclk cycles, documented for the
+ * EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
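+	/*
+	 * For example, a sysclk of 200 MHz would give a quantum of
+	 * 1536000 / 200 = 7680 ns (illustrative value only; the actual
+	 * frequency is read from the MC above).
+	 */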
+ encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+
+ /* Check capabilities of running datapath firmware */
+ if ((rc = ef10_get_datapath_caps(enp)) != 0)
+ goto fail9;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+
+ /* Get the RX DMA end padding alignment configuration */
+ if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0)
+ goto fail10;
+ encp->enc_rx_buf_align_end = end_padding;
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+
+ /*
+ * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
+ * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
+ * resources (allocated to this PCIe function), which is zero until
+ * after we have allocated VIs.
+ */
+ encp->enc_evq_limit = 1024;
+ encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+ encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+
+ encp->enc_buftbl_limit = 0xFFFFFFFF;
+
+ encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE;
+
+ /*
+ * Get the current privilege mask. Note that this may be modified
+ * dynamically, so this value is informational only. DO NOT use
+ * the privilege mask to check for sufficient privileges, as that
+ * can result in time-of-check/time-of-use bugs.
+ */
+ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+ goto fail11;
+ encp->enc_privilege_mask = mask;
+
+ /* Get interrupt vector limits */
+ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(encp))
+ goto fail12;
+
+ /* Ignore error (cannot query vector limits from a VF). */
+ base = 0;
+ nvec = 1024;
+ }
+ encp->enc_intr_vec_base = base;
+ encp->enc_intr_limit = nvec;
+
+	/*
+	 * Maximum offset into the frame at which the TCP header may start
+	 * for firmware-assisted TSO to work.
+	 */
+ encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+
+ /*
+ * Medford stores a single global copy of VPD, not per-PF as on
+ * Huntington.
+ */
+ encp->enc_vpd_is_global = B_TRUE;
+
+ return (0);
+
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MEDFORD */
diff --git a/usr/src/uts/common/io/sfxge/common/siena_flash.h b/usr/src/uts/common/io/sfxge/common/siena_flash.h
new file mode 100644
index 0000000000..4b1d6e7969
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/siena_flash.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2007-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_SIENA_FLASH_H
+#define _SYS_SIENA_FLASH_H
+
+#pragma pack(1)
+
+/* Fixed locations near the start of flash (which may be in the internal PHY
+ * firmware header) point to the boot header.
+ *
+ * - parsed by MC boot ROM and firmware
+ * - reserved (but not parsed) by PHY firmware
+ * - opaque to driver
+ */
+
+#define SIENA_MC_BOOT_PHY_FW_HDR_LEN (0x20)
+
+#define SIENA_MC_BOOT_PTR_LOCATION (0x18) /* First thing we try to boot */
+#define SIENA_MC_BOOT_ALT_PTR_LOCATION (0x1c) /* Alternative if that fails */
+
+#define SIENA_MC_BOOT_HDR_LEN (0x200)
+
+#define SIENA_MC_BOOT_MAGIC (0x51E4A001)
+#define SIENA_MC_BOOT_VERSION (1)
+
+
+/*
+ * Structures supporting an arbitrary number of binary blobs in the flash
+ * image, intended to house code and tables for the satellite CPUs.
+ *
+ * Magic numbers courtesy of random.org:
+ */
+#define BLOBS_HEADER_MAGIC (0xBDA3BBD4)
+#define BLOB_HEADER_MAGIC (0xA1478A91)
+
+typedef struct blobs_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic;
+ efx_dword_t no_of_blobs;
+} blobs_hdr_t;
+
+typedef struct blob_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic;
+ efx_dword_t cpu_type;
+ efx_dword_t build_variant;
+ efx_dword_t offset;
+ efx_dword_t length;
+ efx_dword_t checksum;
+} blob_hdr_t;
+
+#define BLOB_CPU_TYPE_TXDI_TEXT (0)
+#define BLOB_CPU_TYPE_RXDI_TEXT (1)
+#define BLOB_CPU_TYPE_TXDP_TEXT (2)
+#define BLOB_CPU_TYPE_RXDP_TEXT (3)
+#define BLOB_CPU_TYPE_RXHRSL_HR_LUT (4)
+#define BLOB_CPU_TYPE_RXHRSL_HR_LUT_CFG (5)
+#define BLOB_CPU_TYPE_TXHRSL_HR_LUT (6)
+#define BLOB_CPU_TYPE_TXHRSL_HR_LUT_CFG (7)
+#define BLOB_CPU_TYPE_RXHRSL_HR_PGM (8)
+#define BLOB_CPU_TYPE_RXHRSL_SL_PGM (9)
+#define BLOB_CPU_TYPE_TXHRSL_HR_PGM (10)
+#define BLOB_CPU_TYPE_TXHRSL_SL_PGM (11)
+#define BLOB_CPU_TYPE_RXDI_VTBL0 (12)
+#define BLOB_CPU_TYPE_TXDI_VTBL0 (13)
+#define BLOB_CPU_TYPE_RXDI_VTBL1 (14)
+#define BLOB_CPU_TYPE_TXDI_VTBL1 (15)
+#define BLOB_CPU_TYPE_DUMPSPEC (32)
+#define BLOB_CPU_TYPE_MC_XIP (33)
+
+#define BLOB_CPU_TYPE_INVALID (31)
+
+/*
+ * The upper four bits of the CPU type field specify the compression
+ * algorithm used for this blob.
+ */
+#define BLOB_COMPRESSION_MASK (0xf0000000)
+#define BLOB_CPU_TYPE_MASK (0x0fffffff)
+
+#define BLOB_COMPRESSION_NONE (0x00000000) /* Stored as is */
+#define BLOB_COMPRESSION_LZ (0x10000000) /* see lib/lzdecoder.c */
+
+typedef struct siena_mc_boot_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_BOOT_MAGIC */
+ efx_word_t hdr_version; /* this structure definition is version 1 */
+ efx_byte_t board_type;
+ efx_byte_t firmware_version_a;
+ efx_byte_t firmware_version_b;
+ efx_byte_t firmware_version_c;
+ efx_word_t checksum; /* of whole header area + firmware image */
+ efx_word_t firmware_version_d;
+ efx_byte_t mcfw_subtype;
+ efx_byte_t generation; /* Valid for medford, SBZ for earlier chips */
+ efx_dword_t firmware_text_offset; /* offset to firmware .text */
+ efx_dword_t firmware_text_size; /* length of firmware .text, in bytes */
+ efx_dword_t firmware_data_offset; /* offset to firmware .data */
+ efx_dword_t firmware_data_size; /* length of firmware .data, in bytes */
+ efx_byte_t spi_rate; /* SPI rate for reading image, 0 is BootROM default */
+ efx_byte_t spi_phase_adj; /* SPI SDO/SCL phase adjustment, 0 is default (no adj) */
+ efx_word_t xpm_sector; /* The sector that contains the key, or 0xffff if unsigned (medford) SBZ (earlier) */
+ efx_dword_t reserved_c[7]; /* (set to 0) */
+} siena_mc_boot_hdr_t;
+
+#define SIENA_MC_BOOT_HDR_PADDING \
+ (SIENA_MC_BOOT_HDR_LEN - sizeof(siena_mc_boot_hdr_t))
+
+#define SIENA_MC_STATIC_CONFIG_MAGIC (0xBDCF5555)
+#define SIENA_MC_STATIC_CONFIG_VERSION (0)
+
+typedef struct siena_mc_static_config_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_STATIC_CONFIG_MAGIC */
+ efx_word_t length; /* of header area (i.e. not including VPD) */
+ efx_byte_t version;
+ efx_byte_t csum; /* over header area (i.e. not including VPD) */
+ efx_dword_t static_vpd_offset;
+ efx_dword_t static_vpd_length;
+ efx_dword_t capabilities;
+ efx_byte_t mac_addr_base[6];
+ efx_byte_t green_mode_cal; /* Green mode calibration result */
+ efx_byte_t green_mode_valid; /* Whether cal holds a valid value */
+ efx_word_t mac_addr_count;
+ efx_word_t mac_addr_stride;
+ efx_word_t calibrated_vref; /* Vref as measured during production */
+ efx_word_t adc_vref; /* Vref as read by ADC */
+ efx_dword_t reserved2[1]; /* (write as zero) */
+ efx_dword_t num_dbi_items;
+ struct {
+ efx_word_t addr;
+ efx_word_t byte_enables;
+ efx_dword_t value;
+ } dbi[];
+} siena_mc_static_config_hdr_t;
+
+#define SIENA_MC_DYNAMIC_CONFIG_MAGIC (0xBDCFDDDD)
+#define SIENA_MC_DYNAMIC_CONFIG_VERSION (0)
+
+typedef struct siena_mc_fw_version_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t fw_subtype;
+ efx_word_t version_w;
+ efx_word_t version_x;
+ efx_word_t version_y;
+ efx_word_t version_z;
+} siena_mc_fw_version_t;
+
+typedef struct siena_mc_dynamic_config_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_DYNAMIC_CONFIG_MAGIC */
+ efx_word_t length; /* of header area (i.e. not including VPD) */
+ efx_byte_t version;
+ efx_byte_t csum; /* over header area (i.e. not including VPD) */
+ efx_dword_t dynamic_vpd_offset;
+ efx_dword_t dynamic_vpd_length;
+ efx_dword_t num_fw_version_items;
+ siena_mc_fw_version_t fw_version[];
+} siena_mc_dynamic_config_hdr_t;
+
+#define SIENA_MC_EXPROM_SINGLE_MAGIC (0xAA55) /* little-endian uint16_t */
+
+#define SIENA_MC_EXPROM_COMBO_MAGIC (0xB0070102) /* little-endian uint32_t */
+#define SIENA_MC_EXPROM_COMBO_V2_MAGIC (0xB0070103) /* little-endian uint32_t */
+
+typedef struct siena_mc_combo_rom_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_EXPROM_COMBO_MAGIC or SIENA_MC_EXPROM_COMBO_V2_MAGIC */
+ union {
+ struct {
+ efx_dword_t len1; /* length of first image */
+ efx_dword_t len2; /* length of second image */
+ efx_dword_t off1; /* offset of first byte to edit to combine images */
+ efx_dword_t off2; /* offset of second byte to edit to combine images */
+ efx_word_t infoblk0_off;/* infoblk offset */
+ efx_word_t infoblk1_off;/* infoblk offset */
+ efx_byte_t infoblk_len;/* length of space reserved for one infoblk structure */
+ efx_byte_t reserved[7];/* (set to 0) */
+ } v1;
+ struct {
+ efx_dword_t len1; /* length of first image */
+ efx_dword_t len2; /* length of second image */
+ efx_dword_t off1; /* offset of first byte to edit to combine images */
+ efx_dword_t off2; /* offset of second byte to edit to combine images */
+ efx_word_t infoblk_off;/* infoblk start offset */
+ efx_word_t infoblk_count;/* infoblk count */
+ efx_byte_t infoblk_len;/* length of space reserved for one infoblk structure */
+ efx_byte_t reserved[7];/* (set to 0) */
+ } v2;
+ } data;
+} siena_mc_combo_rom_hdr_t;
+
+#pragma pack()
+
+#endif /* _SYS_SIENA_FLASH_H */
diff --git a/usr/src/uts/common/io/sfxge/common/siena_impl.h b/usr/src/uts/common/io/sfxge/common/siena_impl.h
new file mode 100644
index 0000000000..a030941bc6
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/siena_impl.h
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_SIENA_IMPL_H
+#define _SYS_SIENA_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_mcdi.h"
+#include "siena_flash.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SIENA_NVRAM_CHUNK 0x80
+
+extern __checkReturn efx_rc_t
+siena_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_nic_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_nic_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+siena_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+siena_nic_unprobe(
+ __in efx_nic_t *enp);
+
+#define SIENA_SRAM_ROWS 0x12000
+
+extern void
+siena_sram_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_sram_pattern_fn_t func);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+siena_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern void
+siena_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in void *hdrp,
+ __in size_t hdr_len,
+ __in void *sdup,
+ __in size_t sdu_len);
+
+extern __checkReturn boolean_t
+siena_mcdi_poll_response(
+ __in efx_nic_t *enp);
+
+extern void
+siena_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length);
+
+extern efx_rc_t
+siena_mcdi_poll_reboot(
+ __in efx_nic_t *enp);
+
+extern void
+siena_mcdi_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern void
+siena_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+siena_nvram_get_dynamic_cfg(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t vpd,
+ __out siena_mc_dynamic_config_hdr_t **dcfgp,
+ __out size_t *sizep);
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+siena_nvram_get_subtype(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+siena_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4]);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+siena_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+siena_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+siena_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+typedef struct siena_link_state_s {
+ uint32_t sls_adv_cap_mask;
+ uint32_t sls_lp_cap_mask;
+ unsigned int sls_fcntl;
+ efx_link_mode_t sls_link_mode;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t sls_loopback;
+#endif
+ boolean_t sls_mac_up;
+} siena_link_state_t;
+
+extern void
+siena_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+siena_phy_get_link(
+ __in efx_nic_t *enp,
+ __out siena_link_state_t *slsp);
+
+extern __checkReturn efx_rc_t
+siena_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t on);
+
+extern __checkReturn efx_rc_t
+siena_phy_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_phy_verify(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+#if EFSYS_OPT_PHY_STATS
+
+extern void
+siena_phy_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t vmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint64_t *smaskp,
+ __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat);
+
+extern __checkReturn efx_rc_t
+siena_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+extern __checkReturn efx_rc_t
+siena_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+siena_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+siena_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+extern __checkReturn efx_rc_t
+siena_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+siena_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+extern __checkReturn efx_rc_t
+siena_mac_reconfigure(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+extern __checkReturn efx_rc_t
+siena_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type);
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+extern __checkReturn efx_rc_t
+siena_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SIENA_IMPL_H */
diff --git a/usr/src/uts/common/io/sfxge/common/siena_mac.c b/usr/src/uts/common/io/sfxge/common/siena_mac.c
new file mode 100644
index 0000000000..04b81f2cce
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/siena_mac.c
@@ -0,0 +1,441 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+ __checkReturn efx_rc_t
+siena_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ siena_link_state_t sls;
+ efx_rc_t rc;
+
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail1;
+
+ epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
+ epp->ep_fcntl = sls.sls_fcntl;
+
+ *link_modep = sls.sls_link_mode;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ siena_link_state_t sls;
+ efx_rc_t rc;
+
+ /*
+ * Because Siena doesn't *require* polling, we can't rely on
+ * siena_mac_poll() being executed to populate epp->ep_mac_up.
+ */
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail1;
+
+ *mac_upp = sls.sls_mac_up;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_mac_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_oword_t multicast_hash[2];
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN),
+ MAX(MC_CMD_SET_MCAST_HASH_IN_LEN,
+ MC_CMD_SET_MCAST_HASH_OUT_LEN))];
+ unsigned int fcntl;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
+ epp->ep_mac_addr);
+ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, !epp->ep_all_unicst,
+ SET_MAC_IN_REJECT_BRDCST, !epp->ep_brdcst);
+
+ if (epp->ep_fcntl_autoneg)
+ /* efx_fcntl_set() has already set the phy capabilities */
+ fcntl = MC_CMD_FCNTL_AUTO;
+ else if (epp->ep_fcntl & EFX_FCNTL_RESPOND)
+ fcntl = (epp->ep_fcntl & EFX_FCNTL_GENERATE)
+ ? MC_CMD_FCNTL_BIDIR
+ : MC_CMD_FCNTL_RESPOND;
+ else
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, fcntl);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* Push multicast hash */
+
+ if (epp->ep_all_mulcst) {
+ /* A hash matching all multicast is all 1s */
+ EFX_SET_OWORD(multicast_hash[0]);
+ EFX_SET_OWORD(multicast_hash[1]);
+ } else if (epp->ep_mulcst) {
+ /* Use the hash set by the multicast list */
+ multicast_hash[0] = epp->ep_multicst_hash[0];
+ multicast_hash[1] = epp->ep_multicst_hash[1];
+ } else {
+ /* A hash matching no traffic is simply 0 */
+ EFX_ZERO_OWORD(multicast_hash[0]);
+ EFX_ZERO_OWORD(multicast_hash[1]);
+ }
+
+ /*
+ * Broadcast packets go through the multicast hash filter.
+ * The IEEE 802.3 CRC32 of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask (bit 0x7f in the
+ * second octword).
+ */
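+	/*
+	 * That is, the hash index 0xff (255, the low byte of the CRC) falls
+	 * in the second 128-bit octword: bit 255 - 128 = 127 = 0x7f.
+	 */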
+ if (epp->ep_brdcst) {
+ /*
+ * NB: due to constant folding, some of this evaluates
+ * to null expressions, giving E_EXPR_NULL_EFFECT during
+		 * lint. There is no good way to fix this without explicitly
+		 * coding the individual word/bit settings, so just suppress
+		 * lint for this one line.
+ */
+ /* LINTED */
+ EFX_SET_OWORD_BIT(multicast_hash[1], 0x7f);
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MCAST_HASH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MCAST_HASH_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MCAST_HASH_OUT_LEN;
+
+ (void) memcpy(MCDI_IN2(req, uint8_t, SET_MCAST_HASH_IN_HASH0),
+ multicast_hash, sizeof (multicast_hash));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+siena_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_loopback_type_t old_loopback_type;
+ efx_link_mode_t old_loopback_link_mode;
+ efx_rc_t rc;
+
+ /* The PHY object handles this on Siena */
+ old_loopback_type = epp->ep_loopback_type;
+ old_loopback_link_mode = epp->ep_loopback_link_mode;
+ epp->ep_loopback_type = loopback_type;
+ epp->ep_loopback_link_mode = link_mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_loopback_type = old_loopback_type;
+ epp->ep_loopback_link_mode = old_loopback_link_mode;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+#define SIENA_MAC_STAT_READ(_esmp, _field, _eqp) \
+ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp)
+
+ __checkReturn efx_rc_t
+siena_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp)
+{
+ efx_qword_t value;
+ efx_qword_t generation_start;
+ efx_qword_t generation_end;
+
+ _NOTE(ARGUNUSED(enp))
+
+ /* Read END first so we don't race with the MC */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
+ &generation_end);
+ EFSYS_MEM_READ_BARRIER();
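+	/*
+	 * The MC presumably updates the generation counts around each rewrite
+	 * of the statistics block, so reading END before the body and START
+	 * after it (with read barriers) lets the comparison at the end of
+	 * this function detect a buffer that changed underneath us.
+	 */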
+
+ /* TX */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value);
+ EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value);
+
+ /* RX */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EFSYS_MEM_READ_BARRIER();
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
+ &generation_start);
+
+	/*
+	 * Check that we didn't read the stats in the middle of a DMA update:
+	 * if the generation counts differ, the MC rewrote the buffer while we
+	 * were reading it, so return EAGAIN and let the caller retry (this
+	 * simple comparison may not catch every race).
+	 */
+ if (memcmp(&generation_start, &generation_end,
+ sizeof (generation_start)))
+ return (EAGAIN);
+
+ if (generationp)
+ *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/usr/src/uts/common/io/sfxge/common/siena_mcdi.c b/usr/src/uts/common/io/sfxge/common/siena_mcdi.c
new file mode 100644
index 0000000000..1af078d96a
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/siena_mcdi.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA && EFSYS_OPT_MCDI
+
+#define SIENA_MCDI_PDU(_emip) \
+	(((_emip)->emi_port == 1)					\
+ ? MC_SMEM_P0_PDU_OFST >> 2 \
+ : MC_SMEM_P1_PDU_OFST >> 2)
+
+#define SIENA_MCDI_DOORBELL(_emip) \
+	(((_emip)->emi_port == 1)					\
+ ? MC_SMEM_P0_DOORBELL_OFST >> 2 \
+ : MC_SMEM_P1_DOORBELL_OFST >> 2)
+
+#define SIENA_MCDI_STATUS(_emip) \
+	(((_emip)->emi_port == 1)					\
+ ? MC_SMEM_P0_STATUS_OFST >> 2 \
+ : MC_SMEM_P1_STATUS_OFST >> 2)
+
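+/*
+ * The offsets above are dword indices into the FR_CZ_MC_TREG_SMEM window
+ * (hence the >> 2), with separate PDU, doorbell and status locations for
+ * each of the two MCDI ports.
+ */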
+
+ void
+siena_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in void *hdrp,
+ __in size_t hdr_len,
+ __in void *sdup,
+ __in size_t sdu_len)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t dword;
+ unsigned int pdur;
+ unsigned int dbr;
+ unsigned int pos;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+ dbr = SIENA_MCDI_DOORBELL(emip);
+
+ /* Write the header */
+ EFSYS_ASSERT3U(hdr_len, ==, sizeof (efx_dword_t));
+ dword = *(efx_dword_t *)hdrp;
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, pdur, &dword, B_TRUE);
+
+ /* Write the payload */
+ for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)sdup + pos);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + 1 + (pos >> 2), &dword, B_FALSE);
+ }
+
+ /* Ring the doorbell */
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xd004be11);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, dbr, &dword, B_FALSE);
+}
+
+ efx_rc_t
+siena_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+#if 1
+ /*
+ * XXX Bug 25922, bug 26099: This function is not being used
+ * properly. Until its callers are fixed, it should always
+ * return 0.
+ */
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+#else
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int rebootr;
+ efx_dword_t dword;
+ uint32_t value;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ rebootr = SIENA_MCDI_STATUS(emip);
+
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
+ value = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ if (value == 0)
+ return (0);
+
+ EFX_ZERO_DWORD(dword);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return (EINTR);
+ else
+ return (EIO);
+#endif
+}
+
+	__checkReturn	boolean_t
+siena_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr;
+ unsigned int pdur;
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur, &hdr, B_FALSE);
+ return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE);
+}
+
+ void
+siena_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int pdur;
+ unsigned int pos;
+ efx_dword_t data;
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+
+ for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + ((offset + pos) >> 2), &data, B_FALSE);
+ (void) memcpy((uint8_t *)bufferp + pos, &data,
+ MIN(sizeof (data), length - pos));
+ }
+}
+
+ __checkReturn efx_rc_t
+siena_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_oword_t oword;
+ unsigned int portnum;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(mtp));
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Determine the port number to use for MCDI */
+ EFX_BAR_READO(enp, FR_AZ_CS_DEBUG_REG, &oword);
+ portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM);
+
+ if (portnum == 0) {
+ /* Presumably booted from ROM; only MCDI port 1 will work */
+ emip->emi_port = 1;
+ } else if (portnum <= 2) {
+ emip->emi_port = portnum;
+ } else {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Siena BootROM and firmware only support MCDIv1 */
+ emip->emi_max_version = 1;
+
+ /*
+ * Wipe the atomic reboot status so subsequent MCDI requests succeed.
+ * BOOT_STATUS is preserved so eno_nic_probe() can boot out of the
+ * assertion handler.
+ */
+ (void) siena_mcdi_poll_reboot(enp);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp));
+}
+
+ __checkReturn efx_rc_t
+siena_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ switch (id) {
+ case EFX_MCDI_FEATURE_FW_UPDATE:
+ case EFX_MCDI_FEATURE_LINK_CONTROL:
+ case EFX_MCDI_FEATURE_MACADDR_CHANGE:
+ case EFX_MCDI_FEATURE_MAC_SPOOFING:
+ *supportedp = B_TRUE;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA && EFSYS_OPT_MCDI */
diff --git a/usr/src/uts/common/io/sfxge/common/siena_nic.c b/usr/src/uts/common/io/sfxge/common/siena_nic.c
new file mode 100644
index 0000000000..478d69efcf
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/siena_nic.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#include "mcdi_mon.h"
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_nic_get_partn_mask(
+ __in efx_nic_t *enp,
+ __out unsigned int *maskp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_TYPES_IN_LEN,
+ MC_CMD_NVRAM_TYPES_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_TYPES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_TYPES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_TYPES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *maskp = MCDI_OUT_DWORD(req, NVRAM_TYPES_OUT_TYPES);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6];
+ efx_dword_t capabilities;
+ uint32_t board_type;
+ uint32_t nevq, nrxq, ntxq;
+ efx_rc_t rc;
+
+ /* External port identifier using one-based port numbering */
+ encp->enc_external_port = (uint8_t)enp->en_mcdi.em_emip.emi_port;
+
+ /* Board configuration */
+ if ((rc = efx_mcdi_get_board_cfg(enp, &board_type,
+ &capabilities, mac_addr)) != 0)
+ goto fail1;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ encp->enc_board_type = board_type;
+
+ /* Additional capabilities */
+ encp->enc_clk_mult = 1;
+ if (EFX_DWORD_FIELD(capabilities, MC_CMD_CAPABILITIES_TURBO)) {
+ enp->en_features |= EFX_FEATURE_TURBO;
+
+ if (EFX_DWORD_FIELD(capabilities,
+ MC_CMD_CAPABILITIES_TURBO_ACTIVE)) {
+ encp->enc_clk_mult = 2;
+ }
+ }
+
+ encp->enc_evq_timer_quantum_ns =
+ EFX_EVQ_SIENA_TIMER_QUANTUM_NS / encp->enc_clk_mult;
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+
+ /* When hash header insertion is enabled, Siena inserts 16 bytes */
+ encp->enc_rx_prefix_size = 16;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+ encp->enc_rx_buf_align_end = 1;
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = 1;
+
+ /* Resource limits */
+ rc = efx_mcdi_get_resource_limits(enp, &nevq, &nrxq, &ntxq);
+ if (rc != 0) {
+ if (rc != ENOTSUP)
+ goto fail2;
+
+ nevq = 1024;
+ nrxq = EFX_RXQ_LIMIT_TARGET;
+ ntxq = EFX_TXQ_LIMIT_TARGET;
+ }
+ encp->enc_evq_limit = nevq;
+ encp->enc_rxq_limit = MIN(EFX_RXQ_LIMIT_TARGET, nrxq);
+ encp->enc_txq_limit = MIN(EFX_TXQ_LIMIT_TARGET, ntxq);
+
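+	/*
+	 * The buffer table shares SRAM with the TX and RX descriptor caches,
+	 * so the rows left after reserving a cache per queue bound the
+	 * buffer table limit.
+	 */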
+ encp->enc_buftbl_limit = SIENA_SRAM_ROWS -
+ (encp->enc_txq_limit * EFX_TXQ_DC_NDESCS(EFX_TXQ_DC_SIZE)) -
+ (encp->enc_rxq_limit * EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE));
+
+ encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
+ encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_phy_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail1;
+
+#if EFSYS_OPT_PHY_STATS
+ /* Convert the MCDI statistic mask into the EFX_PHY_STAT mask */
+ siena_phy_decode_stats(enp, encp->enc_mcdi_phy_stat_mask,
+ NULL, &encp->enc_phy_stat_mask, NULL);
+#endif /* EFSYS_OPT_PHY_STATS */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_probe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ siena_link_state_t sls;
+ unsigned int mask;
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* Test BIU */
+ if ((rc = efx_nic_biu_test(enp)) != 0)
+ goto fail1;
+
+ /* Clear the region register */
+ EFX_POPULATE_OWORD_4(oword,
+ FRF_AZ_ADR_REGION0, 0,
+ FRF_AZ_ADR_REGION1, (1 << 16),
+ FRF_AZ_ADR_REGION2, (2 << 16),
+ FRF_AZ_ADR_REGION3, (3 << 16));
+ EFX_BAR_WRITEO(enp, FR_AZ_ADR_REGION_REG, &oword);
+
+ /* Read clear any assertion state */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail2;
+
+ /* Exit the assertion handler */
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail3;
+
+ /* Wrestle control from the BMC */
+ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+ goto fail4;
+
+ if ((rc = siena_board_cfg(enp)) != 0)
+ goto fail5;
+
+ if ((rc = siena_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = siena_nic_reset(enp)) != 0)
+ goto fail7;
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail8;
+ epp->ep_default_adv_cap_mask = sls.sls_adv_cap_mask;
+ epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+ if ((rc = siena_nic_get_partn_mask(enp, &mask)) != 0)
+ goto fail9;
+ enp->en_u.siena.enu_partn_mask = mask;
+#endif
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
+ goto fail10;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
+ goto fail11;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = mcdi_mon_cfg_build(enp)) != 0)
+ goto fail12;
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail12:
+ EFSYS_PROBE(fail12);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail11:
+ EFSYS_PROBE(fail11);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail10:
+ EFSYS_PROBE(fail10);
+#endif
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+fail9:
+ EFSYS_PROBE(fail9);
+#endif
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* siena_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ /*
+	 * Bug24908: ENTITY_RESET_IN_LEN is non-zero, but zero may be supplied
+ * for backwards compatibility with PORT_RESET_IN_LEN.
+ */
+ EFX_STATIC_ASSERT(MC_CMD_ENTITY_RESET_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+static void
+siena_nic_rx_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * RX_INGR_EN is always enabled on Siena, because we rely on
+	 * the RX parser to be resilient to missing SOP/EOP.
+ */
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_INGR_EN, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Disable parsing of additional 802.1Q in Q packets */
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+}
+
+static void
+siena_nic_usrev_dis(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_CZ_USREV_DIS, 1);
+ EFX_BAR_WRITEO(enp, FR_CZ_USR_EV_CFG, &oword);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* Enable reporting of some events (e.g. link change) */
+ if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+ goto fail1;
+
+ siena_sram_init(enp);
+
+ /* Configure Siena's RX block */
+ siena_nic_rx_cfg(enp);
+
+ /* Disable USR_EVents for now */
+ siena_nic_usrev_dis(enp);
+
+ /* bug17057: Ensure set_link is called */
+ if ((rc = siena_phy_reconfigure(enp)) != 0)
+ goto fail2;
+
+ enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V1;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_nic_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ void
+siena_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_cfg_free(enp);
+#endif /* EFSYS_OPT_MON_STATS */
+ (void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+static efx_register_set_t __siena_registers[] = {
+ { FR_AZ_ADR_REGION_REG_OFST, 0, 1 },
+ { FR_CZ_USR_EV_CFG_OFST, 0, 1 },
+ { FR_AZ_RX_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_TX_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_TX_RESERVED_REG_OFST, 0, 1 },
+ { FR_AZ_SRM_TX_DC_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_RX_DC_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_RX_DC_PF_WM_REG_OFST, 0, 1 },
+ { FR_AZ_DP_CTRL_REG_OFST, 0, 1 },
+ { FR_BZ_RX_RSS_TKEY_REG_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG1_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG2_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG3_OFST, 0, 1}
+};
+
+static const uint32_t __siena_register_masks[] = {
+ 0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF,
+ 0x000103FF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000,
+ 0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF,
+ 0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF,
+ 0x001FFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000003, 0x00000000, 0x00000000, 0x00000000,
+ 0x000003FF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000FFF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000
+};
+
+static efx_register_set_t __siena_tables[] = {
+ { FR_AZ_RX_FILTER_TBL0_OFST, FR_AZ_RX_FILTER_TBL0_STEP,
+ FR_AZ_RX_FILTER_TBL0_ROWS },
+ { FR_CZ_RX_MAC_FILTER_TBL0_OFST, FR_CZ_RX_MAC_FILTER_TBL0_STEP,
+ FR_CZ_RX_MAC_FILTER_TBL0_ROWS },
+ { FR_AZ_RX_DESC_PTR_TBL_OFST,
+ FR_AZ_RX_DESC_PTR_TBL_STEP, FR_CZ_RX_DESC_PTR_TBL_ROWS },
+ { FR_AZ_TX_DESC_PTR_TBL_OFST,
+ FR_AZ_TX_DESC_PTR_TBL_STEP, FR_CZ_TX_DESC_PTR_TBL_ROWS },
+ { FR_AZ_TIMER_TBL_OFST, FR_AZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS },
+ { FR_CZ_TX_FILTER_TBL0_OFST,
+ FR_CZ_TX_FILTER_TBL0_STEP, FR_CZ_TX_FILTER_TBL0_ROWS },
+ { FR_CZ_TX_MAC_FILTER_TBL0_OFST,
+ FR_CZ_TX_MAC_FILTER_TBL0_STEP, FR_CZ_TX_MAC_FILTER_TBL0_ROWS }
+};
+
+static const uint32_t __siena_table_masks[] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000003FF,
+ 0xFFFF0FFF, 0xFFFFFFFF, 0x00000E7F, 0x00000000,
+ 0xFFFFFFFE, 0x0FFFFFFF, 0x01800000, 0x00000000,
+ 0xFFFFFFFE, 0x0FFFFFFF, 0x0C000000, 0x00000000,
+ 0x3FFFFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000013FF,
+ 0xFFFF07FF, 0xFFFFFFFF, 0x0000007F, 0x00000000,
+};
+
+ __checkReturn efx_rc_t
+siena_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_register_set_t *rsp;
+ const uint32_t *dwordp;
+ unsigned int nitems;
+ unsigned int count;
+ efx_rc_t rc;
+
+ /* Fill out the register mask entries */
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_register_masks)
+ == EFX_ARRAY_SIZE(__siena_registers) * 4);
+
+ nitems = EFX_ARRAY_SIZE(__siena_registers);
+ dwordp = __siena_register_masks;
+ for (count = 0; count < nitems; ++count) {
+ rsp = __siena_registers + count;
+ rsp->mask.eo_u32[0] = *dwordp++;
+ rsp->mask.eo_u32[1] = *dwordp++;
+ rsp->mask.eo_u32[2] = *dwordp++;
+ rsp->mask.eo_u32[3] = *dwordp++;
+ }
+
+ /* Fill out the register table entries */
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_table_masks)
+ == EFX_ARRAY_SIZE(__siena_tables) * 4);
+
+ nitems = EFX_ARRAY_SIZE(__siena_tables);
+ dwordp = __siena_table_masks;
+ for (count = 0; count < nitems; ++count) {
+ rsp = __siena_tables + count;
+ rsp->mask.eo_u32[0] = *dwordp++;
+ rsp->mask.eo_u32[1] = *dwordp++;
+ rsp->mask.eo_u32[2] = *dwordp++;
+ rsp->mask.eo_u32[3] = *dwordp++;
+ }
+
+ if ((rc = efx_nic_test_registers(enp, __siena_registers,
+ EFX_ARRAY_SIZE(__siena_registers))) != 0)
+ goto fail1;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BYTE_ALTERNATE,
+ EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail2;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BYTE_CHANGING,
+ EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail3;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/usr/src/uts/common/io/sfxge/common/siena_nvram.c b/usr/src/uts/common/io/sfxge/common/siena_nvram.c
new file mode 100644
index 0000000000..0dcf3a32ef
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/siena_nvram.c
@@ -0,0 +1,722 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, sizep,
+ NULL, NULL, NULL)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, SIENA_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_read(enp, partn, offset, data, chunk,
+ MC_CMD_NVRAM_READ_IN_V2_DEFAULT)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+	__in_bcount(size)	caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, SIENA_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_write(enp, partn, offset,
+ data, chunk)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ boolean_t reboot;
+ efx_rc_t rc;
+
+ /*
+ * Reboot into the new image only for PHYs. The driver has to
+ * explicitly cope with an MC reboot after a firmware update.
+ */
+ reboot = (partn == MC_CMD_NVRAM_TYPE_PHY_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_PHY_PORT1 ||
+ partn == MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO);
+
+ if ((rc = efx_mcdi_nvram_update_finish(enp, partn, reboot)) != 0) {
+ goto fail1;
+ }
+
+ return;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef struct siena_parttbl_entry_s {
+ unsigned int partn;
+ unsigned int port;
+ efx_nvram_type_t nvtype;
+} siena_parttbl_entry_t;
+
+static siena_parttbl_entry_t siena_parttbl[] = {
+ {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 1, EFX_NVRAM_NULLPHY},
+ {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 2, EFX_NVRAM_NULLPHY},
+ {MC_CMD_NVRAM_TYPE_MC_FW, 1, EFX_NVRAM_MC_FIRMWARE},
+ {MC_CMD_NVRAM_TYPE_MC_FW, 2, EFX_NVRAM_MC_FIRMWARE},
+ {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM, 1, EFX_NVRAM_BOOTROM},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM, 2, EFX_NVRAM_BOOTROM},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG},
+ {MC_CMD_NVRAM_TYPE_PHY_PORT0, 1, EFX_NVRAM_PHY},
+ {MC_CMD_NVRAM_TYPE_PHY_PORT1, 2, EFX_NVRAM_PHY},
+ {MC_CMD_NVRAM_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {MC_CMD_NVRAM_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {MC_CMD_NVRAM_TYPE_FC_FW, 1, EFX_NVRAM_FCFW},
+ {MC_CMD_NVRAM_TYPE_FC_FW, 2, EFX_NVRAM_FCFW},
+ {MC_CMD_NVRAM_TYPE_CPLD, 1, EFX_NVRAM_CPLD},
+ {MC_CMD_NVRAM_TYPE_CPLD, 2, EFX_NVRAM_CPLD},
+ {MC_CMD_NVRAM_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {MC_CMD_NVRAM_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE}
+};
+
+ __checkReturn efx_rc_t
+siena_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int i;
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT(partnp != NULL);
+
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ siena_parttbl_entry_t *entry = &siena_parttbl[i];
+
+ if (entry->port == emip->emi_port && entry->nvtype == type) {
+ *partnp = entry->partn;
+ return (0);
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+siena_nvram_test(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_parttbl_entry_t *entry;
+ unsigned int i;
+ efx_rc_t rc;
+
+ /*
+ * Iterate over the list of supported partition types
+ * applicable to *this* port
+ */
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ entry = &siena_parttbl[i];
+
+ if (entry->port != emip->emi_port ||
+ !(enp->en_u.siena.enu_partn_mask & (1 << entry->partn)))
+ continue;
+
+ if ((rc = efx_mcdi_nvram_test(enp, entry->partn)) != 0) {
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#define SIENA_DYNAMIC_CFG_SIZE(_nitems) \
+ (sizeof (siena_mc_dynamic_config_hdr_t) + ((_nitems) * \
+ sizeof (((siena_mc_dynamic_config_hdr_t *)NULL)->fw_version[0])))
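+/*
+ * For example, SIENA_DYNAMIC_CFG_SIZE(0) is just the fixed header; each
+ * additional firmware version item extends it by one fw_version[] entry.
+ */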
+
+ __checkReturn efx_rc_t
+siena_nvram_get_dynamic_cfg(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t vpd,
+ __out siena_mc_dynamic_config_hdr_t **dcfgp,
+ __out size_t *sizep)
+{
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ size_t size;
+ uint8_t cksum;
+ unsigned int vpd_offset;
+ unsigned int vpd_length;
+ unsigned int hdr_length;
+ unsigned int nversions;
+ unsigned int pos;
+ unsigned int region;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1);
+
+ /*
+	 * Allocate sufficient memory for the entire dynamic config area,
+	 * even if we're not actually going to read in the VPD.
+ */
+ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg);
+ if (dcfg == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, partn, 0,
+ (caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0)
+ goto fail3;
+
+ /* Verify the magic */
+ if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0)
+ != SIENA_MC_DYNAMIC_CONFIG_MAGIC)
+ goto invalid1;
+
+	/* All future versions of the structure must be backwards compatible */
+ EFX_STATIC_ASSERT(SIENA_MC_DYNAMIC_CONFIG_VERSION == 0);
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+ nversions = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+
+ /* Verify the hdr doesn't overflow the partn size */
+ if (hdr_length > size || vpd_offset > size || vpd_length > size ||
+ vpd_length + vpd_offset > size)
+ goto invalid2;
+
+	/* Verify the header has room for all of its versions */
+ if (hdr_length < SIENA_DYNAMIC_CFG_SIZE(0) ||
+ hdr_length < SIENA_DYNAMIC_CFG_SIZE(nversions))
+ goto invalid3;
+
+ /*
+ * Read the remaining portion of the dcfg, either including
+ * the whole of VPD (there is no vpd length in this structure,
+ * so we have to parse each tag), or just the dcfg header itself
+ */
+ region = vpd ? vpd_offset + vpd_length : hdr_length;
+ if (region > SIENA_NVRAM_CHUNK) {
+ if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
+ (caddr_t)dcfg + SIENA_NVRAM_CHUNK,
+ region - SIENA_NVRAM_CHUNK)) != 0)
+ goto fail4;
+ }
+
+ /* Verify checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ if (cksum != 0)
+ goto invalid4;
+
+ goto done;
+
+invalid4:
+ EFSYS_PROBE(invalid4);
+invalid3:
+ EFSYS_PROBE(invalid3);
+invalid2:
+ EFSYS_PROBE(invalid2);
+invalid1:
+ EFSYS_PROBE(invalid1);
+
+ /*
+ * Construct a new "null" dcfg, with an empty version vector,
+ * and an empty VPD chunk trailing. This has the neat side effect
+ * of testing the exception paths in the write path.
+ */
+ EFX_POPULATE_DWORD_1(dcfg->magic,
+ EFX_DWORD_0, SIENA_MC_DYNAMIC_CONFIG_MAGIC);
+ EFX_POPULATE_WORD_1(dcfg->length, EFX_WORD_0, sizeof (*dcfg));
+ EFX_POPULATE_BYTE_1(dcfg->version, EFX_BYTE_0,
+ SIENA_MC_DYNAMIC_CONFIG_VERSION);
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, sizeof (*dcfg));
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, 0);
+ EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items, EFX_DWORD_0, 0);
+
+done:
+ *dcfgp = dcfg;
+ *sizep = size;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, dcfg);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_get_subtype(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMAX)];
+ efx_word_t *fw_list;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOARD_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST +
+ (partn + 1) * sizeof (efx_word_t)) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ fw_list = MCDI_OUT2(req, efx_word_t,
+ GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ *subtypep = EFX_WORD_FIELD(fw_list[partn], EFX_WORD_0);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ siena_mc_dynamic_config_hdr_t *dcfg;
+ siena_parttbl_entry_t *entry;
+ uint32_t dcfg_partn;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = siena_nvram_get_subtype(enp, partn, subtypep)) != 0)
+ goto fail2;
+
+ /*
+	 * Some partitions are accessible from both ports (for instance the
+	 * BOOTROM). Find the highest version reported by all dcfg structures
+	 * on ports that have access to this partition.
+ */
+ version[0] = version[1] = version[2] = version[3] = 0;
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ siena_mc_fw_version_t *verp;
+ unsigned int nitems;
+ uint16_t temp[4];
+ size_t length;
+
+ entry = &siena_parttbl[i];
+ if (entry->partn != partn)
+ continue;
+
+ dcfg_partn = (entry->port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+ /*
+		 * Ignore missing partitions on port 2, assuming they're due
+		 * to running on a single-port part.
+ */
+ if ((1 << dcfg_partn) & ~enp->en_u.siena.enu_partn_mask) {
+ if (entry->port == 2)
+ continue;
+ }
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_FALSE, &dcfg, &length)) != 0)
+ goto fail3;
+
+ nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items,
+ EFX_DWORD_0);
+ if (nitems < entry->partn)
+ goto done;
+
+ verp = &dcfg->fw_version[partn];
+ temp[0] = EFX_WORD_FIELD(verp->version_w, EFX_WORD_0);
+ temp[1] = EFX_WORD_FIELD(verp->version_x, EFX_WORD_0);
+ temp[2] = EFX_WORD_FIELD(verp->version_y, EFX_WORD_0);
+ temp[3] = EFX_WORD_FIELD(verp->version_z, EFX_WORD_0);
+ if (memcmp(version, temp, sizeof (temp)) < 0)
+ (void) memcpy(version, temp, sizeof (temp));
+
+done:
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = siena_nvram_partn_lock(enp, partn)) != 0)
+ goto fail1;
+
+ if (chunk_sizep != NULL)
+ *chunk_sizep = SIENA_NVRAM_CHUNK;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ siena_nvram_partn_unlock(enp, partn);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4])
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ siena_mc_fw_version_t *fwverp;
+ uint32_t dcfg_partn;
+ size_t dcfg_size;
+ unsigned int hdr_length;
+ unsigned int vpd_length;
+ unsigned int vpd_offset;
+ unsigned int nitems;
+ unsigned int required_hdr_length;
+ unsigned int pos;
+ uint8_t cksum;
+ uint32_t subtype;
+ size_t length;
+ efx_rc_t rc;
+
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &dcfg_size)) != 0)
+ goto fail1;
+
+ if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_TRUE, &dcfg, &length)) != 0)
+ goto fail3;
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+ nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+
+ /*
+	 * NOTE: This function will overwrite any fields trailing the version
+	 * vector or the VPD chunk.
+ */
+ required_hdr_length = SIENA_DYNAMIC_CFG_SIZE(partn + 1);
+ if (required_hdr_length + vpd_length > length) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ if (vpd_offset < required_hdr_length) {
+ (void) memmove((caddr_t)dcfg + required_hdr_length,
+ (caddr_t)dcfg + vpd_offset, vpd_length);
+ vpd_offset = required_hdr_length;
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, vpd_offset);
+ }
+
+ if (hdr_length < required_hdr_length) {
+ (void) memset((caddr_t)dcfg + hdr_length, 0,
+ required_hdr_length - hdr_length);
+ hdr_length = required_hdr_length;
+ EFX_POPULATE_WORD_1(dcfg->length,
+ EFX_WORD_0, hdr_length);
+ }
+
+ /* Get the subtype to insert into the fw_subtype array */
+ if ((rc = siena_nvram_get_subtype(enp, partn, &subtype)) != 0)
+ goto fail5;
+
+ /* Fill out the new version */
+ fwverp = &dcfg->fw_version[partn];
+ EFX_POPULATE_DWORD_1(fwverp->fw_subtype, EFX_DWORD_0, subtype);
+ EFX_POPULATE_WORD_1(fwverp->version_w, EFX_WORD_0, version[0]);
+ EFX_POPULATE_WORD_1(fwverp->version_x, EFX_WORD_0, version[1]);
+ EFX_POPULATE_WORD_1(fwverp->version_y, EFX_WORD_0, version[2]);
+ EFX_POPULATE_WORD_1(fwverp->version_z, EFX_WORD_0, version[3]);
+
+ /* Update the version count */
+ if (nitems < partn + 1) {
+ nitems = partn + 1;
+ EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items,
+ EFX_DWORD_0, nitems);
+ }
+
+ /* Update the checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ dcfg->csum.eb_u8[0] -= cksum;
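+	/*
+	 * The header is valid when its bytes sum to zero modulo 256 (as
+	 * verified in siena_nvram_get_dynamic_cfg), so subtracting the
+	 * recomputed sum from the stored csum byte restores that invariant.
+	 */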
+
+ /* Erase and write the new partition */
+ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, dcfg_size)) != 0)
+ goto fail6;
+
+ /* Write out the new structure to nvram */
+ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0,
+ (caddr_t)dcfg, vpd_offset + vpd_length)) != 0)
+ goto fail7;
+
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/usr/src/uts/common/io/sfxge/common/siena_phy.c b/usr/src/uts/common/io/sfxge/common/siena_phy.c
new file mode 100644
index 0000000000..bcd1b6f37e
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/siena_phy.c
@@ -0,0 +1,797 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+static void
+siena_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ *maskp = mask;
+}
+
+static void
+siena_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+ void
+siena_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+ /*
+ * Convert the LINKCHANGE speed enumeration into mbit/s, in the
+ * same way as GET_LINK encodes the speed
+ */
+ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+ siena_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ siena_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn efx_rc_t
+siena_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ efx_rc_t rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = siena_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_get_link(
+ __in efx_nic_t *enp,
+ __out siena_link_state_t *slsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &slsp->sls_adv_cap_mask);
+ siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &slsp->sls_lp_cap_mask);
+
+ siena_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &slsp->sls_link_mode, &slsp->sls_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+	/* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+
+ slsp->sls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ slsp->sls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_SET_ID_LED_IN_LEN,
+ MC_CMD_SET_ID_LED_OUT_LEN),
+ MAX(MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN))];
+ uint32_t cap_mask;
+ unsigned int led_mode;
+ unsigned int speed;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* And set the blink mode */
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ uint32_t state;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
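+/*
+ * Each PHY statistic is DMAed by the MC as a single 32-bit value, so the
+ * byte offset of a statistic in the DMA buffer is its MCDI record index * 4.
+ */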
+#define SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \
+ _mc_record, _efx_record) \
+ if ((_vmask) & (1ULL << (_mc_record))) { \
+ (_smask) |= (1ULL << (_efx_record)); \
+ if ((_stat) != NULL && !EFSYS_MEM_IS_NULL(_esmp)) { \
+ efx_dword_t dword; \
+ EFSYS_MEM_READD(_esmp, (_mc_record) * 4, &dword);\
+ (_stat)[_efx_record] = \
+ EFX_DWORD_FIELD(dword, EFX_DWORD_0); \
+ } \
+ }
+
+#define SIENA_SIMPLE_STAT_SET2(_vmask, _esmp, _smask, _stat, _record) \
+ SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \
+ MC_CMD_ ## _record, \
+ EFX_PHY_STAT_ ## _record)
+
+ void
+siena_phy_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t vmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint64_t *smaskp,
+ __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ uint64_t smask = 0;
+
+ _NOTE(ARGUNUSED(enp))
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, OUI);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_TX_FAULT);
+
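+	/*
+	 * PMA/PMD signal detect is reported as a single value with one
+	 * bit per lane; expand it into the four per-lane statistics.
+	 */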
+ if (vmask & (1 << MC_CMD_PMA_PMD_SIGNAL)) {
+ smask |= ((1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_A) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_B) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_C) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_D));
+ if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+ uint32_t sig;
+ EFSYS_MEM_READD(esmp, 4 * MC_CMD_PMA_PMD_SIGNAL,
+ &dword);
+ sig = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_A] = (sig >> 1) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_B] = (sig >> 2) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_C] = (sig >> 3) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_D] = (sig >> 4) & 1;
+ }
+ }
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_A,
+ EFX_PHY_STAT_SNR_A);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_B,
+ EFX_PHY_STAT_SNR_B);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_C,
+ EFX_PHY_STAT_SNR_C);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_D,
+ EFX_PHY_STAT_SNR_D);
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_TX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BER);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BLOCK_ERRORS);
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_LINK_UP,
+ EFX_PHY_STAT_PHY_XS_LINK_UP);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_RX_FAULT,
+ EFX_PHY_STAT_PHY_XS_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_TX_FAULT,
+ EFX_PHY_STAT_PHY_XS_TX_FAULT);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_ALIGN,
+ EFX_PHY_STAT_PHY_XS_ALIGN);
+
+ if (vmask & (1 << MC_CMD_PHYXS_SYNC)) {
+ smask |= ((1 << EFX_PHY_STAT_PHY_XS_SYNC_A) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_B) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_C) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_D));
+ if (stat != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+ uint32_t sync;
+ EFSYS_MEM_READD(esmp, 4 * MC_CMD_PHYXS_SYNC, &dword);
+ sync = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_A] = (sync >> 0) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_B] = (sync >> 1) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_C] = (sync >> 2) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_D] = (sync >> 3) & 1;
+ }
+ }
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_COMPLETE);
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_CL22_LINK_UP,
+ EFX_PHY_STAT_CL22EXT_LINK_UP);
+
+ if (smaskp != NULL)
+ *smaskp = smask;
+}
+
+ __checkReturn efx_rc_t
+siena_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t vmask = encp->enc_mcdi_phy_stat_mask;
+ uint64_t smask;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_PHY_STATS_IN_LEN,
+ MC_CMD_PHY_STATS_OUT_DMA_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_PHY_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_PHY_STATS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_PHY_STATS_OUT_DMA_LEN;
+
+ MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN);
+
+ siena_phy_decode_stats(enp, vmask, esmp, &smask, stat);
+ EFSYS_ASSERT(smask == encp->enc_phy_stat_mask);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+siena_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_start(enp, type)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
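+/*
+ * Translate an MCDI SFT9001 cable diagnostic code into the corresponding
+ * EFX cable status value.
+ */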
+static __checkReturn unsigned long
+siena_phy_sft9001_bist_status(
+ __in uint16_t code)
+{
+ switch (code) {
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY:
+ return (EFX_PHY_CABLE_STATUS_BUSY);
+ case MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT:
+ return (EFX_PHY_CABLE_STATUS_INTERPAIRSHORT);
+ case MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT:
+ return (EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT);
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN:
+ return (EFX_PHY_CABLE_STATUS_OPEN);
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_OK:
+ return (EFX_PHY_CABLE_STATUS_OK);
+ default:
+ return (EFX_PHY_CABLE_STATUS_INVALID);
+ }
+}
+
+ __checkReturn efx_rc_t
+siena_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX)];
+ uint32_t value_mask = 0;
+ efx_mcdi_req_t req;
+ uint32_t result;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_POLL_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MCDI_CTL_SDU_LEN_MAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (count > 0)
+ (void) memset(valuesp, '\0', count * sizeof (unsigned long));
+
+ result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT);
+
+ /* Extract PHY specific results */
+ if (result == MC_CMD_POLL_BIST_PASSED &&
+ encp->enc_phy_type == EFX_PHY_SFT9001B &&
+ req.emr_out_length_used >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN &&
+ (type == EFX_BIST_TYPE_PHY_CABLE_SHORT ||
+ type == EFX_BIST_TYPE_PHY_CABLE_LONG)) {
+ uint16_t word;
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_A) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_A] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_A);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_B) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_B] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_B);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_C) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_C] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_C);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_D) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_D] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_D);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_A) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_A);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_A] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_A);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_B) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_B);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_B] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_B);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_C) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_C);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_C] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_C);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_D) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_D);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_D] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_D);
+ }
+
+ } else if (result == MC_CMD_POLL_BIST_FAILED &&
+ encp->enc_phy_type == EFX_PHY_QLX111V &&
+	    req.emr_out_length_used >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN &&
+ count > EFX_BIST_FAULT_CODE) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_FAULT_CODE] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST);
+ value_mask |= 1 << EFX_BIST_FAULT_CODE;
+ }
+
+ if (value_maskp != NULL)
+ *value_maskp = value_mask;
+
+ EFSYS_ASSERT(resultp != NULL);
+ if (result == MC_CMD_POLL_BIST_RUNNING)
+ *resultp = EFX_BIST_RESULT_RUNNING;
+ else if (result == MC_CMD_POLL_BIST_PASSED)
+ *resultp = EFX_BIST_RESULT_PASSED;
+ else
+ *resultp = EFX_BIST_RESULT_FAILED;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ /* There is no way to stop BIST on Siena */
+ _NOTE(ARGUNUSED(enp, type))
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/usr/src/uts/common/io/sfxge/common/siena_sram.c b/usr/src/uts/common/io/sfxge/common/siena_sram.c
new file mode 100644
index 0000000000..6374dea51e
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/siena_sram.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+ void
+siena_sram_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t rx_base, tx_base;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
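+	/*
+	 * The buffer table occupies the bottom of SRAM; place the RX
+	 * descriptor caches immediately above it, and the TX descriptor
+	 * caches above those.
+	 */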
+ rx_base = encp->enc_buftbl_limit;
+ tx_base = rx_base + (encp->enc_rxq_limit *
+ EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE));
+
+ /* Initialize the transmit descriptor cache */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, tx_base);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DC_SIZE, EFX_TXQ_DC_SIZE);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_DC_CFG_REG, &oword);
+
+ /* Initialize the receive descriptor cache */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rx_base);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_SIZE, EFX_RXQ_DC_SIZE);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_CFG_REG, &oword);
+
+ /* Set receive descriptor pre-fetch low water mark */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_PF_LWM, 56);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_PF_WM_REG, &oword);
+
+ /* Set the event queue to use for SRAM updates */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_UPD_EVQ_ID, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_UPD_EVQ_REG, &oword);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+siena_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_sram_pattern_fn_t func)
+{
+ efx_oword_t oword;
+ efx_qword_t qword;
+ efx_qword_t verify;
+ size_t rows;
+ unsigned int wptr;
+ unsigned int rptr;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Reconfigure into HALF buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ /*
+ * Move the descriptor caches up to the top of SRAM, and test
+	 * all of SRAM below them. Only one row is left untested.
+ */
+ rows = SIENA_SRAM_ROWS - 1;
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rows);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, rows + 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
+
+ /*
+	 * Write the pattern through BUF_HALF_TBL in 64-entry batches,
+	 * waiting 1us between batches so as not to overflow the SRAM FIFO.
+ */
+ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
+ func(wptr, B_FALSE, &qword);
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);
+
+ if ((wptr - rptr) < 64 && wptr < rows - 1)
+ continue;
+
+ EFSYS_SPIN(1);
+
+ for (; rptr <= wptr; ++rptr) {
+ func(rptr, B_FALSE, &qword);
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
+ &verify);
+
+ if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ }
+ }
+
+ /* And do the same negated */
+ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
+ func(wptr, B_TRUE, &qword);
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);
+
+ if ((wptr - rptr) < 64 && wptr < rows - 1)
+ continue;
+
+ EFSYS_SPIN(1);
+
+ for (; rptr <= wptr; ++rptr) {
+ func(rptr, B_TRUE, &qword);
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
+ &verify);
+
+ if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+ }
+
+ /* Restore back to FULL buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ /*
+ * We don't need to reconfigure SRAM again because the API
+	 * requires efx_nic_fini() to be called after an SRAM test.
+ */
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Restore back to FULL buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/usr/src/uts/common/io/sfxge/common/siena_vpd.c b/usr/src/uts/common/io/sfxge/common/siena_vpd.c
new file mode 100644
index 0000000000..a79af0382e
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/common/siena_vpd.c
@@ -0,0 +1,614 @@
+/*
+ * Copyright (c) 2009-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_VPD
+
+#if EFSYS_OPT_SIENA
+
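+/*
+ * Read the static VPD area out of the static configuration sector for the
+ * given port partition. A copy is returned in newly allocated memory, or
+ * NULL if the sector holds no static VPD.
+ */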
+static __checkReturn efx_rc_t
+siena_vpd_get_static(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __deref_out_bcount_opt(*sizep) caddr_t *svpdp,
+ __out size_t *sizep)
+{
+ siena_mc_static_config_hdr_t *scfg;
+ caddr_t svpd;
+ size_t size;
+ uint8_t cksum;
+ unsigned int vpd_offset;
+ unsigned int vpd_length;
+ unsigned int hdr_length;
+ unsigned int pos;
+ unsigned int region;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1);
+
+ /* Allocate sufficient memory for the entire static cfg area */
+ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg);
+ if (scfg == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, partn, 0,
+ (caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0)
+ goto fail3;
+
+ /* Verify the magic number */
+ if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) !=
+ SIENA_MC_STATIC_CONFIG_MAGIC) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+	/* All future versions of the structure must be backwards compatible */
+ EFX_STATIC_ASSERT(SIENA_MC_STATIC_CONFIG_VERSION == 0);
+
+ hdr_length = EFX_WORD_FIELD(scfg->length, EFX_WORD_0);
+ vpd_offset = EFX_DWORD_FIELD(scfg->static_vpd_offset, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(scfg->static_vpd_length, EFX_DWORD_0);
+
+ /* Verify the hdr doesn't overflow the sector size */
+ if (hdr_length > size || vpd_offset > size || vpd_length > size ||
+ vpd_length + vpd_offset > size) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ /* Read the remainder of scfg + static vpd */
+ region = vpd_offset + vpd_length;
+ if (region > SIENA_NVRAM_CHUNK) {
+ if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
+ (caddr_t)scfg + SIENA_NVRAM_CHUNK,
+ region - SIENA_NVRAM_CHUNK)) != 0)
+ goto fail6;
+ }
+
+ /* Verify checksum */
+	/* Verify the checksum; the header bytes must sum to zero */
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)scfg)[pos];
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ if (vpd_length == 0)
+ svpd = NULL;
+ else {
+ /* Copy the vpd data out */
+ EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd);
+ if (svpd == NULL) {
+ rc = ENOMEM;
+ goto fail8;
+ }
+ (void) memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length);
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+
+ *svpdp = svpd;
+ *sizep = vpd_length;
+
+ return (0);
+
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_init(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ caddr_t svpd = NULL;
+ unsigned partn;
+ size_t size = 0;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1;
+
+	/*
+	 * We need the static VPD sector in order to present a unified
+	 * static+dynamic VPD on every read, write and verify cycle. Since
+	 * it should *never* change, we can simply cache it here.
+	 */
+ if ((rc = siena_vpd_get_static(enp, partn, &svpd, &size)) != 0)
+ goto fail1;
+
+ if (svpd != NULL && size > 0) {
+ if ((rc = efx_vpd_hunk_verify(svpd, size, NULL)) != 0)
+ goto fail2;
+ }
+
+ enp->en_u.siena.enu_svpd = svpd;
+ enp->en_u.siena.enu_svpd_length = size;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, svpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+	/*
+	 * This function returns the total size the user should allocate
+	 * for all VPD operations. We've already cached the static VPD,
+	 * so we just need to return an upper bound on the dynamic VPD.
+	 * Since the dynamic_config structure can change under our feet
+	 * (as version numbers are inserted), just be safe and return the
+	 * total size of the dynamic_config *sector*.
+	 */
+ partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, partn, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ unsigned int vpd_length;
+ unsigned int vpd_offset;
+ unsigned int dcfg_partn;
+ size_t dcfg_size;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_TRUE, &dcfg, &dcfg_size)) != 0)
+ goto fail1;
+
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+
+ if (vpd_length > size) {
+		rc = EFAULT;	/* Invalid dcfg: VPD longer than sector */
+ goto fail2;
+ }
+
+ EFSYS_ASSERT3U(vpd_length, <=, size);
+ (void) memcpy(data, (caddr_t)dcfg + vpd_offset, vpd_length);
+
+ /* Pad data with all-1s, consistent with update operations */
+ (void) memset(data + vpd_length, 0xff, size - vpd_length);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_tag_t stag;
+ efx_vpd_tag_t dtag;
+ efx_vpd_keyword_t skey;
+ efx_vpd_keyword_t dkey;
+ unsigned int scont;
+ unsigned int dcont;
+
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /*
+	 * Strictly speaking, dynamic VPD could be considered optional.
+	 * Instead, to conform more closely to the read/verify/reinit()
+	 * paradigm, we require dynamic VPD. siena_vpd_reinit() will
+ * reinitialize it as required.
+ */
+ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
+ goto fail1;
+
+ /*
+ * Verify that there is no duplication between the static and
+ * dynamic cfg sectors.
+ */
+ if (enp->en_u.siena.enu_svpd_length == 0)
+ goto done;
+
+ dcont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(data, size, &dtag,
+ &dkey, NULL, NULL, &dcont)) != 0)
+ goto fail2;
+ if (dcont == 0)
+ break;
+
+ /*
+ * Skip the RV keyword. It should be present in both the static
+ * and dynamic cfg sectors.
+ */
+ if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V'))
+ continue;
+
+ scont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(
+ enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, &stag, &skey,
+ NULL, NULL, &scont)) != 0)
+ goto fail3;
+ if (scont == 0)
+ break;
+
+ if (stag == dtag && skey == dkey) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ boolean_t wantpid;
+ efx_rc_t rc;
+
+	/*
+	 * Only create an ID string (PID) in the reinitialized VPD if the
+	 * cached static VPD doesn't already contain one.
+	 */
+ if (enp->en_u.siena.enu_svpd_length == 0)
+ wantpid = B_TRUE;
+ else {
+ unsigned int offset;
+ uint8_t length;
+
+ rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length,
+ EFX_VPD_ID, 0, &offset, &length);
+ if (rc == 0)
+ wantpid = B_FALSE;
+ else if (rc == ENOENT)
+ wantpid = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ unsigned int offset;
+ uint8_t length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Attempt to satisfy the request from svpd first */
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ evvp->evv_length = length;
+ (void) memcpy(evvp->evv_value,
+ enp->en_u.siena.enu_svpd + offset, length);
+ return (0);
+ } else if (rc != ENOENT)
+ goto fail1;
+ }
+
+ /* And then from the provided data buffer */
+ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) != 0)
+ goto fail2;
+
+ evvp->evv_length = length;
+ (void) memcpy(evvp->evv_value, data + offset, length);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* If the provided (tag,keyword) exists in svpd, then it is readonly */
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ unsigned int offset;
+ uint8_t length;
+
+ if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ rc = EACCES;
+ goto fail1;
+ }
+ }
+
+ if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ _NOTE(ARGUNUSED(enp, data, size, evvp, contp))
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ unsigned int vpd_offset;
+ unsigned int dcfg_partn;
+ unsigned int hdr_length;
+ unsigned int pos;
+ uint8_t cksum;
+ size_t partn_size, dcfg_size;
+ size_t vpd_length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Determine total length of all tags */
+ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
+ goto fail1;
+
+ /* Lock dynamic config sector for write, and read structure only */
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
+ goto fail3;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_FALSE, &dcfg, &dcfg_size)) != 0)
+ goto fail4;
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+
+ /* Allocated memory should have room for the new VPD */
+ if (hdr_length + vpd_length > dcfg_size) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+	/* Copy the new VPD to the end of the sector and update the header */
+ vpd_offset = dcfg_size - vpd_length;
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset, EFX_DWORD_0, vpd_offset);
+ (void) memcpy((caddr_t)dcfg + vpd_offset, data, vpd_length);
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, vpd_length);
+
+	/*
+	 * Update the checksum byte so that the header bytes (including
+	 * the checksum itself) once again sum to zero.
+	 */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ dcfg->csum.eb_u8[0] -= cksum;
+
+ /* Erase and write the new sector */
+ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, partn_size)) != 0)
+ goto fail6;
+
+ /* Write out the new structure to nvram */
+ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0, (caddr_t)dcfg,
+ vpd_offset + vpd_length)) != 0)
+ goto fail7;
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip, enp->en_u.siena.enu_svpd_length,
+ enp->en_u.siena.enu_svpd);
+
+ enp->en_u.siena.enu_svpd = NULL;
+ enp->en_u.siena.enu_svpd_length = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/usr/src/uts/common/io/sfxge/efsys.h b/usr/src/uts/common/io/sfxge/efsys.h
new file mode 100644
index 0000000000..c194081fa5
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/efsys.h
@@ -0,0 +1,686 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFSYS_H
+#define _SYS_EFSYS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/cpuvar.h>
+#include <sys/disp.h>
+#include <sys/sdt.h>
+#include <sys/kstat.h>
+#include <sys/crc32.h>
+#include <sys/note.h>
+#include <sys/byteorder.h>
+
+#define EFSYS_HAS_UINT64 1
+#define EFSYS_USE_UINT64 0
+#define EFSYS_HAS_SSE2_M128 0
+#ifdef _BIG_ENDIAN
+#define EFSYS_IS_BIG_ENDIAN 1
+#endif
+#ifdef _LITTLE_ENDIAN
+#define EFSYS_IS_LITTLE_ENDIAN 1
+#endif
+#include "efx_types.h"
+
+/* Source annotation modifiers used for Windows builds; defined away here */
+#define __in
+#define __in_opt
+#define __in_ecount(_n)
+#define __in_ecount_opt(_n)
+#define __in_bcount(_n)
+#define __in_bcount_opt(_n)
+
+#define __out
+#define __out_opt
+#define __out_ecount(_n)
+#define __out_ecount_opt(_n)
+#define __out_bcount(_n)
+#define __out_bcount_opt(_n)
+#define __out_bcount_part(_n, _l)
+#define __out_bcount_part_opt(_n, _l)
+
+#define __deref_out
+
+#define __inout
+#define __inout_opt
+#define __inout_ecount(_n)
+#define __inout_ecount_opt(_n)
+#define __inout_bcount(_n)
+#define __inout_bcount_opt(_n)
+#define __inout_bcount_full_opt(_n)
+
+#define __deref_out_bcount_opt(n)
+
+#define __checkReturn
+#define __success(_x)
+
+#define __drv_when(_p, _c)
+
+/* Code inclusion options */
+
+
+#define EFSYS_OPT_NAMES 1
+
+#define EFSYS_OPT_SIENA 1
+#define EFSYS_OPT_HUNTINGTON 1
+#define EFSYS_OPT_MEDFORD 0
+#if DEBUG
+#define EFSYS_OPT_CHECK_REG 1
+#else
+#define EFSYS_OPT_CHECK_REG 0
+#endif
+
+#define EFSYS_OPT_MCDI 1
+#define EFSYS_OPT_MCDI_LOGGING 0
+#define EFSYS_OPT_MCDI_PROXY_AUTH 0
+
+#define EFSYS_OPT_MAC_STATS 1
+
+#define EFSYS_OPT_LOOPBACK 1
+
+#define EFSYS_OPT_MON_MCDI 1
+#define EFSYS_OPT_MON_STATS 1
+
+#define EFSYS_OPT_PHY_STATS 1
+#define EFSYS_OPT_BIST 1
+#define EFSYS_OPT_PHY_LED_CONTROL 1
+
+#define EFSYS_OPT_VPD 1
+#define EFSYS_OPT_NVRAM 1
+#define EFSYS_OPT_BOOTCFG 1
+
+#define EFSYS_OPT_DIAG 0
+#define EFSYS_OPT_WOL 1
+#define EFSYS_OPT_RX_SCALE 1
+#define EFSYS_OPT_QSTATS 1
+
+#define EFSYS_OPT_EV_PREFETCH 0
+
+#define EFSYS_OPT_DECODE_INTR_FATAL 1
+
+#define EFSYS_OPT_FILTER 1
+
+#define EFSYS_OPT_LICENSING 0
+
+/* ID */
+
+typedef struct __efsys_identifier_s efsys_identifier_t;
+
+/* DMA */
+
+typedef uint64_t efsys_dma_addr_t;
+
+typedef struct efsys_mem_s {
+ ddi_dma_handle_t esm_dma_handle; /* DMA memory allocate/bind */
+ ddi_acc_handle_t esm_acc_handle; /* DMA memory read/write */
+ caddr_t esm_base;
+ efsys_dma_addr_t esm_addr;
+ size_t esm_size;
+ size_t esm_used;
+} efsys_mem_t;
+
+
+#define EFSYS_MEM_ZERO(_esmp, _size) \
+ (void) bzero((_esmp)->esm_base, (_size))
+
+#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t))); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ (_edp)->ed_u32[0] = ddi_get32((_esmp)->esm_acc_handle, \
+ addr); \
+ \
+ DTRACE_PROBE2(mem_readd, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
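+/*
+ * Quad-word and octo-word accesses are composed from 32-bit
+ * ddi_get32()/ddi_put32() operations at successive addresses.
+ */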
+#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t))); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ (_eqp)->eq_u32[0] = ddi_get32((_esmp)->esm_acc_handle, \
+ addr++); \
+ (_eqp)->eq_u32[1] = ddi_get32((_esmp)->esm_acc_handle, \
+ addr); \
+ \
+ DTRACE_PROBE3(mem_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t))); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ (_eop)->eo_u32[0] = ddi_get32((_esmp)->esm_acc_handle, \
+ addr++); \
+ (_eop)->eo_u32[1] = ddi_get32((_esmp)->esm_acc_handle, \
+ addr++); \
+ (_eop)->eo_u32[2] = ddi_get32((_esmp)->esm_acc_handle, \
+ addr++); \
+ (_eop)->eo_u32[3] = ddi_get32((_esmp)->esm_acc_handle, \
+ addr); \
+ \
+ DTRACE_PROBE5(mem_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t))); \
+ \
+ DTRACE_PROBE2(mem_writed, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ ddi_put32((_esmp)->esm_acc_handle, addr, \
+ (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t))); \
+ \
+ DTRACE_PROBE3(mem_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ ddi_put32((_esmp)->esm_acc_handle, addr++, \
+ (_eqp)->eq_u32[0]); \
+ ddi_put32((_esmp)->esm_acc_handle, addr, \
+ (_eqp)->eq_u32[1]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t))); \
+ \
+ DTRACE_PROBE5(mem_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ addr = (void *)((_esmp)->esm_base + (_offset)); \
+ \
+ ddi_put32((_esmp)->esm_acc_handle, addr++, \
+ (_eop)->eo_u32[0]); \
+ ddi_put32((_esmp)->esm_acc_handle, addr++, \
+ (_eop)->eo_u32[1]); \
+ ddi_put32((_esmp)->esm_acc_handle, addr++, \
+ (_eop)->eo_u32[2]); \
+ ddi_put32((_esmp)->esm_acc_handle, addr, \
+ (_eop)->eo_u32[3]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_ADDR(_esmp) \
+ ((_esmp)->esm_addr)
+
+#define EFSYS_MEM_IS_NULL(_esmp) \
+ ((_esmp)->esm_base == NULL)
+
+/* BAR */
+
+typedef struct efsys_bar_s {
+ kmutex_t esb_lock;
+ ddi_acc_handle_t esb_handle;
+ caddr_t esb_base;
+} efsys_bar_t;
+
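+/*
+ * The _lock argument to the dword and oword accessors selects whether the
+ * per-BAR mutex is taken; the qword accessors always take it.
+ */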
+#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mutex_enter(&((_esbp)->esb_lock)); \
+ \
+ addr = (void *)((_esbp)->esb_base + (_offset)); \
+ \
+ (_edp)->ed_u32[0] = ddi_get32((_esbp)->esb_handle, \
+ addr); \
+ \
+ DTRACE_PROBE2(bar_readd, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mutex_exit(&((_esbp)->esb_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t))); \
+ \
+ mutex_enter(&((_esbp)->esb_lock)); \
+ \
+ addr = (void *)((_esbp)->esb_base + (_offset)); \
+ \
+ (_eqp)->eq_u32[0] = ddi_get32((_esbp)->esb_handle, \
+ addr++); \
+ (_eqp)->eq_u32[1] = ddi_get32((_esbp)->esb_handle, \
+ addr); \
+ \
+ DTRACE_PROBE3(bar_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ mutex_exit(&((_esbp)->esb_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mutex_enter(&((_esbp)->esb_lock)); \
+ \
+ addr = (void *)((_esbp)->esb_base + (_offset)); \
+ \
+ (_eop)->eo_u32[0] = ddi_get32((_esbp)->esb_handle, \
+ addr++); \
+ (_eop)->eo_u32[1] = ddi_get32((_esbp)->esb_handle, \
+ addr++); \
+ (_eop)->eo_u32[2] = ddi_get32((_esbp)->esb_handle, \
+ addr++); \
+ (_eop)->eo_u32[3] = ddi_get32((_esbp)->esb_handle, \
+ addr); \
+ \
+ DTRACE_PROBE5(bar_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mutex_exit(&((_esbp)->esb_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mutex_enter(&((_esbp)->esb_lock)); \
+ \
+ DTRACE_PROBE2(bar_writed, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ addr = (void *)((_esbp)->esb_base + (_offset)); \
+ \
+ ddi_put32((_esbp)->esb_handle, addr, \
+ (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mutex_exit(&((_esbp)->esb_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t))); \
+ \
+ mutex_enter(&((_esbp)->esb_lock)); \
+ \
+ DTRACE_PROBE3(bar_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ addr = (void *)((_esbp)->esb_base + (_offset)); \
+ \
+ ddi_put32((_esbp)->esb_handle, addr++, \
+ (_eqp)->eq_u32[0]); \
+ ddi_put32((_esbp)->esb_handle, addr, \
+ (_eqp)->eq_u32[1]); \
+ \
+ mutex_exit(&((_esbp)->esb_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
+ * (required by PIO hardware)
+ */
+#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t))); \
+ \
+ (void) (_esbp); \
+ \
+ /* FIXME: Perform a 64-bit write */ \
+ EFSYS_ASSERT(0); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
+ do { \
+ uint32_t *addr; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ ASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mutex_enter(&((_esbp)->esb_lock)); \
+ \
+ DTRACE_PROBE5(bar_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ addr = (void *)((_esbp)->esb_base + (_offset)); \
+ \
+ ddi_put32((_esbp)->esb_handle, addr++, \
+ (_eop)->eo_u32[0]); \
+ ddi_put32((_esbp)->esb_handle, addr++, \
+ (_eop)->eo_u32[1]); \
+ ddi_put32((_esbp)->esb_handle, addr++, \
+ (_eop)->eo_u32[2]); \
+ ddi_put32((_esbp)->esb_handle, addr, \
+ (_eop)->eo_u32[3]); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ if (_lock) \
+ mutex_exit(&((_esbp)->esb_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Use the standard octo-word write for doorbell writes */
+#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
+ do { \
+ EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* SPIN */
+
+#define EFSYS_SPIN(_us) \
+ drv_usecwait(_us)
+
+/* TODO: Perhaps this should use delay(9F)? */
+#define EFSYS_SLEEP EFSYS_SPIN
+
+/* BARRIERS */
+
+/* Strict ordering guaranteed by devacc.devacc_attr_dataorder */
+#define EFSYS_MEM_READ_BARRIER() membar_consumer()
+/* TODO: Is ddi_put32() properly barriered? */
+#define EFSYS_PIO_WRITE_BARRIER()
+
+/* DMA SYNC */
+/*
+ * It could be cheaper to sync the entire mapping than to calculate the
+ * offset and size. If so, the macros below should be updated to ignore
+ * those arguments and sync the entire mapping.
+ */
+#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) \
+ (void) ddi_dma_sync((_esmp)->esm_dma_handle, \
+ (_offset), (_size), DDI_DMA_SYNC_FORKERNEL)
+
+#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) \
+ (void) ddi_dma_sync((_esmp)->esm_dma_handle, \
+ (_offset), (_size), DDI_DMA_SYNC_FORDEV)
+
+/* TIMESTAMP */
+
+typedef clock_t efsys_timestamp_t;
+
+/* TODO: Arguably this could use gethrtime */
+#define EFSYS_TIMESTAMP(_usp) \
+ do { \
+ *(_usp) = drv_hztousec(ddi_get_lbolt()); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* KMEM */
+
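+/*
+ * Allocations use KM_NOSLEEP, so callers must handle a NULL result. The
+ * (_esip) self-assignment just keeps the identifier argument formally used.
+ */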
+#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
+ do { \
+ (_esip) = (_esip); \
+ (_p) = kmem_zalloc((_size), KM_NOSLEEP); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_KMEM_FREE(_esip, _size, _p) \
+ do { \
+ (_esip) = (_esip); \
+ kmem_free((_p), (_size)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* LOCK */
+
+typedef kmutex_t efsys_lock_t;
+
+#define EFSYS_LOCK_MAGIC 0x000010c4
+
+#define EFSYS_LOCK(_lockp, _state) \
+ do { \
+ mutex_enter(_lockp); \
+ (_state) = EFSYS_LOCK_MAGIC; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_UNLOCK(_lockp, _state) \
+ do { \
+ if ((_state) != EFSYS_LOCK_MAGIC) \
+ ASSERT(B_FALSE); \
+ mutex_exit(_lockp); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* STAT */
+
+typedef kstat_named_t efsys_stat_t;
+
+#define EFSYS_STAT_INCR(_knp, _delta) \
+ do { \
+ ((_knp)->value.ui64) += (_delta); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_DECR(_knp, _delta) \
+ do { \
+ ((_knp)->value.ui64) -= (_delta); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET(_knp, _val) \
+ do { \
+ ((_knp)->value.ui64) = (_val); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
+ do { \
+ ((_knp)->value.ui64) = LE_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
+ do { \
+ ((_knp)->value.ui64) = LE_32((_valp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
+ do { \
+ ((_knp)->value.ui64) += LE_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
+ do { \
+ ((_knp)->value.ui64) -= LE_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* ERR */
+
+extern void sfxge_err(efsys_identifier_t *, unsigned int,
+ uint32_t, uint32_t);
+
+#if EFSYS_OPT_DECODE_INTR_FATAL
+#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
+ sfxge_err((_esip), (_code), (_dword0), (_dword1))
+#endif
+
+/* PROBE */
+
+#define EFSYS_PROBE(_name) \
+ DTRACE_PROBE(_name)
+
+#define EFSYS_PROBE1(_name, _type1, _arg1) \
+ DTRACE_PROBE1(_name, _type1, _arg1)
+
+#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
+ DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)
+
+#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3) \
+ DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3)
+
+#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4) \
+ DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4)
+
+#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5) \
+ DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5)
+
+#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6) \
+ DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6)
+
+#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6, _type7, _arg7) \
+ DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6, _type7, _arg7)
+
+/* ASSERT */
+
+#define EFSYS_ASSERT(_exp) ASSERT(_exp)
+#define EFSYS_ASSERT3U(_x, _op, _y) ASSERT3U(_x, _op, _y)
+#define EFSYS_ASSERT3S(_x, _op, _y) ASSERT3S(_x, _op, _y)
+#define EFSYS_ASSERT3P(_x, _op, _y) ASSERT3P(_x, _op, _y)
+
+/* ROTATE */
+
+#define EFSYS_HAS_ROTL_DWORD 0
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFSYS_H */
diff --git a/usr/src/uts/common/io/sfxge/sfxge.c b/usr/src/uts/common/io/sfxge/sfxge.c
new file mode 100644
index 0000000000..7665284d72
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge.c
@@ -0,0 +1,1194 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/modctl.h>
+#include <sys/conf.h>
+#include <sys/ethernet.h>
+#include <sys/pci.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/processor.h>
+#include <sys/cpuvar.h>
+#include <sys/pghw.h>
+
+#include "sfxge.h"
+#include "sfxge_version.h"
+#include "efsys.h"
+#include "efx.h"
+
+#ifdef DEBUG
+boolean_t sfxge_aask = B_FALSE;
+#endif
+
+/* Receive queue TRIM default polling interval (in microseconds) */
+#define SFXGE_RX_QPOLL_USEC (5000000)
+
+/* Broadcast address */
+uint8_t sfxge_brdcst[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+/*
+ * By default modinfo(1M) truncates its output to 80-character lines, and so
+ * displays only the first 32 characters of the sfxge_ident string.
+ */
+const char sfxge_ident[] = "Solarflare 10Gb/40Gb Ethernet";
+const char sfxge_version[] = SFXGE_VERSION_STRING;
+
+static void
+sfxge_cfg_build(sfxge_t *sp)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sp->s_enp);
+ (void) snprintf(sp->s_cfg_kstat.buf.sck_mac, 64,
+ "%02X:%02X:%02X:%02X:%02X:%02X",
+ encp->enc_mac_addr[0], encp->enc_mac_addr[1],
+ encp->enc_mac_addr[2], encp->enc_mac_addr[3],
+ encp->enc_mac_addr[4], encp->enc_mac_addr[5]);
+}
+
+static int
+sfxge_create(dev_info_t *dip, sfxge_t **spp)
+{
+ sfxge_t *sp;
+ efx_nic_t *enp;
+ unsigned int rxq_size;
+ int rxq_poll_usec;
+ int rc;
+
+ /* Allocate the object */
+ sp = kmem_zalloc(sizeof (*sp), KM_SLEEP);
+ sp->s_dip = dip;
+ ddi_set_driver_private(dip, sp);
+
+ mutex_init(&(sp->s_state_lock), NULL, MUTEX_DRIVER, NULL);
+ sp->s_state = SFXGE_UNINITIALIZED;
+
+ /* Get property values */
+ sp->s_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "mtu", ETHERMTU);
+
+ sp->s_action_on_hw_err = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "action_on_hw_err", SFXGE_RECOVER);
+
+ rxq_size = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "rxq_size", SFXGE_DEFAULT_RXQ_SIZE);
+ if (!(ISP2(rxq_size)))
+ rxq_size = SFXGE_DEFAULT_RXQ_SIZE;
+ rxq_size = min(rxq_size, EFX_RXQ_MAXNDESCS);
+ sp->s_rxq_size = (uint16_t)max(rxq_size, EFX_RXQ_MINNDESCS);
+
+ /* Configure polling interval for queue refill/trim */
+ rxq_poll_usec = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "rxq_poll_usec", SFXGE_RX_QPOLL_USEC);
+ if (rxq_poll_usec <= 0)
+ rxq_poll_usec = SFXGE_RX_QPOLL_USEC;
+ sp->s_rxq_poll_usec = rxq_poll_usec;
+
+#if EFSYS_OPT_MCDI_LOGGING
+ sp->s_mcdi_logging = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "mcdi_logging", 0);
+#endif
+
+ /* Create a taskq */
+ sp->s_tqp = ddi_taskq_create(dip, "tq", 1, TASKQ_DEFAULTPRI, 0);
+ if (sp->s_tqp == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ /* Check and initialize PCI configuration space */
+ if ((rc = sfxge_pci_init(sp)) != 0)
+ goto fail3;
+
+ /* Map the device registers */
+ if ((rc = sfxge_bar_init(sp)) != 0)
+ goto fail4;
+
+ /* Create the NIC object */
+ mutex_init(&(sp->s_nic_lock), NULL, MUTEX_DRIVER, NULL);
+
+ if ((rc = efx_nic_create(sp->s_family, (efsys_identifier_t *)sp,
+ &(sp->s_bar), &(sp->s_nic_lock), &enp)) != 0)
+ goto fail5;
+
+ sp->s_enp = enp;
+
+ /* Initialize MCDI to talk to the Microcontroller */
+ if ((rc = sfxge_mcdi_init(sp)) != 0)
+ goto fail6;
+
+ /* Probe the NIC and build the configuration data area */
+ if ((rc = efx_nic_probe(enp)) != 0)
+ goto fail7;
+
+ switch (sp->s_family) {
+ case EFX_FAMILY_HUNTINGTON:
+ sfxge_pcie_check_link(sp, 8, 3); /* PCI 8x Gen3 */
+ break;
+
+ case EFX_FAMILY_SIENA:
+ sfxge_pcie_check_link(sp, 8, 2); /* PCI 8x Gen2 */
+ break;
+
+ default:
+ break;
+ }
+
+ if ((rc = efx_nvram_init(enp)) != 0)
+ goto fail8;
+
+ if ((rc = efx_vpd_init(enp)) != 0)
+ goto fail9;
+
+ if ((rc = efx_nic_reset(enp)) != 0)
+ goto fail10;
+
+ sfxge_sram_init(sp);
+
+ if ((rc = sfxge_intr_init(sp)) != 0)
+ goto fail11;
+
+ if ((rc = sfxge_ev_init(sp)) != 0)
+ goto fail12;
+
+ if ((rc = sfxge_mac_init(sp)) != 0)
+ goto fail13;
+
+ if ((rc = sfxge_rx_init(sp)) != 0)
+ goto fail14;
+
+ if ((rc = sfxge_tx_init(sp)) != 0)
+ goto fail15;
+
+ if ((rc = sfxge_mon_init(sp)) != 0)
+ goto fail16;
+
+ mutex_init(&(sp->s_tx_flush_lock), NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(sp->s_intr.si_intr_pri));
+ cv_init(&(sp->s_tx_flush_kv), NULL, CV_DRIVER, NULL);
+
+ sp->s_state = SFXGE_INITIALIZED;
+
+ *spp = sp;
+ return (0);
+
+fail16:
+	DTRACE_PROBE(fail16);
+ sfxge_tx_fini(sp);
+
+fail15:
+	DTRACE_PROBE(fail15);
+ sfxge_rx_fini(sp);
+
+fail14:
+ DTRACE_PROBE(fail14);
+ sfxge_mac_fini(sp);
+
+fail13:
+ DTRACE_PROBE(fail13);
+ sfxge_ev_fini(sp);
+
+fail12:
+ DTRACE_PROBE(fail12);
+ sfxge_intr_fini(sp);
+
+fail11:
+ DTRACE_PROBE(fail11);
+ sfxge_sram_fini(sp);
+ (void) efx_nic_reset(sp->s_enp);
+
+fail10:
+ DTRACE_PROBE(fail10);
+ efx_vpd_fini(enp);
+
+fail9:
+ DTRACE_PROBE(fail9);
+ efx_nvram_fini(enp);
+
+fail8:
+ DTRACE_PROBE(fail8);
+ efx_nic_unprobe(enp);
+
+fail7:
+ DTRACE_PROBE(fail7);
+ sfxge_mcdi_fini(sp);
+
+fail6:
+ DTRACE_PROBE(fail6);
+ sp->s_enp = NULL;
+ efx_nic_destroy(enp);
+
+fail5:
+ DTRACE_PROBE(fail5);
+ mutex_destroy(&(sp->s_nic_lock));
+ sfxge_bar_fini(sp);
+
+fail4:
+ DTRACE_PROBE(fail4);
+ sfxge_pci_fini(sp);
+
+fail3:
+ DTRACE_PROBE(fail3);
+ ddi_taskq_destroy(sp->s_tqp);
+ sp->s_tqp = NULL;
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Clear property values */
+ sp->s_mtu = 0;
+
+ mutex_destroy(&(sp->s_state_lock));
+
+ /* Free the soft state */
+ sp->s_dip = NULL;
+
+ SFXGE_OBJ_CHECK(sp, sfxge_t);
+ kmem_free(sp, sizeof (*sp));
+
+ return (rc);
+}
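
sfxge_create() uses the driver's standard error-unwind idiom: each setup step that fails jumps to a numbered fail label, and the labels tear down the earlier, successful steps in reverse order, each firing a DTrace probe named after the label. A reduced sketch of the pattern (step_a_*/step_b_* are hypothetical helpers, not driver functions):

```c
extern int step_a_init(void);		/* hypothetical setup/teardown pair */
extern void step_a_fini(void);
extern int step_b_init(void);

static int
example_create(void)
{
	int rc;

	if ((rc = step_a_init()) != 0)
		goto fail1;

	if ((rc = step_b_init()) != 0)
		goto fail2;

	return (0);

fail2:
	DTRACE_PROBE(fail2);
	step_a_fini();			/* undo step A in reverse order */

fail1:
	DTRACE_PROBE1(fail1, int, rc);
	return (rc);
}
```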
+
+
+static int
+sfxge_start_locked(sfxge_t *sp, boolean_t restart)
+{
+ int rc;
+
+ ASSERT(mutex_owned(&(sp->s_state_lock)));
+
+ if (sp->s_state == SFXGE_STARTED)
+ goto done;
+
+ if (sp->s_state != SFXGE_REGISTERED) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ sp->s_state = SFXGE_STARTING;
+
+ /* Start a new epoch (allow fresh MCDI requests to succeed) */
+ efx_mcdi_new_epoch(sp->s_enp);
+
+ if ((rc = efx_nic_reset(sp->s_enp)) != 0)
+ goto fail2;
+
+ if ((rc = efx_nic_init(sp->s_enp)) != 0)
+ goto fail3;
+
+ if ((rc = efx_filter_init(sp->s_enp)) != 0)
+ goto fail4;
+
+ if ((rc = sfxge_sram_start(sp)) != 0)
+ goto fail5;
+
+ if ((rc = sfxge_intr_start(sp)) != 0)
+ goto fail6;
+
+ if ((rc = sfxge_ev_start(sp)) != 0)
+ goto fail7;
+
+ if ((rc = sfxge_mac_start(sp, restart)) != 0)
+ goto fail8;
+
+ if ((rc = sfxge_rx_start(sp)) != 0)
+ goto fail9;
+
+ if ((rc = sfxge_tx_start(sp)) != 0)
+ goto fail10;
+
+ if ((rc = sfxge_mon_start(sp)) != 0)
+ goto fail11;
+
+ ASSERT3U(sp->s_state, ==, SFXGE_STARTING);
+ sp->s_state = SFXGE_STARTED;
+
+ /* Notify any change of MTU */
+ sfxge_gld_mtu_update(sp);
+
+done:
+ return (0);
+
+fail11:
+ DTRACE_PROBE(fail11);
+ sfxge_tx_stop(sp);
+
+fail10:
+ DTRACE_PROBE(fail10);
+ sfxge_rx_stop(sp);
+
+fail9:
+ DTRACE_PROBE(fail9);
+ sfxge_mac_stop(sp);
+
+fail8:
+ DTRACE_PROBE(fail8);
+ sfxge_ev_stop(sp);
+
+fail7:
+ DTRACE_PROBE(fail7);
+ sfxge_intr_stop(sp);
+
+fail6:
+ DTRACE_PROBE(fail6);
+ sfxge_sram_stop(sp);
+
+fail5:
+ DTRACE_PROBE(fail5);
+ efx_filter_fini(sp->s_enp);
+
+fail4:
+ DTRACE_PROBE(fail4);
+ efx_nic_fini(sp->s_enp);
+
+fail3:
+ DTRACE_PROBE(fail3);
+ (void) efx_nic_reset(sp->s_enp);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ ASSERT3U(sp->s_state, ==, SFXGE_STARTING);
+ sp->s_state = SFXGE_REGISTERED;
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
+int
+sfxge_start(sfxge_t *sp, boolean_t restart)
+{
+ int rc;
+
+ mutex_enter(&(sp->s_state_lock));
+ rc = sfxge_start_locked(sp, restart);
+ mutex_exit(&(sp->s_state_lock));
+ return (rc);
+}
+
+
+static void
+sfxge_stop_locked(sfxge_t *sp)
+{
+ ASSERT(mutex_owned(&(sp->s_state_lock)));
+
+ if (sp->s_state != SFXGE_STARTED) {
+ return;
+ }
+ sp->s_state = SFXGE_STOPPING;
+
+ sfxge_mon_stop(sp);
+ sfxge_tx_stop(sp);
+ sfxge_rx_stop(sp);
+ sfxge_mac_stop(sp);
+
+	/* Stop event processing - must be after rx_stop; see sfxge_rx_qpoll() */
+ sfxge_ev_stop(sp);
+ sfxge_intr_stop(sp); /* cope with late flush/soft events until here */
+ sfxge_sram_stop(sp);
+
+ efx_filter_fini(sp->s_enp);
+
+ efx_nic_fini(sp->s_enp);
+ (void) efx_nic_reset(sp->s_enp);
+
+ ASSERT3U(sp->s_state, ==, SFXGE_STOPPING);
+ sp->s_state = SFXGE_REGISTERED;
+}
+
+void
+sfxge_stop(sfxge_t *sp)
+{
+ mutex_enter(&(sp->s_state_lock));
+ sfxge_stop_locked(sp);
+ mutex_exit(&(sp->s_state_lock));
+}
+
+static void
+_sfxge_restart(void *arg)
+{
+ sfxge_t *sp = arg;
+ int rc;
+
+ /* logging on entry is in sfxge_restart_dispatch */
+ mutex_enter(&(sp->s_state_lock));
+
+ DTRACE_PROBE(_sfxge_restart);
+ if (sp->s_state != SFXGE_STARTED)
+ goto done;
+
+ /* inform the OS that the link is down - may trigger IPMP failover */
+ if (sp->s_hw_err && sp->s_action_on_hw_err != SFXGE_INVISIBLE) {
+ sp->s_mac.sm_link_mode = EFX_LINK_DOWN;
+ sfxge_gld_link_update(sp);
+ }
+
+ /* Stop processing */
+ sfxge_stop_locked(sp);
+
+ if (sp->s_hw_err && sp->s_action_on_hw_err == SFXGE_LEAVE_DEAD) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "NIC error - interface is"
+ " being left permanently DOWN per driver config");
+
+ (void) atomic_swap_32(&(sp->s_nested_restarts), 0);
+ mutex_exit(&(sp->s_state_lock));
+ return;
+ } else
+ sp->s_hw_err = SFXGE_HW_OK;
+
+ /* Start processing */
+ if ((rc = sfxge_start_locked(sp, B_TRUE)) != 0)
+ goto fail1;
+
+done:
+ (void) atomic_swap_32(&(sp->s_nested_restarts), 0);
+ mutex_exit(&(sp->s_state_lock));
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR "NIC restart complete");
+ return;
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ dev_err(sp->s_dip, CE_WARN,
+ SFXGE_CMN_ERR "FATAL ERROR: NIC restart failed rc=%d", rc);
+
+ (void) atomic_swap_32(&(sp->s_nested_restarts), 0);
+ mutex_exit(&(sp->s_state_lock));
+}
+
+int
+sfxge_restart_dispatch(sfxge_t *sp, uint_t cflags, sfxge_hw_err_t hw_err,
+ const char *reason, uint32_t errval)
+{
+ if (hw_err == SFXGE_HW_OK)
+ sp->s_num_restarts++;
+ else {
+ sp->s_hw_err = hw_err;
+ sp->s_num_restarts_hw_err++;
+ }
+
+ if (atomic_inc_32_nv(&(sp->s_nested_restarts)) > 1) {
+ /* A restart is currently in progress */
+ return (0);
+ }
+
+ DTRACE_PROBE2(sfxge_restart_dispatch, sfxge_hw_err_t, hw_err, char *,
+ reason);
+
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR "NIC restart due to %s:%d",
+ reason, errval);
+
+ /* If cflags == DDI_SLEEP then guaranteed to succeed */
+ return (ddi_taskq_dispatch(sp->s_tqp, _sfxge_restart, sp, cflags));
+}
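
sfxge_restart_dispatch() gates restarts with atomic_inc_32_nv() on s_nested_restarts, so only the caller that takes the counter from zero to one queues _sfxge_restart() on the taskq; the worker clears the counter with atomic_swap_32() once it has finished. A minimal sketch of the same single-dispatch pattern (all names here are hypothetical):

```c
#include <sys/atomic.h>
#include <sys/sunddi.h>

static volatile uint32_t example_pending;

static int
example_dispatch(ddi_taskq_t *tqp, void (*func)(void *), void *arg)
{
	/* Only the caller that raises the count from 0 to 1 dispatches. */
	if (atomic_inc_32_nv(&example_pending) > 1)
		return (0);

	return (ddi_taskq_dispatch(tqp, func, arg, DDI_SLEEP));
}

static void
example_worker_done(void)
{
	/* The worker clears the gate when the restart completes. */
	(void) atomic_swap_32(&example_pending, 0);
}
```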
+
+
+static int
+sfxge_can_destroy(sfxge_t *sp)
+{
+ int index;
+
+ /*
+	 * In SFC bug 19834 it was noted that an mblk passed up to STREAMS
+	 * could be reused for transmit and sit in the sfxge_tx_packet_cache.
+	 * Emptying the TX deferred packet list here may therefore cause
+	 * rx_loaned to decrease.
+ */
+ index = EFX_ARRAY_SIZE(sp->s_stp);
+ while (--index >= 0) {
+ sfxge_txq_t *stp = sp->s_stp[index];
+
+ if (stp != NULL)
+ sfxge_tx_qdpl_flush(stp);
+ }
+
+ /* Need to wait for desballoc free_func callback */
+ return (sfxge_rx_loaned(sp));
+}
+
+
+static int
+sfxge_destroy(sfxge_t *sp)
+{
+ ddi_taskq_t *tqp;
+ efx_nic_t *enp;
+ int rc;
+
+ ASSERT3U(sp->s_state, ==, SFXGE_INITIALIZED);
+ enp = sp->s_enp;
+
+ if (sfxge_can_destroy(sp) != 0) {
+ rc = EBUSY;
+ goto fail1;
+ }
+
+ sp->s_state = SFXGE_UNINITIALIZED;
+
+ cv_destroy(&(sp->s_tx_flush_kv));
+ mutex_destroy(&(sp->s_tx_flush_lock));
+
+ sfxge_mon_fini(sp);
+ sfxge_tx_fini(sp);
+ sfxge_rx_fini(sp);
+ sfxge_mac_fini(sp);
+ sfxge_ev_fini(sp);
+ sfxge_intr_fini(sp);
+ sfxge_sram_fini(sp);
+ (void) efx_nic_reset(enp);
+
+ efx_vpd_fini(enp);
+ efx_nvram_fini(enp);
+ efx_nic_unprobe(enp);
+ sfxge_mcdi_fini(sp);
+
+ /* Destroy the NIC object */
+ sp->s_enp = NULL;
+ efx_nic_destroy(enp);
+
+ mutex_destroy(&(sp->s_nic_lock));
+
+ /* Unmap the device registers */
+ sfxge_bar_fini(sp);
+
+ /* Tear down PCI configuration space */
+ sfxge_pci_fini(sp);
+
+ /* Destroy the taskq */
+ tqp = sp->s_tqp;
+ sp->s_tqp = NULL;
+ ddi_taskq_destroy(tqp);
+
+ mutex_destroy(&(sp->s_state_lock));
+
+ /* Clear property values */
+ sp->s_mtu = 0;
+
+ /* Free the soft state */
+ sp->s_dip = NULL;
+
+ SFXGE_OBJ_CHECK(sp, sfxge_t);
+ kmem_free(sp, sizeof (*sp));
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+void
+sfxge_ioctl(sfxge_t *sp, queue_t *wq, mblk_t *mp)
+{
+ struct iocblk *iocp;
+ int rc, taskq_wait = 0;
+ size_t ioclen = 0;
+
+ /*
+ * single concurrent IOCTL
+ * serialized from sfxge_create, _destroy, _(re)start, _stop
+ */
+ mutex_enter(&(sp->s_state_lock));
+
+ /*LINTED*/
+ iocp = (struct iocblk *)mp->b_rptr;
+
+ switch (iocp->ioc_cmd) {
+ case SFXGE_NVRAM_IOC:
+ ioclen = sizeof (sfxge_nvram_ioc_t);
+ break;
+ case SFXGE_MCDI_IOC:
+ ioclen = sizeof (sfxge_mcdi_ioc_t);
+ break;
+ case SFXGE_MCDI2_IOC:
+ ioclen = sizeof (sfxge_mcdi2_ioc_t);
+ break;
+ case SFXGE_VPD_IOC:
+ ioclen = sizeof (sfxge_vpd_ioc_t);
+ break;
+ case SFXGE_NIC_RESET_IOC:
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (iocp->ioc_count != ioclen) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /* if in multiple fragments pull it up to one linear buffer */
+ if ((rc = miocpullup(mp, ioclen)) != 0) {
+ goto fail3;
+ }
+
+ switch (iocp->ioc_cmd) {
+ case SFXGE_NVRAM_IOC: {
+ sfxge_nvram_ioc_t *snip =
+ (sfxge_nvram_ioc_t *)mp->b_cont->b_rptr;
+
+ if ((rc = sfxge_nvram_ioctl(sp, snip)) != 0)
+ goto fail4;
+
+ break;
+ }
+ case SFXGE_MCDI_IOC: {
+ sfxge_mcdi_ioc_t *smip = (sfxge_mcdi_ioc_t *)mp->b_cont->b_rptr;
+
+ if ((rc = sfxge_mcdi_ioctl(sp, smip)) != 0)
+ goto fail4;
+ taskq_wait = 1;
+
+ break;
+ }
+ case SFXGE_MCDI2_IOC: {
+ sfxge_mcdi2_ioc_t *smip =
+ (sfxge_mcdi2_ioc_t *)mp->b_cont->b_rptr;
+
+ if ((rc = sfxge_mcdi2_ioctl(sp, smip)) != 0)
+ goto fail4;
+ taskq_wait = 1;
+
+ break;
+ }
+ case SFXGE_NIC_RESET_IOC: {
+ DTRACE_PROBE(nic_reset_ioc);
+
+ /* sp->s_state_lock held */
+ (void) sfxge_restart_dispatch(sp, DDI_SLEEP, SFXGE_HW_OK,
+ "NIC_RESET_IOC", 0);
+ taskq_wait = 1;
+
+ break;
+ }
+ case SFXGE_VPD_IOC: {
+ sfxge_vpd_ioc_t *svip = (sfxge_vpd_ioc_t *)mp->b_cont->b_rptr;
+
+ if ((rc = sfxge_vpd_ioctl(sp, svip)) != 0)
+ goto fail4;
+
+ break;
+ }
+ default:
+ ASSERT(0);
+ }
+
+ mutex_exit(&(sp->s_state_lock));
+
+ if (taskq_wait) {
+ /*
+		 * Wait for any tasks that may be accessing GLD functions.
+		 * This may end up waiting for multiple NIC resets, since the
+		 * wait must be outside s_state_lock for sfxge_restart() to
+		 * make progress.
+ */
+ ddi_taskq_wait(sp->s_tqp);
+ }
+
+ /* The entire structure is the acknowledgement */
+ miocack(wq, mp, iocp->ioc_count, 0);
+
+ return;
+
+fail4:
+ DTRACE_PROBE(fail4);
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(sp->s_state_lock));
+
+ /* no data returned */
+ miocnak(wq, mp, 0, rc);
+}
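
sfxge_ioctl() follows the usual STREAMS M_IOCTL pattern: check ioc_count against the expected payload size, linearize the payload with miocpullup(9F), act on it, then acknowledge with miocack(9F) or reject with miocnak(9F). A stripped-down sketch of that flow (the fixed uint32_t payload is hypothetical):

```c
static void
example_ioctl(queue_t *wq, mblk_t *mp)
{
	struct iocblk *iocp = (struct iocblk *)(void *)mp->b_rptr;
	int rc;

	if (iocp->ioc_count != sizeof (uint32_t)) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	/* Pull a possibly fragmented payload into one linear buffer. */
	if ((rc = miocpullup(mp, sizeof (uint32_t))) != 0) {
		miocnak(wq, mp, 0, rc);
		return;
	}

	/* ... act on *(uint32_t *)mp->b_cont->b_rptr ... */

	miocack(wq, mp, iocp->ioc_count, 0);
}
```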
+
+static int
+sfxge_register(sfxge_t *sp)
+{
+ int rc;
+
+ ASSERT3U(sp->s_state, ==, SFXGE_INITIALIZED);
+
+ if ((rc = sfxge_gld_register(sp)) != 0)
+ goto fail1;
+
+ sp->s_state = SFXGE_REGISTERED;
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static int
+sfxge_unregister(sfxge_t *sp)
+{
+ int rc;
+
+ ASSERT3U(sp->s_state, ==, SFXGE_REGISTERED);
+
+ /* Wait for any tasks that may be accessing GLD functions */
+ ddi_taskq_wait(sp->s_tqp);
+
+ if ((rc = sfxge_gld_unregister(sp)) != 0)
+ goto fail1;
+
+ sp->s_state = SFXGE_INITIALIZED;
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+_sfxge_vpd_kstat_init(sfxge_t *sp, caddr_t vpd, size_t size, efx_vpd_tag_t tag,
+ const char *keyword, sfxge_vpd_type_t type)
+{
+ static const char unknown[] = "?";
+ efx_nic_t *enp = sp->s_enp;
+ sfxge_vpd_kstat_t *svkp = &(sp->s_vpd_kstat);
+ kstat_named_t *knp;
+ efx_vpd_value_t *evvp;
+
+ evvp = svkp->svk_vv + type;
+ evvp->evv_tag = tag;
+ evvp->evv_keyword = EFX_VPD_KEYWORD(keyword[0], keyword[1]);
+
+ if (efx_vpd_get(enp, vpd, size, evvp) != 0) {
+ evvp->evv_length = strlen(unknown) + 1;
+ bcopy(unknown, evvp->evv_value, evvp->evv_length);
+ }
+
+ knp = &(svkp->svk_stat[type]);
+
+ kstat_named_init(knp, (char *)keyword, KSTAT_DATA_STRING);
+ kstat_named_setstr(knp, (char *)evvp->evv_value);
+ svkp->svk_ksp->ks_data_size += sizeof (*evvp);
+}
+
+static int
+sfxge_vpd_kstat_init(sfxge_t *sp)
+{
+ efx_nic_t *enp = sp->s_enp;
+ sfxge_vpd_kstat_t *svkp = &(sp->s_vpd_kstat);
+ dev_info_t *dip = sp->s_dip;
+ char name[MAXNAMELEN];
+ kstat_t *ksp;
+ caddr_t vpd;
+ size_t size;
+ int rc;
+
+ SFXGE_OBJ_CHECK(svkp, sfxge_vpd_kstat_t);
+ (void) snprintf(name, MAXNAMELEN - 1, "%s_vpd", ddi_driver_name(dip));
+
+ /* Get a copy of the VPD space */
+ if ((rc = efx_vpd_size(enp, &size)) != 0)
+ goto fail1;
+
+ if ((vpd = kmem_zalloc(size, KM_NOSLEEP)) == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ if ((svkp->svk_vv = kmem_zalloc(sizeof (efx_vpd_value_t) *
+ SFXGE_VPD_MAX, KM_NOSLEEP)) == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ if ((rc = efx_vpd_read(enp, vpd, size)) != 0)
+ goto fail4;
+
+ if ((ksp = kstat_create((char *)ddi_driver_name(dip),
+ ddi_get_instance(dip), name, "vpd", KSTAT_TYPE_NAMED, SFXGE_VPD_MAX,
+ KSTAT_FLAG_VIRTUAL)) == NULL) {
+ rc = ENOMEM;
+ goto fail5;
+ }
+ svkp->svk_ksp = ksp;
+ ksp->ks_data = &(svkp->svk_stat);
+
+ _sfxge_vpd_kstat_init(sp, vpd, size, EFX_VPD_ID, "ID", SFXGE_VPD_ID);
+ _sfxge_vpd_kstat_init(sp, vpd, size, EFX_VPD_RO, "PN", SFXGE_VPD_PN);
+ _sfxge_vpd_kstat_init(sp, vpd, size, EFX_VPD_RO, "SN", SFXGE_VPD_SN);
+ _sfxge_vpd_kstat_init(sp, vpd, size, EFX_VPD_RO, "EC", SFXGE_VPD_EC);
+ _sfxge_vpd_kstat_init(sp, vpd, size, EFX_VPD_RO, "MN", SFXGE_VPD_MN);
+ _sfxge_vpd_kstat_init(sp, vpd, size, EFX_VPD_RO, "VD", SFXGE_VPD_VD);
+ _sfxge_vpd_kstat_init(sp, vpd, size, EFX_VPD_RO, "VE", SFXGE_VPD_VE);
+
+ kstat_install(ksp);
+ kmem_free(vpd, size);
+
+ return (0);
+
+fail5:
+ DTRACE_PROBE(fail5);
+fail4:
+ DTRACE_PROBE(fail4);
+ kmem_free(svkp->svk_vv, sizeof (efx_vpd_value_t) * SFXGE_VPD_MAX);
+fail3:
+ DTRACE_PROBE(fail3);
+ kmem_free(vpd, size);
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ SFXGE_OBJ_CHECK(svkp, sfxge_vpd_kstat_t);
+
+ return (rc);
+}
+
+static void
+sfxge_vpd_kstat_fini(sfxge_t *sp)
+{
+ sfxge_vpd_kstat_t *svkp = &(sp->s_vpd_kstat);
+
+ /* NOTE: VPD support is optional, so kstats might not be registered */
+ if (svkp->svk_ksp != NULL) {
+
+ kstat_delete(svkp->svk_ksp);
+
+ kmem_free(svkp->svk_vv,
+ sizeof (efx_vpd_value_t) * SFXGE_VPD_MAX);
+
+ bzero(svkp->svk_stat,
+ sizeof (kstat_named_t) * SFXGE_VPD_MAX);
+
+ svkp->svk_ksp = NULL;
+ }
+
+ SFXGE_OBJ_CHECK(svkp, sfxge_vpd_kstat_t);
+}
+
+static int
+sfxge_cfg_kstat_init(sfxge_t *sp)
+{
+ dev_info_t *dip = sp->s_dip;
+ char name[MAXNAMELEN];
+ kstat_t *ksp;
+ sfxge_cfg_kstat_t *sckp;
+ int rc;
+
+ sfxge_cfg_build(sp);
+
+ /* Create the set */
+ (void) snprintf(name, MAXNAMELEN - 1, "%s_cfg", ddi_driver_name(dip));
+
+ if ((ksp = kstat_create((char *)ddi_driver_name(dip),
+ ddi_get_instance(dip), name, "cfg", KSTAT_TYPE_NAMED,
+ sizeof (sckp->kstat) / sizeof (kstat_named_t),
+ KSTAT_FLAG_VIRTUAL)) == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ sp->s_cfg_ksp = ksp;
+
+ ksp->ks_data = sckp = &(sp->s_cfg_kstat);
+
+ kstat_named_init(&(sckp->kstat.sck_mac), "mac", KSTAT_DATA_STRING);
+ kstat_named_setstr(&(sckp->kstat.sck_mac), sckp->buf.sck_mac);
+ ksp->ks_data_size += sizeof (sckp->buf.sck_mac);
+
+ kstat_named_init(&(sckp->kstat.sck_version), "version",
+ KSTAT_DATA_STRING);
+ kstat_named_setstr(&(sckp->kstat.sck_version), sfxge_version);
+ ksp->ks_data_size += sizeof (sfxge_version);
+
+ kstat_install(ksp);
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+sfxge_cfg_kstat_fini(sfxge_t *sp)
+{
+ if (sp->s_cfg_ksp == NULL)
+ return;
+
+ kstat_delete(sp->s_cfg_ksp);
+ sp->s_cfg_ksp = NULL;
+
+ bzero(&(sp->s_cfg_kstat), sizeof (sfxge_cfg_kstat_t));
+}
+
+static int
+sfxge_resume(sfxge_t *sp)
+{
+ int rc;
+
+ /* Start processing */
+ if ((rc = sfxge_start(sp, B_FALSE)) != 0)
+ goto fail1;
+
+ return (DDI_SUCCESS);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (DDI_FAILURE);
+}
+
+static int
+sfxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ sfxge_t *sp;
+ int rc;
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ break;
+
+ case DDI_RESUME:
+ if ((sp = ddi_get_driver_private(dip)) == NULL)
+ return (DDI_FAILURE);
+ return (sfxge_resume(sp));
+
+ default:
+ return (DDI_FAILURE);
+ }
+
+ /* Create the soft state */
+ if ((rc = sfxge_create(dip, &sp)) != 0)
+ goto fail1;
+
+ /* Create the configuration kstats */
+ if ((rc = sfxge_cfg_kstat_init(sp)) != 0)
+ goto fail2;
+
+ /* Create the VPD kstats */
+ if ((rc = sfxge_vpd_kstat_init(sp)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail3;
+ }
+
+ /* Register the interface */
+ if ((rc = sfxge_register(sp)) != 0)
+ goto fail4;
+
+ /* Announce ourselves in the system log */
+ ddi_report_dev(dip);
+
+ return (DDI_SUCCESS);
+
+fail4:
+ DTRACE_PROBE(fail4);
+
+ /* Destroy the VPD kstats */
+ sfxge_vpd_kstat_fini(sp);
+
+fail3:
+ DTRACE_PROBE(fail3);
+
+ /* Destroy the configuration kstats */
+ sfxge_cfg_kstat_fini(sp);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Destroy the soft state */
+ (void) sfxge_destroy(sp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (DDI_FAILURE);
+}
+
+static int
+sfxge_suspend(sfxge_t *sp)
+{
+ /* Stop processing */
+ sfxge_stop(sp);
+
+ return (DDI_SUCCESS);
+}
+
+static int
+sfxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ sfxge_t *sp = ddi_get_driver_private(dip);
+ int rc;
+
+ switch (cmd) {
+ case DDI_DETACH:
+ if (sp == NULL)
+ return (DDI_FAILURE);
+ break;
+
+ case DDI_SUSPEND:
+ if (sp == NULL)
+ return (DDI_FAILURE);
+ return (sfxge_suspend(sp));
+
+ default:
+ return (DDI_FAILURE);
+ }
+
+ ASSERT(sp != NULL);
+
+ /* Wait for any pending restarts to complete */
+ ddi_taskq_wait(sp->s_tqp);
+
+ /*
+	 * IOCTLs from utilities can cause GLD mc_start() (SFXGE_STARTED state),
+	 * and mc_stop() may not occur until detach time, so the two can race
+	 * (SFC bug 19855). Holding the lock appears to be sufficient - the
+	 * warning below is no longer seen.
+ */
+ mutex_enter(&(sp->s_state_lock));
+ if (sp->s_state == SFXGE_STARTED) {
+ dev_err(dip, CE_WARN, SFXGE_CMN_ERR
+ "STREAMS detach when STARTED");
+ sfxge_stop_locked(sp);
+ ASSERT3U(sp->s_state, ==, SFXGE_REGISTERED);
+ }
+ mutex_exit(&(sp->s_state_lock));
+
+ ASSERT(sp->s_state == SFXGE_REGISTERED ||
+ sp->s_state == SFXGE_INITIALIZED);
+
+ if (sp->s_state != SFXGE_REGISTERED)
+ goto destroy;
+
+ /* Unregister the interface */
+ if ((rc = sfxge_unregister(sp)) != 0)
+ goto fail1;
+
+destroy:
+ /* Destroy the VPD kstats */
+ sfxge_vpd_kstat_fini(sp);
+
+ /* Destroy the configuration kstats */
+ sfxge_cfg_kstat_fini(sp);
+
+ /*
+ * Destroy the soft state - this might fail until rx_loaned packets that
+ * have been passed up the STREAMS stack are returned
+ */
+ if ((rc = sfxge_destroy(sp)) != 0)
+ goto fail2;
+
+ return (DDI_SUCCESS);
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (DDI_FAILURE);
+}
+
+/*
+ * modlinkage
+ */
+
+DDI_DEFINE_STREAM_OPS(sfxge_dev_ops, nulldev, nulldev, sfxge_attach,
+ sfxge_detach, nulldev, NULL, D_MP, NULL, NULL);
+
+static struct modldrv sfxge_modldrv = {
+ &mod_driverops,
+ (char *)sfxge_ident,
+ &sfxge_dev_ops,
+};
+
+static struct modlinkage sfxge_modlinkage = {
+ MODREV_1,
+ { &sfxge_modldrv, NULL }
+};
+
+kmutex_t sfxge_global_lock;
+unsigned int *sfxge_cpu;
+
+int
+_init(void)
+{
+ int rc;
+
+ mutex_init(&sfxge_global_lock, NULL, MUTEX_DRIVER, NULL);
+
+ /* Create tables for CPU, core, cache and chip counts */
+ sfxge_cpu = kmem_zalloc(sizeof (unsigned int) * NCPU, KM_SLEEP);
+
+ mac_init_ops(&sfxge_dev_ops, SFXGE_DRIVER_NAME);
+
+ if ((rc = mod_install(&sfxge_modlinkage)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+	DTRACE_PROBE1(fail1, int, rc);
+
+ mac_fini_ops(&sfxge_dev_ops);
+
+ kmem_free(sfxge_cpu, sizeof (unsigned int) * NCPU);
+ mutex_destroy(&sfxge_global_lock);
+
+ return (rc);
+}
+
+int
+_fini(void)
+{
+ int rc;
+
+ if ((rc = mod_remove(&sfxge_modlinkage)) != 0)
+ return (rc);
+
+ mac_fini_ops(&sfxge_dev_ops);
+
+ /* Destroy tables */
+ kmem_free(sfxge_cpu, sizeof (unsigned int) * NCPU);
+
+ mutex_destroy(&sfxge_global_lock);
+
+ return (0);
+}
+
+int
+_info(struct modinfo *mip)
+{
+ return (mod_info(&sfxge_modlinkage, mip));
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge.h b/usr/src/uts/common/io/sfxge/sfxge.h
new file mode 100644
index 0000000000..df5e2f9e33
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge.h
@@ -0,0 +1,1033 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_SFXGE_H
+#define _SYS_SFXGE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stream.h>
+#include <sys/ethernet.h>
+#include <sys/cpuvar.h>
+
+#include <sys/mac.h>
+#include <sys/mac_ether.h>
+#include <sys/mac_provider.h>
+
+#include "sfxge_ioc.h"
+#include "sfxge_debug.h"
+
+#include "efx.h"
+#include "efx_regs.h"
+
+#ifdef _KERNEL
+
+#define SFXGE_DRIVER_NAME "sfxge"
+
+#define SFXGE_CPU_CACHE_SIZE 64
+
+typedef struct sfxge_s sfxge_t;
+
+typedef enum sfxge_intr_state_e {
+ SFXGE_INTR_UNINITIALIZED = 0,
+ SFXGE_INTR_INITIALIZED,
+ SFXGE_INTR_TESTING,
+ SFXGE_INTR_STARTED
+} sfxge_intr_state_t;
+
+typedef struct sfxge_intr_s {
+ ddi_intr_handle_t *si_table;
+ int si_table_size;
+ int si_nalloc;
+ int si_type;
+ int si_cap;
+ efsys_mem_t si_mem;
+ uint64_t si_mask;
+ sfxge_intr_state_t si_state;
+ uint32_t si_zero_count;
+ int si_intr_pri;
+} sfxge_intr_t;
+
+typedef enum sfxge_promisc_type_e {
+ SFXGE_PROMISC_OFF = 0,
+ SFXGE_PROMISC_ALL_MULTI,
+ SFXGE_PROMISC_ALL_PHYS
+} sfxge_promisc_type_t;
+
+typedef enum sfxge_link_duplex_e {
+ SFXGE_LINK_DUPLEX_UNKNOWN = 0,
+ SFXGE_LINK_DUPLEX_HALF,
+ SFXGE_LINK_DUPLEX_FULL
+} sfxge_link_duplex_t;
+
+typedef enum sfxge_unicst_type_e {
+ SFXGE_UNICST_BIA = 0,
+ SFXGE_UNICST_LAA,
+ SFXGE_UNICST_NTYPES
+} sfxge_unicst_type_t;
+
+typedef struct sfxge_phy_s {
+ kstat_t *sp_ksp;
+ kstat_named_t *sp_stat;
+ uint32_t *sp_statbuf;
+ efsys_mem_t sp_mem;
+} sfxge_phy_t;
+
+typedef enum sfxge_mac_state_e {
+ SFXGE_MAC_UNINITIALIZED = 0,
+ SFXGE_MAC_INITIALIZED,
+ SFXGE_MAC_STARTED
+} sfxge_mac_state_t;
+
+typedef struct sfxge_mac_s {
+ sfxge_t *sm_sp;
+ efsys_mem_t sm_mem;
+ kstat_t *sm_ksp;
+ kstat_named_t *sm_stat;
+ uint8_t sm_bia[ETHERADDRL];
+ uint8_t sm_laa[ETHERADDRL];
+ boolean_t sm_laa_valid;
+ unsigned int sm_fcntl;
+ sfxge_promisc_type_t sm_promisc;
+ uint8_t sm_mcast_addr[EFX_MAC_MULTICAST_LIST_MAX *
+ ETHERADDRL]; /* List of multicast addresses to filter on */
+ int sm_mcast_count;
+ clock_t sm_lbolt;
+ kmutex_t sm_lock;
+ efx_link_mode_t sm_link_mode;
+ unsigned int sm_link_speed;
+ sfxge_link_duplex_t sm_link_duplex;
+ boolean_t sm_link_up;
+ boolean_t sm_link_poll_reqd;
+ kcondvar_t sm_link_poll_kv;
+ boolean_t sm_mac_stats_timer_reqd;
+ boolean_t sm_mac_stats_pend;
+ ddi_taskq_t *sm_tqp;
+ sfxge_mac_state_t sm_state;
+ sfxge_phy_t sm_phy;
+ uint32_t sm_phy_cap_to_set;
+ uint32_t sm_phy_cap_to_unset;
+} sfxge_mac_t;
+
+typedef enum sfxge_mon_state_e {
+ SFXGE_MON_UNINITIALIZED = 0,
+ SFXGE_MON_INITIALIZED,
+ SFXGE_MON_STARTED
+} sfxge_mon_state_t;
+
+typedef struct sfxge_mon_s {
+ sfxge_t *sm_sp;
+ efx_mon_type_t sm_type;
+ unsigned int sm_devid;
+ kstat_t *sm_ksp;
+ kstat_named_t *sm_stat;
+ efx_mon_stat_value_t *sm_statbuf;
+ kmutex_t sm_lock;
+ sfxge_mon_state_t sm_state;
+ efsys_mem_t sm_mem;
+ int sm_polling;
+} sfxge_mon_t;
+
+typedef enum sfxge_sram_state_e {
+ SFXGE_SRAM_UNINITIALIZED = 0,
+ SFXGE_SRAM_INITIALIZED,
+ SFXGE_SRAM_STARTED
+} sfxge_sram_state_t;
+
+typedef struct sfxge_sram_s {
+ sfxge_t *ss_sp;
+ kmutex_t ss_lock;
+ struct map *ss_buf_tbl_map;
+ unsigned int ss_count;
+ sfxge_sram_state_t ss_state;
+} sfxge_sram_t;
+
+typedef enum sfxge_mcdi_state_e {
+ SFXGE_MCDI_UNINITIALIZED = 0,
+ SFXGE_MCDI_INITIALIZED,
+ SFXGE_MCDI_BUSY,
+ SFXGE_MCDI_COMPLETED
+} sfxge_mcdi_state_t;
+
+typedef struct sfxge_mcdi_s {
+ sfxge_t *sm_sp;
+ kmutex_t sm_lock;
+ sfxge_mcdi_state_t sm_state;
+ efx_mcdi_transport_t sm_emt;
+ efsys_mem_t sm_mem;
+ kcondvar_t sm_kv; /* MCDI poll complete */
+} sfxge_mcdi_t;
+
+#define SFXGE_NEVS 4096
+#define SFXGE_RX_NDESCS 1024
+#define SFXGE_TX_NDESCS 1024
+#define SFXGE_TX_NLABELS EFX_EV_TX_NLABELS
+
+#define SFXGE_DEFAULT_RXQ_SIZE 1024
+#define SFXGE_DEFAULT_MODERATION 30
+
+typedef enum sfxge_evq_state_e {
+ SFXGE_EVQ_UNINITIALIZED = 0,
+ SFXGE_EVQ_INITIALIZED,
+ SFXGE_EVQ_STARTING,
+ SFXGE_EVQ_STARTED
+} sfxge_evq_state_t;
+
+#define SFXGE_EV_BATCH (SFXGE_NEVS / 4)
+
+typedef struct sfxge_txq_s sfxge_txq_t;
+
+typedef struct sfxge_evq_s {
+ union {
+ struct {
+ sfxge_t *__se_sp;
+ unsigned int __se_index;
+ efsys_mem_t __se_mem;
+ unsigned int __se_id;
+ kstat_t *__se_ksp;
+ kstat_named_t *__se_stat;
+ efx_ev_callbacks_t __se_eec;
+ sfxge_evq_state_t __se_state;
+ boolean_t __se_exception;
+ } __se_s1;
+ uint8_t __se_pad[SFXGE_CPU_CACHE_SIZE * 4];
+ } __se_u1;
+ union {
+ struct {
+ kmutex_t __se_lock;
+ kcondvar_t __se_init_kv;
+ efx_evq_t *__se_eep;
+ unsigned int __se_count;
+ unsigned int __se_rx;
+ unsigned int __se_tx;
+ sfxge_txq_t *__se_stp;
+ sfxge_txq_t **__se_stpp;
+ processorid_t __se_cpu_id;
+ uint16_t __se_ev_batch;
+ } __se_s2;
+ uint8_t __se_pad[SFXGE_CPU_CACHE_SIZE];
+ } __se_u2;
+ union {
+ struct {
+ sfxge_txq_t *__se_label_stp[SFXGE_TX_NLABELS];
+ } __se_s3;
+ uint8_t __se_pad[SFXGE_CPU_CACHE_SIZE * 4];
+ } __se_u3;
+} sfxge_evq_t;
+
+#define se_sp __se_u1.__se_s1.__se_sp
+#define se_index __se_u1.__se_s1.__se_index
+#define se_mem __se_u1.__se_s1.__se_mem
+#define se_id __se_u1.__se_s1.__se_id
+#define se_ksp __se_u1.__se_s1.__se_ksp
+#define se_stat __se_u1.__se_s1.__se_stat
+#define se_eec __se_u1.__se_s1.__se_eec
+#define se_state __se_u1.__se_s1.__se_state
+#define se_exception __se_u1.__se_s1.__se_exception
+
+#define se_lock __se_u2.__se_s2.__se_lock
+#define se_init_kv __se_u2.__se_s2.__se_init_kv
+#define se_eep __se_u2.__se_s2.__se_eep
+#define se_count __se_u2.__se_s2.__se_count
+#define se_rx __se_u2.__se_s2.__se_rx
+#define se_tx __se_u2.__se_s2.__se_tx
+#define se_stp __se_u2.__se_s2.__se_stp
+#define se_stpp __se_u2.__se_s2.__se_stpp
+#define se_cpu_id __se_u2.__se_s2.__se_cpu_id
+#define se_ev_batch __se_u2.__se_s2.__se_ev_batch
+
+#define se_label_stp __se_u3.__se_s3.__se_label_stp
+
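The sfxge_evq_t layout above groups its fields into unions padded out to SFXGE_CPU_CACHE_SIZE, so that state touched from different contexts (configuration, the event-processing path, label lookup) lands in separate cache lines and does not false-share. A reduced sketch of the same idiom (hypothetical structure and field names):

```c
#define	EXAMPLE_CACHE_SIZE	64

typedef struct example_s {
	union {
		struct {
			uint64_t	__ex_producer;	/* producer-side state */
		} __ex_s1;
		uint8_t	__ex_pad1[EXAMPLE_CACHE_SIZE];
	} __ex_u1;
	union {
		struct {
			uint64_t	__ex_consumer;	/* consumer-side state */
		} __ex_s2;
		uint8_t	__ex_pad2[EXAMPLE_CACHE_SIZE];
	} __ex_u2;
} example_t;

/* Short accessors, following the same convention as the driver. */
#define	ex_producer	__ex_u1.__ex_s1.__ex_producer
#define	ex_consumer	__ex_u2.__ex_s2.__ex_consumer
```
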
+
+#define SFXGE_MAGIC_RESERVED 0x8000
+
+#define SFXGE_MAGIC_DMAQ_LABEL_WIDTH 5
+#define SFXGE_MAGIC_DMAQ_LABEL_MASK ((1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH) - 1)
+
+#define SFXGE_MAGIC_RX_QFLUSH_DONE \
+ (SFXGE_MAGIC_RESERVED | (1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+
+#define SFXGE_MAGIC_RX_QFLUSH_FAILED \
+ (SFXGE_MAGIC_RESERVED | (2 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+
+#define SFXGE_MAGIC_RX_QFPP_TRIM \
+ (SFXGE_MAGIC_RESERVED | (3 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+
+#define SFXGE_MAGIC_TX_QFLUSH_DONE \
+ (SFXGE_MAGIC_RESERVED | (4 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
+
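
The SFXGE_MAGIC_* values are software-generated ("magic") event codes posted to an event queue: the DMA queue label occupies the low SFXGE_MAGIC_DMAQ_LABEL_WIDTH bits and the code number sits above it, under the reserved bit. A sketch of how such a code could be composed and decoded (the EXAMPLE_* helper names are hypothetical):

```c
/* Compose: reserved bit | code above the label field | label in low bits. */
#define	EXAMPLE_MAGIC(_code, _label)					\
	(SFXGE_MAGIC_RESERVED |						\
	((_code) << SFXGE_MAGIC_DMAQ_LABEL_WIDTH) |			\
	((_label) & SFXGE_MAGIC_DMAQ_LABEL_MASK))

/* Decode the DMA queue label back out of a received magic event. */
#define	EXAMPLE_MAGIC_LABEL(_magic)					\
	((_magic) & SFXGE_MAGIC_DMAQ_LABEL_MASK)
```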
+typedef struct sfxge_rxq_s sfxge_rxq_t;
+
+#define SFXGE_ETHERTYPE_LOOPBACK 0x9000 /* Xerox loopback */
+
+typedef struct sfxge_rx_packet_s sfxge_rx_packet_t;
+
+struct sfxge_rx_packet_s {
+ union {
+ struct {
+ frtn_t __srp_free;
+ uint16_t __srp_flags;
+ uint16_t __srp_size;
+ mblk_t *__srp_mp;
+ struct ether_header *__srp_etherhp;
+ struct ip *__srp_iphp;
+ struct tcphdr *__srp_thp;
+ size_t __srp_off;
+ } __srp_s1;
+ uint8_t __srp_pad[SFXGE_CPU_CACHE_SIZE];
+ } __srp_u1;
+ union {
+ struct {
+ sfxge_rxq_t *__srp_srp;
+ ddi_dma_handle_t __srp_dma_handle;
+ ddi_acc_handle_t __srp_acc_handle;
+ unsigned char *__srp_base;
+ size_t __srp_mblksize;
+ uint64_t __srp_addr;
+ boolean_t __srp_recycle;
+ caddr_t __srp_putp;
+ } __srp_s2;
+ uint8_t __srp_pad[SFXGE_CPU_CACHE_SIZE * 2];
+ } __srp_u2;
+};
+
+#define srp_free __srp_u1.__srp_s1.__srp_free
+#define srp_flags __srp_u1.__srp_s1.__srp_flags
+#define srp_size __srp_u1.__srp_s1.__srp_size
+#define srp_mp __srp_u1.__srp_s1.__srp_mp
+#define srp_etherhp __srp_u1.__srp_s1.__srp_etherhp
+#define srp_iphp __srp_u1.__srp_s1.__srp_iphp
+#define srp_thp __srp_u1.__srp_s1.__srp_thp
+#define srp_off __srp_u1.__srp_s1.__srp_off
+
+#define srp_srp __srp_u2.__srp_s2.__srp_srp
+#define srp_dma_handle __srp_u2.__srp_s2.__srp_dma_handle
+#define srp_acc_handle __srp_u2.__srp_s2.__srp_acc_handle
+#define srp_base __srp_u2.__srp_s2.__srp_base
+#define srp_mblksize __srp_u2.__srp_s2.__srp_mblksize
+#define srp_addr __srp_u2.__srp_s2.__srp_addr
+#define srp_recycle __srp_u2.__srp_s2.__srp_recycle
+#define srp_putp __srp_u2.__srp_s2.__srp_putp
+
+#define SFXGE_RX_FPP_NSLOTS 8
+#define SFXGE_RX_FPP_MASK (SFXGE_RX_FPP_NSLOTS - 1)
+
+/* Free packet pool putlist (dynamically allocated) */
+typedef struct sfxge_rx_fpp_putlist_s {
+ kmutex_t srfpl_lock;
+ unsigned int srfpl_count;
+ mblk_t *srfpl_putp;
+ mblk_t **srfpl_putpp;
+} sfxge_rx_fpp_putlist_t;
+
+/* Free packet pool */
+typedef struct sfxge_rx_fpp_s {
+ caddr_t srfpp_putp;
+ unsigned int srfpp_loaned;
+ mblk_t *srfpp_get;
+ unsigned int srfpp_count;
+ unsigned int srfpp_min;
+ /* Low water mark: Don't trim to below this */
+ unsigned int srfpp_lowat;
+} sfxge_rx_fpp_t;
+
+typedef struct sfxge_rx_flow_s sfxge_rx_flow_t;
+
+struct sfxge_rx_flow_s {
+ uint32_t srf_tag;
+ /* in-order segment count */
+ unsigned int srf_count;
+ uint16_t srf_tci;
+ uint32_t srf_saddr;
+ uint32_t srf_daddr;
+ uint16_t srf_sport;
+ uint16_t srf_dport;
+ /* sequence number */
+ uint32_t srf_seq;
+ clock_t srf_lbolt;
+ mblk_t *srf_mp;
+ mblk_t **srf_mpp;
+ struct ether_header *srf_etherhp;
+ struct ip *srf_iphp;
+ struct tcphdr *srf_first_thp;
+ struct tcphdr *srf_last_thp;
+ size_t srf_len;
+ sfxge_rx_flow_t *srf_next;
+};
+
+#define SFXGE_MAX_FLOW 1024
+#define SFXGE_SLOW_START 20
+
+typedef enum sfxge_flush_state_e {
+ SFXGE_FLUSH_INACTIVE = 0,
+ SFXGE_FLUSH_DONE,
+ SFXGE_FLUSH_PENDING,
+ SFXGE_FLUSH_FAILED
+} sfxge_flush_state_t;
+
+typedef enum sfxge_rxq_state_e {
+ SFXGE_RXQ_UNINITIALIZED = 0,
+ SFXGE_RXQ_INITIALIZED,
+ SFXGE_RXQ_STARTED
+} sfxge_rxq_state_t;
+
+
+#define SFXGE_RX_BATCH 128
+#define SFXGE_RX_NSTATS 8 /* note that *esballoc share one kstat */
+
+struct sfxge_rxq_s {
+ union {
+ struct {
+ sfxge_t *__sr_sp;
+ unsigned int __sr_index;
+ efsys_mem_t __sr_mem;
+ unsigned int __sr_id;
+ unsigned int __sr_lowat;
+ unsigned int __sr_hiwat;
+ volatile timeout_id_t __sr_tid;
+ sfxge_rxq_state_t __sr_state;
+ } __sr_s1;
+ uint8_t __sr_pad[SFXGE_CPU_CACHE_SIZE * 2];
+ } __sr_u1;
+ union {
+ struct {
+ sfxge_rx_packet_t **__sr_srpp;
+ unsigned int __sr_added;
+ unsigned int __sr_pushed;
+ unsigned int __sr_pending;
+ unsigned int __sr_completed;
+ unsigned int __sr_loopback;
+ mblk_t *__sr_mp;
+ mblk_t **__sr_mpp;
+ sfxge_rx_flow_t *__sr_flow;
+ sfxge_rx_flow_t *__sr_srfp;
+ sfxge_rx_flow_t **__sr_srfpp;
+ clock_t __sr_rto;
+ } __sr_s2;
+ uint8_t __sr_pad[SFXGE_CPU_CACHE_SIZE * 2];
+ } __sr_u2;
+ union {
+ struct {
+ sfxge_rx_fpp_t __sr_fpp;
+ efx_rxq_t *__sr_erp;
+ volatile sfxge_flush_state_t __sr_flush;
+ kcondvar_t __sr_flush_kv;
+ kstat_t *__sr_ksp;
+ } __sr_s3;
+ uint8_t __sr_pad[SFXGE_CPU_CACHE_SIZE];
+ } __sr_u3;
+ struct {
+ /* NB must match SFXGE_RX_NSTATS */
+ uint32_t srk_rx_pkt_mem_limit;
+ uint32_t srk_kcache_alloc_nomem;
+ uint32_t srk_dma_alloc_nomem;
+ uint32_t srk_dma_alloc_fail;
+ uint32_t srk_dma_bind_nomem;
+ uint32_t srk_dma_bind_fail;
+ uint32_t srk_desballoc_fail;
+ uint32_t srk_rxq_empty_discard;
+ } sr_kstat;
+};
+
+#define sr_sp __sr_u1.__sr_s1.__sr_sp
+#define sr_index __sr_u1.__sr_s1.__sr_index
+#define sr_mem __sr_u1.__sr_s1.__sr_mem
+#define sr_id __sr_u1.__sr_s1.__sr_id
+#define sr_mrh __sr_u1.__sr_s1.__sr_mrh
+#define sr_lowat __sr_u1.__sr_s1.__sr_lowat
+#define sr_hiwat __sr_u1.__sr_s1.__sr_hiwat
+#define sr_tid __sr_u1.__sr_s1.__sr_tid
+#define sr_state __sr_u1.__sr_s1.__sr_state
+
+#define sr_srpp __sr_u2.__sr_s2.__sr_srpp
+#define sr_added __sr_u2.__sr_s2.__sr_added
+#define sr_pushed __sr_u2.__sr_s2.__sr_pushed
+#define sr_pending __sr_u2.__sr_s2.__sr_pending
+#define sr_completed __sr_u2.__sr_s2.__sr_completed
+#define sr_loopback __sr_u2.__sr_s2.__sr_loopback
+#define sr_mp __sr_u2.__sr_s2.__sr_mp
+#define sr_mpp __sr_u2.__sr_s2.__sr_mpp
+#define sr_flow __sr_u2.__sr_s2.__sr_flow
+#define sr_srfp __sr_u2.__sr_s2.__sr_srfp
+#define sr_srfpp __sr_u2.__sr_s2.__sr_srfpp
+#define sr_rto __sr_u2.__sr_s2.__sr_rto
+
+#define sr_fpp __sr_u3.__sr_s3.__sr_fpp
+#define sr_erp __sr_u3.__sr_s3.__sr_erp
+#define sr_flush __sr_u3.__sr_s3.__sr_flush
+#define sr_flush_kv __sr_u3.__sr_s3.__sr_flush_kv
+#define sr_ksp __sr_u3.__sr_s3.__sr_ksp
+
+typedef struct sfxge_tx_packet_s sfxge_tx_packet_t;
+
+/* Packet type from parsing transmit packet */
+typedef enum sfxge_packet_type_e {
+ SFXGE_PACKET_TYPE_UNKNOWN = 0,
+ SFXGE_PACKET_TYPE_IPV4_TCP,
+ SFXGE_PACKET_TYPE_IPV4_UDP,
+ SFXGE_PACKET_TYPE_IPV4_SCTP,
+ SFXGE_PACKET_TYPE_IPV4_OTHER,
+ SFXGE_PACKET_NTYPES
+} sfxge_packet_type_t;
+
+struct sfxge_tx_packet_s {
+ sfxge_tx_packet_t *stp_next;
+ mblk_t *stp_mp;
+ struct ether_header *stp_etherhp;
+ struct ip *stp_iphp;
+ struct tcphdr *stp_thp;
+ size_t stp_off;
+ size_t stp_size;
+ size_t stp_mss;
+ uint32_t stp_dpl_put_len;
+};
+
+#define SFXGE_TX_FPP_MAX 64
+
+typedef struct sfxge_tx_fpp_s {
+ sfxge_tx_packet_t *stf_stpp;
+ unsigned int stf_count;
+} sfxge_tx_fpp_t;
+
+typedef struct sfxge_tx_mapping_s sfxge_tx_mapping_t;
+
+#define SFXGE_TX_MAPPING_NADDR (((1 << 16) >> 12) + 2)
+
+struct sfxge_tx_mapping_s {
+ sfxge_tx_mapping_t *stm_next;
+ sfxge_t *stm_sp;
+ mblk_t *stm_mp;
+ ddi_dma_handle_t stm_dma_handle;
+ caddr_t stm_base;
+ size_t stm_size;
+ size_t stm_off;
+ uint64_t stm_addr[SFXGE_TX_MAPPING_NADDR];
+};
+
+typedef struct sfxge_tx_fmp_s {
+ sfxge_tx_mapping_t *stf_stmp;
+ unsigned int stf_count;
+} sfxge_tx_fmp_t;
+
+typedef struct sfxge_tx_buffer_s sfxge_tx_buffer_t;
+
+struct sfxge_tx_buffer_s {
+ sfxge_tx_buffer_t *stb_next;
+ size_t stb_off;
+ efsys_mem_t stb_esm;
+};
+
+#define SFXGE_TX_BUFFER_SIZE 0x400
+#define SFXGE_TX_HEADER_SIZE 0x100
+#define SFXGE_TX_COPY_THRESHOLD 0x200
+
+typedef struct sfxge_tx_fbp_s {
+ sfxge_tx_buffer_t *stf_stbp;
+ unsigned int stf_count;
+} sfxge_tx_fbp_t;
+
+typedef struct sfxge_tx_dpl_s {
+ uintptr_t std_put;
+ sfxge_tx_packet_t *std_get;
+ sfxge_tx_packet_t **std_getp;
+ unsigned int std_count; /* only get list count */
+ unsigned int get_pkt_limit;
+ unsigned int put_pkt_limit;
+ unsigned int get_full_count;
+ unsigned int put_full_count;
+} sfxge_tx_dpl_t;
+
+typedef enum sfxge_txq_state_e {
+ SFXGE_TXQ_UNINITIALIZED = 0,
+ SFXGE_TXQ_INITIALIZED,
+ SFXGE_TXQ_STARTED,
+ SFXGE_TXQ_FLUSH_PENDING,
+ SFXGE_TXQ_FLUSH_DONE,
+ SFXGE_TXQ_FLUSH_FAILED
+} sfxge_txq_state_t;
+
+typedef enum sfxge_txq_type_e {
+ SFXGE_TXQ_NON_CKSUM = 0,
+ SFXGE_TXQ_IP_CKSUM,
+ SFXGE_TXQ_IP_TCP_UDP_CKSUM,
+ SFXGE_TXQ_NTYPES
+} sfxge_txq_type_t;
+
+#define SFXGE_TXQ_UNBLOCK_LEVEL1 (EFX_TXQ_LIMIT(SFXGE_TX_NDESCS) / 4)
+#define SFXGE_TXQ_UNBLOCK_LEVEL2 0
+#define SFXGE_TXQ_NOT_BLOCKED -1
+
+#define SFXGE_TX_BATCH 64
+
+struct sfxge_txq_s {
+ union {
+ struct {
+ sfxge_t *__st_sp;
+ unsigned int __st_index;
+ unsigned int __st_label;
+ sfxge_txq_type_t __st_type;
+ unsigned int __st_evq;
+ efsys_mem_t __st_mem;
+ unsigned int __st_id;
+ kstat_t *__st_ksp;
+ kstat_named_t *__st_stat;
+ sfxge_txq_state_t __st_state;
+ } __st_s1;
+ uint8_t __st_pad[SFXGE_CPU_CACHE_SIZE * 2];
+ } __st_u1;
+ union {
+ struct {
+ sfxge_tx_dpl_t __st_dpl;
+ } __st_s2;
+ uint8_t __st_pad[SFXGE_CPU_CACHE_SIZE];
+ } __st_u2;
+ union {
+ struct {
+ kmutex_t __st_lock;
+ /* mapping pool - sfxge_tx_mapping_t */
+ sfxge_tx_fmp_t __st_fmp;
+ /* buffer pool - sfxge_tx_buffer_t */
+ sfxge_tx_fbp_t __st_fbp;
+ /* packet pool - sfxge_tx_packet_t */
+ sfxge_tx_fpp_t __st_fpp;
+ efx_buffer_t *__st_eb;
+ unsigned int __st_n;
+ efx_txq_t *__st_etp;
+ sfxge_tx_mapping_t **__st_stmp;
+ sfxge_tx_buffer_t **__st_stbp;
+ mblk_t **__st_mp;
+ unsigned int __st_added;
+ unsigned int __st_reaped;
+ int __st_unblock;
+ } __st_s3;
+ uint8_t __st_pad[SFXGE_CPU_CACHE_SIZE * 3];
+ } __st_u3;
+ union {
+ struct {
+ sfxge_txq_t *__st_next;
+ unsigned int __st_pending;
+ unsigned int __st_completed;
+
+ } __st_s4;
+ uint8_t __st_pad[SFXGE_CPU_CACHE_SIZE];
+ } __st_u4;
+};
+
+#define st_sp __st_u1.__st_s1.__st_sp
+#define st_index __st_u1.__st_s1.__st_index
+#define st_label __st_u1.__st_s1.__st_label
+#define st_type __st_u1.__st_s1.__st_type
+#define st_evq __st_u1.__st_s1.__st_evq
+#define st_mem __st_u1.__st_s1.__st_mem
+#define st_id __st_u1.__st_s1.__st_id
+#define st_ksp __st_u1.__st_s1.__st_ksp
+#define st_stat __st_u1.__st_s1.__st_stat
+#define st_state __st_u1.__st_s1.__st_state
+
+#define st_dpl __st_u2.__st_s2.__st_dpl
+
+#define st_lock __st_u3.__st_s3.__st_lock
+#define st_fmp __st_u3.__st_s3.__st_fmp
+#define st_fbp __st_u3.__st_s3.__st_fbp
+#define st_fpp __st_u3.__st_s3.__st_fpp
+#define st_eb __st_u3.__st_s3.__st_eb
+#define st_n __st_u3.__st_s3.__st_n
+#define st_etp __st_u3.__st_s3.__st_etp
+#define st_stmp __st_u3.__st_s3.__st_stmp
+#define st_stbp __st_u3.__st_s3.__st_stbp
+#define st_mp __st_u3.__st_s3.__st_mp
+#define st_added __st_u3.__st_s3.__st_added
+#define st_reaped __st_u3.__st_s3.__st_reaped
+#define st_unblock __st_u3.__st_s3.__st_unblock
+
+#define st_next __st_u4.__st_s4.__st_next
+#define st_pending __st_u4.__st_s4.__st_pending
+#define st_completed __st_u4.__st_s4.__st_completed
+
+typedef enum sfxge_rx_scale_state_e {
+ SFXGE_RX_SCALE_UNINITIALIZED = 0,
+ SFXGE_RX_SCALE_INITIALIZED,
+ SFXGE_RX_SCALE_STARTED
+} sfxge_rx_scale_state_t;
+
+#define SFXGE_RX_SCALE_MAX EFX_RSS_TBL_SIZE
+
+typedef struct sfxge_rx_scale_s {
+ kmutex_t srs_lock;
+ unsigned int *srs_cpu;
+ unsigned int srs_tbl[SFXGE_RX_SCALE_MAX];
+ unsigned int srs_count;
+ kstat_t *srs_ksp;
+ sfxge_rx_scale_state_t srs_state;
+} sfxge_rx_scale_t;
+
+
+typedef enum sfxge_rx_coalesce_mode_e {
+ SFXGE_RX_COALESCE_OFF = 0,
+ SFXGE_RX_COALESCE_DISALLOW_PUSH = 1,
+ SFXGE_RX_COALESCE_ALLOW_PUSH = 2
+} sfxge_rx_coalesce_mode_t;
+
+typedef enum sfxge_vpd_type_e {
+ SFXGE_VPD_ID = 0,
+ SFXGE_VPD_PN = 1,
+ SFXGE_VPD_SN = 2,
+ SFXGE_VPD_EC = 3,
+ SFXGE_VPD_MN = 4,
+ SFXGE_VPD_VD = 5,
+ SFXGE_VPD_VE = 6,
+ SFXGE_VPD_MAX = 7,
+} sfxge_vpd_type_t;
+
+typedef struct sfxge_vpd_kstat_s {
+ kstat_t *svk_ksp;
+ kstat_named_t svk_stat[SFXGE_VPD_MAX];
+ efx_vpd_value_t *svk_vv;
+} sfxge_vpd_kstat_t;
+
+typedef struct sfxge_cfg_kstat_s {
+ struct {
+ kstat_named_t sck_mac;
+ kstat_named_t sck_version;
+ } kstat;
+ struct {
+ char sck_mac[64 + 1];
+ } buf;
+} sfxge_cfg_kstat_t;
+
+typedef enum sfxge_state_e {
+ SFXGE_UNINITIALIZED = 0,
+ SFXGE_INITIALIZED,
+ SFXGE_REGISTERED,
+ SFXGE_STARTING,
+ SFXGE_STARTED,
+ SFXGE_STOPPING
+} sfxge_state_t;
+
+typedef enum sfxge_hw_err_e {
+ SFXGE_HW_OK = 0,
+ SFXGE_HW_ERR,
+} sfxge_hw_err_t;
+
+typedef enum sfxge_action_on_hw_err_e {
+ SFXGE_RECOVER = 0,
+ SFXGE_INVISIBLE = 1,
+ SFXGE_LEAVE_DEAD = 2,
+} sfxge_action_on_hw_err_t;
+
+typedef char *sfxge_mac_priv_prop_t;
+
+#define SFXGE_TOEPLITZ_KEY_LEN 40
+
+struct sfxge_s {
+ kmutex_t s_state_lock;
+ sfxge_state_t s_state;
+ dev_info_t *s_dip;
+ ddi_taskq_t *s_tqp;
+ ddi_acc_handle_t s_pci_handle;
+ uint16_t s_pci_venid;
+ uint16_t s_pci_devid;
+#if EFSYS_OPT_MCDI_LOGGING
+ unsigned int s_bus_addr;
+#endif
+ efx_family_t s_family;
+ unsigned int s_pcie_nlanes;
+ unsigned int s_pcie_linkspeed;
+ kmutex_t s_nic_lock;
+ efsys_bar_t s_bar;
+ sfxge_intr_t s_intr;
+ sfxge_mac_t s_mac;
+ sfxge_mon_t s_mon;
+ sfxge_sram_t s_sram;
+ sfxge_mcdi_t s_mcdi;
+ kmem_cache_t *s_eq0c; /* eventQ 0 */
+ kmem_cache_t *s_eqXc; /* all other eventQs */
+ sfxge_evq_t *s_sep[SFXGE_RX_SCALE_MAX];
+ unsigned int s_ev_moderation;
+ kmem_cache_t *s_rqc;
+ sfxge_rxq_t *s_srp[SFXGE_RX_SCALE_MAX];
+ sfxge_rx_scale_t s_rx_scale;
+ size_t s_rx_prefix_size;
+ size_t s_rx_buffer_size;
+ size_t s_rx_buffer_align;
+ sfxge_rx_coalesce_mode_t s_rx_coalesce_mode;
+ int64_t s_rx_pkt_mem_max;
+ volatile uint64_t s_rx_pkt_mem_alloc;
+ kmem_cache_t *s_rpc;
+ kmem_cache_t *s_tqc;
+ unsigned int s_tx_scale_base[SFXGE_TXQ_NTYPES];
+ unsigned int s_tx_scale_max[SFXGE_TXQ_NTYPES];
+ int s_tx_qcount;
+ sfxge_txq_t *s_stp[SFXGE_RX_SCALE_MAX *
+ SFXGE_TXQ_NTYPES]; /* Sparse array */
+ kmem_cache_t *s_tpc;
+ int s_tx_flush_pending;
+ kmutex_t s_tx_flush_lock;
+ kcondvar_t s_tx_flush_kv;
+ kmem_cache_t *s_tbc;
+ kmem_cache_t *s_tmc;
+ efx_nic_t *s_enp;
+ sfxge_vpd_kstat_t s_vpd_kstat;
+ sfxge_cfg_kstat_t s_cfg_kstat;
+ kstat_t *s_cfg_ksp;
+ size_t s_mtu;
+ int s_rxq_poll_usec;
+ mac_callbacks_t s_mc;
+ mac_handle_t s_mh;
+ sfxge_mac_priv_prop_t *s_mac_priv_props;
+ int s_mac_priv_props_alloc;
+ volatile uint32_t s_nested_restarts;
+ uint32_t s_num_restarts;
+ uint32_t s_num_restarts_hw_err;
+ sfxge_hw_err_t s_hw_err;
+ sfxge_action_on_hw_err_t s_action_on_hw_err;
+ uint16_t s_rxq_size;
+ uint16_t s_evq0_size;
+ uint16_t s_evqX_size;
+#if EFSYS_OPT_MCDI_LOGGING
+ int s_mcdi_logging;
+#endif
+ const uint32_t *s_toeplitz_cache;
+};
+
+typedef struct sfxge_dma_buffer_attr_s {
+ dev_info_t *sdba_dip;
+ ddi_dma_attr_t *sdba_dattrp;
+ int (*sdba_callback) (caddr_t);
+ size_t sdba_length;
+ uint_t sdba_memflags;
+ ddi_device_acc_attr_t *sdba_devaccp;
+ uint_t sdba_bindflags;
+ int sdba_maxcookies;
+ boolean_t sdba_zeroinit;
+} sfxge_dma_buffer_attr_t;
+
+extern const char sfxge_ident[];
+extern uint8_t sfxge_brdcst[];
+
+extern kmutex_t sfxge_global_lock;
+
+extern unsigned int *sfxge_cpu;
+
+extern int sfxge_start(sfxge_t *, boolean_t);
+extern void sfxge_stop(sfxge_t *);
+extern void sfxge_ioctl(sfxge_t *, queue_t *, mblk_t *);
+extern int sfxge_restart_dispatch(sfxge_t *, uint_t,
+ sfxge_hw_err_t, const char *, uint32_t);
+
+extern void sfxge_gld_link_update(sfxge_t *);
+extern void sfxge_gld_mtu_update(sfxge_t *);
+extern void sfxge_gld_rx_post(sfxge_t *, unsigned int,
+ mblk_t *);
+extern void sfxge_gld_rx_push(sfxge_t *);
+extern int sfxge_gld_register(sfxge_t *);
+extern int sfxge_gld_unregister(sfxge_t *);
+
+extern int sfxge_dma_buffer_create(efsys_mem_t *,
+ const sfxge_dma_buffer_attr_t *);
+extern void sfxge_dma_buffer_destroy(efsys_mem_t *);
+
+extern int sfxge_intr_init(sfxge_t *);
+extern int sfxge_intr_start(sfxge_t *);
+extern void sfxge_intr_stop(sfxge_t *);
+extern void sfxge_intr_fini(sfxge_t *);
+extern void sfxge_intr_fatal(sfxge_t *);
+
+extern int sfxge_ev_init(sfxge_t *);
+extern int sfxge_ev_start(sfxge_t *);
+extern void sfxge_ev_moderation_get(sfxge_t *,
+ unsigned int *);
+extern int sfxge_ev_moderation_set(sfxge_t *,
+ unsigned int);
+extern int sfxge_ev_qmoderate(sfxge_t *, unsigned int,
+ unsigned int);
+extern int sfxge_ev_qpoll(sfxge_t *, unsigned int);
+extern int sfxge_ev_qprime(sfxge_t *, unsigned int);
+extern void sfxge_ev_stop(sfxge_t *);
+extern void sfxge_ev_fini(sfxge_t *);
+extern int sfxge_ev_txlabel_alloc(sfxge_t *sp,
+ unsigned int evq, sfxge_txq_t *stp, unsigned int *labelp);
+extern int sfxge_ev_txlabel_free(sfxge_t *sp,
+ unsigned int evq, sfxge_txq_t *stp, unsigned int label);
+
+extern int sfxge_mon_init(sfxge_t *);
+extern int sfxge_mon_start(sfxge_t *);
+extern void sfxge_mon_stop(sfxge_t *);
+extern void sfxge_mon_fini(sfxge_t *);
+
+extern int sfxge_mac_init(sfxge_t *);
+extern int sfxge_mac_start(sfxge_t *, boolean_t);
+extern void sfxge_mac_stat_get(sfxge_t *, unsigned int,
+ uint64_t *);
+extern void sfxge_mac_link_check(sfxge_t *, boolean_t *);
+extern void sfxge_mac_link_speed_get(sfxge_t *,
+ unsigned int *);
+extern void sfxge_mac_link_duplex_get(sfxge_t *,
+ sfxge_link_duplex_t *);
+extern void sfxge_mac_fcntl_get(sfxge_t *, unsigned int *);
+extern int sfxge_mac_fcntl_set(sfxge_t *, unsigned int);
+extern int sfxge_mac_unicst_get(sfxge_t *,
+ sfxge_unicst_type_t, uint8_t *);
+extern int sfxge_mac_unicst_set(sfxge_t *,
+ uint8_t *);
+extern int sfxge_mac_promisc_set(sfxge_t *,
+ sfxge_promisc_type_t);
+extern int sfxge_mac_multicst_add(sfxge_t *,
+ uint8_t const *addr);
+extern int sfxge_mac_multicst_remove(sfxge_t *,
+ uint8_t const *addr);
+extern void sfxge_mac_stop(sfxge_t *);
+extern void sfxge_mac_fini(sfxge_t *);
+extern void sfxge_mac_link_update(sfxge_t *sp,
+ efx_link_mode_t mode);
+
+extern int sfxge_mcdi_init(sfxge_t *sp);
+extern void sfxge_mcdi_fini(sfxge_t *sp);
+extern int sfxge_mcdi_ioctl(sfxge_t *sp,
+ sfxge_mcdi_ioc_t *smip);
+extern int sfxge_mcdi2_ioctl(sfxge_t *sp,
+ sfxge_mcdi2_ioc_t *smip);
+
+extern int sfxge_phy_init(sfxge_t *);
+extern void sfxge_phy_link_mode_get(sfxge_t *,
+ efx_link_mode_t *);
+extern void sfxge_phy_fini(sfxge_t *);
+extern int sfxge_phy_kstat_init(sfxge_t *sp);
+extern void sfxge_phy_kstat_fini(sfxge_t *sp);
+extern uint8_t sfxge_phy_lp_cap_test(sfxge_t *sp,
+ uint32_t field);
+extern int sfxge_phy_cap_apply(sfxge_t *sp,
+ boolean_t use_default);
+extern uint8_t sfxge_phy_cap_test(sfxge_t *sp, uint32_t flags,
+ uint32_t field, boolean_t *mutablep);
+extern int sfxge_phy_cap_set(sfxge_t *sp, uint32_t field,
+ int set);
+
+extern int sfxge_rx_init(sfxge_t *);
+extern int sfxge_rx_start(sfxge_t *);
+extern void sfxge_rx_coalesce_mode_get(sfxge_t *,
+ sfxge_rx_coalesce_mode_t *);
+extern int sfxge_rx_coalesce_mode_set(sfxge_t *,
+ sfxge_rx_coalesce_mode_t);
+extern unsigned int sfxge_rx_scale_prop_get(sfxge_t *);
+extern void sfxge_rx_scale_update(void *);
+extern int sfxge_rx_scale_count_get(sfxge_t *,
+ unsigned int *);
+extern int sfxge_rx_scale_count_set(sfxge_t *,
+ unsigned int);
+extern void sfxge_rx_qcomplete(sfxge_rxq_t *, boolean_t);
+extern void sfxge_rx_qflush_done(sfxge_rxq_t *);
+extern void sfxge_rx_qflush_failed(sfxge_rxq_t *);
+extern void sfxge_rx_qfpp_trim(sfxge_rxq_t *);
+extern void sfxge_rx_stop(sfxge_t *);
+extern unsigned int sfxge_rx_loaned(sfxge_t *);
+extern void sfxge_rx_fini(sfxge_t *);
+
+extern int sfxge_tx_init(sfxge_t *);
+extern int sfxge_tx_start(sfxge_t *);
+extern int sfxge_tx_packet_add(sfxge_t *, mblk_t *);
+extern void sfxge_tx_qcomplete(sfxge_txq_t *);
+extern void sfxge_tx_qflush_done(sfxge_txq_t *);
+extern void sfxge_tx_stop(sfxge_t *);
+extern void sfxge_tx_fini(sfxge_t *);
+extern void sfxge_tx_qdpl_flush(sfxge_txq_t *stp);
+
+extern void sfxge_sram_init(sfxge_t *);
+extern int sfxge_sram_buf_tbl_alloc(sfxge_t *, size_t,
+ uint32_t *);
+extern int sfxge_sram_start(sfxge_t *);
+extern int sfxge_sram_buf_tbl_set(sfxge_t *, uint32_t,
+ efsys_mem_t *, size_t);
+extern void sfxge_sram_buf_tbl_clear(sfxge_t *, uint32_t,
+ size_t);
+extern void sfxge_sram_stop(sfxge_t *);
+extern void sfxge_sram_buf_tbl_free(sfxge_t *, uint32_t,
+ size_t);
+extern void sfxge_sram_fini(sfxge_t *);
+
+extern sfxge_packet_type_t sfxge_pkthdr_parse(mblk_t *,
+ struct ether_header **, struct ip **, struct tcphdr **, size_t *, size_t *,
+ uint16_t *, uint16_t *);
+
+extern int sfxge_toeplitz_hash_init(sfxge_t *);
+extern void sfxge_toeplitz_hash_fini(sfxge_t *);
+extern uint32_t sfxge_toeplitz_hash(sfxge_t *, unsigned int,
+ uint8_t *, uint16_t, uint8_t *, uint16_t);
+
+/*
+ * 4-tuple hash for TCP/IPv4 used for LRO, TSO and TX queue selection.
+ * To compute the same hash value as Siena/Huntington hardware, the inputs
+ * must be in big endian (network) byte order.
+ */
+#define SFXGE_TCP_HASH(_sp, _raddr, _rport, _laddr, _lport, _hash) \
+ do { \
+ (_hash) = sfxge_toeplitz_hash(_sp, \
+ sizeof (struct in_addr), \
+ (uint8_t *)(_raddr), \
+ (_rport), \
+ (uint8_t *)(_laddr), \
+ (_lport)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * 4-tuple hash for non-TCP IPv4 packets, used for TX queue selection.
+ * For UDP or SCTP packets, calculate a 4-tuple hash using port numbers.
+ * For other IPv4 non-TCP packets, use zero for the port numbers.
+ */
+#define SFXGE_IP_HASH(_sp, _raddr, _rport, _laddr, _lport, _hash) \
+ SFXGE_TCP_HASH((_sp), (_raddr), (_rport), (_laddr), (_lport), (_hash))
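
Both hash macros feed the Toeplitz hash with the 4-tuple in network byte order, as it already appears in the packet headers. A usage sketch for selecting a transmit queue from a parsed TCP/IPv4 header (the helper and the simple modulo selection are illustrative, not the driver's exact policy):

```c
static unsigned int
example_select_txq(sfxge_t *sp, struct ip *iphp, struct tcphdr *thp,
    unsigned int nqueues)
{
	uint32_t hash;

	/* Addresses and ports are already big-endian in the packet. */
	SFXGE_TCP_HASH(sp, &iphp->ip_dst, thp->th_dport,
	    &iphp->ip_src, thp->th_sport, hash);

	return (hash % nqueues);
}
```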
+
+
+extern int sfxge_nvram_ioctl(sfxge_t *, sfxge_nvram_ioc_t *);
+
+extern int sfxge_pci_init(sfxge_t *);
+extern void sfxge_pcie_check_link(sfxge_t *, unsigned int,
+ unsigned int);
+extern void sfxge_pci_fini(sfxge_t *);
+
+extern int sfxge_bar_init(sfxge_t *);
+extern void sfxge_bar_fini(sfxge_t *);
+
+extern int sfxge_vpd_ioctl(sfxge_t *, sfxge_vpd_ioc_t *);
+
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SFXGE_H */
diff --git a/usr/src/uts/common/io/sfxge/sfxge_bar.c b/usr/src/uts/common/io/sfxge/sfxge_bar.c
new file mode 100644
index 0000000000..ee84c07026
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_bar.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stream.h>
+#include <sys/dlpi.h>
+
+#include "sfxge.h"
+
+int
+sfxge_bar_init(sfxge_t *sp)
+{
+ efsys_bar_t *esbp = &(sp->s_bar);
+ ddi_device_acc_attr_t devacc;
+ int rc;
+
+ devacc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
+ devacc.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
+ devacc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
+
+ if (ddi_regs_map_setup(sp->s_dip, EFX_MEM_BAR, &(esbp->esb_base), 0, 0,
+ &devacc, &(esbp->esb_handle)) != DDI_SUCCESS) {
+ rc = ENODEV;
+ goto fail1;
+ }
+
+ mutex_init(&(esbp->esb_lock), NULL, MUTEX_DRIVER, NULL);
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+void
+sfxge_bar_fini(sfxge_t *sp)
+{
+ efsys_bar_t *esbp = &(sp->s_bar);
+
+ ddi_regs_map_free(&(esbp->esb_handle));
+
+ mutex_destroy(&(esbp->esb_lock));
+
+ esbp->esb_base = NULL;
+ esbp->esb_handle = NULL;
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_debug.h b/usr/src/uts/common/io/sfxge/sfxge_debug.h
new file mode 100644
index 0000000000..b8a4a65c00
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_debug.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_SFXGE_DEBUG_H
+#define _SYS_SFXGE_DEBUG_H
+
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef DEBUG
+
+extern boolean_t sfxge_aask;
+
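+/*
+ * Check that an object is entirely zeroed (typically before it is freed
+ * back to a kmem cache). If a non-zero byte is found, report its offset
+ * and, if sfxge_aask is set, drop into the kernel debugger.
+ */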
+#define SFXGE_OBJ_CHECK(_objp, _type) \
+ do { \
+ uint8_t *p = (uint8_t *)(_objp); \
+ size_t off; \
+ \
+ for (off = 0; off < sizeof (_type); off++) { \
+ char buf[MAXNAMELEN]; \
+ \
+ if (*p++ == 0) \
+ continue; \
+ \
+ (void) snprintf(buf, MAXNAMELEN - 1, \
+ "%s[%d]: non-zero byte found in %s " \
+ "at 0x%p+%lx", __FILE__, __LINE__, #_type, \
+ (void *)(_objp), off); \
+ \
+ if (sfxge_aask) \
+ debug_enter(buf); \
+ break; \
+ } \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Log cmn_err(9F) messages to console and system log */
+#define SFXGE_CMN_ERR ""
+
+#else /* DEBUG */
+
+#define SFXGE_OBJ_CHECK(_objp, _type)
+
+/* Log cmn_err(9F) messages to system log only */
+#define SFXGE_CMN_ERR "!"
+
+#endif /* DEBUG */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SFXGE_DEBUG_H */
diff --git a/usr/src/uts/common/io/sfxge/sfxge_dma.c b/usr/src/uts/common/io/sfxge/sfxge_dma.c
new file mode 100644
index 0000000000..6715888eab
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_dma.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/ddi.h>
+
+#include "sfxge.h"
+#include "efx.h"
+
+static int
+sfxge_dma_buffer_unbind_handle(efsys_mem_t *esmp)
+{
+ int rc;
+
+ esmp->esm_addr = 0;
+ rc = ddi_dma_unbind_handle(esmp->esm_dma_handle);
+ if (rc != DDI_SUCCESS)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+sfxge_dma_buffer_mem_free(efsys_mem_t *esmp)
+{
+ esmp->esm_base = NULL;
+ ddi_dma_mem_free(&(esmp->esm_acc_handle));
+ esmp->esm_acc_handle = NULL;
+}
+
+static void
+sfxge_dma_buffer_handle_free(ddi_dma_handle_t *dhandlep)
+{
+ ddi_dma_free_handle(dhandlep);
+ *dhandlep = NULL;
+}
+
+int
+sfxge_dma_buffer_create(efsys_mem_t *esmp, const sfxge_dma_buffer_attr_t *sdbap)
+{
+ int err;
+ int rc;
+ size_t unit;
+ ddi_dma_cookie_t dmac;
+ unsigned int ncookies;
+
+ /* Allocate a DMA handle */
+ err = ddi_dma_alloc_handle(sdbap->sdba_dip, sdbap->sdba_dattrp,
+ sdbap->sdba_callback, NULL, &(esmp->esm_dma_handle));
+ switch (err) {
+ case DDI_SUCCESS:
+ break;
+
+ case DDI_DMA_BADATTR:
+ rc = EINVAL;
+ goto fail1;
+
+ case DDI_DMA_NORESOURCES:
+ rc = ENOMEM;
+ goto fail1;
+
+ default:
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ /* Allocate some DMA memory */
+ err = ddi_dma_mem_alloc(esmp->esm_dma_handle, sdbap->sdba_length,
+ sdbap->sdba_devaccp, sdbap->sdba_memflags,
+ sdbap->sdba_callback, NULL,
+ &(esmp->esm_base), &unit, &(esmp->esm_acc_handle));
+ switch (err) {
+ case DDI_SUCCESS:
+ break;
+
+ case DDI_FAILURE:
+ /*FALLTHRU*/
+ default:
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ if (sdbap->sdba_zeroinit)
+ bzero(esmp->esm_base, sdbap->sdba_length);
+
+ /* Bind the DMA memory to the DMA handle */
+ /* We aren't handling partial mappings */
+ ASSERT3U(sdbap->sdba_bindflags & DDI_DMA_PARTIAL, !=, DDI_DMA_PARTIAL);
+ err = ddi_dma_addr_bind_handle(esmp->esm_dma_handle, NULL,
+ esmp->esm_base, sdbap->sdba_length, sdbap->sdba_bindflags,
+ sdbap->sdba_callback, NULL, &dmac, &ncookies);
+ switch (err) {
+ case DDI_DMA_MAPPED:
+ break;
+
+ case DDI_DMA_INUSE:
+ rc = EEXIST;
+ goto fail3;
+
+ case DDI_DMA_NORESOURCES:
+ rc = ENOMEM;
+ goto fail3;
+
+ case DDI_DMA_NOMAPPING:
+ rc = ENOTSUP;
+ goto fail3;
+
+ case DDI_DMA_TOOBIG:
+ rc = EFBIG;
+ goto fail3;
+
+ default:
+ rc = EFAULT;
+ goto fail3;
+ }
+ ASSERT3U(ncookies, >=, 1);
+ ASSERT3U(ncookies, <=, sdbap->sdba_maxcookies);
+
+ esmp->esm_addr = dmac.dmac_laddress;
+ esmp->esm_size = dmac.dmac_size;
+ DTRACE_PROBE1(addr, efsys_dma_addr_t, esmp->esm_addr);
+
+ return (0);
+
+fail3:
+ DTRACE_PROBE(fail3);
+
+ sfxge_dma_buffer_mem_free(esmp);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ sfxge_dma_buffer_handle_free(&(esmp->esm_dma_handle));
+ esmp->esm_dma_handle = NULL;
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (-1);
+}
+
+void
+sfxge_dma_buffer_destroy(efsys_mem_t *esmp)
+{
+ int rc;
+
+ rc = sfxge_dma_buffer_unbind_handle(esmp);
+ if (rc != 0) {
+ cmn_err(CE_WARN, SFXGE_CMN_ERR "DMA Unbind failed rc=%d", rc);
+ }
+ sfxge_dma_buffer_mem_free(esmp);
+ sfxge_dma_buffer_handle_free(&(esmp->esm_dma_handle));
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_err.c b/usr/src/uts/common/io/sfxge/sfxge_err.c
new file mode 100644
index 0000000000..879c6ca610
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_err.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include "sfxge.h"
+
+#include "efx.h"
+
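+/* Fatal hardware error descriptions, indexed by the error code */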
+static const char *__sfxge_err[] = {
+ "",
+ "SRAM out-of-bounds",
+ "Buffer ID out-of-bounds",
+ "Internal memory parity",
+ "Receive buffer ownership",
+ "Transmit buffer ownership",
+ "Receive descriptor ownership",
+ "Transmit descriptor ownership",
+ "Event queue ownership",
+ "Event queue FIFO overflow",
+ "Illegal address",
+ "SRAM parity"
+};
+
+void
+sfxge_err(efsys_identifier_t *arg, unsigned int code, uint32_t dword0,
+ uint32_t dword1)
+{
+ sfxge_t *sp = (sfxge_t *)arg;
+ dev_info_t *dip = sp->s_dip;
+
+ ASSERT3U(code, <, EFX_ERR_NCODES);
+
+ dev_err(dip, CE_WARN, SFXGE_CMN_ERR "FATAL ERROR: %s (0x%08x%08x)",
+ __sfxge_err[code], dword1, dword0);
+}
+
+void
+sfxge_intr_fatal(sfxge_t *sp)
+{
+ efx_nic_t *enp = sp->s_enp;
+ int err;
+
+ efx_intr_disable(enp);
+ efx_intr_fatal(enp);
+
+ err = sfxge_restart_dispatch(sp, DDI_NOSLEEP, SFXGE_HW_ERR,
+ "Fatal Interrupt", 0);
+ if (err != 0) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "UNRECOVERABLE ERROR:"
+ " Could not schedule driver restart. err=%d",
+ err);
+ ASSERT(B_FALSE);
+ }
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_ev.c b/usr/src/uts/common/io/sfxge/sfxge_ev.c
new file mode 100644
index 0000000000..a1fa772c6d
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_ev.c
@@ -0,0 +1,1341 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/strsubr.h>
+#include <sys/cpu.h>
+#include <sys/pghw.h>
+
+#include "sfxge.h"
+
+#include "efx.h"
+
+
+/* Timeout to wait for DRIVER_EV_START event at EVQ startup */
+#define SFXGE_EV_QSTART_TIMEOUT_USEC (2000000)
+
+
+/* Event queue DMA attributes */
+static ddi_device_acc_attr_t sfxge_evq_devacc = {
+
+ DDI_DEVICE_ATTR_V0, /* devacc_attr_version */
+ DDI_NEVERSWAP_ACC, /* devacc_attr_endian_flags */
+ DDI_STRICTORDER_ACC /* devacc_attr_dataorder */
+};
+
+static ddi_dma_attr_t sfxge_evq_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffffffffffull, /* dma_attr_addr_hi */
+ 0xffffffffffffffffull, /* dma_attr_count_max */
+ EFX_BUF_SIZE, /* dma_attr_align */
+ 0xffffffff, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffffffffffull, /* dma_attr_maxxfer */
+ 0xffffffffffffffffull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+static int
+_sfxge_ev_qctor(sfxge_t *sp, sfxge_evq_t *sep, int kmflags, uint16_t evq_size)
+{
+ efsys_mem_t *esmp = &(sep->se_mem);
+ sfxge_dma_buffer_attr_t dma_attr;
+ int rc;
+
+ /* Compile-time structure layout checks */
+ EFX_STATIC_ASSERT(sizeof (sep->__se_u1.__se_s1) <=
+ sizeof (sep->__se_u1.__se_pad));
+ EFX_STATIC_ASSERT(sizeof (sep->__se_u2.__se_s2) <=
+ sizeof (sep->__se_u2.__se_pad));
+ EFX_STATIC_ASSERT(sizeof (sep->__se_u3.__se_s3) <=
+ sizeof (sep->__se_u3.__se_pad));
+
+ bzero(sep, sizeof (sfxge_evq_t));
+
+ sep->se_sp = sp;
+
+ dma_attr.sdba_dip = sp->s_dip;
+ dma_attr.sdba_dattrp = &sfxge_evq_dma_attr;
+ dma_attr.sdba_callback = (kmflags == KM_SLEEP) ?
+ DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
+ dma_attr.sdba_length = EFX_EVQ_SIZE(evq_size);
+ dma_attr.sdba_memflags = DDI_DMA_CONSISTENT;
+ dma_attr.sdba_devaccp = &sfxge_evq_devacc;
+ dma_attr.sdba_bindflags = DDI_DMA_READ | DDI_DMA_CONSISTENT;
+ dma_attr.sdba_maxcookies = 1;
+ dma_attr.sdba_zeroinit = B_FALSE;
+
+ if ((rc = sfxge_dma_buffer_create(esmp, &dma_attr)) != 0)
+ goto fail1;
+
+ /* Allocate some buffer table entries */
+ if ((rc = sfxge_sram_buf_tbl_alloc(sp, EFX_EVQ_NBUFS(evq_size),
+ &(sep->se_id))) != 0)
+ goto fail2;
+
+ sep->se_stpp = &(sep->se_stp);
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Tear down DMA setup */
+ esmp->esm_addr = 0;
+ sfxge_dma_buffer_destroy(esmp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ sep->se_sp = NULL;
+
+ SFXGE_OBJ_CHECK(sep, sfxge_evq_t);
+
+ return (-1);
+}
+
+static int
+sfxge_ev_q0ctor(void *buf, void *arg, int kmflags)
+{
+ sfxge_evq_t *sep = buf;
+ sfxge_t *sp = arg;
+ return (_sfxge_ev_qctor(sp, sep, kmflags, sp->s_evq0_size));
+}
+
+static int
+sfxge_ev_qXctor(void *buf, void *arg, int kmflags)
+{
+ sfxge_evq_t *sep = buf;
+ sfxge_t *sp = arg;
+ return (_sfxge_ev_qctor(sp, sep, kmflags, sp->s_evqX_size));
+}
+
+static void
+_sfxge_ev_qdtor(sfxge_t *sp, sfxge_evq_t *sep, uint16_t evq_size)
+{
+ efsys_mem_t *esmp = &(sep->se_mem);
+ ASSERT3P(sep->se_sp, ==, sp);
+ ASSERT3P(sep->se_stpp, ==, &(sep->se_stp));
+ sep->se_stpp = NULL;
+
+ /* Free the buffer table entries */
+ sfxge_sram_buf_tbl_free(sp, sep->se_id, EFX_EVQ_NBUFS(evq_size));
+ sep->se_id = 0;
+
+ /* Tear down DMA setup */
+ sfxge_dma_buffer_destroy(esmp);
+
+ sep->se_sp = NULL;
+
+ SFXGE_OBJ_CHECK(sep, sfxge_evq_t);
+}
+
+static void
+sfxge_ev_q0dtor(void *buf, void *arg)
+{
+ sfxge_evq_t *sep = buf;
+ sfxge_t *sp = arg;
+ _sfxge_ev_qdtor(sp, sep, sp->s_evq0_size);
+}
+
+static void
+sfxge_ev_qXdtor(void *buf, void *arg)
+{
+ sfxge_evq_t *sep = buf;
+ sfxge_t *sp = arg;
+ _sfxge_ev_qdtor(sp, sep, sp->s_evqX_size);
+}
+
+static boolean_t
+sfxge_ev_initialized(void *arg)
+{
+ sfxge_evq_t *sep = arg;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ /* Init done events may be duplicated on 7xxx (see SFCbug31631) */
+ if (sep->se_state == SFXGE_EVQ_STARTED)
+ goto done;
+
+ ASSERT3U(sep->se_state, ==, SFXGE_EVQ_STARTING);
+ sep->se_state = SFXGE_EVQ_STARTED;
+
+ cv_broadcast(&(sep->se_init_kv));
+
+done:
+ return (B_FALSE);
+}
+
+static void
+sfxge_ev_qcomplete(sfxge_evq_t *sep, boolean_t eop)
+{
+ sfxge_t *sp = sep->se_sp;
+ unsigned int index = sep->se_index;
+ sfxge_rxq_t *srp = sp->s_srp[index];
+ sfxge_txq_t *stp;
+
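+	/*
+	 * Walk the list of TXQs attached to this EVQ and complete any
+	 * pending transmits on each of them.
+	 */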
+ if ((stp = sep->se_stp) != NULL) {
+ sep->se_stp = NULL;
+ sep->se_stpp = &(sep->se_stp);
+
+ do {
+ sfxge_txq_t *next;
+
+ next = stp->st_next;
+ stp->st_next = NULL;
+
+ ASSERT3U(stp->st_evq, ==, index);
+
+ if (stp->st_pending != stp->st_completed)
+ sfxge_tx_qcomplete(stp);
+
+ stp = next;
+ } while (stp != NULL);
+ }
+
+ if (srp != NULL) {
+ if (srp->sr_pending != srp->sr_completed)
+ sfxge_rx_qcomplete(srp, eop);
+ }
+}
+
+static boolean_t
+sfxge_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size,
+ uint16_t flags)
+{
+ sfxge_evq_t *sep = arg;
+ sfxge_t *sp = sep->se_sp;
+ sfxge_rxq_t *srp;
+ sfxge_rx_packet_t *srpp;
+ unsigned int prefetch;
+ unsigned int stop;
+ unsigned int delta;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ if (sep->se_exception)
+ goto done;
+
+ srp = sp->s_srp[label];
+ if (srp == NULL)
+ goto done;
+
+ ASSERT3U(sep->se_index, ==, srp->sr_index);
+ ASSERT3U(id, <, sp->s_rxq_size);
+
+ /*
+	 * Note that in sfxge_stop() the EVQ is stopped after the RXQ and will
+	 * be reset, so returning here without increasing srp->sr_pending is
+	 * safe.
+ */
+ if (srp->sr_state != SFXGE_RXQ_STARTED)
+ goto done;
+
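+	/*
+	 * The event carries the index of the last completed descriptor;
+	 * compute the number of newly completed descriptors as the distance
+	 * from the first pending descriptor, modulo the (power of two)
+	 * ring size.
+	 */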
+ stop = (id + 1) & (sp->s_rxq_size - 1);
+ id = srp->sr_pending & (sp->s_rxq_size - 1);
+
+ delta = (stop >= id) ? (stop - id) : (sp->s_rxq_size - id + stop);
+ srp->sr_pending += delta;
+
+ if (delta != 1) {
+ if ((!efx_nic_cfg_get(sp->s_enp)->enc_rx_batching_enabled) ||
+ (delta == 0) ||
+ (delta > efx_nic_cfg_get(sp->s_enp)->enc_rx_batch_max)) {
+ /*
+ * FIXME: This does not take into account scatter
+ * aborts. See Bug40811
+ */
+ sep->se_exception = B_TRUE;
+
+ DTRACE_PROBE(restart_ev_rx_id);
+ /* sfxge_evq_t->se_lock held */
+ (void) sfxge_restart_dispatch(sp, DDI_SLEEP,
+ SFXGE_HW_ERR, "Out of order RX event", delta);
+
+ goto done;
+ }
+ }
+
+ prefetch = (id + 4) & (sp->s_rxq_size - 1);
+ if ((srpp = srp->sr_srpp[prefetch]) != NULL)
+ prefetch_read_many(srpp);
+
+ srpp = srp->sr_srpp[id];
+ ASSERT(srpp != NULL);
+ prefetch_read_many(srpp->srp_mp);
+
+ for (; id != stop; id = (id + 1) & (sp->s_rxq_size - 1)) {
+ srpp = srp->sr_srpp[id];
+ ASSERT(srpp != NULL);
+
+ ASSERT3U(srpp->srp_flags, ==, EFX_DISCARD);
+ srpp->srp_flags = flags;
+
+ ASSERT3U(size, <, (1 << 16));
+ srpp->srp_size = (uint16_t)size;
+ }
+
+ sep->se_rx++;
+
+ DTRACE_PROBE2(qlevel, unsigned int, srp->sr_index,
+ unsigned int, srp->sr_added - srp->sr_pending);
+
+ if (srp->sr_pending - srp->sr_completed >= SFXGE_RX_BATCH)
+ sfxge_ev_qcomplete(sep, B_FALSE);
+
+done:
+ /* returning B_TRUE makes efx_ev_qpoll() stop processing events */
+ return (sep->se_rx >= sep->se_ev_batch);
+}
+
+static boolean_t
+sfxge_ev_exception(void *arg, uint32_t code, uint32_t data)
+{
+ sfxge_evq_t *sep = arg;
+ sfxge_t *sp = sep->se_sp;
+
+	_NOTE(ARGUNUSED(data))
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+ sep->se_exception = B_TRUE;
+
+ if (code != EFX_EXCEPTION_UNKNOWN_SENSOREVT) {
+
+ DTRACE_PROBE(restart_ev_exception);
+
+ /* sfxge_evq_t->se_lock held */
+ (void) sfxge_restart_dispatch(sp, DDI_SLEEP, SFXGE_HW_ERR,
+ "Unknown EV", code);
+ }
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_rxq_flush_done(void *arg, uint32_t rxq_index)
+{
+ sfxge_evq_t *sep_targetq, *sep = arg;
+ sfxge_t *sp = sep->se_sp;
+ sfxge_rxq_t *srp;
+ unsigned int index;
+ unsigned int label;
+ uint16_t magic;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ /* Ensure RXQ exists, as events may arrive after RXQ was destroyed */
+ srp = sp->s_srp[rxq_index];
+ if (srp == NULL)
+ goto done;
+
+ /* Process right now if it is the correct event queue */
+ index = srp->sr_index;
+ if (index == sep->se_index) {
+ sfxge_rx_qflush_done(srp);
+ goto done;
+ }
+
+ /* Resend a software event on the correct queue */
+ sep_targetq = sp->s_sep[index];
+
+ if (sep_targetq->se_state != SFXGE_EVQ_STARTED)
+ goto done; /* TBD: state test not under the lock */
+
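+	/* The magic value encodes the event type plus the DMA queue label */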
+ label = rxq_index;
+ ASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label);
+ magic = SFXGE_MAGIC_RX_QFLUSH_DONE | label;
+
+ efx_ev_qpost(sep_targetq->se_eep, magic);
+
+done:
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_rxq_flush_failed(void *arg, uint32_t rxq_index)
+{
+ sfxge_evq_t *sep_targetq, *sep = arg;
+ sfxge_t *sp = sep->se_sp;
+ sfxge_rxq_t *srp;
+ unsigned int index;
+ unsigned int label;
+ uint16_t magic;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ /* Ensure RXQ exists, as events may arrive after RXQ was destroyed */
+ srp = sp->s_srp[rxq_index];
+ if (srp == NULL)
+ goto done;
+
+ /* Process right now if it is the correct event queue */
+ index = srp->sr_index;
+ if (index == sep->se_index) {
+ sfxge_rx_qflush_failed(srp);
+ goto done;
+ }
+
+ /* Resend a software event on the correct queue */
+ sep_targetq = sp->s_sep[index];
+
+ label = rxq_index;
+ ASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label);
+ magic = SFXGE_MAGIC_RX_QFLUSH_FAILED | label;
+
+ if (sep_targetq->se_state != SFXGE_EVQ_STARTED)
+ goto done; /* TBD: state test not under the lock */
+
+ efx_ev_qpost(sep_targetq->se_eep, magic);
+
+done:
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_tx(void *arg, uint32_t label, uint32_t id)
+{
+ sfxge_evq_t *sep = arg;
+ sfxge_txq_t *stp;
+ unsigned int stop;
+ unsigned int delta;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ stp = sep->se_label_stp[label];
+ if (stp == NULL)
+ goto done;
+
+ if (stp->st_state != SFXGE_TXQ_STARTED)
+ goto done;
+
+ ASSERT3U(sep->se_index, ==, stp->st_evq);
+
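+	/*
+	 * As for RX, compute the number of newly completed descriptors from
+	 * the index carried in the event, modulo the TX ring size.
+	 */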
+ stop = (id + 1) & (SFXGE_TX_NDESCS - 1);
+ id = stp->st_pending & (SFXGE_TX_NDESCS - 1);
+
+ delta = (stop >= id) ? (stop - id) : (SFXGE_TX_NDESCS - id + stop);
+ stp->st_pending += delta;
+
+ sep->se_tx++;
+
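+	/*
+	 * Add the TXQ to this EVQ's completion list (via se_stpp) if it is
+	 * not already on it, so that sfxge_ev_qcomplete() will process it.
+	 */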
+ if (stp->st_next == NULL &&
+ sep->se_stpp != &(stp->st_next)) {
+ *(sep->se_stpp) = stp;
+ sep->se_stpp = &(stp->st_next);
+ }
+
+ DTRACE_PROBE2(qlevel, unsigned int, stp->st_index,
+ unsigned int, stp->st_added - stp->st_pending);
+
+ if (stp->st_pending - stp->st_completed >= SFXGE_TX_BATCH)
+ sfxge_tx_qcomplete(stp);
+
+done:
+ /* returning B_TRUE makes efx_ev_qpoll() stop processing events */
+ return (sep->se_tx >= sep->se_ev_batch);
+}
+
+static boolean_t
+sfxge_ev_txq_flush_done(void *arg, uint32_t txq_index)
+{
+ sfxge_evq_t *sep = arg;
+ sfxge_t *sp = sep->se_sp;
+ sfxge_txq_t *stp;
+ unsigned int evq;
+ unsigned int label;
+ uint16_t magic;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ /* Ensure TXQ exists, as events may arrive after TXQ was destroyed */
+ stp = sp->s_stp[txq_index];
+ if (stp == NULL)
+ goto done;
+
+ /* Process right now if it is the correct event queue */
+ evq = stp->st_evq;
+ if (evq == sep->se_index) {
+ sfxge_tx_qflush_done(stp);
+ goto done;
+ }
+
+ /* Resend a software event on the correct queue */
+ sep = sp->s_sep[evq];
+
+ label = stp->st_label;
+
+ ASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label);
+ magic = SFXGE_MAGIC_TX_QFLUSH_DONE | label;
+
+ ASSERT3U(sep->se_state, ==, SFXGE_EVQ_STARTED);
+ efx_ev_qpost(sep->se_eep, magic);
+
+done:
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_software(void *arg, uint16_t magic)
+{
+ sfxge_evq_t *sep = arg;
+ sfxge_t *sp = sep->se_sp;
+ dev_info_t *dip = sp->s_dip;
+ unsigned int label;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ EFX_STATIC_ASSERT(SFXGE_MAGIC_DMAQ_LABEL_WIDTH ==
+ FSF_AZ_RX_EV_Q_LABEL_WIDTH);
+ EFX_STATIC_ASSERT(SFXGE_MAGIC_DMAQ_LABEL_WIDTH ==
+ FSF_AZ_TX_EV_Q_LABEL_WIDTH);
+
+ label = magic & SFXGE_MAGIC_DMAQ_LABEL_MASK;
+ magic &= ~SFXGE_MAGIC_DMAQ_LABEL_MASK;
+
+ switch (magic) {
+ case SFXGE_MAGIC_RX_QFLUSH_DONE: {
+ sfxge_rxq_t *srp = sp->s_srp[label];
+
+ if (srp != NULL) {
+ ASSERT3U(sep->se_index, ==, srp->sr_index);
+
+ sfxge_rx_qflush_done(srp);
+ }
+ break;
+ }
+ case SFXGE_MAGIC_RX_QFLUSH_FAILED: {
+ sfxge_rxq_t *srp = sp->s_srp[label];
+
+ if (srp != NULL) {
+ ASSERT3U(sep->se_index, ==, srp->sr_index);
+
+ sfxge_rx_qflush_failed(srp);
+ }
+ break;
+ }
+ case SFXGE_MAGIC_RX_QFPP_TRIM: {
+ sfxge_rxq_t *srp = sp->s_srp[label];
+
+ if (srp != NULL) {
+ ASSERT3U(sep->se_index, ==, srp->sr_index);
+
+ sfxge_rx_qfpp_trim(srp);
+ }
+ break;
+ }
+ case SFXGE_MAGIC_TX_QFLUSH_DONE: {
+ sfxge_txq_t *stp = sep->se_label_stp[label];
+
+ if (stp != NULL) {
+ ASSERT3U(sep->se_index, ==, stp->st_evq);
+
+ sfxge_tx_qflush_done(stp);
+ }
+ break;
+ }
+ default:
+ dev_err(dip, CE_NOTE,
+ SFXGE_CMN_ERR "unknown software event 0x%x", magic);
+ break;
+ }
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_sram(void *arg, uint32_t code)
+{
+ _NOTE(ARGUNUSED(arg))
+
+ switch (code) {
+ case EFX_SRAM_UPDATE:
+ DTRACE_PROBE(sram_update);
+ break;
+
+ case EFX_SRAM_CLEAR:
+ DTRACE_PROBE(sram_clear);
+ break;
+
+ case EFX_SRAM_ILLEGAL_CLEAR:
+ DTRACE_PROBE(sram_illegal_clear);
+ break;
+
+ default:
+ ASSERT(B_FALSE);
+ break;
+ }
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_timer(void *arg, uint32_t index)
+{
+ _NOTE(ARGUNUSED(arg, index))
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_wake_up(void *arg, uint32_t index)
+{
+ _NOTE(ARGUNUSED(arg, index))
+
+ return (B_FALSE);
+}
+
+static boolean_t
+sfxge_ev_link_change(void *arg, efx_link_mode_t link_mode)
+{
+ sfxge_evq_t *sep = arg;
+ sfxge_t *sp = sep->se_sp;
+
+ sfxge_mac_link_update(sp, link_mode);
+
+ return (B_FALSE);
+}
+
+static int
+sfxge_ev_kstat_update(kstat_t *ksp, int rw)
+{
+ sfxge_evq_t *sep = ksp->ks_private;
+ kstat_named_t *knp;
+ int rc;
+
+ if (rw != KSTAT_READ) {
+ rc = EACCES;
+ goto fail1;
+ }
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ if (sep->se_state != SFXGE_EVQ_STARTED)
+ goto done;
+
+ efx_ev_qstats_update(sep->se_eep, sep->se_stat);
+
+ knp = ksp->ks_data;
+ knp += EV_NQSTATS;
+
+ knp->value.ui64 = sep->se_cpu_id;
+
+done:
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static int
+sfxge_ev_kstat_init(sfxge_evq_t *sep)
+{
+ sfxge_t *sp = sep->se_sp;
+ unsigned int index = sep->se_index;
+ dev_info_t *dip = sp->s_dip;
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ char name[MAXNAMELEN];
+ unsigned int id;
+ int rc;
+
+ /* Determine the name */
+ (void) snprintf(name, MAXNAMELEN - 1, "%s_evq%04d",
+ ddi_driver_name(dip), index);
+
+ /* Create the set */
+ if ((ksp = kstat_create((char *)ddi_driver_name(dip),
+ ddi_get_instance(dip), name, "queue", KSTAT_TYPE_NAMED,
+ EV_NQSTATS + 1, 0)) == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ sep->se_ksp = ksp;
+
+ ksp->ks_update = sfxge_ev_kstat_update;
+ ksp->ks_private = sep;
+ ksp->ks_lock = &(sep->se_lock);
+
+ /* Initialise the named stats */
+ sep->se_stat = knp = ksp->ks_data;
+ for (id = 0; id < EV_NQSTATS; id++) {
+ kstat_named_init(knp, (char *)efx_ev_qstat_name(sp->s_enp, id),
+ KSTAT_DATA_UINT64);
+ knp++;
+ }
+
+ kstat_named_init(knp, "cpu", KSTAT_DATA_UINT64);
+
+ kstat_install(ksp);
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+sfxge_ev_kstat_fini(sfxge_evq_t *sep)
+{
+ /* Destroy the set */
+ kstat_delete(sep->se_ksp);
+ sep->se_ksp = NULL;
+ sep->se_stat = NULL;
+}
+
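+/* Return the smallest power of two that is greater than or equal to n */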
+inline unsigned int
+pow2_ge(unsigned int n)
+{
+	unsigned int order = 0;
+
+	ASSERT3U(n, >, 0);
+	while ((1ul << order) < n)
+		++order;
+	return (1ul << order);
+}
+
+static int
+sfxge_ev_qinit(sfxge_t *sp, unsigned int index, unsigned int ev_batch)
+{
+ sfxge_evq_t *sep;
+ int rc;
+
+ ASSERT3U(index, <, SFXGE_RX_SCALE_MAX);
+
+ sep = kmem_cache_alloc(index ? sp->s_eqXc : sp->s_eq0c, KM_SLEEP);
+ if (sep == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+ ASSERT3U(sep->se_state, ==, SFXGE_EVQ_UNINITIALIZED);
+
+ sep->se_index = index;
+
+ mutex_init(&(sep->se_lock), NULL,
+ MUTEX_DRIVER, DDI_INTR_PRI(sp->s_intr.si_intr_pri));
+
+ cv_init(&(sep->se_init_kv), NULL, CV_DRIVER, NULL);
+
+ /* Initialize the statistics */
+ if ((rc = sfxge_ev_kstat_init(sep)) != 0)
+ goto fail2;
+
+ sep->se_state = SFXGE_EVQ_INITIALIZED;
+ sep->se_ev_batch = (uint16_t)ev_batch;
+ sp->s_sep[index] = sep;
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ sep->se_index = 0;
+
+ cv_destroy(&(sep->se_init_kv));
+ mutex_destroy(&(sep->se_lock));
+
+ kmem_cache_free(index ? sp->s_eqXc : sp->s_eq0c, sep);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static int
+sfxge_ev_qstart(sfxge_t *sp, unsigned int index)
+{
+ sfxge_evq_t *sep = sp->s_sep[index];
+ sfxge_intr_t *sip = &(sp->s_intr);
+ efx_nic_t *enp = sp->s_enp;
+ efx_ev_callbacks_t *eecp;
+ efsys_mem_t *esmp;
+ clock_t timeout;
+ int rc;
+ uint16_t evq_size = index ? sp->s_evqX_size : sp->s_evq0_size;
+
+ mutex_enter(&(sep->se_lock));
+ esmp = &(sep->se_mem);
+
+ ASSERT3U(sep->se_state, ==, SFXGE_EVQ_INITIALIZED);
+
+ /* Set the memory to all ones */
+ (void) memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq_size));
+
+ /* Program the buffer table */
+ if ((rc = sfxge_sram_buf_tbl_set(sp, sep->se_id, esmp,
+ EFX_EVQ_NBUFS(evq_size))) != 0)
+ goto fail1;
+
+ /* Set up the event callbacks */
+ eecp = &(sep->se_eec);
+ eecp->eec_initialized = sfxge_ev_initialized;
+ eecp->eec_rx = sfxge_ev_rx;
+ eecp->eec_tx = sfxge_ev_tx;
+ eecp->eec_exception = sfxge_ev_exception;
+ eecp->eec_rxq_flush_done = sfxge_ev_rxq_flush_done;
+ eecp->eec_rxq_flush_failed = sfxge_ev_rxq_flush_failed;
+ eecp->eec_txq_flush_done = sfxge_ev_txq_flush_done;
+ eecp->eec_software = sfxge_ev_software;
+ eecp->eec_sram = sfxge_ev_sram;
+ eecp->eec_wake_up = sfxge_ev_wake_up;
+ eecp->eec_timer = sfxge_ev_timer;
+ eecp->eec_link_change = sfxge_ev_link_change;
+
+ /* Create the event queue */
+ if ((rc = efx_ev_qcreate(enp, index, esmp, evq_size, sep->se_id,
+ &(sep->se_eep))) != 0)
+ goto fail2;
+
+ /* Set the default moderation */
+ if ((rc = efx_ev_qmoderate(sep->se_eep, sp->s_ev_moderation)) != 0)
+ goto fail3;
+
+ /* Check that interrupts are enabled at the NIC */
+ if (sip->si_state != SFXGE_INTR_STARTED) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ sep->se_state = SFXGE_EVQ_STARTING;
+
+ /* Prime the event queue for interrupts */
+ if ((rc = efx_ev_qprime(sep->se_eep, sep->se_count)) != 0)
+ goto fail5;
+
+ /* Wait for the initialization event */
+ timeout = ddi_get_lbolt() + drv_usectohz(SFXGE_EV_QSTART_TIMEOUT_USEC);
+ while (sep->se_state != SFXGE_EVQ_STARTED) {
+ if (cv_timedwait(&(sep->se_init_kv), &(sep->se_lock),
+ timeout) < 0) {
+ /* Timeout waiting for initialization */
+ dev_info_t *dip = sp->s_dip;
+
+ DTRACE_PROBE(timeout);
+ dev_err(dip, CE_NOTE,
+ SFXGE_CMN_ERR "evq[%d] qstart timeout", index);
+
+ rc = ETIMEDOUT;
+ goto fail6;
+ }
+ }
+
+ mutex_exit(&(sep->se_lock));
+ return (0);
+
+fail6:
+ DTRACE_PROBE(fail6);
+
+fail5:
+ DTRACE_PROBE(fail5);
+
+ sep->se_state = SFXGE_EVQ_INITIALIZED;
+
+fail4:
+ DTRACE_PROBE(fail4);
+
+fail3:
+ DTRACE_PROBE(fail3);
+
+ /* Destroy the event queue */
+ efx_ev_qdestroy(sep->se_eep);
+ sep->se_eep = NULL;
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Zero out the event handlers */
+ bzero(&(sep->se_eec), sizeof (efx_ev_callbacks_t));
+
+ /* Clear entries from the buffer table */
+ sfxge_sram_buf_tbl_clear(sp, sep->se_id, EFX_EVQ_NBUFS(evq_size));
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(sep->se_lock));
+
+ return (rc);
+}
+
+int
+sfxge_ev_qpoll(sfxge_t *sp, unsigned int index)
+{
+ sfxge_evq_t *sep = sp->s_sep[index];
+ processorid_t cpu_id;
+ int rc;
+ uint16_t evq_size = index ? sp->s_evqX_size : sp->s_evq0_size;
+
+ mutex_enter(&(sep->se_lock));
+
+ if (sep->se_state != SFXGE_EVQ_STARTING &&
+ sep->se_state != SFXGE_EVQ_STARTED) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Make sure the CPU information is up to date */
+ cpu_id = CPU->cpu_id;
+
+ if (cpu_id != sep->se_cpu_id) {
+ sep->se_cpu_id = cpu_id;
+
+ /* sfxge_evq_t->se_lock held */
+ (void) ddi_taskq_dispatch(sp->s_tqp, sfxge_rx_scale_update, sp,
+ DDI_NOSLEEP);
+ }
+
+ /* Synchronize the DMA memory for reading */
+ (void) ddi_dma_sync(sep->se_mem.esm_dma_handle,
+ 0,
+ EFX_EVQ_SIZE(evq_size),
+ DDI_DMA_SYNC_FORKERNEL);
+
+ ASSERT3U(sep->se_rx, ==, 0);
+ ASSERT3U(sep->se_tx, ==, 0);
+ ASSERT3P(sep->se_stp, ==, NULL);
+ ASSERT3P(sep->se_stpp, ==, &(sep->se_stp));
+
+ /* Poll the queue */
+ efx_ev_qpoll(sep->se_eep, &(sep->se_count), &(sep->se_eec),
+ sep);
+
+ sep->se_rx = 0;
+ sep->se_tx = 0;
+
+ /* Perform any pending completion processing */
+ sfxge_ev_qcomplete(sep, B_TRUE);
+
+ /* Re-prime the event queue for interrupts */
+ if ((rc = efx_ev_qprime(sep->se_eep, sep->se_count)) != 0)
+ goto fail2;
+
+ mutex_exit(&(sep->se_lock));
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(sep->se_lock));
+
+ return (rc);
+}
+
+int
+sfxge_ev_qprime(sfxge_t *sp, unsigned int index)
+{
+ sfxge_evq_t *sep = sp->s_sep[index];
+ int rc;
+
+ mutex_enter(&(sep->se_lock));
+
+ if (sep->se_state != SFXGE_EVQ_STARTING &&
+ sep->se_state != SFXGE_EVQ_STARTED) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = efx_ev_qprime(sep->se_eep, sep->se_count)) != 0)
+ goto fail2;
+
+ mutex_exit(&(sep->se_lock));
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(sep->se_lock));
+
+ return (rc);
+}
+
+
+int
+sfxge_ev_qmoderate(sfxge_t *sp, unsigned int index, unsigned int us)
+{
+ sfxge_evq_t *sep = sp->s_sep[index];
+ efx_evq_t *eep = sep->se_eep;
+
+ ASSERT3U(sep->se_state, ==, SFXGE_EVQ_STARTED);
+
+ return (efx_ev_qmoderate(eep, us));
+}
+
+static void
+sfxge_ev_qstop(sfxge_t *sp, unsigned int index)
+{
+ sfxge_evq_t *sep = sp->s_sep[index];
+ uint16_t evq_size;
+
+ mutex_enter(&(sep->se_lock));
+ ASSERT3U(sep->se_state, ==, SFXGE_EVQ_STARTED);
+ sep->se_state = SFXGE_EVQ_INITIALIZED;
+ evq_size = index ? sp->s_evqX_size : sp->s_evq0_size;
+
+ /* Clear the CPU information */
+ sep->se_cpu_id = 0;
+
+ /* Clear the event count */
+ sep->se_count = 0;
+
+ /* Reset the exception flag */
+ sep->se_exception = B_FALSE;
+
+ /* Destroy the event queue */
+ efx_ev_qdestroy(sep->se_eep);
+ sep->se_eep = NULL;
+
+ mutex_exit(&(sep->se_lock));
+
+ /* Zero out the event handlers */
+ bzero(&(sep->se_eec), sizeof (efx_ev_callbacks_t));
+
+ /* Clear entries from the buffer table */
+ sfxge_sram_buf_tbl_clear(sp, sep->se_id, EFX_EVQ_NBUFS(evq_size));
+}
+
+static void
+sfxge_ev_qfini(sfxge_t *sp, unsigned int index)
+{
+ sfxge_evq_t *sep = sp->s_sep[index];
+
+ ASSERT3U(sep->se_state, ==, SFXGE_EVQ_INITIALIZED);
+
+ sp->s_sep[index] = NULL;
+ sep->se_state = SFXGE_EVQ_UNINITIALIZED;
+
+ /* Tear down the statistics */
+ sfxge_ev_kstat_fini(sep);
+
+ cv_destroy(&(sep->se_init_kv));
+ mutex_destroy(&(sep->se_lock));
+
+ sep->se_index = 0;
+
+ kmem_cache_free(index ? sp->s_eqXc : sp->s_eq0c, sep);
+}
+
+int
+sfxge_ev_txlabel_alloc(sfxge_t *sp, unsigned int evq, sfxge_txq_t *stp,
+ unsigned int *labelp)
+{
+ sfxge_evq_t *sep = sp->s_sep[evq];
+ sfxge_txq_t **stpp;
+ unsigned int label;
+ int rc;
+
+ mutex_enter(&(sep->se_lock));
+
+ if (stp == NULL || labelp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
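+	/*
+	 * Scan the label table: fail if this TXQ already has a label on this
+	 * EVQ, otherwise remember the first free slot.
+	 */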
+ stpp = NULL;
+ for (label = 0; label < SFXGE_TX_NLABELS; label++) {
+ if (sep->se_label_stp[label] == stp) {
+ rc = EEXIST;
+ goto fail2;
+ }
+ if ((stpp == NULL) && (sep->se_label_stp[label] == NULL)) {
+ stpp = &sep->se_label_stp[label];
+ }
+ }
+ if (stpp == NULL) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+ *stpp = stp;
+ label = stpp - sep->se_label_stp;
+
+ ASSERT3U(label, <, SFXGE_TX_NLABELS);
+ *labelp = label;
+
+ mutex_exit(&(sep->se_lock));
+ return (0);
+
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(sep->se_lock));
+
+ return (rc);
+}
+
+
+int
+sfxge_ev_txlabel_free(sfxge_t *sp, unsigned int evq, sfxge_txq_t *stp,
+ unsigned int label)
+{
+ sfxge_evq_t *sep = sp->s_sep[evq];
+ int rc;
+
+ mutex_enter(&(sep->se_lock));
+
+	if (stp == NULL || label >= SFXGE_TX_NLABELS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (sep->se_label_stp[label] != stp) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ sep->se_label_stp[label] = NULL;
+
+ mutex_exit(&(sep->se_lock));
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(sep->se_lock));
+
+ return (rc);
+}
+
+
+static kmem_cache_t *
+sfxge_ev_kmem_cache_create(sfxge_t *sp, const char *qname,
+ int (*ctor)(void *, void *, int), void (*dtor)(void *, void *))
+{
+ char name[MAXNAMELEN];
+ kmem_cache_t *eqc;
+
+ (void) snprintf(name, MAXNAMELEN - 1, "%s%d_%s_cache",
+ ddi_driver_name(sp->s_dip), ddi_get_instance(sp->s_dip), qname);
+
+ eqc = kmem_cache_create(name, sizeof (sfxge_evq_t),
+ SFXGE_CPU_CACHE_SIZE, ctor, dtor, NULL, sp, NULL, 0);
+ ASSERT(eqc != NULL);
+ return (eqc);
+}
+
+int
+sfxge_ev_init(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ unsigned int evq0_size;
+ unsigned int evqX_size;
+ unsigned int ev_batch;
+ int index;
+ int rc;
+
+ ASSERT3U(sip->si_state, ==, SFXGE_INTR_INITIALIZED);
+
+ /*
+	 * Must account for the RXQ and TXQ(s); MCDI is not event-completed at
+	 * present. Note that common code does not completely fill the
+	 * descriptor queues.
+ */
+ evqX_size = sp->s_rxq_size + SFXGE_TX_NDESCS;
+ evq0_size = evqX_size + SFXGE_TX_NDESCS; /* only IP checksum TXQ */
+ evq0_size += SFXGE_TX_NDESCS; /* no checksums */
+
+ ASSERT3U(evqX_size, >=, EFX_EVQ_MINNEVS);
+ ASSERT3U(evq0_size, >, evqX_size);
+
+ if (evq0_size > EFX_EVQ_MAXNEVS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ sp->s_evq0_size = pow2_ge(evq0_size);
+ sp->s_evqX_size = pow2_ge(evqX_size);
+
+ /* Read driver parameters */
+ sp->s_ev_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "intr_moderation", SFXGE_DEFAULT_MODERATION);
+
+ ev_batch = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "ev_batch", SFXGE_EV_BATCH);
+
+ /*
+	 * It is slightly perverse to have a cache for a single item, but it
+	 * allows simple alignment control without increasing the allocation
+	 * size.
+ */
+ sp->s_eq0c = sfxge_ev_kmem_cache_create(sp, "evq0", sfxge_ev_q0ctor,
+ sfxge_ev_q0dtor);
+ sp->s_eqXc = sfxge_ev_kmem_cache_create(sp, "evqX", sfxge_ev_qXctor,
+ sfxge_ev_qXdtor);
+
+ /* Initialize the event queue(s) */
+ for (index = 0; index < sip->si_nalloc; index++) {
+ if ((rc = sfxge_ev_qinit(sp, index, ev_batch)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ while (--index >= 0)
+ sfxge_ev_qfini(sp, index);
+ sp->s_ev_moderation = 0;
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ kmem_cache_destroy(sp->s_eqXc);
+ kmem_cache_destroy(sp->s_eq0c);
+ sp->s_eqXc = NULL;
+ sp->s_eq0c = NULL;
+
+ return (rc);
+}
+
+int
+sfxge_ev_start(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ efx_nic_t *enp = sp->s_enp;
+ int index;
+ int rc;
+
+ ASSERT3U(sip->si_state, ==, SFXGE_INTR_STARTED);
+
+ /* Initialize the event module */
+ if ((rc = efx_ev_init(enp)) != 0)
+ goto fail1;
+
+ /* Start the event queues */
+ for (index = 0; index < sip->si_nalloc; index++) {
+ if ((rc = sfxge_ev_qstart(sp, index)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Stop the event queue(s) */
+ while (--index >= 0)
+ sfxge_ev_qstop(sp, index);
+
+ /* Tear down the event module */
+ efx_ev_fini(enp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+void
+sfxge_ev_moderation_get(sfxge_t *sp, unsigned int *usp)
+{
+ *usp = sp->s_ev_moderation;
+}
+
+int
+sfxge_ev_moderation_set(sfxge_t *sp, unsigned int us)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ int index;
+ int rc;
+
+ if (sip->si_state != SFXGE_INTR_STARTED)
+ return (ENODEV);
+
+ for (index = 0; index < sip->si_nalloc; index++) {
+ if ((rc = sfxge_ev_qmoderate(sp, index, us)) != 0)
+ goto fail1;
+ }
+
+ sp->s_ev_moderation = us;
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+	/* The only error path is when the value to set is invalid. */
+ ASSERT3U(index, ==, 0);
+
+ return (rc);
+}
+
+void
+sfxge_ev_stop(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ efx_nic_t *enp = sp->s_enp;
+ int index;
+
+ ASSERT3U(sip->si_state, ==, SFXGE_INTR_STARTED);
+
+ /* Stop the event queue(s) */
+ index = sip->si_nalloc;
+ while (--index >= 0)
+ sfxge_ev_qstop(sp, index);
+
+ /* Tear down the event module */
+ efx_ev_fini(enp);
+}
+
+void
+sfxge_ev_fini(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ int index;
+
+ ASSERT3U(sip->si_state, ==, SFXGE_INTR_INITIALIZED);
+
+ sp->s_ev_moderation = 0;
+
+ /* Tear down the event queue(s) */
+ index = sip->si_nalloc;
+ while (--index >= 0)
+ sfxge_ev_qfini(sp, index);
+
+ kmem_cache_destroy(sp->s_eqXc);
+ kmem_cache_destroy(sp->s_eq0c);
+ sp->s_eqXc = NULL;
+ sp->s_eq0c = NULL;
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_gld_v3.c b/usr/src/uts/common/io/sfxge/sfxge_gld_v3.c
new file mode 100644
index 0000000000..8cd1526aa5
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_gld_v3.c
@@ -0,0 +1,1331 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/strsubr.h>
+#include <sys/dlpi.h>
+#include <sys/ksynch.h>
+#include <sys/cpuvar.h>
+#include <sys/cpu.h>
+#include <sys/vlan.h>
+
+#include <inet/tcp.h>
+
+#include "sfxge.h"
+
+void
+sfxge_gld_link_update(sfxge_t *sp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ link_state_t link;
+
+ switch (smp->sm_link_mode) {
+ case EFX_LINK_UNKNOWN:
+ link = LINK_STATE_UNKNOWN;
+ break;
+ case EFX_LINK_DOWN:
+ link = LINK_STATE_DOWN;
+ break;
+ default:
+ link = LINK_STATE_UP;
+ }
+
+ mac_link_update(sp->s_mh, link);
+}
+
+void
+sfxge_gld_mtu_update(sfxge_t *sp)
+{
+#ifdef _USE_MTU_UPDATE
+ (void) mac_maxsdu_update(sp->s_mh, sp->s_mtu);
+#else
+ _NOTE(ARGUNUSED(sp));
+#endif
+}
+
+void
+sfxge_gld_rx_post(sfxge_t *sp, unsigned int index, mblk_t *mp)
+{
+ _NOTE(ARGUNUSED(index))
+
+ mac_rx(sp->s_mh, NULL, mp);
+}
+
+
+void
+sfxge_gld_rx_push(sfxge_t *sp)
+{
+ _NOTE(ARGUNUSED(sp))
+}
+
+
+static uint64_t
+sfxge_phy_dfl_cap_test64(sfxge_t *sp, uint32_t field)
+{
+ return (sfxge_phy_cap_test(sp, EFX_PHY_CAP_DEFAULT, field, NULL) ?
+ 1ull : 0ull);
+}
+
+
+static uint64_t
+sfxge_phy_cur_cap_test64(sfxge_t *sp, uint32_t field)
+{
+ return (sfxge_phy_cap_test(sp, EFX_PHY_CAP_CURRENT, field, NULL) ?
+ 1ull : 0ull);
+}
+
+static uint64_t
+sfxge_phy_lp_cap_test64(sfxge_t *sp, uint32_t field)
+{
+ return (sfxge_phy_lp_cap_test(sp, field) ? 1ull : 0ull);
+}
+
+static int
+sfxge_gld_getstat(void *arg, unsigned int id, uint64_t *valp)
+{
+ sfxge_t *sp = arg;
+ efx_nic_t *enp = sp->s_enp;
+ int rc;
+
+ if (sp->s_mac.sm_state != SFXGE_MAC_STARTED) {
+ rc = ENODEV;
+ goto fail1;
+ }
+
+ switch (id) {
+ case MAC_STAT_IFSPEED: {
+ unsigned int speed;
+
+ sfxge_mac_link_speed_get(sp, &speed);
+
+ *valp = (uint64_t)speed * 1000000ull;
+ break;
+ }
+ case ETHER_STAT_LINK_DUPLEX: {
+ sfxge_link_duplex_t duplex;
+
+ sfxge_mac_link_duplex_get(sp, &duplex);
+
+ switch (duplex) {
+ case SFXGE_LINK_DUPLEX_UNKNOWN:
+ *valp = LINK_DUPLEX_UNKNOWN;
+ break;
+
+ case SFXGE_LINK_DUPLEX_HALF:
+ *valp = LINK_DUPLEX_HALF;
+ break;
+
+ case SFXGE_LINK_DUPLEX_FULL:
+ *valp = LINK_DUPLEX_FULL;
+ break;
+
+ default:
+ ASSERT(B_FALSE);
+ break;
+ }
+ break;
+ }
+
+ case ETHER_STAT_CAP_40GFDX:
+ *valp = sfxge_phy_dfl_cap_test64(sp, EFX_PHY_CAP_40000FDX);
+ break;
+ case ETHER_STAT_CAP_10GFDX:
+ *valp = sfxge_phy_dfl_cap_test64(sp, EFX_PHY_CAP_10000FDX);
+ break;
+ case ETHER_STAT_CAP_1000FDX:
+ *valp = sfxge_phy_dfl_cap_test64(sp, EFX_PHY_CAP_1000FDX);
+ break;
+ case ETHER_STAT_CAP_1000HDX:
+ *valp = sfxge_phy_dfl_cap_test64(sp, EFX_PHY_CAP_1000HDX);
+ break;
+ case ETHER_STAT_CAP_100FDX:
+ *valp = sfxge_phy_dfl_cap_test64(sp, EFX_PHY_CAP_100FDX);
+ break;
+ case ETHER_STAT_CAP_100HDX:
+ *valp = sfxge_phy_dfl_cap_test64(sp, EFX_PHY_CAP_100HDX);
+ break;
+ case ETHER_STAT_CAP_10FDX:
+ *valp = sfxge_phy_dfl_cap_test64(sp, EFX_PHY_CAP_10FDX);
+ break;
+ case ETHER_STAT_CAP_10HDX:
+ *valp = sfxge_phy_dfl_cap_test64(sp, EFX_PHY_CAP_10HDX);
+ break;
+ case ETHER_STAT_CAP_ASMPAUSE:
+ *valp = sfxge_phy_dfl_cap_test64(sp, EFX_PHY_CAP_ASYM);
+ break;
+ case ETHER_STAT_CAP_PAUSE:
+ *valp = sfxge_phy_dfl_cap_test64(sp, EFX_PHY_CAP_PAUSE);
+ break;
+ case ETHER_STAT_CAP_AUTONEG:
+ *valp = sfxge_phy_dfl_cap_test64(sp, EFX_PHY_CAP_AN);
+ break;
+ case ETHER_STAT_ADV_CAP_40GFDX:
+ *valp = sfxge_phy_cur_cap_test64(sp, EFX_PHY_CAP_40000FDX);
+ break;
+ case ETHER_STAT_ADV_CAP_10GFDX:
+ *valp = sfxge_phy_cur_cap_test64(sp, EFX_PHY_CAP_10000FDX);
+ break;
+ case ETHER_STAT_ADV_CAP_1000FDX:
+ *valp = sfxge_phy_cur_cap_test64(sp, EFX_PHY_CAP_1000FDX);
+ break;
+ case ETHER_STAT_ADV_CAP_1000HDX:
+ *valp = sfxge_phy_cur_cap_test64(sp, EFX_PHY_CAP_1000HDX);
+ break;
+ case ETHER_STAT_ADV_CAP_100FDX:
+ *valp = sfxge_phy_cur_cap_test64(sp, EFX_PHY_CAP_100FDX);
+ break;
+ case ETHER_STAT_ADV_CAP_100HDX:
+ *valp = sfxge_phy_cur_cap_test64(sp, EFX_PHY_CAP_100HDX);
+ break;
+ case ETHER_STAT_ADV_CAP_10FDX:
+ *valp = sfxge_phy_cur_cap_test64(sp, EFX_PHY_CAP_10FDX);
+ break;
+ case ETHER_STAT_ADV_CAP_10HDX:
+ *valp = sfxge_phy_cur_cap_test64(sp, EFX_PHY_CAP_10HDX);
+ break;
+ case ETHER_STAT_ADV_CAP_ASMPAUSE:
+ *valp = sfxge_phy_cur_cap_test64(sp, EFX_PHY_CAP_ASYM);
+ break;
+ case ETHER_STAT_ADV_CAP_PAUSE:
+ *valp = sfxge_phy_cur_cap_test64(sp, EFX_PHY_CAP_PAUSE);
+ break;
+ case ETHER_STAT_ADV_CAP_AUTONEG:
+ *valp = sfxge_phy_cur_cap_test64(sp, EFX_PHY_CAP_AN);
+ break;
+ case ETHER_STAT_LP_CAP_40GFDX:
+ *valp = sfxge_phy_lp_cap_test64(sp, EFX_PHY_CAP_40000FDX);
+ break;
+ case ETHER_STAT_LP_CAP_10GFDX:
+ *valp = sfxge_phy_lp_cap_test64(sp, EFX_PHY_CAP_10000FDX);
+ break;
+ case ETHER_STAT_LP_CAP_1000FDX:
+ *valp = sfxge_phy_lp_cap_test64(sp, EFX_PHY_CAP_1000FDX);
+ break;
+ case ETHER_STAT_LP_CAP_1000HDX:
+ *valp = sfxge_phy_lp_cap_test64(sp, EFX_PHY_CAP_1000HDX);
+ break;
+ case ETHER_STAT_LP_CAP_100FDX:
+ *valp = sfxge_phy_lp_cap_test64(sp, EFX_PHY_CAP_100FDX);
+ break;
+ case ETHER_STAT_LP_CAP_100HDX:
+ *valp = sfxge_phy_lp_cap_test64(sp, EFX_PHY_CAP_100HDX);
+ break;
+ case ETHER_STAT_LP_CAP_10FDX:
+ *valp = sfxge_phy_lp_cap_test64(sp, EFX_PHY_CAP_10FDX);
+ break;
+ case ETHER_STAT_LP_CAP_10HDX:
+ *valp = sfxge_phy_lp_cap_test64(sp, EFX_PHY_CAP_10HDX);
+ break;
+ case ETHER_STAT_LP_CAP_ASMPAUSE:
+ *valp = sfxge_phy_lp_cap_test64(sp, EFX_PHY_CAP_ASYM);
+ break;
+ case ETHER_STAT_LP_CAP_PAUSE:
+ *valp = sfxge_phy_lp_cap_test64(sp, EFX_PHY_CAP_PAUSE);
+ break;
+ case ETHER_STAT_LP_CAP_AUTONEG:
+ *valp = sfxge_phy_lp_cap_test64(sp, EFX_PHY_CAP_AN);
+ break;
+
+ case ETHER_STAT_XCVR_ADDR: {
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+ *valp = encp->enc_port;
+ break;
+ }
+ case ETHER_STAT_XCVR_ID: {
+ uint32_t oui;
+
+ if ((rc = efx_phy_oui_get(sp->s_enp, &oui)) != 0)
+ goto fail2;
+ *valp = oui;
+ break;
+ }
+ case MAC_STAT_MULTIRCV:
+ sfxge_mac_stat_get(sp, EFX_MAC_RX_MULTICST_PKTS, valp);
+ break;
+
+ case MAC_STAT_BRDCSTRCV:
+ sfxge_mac_stat_get(sp, EFX_MAC_RX_BRDCST_PKTS, valp);
+ break;
+
+ case MAC_STAT_MULTIXMT:
+ sfxge_mac_stat_get(sp, EFX_MAC_TX_MULTICST_PKTS, valp);
+ break;
+
+ case MAC_STAT_BRDCSTXMT:
+ sfxge_mac_stat_get(sp, EFX_MAC_TX_BRDCST_PKTS, valp);
+ break;
+
+ case MAC_STAT_IERRORS:
+ sfxge_mac_stat_get(sp, EFX_MAC_RX_ERRORS, valp);
+ break;
+
+ case MAC_STAT_OERRORS:
+ sfxge_mac_stat_get(sp, EFX_MAC_TX_ERRORS, valp);
+ break;
+
+ case MAC_STAT_RBYTES:
+ sfxge_mac_stat_get(sp, EFX_MAC_RX_OCTETS, valp);
+ break;
+
+ case MAC_STAT_IPACKETS:
+ sfxge_mac_stat_get(sp, EFX_MAC_RX_PKTS, valp);
+ break;
+
+ case MAC_STAT_OBYTES:
+ sfxge_mac_stat_get(sp, EFX_MAC_TX_OCTETS, valp);
+ break;
+
+ case MAC_STAT_OPACKETS:
+ sfxge_mac_stat_get(sp, EFX_MAC_TX_PKTS, valp);
+ break;
+
+ case MAC_STAT_NORCVBUF:
+ sfxge_mac_stat_get(sp, EFX_MAC_RX_DROP_EVENTS, valp);
+ break;
+
+ case ETHER_STAT_FCS_ERRORS:
+ sfxge_mac_stat_get(sp, EFX_MAC_RX_FCS_ERRORS, valp);
+ break;
+
+ default:
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+ return (0);
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static int
+sfxge_gld_start(void *arg)
+{
+ sfxge_t *sp = arg;
+ int rc;
+
+ if ((rc = sfxge_start(sp, B_FALSE)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+sfxge_gld_stop(void *arg)
+{
+ sfxge_t *sp = arg;
+
+ sfxge_stop(sp);
+}
+
+static int
+sfxge_gld_setpromisc(void *arg, boolean_t on)
+{
+ sfxge_t *sp = arg;
+
+	return (sfxge_mac_promisc_set(sp,
+	    (on) ? SFXGE_PROMISC_ALL_PHYS : SFXGE_PROMISC_OFF));
+}
+
+static int
+sfxge_gld_multicst(void *arg, boolean_t add, const uint8_t *addr)
+{
+ sfxge_t *sp = arg;
+ int rc;
+
+ if (add) {
+ if ((rc = sfxge_mac_multicst_add(sp, addr)) != 0)
+ goto fail1;
+ } else {
+ if ((rc = sfxge_mac_multicst_remove(sp, addr)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ return (rc);
+}
+
+static int
+sfxge_gld_unicst(void *arg, const uint8_t *addr)
+{
+ sfxge_t *sp = arg;
+ int rc;
+
+ if ((rc = sfxge_mac_unicst_set(sp, (uint8_t *)addr)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+sfxge_gld_ioctl(void *arg, queue_t *wq, mblk_t *mp)
+{
+ sfxge_t *sp = arg;
+
+ sfxge_ioctl(sp, wq, mp);
+}
+
+
+static mblk_t *
+sfxge_gld_tx(void *arg, mblk_t *mp)
+{
+ sfxge_t *sp = arg;
+ mblk_t *next;
+
+ /* Walk the packet chain */
+ do {
+ /* Break the packet out of the chain */
+ next = mp->b_next;
+ mp->b_next = NULL;
+
+ if (next != NULL)
+ prefetch_read_many(next);
+
+ /* Post the packet in the appropriate transmit queue */
+ if (sfxge_tx_packet_add(sp, mp) == ENOSPC) {
+ mp->b_next = next;
+ return (mp);
+ }
+
+ mp = next;
+ } while (mp != NULL);
+
+ return (NULL);
+}
+
+/*
+ * This must not be static, in order to be tunable by /etc/system.
+ * (Static declarations may be optimized away by the compiler.)
+ */
+boolean_t sfxge_lso = B_TRUE;
+
+static boolean_t
+sfxge_gld_getcapab(void *arg, mac_capab_t cap, void *cap_arg)
+{
+ int rc;
+
+ _NOTE(ARGUNUSED(arg))
+
+ switch (cap) {
+ case MAC_CAPAB_LSO: {
+ mac_capab_lso_t *lsop = cap_arg;
+
+ /* Check whether LSO is disabled */
+ if (!sfxge_lso) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ DTRACE_PROBE(lso);
+
+ lsop->lso_flags = LSO_TX_BASIC_TCP_IPV4;
+ lsop->lso_basic_tcp_ipv4.lso_max = TCP_MAX_LSO_LENGTH;
+ break;
+ }
+ case MAC_CAPAB_HCKSUM: {
+ uint32_t *hcksump = cap_arg;
+
+ DTRACE_PROBE(cksum);
+
+ *hcksump = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
+ break;
+ }
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ return (B_TRUE);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (B_FALSE);
+}
+
+/*
+ * GLDv3 driver-private property names must be preceded by an underscore - see
+ * mc_getprop(9E).
+ */
+#define SFXGE_PRIV_PROP_NAME(s) ("_" #s)
+
+#define SFXGE_XSTR(s) SFXGE_STR(s)
+#define SFXGE_STR(s) #s
+
+static void
+sfxge_gld_priv_prop_info(sfxge_t *sp, const char *name,
+ mac_prop_info_handle_t handle)
+{
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(rx_coalesce_mode)) == 0) {
+ mac_prop_info_set_default_uint32(handle,
+ SFXGE_RX_COALESCE_OFF);
+ mac_prop_info_set_perm(handle, MAC_PROP_PERM_RW);
+ return;
+ }
+
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(rx_scale_count)) == 0) {
+ mac_prop_info_set_default_uint32(handle, ncpus);
+ mac_prop_info_set_range_uint32(handle, 1,
+ (uint32_t)sp->s_intr.si_nalloc);
+ mac_prop_info_set_perm(handle, MAC_PROP_PERM_RW);
+ return;
+ }
+
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(intr_moderation)) == 0) {
+ mac_prop_info_set_default_uint32(handle,
+ SFXGE_DEFAULT_MODERATION);
+ mac_prop_info_set_range_uint32(handle,
+ 0, efx_nic_cfg_get(sp->s_enp)->enc_evq_timer_max_us);
+ mac_prop_info_set_perm(handle, MAC_PROP_PERM_RW);
+ return;
+ }
+
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(mon_polling)) == 0) {
+ mac_prop_info_set_default_uint8(handle, 0);
+ mac_prop_info_set_perm(handle, MAC_PROP_PERM_RW);
+ return;
+ }
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(mcdi_logging)) == 0) {
+ mac_prop_info_set_default_uint8(handle, 0);
+ mac_prop_info_set_perm(handle, MAC_PROP_PERM_RW);
+ return;
+ }
+#endif
+ DTRACE_PROBE(unknown_priv_prop);
+}
+
+
+static int
+sfxge_gld_priv_prop_get(sfxge_t *sp, const char *name,
+ unsigned int size, void *valp)
+{
+ long val;
+ int rc;
+
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(rx_coalesce_mode)) == 0) {
+ sfxge_rx_coalesce_mode_t mode;
+
+ sfxge_rx_coalesce_mode_get(sp, &mode);
+
+ val = (long)mode;
+ goto done;
+ }
+
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(rx_scale_count)) == 0) {
+ unsigned int count;
+
+ if (sfxge_rx_scale_count_get(sp, &count) != 0)
+ count = 0;
+
+ val = (long)count;
+ goto done;
+ }
+
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(intr_moderation)) == 0) {
+ unsigned int us;
+
+ sfxge_ev_moderation_get(sp, &us);
+
+ val = (long)us;
+ goto done;
+ }
+
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(mon_polling)) == 0) {
+ val = (long)sp->s_mon.sm_polling;
+ goto done;
+ }
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(mcdi_logging)) == 0) {
+ val = (long)sp->s_mcdi_logging;
+ goto done;
+ }
+#endif
+
+ rc = ENOTSUP;
+ goto fail1;
+
+done:
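+	/* Private property values are returned as decimal strings */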
+ (void) snprintf(valp, size, "%ld", val);
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
+static int
+sfxge_gld_priv_prop_set(sfxge_t *sp, const char *name, unsigned int size,
+ const void *valp)
+{
+ long val;
+ int rc = 0;
+
+ _NOTE(ARGUNUSED(size))
+
+ (void) ddi_strtol(valp, (char **)NULL, 0, &val);
+
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(rx_coalesce_mode)) == 0) {
+ if ((rc = sfxge_rx_coalesce_mode_set(sp,
+ (sfxge_rx_coalesce_mode_t)val)) != 0)
+ goto fail1;
+
+ goto done;
+ }
+
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(rx_scale_count)) == 0) {
+ if ((rc = sfxge_rx_scale_count_set(sp, (unsigned int)val)) != 0)
+ goto fail1;
+
+ goto done;
+ }
+
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(intr_moderation)) == 0) {
+ if ((rc = sfxge_ev_moderation_set(sp, (unsigned int) val)) != 0)
+ goto fail1;
+
+ goto done;
+ }
+
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(mon_polling)) == 0) {
+ sp->s_mon.sm_polling = (int)val;
+ goto done;
+ }
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (strcmp(name, SFXGE_PRIV_PROP_NAME(mcdi_logging)) == 0) {
+ sp->s_mcdi_logging = (int)val;
+ goto done;
+ }
+#endif
+
+
+ rc = ENOTSUP;
+ goto fail1;
+
+done:
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_MCDI_LOGGING
+#define SFXGE_N_NAMED_PROPS 4
+#else
+#define SFXGE_N_NAMED_PROPS 3
+#endif
+
+static void
+sfxge_gld_priv_prop_init(sfxge_t *sp)
+{
+ sfxge_mac_priv_prop_t *mac_priv_props;
+ unsigned int nprops = 0;
+
+ /*
+ * We have SFXGE_N_NAMED_PROPS (3 or 4) named properties, and the array
+ * must be terminated by a NULL pointer.
+ */
+ sp->s_mac_priv_props_alloc = SFXGE_N_NAMED_PROPS + 1;
+ sp->s_mac_priv_props = kmem_zalloc(sizeof (sfxge_mac_priv_prop_t) *
+ sp->s_mac_priv_props_alloc,
+ KM_SLEEP);
+
+ /*
+ * Driver-private property names start with an underscore - see
+ * mc_getprop(9E).
+ */
+
+ mac_priv_props = sp->s_mac_priv_props;
+
+ *mac_priv_props = kmem_zalloc(MAXLINKPROPNAME, KM_SLEEP);
+ (void) snprintf(*mac_priv_props, MAXLINKPROPNAME - 1,
+ SFXGE_PRIV_PROP_NAME(rx_coalesce_mode));
+ mac_priv_props++;
+ nprops++;
+
+ *mac_priv_props = kmem_zalloc(MAXLINKPROPNAME, KM_SLEEP);
+ (void) snprintf(*mac_priv_props, MAXLINKPROPNAME - 1,
+ SFXGE_PRIV_PROP_NAME(rx_scale_count));
+ mac_priv_props++;
+ nprops++;
+
+ *mac_priv_props = kmem_zalloc(MAXLINKPROPNAME, KM_SLEEP);
+ (void) snprintf(*mac_priv_props, MAXLINKPROPNAME - 1,
+ SFXGE_PRIV_PROP_NAME(intr_moderation));
+ mac_priv_props++;
+ nprops++;
+
+#if EFSYS_OPT_MCDI_LOGGING
+ *mac_priv_props = kmem_zalloc(MAXLINKPROPNAME, KM_SLEEP);
+ (void) snprintf(*mac_priv_props, MAXLINKPROPNAME - 1,
+ SFXGE_PRIV_PROP_NAME(mcdi_logging));
+ mac_priv_props++;
+ nprops++;
+#endif
+
+ ASSERT3U((nprops + 1), ==, sp->s_mac_priv_props_alloc);
+
+ /* Terminated by a NULL pointer */
+ *mac_priv_props = NULL;
+}
+
+static void
+sfxge_gld_priv_prop_fini(sfxge_t *sp)
+{
+ char **mac_priv_props;
+ unsigned int id;
+
+ mac_priv_props = sp->s_mac_priv_props;
+
+ for (id = 0; id < SFXGE_N_NAMED_PROPS; id++) {
+ kmem_free(*mac_priv_props, MAXLINKPROPNAME);
+ mac_priv_props++;
+ }
+
+ kmem_free(sp->s_mac_priv_props, sizeof (sfxge_mac_priv_prop_t) *
+ sp->s_mac_priv_props_alloc);
+ sp->s_mac_priv_props = NULL;
+}
+
+
+static int
+sfxge_gld_getprop(void *arg, const char *name, mac_prop_id_t id,
+ unsigned int size, void *valp)
+{
+ sfxge_t *sp = arg;
+ uint32_t flag = EFX_PHY_CAP_CURRENT;
+ uint8_t *v8 = ((uint8_t *)valp);
+ int rc;
+
+ /* check size */
+ switch (id) {
+ case MAC_PROP_DUPLEX:
+ if (size < sizeof (link_duplex_t)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+ case MAC_PROP_FLOWCTRL:
+ if (size < sizeof (link_flowctrl_t)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+ case MAC_PROP_SPEED:
+ case MAC_PROP_STATUS:
+ if (size < sizeof (uint64_t)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+ case MAC_PROP_MTU:
+ if (size < sizeof (uint32_t)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+ case MAC_PROP_EN_AUTONEG:
+ case MAC_PROP_AUTONEG:
+ case MAC_PROP_EN_40GFDX_CAP:
+ case MAC_PROP_ADV_40GFDX_CAP:
+ case MAC_PROP_EN_10GFDX_CAP:
+ case MAC_PROP_ADV_10GFDX_CAP:
+ case MAC_PROP_EN_1000FDX_CAP:
+ case MAC_PROP_ADV_1000FDX_CAP:
+ case MAC_PROP_EN_1000HDX_CAP:
+ case MAC_PROP_ADV_1000HDX_CAP:
+ case MAC_PROP_EN_100FDX_CAP:
+ case MAC_PROP_ADV_100FDX_CAP:
+ case MAC_PROP_EN_100HDX_CAP:
+ case MAC_PROP_ADV_100HDX_CAP:
+ case MAC_PROP_EN_10FDX_CAP:
+ case MAC_PROP_ADV_10FDX_CAP:
+ case MAC_PROP_EN_10HDX_CAP:
+ case MAC_PROP_ADV_10HDX_CAP:
+ if (size < sizeof (uint8_t)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+ case MAC_PROP_PRIVATE:
+ /* sfxge_gld_priv_prop_get should do any size checking */
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ switch (id) {
+ case MAC_PROP_DUPLEX: {
+ sfxge_link_duplex_t duplex;
+
+ sfxge_mac_link_duplex_get(sp, &duplex);
+
+ switch (duplex) {
+ case SFXGE_LINK_DUPLEX_UNKNOWN:
+ *((link_duplex_t *)valp) = LINK_DUPLEX_UNKNOWN;
+ break;
+
+ case SFXGE_LINK_DUPLEX_HALF:
+ *((link_duplex_t *)valp) = LINK_DUPLEX_HALF;
+ break;
+
+ case SFXGE_LINK_DUPLEX_FULL:
+ *((link_duplex_t *)valp) = LINK_DUPLEX_FULL;
+ break;
+
+ default:
+ ASSERT(B_FALSE);
+ break;
+ }
+
+ break;
+ }
+ case MAC_PROP_SPEED: {
+ unsigned int speed;
+
+ sfxge_mac_link_speed_get(sp, &speed);
+
+ *((uint64_t *)valp) = (uint64_t)speed * 1000000ull;
+
+ break;
+ }
+ case MAC_PROP_STATUS: {
+ boolean_t up;
+
+ sfxge_mac_link_check(sp, &up);
+
+ *((link_state_t *)valp) = (up) ?
+ LINK_STATE_UP : LINK_STATE_DOWN;
+
+ break;
+ }
+ case MAC_PROP_EN_AUTONEG:
+ case MAC_PROP_AUTONEG:
+ *v8 = sfxge_phy_cap_test(sp, flag, EFX_PHY_CAP_AN, NULL);
+ break;
+ case MAC_PROP_EN_40GFDX_CAP:
+ case MAC_PROP_ADV_40GFDX_CAP:
+ *v8 = sfxge_phy_cap_test(sp, flag, EFX_PHY_CAP_40000FDX, NULL);
+ break;
+ case MAC_PROP_EN_10GFDX_CAP:
+ case MAC_PROP_ADV_10GFDX_CAP:
+ *v8 = sfxge_phy_cap_test(sp, flag, EFX_PHY_CAP_10000FDX, NULL);
+ break;
+ case MAC_PROP_EN_1000FDX_CAP:
+ case MAC_PROP_ADV_1000FDX_CAP:
+ *v8 = sfxge_phy_cap_test(sp, flag, EFX_PHY_CAP_1000FDX, NULL);
+ break;
+ case MAC_PROP_EN_1000HDX_CAP:
+ case MAC_PROP_ADV_1000HDX_CAP:
+ *v8 = sfxge_phy_cap_test(sp, flag, EFX_PHY_CAP_1000HDX, NULL);
+ break;
+ case MAC_PROP_EN_100FDX_CAP:
+ case MAC_PROP_ADV_100FDX_CAP:
+ *v8 = sfxge_phy_cap_test(sp, flag, EFX_PHY_CAP_100FDX, NULL);
+ break;
+ case MAC_PROP_EN_100HDX_CAP:
+ case MAC_PROP_ADV_100HDX_CAP:
+ *v8 = sfxge_phy_cap_test(sp, flag, EFX_PHY_CAP_100HDX, NULL);
+ break;
+ case MAC_PROP_EN_10FDX_CAP:
+ case MAC_PROP_ADV_10FDX_CAP:
+ *v8 = sfxge_phy_cap_test(sp, flag, EFX_PHY_CAP_10FDX, NULL);
+ break;
+ case MAC_PROP_EN_10HDX_CAP:
+ case MAC_PROP_ADV_10HDX_CAP:
+ *v8 = sfxge_phy_cap_test(sp, flag, EFX_PHY_CAP_10HDX, NULL);
+ break;
+ case MAC_PROP_MTU:
+ *((uint32_t *)valp) = (uint32_t)(sp->s_mtu);
+ break;
+
+ case MAC_PROP_FLOWCTRL: {
+ unsigned int fcntl;
+
+ sfxge_mac_fcntl_get(sp, &fcntl);
+
+ switch (fcntl) {
+ case 0:
+ *((link_flowctrl_t *)valp) = LINK_FLOWCTRL_NONE;
+ break;
+
+ case EFX_FCNTL_GENERATE:
+ *((link_flowctrl_t *)valp) = LINK_FLOWCTRL_RX;
+ break;
+
+ case EFX_FCNTL_RESPOND:
+ *((link_flowctrl_t *)valp) = LINK_FLOWCTRL_TX;
+ break;
+
+ case (EFX_FCNTL_GENERATE | EFX_FCNTL_RESPOND):
+ *((link_flowctrl_t *)valp) = LINK_FLOWCTRL_BI;
+ break;
+
+ default:
+ ASSERT(B_FALSE);
+ break;
+ }
+ break;
+ }
+ case MAC_PROP_PRIVATE:
+ if ((rc = sfxge_gld_priv_prop_get(sp, name, size, valp)) != 0)
+ goto fail2;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ DTRACE_PROBE(fail3);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
+static int
+sfxge_gld_setprop(void *arg, const char *name, mac_prop_id_t id,
+ unsigned int size, const void *valp)
+{
+ sfxge_t *sp = arg;
+ int v8 = *(uint8_t *)valp;
+ int rc;
+
+	/* Get the size checks out of the way */
+ switch (id) {
+ /*
+	 * On Sol11 (no updates) dladm seems to use MAC_PROP_AUTONEG to set
+	 * the autoneg parameter. This does not match the scheme suggested in
+	 * mac(9E), but as both map to the same thing in the driver and in
+	 * dladm it doesn't matter.
+ */
+ case MAC_PROP_AUTONEG:
+ case MAC_PROP_EN_AUTONEG:
+ case MAC_PROP_EN_40GFDX_CAP:
+ case MAC_PROP_EN_10GFDX_CAP:
+ case MAC_PROP_EN_1000FDX_CAP:
+ case MAC_PROP_EN_1000HDX_CAP:
+ case MAC_PROP_EN_100FDX_CAP:
+ case MAC_PROP_EN_100HDX_CAP:
+ case MAC_PROP_EN_10FDX_CAP:
+ case MAC_PROP_EN_10HDX_CAP:
+ if (size < sizeof (uint8_t)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+ case MAC_PROP_MTU:
+ if (size < sizeof (uint32_t)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+ case MAC_PROP_FLOWCTRL:
+ if (size < sizeof (link_flowctrl_t)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+ case MAC_PROP_PRIVATE:
+ /* sfxge_gld_priv_prop_set should do any size checking */
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ switch (id) {
+ /*
+ * It is unclear which of MAC_PROP_AUTONEG and MAC_PROP_EN_AUTONEG is
+ * used. Try both.
+ */
+ case MAC_PROP_AUTONEG:
+ case MAC_PROP_EN_AUTONEG:
+ if ((rc = sfxge_phy_cap_set(sp, EFX_PHY_CAP_AN, v8)) != 0)
+ goto fail2;
+ break;
+ case MAC_PROP_EN_40GFDX_CAP:
+ if ((rc = sfxge_phy_cap_set(sp, EFX_PHY_CAP_40000FDX, v8)) != 0)
+ goto fail2;
+ break;
+ case MAC_PROP_EN_10GFDX_CAP: {
+ if ((rc = sfxge_phy_cap_set(sp, EFX_PHY_CAP_10000FDX, v8)) != 0)
+ goto fail2;
+ break;
+ }
+ case MAC_PROP_EN_1000FDX_CAP: {
+ if ((rc = sfxge_phy_cap_set(sp, EFX_PHY_CAP_1000FDX, v8)) != 0)
+ goto fail2;
+ break;
+ }
+ case MAC_PROP_EN_1000HDX_CAP: {
+ if ((rc = sfxge_phy_cap_set(sp, EFX_PHY_CAP_1000HDX, v8)) != 0)
+ goto fail2;
+ break;
+ }
+ case MAC_PROP_EN_100FDX_CAP: {
+ if ((rc = sfxge_phy_cap_set(sp, EFX_PHY_CAP_100FDX, v8)) != 0)
+ goto fail2;
+ break;
+ }
+ case MAC_PROP_EN_100HDX_CAP: {
+ if ((rc = sfxge_phy_cap_set(sp, EFX_PHY_CAP_100HDX, v8)) != 0)
+ goto fail2;
+ break;
+ }
+ case MAC_PROP_EN_10FDX_CAP: {
+ if ((rc = sfxge_phy_cap_set(sp, EFX_PHY_CAP_10FDX, v8)) != 0)
+ goto fail2;
+ break;
+ }
+ case MAC_PROP_EN_10HDX_CAP: {
+ if ((rc = sfxge_phy_cap_set(sp, EFX_PHY_CAP_10HDX, v8)) != 0)
+ goto fail2;
+ break;
+ }
+ case MAC_PROP_MTU: {
+ size_t mtu = (size_t)(*((uint32_t *)valp));
+
+ if (mtu > EFX_MAC_SDU_MAX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ sp->s_mtu = mtu;
+
+ DTRACE_PROBE(restart_mtu);
+ (void) sfxge_restart_dispatch(sp, DDI_SLEEP, SFXGE_HW_OK,
+ "MTU changing", (uint32_t)mtu);
+
+ break;
+ }
+ case MAC_PROP_FLOWCTRL: {
+ unsigned int fcntl = 0;
+
+ switch (*((link_flowctrl_t *)valp)) {
+ case LINK_FLOWCTRL_NONE:
+ fcntl = 0;
+ break;
+
+ case LINK_FLOWCTRL_RX:
+ fcntl = EFX_FCNTL_GENERATE;
+ break;
+
+ case LINK_FLOWCTRL_TX:
+ fcntl = EFX_FCNTL_RESPOND;
+ break;
+
+ case LINK_FLOWCTRL_BI:
+ fcntl = EFX_FCNTL_GENERATE | EFX_FCNTL_RESPOND;
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if ((rc = sfxge_mac_fcntl_set(sp, fcntl)) != 0)
+ goto fail3;
+
+ break;
+ }
+ case MAC_PROP_PRIVATE:
+ if ((rc = sfxge_gld_priv_prop_set(sp, name, size, valp)) != 0)
+ goto fail4;
+
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ DTRACE_PROBE(fail5);
+
+fail4:
+ DTRACE_PROBE(fail4);
+
+fail3:
+ DTRACE_PROBE(fail3);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+sfxge_gld_propinfo(void *arg, const char *name, mac_prop_id_t id,
+ mac_prop_info_handle_t handle)
+{
+ sfxge_t *sp = arg;
+ efx_phy_cap_type_t phy_cap = EFX_PHY_CAP_INVALID;
+ switch (id) {
+ case MAC_PROP_DUPLEX:
+ mac_prop_info_set_perm(handle, MAC_PROP_PERM_READ);
+ return;
+ case MAC_PROP_FLOWCTRL:
+ mac_prop_info_set_perm(handle, MAC_PROP_PERM_RW);
+ mac_prop_info_set_default_link_flowctrl(handle,
+ LINK_FLOWCTRL_BI);
+ return;
+ case MAC_PROP_SPEED:
+ mac_prop_info_set_perm(handle, MAC_PROP_PERM_READ);
+ return;
+ case MAC_PROP_STATUS:
+ mac_prop_info_set_perm(handle, MAC_PROP_PERM_READ);
+ return;
+ case MAC_PROP_MTU: {
+ uint32_t mtu_default;
+ mac_prop_info_set_perm(handle, MAC_PROP_PERM_RW);
+ mtu_default = ddi_prop_get_int(DDI_DEV_T_ANY,
+ sp->s_dip, DDI_PROP_DONTPASS, "mtu", ETHERMTU);
+ mac_prop_info_set_default_uint32(handle, mtu_default);
+ return;
+ }
+ case MAC_PROP_PRIVATE:
+ sfxge_gld_priv_prop_info(sp, name, handle);
+ return;
+ case MAC_PROP_EN_AUTONEG:
+ case MAC_PROP_AUTONEG:
+ phy_cap = EFX_PHY_CAP_AN;
+ break;
+ case MAC_PROP_EN_10GFDX_CAP:
+ case MAC_PROP_ADV_10GFDX_CAP:
+ phy_cap = EFX_PHY_CAP_10000FDX;
+ break;
+ case MAC_PROP_EN_1000FDX_CAP:
+ case MAC_PROP_ADV_1000FDX_CAP:
+ phy_cap = EFX_PHY_CAP_1000FDX;
+ break;
+ case MAC_PROP_EN_1000HDX_CAP:
+ case MAC_PROP_ADV_1000HDX_CAP:
+ phy_cap = EFX_PHY_CAP_1000HDX;
+ break;
+ case MAC_PROP_EN_100FDX_CAP:
+ case MAC_PROP_ADV_100FDX_CAP:
+ phy_cap = EFX_PHY_CAP_100FDX;
+ break;
+ case MAC_PROP_EN_100HDX_CAP:
+ case MAC_PROP_ADV_100HDX_CAP:
+ phy_cap = EFX_PHY_CAP_100HDX;
+ break;
+ case MAC_PROP_EN_10FDX_CAP:
+ case MAC_PROP_ADV_10FDX_CAP:
+ phy_cap = EFX_PHY_CAP_10FDX;
+ break;
+ case MAC_PROP_EN_10HDX_CAP:
+ case MAC_PROP_ADV_10HDX_CAP:
+ phy_cap = EFX_PHY_CAP_10HDX;
+ break;
+ default:
+ DTRACE_PROBE(unknown_prop);
+ return;
+ }
+ if (phy_cap != EFX_PHY_CAP_INVALID) {
+ boolean_t rw;
+ uint8_t cap_default;
+ cap_default = sfxge_phy_cap_test(sp, EFX_PHY_CAP_DEFAULT,
+ phy_cap, &rw);
+ if (rw == B_TRUE)
+ mac_prop_info_set_perm(handle, MAC_PROP_PERM_RW);
+ else
+ mac_prop_info_set_perm(handle, MAC_PROP_PERM_READ);
+ mac_prop_info_set_default_uint8(handle, cap_default);
+ }
+}
+
+int
+sfxge_gld_register(sfxge_t *sp)
+{
+ mac_callbacks_t *mcp;
+ mac_register_t *mrp;
+ mac_handle_t mh;
+ uint8_t addr[ETHERADDRL];
+ int rc;
+
+ if ((mrp = mac_alloc(MAC_VERSION)) == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ mrp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
+ mrp->m_driver = sp;
+ mrp->m_dip = sp->s_dip;
+
+ /* Set up the callbacks */
+ mcp = &(sp->s_mc);
+ bzero(mcp, sizeof (mac_callbacks_t));
+
+ mcp->mc_getstat = sfxge_gld_getstat;
+ mcp->mc_start = sfxge_gld_start;
+ mcp->mc_stop = sfxge_gld_stop;
+ mcp->mc_setpromisc = sfxge_gld_setpromisc;
+ mcp->mc_multicst = sfxge_gld_multicst;
+ mcp->mc_unicst = sfxge_gld_unicst;
+ mcp->mc_tx = sfxge_gld_tx;
+
+ mcp->mc_callbacks |= MC_IOCTL;
+ mcp->mc_ioctl = sfxge_gld_ioctl;
+
+ mcp->mc_callbacks |= MC_GETCAPAB;
+ mcp->mc_getcapab = sfxge_gld_getcapab;
+
+ mcp->mc_callbacks |= MC_SETPROP;
+ mcp->mc_setprop = sfxge_gld_setprop;
+
+ mcp->mc_callbacks |= MC_GETPROP;
+ mcp->mc_getprop = sfxge_gld_getprop;
+
+ mcp->mc_callbacks |= MC_PROPINFO;
+ mcp->mc_propinfo = sfxge_gld_propinfo;
+
+ mrp->m_callbacks = mcp;
+
+ mrp->m_src_addr = addr;
+
+ if ((rc = sfxge_mac_unicst_get(sp, SFXGE_UNICST_BIA,
+ mrp->m_src_addr)) != 0)
+ goto fail2;
+
+ mrp->m_min_sdu = 0;
+ mrp->m_max_sdu = sp->s_mtu;
+
+ mrp->m_margin = VLAN_TAGSZ;
+
+ /* Set up the private properties */
+ /* NOTE: m_priv_props added in s10u9 */
+	sfxge_gld_priv_prop_init(sp);
+	mrp->m_priv_props = sp->s_mac_priv_props;
+
+ /* NOTE: m_flags added in s11.0 */
+ /* NOTE: m_multicast_sdu added in s11.0 */
+
+ /* Register the interface */
+ if ((rc = mac_register(mrp, &mh)) != 0)
+ goto fail3;
+
+ /* Free the stack registration object */
+ kmem_free(mrp, sizeof (mac_register_t));
+
+ sp->s_mh = mh;
+
+ return (0);
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Free the stack registration object */
+ mac_free(mrp);
+
+ /* Tear down the private properties */
+ sfxge_gld_priv_prop_fini(sp);
+
+ /* Clear the callbacks */
+ bzero(&(sp->s_mc), sizeof (mac_callbacks_t));
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+int
+sfxge_gld_unregister(sfxge_t *sp)
+{
+ mac_handle_t mh = sp->s_mh;
+ int rc;
+
+ if ((rc = mac_unregister(mh)) != 0)
+ goto fail1;
+
+ sp->s_mh = NULL;
+
+ /* Tear down the private properties */
+ sfxge_gld_priv_prop_fini(sp);
+
+ /* Clear the callbacks */
+ bzero(&(sp->s_mc), sizeof (mac_callbacks_t));
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_hash.c b/usr/src/uts/common/io/sfxge/sfxge_hash.c
new file mode 100644
index 0000000000..8d3e380f8c
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_hash.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/param.h>
+#include <sys/int_limits.h>
+#include <sys/byteorder.h>
+#include <sys/random.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <netinet/in.h>
+#include "sfxge.h"
+#include "efx.h"
+
+/*
+ * The largest amount of data over which the hash may be calculated is
+ * a 4-tuple of source/destination IPv6 addresses (2 x 16 bytes) and
+ * source/destination TCP port numbers (2 x 2 bytes), adding up to 36 bytes.
+ */
+#define SFXGE_TOEPLITZ_IN_MAX \
+ (2 * (sizeof (struct in6_addr) + sizeof (in_port_t)))
+#define SFXGE_TOEPLITZ_CACHE_SIZE (SFXGE_TOEPLITZ_IN_MAX * (UINT8_MAX + 1))
+
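+/*
+ * XOR together the cached per-byte sub-hashes for 'datalen' bytes of 'input',
+ * where 'pos' is the byte offset of 'input' within the overall hash input.
+ */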
+static uint32_t
+toeplitz_hash(const uint32_t *cache, const uint8_t *input,
+ unsigned pos, unsigned datalen)
+{
+ uint32_t hash = 0;
+ for (; datalen != 0; datalen--, pos++, input++) {
+ hash ^= cache[pos * (UINT8_MAX + 1) + *input];
+ }
+
+ return (hash);
+}
+
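+/*
+ * Compute the Toeplitz hash over the source/destination address pair and,
+ * if either port is non-zero, over the source/destination port pair as well.
+ */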
+uint32_t
+sfxge_toeplitz_hash(sfxge_t *sp, unsigned int addr_size,
+ uint8_t *src_addr, uint16_t src_port, uint8_t *dst_addr, uint16_t dst_port)
+{
+ uint32_t hash = 0;
+ unsigned pos = 0;
+
+ hash ^= toeplitz_hash(sp->s_toeplitz_cache, src_addr, pos, addr_size);
+ pos += addr_size;
+ hash ^= toeplitz_hash(sp->s_toeplitz_cache, dst_addr, pos, addr_size);
+ pos += addr_size;
+ if (src_port != 0 || dst_port != 0) {
+ hash ^= toeplitz_hash(sp->s_toeplitz_cache,
+ (const uint8_t *)&src_port, pos, sizeof (src_port));
+ pos += sizeof (src_port);
+ hash ^= toeplitz_hash(sp->s_toeplitz_cache,
+ (const uint8_t *)&dst_port, pos, sizeof (dst_port));
+ }
+ return (hash);
+}
+
+/*
+ * The algorithm to calculate RSS Toeplitz hash is essentially as follows:
+ * - Regard a Toeplitz key and an input as bit strings, with the
+ * most significant bit of the first byte being the first bit
+ * - Let's have a 32-bit window sliding over the Toeplitz key bit by bit
+ * - Let the initial value of the hash be zero
+ * - Then for every bit in the input that is set to 1, XOR the value of the
+ * window at a given bit position into the resulting hash
+ *
+ * First we note that since XOR is commutative and associative, the
+ * resulting hash is just a XOR of subhashes for every input bit:
+ * H = H_0 XOR H_1 XOR ... XOR H_n (1)
+ * Then we note that every H_i is only dependent on the value of i and
+ * the value of i'th bit of input, but not on any preceding or following
+ * input bits.
+ * Then we note that (1) holds also for any bit sequences,
+ * e.g. for bytes of input:
+ * H = H_0_7 XOR H_8_15 XOR ... XOR H_(n-7)_n (2)
+ * and every
+ * H_i_j = H_i XOR H_(i+1) ... XOR H_j. (3)
+ *
+ * It naturally follows that H_i_(i+7) only depends on the value of the byte
+ * and the position of the byte in the input.
+ * Therefore we may pre-calculate the value of each byte sub-hash H_i_(i+7)
+ * for each possible byte value and each possible byte input position, and
+ * then just assemble the hash of the packet byte-by-byte instead of
+ * bit-by-bit.
+ *
+ * The amount of memory required for such a cache is not prohibitive:
+ * - we have at most 36 bytes of input, each holding 256 possible values
+ * - and the hash is 32-bit wide
+ * - hence, we need only 36 * 256 * 4 = 36kBytes of cache.
+ *
+ * The performance gain, at least on synthetic benchmarks, is significant:
+ * cache lookup is about 15 times faster than direct hash calculation
+ */
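+/*
+ * With the cache in place, the contribution of input byte value 'b' at byte
+ * position 'pos' is simply cache[pos * 256 + b], so a single table lookup
+ * replaces the eight per-bit window shifts and conditional XORs.
+ */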
+const uint32_t *
+toeplitz_cache_init(const uint8_t *key)
+{
+ uint32_t *cache = kmem_alloc(SFXGE_TOEPLITZ_CACHE_SIZE *
+ sizeof (uint32_t), KM_SLEEP);
+ unsigned i;
+
+ for (i = 0; i < SFXGE_TOEPLITZ_IN_MAX; i++, key++) {
+ uint32_t key_bits[NBBY] = { 0 };
+ unsigned j;
+ unsigned mask;
+ unsigned byte;
+
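+		/*
+		 * key_bits[j] is the 32-bit window over the Toeplitz key that
+		 * is XORed in when bit j (numbered from the MSB) of the input
+		 * byte at position i is set.
+		 */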
+#if defined(BE_IN32)
+ key_bits[0] = BE_IN32(key);
+#else
+ key_bits[0] = BE_32(*(uint32_t *)key);
+#endif
+ for (j = 1, mask = 1 << (NBBY - 1); j < NBBY; j++, mask >>= 1) {
+ key_bits[j] = key_bits[j - 1] << 1;
+ if ((key[sizeof (uint32_t)] & mask) != 0)
+ key_bits[j] |= 1;
+ }
+
+ for (byte = 0; byte <= UINT8_MAX; byte++) {
+ uint32_t res = 0;
+ for (j = 0, mask = 1 << (NBBY - 1);
+ j < NBBY;
+ j++, mask >>= 1) {
+ if (byte & mask)
+ res ^= key_bits[j];
+ }
+ cache[i * (UINT8_MAX + 1) + byte] = res;
+ }
+ }
+ return (cache);
+}
+
+
+int
+sfxge_toeplitz_hash_init(sfxge_t *sp)
+{
+ int rc;
+ uint8_t toeplitz_key[SFXGE_TOEPLITZ_KEY_LEN];
+
+ (void) random_get_pseudo_bytes(toeplitz_key, sizeof (toeplitz_key));
+
+ if ((rc = efx_rx_scale_mode_set(sp->s_enp, EFX_RX_HASHALG_TOEPLITZ,
+ (1 << EFX_RX_HASH_IPV4) | (1 << EFX_RX_HASH_TCPIPV4) |
+ (1 << EFX_RX_HASH_IPV6) | (1 << EFX_RX_HASH_TCPIPV6), B_TRUE)) != 0)
+ return (rc);
+
+ if ((rc = efx_rx_scale_key_set(sp->s_enp, toeplitz_key,
+ sizeof (toeplitz_key))) != 0)
+ return (rc);
+
+ sp->s_toeplitz_cache = toeplitz_cache_init(toeplitz_key);
+
+ return (0);
+}
+
+void
+sfxge_toeplitz_hash_fini(sfxge_t *sp)
+{
+ if (sp->s_toeplitz_cache != NULL) {
+ kmem_free((void *)sp->s_toeplitz_cache,
+		    SFXGE_TOEPLITZ_CACHE_SIZE * sizeof (uint32_t));
+ sp->s_toeplitz_cache = NULL;
+ }
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_intr.c b/usr/src/uts/common/io/sfxge/sfxge_intr.c
new file mode 100644
index 0000000000..33d9d67f5e
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_intr.c
@@ -0,0 +1,812 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/atomic.h>
+#include <sys/modctl.h>
+#include <sys/conf.h>
+#include <sys/ethernet.h>
+#include <sys/pci.h>
+#include <sys/pcie.h>
+
+#include "sfxge.h"
+
+#include "efx.h"
+
+
+/* Interrupt table DMA attributes */
+static ddi_device_acc_attr_t sfxge_intr_devacc = {
+
+ DDI_DEVICE_ATTR_V0, /* devacc_attr_version */
+ DDI_NEVERSWAP_ACC, /* devacc_attr_endian_flags */
+ DDI_STRICTORDER_ACC /* devacc_attr_dataorder */
+};
+
+static ddi_dma_attr_t sfxge_intr_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffffffffffull, /* dma_attr_addr_hi */
+ 0xffffffffffffffffull, /* dma_attr_count_max */
+ EFX_INTR_SIZE, /* dma_attr_align */
+ 0xffffffff, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffffffffffull, /* dma_attr_maxxfer */
+ 0xffffffffffffffffull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
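+/* Interrupt handler for legacy (INTx) line interrupts */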
+static unsigned int
+sfxge_intr_line(caddr_t arg1, caddr_t arg2)
+{
+ sfxge_t *sp = (void *)arg1;
+ efx_nic_t *enp = sp->s_enp;
+ sfxge_intr_t *sip = &(sp->s_intr);
+ unsigned int index;
+ boolean_t fatal;
+ uint32_t qmask;
+ int rc;
+
+ _NOTE(ARGUNUSED(arg2))
+
+ ASSERT3U(sip->si_type, ==, EFX_INTR_LINE);
+
+ if (sip->si_state != SFXGE_INTR_STARTED &&
+ sip->si_state != SFXGE_INTR_TESTING) {
+ rc = DDI_INTR_UNCLAIMED;
+ goto done;
+ }
+
+ if (sip->si_state == SFXGE_INTR_TESTING) {
+ sip->si_mask |= 1; /* only one interrupt */
+ rc = DDI_INTR_CLAIMED;
+ goto done;
+ }
+
+ efx_intr_status_line(enp, &fatal, &qmask);
+
+ if (fatal) {
+ sfxge_intr_fatal(sp);
+
+ rc = DDI_INTR_CLAIMED;
+ goto done;
+ }
+
+ if (qmask != 0) {
+ for (index = 0; index < EFX_INTR_NEVQS; index++) {
+ if (qmask & (1 << index))
+ (void) sfxge_ev_qpoll(sp, index);
+ }
+
+ sip->si_zero_count = 0;
+ sfxge_gld_rx_push(sp);
+ rc = DDI_INTR_CLAIMED;
+ goto done;
+ }
+
+ /*
+ * bug15671/bug17203 workaround. Return CLAIMED for the first ISR=0
+ * interrupt, and poll all evqs for work. For subsequent ISR=0
+ * interrupts (the line must be shared in this case), just rearm the
+ * event queues to ensure we don't miss an interrupt.
+ */
+ if (sip->si_zero_count++ == 0) {
+ for (index = 0; index < EFX_INTR_NEVQS; index++) {
+ if (sp->s_sep[index] != NULL)
+ (void) sfxge_ev_qpoll(sp, index);
+ }
+
+ rc = DDI_INTR_CLAIMED;
+ } else {
+ for (index = 0; index < EFX_INTR_NEVQS; index++) {
+ if (sp->s_sep[index] != NULL)
+ (void) sfxge_ev_qprime(sp, index);
+ }
+
+ rc = DDI_INTR_UNCLAIMED;
+ }
+
+done:
+ return (rc);
+}
+
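+/* Interrupt handler for MSI-X message interrupts; arg2 carries the index */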
+static unsigned int
+sfxge_intr_message(caddr_t arg1, caddr_t arg2)
+{
+ sfxge_t *sp = (void *)arg1;
+ efx_nic_t *enp = sp->s_enp;
+ sfxge_intr_t *sip = &(sp->s_intr);
+ unsigned int index = (unsigned int)(uintptr_t)arg2;
+ boolean_t fatal;
+ int rc;
+
+ ASSERT3U(sip->si_type, ==, EFX_INTR_MESSAGE);
+
+ if (sip->si_state != SFXGE_INTR_STARTED &&
+ sip->si_state != SFXGE_INTR_TESTING) {
+ rc = DDI_INTR_UNCLAIMED;
+ goto done;
+ }
+
+ if (sip->si_state == SFXGE_INTR_TESTING) {
+ uint64_t mask;
+
+ do {
+ mask = sip->si_mask;
+ } while (atomic_cas_64(&(sip->si_mask), mask,
+ mask | (1 << index)) != mask);
+
+ rc = DDI_INTR_CLAIMED;
+ goto done;
+ }
+
+ efx_intr_status_message(enp, index, &fatal);
+
+ if (fatal) {
+ sfxge_intr_fatal(sp);
+
+ rc = DDI_INTR_CLAIMED;
+ goto done;
+ }
+
+ (void) sfxge_ev_qpoll(sp, index);
+
+ sfxge_gld_rx_push(sp);
+ rc = DDI_INTR_CLAIMED;
+
+done:
+ return (rc);
+}
+
+static int
+sfxge_intr_bus_enable(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ ddi_intr_handler_t *handler;
+ int add_index;
+ int en_index;
+ int err;
+ int rc;
+
+ /* Serialise all instances to avoid problems seen in bug31184. */
+ mutex_enter(&sfxge_global_lock);
+
+ switch (sip->si_type) {
+ case EFX_INTR_MESSAGE:
+ handler = sfxge_intr_message;
+ break;
+
+ case EFX_INTR_LINE:
+ handler = sfxge_intr_line;
+ break;
+
+ default:
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "bus_enable: unknown intr type (si_type=%d nalloc=%d)",
+ sip->si_type, sip->si_nalloc);
+ ASSERT(B_FALSE);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Try to add the handlers */
+ for (add_index = 0; add_index < sip->si_nalloc; add_index++) {
+ unsigned int pri;
+
+ /* This cannot fail unless given invalid inputs. */
+ err = ddi_intr_get_pri(sip->si_table[add_index], &pri);
+ ASSERT(err == DDI_SUCCESS);
+
+ DTRACE_PROBE2(pri, unsigned int, add_index, unsigned int, pri);
+
+ err = ddi_intr_add_handler(sip->si_table[add_index], handler,
+ (caddr_t)sp, (caddr_t)(uintptr_t)add_index);
+ if (err != DDI_SUCCESS) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "bus_enable: ddi_intr_add_handler failed"
+ " err=%d (h=%p idx=%d nalloc=%d)",
+ err, (void *)sip->si_table[add_index], add_index,
+ sip->si_nalloc);
+
+ rc = (err == DDI_EINVAL) ? EINVAL : EFAULT;
+ goto fail2;
+ }
+ }
+
+ /* Get interrupt capabilities */
+ err = ddi_intr_get_cap(sip->si_table[0], &(sip->si_cap));
+ if (err != DDI_SUCCESS) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "bus_enable: ddi_intr_get_cap failed"
+ " err=%d (h=%p idx=%d nalloc=%d)",
+ err, (void *)sip->si_table[0], 0, sip->si_nalloc);
+
+ if (err == DDI_EINVAL)
+ rc = EINVAL;
+ else if (err == DDI_ENOTSUP)
+ rc = ENOTSUP;
+ else
+ rc = EFAULT;
+
+ goto fail3;
+ }
+
+ /* Enable interrupts at the bus */
+ if (sip->si_cap & DDI_INTR_FLAG_BLOCK) {
+ en_index = 0; /* Silence gcc */
+ err = ddi_intr_block_enable(sip->si_table, sip->si_nalloc);
+ if (err != DDI_SUCCESS) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "bus_enable: ddi_intr_block_enable failed"
+ " err=%d (table=%p nalloc=%d)",
+ err, (void *)sip->si_table, sip->si_nalloc);
+
+ rc = (err == DDI_EINVAL) ? EINVAL : EFAULT;
+ goto fail4;
+ }
+ } else {
+ for (en_index = 0; en_index < sip->si_nalloc; en_index++) {
+ err = ddi_intr_enable(sip->si_table[en_index]);
+ if (err != DDI_SUCCESS) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "bus_enable: ddi_intr_enable failed"
+ " err=%d (h=%p idx=%d nalloc=%d)",
+ err, (void *)sip->si_table[en_index],
+ en_index, sip->si_nalloc);
+
+ rc = (err == DDI_EINVAL) ? EINVAL : EFAULT;
+ goto fail4;
+ }
+ }
+ }
+
+ mutex_exit(&sfxge_global_lock);
+ return (0);
+
+fail4:
+ DTRACE_PROBE(fail4);
+
+ /* Disable the enabled handlers */
+ if (!(sip->si_cap & DDI_INTR_FLAG_BLOCK)) {
+ while (--en_index >= 0) {
+ err = ddi_intr_disable(sip->si_table[en_index]);
+ if (err != DDI_SUCCESS) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "bus_enable: ddi_intr_disable"
+ " failed err=%d (h=%p idx=%d nalloc=%d)",
+ err, (void *)sip->si_table[en_index],
+ en_index, sip->si_nalloc);
+ }
+ }
+ }
+
+fail3:
+ DTRACE_PROBE(fail3);
+
+ /* Remove all handlers */
+ add_index = sip->si_nalloc;
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Remove remaining handlers */
+ while (--add_index >= 0) {
+ err = ddi_intr_remove_handler(sip->si_table[add_index]);
+ if (err != DDI_SUCCESS) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "bus_enable: ddi_intr_remove_handler"
+ " failed err=%d (h=%p idx=%d nalloc=%d)",
+ err, (void *)sip->si_table[add_index], add_index,
+ sip->si_nalloc);
+ }
+ }
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&sfxge_global_lock);
+ return (rc);
+}
+
+static void
+sfxge_intr_bus_disable(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ int index;
+ int err;
+
+ /* Serialise all instances to avoid problems seen in bug31184. */
+ mutex_enter(&sfxge_global_lock);
+
+ /* Disable interrupts at the bus */
+ if (sip->si_cap & DDI_INTR_FLAG_BLOCK) {
+ err = ddi_intr_block_disable(sip->si_table, sip->si_nalloc);
+ if (err != DDI_SUCCESS) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "bus_disable: ddi_intr_block_disable"
+ " failed err=%d (table=%p nalloc=%d)",
+ err, (void *)sip->si_table, sip->si_nalloc);
+ }
+ } else {
+ index = sip->si_nalloc;
+ while (--index >= 0) {
+ err = ddi_intr_disable(sip->si_table[index]);
+ if (err != DDI_SUCCESS) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "bus_disable: ddi_intr_disable"
+ " failed err=%d (h=%p idx=%d nalloc=%d)",
+ err, (void *)sip->si_table[index], index,
+ sip->si_nalloc);
+ }
+ }
+ }
+
+ sip->si_cap = 0;
+
+ /* Remove all handlers */
+ index = sip->si_nalloc;
+ while (--index >= 0) {
+ err = ddi_intr_remove_handler(sip->si_table[index]);
+ if (err != DDI_SUCCESS) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "bus_disable: ddi_intr_remove_handler"
+ " failed err=%d (h=%p idx=%d nalloc=%d)",
+ err, (void *)sip->si_table[index], index,
+ sip->si_nalloc);
+ }
+ }
+
+ mutex_exit(&sfxge_global_lock);
+}
+
+static int
+sfxge_intr_nic_enable(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ efsys_mem_t *esmp = &(sip->si_mem);
+ efx_nic_t *enp = sp->s_enp;
+ unsigned int index;
+ uint64_t mask;
+ unsigned int count;
+ int rc;
+
+ /* Zero the memory */
+ bzero(esmp->esm_base, EFX_INTR_SIZE);
+
+ /* Enable interrupts at the NIC */
+ if ((rc = efx_intr_init(enp, sip->si_type, esmp)) != 0)
+ goto fail1;
+
+ efx_intr_enable(enp);
+
+ /* FIXME FIXME FIXME */
+ if (sp->s_family == EFX_FAMILY_HUNTINGTON) {
+ /* Disable interrupt test until supported on Huntington. */
+ return (0);
+ }
+ /* FIXME FIXME FIXME */
+
+ /* Test the interrupts */
+ mask = 0;
+ for (index = 0; index < sip->si_nalloc; index++) {
+ mask |= (1 << index);
+
+ rc = efx_intr_trigger(enp, index);
+ ASSERT3U(rc, ==, 0);
+ }
+
+ /* Wait for the tests to complete */
+ count = 0;
+ do {
+ DTRACE_PROBE1(wait, unsigned int, count);
+
+ /* Spin for 1 ms */
+ drv_usecwait(1000);
+
+ /*
+ * Check to see that all the test interrupts have been
+ * processed.
+ */
+ if ((mask & sip->si_mask) == mask)
+ goto done;
+
+ } while (++count < 20);
+
+ rc = ETIMEDOUT;
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "Interrupt test failed (mask=%"PRIx64" got=%"
+ PRIx64"). NIC is disabled",
+ mask, sip->si_mask);
+
+ DTRACE_PROBE2(int_test_fail, uint64_t, mask, uint64_t, sip->si_mask);
+
+ sip->si_mask = 0;
+
+ /* Disable interrupts at the NIC */
+ efx_intr_disable(enp);
+ efx_intr_fini(enp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+sfxge_intr_nic_disable(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ efx_nic_t *enp = sp->s_enp;
+
+ sip->si_mask = 0;
+
+ /* Disable interrupts at the NIC */
+ efx_intr_disable(enp);
+ efx_intr_fini(enp);
+}
+
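+/* Return the largest power of two that is less than or equal to n */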
+static inline unsigned
+pow2_le(unsigned long n)
+{
+ unsigned int order = 1;
+ ASSERT3U(n, >, 0);
+ while ((1ul << order) <= n) ++order;
+ return (1ul << (order - 1));
+}
+
+int
+sfxge_intr_init(sfxge_t *sp)
+{
+ dev_info_t *dip = sp->s_dip;
+ sfxge_intr_t *sip = &(sp->s_intr);
+ efsys_mem_t *esmp = &(sip->si_mem);
+ sfxge_dma_buffer_attr_t dma_attr;
+ int err;
+ int rc;
+ int types;
+ int type;
+ int index;
+ unsigned int nalloc;
+ int navail;
+
+ SFXGE_OBJ_CHECK(sip, sfxge_intr_t);
+
+ ASSERT3U(sip->si_state, ==, SFXGE_INTR_UNINITIALIZED);
+
+#ifdef __sparc
+ /* PSARC 2007/453 */
+ (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
+ "#msix-request", NULL, 0);
+#endif
+
+ /* Get the map of supported interrupt types */
+ err = ddi_intr_get_supported_types(dip, &types);
+ if (err != DDI_SUCCESS) {
+ dev_err(dip, CE_WARN, SFXGE_CMN_ERR
+ "intr_init: ddi_intr_get_supported_types failed err=%d",
+ err);
+
+ if (err == DDI_EINVAL)
+ rc = EINVAL;
+ else if (err == DDI_INTR_NOTFOUND)
+ rc = ENOENT;
+ else
+ rc = EFAULT;
+
+ goto fail1;
+ }
+
+ /* Choose most favourable type */
+ if (types & DDI_INTR_TYPE_MSIX) {
+ DTRACE_PROBE(msix);
+
+ type = DDI_INTR_TYPE_MSIX;
+ sip->si_type = EFX_INTR_MESSAGE;
+ } else {
+ DTRACE_PROBE(fixed);
+
+ ASSERT(types & DDI_INTR_TYPE_FIXED);
+
+ type = DDI_INTR_TYPE_FIXED;
+ sip->si_type = EFX_INTR_LINE;
+ }
+
+ /* Get the number of available interrupts */
+ navail = 0;
+ err = ddi_intr_get_navail(dip, type, &navail);
+ if (err != DDI_SUCCESS) {
+ dev_err(dip, CE_WARN, SFXGE_CMN_ERR
+ "intr_init: ddi_intr_get_navail failed err=%d", err);
+
+ if (err == DDI_EINVAL)
+ rc = EINVAL;
+ else if (err == DDI_INTR_NOTFOUND)
+ rc = ENOENT;
+ else
+ rc = EFAULT;
+
+ goto fail2;
+ }
+
+ /* Double-check */
+ if (navail == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ /*
+ * Allow greater number of MSI-X interrupts than CPUs.
+ * This can be useful to prevent RX no desc drops; See task 32179.
+ * Limit non MSI-X interrupts to a single instance.
+ */
+ if (type != DDI_INTR_TYPE_MSIX)
+ navail = 1;
+ else
+ navail = min(navail, sfxge_rx_scale_prop_get(sp));
+
+ DTRACE_PROBE1(navail, unsigned int, navail);
+
+ /* Allocate a handle table */
+ sip->si_table_size = navail * sizeof (ddi_intr_handle_t);
+ sip->si_table = kmem_zalloc(sip->si_table_size, KM_SLEEP);
+
+ /*
+ * Allocate interrupt handles.
+ * Serialise all device instances to avoid problems seen in bug31184.
+ */
+ mutex_enter(&sfxge_global_lock);
+
+ err = ddi_intr_alloc(dip, sip->si_table, type, 0,
+ navail, &(sip->si_nalloc), DDI_INTR_ALLOC_NORMAL);
+
+ mutex_exit(&sfxge_global_lock);
+
+ if (err != DDI_SUCCESS) {
+ dev_err(dip, CE_WARN, SFXGE_CMN_ERR
+ "intr_init: ddi_intr_alloc failed err=%d"
+ " (navail=%d nalloc=%d)",
+ err, navail, sip->si_nalloc);
+
+ if (err == DDI_EINVAL)
+ rc = EINVAL;
+ else if (err == DDI_EAGAIN)
+ rc = EAGAIN;
+ else if (err == DDI_INTR_NOTFOUND)
+ rc = ENOENT;
+ else
+ rc = EFAULT;
+
+ goto fail3;
+ }
+
+ /* Double-check */
+ if (sip->si_nalloc == 0) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ /* Round down to a power of 2 */
+ nalloc = pow2_le(sip->si_nalloc);
+
+ /* Free off any excess handles */
+ mutex_enter(&sfxge_global_lock);
+
+ index = sip->si_nalloc;
+ while (--index >= nalloc) {
+ (void) ddi_intr_free(sip->si_table[index]);
+ sip->si_table[index] = NULL;
+ }
+
+ mutex_exit(&sfxge_global_lock);
+
+ sip->si_nalloc = nalloc;
+ DTRACE_PROBE1(nalloc, unsigned int, sip->si_nalloc);
+
+ dma_attr.sdba_dip = sp->s_dip;
+ dma_attr.sdba_dattrp = &sfxge_intr_dma_attr;
+ dma_attr.sdba_callback = DDI_DMA_SLEEP;
+ dma_attr.sdba_length = EFX_INTR_SIZE;
+ dma_attr.sdba_memflags = DDI_DMA_CONSISTENT;
+ dma_attr.sdba_devaccp = &sfxge_intr_devacc;
+ dma_attr.sdba_bindflags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
+ dma_attr.sdba_maxcookies = 1;
+ dma_attr.sdba_zeroinit = B_TRUE;
+
+ if ((rc = sfxge_dma_buffer_create(esmp, &dma_attr)) != 0)
+ goto fail4;
+
+ /* Store the highest priority for convenience */
+ sip->si_intr_pri = 0;
+ for (index = 0; index < sip->si_nalloc; index++) {
+ uint_t pri;
+ if ((rc = ddi_intr_get_pri(sip->si_table[index], &pri)) != 0)
+ goto fail5;
+ if (pri > sip->si_intr_pri)
+ sip->si_intr_pri = pri;
+ }
+
+ sip->si_state = SFXGE_INTR_INITIALIZED;
+ return (0);
+
+fail5:
+ DTRACE_PROBE(fail5);
+
+fail4:
+ DTRACE_PROBE(fail4);
+
+ /* Free interrupt handles */
+	mutex_enter(&sfxge_global_lock);
+
+ index = sip->si_nalloc;
+ while (--index >= 0) {
+ err = ddi_intr_free(sip->si_table[index]);
+ if (err != DDI_SUCCESS) {
+ dev_err(dip, CE_WARN, SFXGE_CMN_ERR
+ "intr_init: ddi_intr_free failed err=%d"
+ " (h=%p idx=%d nalloc=%d)",
+ err, (void *)sip->si_table[index], index,
+ sip->si_nalloc);
+ }
+ sip->si_table[index] = NULL;
+ }
+ sip->si_nalloc = 0;
+
+ mutex_exit(&sfxge_global_lock);
+
+fail3:
+ DTRACE_PROBE(fail3);
+
+ /* Free the handle table */
+ kmem_free(sip->si_table, sip->si_table_size);
+ sip->si_table = NULL;
+ sip->si_table_size = 0;
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Clear the interrupt type */
+ sip->si_type = EFX_INTR_INVALID;
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ SFXGE_OBJ_CHECK(sip, sfxge_intr_t);
+
+ return (rc);
+}
+
+int
+sfxge_intr_start(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ int rc;
+
+ ASSERT3U(sip->si_state, ==, SFXGE_INTR_INITIALIZED);
+
+ /* Enable interrupts at the bus */
+ if ((rc = sfxge_intr_bus_enable(sp)) != 0)
+ goto fail1;
+
+ sip->si_state = SFXGE_INTR_TESTING;
+
+ /* Enable interrupts at the NIC */
+ if ((rc = sfxge_intr_nic_enable(sp)) != 0)
+ goto fail2;
+
+ sip->si_state = SFXGE_INTR_STARTED;
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Disable interrupts at the bus */
+ sfxge_intr_bus_disable(sp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ sip->si_state = SFXGE_INTR_INITIALIZED;
+
+ return (rc);
+}
+
+void
+sfxge_intr_stop(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+
+ ASSERT3U(sip->si_state, ==, SFXGE_INTR_STARTED);
+
+ sip->si_state = SFXGE_INTR_INITIALIZED;
+
+ /* Disable interrupts at the NIC */
+ sfxge_intr_nic_disable(sp);
+
+ /* Disable interrupts at the bus */
+ sfxge_intr_bus_disable(sp);
+}
+
+void
+sfxge_intr_fini(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ efsys_mem_t *esmp = &(sip->si_mem);
+ int index;
+ int err;
+
+ ASSERT3U(sip->si_state, ==, SFXGE_INTR_INITIALIZED);
+
+ sip->si_state = SFXGE_INTR_UNINITIALIZED;
+
+ /* Tear down dma setup */
+ sfxge_dma_buffer_destroy(esmp);
+
+
+ /* Free interrupt handles */
+ mutex_enter(&sfxge_global_lock);
+
+ index = sip->si_nalloc;
+ while (--index >= 0) {
+ err = ddi_intr_free(sip->si_table[index]);
+ if (err != DDI_SUCCESS) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "intr_fini: ddi_intr_free failed err=%d"
+ " (h=%p idx=%d nalloc=%d)",
+ err, (void *)sip->si_table[index],
+ index, sip->si_nalloc);
+ }
+ sip->si_table[index] = NULL;
+ }
+ sip->si_nalloc = 0;
+
+ mutex_exit(&sfxge_global_lock);
+
+ /* Free the handle table */
+ kmem_free(sip->si_table, sip->si_table_size);
+ sip->si_table = NULL;
+ sip->si_table_size = 0;
+
+ /* Clear the interrupt type */
+ sip->si_type = EFX_INTR_INVALID;
+
+ SFXGE_OBJ_CHECK(sip, sfxge_intr_t);
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_ioc.h b/usr/src/uts/common/io/sfxge/sfxge_ioc.h
new file mode 100644
index 0000000000..0ad86a160b
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_ioc.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_SFXGE_IOC_H
+#define _SYS_SFXGE_IOC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+
+/* Ensure no ambiguity over structure layouts */
+#pragma pack(1)
+
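+/* Ioctl numbers carry the ASCII tag 'SFC' in the top three bytes */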
+#define SFXGE_IOC ('S' << 24 | 'F' << 16 | 'C' << 8)
+
+#define SFXGE_STOP_IOC (SFXGE_IOC | 0x01)
+#define SFXGE_START_IOC (SFXGE_IOC | 0x02)
+
+/* MDIO was SFXGE_IOC 0x03 */
+
+/* I2C was SFXGE_IOC 0x04 */
+
+/* SPI was SFXGE_IOC 0x05 */
+
+/* BAR was SFXGE_IOC 0x06 */
+
+/* PCI was SFXGE_IOC 0x07 */
+
+/* MAC was SFXGE_IOC 0x08 */
+
+/* PHY was SFXGE_IOC 0x09 */
+
+/* SRAM was SFXGE_IOC 0x0a */
+
+/* TX was SFXGE_IOC 0x0b */
+
+/* RX was SFXGE_IOC 0x0c */
+
+/* NVRAM */
+
+#define SFXGE_NVRAM_IOC (SFXGE_IOC | 0x0d)
+
+typedef struct sfxge_nvram_ioc_s {
+ uint32_t sni_op;
+ uint32_t sni_type;
+ uint32_t sni_offset;
+ uint32_t sni_size;
+ uint32_t sni_subtype;
+ uint16_t sni_version[4]; /* get/set_ver */
+ /*
+	 * STREAMS STRMSGSZ limit (default 64 KB)
+ * See write(2) and I_STR in streamio(7i)
+ */
+ uint8_t sni_data[32*1024]; /* read/write */
+} sfxge_nvram_ioc_t;
+
+#define SFXGE_NVRAM_OP_SIZE 0x00000001
+#define SFXGE_NVRAM_OP_READ 0x00000002
+#define SFXGE_NVRAM_OP_WRITE 0x00000003
+#define SFXGE_NVRAM_OP_ERASE 0x00000004
+#define SFXGE_NVRAM_OP_GET_VER 0x00000005
+#define SFXGE_NVRAM_OP_SET_VER 0x00000006
+
+#define SFXGE_NVRAM_TYPE_BOOTROM 0x00000001
+#define SFXGE_NVRAM_TYPE_BOOTROM_CFG 0x00000002
+#define SFXGE_NVRAM_TYPE_MC 0x00000003
+#define SFXGE_NVRAM_TYPE_MC_GOLDEN 0x00000004
+#define SFXGE_NVRAM_TYPE_PHY 0x00000005
+#define SFXGE_NVRAM_TYPE_NULL_PHY 0x00000006
+#define SFXGE_NVRAM_TYPE_FPGA 0x00000007
+#define SFXGE_NVRAM_TYPE_FCFW 0x00000008
+#define SFXGE_NVRAM_TYPE_CPLD 0x00000009
+#define SFXGE_NVRAM_TYPE_FPGA_BACKUP 0x0000000a
+#define SFXGE_NVRAM_TYPE_DYNAMIC_CFG 0x0000000b
+
+
+/* PHY BIST was IOC 0x0e */
+
+/* Legacy IOC for MCDIv1 protocol - do not use in new code */
+#define SFXGE_MCDI_IOC (SFXGE_IOC | 0x0f)
+
+typedef struct sfxge_mcdi_ioc_s {
+ uint8_t smi_payload[256];
+ uint8_t smi_cmd;
+ uint8_t smi_len; /* In and out */
+ uint8_t smi_rc;
+} sfxge_mcdi_ioc_t;
+
+/* Reset the NIC */
+
+#define SFXGE_NIC_RESET_IOC (SFXGE_IOC | 0x10)
+
+/* VPD */
+
+#define SFXGE_VPD_IOC (SFXGE_IOC | 0x11)
+
+#define SFXGE_VPD_MAX_PAYLOAD 0x100
+
+typedef struct sfxge_vpd_ioc_s {
+ uint8_t svi_op;
+ uint8_t svi_tag;
+ uint16_t svi_keyword;
+ uint8_t svi_len; /* In or out */
+ uint8_t svi_payload[SFXGE_VPD_MAX_PAYLOAD]; /* In or out */
+} sfxge_vpd_ioc_t;
+
+#define SFXGE_VPD_OP_GET_KEYWORD 0x00000001
+#define SFXGE_VPD_OP_SET_KEYWORD 0x00000002
+
+/* MCDIv2 */
+
+#define SFXGE_MCDI2_IOC (SFXGE_IOC | 0x12)
+
+typedef struct sfxge_mcdi2_ioc_s {
+ uint8_t smi_payload[1024];
+ uint32_t smi_cmd;
+ uint32_t smi_len; /* In and out */
+ uint32_t smi_rc;
+} sfxge_mcdi2_ioc_t;
+
+
+#pragma pack()
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SFXGE_IOC_H */
diff --git a/usr/src/uts/common/io/sfxge/sfxge_mac.c b/usr/src/uts/common/io/sfxge/sfxge_mac.c
new file mode 100644
index 0000000000..2b5bc2acf5
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_mac.c
@@ -0,0 +1,1056 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+/*
+ * All efx_mac_*() must be after efx_port_init()
+ * LOCKING STRATEGY: Acquire sm_lock and test sm_state == SFXGE_MAC_STARTED
+ * to serialise against sfxge_restart()
+ */
+
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include "sfxge.h"
+#include "efx.h"
+
+#define SFXGE_MAC_POLL_PERIOD_MS 1000
+
+static void sfxge_mac_link_update_locked(sfxge_t *sp, efx_link_mode_t mode);
+
+
+/* MAC DMA attributes */
+static ddi_device_acc_attr_t sfxge_mac_devacc = {
+
+ DDI_DEVICE_ATTR_V0, /* devacc_attr_version */
+ DDI_NEVERSWAP_ACC, /* devacc_attr_endian_flags */
+ DDI_STRICTORDER_ACC /* devacc_attr_dataorder */
+};
+
+static ddi_dma_attr_t sfxge_mac_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffffffffffull, /* dma_attr_addr_hi */
+ 0xffffffffffffffffull, /* dma_attr_count_max */
+ 0x1000, /* dma_attr_align */
+ 0xffffffff, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffffffffffull, /* dma_attr_maxxfer */
+ 0xffffffffffffffffull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+
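+/*
+ * Refresh the cached MAC statistics, retrying up to 'tries' times with a
+ * 'delay_usec' microsecond wait between attempts while a statistics upload
+ * is still in flight.
+ */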
+static void
+_sfxge_mac_stat_update(sfxge_mac_t *smp, int tries, int delay_usec)
+{
+ sfxge_t *sp = smp->sm_sp;
+ efsys_mem_t *esmp = &(smp->sm_mem);
+ int i;
+
+ ASSERT(mutex_owned(&(smp->sm_lock)));
+ ASSERT3U(smp->sm_state, !=, SFXGE_MAC_UNINITIALIZED);
+
+ /* if no stats pending then they are already freshly updated */
+ if (smp->sm_mac_stats_timer_reqd && !smp->sm_mac_stats_pend)
+ return;
+
+ for (i = 0; i < tries; i++) {
+ /* Try to update the cached counters */
+ if (efx_mac_stats_update(sp->s_enp, esmp, smp->sm_stat,
+ NULL) != EAGAIN)
+ goto done;
+
+ drv_usecwait(delay_usec);
+ }
+
+ DTRACE_PROBE(mac_stat_timeout);
+ dev_err(sp->s_dip, CE_NOTE, SFXGE_CMN_ERR "MAC stats timeout");
+ return;
+
+done:
+ smp->sm_mac_stats_pend = B_FALSE;
+ smp->sm_lbolt = ddi_get_lbolt();
+}
+
+static void
+sfxge_mac_stat_update_quick(sfxge_mac_t *smp)
+{
+ /*
+	 * Update the statistics from the most recent DMA. This might race
+	 * with an in-flight DMA, so retry once. Otherwise use the MAC stat
+	 * values from the last mac_poll() or the MC periodic stats.
+ */
+ _sfxge_mac_stat_update(smp, 2, 50);
+}
+
+static void
+sfxge_mac_stat_update_wait(sfxge_mac_t *smp)
+{
+ /* Wait a max of 20 * 500us = 10ms */
+ _sfxge_mac_stat_update(smp, 20, 500);
+}
+
+static int
+sfxge_mac_kstat_update(kstat_t *ksp, int rw)
+{
+ sfxge_mac_t *smp = ksp->ks_private;
+ kstat_named_t *knp;
+ int rc;
+ unsigned int val;
+ sfxge_rx_coalesce_mode_t rxmode;
+
+ if (rw != KSTAT_READ) {
+ rc = EACCES;
+ goto fail1;
+ }
+
+ ASSERT(mutex_owned(&(smp->sm_lock)));
+
+ if (smp->sm_state != SFXGE_MAC_STARTED)
+ goto done;
+
+ sfxge_mac_stat_update_quick(smp);
+
+ knp = smp->sm_stat;
+ knp += EFX_MAC_NSTATS;
+
+ knp->value.ui64 = (smp->sm_link_up) ? 1 : 0;
+ knp++;
+
+ knp->value.ui64 = smp->sm_link_speed;
+ knp++;
+
+ knp->value.ui64 = smp->sm_link_duplex;
+ knp++;
+
+ knp->value.ui64 = (smp->sm_fcntl & EFX_FCNTL_GENERATE) ? 1 : 0;
+ knp++;
+
+ knp->value.ui64 = (smp->sm_fcntl & EFX_FCNTL_RESPOND) ? 1 : 0;
+ knp++;
+
+ sfxge_ev_moderation_get(smp->sm_sp, &val);
+ knp->value.ui64 = val;
+ knp++;
+
+ sfxge_rx_coalesce_mode_get(smp->sm_sp, &rxmode);
+ knp->value.ui64 = (uint64_t)rxmode;
+ knp++;
+
+ if (sfxge_rx_scale_count_get(smp->sm_sp, &val) != 0)
+ val = 0;
+ knp->value.ui64 = val;
+ knp++;
+
+done:
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static int
+sfxge_mac_kstat_init(sfxge_t *sp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ dev_info_t *dip = sp->s_dip;
+ char name[MAXNAMELEN];
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ unsigned int id;
+ int rc;
+
+ /* Create the set */
+ (void) snprintf(name, MAXNAMELEN - 1, "%s_mac", ddi_driver_name(dip));
+
+ if ((ksp = kstat_create((char *)ddi_driver_name(dip),
+ ddi_get_instance(dip), name, "mac", KSTAT_TYPE_NAMED,
+ EFX_MAC_NSTATS + 8, 0)) == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ smp->sm_ksp = ksp;
+
+ ksp->ks_update = sfxge_mac_kstat_update;
+ ksp->ks_private = smp;
+ ksp->ks_lock = &(smp->sm_lock);
+
+ /* Initialise the named stats */
+ smp->sm_stat = knp = ksp->ks_data;
+ for (id = 0; id < EFX_MAC_NSTATS; id++) {
+ kstat_named_init(knp, (char *)efx_mac_stat_name(sp->s_enp, id),
+ KSTAT_DATA_UINT64);
+ knp++;
+ }
+
+ kstat_named_init(knp++, "link_up", KSTAT_DATA_UINT64);
+ kstat_named_init(knp++, "link_speed", KSTAT_DATA_UINT64);
+ kstat_named_init(knp++, "link_duplex", KSTAT_DATA_UINT64);
+ kstat_named_init(knp++, "fcntl_generate", KSTAT_DATA_UINT64);
+ kstat_named_init(knp++, "fcntl_respond", KSTAT_DATA_UINT64);
+ kstat_named_init(knp++, "intr_moderation", KSTAT_DATA_UINT64);
+ kstat_named_init(knp++, "rx_coalesce_mode", KSTAT_DATA_UINT64);
+ kstat_named_init(knp++, "rx_scale_count", KSTAT_DATA_UINT64);
+
+ kstat_install(ksp);
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+sfxge_mac_kstat_fini(sfxge_t *sp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+
+ /* Destroy the set */
+ kstat_delete(smp->sm_ksp);
+ smp->sm_ksp = NULL;
+ smp->sm_stat = NULL;
+}
+
+void
+sfxge_mac_stat_get(sfxge_t *sp, unsigned int id, uint64_t *valp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+
+ /* Make sure the cached counter values are recent */
+ mutex_enter(&(smp->sm_lock));
+
+ if (smp->sm_state != SFXGE_MAC_STARTED)
+ goto done;
+
+ sfxge_mac_stat_update_quick(smp);
+
+ *valp = smp->sm_stat[id].value.ui64;
+
+done:
+ mutex_exit(&(smp->sm_lock));
+}
+
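+/*
+ * Taskq function: poll the port for link state changes and periodically
+ * trigger MAC statistics uploads until the MAC is stopped.
+ */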
+static void
+sfxge_mac_poll(void *arg)
+{
+ sfxge_t *sp = arg;
+ efx_nic_t *enp = sp->s_enp;
+ sfxge_mac_t *smp = &(sp->s_mac);
+ efsys_mem_t *esmp = &(smp->sm_mem);
+ efx_link_mode_t mode;
+ clock_t timeout;
+
+ mutex_enter(&(smp->sm_lock));
+ while (smp->sm_state == SFXGE_MAC_STARTED) {
+
+ /* clears smp->sm_mac_stats_pend if appropriate */
+ if (smp->sm_mac_stats_pend)
+ sfxge_mac_stat_update_wait(smp);
+
+ /* This may sleep waiting for MCDI completion */
+ mode = EFX_LINK_UNKNOWN;
+ if (efx_port_poll(enp, &mode) == 0)
+ sfxge_mac_link_update_locked(sp, mode);
+
+ if ((smp->sm_link_poll_reqd == B_FALSE) &&
+ (smp->sm_mac_stats_timer_reqd == B_FALSE))
+ goto done;
+
+ /* Zero the memory */
+ bzero(esmp->esm_base, EFX_MAC_STATS_SIZE);
+
+ /* Trigger upload the MAC statistics counters */
+ if (smp->sm_link_up &&
+ efx_mac_stats_upload(sp->s_enp, esmp) == 0)
+ smp->sm_mac_stats_pend = B_TRUE;
+
+ /* Wait for timeout or end of polling */
+ timeout = ddi_get_lbolt() + drv_usectohz(1000 *
+ SFXGE_MAC_POLL_PERIOD_MS);
+ while (smp->sm_state == SFXGE_MAC_STARTED) {
+ if (cv_timedwait(&(smp->sm_link_poll_kv),
+ &(smp->sm_lock), timeout) < 0) {
+ /* Timeout - poll if polling still enabled */
+ break;
+ }
+ }
+ }
+done:
+ mutex_exit(&(smp->sm_lock));
+
+}
+
+static void
+sfxge_mac_poll_start(sfxge_t *sp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+
+ ASSERT(mutex_owned(&(smp->sm_lock)));
+ ASSERT3U(smp->sm_state, ==, SFXGE_MAC_STARTED);
+
+ /* Schedule a poll */
+ (void) ddi_taskq_dispatch(smp->sm_tqp, sfxge_mac_poll, sp, DDI_SLEEP);
+}
+
+static void
+sfxge_mac_poll_stop(sfxge_t *sp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+
+ ASSERT(mutex_owned(&(smp->sm_lock)));
+ ASSERT3U(smp->sm_state, ==, SFXGE_MAC_INITIALIZED);
+
+ cv_broadcast(&(smp->sm_link_poll_kv));
+
+ /* Wait for link polling to cease */
+ mutex_exit(&(smp->sm_lock));
+ ddi_taskq_wait(smp->sm_tqp);
+ mutex_enter(&(smp->sm_lock));
+
+ /* Collect the final statistics. */
+ sfxge_mac_stat_update_wait(smp);
+}
+
+int
+sfxge_mac_init(sfxge_t *sp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ efsys_mem_t *esmp = &(smp->sm_mem);
+ dev_info_t *dip = sp->s_dip;
+ sfxge_dma_buffer_attr_t dma_attr;
+ const efx_nic_cfg_t *encp;
+ unsigned char *bytes;
+ unsigned int n;
+ int err, rc;
+
+ SFXGE_OBJ_CHECK(smp, sfxge_mac_t);
+
+ ASSERT3U(smp->sm_state, ==, SFXGE_MAC_UNINITIALIZED);
+
+ smp->sm_sp = sp;
+ encp = efx_nic_cfg_get(sp->s_enp);
+ smp->sm_link_poll_reqd = (~encp->enc_features &
+ EFX_FEATURE_LINK_EVENTS);
+ smp->sm_mac_stats_timer_reqd = (~encp->enc_features &
+ EFX_FEATURE_PERIODIC_MAC_STATS);
+
+ mutex_init(&(smp->sm_lock), NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(sp->s_intr.si_intr_pri));
+ cv_init(&(smp->sm_link_poll_kv), NULL, CV_DRIVER, NULL);
+
+ /* Create link poll taskq */
+ smp->sm_tqp = ddi_taskq_create(dip, "mac_tq", 1, TASKQ_DEFAULTPRI, 0);
+ if (smp->sm_tqp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ if ((rc = sfxge_phy_init(sp)) != 0)
+ goto fail2;
+
+ dma_attr.sdba_dip = dip;
+ dma_attr.sdba_dattrp = &sfxge_mac_dma_attr;
+ dma_attr.sdba_callback = DDI_DMA_SLEEP;
+ dma_attr.sdba_length = EFX_MAC_STATS_SIZE;
+ dma_attr.sdba_memflags = DDI_DMA_CONSISTENT;
+ dma_attr.sdba_devaccp = &sfxge_mac_devacc;
+ dma_attr.sdba_bindflags = DDI_DMA_READ | DDI_DMA_CONSISTENT;
+ dma_attr.sdba_maxcookies = 1;
+ dma_attr.sdba_zeroinit = B_TRUE;
+
+ if ((rc = sfxge_dma_buffer_create(esmp, &dma_attr)) != 0)
+ goto fail3;
+
+ /* Set the initial flow control values */
+ smp->sm_fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+
+ /*
+ * Determine the 'burnt-in' MAC address:
+ *
+ * A: if the "mac-address" property is set on our device node use that.
+ * B: otherwise, use the value from NVRAM.
+ */
+
+ /* A: property */
+ err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+ "mac-address", &bytes, &n);
+ switch (err) {
+ case DDI_PROP_SUCCESS:
+ if (n == ETHERADDRL) {
+ bcopy(bytes, smp->sm_bia, ETHERADDRL);
+ goto done;
+ }
+
+ ddi_prop_free(bytes);
+ break;
+
+ default:
+ break;
+ }
+
+ /* B: NVRAM */
+ bcopy(encp->enc_mac_addr, smp->sm_bia, ETHERADDRL);
+
+done:
+ /* Initialize the statistics */
+ if ((rc = sfxge_mac_kstat_init(sp)) != 0)
+ goto fail4;
+
+ if ((rc = sfxge_phy_kstat_init(sp)) != 0)
+ goto fail5;
+
+ smp->sm_state = SFXGE_MAC_INITIALIZED;
+
+ return (0);
+
+fail5:
+ DTRACE_PROBE(fail5);
+
+ sfxge_mac_kstat_fini(sp);
+fail4:
+ DTRACE_PROBE(fail4);
+
+ /* Tear down DMA setup */
+ sfxge_dma_buffer_destroy(esmp);
+fail3:
+ DTRACE_PROBE(fail3);
+
+ sfxge_phy_fini(sp);
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Destroy the link poll taskq */
+ ddi_taskq_destroy(smp->sm_tqp);
+ smp->sm_tqp = NULL;
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ cv_destroy(&(smp->sm_link_poll_kv));
+
+ mutex_destroy(&(smp->sm_lock));
+
+ smp->sm_sp = NULL;
+
+ return (rc);
+}
+
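+/*
+ * Push the current promiscuous, multicast and broadcast settings to the
+ * common-code MAC filters.  Only takes effect once the MAC is started;
+ * the caller must hold sm_lock.
+ */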
+static int
+sfxge_mac_filter_apply(sfxge_t *sp)
+{
+ efx_nic_t *enp = sp->s_enp;
+ sfxge_mac_t *smp = &(sp->s_mac);
+ int rc;
+
+ ASSERT(mutex_owned(&(smp->sm_lock)));
+
+ if (smp->sm_state == SFXGE_MAC_STARTED) {
+ boolean_t all_unicst;
+ boolean_t mulcst;
+ boolean_t all_mulcst;
+ boolean_t brdcst;
+
+ all_unicst = (smp->sm_promisc == SFXGE_PROMISC_ALL_PHYS);
+ mulcst = (smp->sm_mcast_count > 0);
+ all_mulcst = (smp->sm_promisc >= SFXGE_PROMISC_ALL_MULTI);
+ brdcst = B_TRUE;
+
+ if ((rc = efx_mac_filter_set(enp, all_unicst, mulcst,
+ all_mulcst, brdcst)) != 0) {
+ goto fail1;
+ }
+ if ((rc = efx_mac_multicast_list_set(enp,
+ smp->sm_mcast_addr, smp->sm_mcast_count)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+int
+sfxge_mac_start(sfxge_t *sp, boolean_t restart)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ efsys_mem_t *esmp = &(smp->sm_mem);
+ efx_nic_t *enp = sp->s_enp;
+ size_t pdu;
+ int rc;
+
+ mutex_enter(&(smp->sm_lock));
+
+ ASSERT3U(smp->sm_state, ==, SFXGE_MAC_INITIALIZED);
+
+ if ((rc = efx_port_init(enp)) != 0)
+ goto fail1;
+
+ /*
+ * Set up the advertised capabilities that may have been asked for
+ * before the call to efx_port_init().
+ */
+ if ((rc = sfxge_phy_cap_apply(sp, !restart)) != 0)
+ goto fail2;
+
+	/* Set the PDU (maximum frame length derived from the MTU) */
+ pdu = EFX_MAC_PDU(sp->s_mtu);
+ if ((rc = efx_mac_pdu_set(enp, pdu)) != 0)
+ goto fail3;
+
+ if ((rc = efx_mac_fcntl_set(enp, smp->sm_fcntl, B_TRUE)) != 0)
+ goto fail4;
+
+ /* Set the unicast address */
+ if ((rc = efx_mac_addr_set(enp, (smp->sm_laa_valid) ?
+ smp->sm_laa : smp->sm_bia)) != 0)
+ goto fail5;
+
+ if ((rc = sfxge_mac_filter_apply(sp)) != 0)
+ goto fail6;
+
+ if (!smp->sm_mac_stats_timer_reqd) {
+ if ((rc = efx_mac_stats_periodic(enp, esmp,
+ SFXGE_MAC_POLL_PERIOD_MS, B_FALSE)) != 0)
+ goto fail7;
+ }
+
+ if ((rc = efx_mac_drain(enp, B_FALSE)) != 0)
+ goto fail8;
+
+ smp->sm_state = SFXGE_MAC_STARTED;
+
+ /*
+ * Start link state polling. For hardware that reports link change
+ * events we still poll once to update the initial link state.
+ */
+ sfxge_mac_poll_start(sp);
+
+ mutex_exit(&(smp->sm_lock));
+ return (0);
+
+fail8:
+ DTRACE_PROBE(fail8);
+ (void) efx_mac_stats_periodic(enp, esmp, 0, B_FALSE);
+fail7:
+ DTRACE_PROBE(fail7);
+fail6:
+ DTRACE_PROBE(fail6);
+fail5:
+ DTRACE_PROBE(fail5);
+fail4:
+ DTRACE_PROBE(fail4);
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+ efx_port_fini(enp);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(smp->sm_lock));
+
+ return (rc);
+}
+
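+/*
+ * Update the cached link mode, speed and duplex, log the change and
+ * notify the OS of the new link state.  The caller must hold sm_lock.
+ */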
+static void
+sfxge_mac_link_update_locked(sfxge_t *sp, efx_link_mode_t mode)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ const char *change, *duplex;
+ char info[sizeof (": now 10000Mbps FULL duplex")];
+
+ ASSERT(mutex_owned(&(smp->sm_lock)));
+ if (smp->sm_state != SFXGE_MAC_STARTED)
+ return;
+
+ if (smp->sm_link_mode == mode)
+ return;
+
+ smp->sm_link_mode = mode;
+ smp->sm_link_up = B_TRUE;
+
+ switch (smp->sm_link_mode) {
+ case EFX_LINK_UNKNOWN:
+ case EFX_LINK_DOWN:
+ smp->sm_link_speed = 0;
+ smp->sm_link_duplex = SFXGE_LINK_DUPLEX_UNKNOWN;
+ smp->sm_link_up = B_FALSE;
+ break;
+
+ case EFX_LINK_10HDX:
+ case EFX_LINK_10FDX:
+ smp->sm_link_speed = 10;
+ smp->sm_link_duplex = (smp->sm_link_mode == EFX_LINK_10HDX) ?
+ SFXGE_LINK_DUPLEX_HALF : SFXGE_LINK_DUPLEX_FULL;
+ break;
+
+ case EFX_LINK_100HDX:
+ case EFX_LINK_100FDX:
+ smp->sm_link_speed = 100;
+ smp->sm_link_duplex = (smp->sm_link_mode == EFX_LINK_100HDX) ?
+ SFXGE_LINK_DUPLEX_HALF : SFXGE_LINK_DUPLEX_FULL;
+ break;
+
+ case EFX_LINK_1000HDX:
+ case EFX_LINK_1000FDX:
+ smp->sm_link_speed = 1000;
+ smp->sm_link_duplex = (smp->sm_link_mode == EFX_LINK_1000HDX) ?
+ SFXGE_LINK_DUPLEX_HALF : SFXGE_LINK_DUPLEX_FULL;
+ break;
+
+ case EFX_LINK_10000FDX:
+ smp->sm_link_speed = 10000;
+ smp->sm_link_duplex = SFXGE_LINK_DUPLEX_FULL;
+ break;
+
+ case EFX_LINK_40000FDX:
+ smp->sm_link_speed = 40000;
+ smp->sm_link_duplex = SFXGE_LINK_DUPLEX_FULL;
+ break;
+
+ default:
+ ASSERT(B_FALSE);
+ break;
+ }
+
+ duplex = (smp->sm_link_duplex == SFXGE_LINK_DUPLEX_FULL) ?
+ "full" : "half";
+ change = (smp->sm_link_up) ? "UP" : "DOWN";
+ (void) snprintf(info, sizeof (info), ": now %dMbps %s duplex",
+ smp->sm_link_speed, duplex);
+
+ dev_err(sp->s_dip, CE_NOTE, SFXGE_CMN_ERR "Link %s%s",
+ change, smp->sm_link_up ? info : "");
+
+ /* Push link state update to the OS */
+ sfxge_gld_link_update(sp);
+}
+
+void
+sfxge_mac_link_update(sfxge_t *sp, efx_link_mode_t mode)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+
+ mutex_enter(&(smp->sm_lock));
+ sfxge_mac_link_update_locked(sp, mode);
+ mutex_exit(&(smp->sm_lock));
+}
+
+void
+sfxge_mac_link_check(sfxge_t *sp, boolean_t *upp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+
+ mutex_enter(&(smp->sm_lock));
+ *upp = smp->sm_link_up;
+ mutex_exit(&(smp->sm_lock));
+}
+
+void
+sfxge_mac_link_speed_get(sfxge_t *sp, unsigned int *speedp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+
+ mutex_enter(&(smp->sm_lock));
+ *speedp = smp->sm_link_speed;
+ mutex_exit(&(smp->sm_lock));
+}
+
+void
+sfxge_mac_link_duplex_get(sfxge_t *sp, sfxge_link_duplex_t *duplexp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+
+ mutex_enter(&(smp->sm_lock));
+ *duplexp = smp->sm_link_duplex;
+ mutex_exit(&(smp->sm_lock));
+}
+
+void
+sfxge_mac_fcntl_get(sfxge_t *sp, unsigned int *fcntlp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+
+ mutex_enter(&(smp->sm_lock));
+ *fcntlp = smp->sm_fcntl;
+ mutex_exit(&(smp->sm_lock));
+}
+
+int
+sfxge_mac_fcntl_set(sfxge_t *sp, unsigned int fcntl)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ int rc;
+
+ mutex_enter(&(smp->sm_lock));
+
+ if (smp->sm_fcntl == fcntl)
+ goto done;
+
+ smp->sm_fcntl = fcntl;
+
+ if (smp->sm_state != SFXGE_MAC_STARTED)
+ goto done;
+
+ if ((rc = efx_mac_fcntl_set(sp->s_enp, smp->sm_fcntl, B_TRUE)) != 0)
+ goto fail1;
+
+done:
+ mutex_exit(&(smp->sm_lock));
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(smp->sm_lock));
+
+ return (rc);
+}
+
+int
+sfxge_mac_unicst_get(sfxge_t *sp, sfxge_unicst_type_t type, uint8_t *addr)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ int rc;
+
+ if (type >= SFXGE_UNICST_NTYPES) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ mutex_enter(&(smp->sm_lock));
+
+ if (smp->sm_state != SFXGE_MAC_INITIALIZED &&
+ smp->sm_state != SFXGE_MAC_STARTED) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ switch (type) {
+ case SFXGE_UNICST_BIA:
+ bcopy(smp->sm_bia, addr, ETHERADDRL);
+ break;
+
+ case SFXGE_UNICST_LAA:
+ if (!(smp->sm_laa_valid)) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ bcopy(smp->sm_laa, addr, ETHERADDRL);
+ break;
+
+ default:
+ ASSERT(B_FALSE);
+ break;
+ }
+
+ mutex_exit(&(smp->sm_lock));
+
+ return (0);
+
+
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+
+ mutex_exit(&(smp->sm_lock));
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+int
+sfxge_mac_unicst_set(sfxge_t *sp, uint8_t *addr)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ efx_nic_t *enp = sp->s_enp;
+ boolean_t old_mac_valid;
+ uint8_t old_mac[ETHERADDRL];
+ int rc;
+
+ mutex_enter(&(smp->sm_lock));
+
+ old_mac_valid = smp->sm_laa_valid;
+ if (old_mac_valid)
+ bcopy(smp->sm_laa, old_mac, ETHERADDRL);
+
+ bcopy(addr, smp->sm_laa, ETHERADDRL);
+ smp->sm_laa_valid = B_TRUE;
+
+ if (smp->sm_state != SFXGE_MAC_STARTED)
+ goto done;
+
+ if (efx_nic_cfg_get(enp)->enc_allow_set_mac_with_installed_filters) {
+ if ((rc = efx_mac_addr_set(enp, smp->sm_laa)) != 0) {
+ dev_err(sp->s_dip, CE_NOTE, SFXGE_CMN_ERR
+ "unable to set unicast MAC filter");
+ goto fail1;
+ }
+ } else {
+ /* Older EF10 firmware requires a device start */
+ mutex_exit(&smp->sm_lock);
+ sfxge_stop(sp);
+ if ((rc = sfxge_start(sp, B_TRUE)) != 0) {
+ dev_err(sp->s_dip, CE_NOTE, SFXGE_CMN_ERR
+ "unable to restart with a new MAC");
+ mutex_enter(&(smp->sm_lock));
+ goto fail1;
+ }
+ mutex_enter(&smp->sm_lock);
+ }
+
+ if ((rc = efx_mac_addr_set(enp, smp->sm_laa)) != 0)
+ goto fail1;
+
+done:
+ mutex_exit(&(smp->sm_lock));
+
+ return (0);
+
+fail1:
+ if (old_mac_valid)
+ bcopy(old_mac, smp->sm_laa, ETHERADDRL);
+ else
+ smp->sm_laa_valid = B_FALSE;
+
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(smp->sm_lock));
+
+ return (rc);
+}
+
+int
+sfxge_mac_promisc_set(sfxge_t *sp, sfxge_promisc_type_t promisc)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ int rc;
+
+ mutex_enter(&(smp->sm_lock));
+
+ if (smp->sm_promisc == promisc)
+ goto done;
+
+ smp->sm_promisc = promisc;
+
+ if ((rc = sfxge_mac_filter_apply(sp)) != 0)
+ goto fail1;
+
+done:
+ mutex_exit(&(smp->sm_lock));
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ mutex_exit(&(smp->sm_lock));
+
+ return (rc);
+}
+
+int
+sfxge_mac_multicst_add(sfxge_t *sp, const uint8_t *addr)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ int i;
+ int rc;
+
+ mutex_enter(&(smp->sm_lock));
+
+ if ((addr[0] & 0x1) == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Check if the address is already in the list */
+ i = 0;
+ while (i < smp->sm_mcast_count) {
+ if (bcmp(smp->sm_mcast_addr + (i * ETHERADDRL),
+ addr, ETHERADDRL) == 0)
+ goto done;
+ else
+ i++;
+ }
+
+ if (smp->sm_mcast_count >= EFX_MAC_MULTICAST_LIST_MAX) {
+ rc = ENOENT;
+ goto fail1;
+ }
+
+ /* Add to the list */
+ bcopy(addr, smp->sm_mcast_addr + (smp->sm_mcast_count++ * ETHERADDRL),
+ ETHERADDRL);
+
+ if ((rc = sfxge_mac_filter_apply(sp)) != 0)
+ goto fail2;
+
+done:
+ mutex_exit(&(smp->sm_lock));
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+ smp->sm_mcast_count--;
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ mutex_exit(&(smp->sm_lock));
+
+ return (rc);
+}
+
+int
+sfxge_mac_multicst_remove(sfxge_t *sp, const uint8_t *addr)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ int i;
+ int rc;
+
+ mutex_enter(&(smp->sm_lock));
+
+ i = 0;
+ while (i < smp->sm_mcast_count) {
+ if (bcmp(smp->sm_mcast_addr + (i * ETHERADDRL),
+ addr, ETHERADDRL) == 0) {
+ (void) memmove(smp->sm_mcast_addr + (i * ETHERADDRL),
+ smp->sm_mcast_addr + ((i + 1) * ETHERADDRL),
+ (smp->sm_mcast_count - (i + 1)) * ETHERADDRL);
+ smp->sm_mcast_count--;
+ } else
+ i++;
+ }
+
+ if ((rc = sfxge_mac_filter_apply(sp)) != 0)
+ goto fail1;
+
+ mutex_exit(&(smp->sm_lock));
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ mutex_exit(&(smp->sm_lock));
+
+ return (rc);
+}
+
+void
+sfxge_mac_stop(sfxge_t *sp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ efx_nic_t *enp = sp->s_enp;
+ efsys_mem_t *esmp = &(smp->sm_mem);
+
+ mutex_enter(&(smp->sm_lock));
+
+ ASSERT3U(smp->sm_state, ==, SFXGE_MAC_STARTED);
+ ASSERT3P(smp->sm_sp, ==, sp);
+ smp->sm_state = SFXGE_MAC_INITIALIZED;
+
+ /* If stopping in response to an MC reboot this may fail */
+ if (!smp->sm_mac_stats_timer_reqd)
+ (void) efx_mac_stats_periodic(enp, esmp, 0, B_FALSE);
+
+ sfxge_mac_poll_stop(sp);
+
+ smp->sm_lbolt = 0;
+
+ smp->sm_link_up = B_FALSE;
+ smp->sm_link_speed = 0;
+ smp->sm_link_duplex = SFXGE_LINK_DUPLEX_UNKNOWN;
+
+ /* This may call MCDI */
+ (void) efx_mac_drain(enp, B_TRUE);
+
+ smp->sm_link_mode = EFX_LINK_UNKNOWN;
+
+ efx_port_fini(enp);
+
+ mutex_exit(&(smp->sm_lock));
+}
+
+void
+sfxge_mac_fini(sfxge_t *sp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ efsys_mem_t *esmp = &(smp->sm_mem);
+
+ ASSERT3U(smp->sm_state, ==, SFXGE_MAC_INITIALIZED);
+ ASSERT3P(smp->sm_sp, ==, sp);
+
+ /* Tear down the statistics */
+ sfxge_phy_kstat_fini(sp);
+ sfxge_mac_kstat_fini(sp);
+
+ smp->sm_state = SFXGE_MAC_UNINITIALIZED;
+ smp->sm_link_mode = EFX_LINK_UNKNOWN;
+ smp->sm_promisc = SFXGE_PROMISC_OFF;
+
+ bzero(smp->sm_mcast_addr, sizeof (smp->sm_mcast_addr));
+ smp->sm_mcast_count = 0;
+
+ bzero(smp->sm_laa, ETHERADDRL);
+ smp->sm_laa_valid = B_FALSE;
+
+ bzero(smp->sm_bia, ETHERADDRL);
+
+ smp->sm_fcntl = 0;
+
+ /* Finish with PHY DMA memory */
+ sfxge_phy_fini(sp);
+
+	/* Tear down the DMA buffer */
+ sfxge_dma_buffer_destroy(esmp);
+
+ /* Destroy the link poll taskq */
+ ddi_taskq_destroy(smp->sm_tqp);
+ smp->sm_tqp = NULL;
+
+ mutex_destroy(&(smp->sm_lock));
+
+ smp->sm_sp = NULL;
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_mcdi.c b/usr/src/uts/common/io/sfxge/sfxge_mcdi.c
new file mode 100644
index 0000000000..6884dde351
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_mcdi.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stream.h>
+#include <sys/dlpi.h>
+#include <sys/pci.h>
+
+#include "sfxge.h"
+#include "efsys.h"
+#include "efx.h"
+#include "efx_mcdi.h"
+#include "efx_regs_mcdi.h"
+
+/* MCDI DMA attributes */
+static ddi_device_acc_attr_t sfxge_mcdi_devacc = {
+
+ DDI_DEVICE_ATTR_V0, /* devacc_attr_version */
+ DDI_NEVERSWAP_ACC, /* devacc_attr_endian_flags */
+ DDI_STRICTORDER_ACC /* devacc_attr_dataorder */
+};
+
+static ddi_dma_attr_t sfxge_mcdi_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffffffffffull, /* dma_attr_addr_hi */
+ 0xffffffffffffffffull, /* dma_attr_count_max */
+ 0x1000, /* dma_attr_align */
+ 0xffffffff, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffffffffffull, /* dma_attr_maxxfer */
+ 0xffffffffffffffffull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+/*
+ * Notes on MCDI operation:
+ * ------------------------
+ * MCDI requests can be made in arbitrary thread context, and as a synchronous
+ * API must therefore block until the response is available from the MC, or
+ * a watchdog timeout occurs.
+ *
+ * This interacts badly with the limited number of worker threads (2 per CPU)
+ * used by the Solaris callout subsystem to invoke timeout handlers. If both
+ * worker threads are blocked (e.g. waiting for a condvar or mutex) then timeout
+ * processing is deadlocked on that CPU, causing system failure.
+ *
+ * For this reason the driver does not use event based MCDI completion, as this
+ * leads to numerous paths involving timeouts and reentrant GLDv3 entrypoints
+ * that result in a deadlocked system.
+ */
+#define SFXGE_MCDI_POLL_INTERVAL 10 /* 10us in 1us units */
+#define SFXGE_MCDI_WATCHDOG_INTERVAL 10000000 /* 10s in 1us units */
+
+
+/* Acquire exclusive access to MCDI for the duration of a request */
+static void
+sfxge_mcdi_acquire(sfxge_mcdi_t *smp)
+{
+ mutex_enter(&(smp->sm_lock));
+ ASSERT3U(smp->sm_state, !=, SFXGE_MCDI_UNINITIALIZED);
+
+ while (smp->sm_state != SFXGE_MCDI_INITIALIZED) {
+ (void) cv_wait(&(smp->sm_kv), &(smp->sm_lock));
+ }
+ smp->sm_state = SFXGE_MCDI_BUSY;
+
+ mutex_exit(&(smp->sm_lock));
+}
+
+
+/* Release ownership of MCDI on request completion */
+static void
+sfxge_mcdi_release(sfxge_mcdi_t *smp)
+{
+ mutex_enter(&(smp->sm_lock));
+ ASSERT((smp->sm_state == SFXGE_MCDI_BUSY) ||
+ (smp->sm_state == SFXGE_MCDI_COMPLETED));
+
+ smp->sm_state = SFXGE_MCDI_INITIALIZED;
+ cv_broadcast(&(smp->sm_kv));
+
+ mutex_exit(&(smp->sm_lock));
+}
+
+
+static void
+sfxge_mcdi_timeout(sfxge_t *sp)
+{
+ dev_info_t *dip = sp->s_dip;
+
+ dev_err(dip, CE_WARN, SFXGE_CMN_ERR "MC_TIMEOUT");
+
+ DTRACE_PROBE(mcdi_timeout);
+ (void) sfxge_restart_dispatch(sp, DDI_SLEEP, SFXGE_HW_ERR,
+ "MCDI timeout", 0);
+}
+
+
+static void
+sfxge_mcdi_poll(sfxge_t *sp)
+{
+ efx_nic_t *enp = sp->s_enp;
+ clock_t timeout;
+ boolean_t aborted;
+
+ /* Poll until request completes or timeout */
+ timeout = ddi_get_lbolt() + drv_usectohz(SFXGE_MCDI_WATCHDOG_INTERVAL);
+ while (efx_mcdi_request_poll(enp) == B_FALSE) {
+
+ /* No response received yet */
+ if (ddi_get_lbolt() > timeout) {
+ /* Timeout expired */
+ goto fail;
+ }
+
+ /* Short delay to avoid excessive PCIe traffic */
+ drv_usecwait(SFXGE_MCDI_POLL_INTERVAL);
+ }
+
+ /* Request completed (or polling failed) */
+ return;
+
+fail:
+ /* Timeout before request completion */
+ DTRACE_PROBE(fail);
+ aborted = efx_mcdi_request_abort(enp);
+ ASSERT(aborted);
+ sfxge_mcdi_timeout(sp);
+}
+
+
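+/*
+ * MCDI transport entry point: serialise access to the MCDI interface,
+ * issue the request and busy-poll for its completion.
+ */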
+static void
+sfxge_mcdi_execute(void *arg, efx_mcdi_req_t *emrp)
+{
+ sfxge_t *sp = (sfxge_t *)arg;
+ sfxge_mcdi_t *smp = &(sp->s_mcdi);
+
+ sfxge_mcdi_acquire(smp);
+
+ /* Issue request and poll for completion */
+ efx_mcdi_request_start(sp->s_enp, emrp, B_FALSE);
+ sfxge_mcdi_poll(sp);
+
+ sfxge_mcdi_release(smp);
+}
+
+
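+/*
+ * MCDI completion event callback, invoked by the common code when a
+ * completion event is received for the current request.
+ */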
+static void
+sfxge_mcdi_ev_cpl(void *arg)
+{
+ sfxge_t *sp = (sfxge_t *)arg;
+ sfxge_mcdi_t *smp = &(sp->s_mcdi);
+
+ mutex_enter(&(smp->sm_lock));
+ ASSERT(smp->sm_state == SFXGE_MCDI_BUSY);
+ smp->sm_state = SFXGE_MCDI_COMPLETED;
+ cv_broadcast(&(smp->sm_kv));
+ mutex_exit(&(smp->sm_lock));
+}
+
+
+static void
+sfxge_mcdi_exception(void *arg, efx_mcdi_exception_t eme)
+{
+ sfxge_t *sp = (sfxge_t *)arg;
+ const char *reason;
+
+ if (eme == EFX_MCDI_EXCEPTION_MC_REBOOT)
+ reason = "MC_REBOOT";
+ else if (eme == EFX_MCDI_EXCEPTION_MC_BADASSERT)
+ reason = "MC_BADASSERT";
+ else
+ reason = "MC_UNKNOWN";
+
+ DTRACE_PROBE(mcdi_exception);
+ /* sfxge_evq_t->se_lock held */
+ (void) sfxge_restart_dispatch(sp, DDI_SLEEP, SFXGE_HW_ERR, reason, 0);
+}
+
+#if EFSYS_OPT_MCDI_LOGGING
+#define SFXGE_MCDI_LOG_BUF_SIZE 128
+
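+/*
+ * Format MCDI dwords as hex into the log buffer, flushing a full line to
+ * the console whenever the buffer would overflow.  Returns the new buffer
+ * position.
+ */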
+static size_t
+sfxge_mcdi_do_log(char *buffer, void *data, size_t data_size,
+ size_t pfxsize, size_t position)
+{
+ uint32_t *words = data;
+ size_t i;
+
+ for (i = 0; i < data_size; i += sizeof (*words)) {
+ if (position + 2 * sizeof (*words) + 1 >=
+ SFXGE_MCDI_LOG_BUF_SIZE) {
+ buffer[position] = '\0';
+ cmn_err(CE_NOTE, "%s \\", buffer);
+ position = pfxsize;
+ }
+ snprintf(buffer + position, SFXGE_MCDI_LOG_BUF_SIZE - position,
+ " %08x", *words);
+ words++;
+ position += 2 * sizeof (uint32_t) + 1;
+ }
+ return (position);
+}
+
+
+static void
+sfxge_mcdi_logger(void *arg, efx_log_msg_t type,
+ void *header, size_t header_size, void *data, size_t data_size)
+{
+ sfxge_t *sp = (sfxge_t *)arg;
+ char buffer[SFXGE_MCDI_LOG_BUF_SIZE];
+ size_t pfxsize;
+ size_t start;
+
+ if (!sp->s_mcdi_logging)
+ return;
+
+ pfxsize = snprintf(buffer, sizeof (buffer),
+ "sfc %04x:%02x:%02x.%02x %s%d MCDI RPC %s:",
+ 0,
+ PCI_REG_BUS_G(sp->s_bus_addr),
+ PCI_REG_DEV_G(sp->s_bus_addr),
+ PCI_REG_FUNC_G(sp->s_bus_addr),
+ ddi_driver_name(sp->s_dip),
+ ddi_get_instance(sp->s_dip),
+ type == EFX_LOG_MCDI_REQUEST ? "REQ" :
+ type == EFX_LOG_MCDI_RESPONSE ? "RESP" : "???");
+ start = sfxge_mcdi_do_log(buffer, header, header_size,
+ pfxsize, pfxsize);
+ start = sfxge_mcdi_do_log(buffer, data, data_size, pfxsize, start);
+ if (start != pfxsize) {
+ buffer[start] = '\0';
+ cmn_err(CE_NOTE, "%s", buffer);
+ }
+}
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+int
+sfxge_mcdi_init(sfxge_t *sp)
+{
+ efx_nic_t *enp = sp->s_enp;
+ sfxge_mcdi_t *smp = &(sp->s_mcdi);
+ efsys_mem_t *esmp = &(smp->sm_mem);
+ efx_mcdi_transport_t *emtp = &(smp->sm_emt);
+ sfxge_dma_buffer_attr_t dma_attr;
+ int msg_buf_size;
+ int rc;
+
+ ASSERT3U(smp->sm_state, ==, SFXGE_MCDI_UNINITIALIZED);
+
+ msg_buf_size = sizeof (uint32_t) + MCDI_CTL_SDU_LEN_MAX_V2;
+
+ /* Allocate host DMA buffer for MCDI commands */
+ dma_attr.sdba_dip = sp->s_dip;
+ dma_attr.sdba_dattrp = &sfxge_mcdi_dma_attr;
+ dma_attr.sdba_callback = DDI_DMA_SLEEP;
+ dma_attr.sdba_length = msg_buf_size;
+ dma_attr.sdba_memflags = DDI_DMA_CONSISTENT;
+ dma_attr.sdba_devaccp = &sfxge_mcdi_devacc;
+ dma_attr.sdba_bindflags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
+ dma_attr.sdba_maxcookies = 1;
+ dma_attr.sdba_zeroinit = B_TRUE;
+
+ if ((rc = sfxge_dma_buffer_create(esmp, &dma_attr)) != 0)
+ goto fail1;
+
+ mutex_init(&(smp->sm_lock), NULL, MUTEX_DRIVER, NULL);
+
+ smp->sm_state = SFXGE_MCDI_INITIALIZED;
+
+ emtp->emt_context = sp;
+ emtp->emt_dma_mem = esmp;
+ emtp->emt_execute = sfxge_mcdi_execute;
+ emtp->emt_ev_cpl = sfxge_mcdi_ev_cpl;
+ emtp->emt_exception = sfxge_mcdi_exception;
+#if EFSYS_OPT_MCDI_LOGGING
+ emtp->emt_logger = sfxge_mcdi_logger;
+#endif
+
+ cv_init(&(smp->sm_kv), NULL, CV_DRIVER, NULL);
+
+ if ((rc = efx_mcdi_init(enp, emtp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ cv_destroy(&(smp->sm_kv));
+ mutex_destroy(&(smp->sm_lock));
+
+ sfxge_dma_buffer_destroy(esmp);
+
+ smp->sm_state = SFXGE_MCDI_UNINITIALIZED;
+ smp->sm_sp = NULL;
+ SFXGE_OBJ_CHECK(smp, sfxge_mcdi_t);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
+void
+sfxge_mcdi_fini(sfxge_t *sp)
+{
+ efx_nic_t *enp = sp->s_enp;
+ sfxge_mcdi_t *smp = &(sp->s_mcdi);
+ efsys_mem_t *esmp = &(smp->sm_mem);
+ efx_mcdi_transport_t *emtp;
+
+ mutex_enter(&(smp->sm_lock));
+ ASSERT3U(smp->sm_state, ==, SFXGE_MCDI_INITIALIZED);
+
+ efx_mcdi_fini(enp);
+ emtp = &(smp->sm_emt);
+ bzero(emtp, sizeof (*emtp));
+
+ smp->sm_sp = NULL;
+
+ cv_destroy(&(smp->sm_kv));
+ mutex_exit(&(smp->sm_lock));
+
+ sfxge_dma_buffer_destroy(esmp);
+
+ mutex_destroy(&(smp->sm_lock));
+
+ smp->sm_state = SFXGE_MCDI_UNINITIALIZED;
+ SFXGE_OBJ_CHECK(smp, sfxge_mcdi_t);
+}
+
+
+int
+sfxge_mcdi_ioctl(sfxge_t *sp, sfxge_mcdi_ioc_t *smip)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sp->s_enp);
+ sfxge_mcdi_t *smp = &(sp->s_mcdi);
+ efx_mcdi_req_t emr;
+ uint8_t *out;
+ int rc;
+
+ if (smp->sm_state == SFXGE_MCDI_UNINITIALIZED) {
+ rc = ENODEV;
+ goto fail1;
+ }
+
+ if (!(encp->enc_features & EFX_FEATURE_MCDI)) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ out = kmem_zalloc(sizeof (smip->smi_payload), KM_NOSLEEP);
+ if (out == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ emr.emr_cmd = smip->smi_cmd;
+ emr.emr_in_buf = smip->smi_payload;
+ emr.emr_in_length = smip->smi_len;
+
+ emr.emr_out_buf = out;
+ emr.emr_out_length = sizeof (smip->smi_payload);
+
+ sfxge_mcdi_execute(sp, &emr);
+
+ smip->smi_rc = (uint8_t)emr.emr_rc;
+ smip->smi_cmd = (uint8_t)emr.emr_cmd;
+ smip->smi_len = (uint8_t)emr.emr_out_length_used;
+ bcopy(out, smip->smi_payload, smip->smi_len);
+
+ /*
+	 * Trigger a device reset in response to an MC_CMD_REBOOT request.
+	 * Both ports will see ->emt_exception callbacks on the next MCDI poll.
+ */
+ if (smip->smi_cmd == MC_CMD_REBOOT) {
+
+ DTRACE_PROBE(mcdi_ioctl_mc_reboot);
+ /* sfxge_t->s_state_lock held */
+ (void) sfxge_restart_dispatch(sp, DDI_SLEEP, SFXGE_HW_OK,
+ "MC_REBOOT triggering restart", 0);
+ }
+
+ kmem_free(out, sizeof (smip->smi_payload));
+
+ return (0);
+
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ return (rc);
+}
+
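+/*
+ * As sfxge_mcdi_ioctl(), but using the sfxge_mcdi2_ioc_t structure, which
+ * carries wider command, length and result fields.
+ */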
+int
+sfxge_mcdi2_ioctl(sfxge_t *sp, sfxge_mcdi2_ioc_t *smip)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sp->s_enp);
+ sfxge_mcdi_t *smp = &(sp->s_mcdi);
+ efx_mcdi_req_t emr;
+ uint8_t *out;
+ int rc;
+
+ if (smp->sm_state == SFXGE_MCDI_UNINITIALIZED) {
+ rc = ENODEV;
+ goto fail1;
+ }
+
+ if (!(encp->enc_features & EFX_FEATURE_MCDI)) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ out = kmem_zalloc(sizeof (smip->smi_payload), KM_NOSLEEP);
+ if (out == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ emr.emr_cmd = smip->smi_cmd;
+ emr.emr_in_buf = smip->smi_payload;
+ emr.emr_in_length = smip->smi_len;
+
+ emr.emr_out_buf = out;
+ emr.emr_out_length = sizeof (smip->smi_payload);
+
+ sfxge_mcdi_execute(sp, &emr);
+
+ smip->smi_rc = emr.emr_rc;
+ smip->smi_cmd = emr.emr_cmd;
+ smip->smi_len = (uint32_t)emr.emr_out_length_used;
+ bcopy(out, smip->smi_payload, smip->smi_len);
+
+ /*
+	 * Trigger a device reset in response to an MC_CMD_REBOOT request.
+	 * Both ports will see ->emt_exception callbacks on the next MCDI poll.
+ */
+ if (smip->smi_cmd == MC_CMD_REBOOT) {
+
+ DTRACE_PROBE(mcdi_ioctl_mc_reboot);
+ /* sfxge_t->s_state_lock held */
+ (void) sfxge_restart_dispatch(sp, DDI_SLEEP, SFXGE_HW_OK,
+ "MC_REBOOT triggering restart", 0);
+ }
+
+ kmem_free(out, sizeof (smip->smi_payload));
+
+ return (0);
+
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ return (rc);
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_mon.c b/usr/src/uts/common/io/sfxge/sfxge_mon.c
new file mode 100644
index 0000000000..3317163c52
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_mon.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include "sfxge.h"
+
+#include "efx.h"
+
+/* Monitor DMA attributes */
+static ddi_device_acc_attr_t sfxge_mon_devacc = {
+
+ DDI_DEVICE_ATTR_V0, /* devacc_attr_version */
+ DDI_NEVERSWAP_ACC, /* devacc_attr_endian_flags */
+ DDI_STRICTORDER_ACC /* devacc_attr_dataorder */
+};
+
+static ddi_dma_attr_t sfxge_mon_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffffffffffull, /* dma_attr_addr_hi */
+ 0xffffffffffffffffull, /* dma_attr_count_max */
+ 0x1000, /* dma_attr_align */
+ 0xffffffff, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffffffffffull, /* dma_attr_maxxfer */
+ 0xffffffffffffffffull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+
+static int
+sfxge_mon_kstat_update(kstat_t *ksp, int rw)
+{
+ sfxge_t *sp = ksp->ks_private;
+ sfxge_mon_t *smp = &(sp->s_mon);
+ efsys_mem_t *esmp = &(smp->sm_mem);
+ efx_nic_t *enp = sp->s_enp;
+ kstat_named_t *knp;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sp->s_enp);
+ int rc, sn;
+
+ if (rw != KSTAT_READ) {
+ rc = EACCES;
+ goto fail1;
+ }
+
+ ASSERT(mutex_owned(&(smp->sm_lock)));
+
+ if (smp->sm_state != SFXGE_MON_STARTED)
+ goto done;
+
+ if (smp->sm_polling) {
+ rc = efx_mon_stats_update(enp, esmp, smp->sm_statbuf);
+ if (rc != 0)
+ goto fail2;
+ }
+
+ knp = smp->sm_stat;
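+	/* Export only the sensors present in the NIC's monitor statistic mask */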
+ for (sn = 0; sn < EFX_MON_NSTATS; sn++) {
+ if (encp->enc_mon_stat_mask[sn / EFX_MON_MASK_ELEMENT_SIZE] &
+ (1 << (sn % EFX_MON_MASK_ELEMENT_SIZE))) {
+ knp->value.ui64 = smp->sm_statbuf[sn].emsv_value;
+ knp++;
+ }
+ }
+
+ knp->value.ui32 = sp->s_num_restarts;
+ knp++;
+ knp->value.ui32 = sp->s_num_restarts_hw_err;
+ knp++;
+ knp->value.ui32 = smp->sm_polling;
+ knp++;
+
+done:
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static int
+sfxge_mon_kstat_init(sfxge_t *sp)
+{
+ sfxge_mon_t *smp = &(sp->s_mon);
+ dev_info_t *dip = sp->s_dip;
+ efx_nic_t *enp = sp->s_enp;
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ char name[MAXNAMELEN];
+ unsigned int id;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sp->s_enp);
+ int rc;
+ int nstat;
+
+ if ((smp->sm_statbuf = kmem_zalloc(sizeof (uint32_t) * EFX_MON_NSTATS,
+ KM_NOSLEEP)) == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ (void) snprintf(name, MAXNAMELEN - 1, "%s_%s", ddi_driver_name(dip),
+ efx_mon_name(enp));
+
+
+	/* Count the enabled sensor statistics */
+ for (id = 0, nstat = 0; id < EFX_MON_NSTATS; id++) {
+ if (encp->enc_mon_stat_mask[id / EFX_MON_MASK_ELEMENT_SIZE] &
+ (1 << (id % EFX_MON_MASK_ELEMENT_SIZE))) {
+ nstat++;
+ }
+ }
+
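+	/* Create the set */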
+ if ((ksp = kstat_create((char *)ddi_driver_name(dip),
+ ddi_get_instance(dip), name, "mon", KSTAT_TYPE_NAMED,
+ nstat+3, 0)) == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ smp->sm_ksp = ksp;
+
+ ksp->ks_update = sfxge_mon_kstat_update;
+ ksp->ks_private = sp;
+ ksp->ks_lock = &(smp->sm_lock);
+
+ /* Initialise the named stats */
+ smp->sm_stat = knp = ksp->ks_data;
+ for (id = 0; id < EFX_MON_NSTATS; id++) {
+ if (encp->enc_mon_stat_mask[id / EFX_MON_MASK_ELEMENT_SIZE] &
+ (1 << (id % EFX_MON_MASK_ELEMENT_SIZE))) {
+ kstat_named_init(knp,
+ (char *)efx_mon_stat_name(enp, id),
+ KSTAT_DATA_UINT64);
+ knp++;
+ }
+ }
+ kstat_named_init(knp, "num_restarts", KSTAT_DATA_UINT32);
+ knp++;
+ kstat_named_init(knp, "num_restarts_hw_err", KSTAT_DATA_UINT32);
+ knp++;
+ kstat_named_init(knp, "mon_polling", KSTAT_DATA_UINT32);
+ knp++;
+
+ kstat_install(ksp);
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+ kmem_free(smp->sm_statbuf, sizeof (uint32_t) * EFX_MON_NSTATS);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+sfxge_mon_kstat_fini(sfxge_t *sp)
+{
+ sfxge_mon_t *smp = &(sp->s_mon);
+
+ /* Destroy the set */
+ kstat_delete(smp->sm_ksp);
+ smp->sm_ksp = NULL;
+ smp->sm_stat = NULL;
+
+ kmem_free(smp->sm_statbuf, sizeof (uint32_t) * EFX_MON_NSTATS);
+}
+
+int
+sfxge_mon_init(sfxge_t *sp)
+{
+ sfxge_mon_t *smp = &(sp->s_mon);
+ efx_nic_t *enp = sp->s_enp;
+ efsys_mem_t *esmp = &(smp->sm_mem);
+ sfxge_dma_buffer_attr_t dma_attr;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+ int rc;
+
+ SFXGE_OBJ_CHECK(smp, sfxge_mon_t);
+
+ ASSERT3U(smp->sm_state, ==, SFXGE_MON_UNINITIALIZED);
+
+ smp->sm_sp = sp;
+
+ mutex_init(&(smp->sm_lock), NULL, MUTEX_DRIVER, NULL);
+
+ dma_attr.sdba_dip = sp->s_dip;
+ dma_attr.sdba_dattrp = &sfxge_mon_dma_attr;
+ dma_attr.sdba_callback = DDI_DMA_SLEEP;
+ dma_attr.sdba_length = encp->enc_mon_stat_dma_buf_size;
+ dma_attr.sdba_memflags = DDI_DMA_CONSISTENT;
+ dma_attr.sdba_devaccp = &sfxge_mon_devacc;
+ dma_attr.sdba_bindflags = DDI_DMA_READ | DDI_DMA_CONSISTENT;
+ dma_attr.sdba_maxcookies = 1;
+ dma_attr.sdba_zeroinit = B_TRUE;
+
+ if ((rc = sfxge_dma_buffer_create(esmp, &dma_attr)) != 0)
+ goto fail1;
+
+ smp->sm_type = encp->enc_mon_type;
+
+ DTRACE_PROBE1(mon, efx_mon_type_t, smp->sm_type);
+
+ smp->sm_state = SFXGE_MON_INITIALIZED;
+
+ /* Initialize the statistics */
+ if ((rc = sfxge_mon_kstat_init(sp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Tear down DMA setup */
+ sfxge_dma_buffer_destroy(esmp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ mutex_destroy(&(smp->sm_lock));
+
+ smp->sm_sp = NULL;
+
+	SFXGE_OBJ_CHECK(smp, sfxge_mon_t);
+
+ return (rc);
+}
+
+int
+sfxge_mon_start(sfxge_t *sp)
+{
+ sfxge_mon_t *smp = &(sp->s_mon);
+ int rc;
+
+ mutex_enter(&(smp->sm_lock));
+ ASSERT3U(smp->sm_state, ==, SFXGE_MON_INITIALIZED);
+
+ /* Initialize the MON module */
+ if ((rc = efx_mon_init(sp->s_enp)) != 0)
+ goto fail1;
+
+ smp->sm_state = SFXGE_MON_STARTED;
+
+ mutex_exit(&(smp->sm_lock));
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(smp->sm_lock));
+
+ return (rc);
+}
+
+void
+sfxge_mon_stop(sfxge_t *sp)
+{
+ sfxge_mon_t *smp = &(sp->s_mon);
+
+ mutex_enter(&(smp->sm_lock));
+
+ ASSERT3U(smp->sm_state, ==, SFXGE_MON_STARTED);
+ smp->sm_state = SFXGE_MON_INITIALIZED;
+
+ /* Tear down the MON module */
+ efx_mon_fini(sp->s_enp);
+
+ mutex_exit(&(smp->sm_lock));
+}
+
+void
+sfxge_mon_fini(sfxge_t *sp)
+{
+ sfxge_mon_t *smp = &(sp->s_mon);
+ efsys_mem_t *esmp = &(smp->sm_mem);
+
+ ASSERT3U(smp->sm_state, ==, SFXGE_MON_INITIALIZED);
+
+ /* Tear down the statistics */
+ sfxge_mon_kstat_fini(sp);
+
+ smp->sm_state = SFXGE_MON_UNINITIALIZED;
+ mutex_destroy(&(smp->sm_lock));
+
+ smp->sm_sp = NULL;
+ smp->sm_type = EFX_MON_INVALID;
+
+ /* Tear down DMA setup */
+ sfxge_dma_buffer_destroy(esmp);
+
+ SFXGE_OBJ_CHECK(smp, sfxge_mon_t);
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_nvram.c b/usr/src/uts/common/io/sfxge/sfxge_nvram.c
new file mode 100644
index 0000000000..9dc052d9c4
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_nvram.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stream.h>
+#include <sys/dlpi.h>
+
+#include "sfxge.h"
+
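+/*
+ * Read or write an NVRAM partition, transferring the data in chunks of at
+ * most the chunk size reported by efx_nvram_rw_start().
+ */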
+static int
+sfxge_nvram_rw(sfxge_t *sp, sfxge_nvram_ioc_t *snip, efx_nvram_type_t type,
+ boolean_t write)
+{
+ int (*op)(efx_nic_t *, efx_nvram_type_t, unsigned int, caddr_t, size_t);
+ efx_nic_t *enp = sp->s_enp;
+ size_t chunk_size;
+ off_t off;
+ int rc;
+
+ op = (write) ? efx_nvram_write_chunk : efx_nvram_read_chunk;
+
+ if ((rc = efx_nvram_rw_start(enp, type, &chunk_size)) != 0)
+ goto fail1;
+
+ off = 0;
+ while (snip->sni_size) {
+ size_t len = MIN(chunk_size, snip->sni_size);
+ caddr_t buf = (caddr_t)(&snip->sni_data[off]);
+
+ if ((rc = op(enp, type, snip->sni_offset + off, buf, len)) != 0)
+ goto fail2;
+
+ snip->sni_size -= len;
+ off += len;
+ }
+
+ efx_nvram_rw_finish(enp, type);
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+ efx_nvram_rw_finish(enp, type);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ return (rc);
+}
+
+
+static int
+sfxge_nvram_erase(sfxge_t *sp, sfxge_nvram_ioc_t *snip, efx_nvram_type_t type)
+{
+ efx_nic_t *enp = sp->s_enp;
+ size_t chunk_size;
+ int rc;
+ _NOTE(ARGUNUSED(snip));
+
+ if ((rc = efx_nvram_rw_start(enp, type, &chunk_size)) != 0)
+ goto fail1;
+
+ if ((rc = efx_nvram_erase(enp, type)) != 0)
+ goto fail2;
+
+ efx_nvram_rw_finish(enp, type);
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+ efx_nvram_rw_finish(enp, type);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ return (rc);
+}
+
+int
+sfxge_nvram_ioctl(sfxge_t *sp, sfxge_nvram_ioc_t *snip)
+{
+ efx_nic_t *enp = sp->s_enp;
+ efx_nvram_type_t type;
+ int rc;
+
+ switch (snip->sni_type) {
+ case SFXGE_NVRAM_TYPE_BOOTROM:
+ type = EFX_NVRAM_BOOTROM;
+ break;
+ case SFXGE_NVRAM_TYPE_BOOTROM_CFG:
+ type = EFX_NVRAM_BOOTROM_CFG;
+ break;
+ case SFXGE_NVRAM_TYPE_MC:
+ type = EFX_NVRAM_MC_FIRMWARE;
+ break;
+ case SFXGE_NVRAM_TYPE_MC_GOLDEN:
+ type = EFX_NVRAM_MC_GOLDEN;
+ if (snip->sni_op == SFXGE_NVRAM_OP_WRITE ||
+ snip->sni_op == SFXGE_NVRAM_OP_ERASE ||
+ snip->sni_op == SFXGE_NVRAM_OP_SET_VER) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ case SFXGE_NVRAM_TYPE_PHY:
+ type = EFX_NVRAM_PHY;
+ break;
+ case SFXGE_NVRAM_TYPE_NULL_PHY:
+ type = EFX_NVRAM_NULLPHY;
+ break;
+ case SFXGE_NVRAM_TYPE_FPGA: /* PTP timestamping FPGA */
+ type = EFX_NVRAM_FPGA;
+ break;
+ case SFXGE_NVRAM_TYPE_FCFW:
+ type = EFX_NVRAM_FCFW;
+ break;
+ case SFXGE_NVRAM_TYPE_CPLD:
+ type = EFX_NVRAM_CPLD;
+ break;
+ case SFXGE_NVRAM_TYPE_FPGA_BACKUP:
+ type = EFX_NVRAM_FPGA_BACKUP;
+ break;
+ case SFXGE_NVRAM_TYPE_DYNAMIC_CFG:
+ type = EFX_NVRAM_DYNAMIC_CFG;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (snip->sni_size > sizeof (snip->sni_data)) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ switch (snip->sni_op) {
+ case SFXGE_NVRAM_OP_SIZE:
+ {
+ size_t size;
+ if ((rc = efx_nvram_size(enp, type, &size)) != 0)
+ goto fail4;
+ snip->sni_size = (uint32_t)size;
+ break;
+ }
+ case SFXGE_NVRAM_OP_READ:
+ if ((rc = sfxge_nvram_rw(sp, snip, type, B_FALSE)) != 0)
+ goto fail4;
+ break;
+ case SFXGE_NVRAM_OP_WRITE:
+ if ((rc = sfxge_nvram_rw(sp, snip, type, B_TRUE)) != 0)
+ goto fail4;
+ break;
+ case SFXGE_NVRAM_OP_ERASE:
+ if ((rc = sfxge_nvram_erase(sp, snip, type)) != 0)
+ goto fail4;
+ break;
+ case SFXGE_NVRAM_OP_GET_VER:
+ if ((rc = efx_nvram_get_version(enp, type, &snip->sni_subtype,
+ &snip->sni_version[0])) != 0)
+ goto fail4;
+ break;
+ case SFXGE_NVRAM_OP_SET_VER:
+ if ((rc = efx_nvram_set_version(enp, type,
+ &snip->sni_version[0])) != 0)
+ goto fail4;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ DTRACE_PROBE(fail5);
+fail4:
+ DTRACE_PROBE(fail4);
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_pci.c b/usr/src/uts/common/io/sfxge/sfxge_pci.c
new file mode 100644
index 0000000000..720b47f7aa
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_pci.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/pci.h>
+#include <sys/pcie.h>
+
+/* PCIe 2.0 and 3.0 link speeds (fallbacks if not in the system headers) */
+#ifndef PCIE_LINKCAP_MAX_SPEED_5
+#define PCIE_LINKCAP_MAX_SPEED_5 0x2
+#endif
+#ifndef PCIE_LINKSTS_SPEED_5
+#define PCIE_LINKSTS_SPEED_5 0x2
+#endif
+#ifndef PCIE_LINKCAP_MAX_SPEED_8
+#define PCIE_LINKCAP_MAX_SPEED_8 0x3
+#endif
+#ifndef PCIE_LINKSTS_SPEED_8
+#define PCIE_LINKSTS_SPEED_8 0x3
+#endif
+
+#include "sfxge.h"
+
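+/*
+ * Walk the PCI capability list looking for the given capability ID and
+ * return its configuration space offset.
+ */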
+int
+sfxge_pci_cap_find(sfxge_t *sp, uint8_t cap_id, off_t *offp)
+{
+ off_t off;
+ uint16_t stat;
+ int rc;
+
+ stat = pci_config_get16(sp->s_pci_handle, PCI_CONF_STAT);
+
+ if (!(stat & PCI_STAT_CAP)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ for (off = pci_config_get8(sp->s_pci_handle, PCI_CONF_CAP_PTR);
+ off != PCI_CAP_NEXT_PTR_NULL;
+ off = pci_config_get8(sp->s_pci_handle, off + PCI_CAP_NEXT_PTR)) {
+ if (cap_id == pci_config_get8(sp->s_pci_handle,
+ off + PCI_CAP_ID))
+ goto done;
+ }
+
+ rc = ENOENT;
+ goto fail2;
+
+done:
+ *offp = off;
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+int
+sfxge_pci_init(sfxge_t *sp)
+{
+ off_t off;
+ uint16_t pciecap;
+ uint16_t devctl;
+ uint16_t linksts;
+ uint16_t max_payload_size;
+ uint16_t max_read_request;
+ int rc;
+#if EFSYS_OPT_MCDI_LOGGING
+ int *pci_regs;
+ uint_t pci_nregs = 0;
+
+ /*
+ * We need the PCI bus address to format MCDI logging output in the
+ * same way as on other platforms.
+ * It appears there's no straightforward way to extract the address
+ * from a "dev_info_t" structure, though.
+ * The "reg" property is supported by all PCIe devices, and contains
+	 * an arbitrary-length array of elements describing logical
+	 * resources. Each element contains a 5-tuple of 32-bit values,
+	 * where the first 32-bit value contains the bus/dev/fn slot
+ * information.
+ * See pci(4) and the definition of "struct pci_phys_spec" in sys/pci.h
+ */
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "reg", (int **)&pci_regs, &pci_nregs) !=
+ DDI_PROP_SUCCESS) {
+ rc = ENODEV;
+ goto fail1;
+ }
+ sp->s_bus_addr = pci_regs[0];
+ ddi_prop_free(pci_regs);
+#endif
+
+ if (pci_config_setup(sp->s_dip, &(sp->s_pci_handle)) != DDI_SUCCESS) {
+ rc = ENODEV;
+ goto fail1;
+ }
+
+ sp->s_pci_venid = pci_config_get16(sp->s_pci_handle, PCI_CONF_VENID);
+ sp->s_pci_devid = pci_config_get16(sp->s_pci_handle, PCI_CONF_DEVID);
+ if ((rc = efx_family(sp->s_pci_venid, sp->s_pci_devid,
+ &sp->s_family)) != 0)
+ goto fail2;
+
+ if ((rc = sfxge_pci_cap_find(sp, PCI_CAP_ID_PCI_E, &off)) != 0)
+ goto fail3;
+
+ pciecap = pci_config_get16(sp->s_pci_handle, off + PCIE_PCIECAP);
+ ASSERT3U((pciecap & PCIE_PCIECAP_VER_MASK), >=, PCIE_PCIECAP_VER_1_0);
+
+ linksts = pci_config_get16(sp->s_pci_handle, off + PCIE_LINKSTS);
+ switch (linksts & PCIE_LINKSTS_NEG_WIDTH_MASK) {
+ case PCIE_LINKSTS_NEG_WIDTH_X1:
+ sp->s_pcie_nlanes = 1;
+ break;
+
+ case PCIE_LINKSTS_NEG_WIDTH_X2:
+ sp->s_pcie_nlanes = 2;
+ break;
+
+ case PCIE_LINKSTS_NEG_WIDTH_X4:
+ sp->s_pcie_nlanes = 4;
+ break;
+
+ case PCIE_LINKSTS_NEG_WIDTH_X8:
+ sp->s_pcie_nlanes = 8;
+ break;
+
+ default:
+ ASSERT(B_FALSE);
+ break;
+ }
+
+ switch (linksts & PCIE_LINKSTS_SPEED_MASK) {
+ case PCIE_LINKSTS_SPEED_2_5:
+ sp->s_pcie_linkspeed = 1;
+ break;
+
+ case PCIE_LINKSTS_SPEED_5:
+ sp->s_pcie_linkspeed = 2;
+ break;
+
+ case PCIE_LINKSTS_SPEED_8:
+ sp->s_pcie_linkspeed = 3;
+ break;
+
+ default:
+ ASSERT(B_FALSE);
+ break;
+ }
+
+ devctl = pci_config_get16(sp->s_pci_handle, off + PCIE_DEVCTL);
+
+ max_payload_size = (devctl & PCIE_DEVCTL_MAX_PAYLOAD_MASK)
+ >> PCIE_DEVCTL_MAX_PAYLOAD_SHIFT;
+
+ max_read_request = (devctl & PCIE_DEVCTL_MAX_READ_REQ_MASK)
+ >> PCIE_DEVCTL_MAX_READ_REQ_SHIFT;
+
+ dev_err(sp->s_dip, CE_NOTE,
+ SFXGE_CMN_ERR "PCIe MRR: %d TLP: %d Link: %s Lanes: x%d",
+ 128 << max_read_request,
+ 128 << max_payload_size,
+ (sp->s_pcie_linkspeed == 1) ? "2.5G" :
+ (sp->s_pcie_linkspeed == 2) ? "5.0G" :
+ (sp->s_pcie_linkspeed == 3) ? "8.0G" :
+ "UNKNOWN",
+ sp->s_pcie_nlanes);
+
+ return (0);
+
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+
+ pci_config_teardown(&(sp->s_pci_handle));
+ sp->s_pci_handle = NULL;
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
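+/*
+ * Warn if the negotiated PCIe link width or speed is below that required
+ * for full bandwidth.
+ */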
+void
+sfxge_pcie_check_link(sfxge_t *sp, unsigned int full_nlanes,
+ unsigned int full_speed)
+{
+ if ((sp->s_pcie_linkspeed < full_speed) ||
+ (sp->s_pcie_nlanes < full_nlanes))
+ dev_err(sp->s_dip, CE_NOTE,
+ SFXGE_CMN_ERR "This device requires %d PCIe lanes "
+ "at %s link speed to reach full bandwidth.",
+ full_nlanes,
+ (full_speed == 1) ? "2.5G" :
+ (full_speed == 2) ? "5.0G" :
+ (full_speed == 3) ? "8.0G" :
+ "UNKNOWN");
+}
+
+void
+sfxge_pci_fini(sfxge_t *sp)
+{
+ sp->s_pcie_nlanes = 0;
+ sp->s_pcie_linkspeed = 0;
+
+ pci_config_teardown(&(sp->s_pci_handle));
+ sp->s_pci_handle = NULL;
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_phy.c b/usr/src/uts/common/io/sfxge/sfxge_phy.c
new file mode 100644
index 0000000000..04c9f88b0e
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_phy.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include "sfxge.h"
+#include "efx.h"
+
+/*
+ * All efx_phy_*() calls must be made after efx_port_init().
+ *
+ * LOCKING STRATEGY: Acquire sm_lock and test sm_state == SFXGE_MAC_STARTED
+ * to serialise against sfxge_restart().
+ *
+ * Note that there is no separate PHY lock; everything is driven from the
+ * MAC code and the MAC lock is used.
+ */
+
+/* PHY DMA attributes */
+static ddi_device_acc_attr_t sfxge_phy_devacc = {
+
+ DDI_DEVICE_ATTR_V0, /* devacc_attr_version */
+ DDI_NEVERSWAP_ACC, /* devacc_attr_endian_flags */
+ DDI_STRICTORDER_ACC /* devacc_attr_dataorder */
+};
+
+static ddi_dma_attr_t sfxge_phy_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffffffffffull, /* dma_attr_addr_hi */
+ 0xffffffffffffffffull, /* dma_attr_count_max */
+ 0x1000, /* dma_attr_align */
+ 0xffffffff, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffffffffffull, /* dma_attr_maxxfer */
+ 0xffffffffffffffffull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+
+static int
+sfxge_phy_kstat_update(kstat_t *ksp, int rw)
+{
+ sfxge_t *sp = ksp->ks_private;
+ sfxge_mac_t *smp = &(sp->s_mac);
+ sfxge_phy_t *spp = &(smp->sm_phy);
+ efx_nic_t *enp = sp->s_enp;
+ kstat_named_t *knp;
+ const efx_nic_cfg_t *encp;
+ int rc, sn;
+
+ if (rw != KSTAT_READ) {
+ rc = EACCES;
+ goto fail1;
+ }
+
+ ASSERT(mutex_owned(&(smp->sm_lock)));
+
+ if (smp->sm_state != SFXGE_MAC_STARTED)
+ goto done;
+
+ /* Synchronize the DMA memory for reading */
+ (void) ddi_dma_sync(spp->sp_mem.esm_dma_handle,
+ 0, EFX_PHY_STATS_SIZE, DDI_DMA_SYNC_FORKERNEL);
+
+ if ((rc = efx_phy_stats_update(enp, &spp->sp_mem, spp->sp_statbuf))
+ != 0)
+ goto fail2;
+
+ knp = spp->sp_stat;
+ for (sn = 0; sn < EFX_PHY_NSTATS; sn++) {
+ knp->value.ui64 = spp->sp_statbuf[sn];
+ knp++;
+ }
+
+ encp = efx_nic_cfg_get(enp);
+ knp->value.ui64 = encp->enc_port;
+
+done:
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+int
+sfxge_phy_kstat_init(sfxge_t *sp)
+{
+ dev_info_t *dip = sp->s_dip;
+ sfxge_phy_t *spp = &(sp->s_mac.sm_phy);
+ efx_nic_t *enp = sp->s_enp;
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ const efx_nic_cfg_t *encp;
+ unsigned int id;
+ char name[MAXNAMELEN];
+ int rc;
+
+ if ((spp->sp_statbuf = kmem_zalloc(sizeof (uint32_t) * EFX_PHY_NSTATS,
+ KM_NOSLEEP)) == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ encp = efx_nic_cfg_get(enp);
+
+ (void) snprintf(name, MAXNAMELEN - 1, "%s_%s", ddi_driver_name(dip),
+ encp->enc_phy_name);
+
+ /* Create the set */
+ if ((ksp = kstat_create((char *)ddi_driver_name(dip),
+ ddi_get_instance(dip), name, "phy", KSTAT_TYPE_NAMED,
+ EFX_PHY_NSTATS + 1, 0)) == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ spp->sp_ksp = ksp;
+
+ ksp->ks_update = sfxge_phy_kstat_update;
+ ksp->ks_private = sp;
+ ksp->ks_lock = &(sp->s_mac.sm_lock);
+
+ /* Initialise the named stats */
+ spp->sp_stat = knp = ksp->ks_data;
+ for (id = 0; id < EFX_PHY_NSTATS; id++) {
+ kstat_named_init(knp, (char *)efx_phy_stat_name(enp, id),
+ KSTAT_DATA_UINT64);
+ knp++;
+ }
+
+ kstat_named_init(knp, "port", KSTAT_DATA_UINT64);
+ kstat_install(ksp);
+
+ return (0);
+
+fail2:
+	DTRACE_PROBE(fail2);
+ kmem_free(spp->sp_statbuf, sizeof (uint32_t) * EFX_PHY_NSTATS);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+void
+sfxge_phy_kstat_fini(sfxge_t *sp)
+{
+ sfxge_phy_t *spp = &(sp->s_mac.sm_phy);
+
+ /* Destroy the set */
+ kstat_delete(spp->sp_ksp);
+ spp->sp_ksp = NULL;
+ spp->sp_stat = NULL;
+
+ kmem_free(spp->sp_statbuf, sizeof (uint32_t) * EFX_PHY_NSTATS);
+}
+
+
+int
+sfxge_phy_init(sfxge_t *sp)
+{
+ sfxge_phy_t *spp = &(sp->s_mac.sm_phy);
+ efsys_mem_t *esmp = &(spp->sp_mem);
+ sfxge_dma_buffer_attr_t dma_attr;
+ int rc;
+
+ dma_attr.sdba_dip = sp->s_dip;
+ dma_attr.sdba_dattrp = &sfxge_phy_dma_attr;
+ dma_attr.sdba_callback = DDI_DMA_SLEEP;
+ dma_attr.sdba_length = EFX_PHY_STATS_SIZE;
+ dma_attr.sdba_memflags = DDI_DMA_CONSISTENT;
+ dma_attr.sdba_devaccp = &sfxge_phy_devacc;
+ dma_attr.sdba_bindflags = DDI_DMA_READ | DDI_DMA_CONSISTENT;
+ dma_attr.sdba_maxcookies = 1;
+ dma_attr.sdba_zeroinit = B_TRUE;
+
+ if ((rc = sfxge_dma_buffer_create(esmp, &dma_attr)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ SFXGE_OBJ_CHECK(spp, sfxge_phy_t);
+
+ return (rc);
+}
+
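+/*
+ * Test whether the link partner advertises the given capability bit.
+ */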
+uint8_t
+sfxge_phy_lp_cap_test(sfxge_t *sp, uint32_t field)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ uint32_t cap = 0;
+
+ mutex_enter(&(smp->sm_lock));
+
+ if (smp->sm_state != SFXGE_MAC_STARTED)
+ goto done;
+
+ efx_phy_lp_cap_get(sp->s_enp, &cap);
+
+done:
+ mutex_exit(&(smp->sm_lock));
+
+ return (cap & (1 << field));
+}
+
+/*
+ * Set up the advertised capabilities that may have been asked for
+ * when the mac was not in the state SFXGE_MAC_STARTED.
+ * Must be called after efx_port_init().
+ */
+int
+sfxge_phy_cap_apply(sfxge_t *sp, boolean_t use_default)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ efx_nic_t *enp;
+ uint32_t adv_cap;
+ int rc;
+ int err;
+
+ ASSERT(mutex_owned(&(smp->sm_lock)));
+
+ enp = sp->s_enp;
+
+ if (use_default)
+ efx_phy_adv_cap_get(enp, EFX_PHY_CAP_DEFAULT, &adv_cap);
+ else
+ efx_phy_adv_cap_get(enp, EFX_PHY_CAP_CURRENT, &adv_cap);
+
+ adv_cap |= smp->sm_phy_cap_to_set;
+ smp->sm_phy_cap_to_set = 0;
+ adv_cap &= ~(smp->sm_phy_cap_to_unset);
+ smp->sm_phy_cap_to_unset = 0;
+ if ((err = efx_phy_adv_cap_set(enp, adv_cap)) != 0) {
+ if (err == EINVAL) {
+ /*
+			 * The configuration wasn't accepted, so set to
+ * defaults.
+ */
+ uint32_t requested = adv_cap;
+ uint32_t supported;
+ efx_phy_adv_cap_get(enp, EFX_PHY_CAP_PERM, &supported);
+ efx_phy_adv_cap_get(enp, EFX_PHY_CAP_DEFAULT, &adv_cap);
+ if ((rc = efx_phy_adv_cap_set(enp, adv_cap)) != 0)
+ goto fail1;
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "Setting of advertised link capabilities failed. "
+ "Using default settings. "
+ "(Requested 0x%x Given 0x%x Supported 0x%x)",
+ requested,
+ adv_cap,
+ supported);
+ } else {
+ rc = err;
+ goto fail2;
+ }
+ }
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+uint8_t
+sfxge_phy_cap_test(sfxge_t *sp, uint32_t flag, uint32_t field,
+ boolean_t *mutablep)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ efx_nic_t *enp;
+ uint32_t cap = 0;
+ uint32_t perm = 0;
+
+ mutex_enter(&(smp->sm_lock));
+ enp = sp->s_enp;
+
+ if (smp->sm_state != SFXGE_MAC_STARTED)
+ goto done;
+
+ efx_phy_adv_cap_get(enp, flag, &cap);
+ efx_phy_adv_cap_get(enp, EFX_PHY_CAP_PERM, &perm);
+
+done:
+ mutex_exit(&(smp->sm_lock));
+
+ if (mutablep)
+ *mutablep = (perm & (1 << field)) ? B_TRUE : B_FALSE;
+
+ return ((cap & (1 << field)) ? 1 : 0);
+}
+
+
+int
+sfxge_phy_cap_set(sfxge_t *sp, uint32_t field, int set)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ efx_nic_t *enp = sp->s_enp;
+ uint32_t cap;
+ int rc = 0;
+
+ mutex_enter(&(smp->sm_lock));
+
+ if (smp->sm_state != SFXGE_MAC_STARTED) {
+ /* Store the request for when the mac is started */
+ if (set)
+ smp->sm_phy_cap_to_set |= (1 << field);
+ else
+ smp->sm_phy_cap_to_unset |= (1 << field);
+ goto done;
+ }
+
+ efx_phy_adv_cap_get(enp, EFX_PHY_CAP_CURRENT, &cap);
+
+ if (set)
+ cap |= (1 << field);
+ else
+ cap &= ~(1 << field);
+
+ rc = efx_phy_adv_cap_set(enp, cap);
+done:
+ mutex_exit(&(smp->sm_lock));
+
+ return (rc);
+}
+
+
+void
+sfxge_phy_fini(sfxge_t *sp)
+{
+ sfxge_phy_t *spp = &(sp->s_mac.sm_phy);
+ efsys_mem_t *esmp = &(spp->sp_mem);
+
+ sfxge_dma_buffer_destroy(esmp);
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_rx.c b/usr/src/uts/common/io/sfxge/sfxge_rx.c
new file mode 100644
index 0000000000..51b3e0bd34
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_rx.c
@@ -0,0 +1,2828 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/atomic.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/strsubr.h>
+#include <sys/strft.h>
+#include <sys/ksynch.h>
+#include <sys/ethernet.h>
+#include <sys/crc32.h>
+#include <sys/pattr.h>
+#include <sys/cpu.h>
+
+#include <sys/ethernet.h>
+#include <inet/ip.h>
+
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+
+#include "sfxge.h"
+
+#include "efx.h"
+
+/* RXQ flush response timeout (in microseconds) */
+#define SFXGE_RX_QFLUSH_USEC (2000000)
+
+/* RXQ flush tries in the case of failure */
+#define SFXGE_RX_QFLUSH_TRIES (5)
+
+/* RXQ default packet buffer preallocation (number of packet buffers) */
+#define SFXGE_RX_QPREALLOC (0)
+
+/* Receive packet DMA attributes */
+static ddi_device_acc_attr_t sfxge_rx_packet_devacc = {
+
+ DDI_DEVICE_ATTR_V0, /* devacc_attr_version */
+ DDI_NEVERSWAP_ACC, /* devacc_attr_endian_flags */
+ DDI_STRICTORDER_ACC /* devacc_attr_dataorder */
+};
+
+static ddi_dma_attr_t sfxge_rx_packet_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffffffffffull, /* dma_attr_addr_hi */
+ 0xffffffffffffffffull, /* dma_attr_count_max */
+ SFXGE_CPU_CACHE_SIZE, /* dma_attr_align */
+ 0xffffffff, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffffffffffull, /* dma_attr_maxxfer */
+ 0xffffffffffffffffull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+/* Receive queue DMA attributes */
+static ddi_device_acc_attr_t sfxge_rxq_devacc = {
+
+ DDI_DEVICE_ATTR_V0, /* devacc_attr_version */
+ DDI_NEVERSWAP_ACC, /* devacc_attr_endian_flags */
+ DDI_STRICTORDER_ACC /* devacc_attr_dataorder */
+};
+
+static ddi_dma_attr_t sfxge_rxq_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffffffffffull, /* dma_attr_addr_hi */
+ 0xffffffffffffffffull, /* dma_attr_count_max */
+ EFX_BUF_SIZE, /* dma_attr_align */
+ 0xffffffff, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffffffffffull, /* dma_attr_maxxfer */
+ 0xffffffffffffffffull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+/* Forward declaration */
+static void sfxge_rx_qpreallocate(sfxge_rxq_t *srp, int nprealloc);
+
+static int
+sfxge_rx_packet_ctor(void *buf, void *arg, int kmflags)
+{
+ sfxge_rx_packet_t *srpp = buf;
+ sfxge_t *sp = arg;
+ dev_info_t *dip = sp->s_dip;
+ int err;
+
+ ASSERT3U(sizeof (srpp->__srp_u1.__srp_s1), <=,
+ sizeof (srpp->__srp_u1.__srp_pad));
+ ASSERT3U(sizeof (srpp->__srp_u2.__srp_s2), <=,
+ sizeof (srpp->__srp_u2.__srp_pad));
+
+ bzero(buf, sizeof (sfxge_rx_packet_t));
+
+ /* Allocate a DMA handle */
+ err = ddi_dma_alloc_handle(dip, &sfxge_rx_packet_dma_attr,
+ (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
+ NULL, &(srpp->srp_dma_handle));
+ if (err != DDI_SUCCESS)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, err);
+
+ SFXGE_OBJ_CHECK(srpp, sfxge_rx_packet_t);
+
+ return (-1);
+}
+
+static void
+sfxge_rx_packet_dtor(void *buf, void *arg)
+{
+ sfxge_rx_packet_t *srpp = buf;
+
+ _NOTE(ARGUNUSED(arg))
+
+ /* Free the DMA handle */
+ ddi_dma_free_handle(&(srpp->srp_dma_handle));
+ srpp->srp_dma_handle = NULL;
+
+ SFXGE_OBJ_CHECK(srpp, sfxge_rx_packet_t);
+}
+
+static int
+sfxge_rx_qctor(void *buf, void *arg, int kmflags)
+{
+ sfxge_rxq_t *srp = buf;
+ efsys_mem_t *esmp = &(srp->sr_mem);
+ sfxge_t *sp = arg;
+ sfxge_dma_buffer_attr_t dma_attr;
+ sfxge_rx_fpp_t *srfppp;
+ int nprealloc;
+ unsigned int id;
+ int rc;
+
+ /* Compile-time structure layout checks */
+ EFX_STATIC_ASSERT(sizeof (srp->__sr_u1.__sr_s1) <=
+ sizeof (srp->__sr_u1.__sr_pad));
+ EFX_STATIC_ASSERT(sizeof (srp->__sr_u2.__sr_s2) <=
+ sizeof (srp->__sr_u2.__sr_pad));
+ EFX_STATIC_ASSERT(sizeof (srp->__sr_u3.__sr_s3) <=
+ sizeof (srp->__sr_u3.__sr_pad));
+
+ bzero(buf, sizeof (sfxge_rxq_t));
+
+ srp->sr_sp = sp;
+
+ dma_attr.sdba_dip = sp->s_dip;
+ dma_attr.sdba_dattrp = &sfxge_rxq_dma_attr;
+ dma_attr.sdba_callback = DDI_DMA_SLEEP;
+ dma_attr.sdba_length = EFX_RXQ_SIZE(sp->s_rxq_size);
+ dma_attr.sdba_memflags = DDI_DMA_CONSISTENT;
+ dma_attr.sdba_devaccp = &sfxge_rxq_devacc;
+ dma_attr.sdba_bindflags = DDI_DMA_READ | DDI_DMA_CONSISTENT;
+ dma_attr.sdba_maxcookies = 1;
+ dma_attr.sdba_zeroinit = B_FALSE;
+
+ if ((rc = sfxge_dma_buffer_create(esmp, &dma_attr)) != 0)
+ goto fail1;
+
+ /* Allocate some buffer table entries */
+ if ((rc = sfxge_sram_buf_tbl_alloc(sp, EFX_RXQ_NBUFS(sp->s_rxq_size),
+ &(srp->sr_id))) != 0)
+ goto fail2;
+
+ /* Allocate the context array */
+ if ((srp->sr_srpp = kmem_zalloc(sizeof (sfxge_rx_packet_t *) *
+ sp->s_rxq_size, kmflags)) == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ /* Allocate the flow table */
+ if ((srp->sr_flow = kmem_zalloc(sizeof (sfxge_rx_flow_t) *
+ SFXGE_MAX_FLOW, kmflags)) == NULL) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+
+ srp->sr_srfpp = &(srp->sr_srfp);
+ srp->sr_rto = drv_usectohz(200000);
+
+ srp->sr_mpp = &(srp->sr_mp);
+
+ /* Initialize the free packet pool */
+ srfppp = &(srp->sr_fpp);
+ if ((srfppp->srfpp_putp = kmem_zalloc(SFXGE_CPU_CACHE_SIZE *
+ SFXGE_RX_FPP_NSLOTS, kmflags)) == NULL) {
+ rc = ENOMEM;
+ goto fail5;
+ }
+ for (id = 0; id < SFXGE_RX_FPP_NSLOTS; id++) {
+ sfxge_rx_fpp_putlist_t *putp;
+ size_t off;
+
+ off = id * SFXGE_CPU_CACHE_SIZE;
+ putp = (void *)(srfppp->srfpp_putp + off);
+
+ putp->srfpl_putp = NULL;
+ putp->srfpl_putpp = &(putp->srfpl_putp);
+ mutex_init(&(putp->srfpl_lock), NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(sp->s_intr.si_intr_pri));
+ }
+
+ cv_init(&(srp->sr_flush_kv), NULL, CV_DRIVER, NULL);
+
+ /* Preallocate some packets on the free packet pool */
+ nprealloc = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "rx_prealloc_pkt_buffers", SFXGE_RX_QPREALLOC);
+ sfxge_rx_qpreallocate(srp, nprealloc);
+
+
+ return (0);
+
+fail5:
+ DTRACE_PROBE(fail5);
+
+ srp->sr_mpp = NULL;
+
+ srp->sr_rto = 0;
+ srp->sr_srfpp = NULL;
+
+ /* Free the flow table */
+ kmem_free(srp->sr_flow, sizeof (sfxge_rx_flow_t) *
+ SFXGE_MAX_FLOW);
+ srp->sr_flow = NULL;
+
+fail4:
+ DTRACE_PROBE(fail4);
+
+ /* Free the context array */
+ kmem_free(srp->sr_srpp, sizeof (sfxge_rx_packet_t *) *
+ sp->s_rxq_size);
+ srp->sr_srpp = NULL;
+
+fail3:
+ DTRACE_PROBE(fail3);
+
+ /* Free the buffer table entries */
+ sfxge_sram_buf_tbl_free(sp, srp->sr_id,
+ EFX_RXQ_NBUFS(sp->s_rxq_size));
+ srp->sr_id = 0;
+
+fail2:
+ DTRACE_PROBE(fail2);
+ /* Remove dma setup */
+ sfxge_dma_buffer_destroy(esmp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ srp->sr_sp = NULL;
+
+ SFXGE_OBJ_CHECK(srp, sfxge_rxq_t);
+
+ return (-1);
+}
+
+static void
+sfxge_rx_qdtor(void *buf, void *arg)
+{
+ sfxge_rxq_t *srp = buf;
+ efsys_mem_t *esmp = &(srp->sr_mem);
+ sfxge_t *sp = srp->sr_sp;
+ sfxge_rx_fpp_t *srfppp = &(srp->sr_fpp);
+ unsigned int id;
+
+ _NOTE(ARGUNUSED(arg))
+
+ cv_destroy(&(srp->sr_flush_kv));
+
+ /* Tear down the free packet pool */
+ for (id = 0; id < SFXGE_RX_FPP_NSLOTS; id++) {
+ sfxge_rx_fpp_putlist_t *putp;
+ size_t off;
+
+ off = id * SFXGE_CPU_CACHE_SIZE;
+ putp = (void *)(srfppp->srfpp_putp + off);
+
+ putp->srfpl_putpp = NULL;
+ mutex_destroy(&(putp->srfpl_lock));
+
+ SFXGE_OBJ_CHECK(putp, sfxge_rx_fpp_putlist_t);
+ }
+ kmem_free(srfppp->srfpp_putp, SFXGE_CPU_CACHE_SIZE *
+ SFXGE_RX_FPP_NSLOTS);
+ srfppp->srfpp_putp = NULL;
+
+ srp->sr_mpp = NULL;
+
+ srp->sr_rto = 0;
+ srp->sr_srfpp = NULL;
+
+ /* Free the flow table */
+ kmem_free(srp->sr_flow, sizeof (sfxge_rx_flow_t) *
+ SFXGE_MAX_FLOW);
+ srp->sr_flow = NULL;
+
+ /* Free the context array */
+ kmem_free(srp->sr_srpp, sizeof (sfxge_rx_packet_t *) *
+ sp->s_rxq_size);
+ srp->sr_srpp = NULL;
+
+ /* Free the buffer table entries */
+ sfxge_sram_buf_tbl_free(sp, srp->sr_id,
+ EFX_RXQ_NBUFS(sp->s_rxq_size));
+ srp->sr_id = 0;
+
+ /* Tear down dma setup */
+ sfxge_dma_buffer_destroy(esmp);
+
+ SFXGE_OBJ_CHECK(srp, sfxge_rxq_t);
+}
+
+/* Note: This function takes ownership of *srpp. */
+static inline void
+sfxge_rx_qfpp_put(sfxge_rxq_t *srp, sfxge_rx_packet_t *srpp)
+{
+ sfxge_rx_fpp_t *srfppp = &(srp->sr_fpp);
+ mblk_t *mp = srpp->srp_mp;
+ unsigned int id;
+ size_t off;
+ sfxge_rx_fpp_putlist_t *putp;
+
+ ASSERT3P(mp->b_next, ==, NULL);
+ ASSERT3P(mp->b_prev, ==, NULL);
+
+ id = CPU->cpu_seqid & SFXGE_RX_FPP_MASK;
+ off = id * SFXGE_CPU_CACHE_SIZE;
+
+ ASSERT3P(srpp->srp_putp, ==, srfppp->srfpp_putp);
+ putp = (void *)(srpp->srp_putp + off);
+
+ mutex_enter(&(putp->srfpl_lock));
+ putp->srfpl_count++;
+ *putp->srfpl_putpp = mp;
+ putp->srfpl_putpp = &(mp->b_next);
+ mutex_exit(&(putp->srfpl_lock));
+}
+
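+/*
+ * Move packets from the per-CPU put lists onto the single get list.
+ * Returns the number of loaned packets that have yet to be returned
+ * to a put list.
+ */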
+static unsigned int
+sfxge_rx_qfpp_swizzle(sfxge_rxq_t *srp)
+{
+ sfxge_t *sp = srp->sr_sp;
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ sfxge_rx_fpp_t *srfppp = &(srp->sr_fpp);
+ unsigned int start;
+ unsigned int id;
+ mblk_t *p;
+ mblk_t **pp;
+ unsigned int count;
+ unsigned int loaned;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ /* We want to access the put list for the current CPU last */
+ id = start = (CPU->cpu_seqid + 1) & SFXGE_RX_FPP_MASK;
+
+ do {
+ sfxge_rx_fpp_putlist_t *putp;
+ size_t off;
+
+ off = id * SFXGE_CPU_CACHE_SIZE;
+ id = (id + 1) & SFXGE_RX_FPP_MASK;
+
+ putp = (void *)(srfppp->srfpp_putp + off);
+
+ /* Acquire the put list */
+ mutex_enter(&(putp->srfpl_lock));
+
+ p = putp->srfpl_putp;
+ pp = putp->srfpl_putpp;
+ count = putp->srfpl_count;
+
+ putp->srfpl_putp = NULL;
+ putp->srfpl_putpp = &(putp->srfpl_putp);
+ putp->srfpl_count = 0;
+
+ mutex_exit(&(putp->srfpl_lock));
+
+ if (p == NULL)
+ continue;
+
+ /* Add the list to the head of the get list */
+ *pp = srfppp->srfpp_get;
+ srfppp->srfpp_get = p;
+
+ /* Adjust the counters */
+ ASSERT3U(srfppp->srfpp_loaned, >=, count);
+ srfppp->srfpp_loaned -= count;
+ srfppp->srfpp_count += count;
+
+#if 0
+ /* NOTE: this probe is disabled because it is expensive!! */
+ DTRACE_PROBE2(count,
+ unsigned int, (id - 1) & SFXGE_RX_FPP_MASK,
+ unsigned int, count);
+#endif
+
+ } while (id != start);
+
+ /* Return the number of packets yet to appear in the put list */
+ loaned = srfppp->srfpp_loaned;
+
+
+ return (loaned);
+}
+
+
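+/* Free routine descriptor attached to an mblk's data block by desballoc() */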
+#define DB_FRTNP(mp) ((mp)->b_datap->db_frtnp)
+
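+/* Drain the free packet pool, freeing every packet held on the get list */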
+static void
+sfxge_rx_qfpp_empty(sfxge_rxq_t *srp)
+{
+ sfxge_t *sp = srp->sr_sp;
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ sfxge_rx_fpp_t *srfppp;
+ mblk_t *mp;
+
+ mutex_enter(&(sep->se_lock));
+ srfppp = &(srp->sr_fpp);
+
+ /* Swizzle put list to get list */
+ (void) sfxge_rx_qfpp_swizzle(srp);
+ ASSERT3U(srfppp->srfpp_loaned, ==, 0);
+
+ mp = srfppp->srfpp_get;
+ srfppp->srfpp_get = NULL;
+
+ /* Free the remainder */
+ while (mp != NULL) {
+ mblk_t *next;
+ frtn_t *freep;
+ sfxge_rx_packet_t *srpp;
+
+ next = mp->b_next;
+ mp->b_next = NULL;
+
+ ASSERT3U(srfppp->srfpp_count, >, 0);
+ srfppp->srfpp_count--;
+
+ freep = DB_FRTNP(mp);
+ /*
+ * ASSERT3P(freep->free_func, ==, sfxge_rx_qpacket_free);
+ * is implied by srpp test below
+ */
+ /*LINTED*/
+ srpp = (sfxge_rx_packet_t *)(freep->free_arg);
+ ASSERT3P(srpp->srp_mp, ==, mp);
+ ASSERT3P(mp->b_cont, ==, NULL);
+ srpp->srp_recycle = B_FALSE;
+
+ freeb(mp);
+
+ mp = next;
+ }
+ ASSERT3U(srfppp->srfpp_count, ==, 0);
+
+ srfppp->srfpp_min = 0;
+
+ mutex_exit(&(sep->se_lock));
+}
+
+/*
+ * This is an estimate of all memory consumed per RX packet.
+ * It can be inaccurate, but sp->s_rx_pkt_mem_alloc mustn't drift.
+ */
+static uint64_t
+sfxge_rx_pkt_mem_approx(const sfxge_rx_packet_t *srpp)
+{
+ return (srpp->srp_mblksize + sizeof (mblk_t) + sizeof (dblk_t) +
+ sizeof (sfxge_rx_packet_t));
+}
+
+static void
+sfxge_rx_qpacket_destroy(sfxge_rxq_t *srp, sfxge_rx_packet_t *srpp)
+{
+ sfxge_t *sp = srp->sr_sp;
+ int64_t delta = sfxge_rx_pkt_mem_approx(srpp);
+
+ ASSERT(!(srpp->srp_recycle));
+ ASSERT3P(srpp->srp_mp, ==, NULL);
+
+ srpp->srp_off = 0;
+ srpp->srp_thp = NULL;
+ srpp->srp_iphp = NULL;
+ srpp->srp_etherhp = NULL;
+ srpp->srp_size = 0;
+ srpp->srp_flags = 0;
+
+ bzero(&(srpp->srp_free), sizeof (frtn_t));
+
+ srpp->srp_mblksize = 0;
+ srpp->srp_base = NULL;
+
+ /* Unbind the DMA memory from the DMA handle */
+ srpp->srp_addr = 0;
+ (void) ddi_dma_unbind_handle(srpp->srp_dma_handle);
+
+ /* Free the DMA memory */
+ srpp->srp_base = NULL;
+ ddi_dma_mem_free(&(srpp->srp_acc_handle));
+ srpp->srp_acc_handle = NULL;
+
+ srpp->srp_putp = NULL;
+ srpp->srp_srp = NULL;
+
+ kmem_cache_free(sp->s_rpc, srpp);
+ if (sp->s_rx_pkt_mem_max)
+ atomic_add_64(&sp->s_rx_pkt_mem_alloc, -delta);
+}
+
+static void
+sfxge_rx_qpacket_free(void *arg)
+{
+ sfxge_rx_packet_t *srpp = arg;
+ sfxge_rxq_t *srp = srpp->srp_srp;
+
+ /*
+ * WARNING "man -s 9f esballoc" states:
+ * => runs sync from the thread calling freeb()
+ * => must not sleep, or access data structures that could be freed
+ */
+
+ /* Check whether we want to recycle the receive packets */
+ if (srpp->srp_recycle) {
+ frtn_t *freep;
+ mblk_t *mp;
+ size_t size;
+
+ freep = &(srpp->srp_free);
+ ASSERT3P(freep->free_func, ==, sfxge_rx_qpacket_free);
+ ASSERT3P(freep->free_arg, ==, (caddr_t)srpp);
+
+ /*
+ * Allocate a matching mblk_t before the current one is
+ * freed.
+ */
+ size = srpp->srp_mblksize;
+
+ if ((mp = desballoc(srpp->srp_base, size, BPRI_HI,
+ freep)) != NULL) {
+ srpp->srp_mp = mp;
+
+ /* NORMAL recycled case */
+ sfxge_rx_qfpp_put(srp, srpp);
+ return;
+ }
+ }
+
+ srpp->srp_mp = NULL;
+
+ sfxge_rx_qpacket_destroy(srp, srpp);
+}
+
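+/*
+ * Allocate a new receive packet: take an sfxge_rx_packet_t from the kmem
+ * cache, allocate and bind DMA memory for the data buffer, and wrap the
+ * buffer in an mblk via desballoc() so that it can be recycled through the
+ * free packet pool.
+ */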
+static sfxge_rx_packet_t *
+sfxge_rx_qpacket_create(sfxge_rxq_t *srp)
+{
+ sfxge_t *sp = srp->sr_sp;
+ sfxge_rx_fpp_t *srfppp = &(srp->sr_fpp);
+ sfxge_rx_packet_t *srpp;
+ size_t size;
+ caddr_t base;
+ size_t unit;
+ ddi_dma_cookie_t dmac;
+ unsigned int ncookies;
+ frtn_t *freep;
+ mblk_t *mp;
+ int err;
+ int rc;
+
+ size = sp->s_rx_buffer_size;
+
+ if (sp->s_rx_pkt_mem_max &&
+ (sp->s_rx_pkt_mem_alloc + size >= sp->s_rx_pkt_mem_max)) {
+ DTRACE_PROBE(rx_pkt_mem_max);
+ srp->sr_kstat.srk_rx_pkt_mem_limit++;
+ return (NULL);
+ }
+
+ /* Allocate a new packet */
+ if ((srpp = kmem_cache_alloc(sp->s_rpc, KM_NOSLEEP)) == NULL) {
+ srp->sr_kstat.srk_kcache_alloc_nomem++;
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ srpp->srp_srp = srp;
+ srpp->srp_putp = srfppp->srfpp_putp;
+
+ /* Allocate some DMA memory */
+ err = ddi_dma_mem_alloc(srpp->srp_dma_handle, size,
+ &sfxge_rx_packet_devacc, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
+ NULL, &base, &unit, &(srpp->srp_acc_handle));
+ switch (err) {
+ case DDI_SUCCESS:
+ break;
+
+ case DDI_FAILURE:
+ srp->sr_kstat.srk_dma_alloc_nomem++;
+ rc = ENOMEM;
+ goto fail2;
+
+ default:
+ srp->sr_kstat.srk_dma_alloc_fail++;
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ /* Adjust the buffer to align the start of the DMA area correctly */
+ base += sp->s_rx_buffer_align;
+ size -= sp->s_rx_buffer_align;
+
+ /* Bind the DMA memory to the DMA handle */
+ err = ddi_dma_addr_bind_handle(srpp->srp_dma_handle, NULL,
+ base, size, DDI_DMA_READ | DDI_DMA_STREAMING,
+ DDI_DMA_DONTWAIT, NULL, &dmac, &ncookies);
+ switch (err) {
+ case DDI_DMA_MAPPED:
+ break;
+
+ case DDI_DMA_INUSE:
+ srp->sr_kstat.srk_dma_bind_fail++;
+ rc = EEXIST;
+ goto fail3;
+
+ case DDI_DMA_NORESOURCES:
+ srp->sr_kstat.srk_dma_bind_nomem++;
+ rc = ENOMEM;
+ goto fail3;
+
+ case DDI_DMA_NOMAPPING:
+ srp->sr_kstat.srk_dma_bind_fail++;
+ rc = ENOTSUP;
+ goto fail3;
+
+ case DDI_DMA_TOOBIG:
+ srp->sr_kstat.srk_dma_bind_fail++;
+ rc = EFBIG;
+ goto fail3;
+
+ default:
+ srp->sr_kstat.srk_dma_bind_fail++;
+ rc = EFAULT;
+ goto fail3;
+ }
+ ASSERT3U(ncookies, ==, 1);
+
+ srpp->srp_addr = dmac.dmac_laddress;
+
+ srpp->srp_base = (unsigned char *)base;
+ srpp->srp_mblksize = size;
+
+ /*
+	 * Allocate a STREAMS block: desballoc() attaches the existing DMA
+	 * buffer rather than copying it, so only the message headers are
+	 * allocated here.
+ */
+ freep = &(srpp->srp_free);
+ freep->free_func = sfxge_rx_qpacket_free;
+ freep->free_arg = (caddr_t)srpp;
+
+ if ((mp = desballoc(srpp->srp_base, size, BPRI_HI, freep)) == NULL) {
+ srp->sr_kstat.srk_desballoc_fail++;
+ rc = ENOMEM;
+ goto fail4;
+ }
+
+ srpp->srp_mp = mp;
+ srpp->srp_recycle = B_TRUE;
+
+ if (sp->s_rx_pkt_mem_max) {
+ int64_t delta = sfxge_rx_pkt_mem_approx(srpp);
+ atomic_add_64(&sp->s_rx_pkt_mem_alloc, delta);
+ }
+
+ return (srpp);
+
+fail4:
+ DTRACE_PROBE(fail4);
+
+ bzero(&(srpp->srp_free), sizeof (frtn_t));
+
+ srpp->srp_mblksize = 0;
+ srpp->srp_base = NULL;
+
+ /* Unbind the DMA memory from the DMA handle */
+ srpp->srp_addr = 0;
+ (void) ddi_dma_unbind_handle(srpp->srp_dma_handle);
+
+fail3:
+ DTRACE_PROBE(fail3);
+
+ /* Free the DMA memory */
+ ddi_dma_mem_free(&(srpp->srp_acc_handle));
+ srpp->srp_acc_handle = NULL;
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ srpp->srp_putp = NULL;
+ srpp->srp_srp = NULL;
+
+ kmem_cache_free(sp->s_rpc, srpp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (NULL);
+}
+
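+/* Maximum number of receive descriptors posted per efx_rx_qpost() call */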
+#define SFXGE_REFILL_BATCH 64
+
+/* Try to refill the RX descriptor ring from the associated free pkt pool */
+static void
+sfxge_rx_qrefill(sfxge_rxq_t *srp, unsigned int target)
+{
+ sfxge_t *sp = srp->sr_sp;
+ sfxge_rx_fpp_t *srfppp = &(srp->sr_fpp);
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ efsys_dma_addr_t addr[SFXGE_REFILL_BATCH];
+ mblk_t *mp;
+ int ntodo;
+ unsigned int count;
+ unsigned int batch;
+ unsigned int rxfill;
+ unsigned int mblksize;
+
+ prefetch_read_many(sp->s_enp);
+ prefetch_read_many(srp->sr_erp);
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ if (srp->sr_state != SFXGE_RXQ_STARTED)
+ return;
+
+ rxfill = srp->sr_added - srp->sr_completed;
+ ASSERT3U(rxfill, <=, EFX_RXQ_LIMIT(sp->s_rxq_size));
+ ntodo = min(EFX_RXQ_LIMIT(sp->s_rxq_size) - rxfill, target);
+ ASSERT3U(ntodo, <=, EFX_RXQ_LIMIT(sp->s_rxq_size));
+
+ if (ntodo == 0)
+ goto out;
+
+ (void) sfxge_rx_qfpp_swizzle(srp);
+
+ mp = srfppp->srfpp_get;
+ count = srfppp->srfpp_count;
+ mblksize = sp->s_rx_buffer_size - sp->s_rx_buffer_align;
+
+ batch = 0;
+ while (ntodo-- > 0) {
+ mblk_t *next;
+ frtn_t *freep;
+ sfxge_rx_packet_t *srpp;
+ unsigned int id;
+
+ if (mp == NULL)
+ break;
+
+ next = mp->b_next;
+ mp->b_next = NULL;
+
+ if (next != NULL)
+ prefetch_read_many(next);
+
+ freep = DB_FRTNP(mp);
+ /*LINTED*/
+ srpp = (sfxge_rx_packet_t *)(freep->free_arg);
+ ASSERT3P(srpp->srp_mp, ==, mp);
+
+ /* The MTU may have changed since the packet was allocated */
+ if (MBLKSIZE(mp) != mblksize) {
+ srpp->srp_recycle = B_FALSE;
+
+ freeb(mp);
+
+ --count;
+ mp = next;
+ continue;
+ }
+
+ srpp->srp_off = 0;
+ srpp->srp_thp = NULL;
+ srpp->srp_iphp = NULL;
+ srpp->srp_etherhp = NULL;
+ srpp->srp_size = 0;
+ srpp->srp_flags = EFX_DISCARD;
+
+ id = (srp->sr_added + batch) & (sp->s_rxq_size - 1);
+ ASSERT(srp->sr_srpp[id] == NULL);
+ srp->sr_srpp[id] = srpp;
+
+ addr[batch++] = srpp->srp_addr;
+ if (batch == SFXGE_REFILL_BATCH) {
+ efx_rx_qpost(srp->sr_erp, addr, mblksize, batch,
+ srp->sr_completed, srp->sr_added);
+ srp->sr_added += batch;
+ batch = 0;
+ }
+
+ --count;
+ mp = next;
+ }
+
+ srfppp->srfpp_get = mp;
+ srfppp->srfpp_count = count;
+
+ if (batch != 0) {
+ efx_rx_qpost(srp->sr_erp, addr, mblksize, batch,
+ srp->sr_completed, srp->sr_added);
+ srp->sr_added += batch;
+ }
+
+ efx_rx_qpush(srp->sr_erp, srp->sr_added, &srp->sr_pushed);
+
+out:
+ if (srfppp->srfpp_count < srfppp->srfpp_min)
+ srfppp->srfpp_min = srfppp->srfpp_count;
+}
+
+/* Preallocate packets and put them in the free packet pool */
+static void
+sfxge_rx_qpreallocate(sfxge_rxq_t *srp, int nprealloc)
+{
+ sfxge_rx_fpp_t *srfppp = &((srp)->sr_fpp);
+ srfppp->srfpp_lowat = nprealloc;
+ while (nprealloc-- > 0) {
+ sfxge_rx_packet_t *srpp;
+
+ if ((srpp = sfxge_rx_qpacket_create(srp)) == NULL)
+ break;
+ sfxge_rx_qfpp_put(srp, srpp);
+ }
+}
+
+/* Try to refill the RX descriptor ring by allocating new packets */
+static void
+sfxge_rx_qfill(sfxge_rxq_t *srp, unsigned int target)
+{
+ sfxge_t *sp = srp->sr_sp;
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ unsigned int batch;
+ unsigned int rxfill;
+ unsigned int mblksize;
+ int ntodo;
+ efsys_dma_addr_t addr[SFXGE_REFILL_BATCH];
+ mblk_t *mp = NULL;
+
+ prefetch_read_many(sp->s_enp);
+ prefetch_read_many(srp->sr_erp);
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ if (srp->sr_state != SFXGE_RXQ_STARTED)
+ return;
+
+ rxfill = srp->sr_added - srp->sr_completed;
+ ASSERT3U(rxfill, <=, EFX_RXQ_LIMIT(sp->s_rxq_size));
+ ntodo = min(EFX_RXQ_LIMIT(sp->s_rxq_size) - rxfill, target);
+ ASSERT3U(ntodo, <=, EFX_RXQ_LIMIT(sp->s_rxq_size));
+
+ if (ntodo == 0)
+ return;
+
+ mblksize = sp->s_rx_buffer_size - sp->s_rx_buffer_align;
+
+ batch = 0;
+ while (ntodo-- > 0) {
+ sfxge_rx_packet_t *srpp;
+ unsigned int id;
+
+ if ((srpp = sfxge_rx_qpacket_create(srp)) == NULL)
+ break;
+
+ mp = srpp->srp_mp;
+
+ ASSERT3U(MBLKSIZE(mp), ==, mblksize);
+
+ ASSERT3U(srpp->srp_off, ==, 0);
+ ASSERT3P(srpp->srp_thp, ==, NULL);
+ ASSERT3P(srpp->srp_iphp, ==, NULL);
+ ASSERT3P(srpp->srp_etherhp, ==, NULL);
+ ASSERT3U(srpp->srp_size, ==, 0);
+
+ srpp->srp_flags = EFX_DISCARD;
+
+ id = (srp->sr_added + batch) & (sp->s_rxq_size - 1);
+ ASSERT(srp->sr_srpp[id] == NULL);
+ srp->sr_srpp[id] = srpp;
+
+ addr[batch++] = srpp->srp_addr;
+ if (batch == SFXGE_REFILL_BATCH) {
+ efx_rx_qpost(srp->sr_erp, addr, mblksize, batch,
+ srp->sr_completed, srp->sr_added);
+ srp->sr_added += batch;
+ batch = 0;
+ }
+ }
+
+ if (batch != 0) {
+ efx_rx_qpost(srp->sr_erp, addr, mblksize, batch,
+ srp->sr_completed, srp->sr_added);
+ srp->sr_added += batch;
+ }
+
+ efx_rx_qpush(srp->sr_erp, srp->sr_added, &srp->sr_pushed);
+}
+
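+/*
+ * Trim the free packet pool: top up the descriptor ring first, then free
+ * packets that have not been needed since the last trim, keeping at least
+ * the pool's low water mark.
+ */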
+void
+sfxge_rx_qfpp_trim(sfxge_rxq_t *srp)
+{
+ sfxge_rx_fpp_t *srfppp = &(srp->sr_fpp);
+ sfxge_t *sp = srp->sr_sp;
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ mblk_t *p;
+ mblk_t **pp;
+ int count;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ if (srp->sr_state != SFXGE_RXQ_STARTED)
+ goto done;
+
+ /* Make sure the queue is full */
+ sfxge_rx_qrefill(srp, EFX_RXQ_LIMIT(sp->s_rxq_size));
+
+ /* The refill may have emptied the pool */
+ if (srfppp->srfpp_min == 0)
+ goto done;
+
+ /* Don't trim below the pool's low water mark */
+ if (srfppp->srfpp_count <= srfppp->srfpp_lowat)
+ goto done;
+
+ ASSERT(srfppp->srfpp_min <= srfppp->srfpp_count);
+
+	/* Trim to the largest of srfppp->srfpp_min and srfppp->srfpp_lowat */
+ if (srfppp->srfpp_lowat > srfppp->srfpp_min)
+ count = srfppp->srfpp_count - srfppp->srfpp_lowat;
+ else
+ count = srfppp->srfpp_count - srfppp->srfpp_min;
+
+ /* Walk the get list */
+ pp = &(srfppp->srfpp_get);
+ while (--count >= 0) {
+ ASSERT(pp);
+ p = *pp;
+ ASSERT(p != NULL);
+
+ pp = &(p->b_next);
+ }
+ ASSERT(pp);
+ p = *pp;
+
+ /* Truncate the get list */
+ *pp = NULL;
+
+ /* Free the remainder */
+ while (p != NULL) {
+ mblk_t *next;
+ frtn_t *freep;
+ sfxge_rx_packet_t *srpp;
+
+ next = p->b_next;
+ p->b_next = NULL;
+
+ ASSERT3U(srfppp->srfpp_min, >, 0);
+ srfppp->srfpp_min--;
+ srfppp->srfpp_count--;
+
+ freep = DB_FRTNP(p);
+ /*LINTED*/
+ srpp = (sfxge_rx_packet_t *)(freep->free_arg);
+ ASSERT3P(srpp->srp_mp, ==, p);
+
+ srpp->srp_recycle = B_FALSE;
+
+ freeb(p);
+
+ p = next;
+ }
+
+done:
+ srfppp->srfpp_min = srfppp->srfpp_count;
+}
+
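+/*
+ * Periodic timeout handler: post a "magic" event to the RXQ's event queue
+ * so that free packet pool trimming runs in event queue context, then
+ * reschedule the timeout.
+ */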
+static void
+sfxge_rx_qpoll(void *arg)
+{
+ sfxge_rxq_t *srp = arg;
+ sfxge_t *sp = srp->sr_sp;
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ uint16_t magic;
+
+ /*
+ * man timeout(9f) states that this code should adhere to the
+ * same requirements as a softirq handler - DO NOT BLOCK
+ */
+
+ /*
+ * Post an event to the event queue to cause the free packet pool to be
+ * trimmed if it is oversize.
+ */
+ magic = SFXGE_MAGIC_RX_QFPP_TRIM | index;
+
+#if defined(DEBUG)
+ /* This is guaranteed due to the start/stop order of rx and ev */
+ ASSERT3U(sep->se_state, ==, SFXGE_EVQ_STARTED);
+ ASSERT3U(srp->sr_state, ==, SFXGE_RXQ_STARTED);
+#else
+ /*
+ * Bug22691 WORKAROUND:
+ * This handler has been observed in the field to be invoked for a
+ * queue in the INITIALIZED state, which should never happen.
+ * Until the mechanism for this is properly understood, add defensive
+ * checks.
+ */
+ if ((sep->se_state != SFXGE_EVQ_STARTED) ||
+ (srp->sr_state != SFXGE_RXQ_STARTED) ||
+ (!sep->se_eep)) {
+ dev_err(sp->s_dip, CE_WARN, SFXGE_CMN_ERR
+ "RXQ[%d] bad state in sfxge_rx_qpoll %d %d %p",
+ index, sep->se_state, srp->sr_state, sep->se_eep);
+ return;
+ }
+#endif
+ efx_ev_qpost(sep->se_eep, magic);
+
+ srp->sr_tid = timeout(sfxge_rx_qpoll, srp,
+ drv_usectohz(sp->s_rxq_poll_usec));
+}
+
+static void
+sfxge_rx_qpoll_start(sfxge_rxq_t *srp)
+{
+ sfxge_t *sp = srp->sr_sp;
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+ ASSERT3U(srp->sr_state, ==, SFXGE_RXQ_STARTED);
+
+ /* Schedule a poll */
+ ASSERT3P(srp->sr_tid, ==, 0);
+ srp->sr_tid = timeout(sfxge_rx_qpoll, srp, 0);
+}
+
+static void
+sfxge_rx_qpoll_stop(sfxge_rxq_t *srp)
+{
+ sfxge_t *sp = srp->sr_sp;
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ timeout_id_t tid;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+ ASSERT3U(srp->sr_state, ==, SFXGE_RXQ_STARTED);
+
+ /*
+ * Cancel the qpoll timer. Care is needed as this function
+ * can race with sfxge_rx_qpoll() for timeout id updates.
+ *
+ * Do not hold locks used by any timeout(9f) handlers across
+ * calls to untimeout(9f) as this will deadlock.
+ */
+ tid = 0;
+ while ((srp->sr_tid != 0) && (srp->sr_tid != tid)) {
+ tid = srp->sr_tid;
+ (void) untimeout(tid);
+ }
+ srp->sr_tid = 0;
+}
+
+static int
+sfxge_rx_kstat_update(kstat_t *ksp, int rw)
+{
+ sfxge_rxq_t *srp = ksp->ks_private;
+ sfxge_t *sp = srp->sr_sp;
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ kstat_named_t *knp;
+ int rc;
+
+ if (rw != KSTAT_READ) {
+ rc = EACCES;
+ goto fail1;
+ }
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+ if (srp->sr_state != SFXGE_RXQ_STARTED)
+ goto done;
+
+ knp = ksp->ks_data;
+ /* NB pointer post-increment below */
+ knp++->value.ui32 = srp->sr_kstat.srk_rx_pkt_mem_limit;
+ knp++->value.ui32 = srp->sr_kstat.srk_kcache_alloc_nomem;
+ knp++->value.ui32 = srp->sr_kstat.srk_dma_alloc_nomem;
+ knp++->value.ui32 = srp->sr_kstat.srk_dma_alloc_fail;
+ knp++->value.ui32 = srp->sr_kstat.srk_dma_bind_nomem;
+ knp++->value.ui32 = srp->sr_kstat.srk_dma_bind_fail;
+ knp++->value.ui32 = srp->sr_kstat.srk_desballoc_fail;
+ knp++->value.ui32 = srp->sr_kstat.srk_rxq_empty_discard;
+
+done:
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static int
+sfxge_rx_kstat_init(sfxge_rxq_t *srp)
+{
+ sfxge_t *sp = srp->sr_sp;
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ dev_info_t *dip = sp->s_dip;
+ char name[MAXNAMELEN];
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ int rc;
+
+ /* Create the set */
+ (void) snprintf(name, MAXNAMELEN - 1, "%s_rxq%04d",
+ ddi_driver_name(dip), index);
+
+ if ((ksp = kstat_create((char *)ddi_driver_name(dip),
+ ddi_get_instance(dip), name, "rxq", KSTAT_TYPE_NAMED,
+ SFXGE_RX_NSTATS, 0)) == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ srp->sr_ksp = ksp;
+
+ ksp->ks_update = sfxge_rx_kstat_update;
+ ksp->ks_private = srp;
+ ksp->ks_lock = &(sep->se_lock);
+
+ /* Initialise the named stats */
+ knp = ksp->ks_data;
+ kstat_named_init(knp, "rx_pkt_mem_limit", KSTAT_DATA_UINT32);
+ knp++;
+ kstat_named_init(knp, "kcache_alloc_nomem", KSTAT_DATA_UINT32);
+ knp++;
+ kstat_named_init(knp, "dma_alloc_nomem", KSTAT_DATA_UINT32);
+ knp++;
+ kstat_named_init(knp, "dma_alloc_fail", KSTAT_DATA_UINT32);
+ knp++;
+ kstat_named_init(knp, "dma_bind_nomem", KSTAT_DATA_UINT32);
+ knp++;
+ kstat_named_init(knp, "dma_bind_fail", KSTAT_DATA_UINT32);
+ knp++;
+ kstat_named_init(knp, "desballoc_fail", KSTAT_DATA_UINT32);
+ knp++;
+ kstat_named_init(knp, "rxq_empty_discard", KSTAT_DATA_UINT32);
+
+ kstat_install(ksp);
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static int
+sfxge_rx_qinit(sfxge_t *sp, unsigned int index)
+{
+ sfxge_rxq_t *srp;
+ int rc;
+
+ ASSERT3U(index, <, SFXGE_RX_SCALE_MAX);
+
+ if ((srp = kmem_cache_alloc(sp->s_rqc, KM_SLEEP)) == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+ ASSERT3U(srp->sr_state, ==, SFXGE_RXQ_UNINITIALIZED);
+
+ srp->sr_index = index;
+ sp->s_srp[index] = srp;
+
+ if ((rc = sfxge_rx_kstat_init(srp)) != 0)
+ goto fail2;
+
+ srp->sr_state = SFXGE_RXQ_INITIALIZED;
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+ kmem_cache_free(sp->s_rqc, srp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
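+/*
+ * Start the receive queue: program the buffer table, create and enable the
+ * hardware queue, start the poll timer and fill the descriptor ring.
+ */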
+static int
+sfxge_rx_qstart(sfxge_t *sp, unsigned int index)
+{
+ sfxge_evq_t *sep = sp->s_sep[index];
+ sfxge_rxq_t *srp;
+ efsys_mem_t *esmp;
+ efx_nic_t *enp;
+ unsigned int level;
+ int rc;
+
+ mutex_enter(&(sep->se_lock));
+ srp = sp->s_srp[index];
+ enp = sp->s_enp;
+ esmp = &(srp->sr_mem);
+
+ ASSERT3U(srp->sr_state, ==, SFXGE_RXQ_INITIALIZED);
+ ASSERT3U(sep->se_state, ==, SFXGE_EVQ_STARTED);
+
+ /* Zero the memory */
+ bzero(esmp->esm_base, EFX_RXQ_SIZE(sp->s_rxq_size));
+
+ /* Program the buffer table */
+ if ((rc = sfxge_sram_buf_tbl_set(sp, srp->sr_id, esmp,
+ EFX_RXQ_NBUFS(sp->s_rxq_size))) != 0)
+ goto fail1;
+
+ /* Create the receive queue */
+ if ((rc = efx_rx_qcreate(enp, index, index, EFX_RXQ_TYPE_DEFAULT,
+ esmp, sp->s_rxq_size, srp->sr_id, sep->se_eep, &(srp->sr_erp)))
+ != 0)
+ goto fail2;
+
+ /* Enable the receive queue */
+ efx_rx_qenable(srp->sr_erp);
+
+ /* Set the water marks */
+ srp->sr_hiwat = EFX_RXQ_LIMIT(sp->s_rxq_size) * 9 / 10;
+ srp->sr_lowat = srp->sr_hiwat / 2;
+
+ srp->sr_state = SFXGE_RXQ_STARTED;
+ srp->sr_flush = SFXGE_FLUSH_INACTIVE;
+
+ sfxge_rx_qpoll_start(srp);
+
+ /* Try to fill the queue from the pool */
+ sfxge_rx_qrefill(srp, EFX_RXQ_LIMIT(sp->s_rxq_size));
+
+ /*
+	 * If there were insufficient buffers in the pool to reach at least
+	 * a batch then allocate some.
+ */
+ level = srp->sr_added - srp->sr_completed;
+ if (level < SFXGE_RX_BATCH)
+ sfxge_rx_qfill(srp, SFXGE_RX_BATCH);
+
+ mutex_exit(&(sep->se_lock));
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Clear entries from the buffer table */
+ sfxge_sram_buf_tbl_clear(sp, srp->sr_id,
+ EFX_RXQ_NBUFS(sp->s_rxq_size));
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(sep->se_lock));
+
+ return (rc);
+}
+
+static void
+sfxge_rx_qflow_complete(sfxge_rxq_t *srp, sfxge_rx_flow_t *srfp)
+{
+ mblk_t *mp;
+ struct ether_header *etherhp;
+ struct ip *iphp;
+ struct tcphdr *thp;
+
+ if (srfp->srf_mp == NULL)
+ return;
+
+ mp = srfp->srf_mp;
+ etherhp = srfp->srf_etherhp;
+ iphp = srfp->srf_iphp;
+ thp = srfp->srf_last_thp;
+
+ ASSERT3U(((etherhp->ether_type == htons(ETHERTYPE_VLAN)) ?
+ sizeof (struct ether_vlan_header) :
+ sizeof (struct ether_header)) +
+ srfp->srf_len, ==, msgdsize(mp));
+
+ ASSERT3U(srfp->srf_len & 0xffff, ==, srfp->srf_len);
+ iphp->ip_len = htons(srfp->srf_len);
+
+ srfp->srf_first_thp->th_ack = thp->th_ack;
+ srfp->srf_first_thp->th_win = thp->th_win;
+ srfp->srf_first_thp->th_flags = thp->th_flags;
+
+ DTRACE_PROBE2(flow_complete, uint32_t, srfp->srf_tag,
+ size_t, srfp->srf_len);
+
+ srfp->srf_mp = NULL;
+ srfp->srf_len = 0;
+
+ ASSERT(mp->b_next == NULL);
+ *(srp->sr_mpp) = mp;
+ srp->sr_mpp = &(mp->b_next);
+}
+
+static boolean_t
+sfxge_rx_qflow_add(sfxge_rxq_t *srp, sfxge_rx_flow_t *srfp,
+ sfxge_rx_packet_t *srpp, clock_t now)
+{
+ sfxge_t *sp = srp->sr_sp;
+ struct ether_header *etherhp = srpp->srp_etherhp;
+ struct ip *iphp = srpp->srp_iphp;
+ struct tcphdr *thp = srpp->srp_thp;
+ size_t off = srpp->srp_off;
+ size_t size = (size_t)(srpp->srp_size);
+ mblk_t *mp = srpp->srp_mp;
+ uint32_t seq;
+ unsigned int shift;
+
+ ASSERT3U(MBLKL(mp), ==, off + size);
+ ASSERT3U(DB_CKSUMFLAGS(mp), ==,
+ HCK_FULLCKSUM | HCK_FULLCKSUM_OK | HCK_IPV4_HDRCKSUM);
+
+ seq = htonl(thp->th_seq);
+
+ /*
+ * If the time between this segment and the last is greater than RTO
+ * then consider this a new flow.
+ */
+ if (now - srfp->srf_lbolt > srp->sr_rto) {
+ srfp->srf_count = 1;
+ srfp->srf_seq = seq + size;
+
+ goto fail1;
+ }
+
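+	/*
+	 * Out-of-order segment: cap the in-order count at the slow-start
+	 * threshold and halve it, then complete any pending flow (fail2)
+	 * so that this segment is delivered separately.
+	 */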
+ if (seq != srfp->srf_seq) {
+ if (srfp->srf_count > SFXGE_SLOW_START)
+ srfp->srf_count = SFXGE_SLOW_START;
+
+ srfp->srf_count >>= 1;
+
+ srfp->srf_count++;
+ srfp->srf_seq = seq + size;
+
+ goto fail2;
+ }
+
+ /* Update the in-order segment count and sequence number */
+ srfp->srf_count++;
+ srfp->srf_seq = seq + size;
+
+ /* Don't merge across pure ACK, URG, SYN or RST segments */
+ if (size == 0 || thp->th_flags & (TH_URG | TH_SYN | TH_RST) ||
+ thp->th_urp != 0)
+ goto fail3;
+
+ /*
+ * If the in-order segment count has not yet reached the slow-start
+ * threshold then we cannot coalesce.
+ */
+ if (srfp->srf_count < SFXGE_SLOW_START)
+ goto fail4;
+
+ /* Scale up the packet size from 4k (the maximum being 64k) */
+ ASSERT3U(srfp->srf_count, >=, SFXGE_SLOW_START);
+ shift = MIN(srfp->srf_count - SFXGE_SLOW_START + 12, 16);
+ if (srfp->srf_len + size >= (1 << shift))
+ sfxge_rx_qflow_complete(srp, srfp);
+
+ ASSERT(mp->b_cont == NULL);
+
+ if (srfp->srf_mp == NULL) {
+ /* First packet in this flow */
+ srfp->srf_etherhp = etherhp;
+ srfp->srf_iphp = iphp;
+ srfp->srf_first_thp = srfp->srf_last_thp = thp;
+
+ ASSERT3P(mp->b_cont, ==, NULL);
+ srfp->srf_mp = mp;
+ srfp->srf_mpp = &(mp->b_cont);
+
+ srfp->srf_len = ntohs(iphp->ip_len);
+
+ /*
+ * If the flow is not already in the list of occupied flows then
+ * add it.
+ */
+ if (srfp->srf_next == NULL &&
+ srp->sr_srfpp != &(srfp->srf_next)) {
+ *(srp->sr_srfpp) = srfp;
+ srp->sr_srfpp = &(srfp->srf_next);
+ }
+ } else {
+ /* Later packet in this flow - skip TCP header */
+ srfp->srf_last_thp = thp;
+
+ mp->b_rptr += off;
+ ASSERT3U(MBLKL(mp), ==, size);
+
+ ASSERT3P(mp->b_cont, ==, NULL);
+ *(srfp->srf_mpp) = mp;
+ srfp->srf_mpp = &(mp->b_cont);
+
+ srfp->srf_len += size;
+
+ ASSERT(srfp->srf_next != NULL ||
+ srp->sr_srfpp == &(srfp->srf_next));
+ }
+
+ DTRACE_PROBE2(flow_add, uint32_t, srfp->srf_tag, size_t, size);
+
+ /*
+ * Try to align coalesced segments on push boundaries, unless they
+ * are too frequent.
+ */
+ if (sp->s_rx_coalesce_mode == SFXGE_RX_COALESCE_ALLOW_PUSH &&
+ thp->th_flags & TH_PUSH)
+ sfxge_rx_qflow_complete(srp, srfp);
+
+ srfp->srf_lbolt = now;
+ return (B_TRUE);
+
+fail4:
+fail3:
+fail2:
+fail1:
+ sfxge_rx_qflow_complete(srp, srfp);
+
+ srfp->srf_lbolt = now;
+ return (B_FALSE);
+}
+
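+/*
+ * Software receive coalescing (LRO): walk the chain of completed packets
+ * and try to append each in-order TCP segment to a flow in the RXQ flow
+ * table. Segments that cannot be coalesced are put back on the queue's
+ * delivery chain unmodified.
+ */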
+void
+sfxge_rx_qpacket_coalesce(sfxge_rxq_t *srp)
+{
+ sfxge_t *sp = srp->sr_sp;
+ clock_t now;
+ mblk_t *mp;
+ sfxge_rx_flow_t *srfp;
+
+ ASSERT(sp->s_rx_coalesce_mode != SFXGE_RX_COALESCE_OFF);
+
+ now = ddi_get_lbolt();
+
+ mp = srp->sr_mp;
+
+ srp->sr_mp = NULL;
+ srp->sr_mpp = &(srp->sr_mp);
+
+ /* Start with the last flow to be appended to */
+ srfp = *(srp->sr_srfpp);
+
+ while (mp != NULL) {
+ frtn_t *freep;
+ sfxge_rx_packet_t *srpp;
+ struct ether_header *etherhp;
+ struct ip *iphp;
+ struct tcphdr *thp;
+ size_t off;
+ size_t size;
+ uint16_t ether_tci;
+ uint32_t hash;
+ uint32_t tag;
+ mblk_t *next;
+ sfxge_packet_type_t pkt_type;
+ uint16_t sport, dport;
+
+ next = mp->b_next;
+ mp->b_next = NULL;
+
+ if (next != NULL)
+ prefetch_read_many(next);
+
+ freep = DB_FRTNP(mp);
+ /*LINTED*/
+ srpp = (sfxge_rx_packet_t *)(freep->free_arg);
+ ASSERT3P(srpp->srp_mp, ==, mp);
+
+ /* If the packet is not TCP then we cannot coalesce it */
+ if (~(srpp->srp_flags) & EFX_PKT_TCP)
+ goto reject;
+
+ /*
+ * If the packet is not fully checksummed then we cannot
+ * coalesce it.
+ */
+ if (~(srpp->srp_flags) & (EFX_CKSUM_TCPUDP | EFX_CKSUM_IPV4))
+ goto reject;
+
+ /* Parse the TCP header */
+ pkt_type = sfxge_pkthdr_parse(mp, &etherhp, &iphp, &thp, &off,
+ &size, &sport, &dport);
+ ASSERT(pkt_type == SFXGE_PACKET_TYPE_IPV4_TCP);
+ ASSERT(etherhp != NULL);
+ ASSERT(iphp != NULL);
+ ASSERT(thp != NULL);
+ ASSERT(off != 0);
+
+ if ((iphp->ip_off & ~htons(IP_DF)) != 0)
+ goto reject;
+
+ if (etherhp->ether_type == htons(ETHERTYPE_VLAN)) {
+ struct ether_vlan_header *ethervhp;
+
+ ethervhp = (struct ether_vlan_header *)etherhp;
+ ether_tci = ethervhp->ether_tci;
+ } else {
+ ether_tci = 0;
+ }
+
+ /*
+ * Make sure any minimum length padding is stripped
+ * before we try to add the packet to a flow.
+ */
+ ASSERT3U(sp->s_rx_prefix_size + MBLKL(mp), ==,
+ (size_t)(srpp->srp_size));
+ ASSERT3U(sp->s_rx_prefix_size + off + size, <=,
+ (size_t)(srpp->srp_size));
+
+ if (sp->s_rx_prefix_size + off + size <
+ (size_t)(srpp->srp_size))
+ mp->b_wptr = mp->b_rptr + off + size;
+
+ /*
+ * If there is no current flow, or the segment does not match
+ * the current flow then we must attempt to look up the
+ * correct flow in the table.
+ */
+ if (srfp == NULL)
+ goto lookup;
+
+ if (srfp->srf_saddr != iphp->ip_src.s_addr ||
+ srfp->srf_daddr != iphp->ip_dst.s_addr)
+ goto lookup;
+
+ if (srfp->srf_sport != thp->th_sport ||
+ srfp->srf_dport != thp->th_dport)
+ goto lookup;
+
+ if (srfp->srf_tci != ether_tci)
+ goto lookup;
+
+add:
+ ASSERT(srfp != NULL);
+
+ srpp->srp_etherhp = etherhp;
+ srpp->srp_iphp = iphp;
+ srpp->srp_thp = thp;
+ srpp->srp_off = off;
+
+ ASSERT3U(size, <, (1 << 16));
+ srpp->srp_size = (uint16_t)size;
+
+ /* Try to append the packet to the flow */
+ if (!sfxge_rx_qflow_add(srp, srfp, srpp, now))
+ goto reject;
+
+ mp = next;
+ continue;
+
+lookup:
+ /*
+ * If there is a prefix area then read the hash from that,
+ * otherwise calculate it.
+ */
+ if (sp->s_rx_prefix_size != 0) {
+ hash = efx_psuedo_hdr_hash_get(sp->s_enp,
+ EFX_RX_HASHALG_TOEPLITZ,
+ DB_BASE(mp));
+ } else {
+ SFXGE_TCP_HASH(sp,
+ &iphp->ip_src.s_addr,
+ thp->th_sport,
+ &iphp->ip_dst.s_addr,
+ thp->th_dport,
+ hash);
+ }
+
+ srfp = &(srp->sr_flow[(hash >> 6) % SFXGE_MAX_FLOW]);
+ tag = hash + 1; /* Make sure it's not zero */
+
+ /*
+ * If the flow we have found does not match the hash then
+ * it may be an unused flow, or it may be stale.
+ */
+ if (tag != srfp->srf_tag) {
+ if (srfp->srf_count != 0) {
+ if (now - srfp->srf_lbolt <= srp->sr_rto)
+ goto reject;
+ }
+
+ if (srfp->srf_mp != NULL)
+ goto reject;
+
+ /* Start a new flow */
+ ASSERT(srfp->srf_next == NULL);
+
+ srfp->srf_tag = tag;
+
+ srfp->srf_saddr = iphp->ip_src.s_addr;
+ srfp->srf_daddr = iphp->ip_dst.s_addr;
+ srfp->srf_sport = thp->th_sport;
+ srfp->srf_dport = thp->th_dport;
+ srfp->srf_tci = ether_tci;
+
+ srfp->srf_count = 0;
+ srfp->srf_seq = ntohl(thp->th_seq);
+
+ srfp->srf_lbolt = now;
+ goto add;
+ }
+
+ /*
+ * If the flow we have found does match the hash then it could
+ * still be an alias.
+ */
+ if (srfp->srf_saddr != iphp->ip_src.s_addr ||
+ srfp->srf_daddr != iphp->ip_dst.s_addr)
+ goto reject;
+
+ if (srfp->srf_sport != thp->th_sport ||
+ srfp->srf_dport != thp->th_dport)
+ goto reject;
+
+ if (srfp->srf_tci != ether_tci)
+ goto reject;
+
+ goto add;
+
+reject:
+ *(srp->sr_mpp) = mp;
+ srp->sr_mpp = &(mp->b_next);
+
+ mp = next;
+ }
+}
+
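+/*
+ * Process receive completions: validate each completed packet, set up its
+ * checksum flags, optionally coalesce TCP segments, pass the resulting
+ * chain to GLD and then replenish the descriptor ring.
+ */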
+void
+sfxge_rx_qcomplete(sfxge_rxq_t *srp, boolean_t eop)
+{
+ sfxge_t *sp = srp->sr_sp;
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ unsigned int completed;
+ sfxge_rx_fpp_t *srfppp = &(srp->sr_fpp);
+ unsigned int level;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ ASSERT(srp->sr_mp == NULL);
+ ASSERT(srp->sr_mpp == &(srp->sr_mp));
+
+ completed = srp->sr_completed;
+ while (completed != srp->sr_pending) {
+ unsigned int id;
+ sfxge_rx_packet_t *srpp;
+ mblk_t *mp;
+ size_t size;
+ uint16_t flags;
+ int rc;
+
+ id = completed++ & (sp->s_rxq_size - 1);
+
+ if (srp->sr_pending - completed >= 4) {
+ unsigned int prefetch;
+
+ prefetch = (id + 4) & (sp->s_rxq_size - 1);
+
+ srpp = srp->sr_srpp[prefetch];
+ ASSERT(srpp != NULL);
+
+ mp = srpp->srp_mp;
+ prefetch_read_many(mp->b_datap);
+ } else if (completed == srp->sr_pending) {
+ prefetch_read_many(srp->sr_mp);
+ }
+
+ srpp = srp->sr_srpp[id];
+ ASSERT(srpp != NULL);
+
+ srp->sr_srpp[id] = NULL;
+
+ mp = srpp->srp_mp;
+ ASSERT(mp->b_cont == NULL);
+
+ /* when called from sfxge_rx_qstop() */
+ if (srp->sr_state != SFXGE_RXQ_STARTED)
+ goto discard;
+
+ if (srpp->srp_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
+ goto discard;
+
+ /* Make the data visible to the kernel */
+ rc = ddi_dma_sync(srpp->srp_dma_handle, 0,
+ sp->s_rx_buffer_size, DDI_DMA_SYNC_FORKERNEL);
+ ASSERT3P(rc, ==, DDI_SUCCESS);
+
+		/* Read the length from the pseudo-header if required */
+ if (srpp->srp_flags & EFX_PKT_PREFIX_LEN) {
+ rc = efx_psuedo_hdr_pkt_length_get(sp->s_enp,
+ mp->b_rptr,
+ &srpp->srp_size);
+ ASSERT3P(rc, ==, 0);
+ srpp->srp_size += sp->s_rx_prefix_size;
+ }
+
+ /* Set up the packet length */
+ ASSERT3P(mp->b_rptr, ==, DB_BASE(mp));
+ mp->b_rptr += sp->s_rx_prefix_size;
+
+ prefetch_read_many(mp->b_rptr);
+
+ ASSERT3P(mp->b_wptr, ==, DB_BASE(mp));
+ mp->b_wptr += (size_t)(srpp->srp_size);
+ ASSERT3P(mp->b_wptr, <=, DB_LIM(mp));
+
+ /* Calculate the maximum packet size */
+ size = sp->s_mtu;
+ size += (srpp->srp_flags & EFX_PKT_VLAN_TAGGED) ?
+ sizeof (struct ether_vlan_header) :
+ sizeof (struct ether_header);
+
+ if (MBLKL(mp) > size)
+ goto discard;
+
+ /* Check for loopback packets */
+ if (!(srpp->srp_flags & EFX_PKT_IPV4) &&
+ !(srpp->srp_flags & EFX_PKT_IPV6)) {
+ struct ether_header *etherhp;
+
+ /*LINTED*/
+ etherhp = (struct ether_header *)(mp->b_rptr);
+
+ if (etherhp->ether_type ==
+ htons(SFXGE_ETHERTYPE_LOOPBACK)) {
+ DTRACE_PROBE(loopback);
+
+ srp->sr_loopback++;
+ goto discard;
+ }
+ }
+
+ /* Set up the checksum information */
+ flags = 0;
+
+ if (srpp->srp_flags & EFX_CKSUM_IPV4) {
+ ASSERT(srpp->srp_flags & EFX_PKT_IPV4);
+ flags |= HCK_IPV4_HDRCKSUM;
+ }
+
+ if (srpp->srp_flags & EFX_CKSUM_TCPUDP) {
+ ASSERT(srpp->srp_flags & EFX_PKT_TCP ||
+ srpp->srp_flags & EFX_PKT_UDP);
+ flags |= HCK_FULLCKSUM | HCK_FULLCKSUM_OK;
+ }
+
+ DB_CKSUMSTART(mp) = 0;
+ DB_CKSUMSTUFF(mp) = 0;
+ DB_CKSUMEND(mp) = 0;
+ DB_CKSUMFLAGS(mp) = flags;
+ DB_CKSUM16(mp) = 0;
+
+ /* Add the packet to the tail of the chain */
+ srfppp->srfpp_loaned++;
+
+ ASSERT(mp->b_next == NULL);
+ *(srp->sr_mpp) = mp;
+ srp->sr_mpp = &(mp->b_next);
+
+ continue;
+
+discard:
+ /* Return the packet to the pool */
+ srfppp->srfpp_loaned++;
+ freeb(mp); /* Equivalent to freemsg() as b_cont==0 */
+ }
+ srp->sr_completed = completed;
+
+ /* Attempt to coalesce any TCP packets */
+ if (sp->s_rx_coalesce_mode != SFXGE_RX_COALESCE_OFF)
+ sfxge_rx_qpacket_coalesce(srp);
+
+ /*
+ * If there are any pending flows and this is the end of the
+ * poll then they must be completed.
+ */
+ if (srp->sr_srfp != NULL && eop) {
+ sfxge_rx_flow_t *srfp;
+
+ srfp = srp->sr_srfp;
+
+ srp->sr_srfp = NULL;
+ srp->sr_srfpp = &(srp->sr_srfp);
+
+ do {
+ sfxge_rx_flow_t *next;
+
+ next = srfp->srf_next;
+ srfp->srf_next = NULL;
+
+ sfxge_rx_qflow_complete(srp, srfp);
+
+ srfp = next;
+ } while (srfp != NULL);
+ }
+
+ level = srp->sr_pushed - srp->sr_completed;
+
+ /* If there are any packets then pass them up the stack */
+ if (srp->sr_mp != NULL) {
+ mblk_t *mp;
+
+ mp = srp->sr_mp;
+
+ srp->sr_mp = NULL;
+ srp->sr_mpp = &(srp->sr_mp);
+
+ if (level == 0) {
+ /* Try to refill ASAP */
+ sfxge_rx_qrefill(srp, EFX_RXQ_LIMIT(sp->s_rxq_size));
+ level = srp->sr_pushed - srp->sr_completed;
+ }
+
+ /*
+ * If the RXQ is still empty, discard and recycle the
+ * current entry to ensure that the ring always
+ * contains at least one descriptor. This ensures that
+ * the next hardware RX will trigger an event
+ * (possibly delayed by interrupt moderation) and
+ * trigger another refill/fill attempt.
+ *
+ * Note this drops a complete LRO fragment from the
+ * start of the batch.
+ *
+ * Note also that copymsgchain() does not help with
+ * resource starvation here, unless we are short of DMA
+ * mappings.
+ */
+ if (level == 0) {
+ mblk_t *nmp;
+
+ srp->sr_kstat.srk_rxq_empty_discard++;
+ DTRACE_PROBE1(rxq_empty_discard, int, index);
+ nmp = mp->b_next;
+ if (nmp)
+ sfxge_gld_rx_post(sp, index, nmp);
+			/* level == 0, so the top-up below will swizzle and re-post */
+ freemsg(mp);
+ } else {
+ sfxge_gld_rx_post(sp, index, mp);
+ }
+ }
+
+ /* Top up the queue if necessary */
+ if (level < srp->sr_hiwat) {
+ sfxge_rx_qrefill(srp, EFX_RXQ_LIMIT(sp->s_rxq_size));
+
+ level = srp->sr_added - srp->sr_completed;
+ if (level < srp->sr_lowat)
+ sfxge_rx_qfill(srp, EFX_RXQ_LIMIT(sp->s_rxq_size));
+ }
+}
+
+void
+sfxge_rx_qflush_done(sfxge_rxq_t *srp)
+{
+ sfxge_t *sp = srp->sr_sp;
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ boolean_t flush_pending;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ /*
+ * Flush successful: wakeup sfxge_rx_qstop() if flush is pending.
+ *
+ * A delayed flush event received after RxQ stop has timed out
+ * will be ignored, as then the flush state will not be PENDING
+ * (see SFCbug22989).
+ */
+ flush_pending = (srp->sr_flush == SFXGE_FLUSH_PENDING);
+ srp->sr_flush = SFXGE_FLUSH_DONE;
+ if (flush_pending)
+ cv_broadcast(&(srp->sr_flush_kv));
+}
+
+void
+sfxge_rx_qflush_failed(sfxge_rxq_t *srp)
+{
+ sfxge_t *sp = srp->sr_sp;
+ unsigned int index = srp->sr_index;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ boolean_t flush_pending;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ /*
+ * Flush failed: wakeup sfxge_rx_qstop() if flush is pending.
+ *
+ * A delayed flush event received after RxQ stop has timed out
+ * will be ignored, as then the flush state will not be PENDING
+ * (see SFCbug22989).
+ */
+ flush_pending = (srp->sr_flush == SFXGE_FLUSH_PENDING);
+ srp->sr_flush = SFXGE_FLUSH_FAILED;
+ if (flush_pending)
+ cv_broadcast(&(srp->sr_flush_kv));
+}
+
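+/*
+ * Stop the receive queue: cancel polling, flush the hardware queue (waiting
+ * for the flush event unless a hardware error is flagged), then destroy the
+ * queue, clear the buffer table and release any outstanding packets.
+ */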
+static void
+sfxge_rx_qstop(sfxge_t *sp, unsigned int index)
+{
+ dev_info_t *dip = sp->s_dip;
+ sfxge_evq_t *sep = sp->s_sep[index];
+ sfxge_rxq_t *srp;
+ clock_t timeout;
+ unsigned int flush_tries = SFXGE_RX_QFLUSH_TRIES;
+ int rc;
+
+ ASSERT(mutex_owned(&(sp->s_state_lock)));
+
+ mutex_enter(&(sep->se_lock));
+
+ srp = sp->s_srp[index];
+ ASSERT3U(srp->sr_state, ==, SFXGE_RXQ_STARTED);
+
+ sfxge_rx_qpoll_stop(srp);
+
+ /* Further packets are discarded by sfxge_rx_qcomplete() */
+ srp->sr_state = SFXGE_RXQ_INITIALIZED;
+
+ if (sp->s_hw_err != SFXGE_HW_OK) {
+ /*
+ * Flag indicates possible hardware failure.
+ * Attempt flush but do not wait for it to complete.
+ */
+ srp->sr_flush = SFXGE_FLUSH_DONE;
+ (void) efx_rx_qflush(srp->sr_erp);
+ }
+
+	/* Wait up to 2 seconds for queue flushing to complete */
+ timeout = ddi_get_lbolt() + drv_usectohz(SFXGE_RX_QFLUSH_USEC);
+
+ while (srp->sr_flush != SFXGE_FLUSH_DONE && flush_tries-- > 0) {
+ if ((rc = efx_rx_qflush(srp->sr_erp)) != 0) {
+ if (rc == EALREADY)
+ srp->sr_flush = SFXGE_FLUSH_DONE;
+ else
+ srp->sr_flush = SFXGE_FLUSH_FAILED;
+ break;
+ }
+ srp->sr_flush = SFXGE_FLUSH_PENDING;
+ if (cv_timedwait(&(srp->sr_flush_kv), &(sep->se_lock),
+ timeout) < 0) {
+ /* Timeout waiting for successful or failed flush */
+ dev_err(dip, CE_NOTE,
+ SFXGE_CMN_ERR "rxq[%d] flush timeout", index);
+ break;
+ }
+ }
+
+ if (srp->sr_flush == SFXGE_FLUSH_FAILED)
+ dev_err(dip, CE_NOTE,
+ SFXGE_CMN_ERR "rxq[%d] flush failed", index);
+
+ DTRACE_PROBE1(flush, sfxge_flush_state_t, srp->sr_flush);
+ srp->sr_flush = SFXGE_FLUSH_DONE;
+
+ /* Destroy the receive queue */
+ efx_rx_qdestroy(srp->sr_erp);
+ srp->sr_erp = NULL;
+
+ /* Clear entries from the buffer table */
+ sfxge_sram_buf_tbl_clear(sp, srp->sr_id,
+ EFX_RXQ_NBUFS(sp->s_rxq_size));
+
+ /*
+	 * Free any unused RX packets which had descriptors on the RXQ.
+	 * Packets will be discarded as state != STARTED.
+ */
+ srp->sr_pending = srp->sr_added;
+ sfxge_rx_qcomplete(srp, B_TRUE);
+
+ ASSERT3U(srp->sr_completed, ==, srp->sr_pending);
+
+ srp->sr_added = 0;
+ srp->sr_pushed = 0;
+ srp->sr_pending = 0;
+ srp->sr_completed = 0;
+ srp->sr_loopback = 0;
+
+ srp->sr_lowat = 0;
+ srp->sr_hiwat = 0;
+
+ mutex_exit(&(sep->se_lock));
+}
+
+static void
+sfxge_rx_kstat_fini(sfxge_rxq_t *srp)
+{
+ kstat_delete(srp->sr_ksp);
+ srp->sr_ksp = NULL;
+}
+
+static void
+sfxge_rx_qfini(sfxge_t *sp, unsigned int index)
+{
+ sfxge_rxq_t *srp = sp->s_srp[index];
+
+ ASSERT3U(srp->sr_state, ==, SFXGE_RXQ_INITIALIZED);
+
+ sp->s_srp[index] = NULL;
+ srp->sr_state = SFXGE_RXQ_UNINITIALIZED;
+
+ sfxge_rx_kstat_fini(srp);
+
+ /* Empty the pool */
+ sfxge_rx_qfpp_empty(srp);
+
+ srp->sr_index = 0;
+
+ kmem_cache_free(sp->s_rqc, srp);
+}
+
+static int
+sfxge_rx_scale_kstat_update(kstat_t *ksp, int rw)
+{
+ sfxge_t *sp = ksp->ks_private;
+ sfxge_rx_scale_t *srsp = &(sp->s_rx_scale);
+ sfxge_intr_t *sip = &(sp->s_intr);
+ kstat_named_t *knp;
+ unsigned int index;
+ unsigned int entry;
+ unsigned int *freq;
+ int rc;
+
+ ASSERT(mutex_owned(&(srsp->srs_lock)));
+
+ if (rw != KSTAT_READ) {
+ rc = EACCES;
+ goto fail1;
+ }
+
+ if ((freq = kmem_zalloc(sizeof (unsigned int) * sip->si_nalloc,
+ KM_NOSLEEP)) == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ for (entry = 0; entry < SFXGE_RX_SCALE_MAX; entry++) {
+ index = srsp->srs_tbl[entry];
+
+ freq[index]++;
+ }
+
+ knp = ksp->ks_data;
+ for (index = 0; index < sip->si_nalloc; index++) {
+ knp->value.ui64 = freq[index];
+ knp++;
+ }
+
+ knp->value.ui64 = srsp->srs_count;
+
+ kmem_free(freq, sizeof (unsigned int) * sip->si_nalloc);
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ return (rc);
+}
+
+static int
+sfxge_rx_scale_kstat_init(sfxge_t *sp)
+{
+ dev_info_t *dip = sp->s_dip;
+ sfxge_rx_scale_t *srsp = &(sp->s_rx_scale);
+ sfxge_intr_t *sip = &(sp->s_intr);
+ char name[MAXNAMELEN];
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ unsigned int index;
+ int rc;
+
+ /* Create the set */
+ (void) snprintf(name, MAXNAMELEN - 1, "%s_rss", ddi_driver_name(dip));
+
+ if ((ksp = kstat_create((char *)ddi_driver_name(dip),
+ ddi_get_instance(dip), name, "rss", KSTAT_TYPE_NAMED,
+ sip->si_nalloc + 1, 0)) == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ srsp->srs_ksp = ksp;
+
+ ksp->ks_update = sfxge_rx_scale_kstat_update;
+ ksp->ks_private = sp;
+ ksp->ks_lock = &(srsp->srs_lock);
+
+ /* Initialise the named stats */
+ knp = ksp->ks_data;
+ for (index = 0; index < sip->si_nalloc; index++) {
+ char name[MAXNAMELEN];
+
+ (void) snprintf(name, MAXNAMELEN - 1, "evq%04d_count", index);
+ kstat_named_init(knp, name, KSTAT_DATA_UINT64);
+ knp++;
+ }
+
+ kstat_named_init(knp, "scale", KSTAT_DATA_UINT64);
+
+ kstat_install(ksp);
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+sfxge_rx_scale_kstat_fini(sfxge_t *sp)
+{
+ sfxge_rx_scale_t *srsp = &(sp->s_rx_scale);
+
+ /* Destroy the set */
+ kstat_delete(srsp->srs_ksp);
+ srsp->srs_ksp = NULL;
+}
+
+
+unsigned int
+sfxge_rx_scale_prop_get(sfxge_t *sp)
+{
+ int rx_scale;
+
+ rx_scale = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "rx_scale_count", SFXGE_RX_SCALE_MAX);
+	/* Zero and negative values select the number of logical CPUs */
+ if (rx_scale <= 0)
+ rx_scale = ncpus;
+
+ return (rx_scale);
+}
+
+
+static int
+sfxge_rx_scale_init(sfxge_t *sp)
+{
+ sfxge_rx_scale_t *srsp = &(sp->s_rx_scale);
+ sfxge_intr_t *sip = &(sp->s_intr);
+ int rc;
+
+ ASSERT3U(srsp->srs_state, ==, SFXGE_RX_SCALE_UNINITIALIZED);
+
+	/* Create the per-CPU usage count table */
+ srsp->srs_cpu = kmem_zalloc(sizeof (unsigned int) * NCPU, KM_SLEEP);
+
+ mutex_init(&(srsp->srs_lock), NULL, MUTEX_DRIVER, NULL);
+
+ /* We need at least one event queue */
+ srsp->srs_count = sfxge_rx_scale_prop_get(sp);
+ if (srsp->srs_count > sip->si_nalloc)
+ srsp->srs_count = sip->si_nalloc;
+ if (srsp->srs_count < 1)
+ srsp->srs_count = 1;
+
+ /* Set up the kstats */
+ if ((rc = sfxge_rx_scale_kstat_init(sp)) != 0)
+ goto fail1;
+
+ srsp->srs_state = SFXGE_RX_SCALE_INITIALIZED;
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+ mutex_destroy(&(srsp->srs_lock));
+
+ return (rc);
+}
+
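+/*
+ * Rebuild the RSS indirection table. Event queues are rated by the global
+ * CPU contention table and the least contended queues are chosen; the
+ * expanded table is then programmed into the hardware.
+ */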
+void
+sfxge_rx_scale_update(void *arg)
+{
+ sfxge_t *sp = arg;
+ sfxge_rx_scale_t *srsp = &(sp->s_rx_scale);
+ sfxge_intr_t *sip;
+ processorid_t id;
+ unsigned int count;
+ unsigned int *tbl;
+ unsigned int *rating;
+ unsigned int entry;
+ int rc;
+
+ mutex_enter(&(srsp->srs_lock));
+
+ if (srsp->srs_state != SFXGE_RX_SCALE_STARTED) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ if ((tbl = kmem_zalloc(sizeof (unsigned int) * SFXGE_RX_SCALE_MAX,
+ KM_NOSLEEP)) == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ sip = &(sp->s_intr);
+ if ((rating = kmem_zalloc(sizeof (unsigned int) * sip->si_nalloc,
+ KM_NOSLEEP)) == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ mutex_enter(&cpu_lock);
+
+ /*
+	 * Subtract any current CPU, core, cache and chip usage from the
+ * global contention tables.
+ */
+ for (id = 0; id < NCPU; id++) {
+ ASSERT3U(sfxge_cpu[id], >=, srsp->srs_cpu[id]);
+ sfxge_cpu[id] -= srsp->srs_cpu[id];
+ srsp->srs_cpu[id] = 0;
+ }
+
+ ASSERT(srsp->srs_count != 0);
+
+ /* Choose as many event queues as we need */
+ for (count = 0; count < srsp->srs_count; count++) {
+ unsigned int index;
+ sfxge_evq_t *sep;
+ unsigned int choice;
+ unsigned int choice_rating;
+
+ bzero(rating, sizeof (unsigned int) * sip->si_nalloc);
+
+ /*
+ * Rate each event queue on its global level of CPU
+ * contention.
+ */
+ for (index = 0; index < sip->si_nalloc; index++) {
+ sep = sp->s_sep[index];
+
+ id = sep->se_cpu_id;
+ rating[index] += sfxge_cpu[id];
+ }
+
+ /* Choose the queue with the lowest CPU contention */
+ choice = 0;
+ choice_rating = rating[0];
+
+ for (index = 1; index < sip->si_nalloc; index++) {
+ if (rating[index] < choice_rating) {
+ choice = index;
+ choice_rating = rating[index];
+ }
+ }
+
+ /* Add our choice to the condensed RSS table */
+ tbl[count] = choice;
+
+ /* Add information to the global contention tables */
+ sep = sp->s_sep[choice];
+
+ id = sep->se_cpu_id;
+ srsp->srs_cpu[id]++;
+ sfxge_cpu[id]++;
+ }
+
+ mutex_exit(&cpu_lock);
+
+ /* Build the expanded RSS table */
+ count = 0;
+ for (entry = 0; entry < SFXGE_RX_SCALE_MAX; entry++) {
+ unsigned int index;
+
+ index = tbl[count];
+ count = (count + 1) % srsp->srs_count;
+
+ srsp->srs_tbl[entry] = index;
+ }
+
+ /* Program the expanded RSS table into the hardware */
+ (void) efx_rx_scale_tbl_set(sp->s_enp, srsp->srs_tbl,
+ SFXGE_RX_SCALE_MAX);
+
+ mutex_exit(&(srsp->srs_lock));
+ kmem_free(rating, sizeof (unsigned int) * sip->si_nalloc);
+ kmem_free(tbl, sizeof (unsigned int) * SFXGE_RX_SCALE_MAX);
+ return;
+
+fail3:
+ DTRACE_PROBE(fail3);
+ kmem_free(tbl, sizeof (unsigned int) * SFXGE_RX_SCALE_MAX);
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(srsp->srs_lock));
+}
+
+static int
+sfxge_rx_scale_start(sfxge_t *sp)
+{
+ sfxge_rx_scale_t *srsp = &(sp->s_rx_scale);
+ int rc;
+
+ mutex_enter(&(srsp->srs_lock));
+
+ ASSERT3U(srsp->srs_state, ==, SFXGE_RX_SCALE_INITIALIZED);
+
+ /* Clear down the RSS table */
+ bzero(srsp->srs_tbl, sizeof (unsigned int) * SFXGE_RX_SCALE_MAX);
+
+ (void) efx_rx_scale_tbl_set(sp->s_enp, srsp->srs_tbl,
+ SFXGE_RX_SCALE_MAX);
+
+ if ((rc = sfxge_toeplitz_hash_init(sp)) != 0)
+ goto fail1;
+
+ srsp->srs_state = SFXGE_RX_SCALE_STARTED;
+
+ mutex_exit(&(srsp->srs_lock));
+
+ /* sfxge_t->s_state_lock held */
+ (void) ddi_taskq_dispatch(sp->s_tqp, sfxge_rx_scale_update, sp,
+ DDI_SLEEP);
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(srsp->srs_lock));
+
+ return (rc);
+}
+
+int
+sfxge_rx_scale_count_get(sfxge_t *sp, unsigned int *countp)
+{
+ sfxge_rx_scale_t *srsp = &(sp->s_rx_scale);
+ int rc;
+
+ mutex_enter(&(srsp->srs_lock));
+
+ if (srsp->srs_state != SFXGE_RX_SCALE_INITIALIZED &&
+ srsp->srs_state != SFXGE_RX_SCALE_STARTED) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ *countp = srsp->srs_count;
+
+ mutex_exit(&(srsp->srs_lock));
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(srsp->srs_lock));
+
+ return (rc);
+}
+
+int
+sfxge_rx_scale_count_set(sfxge_t *sp, unsigned int count)
+{
+ sfxge_rx_scale_t *srsp = &(sp->s_rx_scale);
+ sfxge_intr_t *sip = &(sp->s_intr);
+ int dispatch = 1;
+ int rc;
+
+ if (count < 1 || count > sip->si_nalloc) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ mutex_enter(&(srsp->srs_lock));
+
+ if (srsp->srs_state != SFXGE_RX_SCALE_INITIALIZED &&
+ srsp->srs_state != SFXGE_RX_SCALE_STARTED) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ srsp->srs_count = count;
+
+ if (srsp->srs_state != SFXGE_RX_SCALE_STARTED)
+ dispatch = 0;
+
+ mutex_exit(&(srsp->srs_lock));
+
+ if (dispatch)
+ /* no locks held */
+ (void) ddi_taskq_dispatch(sp->s_tqp, sfxge_rx_scale_update, sp,
+ DDI_SLEEP);
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ mutex_exit(&(srsp->srs_lock));
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+sfxge_rx_scale_stop(sfxge_t *sp)
+{
+ sfxge_rx_scale_t *srsp = &(sp->s_rx_scale);
+ processorid_t id;
+
+ mutex_enter(&(srsp->srs_lock));
+
+ ASSERT3U(srsp->srs_state, ==, SFXGE_RX_SCALE_STARTED);
+
+ srsp->srs_state = SFXGE_RX_SCALE_INITIALIZED;
+
+ mutex_enter(&cpu_lock);
+
+ /*
+	 * Subtract any current CPU, core, cache and chip usage from the
+ * global contention tables.
+ */
+ for (id = 0; id < NCPU; id++) {
+ ASSERT3U(sfxge_cpu[id], >=, srsp->srs_cpu[id]);
+ sfxge_cpu[id] -= srsp->srs_cpu[id];
+ srsp->srs_cpu[id] = 0;
+ }
+
+ mutex_exit(&cpu_lock);
+
+ /* Clear down the RSS table */
+ bzero(srsp->srs_tbl, sizeof (unsigned int) * SFXGE_RX_SCALE_MAX);
+
+ (void) efx_rx_scale_tbl_set(sp->s_enp, srsp->srs_tbl,
+ SFXGE_RX_SCALE_MAX);
+
+ mutex_exit(&(srsp->srs_lock));
+}
+
+static void
+sfxge_rx_scale_fini(sfxge_t *sp)
+{
+ sfxge_rx_scale_t *srsp = &(sp->s_rx_scale);
+
+ ASSERT3U(srsp->srs_state, ==, SFXGE_RX_SCALE_INITIALIZED);
+
+ srsp->srs_state = SFXGE_RX_SCALE_UNINITIALIZED;
+
+ /* Tear down the kstats */
+ sfxge_rx_scale_kstat_fini(sp);
+
+ srsp->srs_count = 0;
+
+ mutex_destroy(&(srsp->srs_lock));
+
+ /* Destroy tables */
+ kmem_free(srsp->srs_cpu, sizeof (unsigned int) * NCPU);
+ srsp->srs_cpu = NULL;
+
+ sfxge_toeplitz_hash_fini(sp);
+}
+
+int
+sfxge_rx_init(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ char name[MAXNAMELEN];
+ int index;
+ int rc;
+
+ if (sip->si_state == SFXGE_INTR_UNINITIALIZED) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = sfxge_rx_scale_init(sp)) != 0)
+ goto fail2;
+
+ (void) snprintf(name, MAXNAMELEN - 1, "%s%d_rx_packet_cache",
+ ddi_driver_name(sp->s_dip), ddi_get_instance(sp->s_dip));
+
+ sp->s_rpc = kmem_cache_create(name, sizeof (sfxge_rx_packet_t),
+ SFXGE_CPU_CACHE_SIZE, sfxge_rx_packet_ctor, sfxge_rx_packet_dtor,
+ NULL, sp, NULL, 0);
+ ASSERT(sp->s_rpc != NULL);
+
+ (void) snprintf(name, MAXNAMELEN - 1, "%s%d_rxq_cache",
+ ddi_driver_name(sp->s_dip), ddi_get_instance(sp->s_dip));
+
+ sp->s_rqc = kmem_cache_create(name, sizeof (sfxge_rxq_t),
+ SFXGE_CPU_CACHE_SIZE, sfxge_rx_qctor, sfxge_rx_qdtor, NULL, sp,
+ NULL, 0);
+ ASSERT(sp->s_rqc != NULL);
+
+ sp->s_rx_pkt_mem_max = ddi_prop_get_int64(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "rx_pkt_mem_max", 0); /* disabled */
+
+ /* Initialize the receive queue(s) */
+ for (index = 0; index < sip->si_nalloc; index++) {
+ if ((rc = sfxge_rx_qinit(sp, index)) != 0)
+ goto fail3;
+ }
+
+ sp->s_rx_coalesce_mode = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "rx_coalesce_mode", SFXGE_RX_COALESCE_OFF);
+
+ return (0);
+
+fail3:
+ DTRACE_PROBE(fail3);
+
+ /* Tear down the receive queue(s) */
+ while (--index >= 0)
+ sfxge_rx_qfini(sp, index);
+
+ kmem_cache_destroy(sp->s_rqc);
+ sp->s_rqc = NULL;
+
+ kmem_cache_destroy(sp->s_rpc);
+ sp->s_rpc = NULL;
+
+ sfxge_rx_scale_fini(sp);
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+int
+sfxge_rx_start(sfxge_t *sp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ sfxge_intr_t *sip;
+ const efx_nic_cfg_t *encp;
+ size_t hdrlen, align;
+ int index;
+ int rc;
+
+ mutex_enter(&(smp->sm_lock));
+
+ /* Calculate the receive packet buffer size and alignment */
+ sp->s_rx_buffer_size = EFX_MAC_PDU(sp->s_mtu);
+
+ encp = efx_nic_cfg_get(sp->s_enp);
+
+ /* Packet buffer allocations are cache line aligned */
+ EFSYS_ASSERT3U(encp->enc_rx_buf_align_start, <=, SFXGE_CPU_CACHE_SIZE);
+
+ if (sp->s_family == EFX_FAMILY_HUNTINGTON) {
+ sp->s_rx_prefix_size = encp->enc_rx_prefix_size;
+
+ hdrlen = sp->s_rx_prefix_size + sizeof (struct ether_header);
+
+		/* Ensure IP headers are 32-bit aligned */
+ sp->s_rx_buffer_align = P2ROUNDUP(hdrlen, 4) - hdrlen;
+ sp->s_rx_buffer_size += sp->s_rx_buffer_align;
+
+ } else if (encp->enc_features & EFX_FEATURE_LFSR_HASH_INSERT) {
+ sp->s_rx_prefix_size = encp->enc_rx_prefix_size;
+
+ /*
+ * Place the start of the buffer a prefix length minus 2
+ * before the start of a cache line. This ensures that the
+ * last two bytes of the prefix (which is where the LFSR hash
+ * is located) are in the same cache line as the headers, and
+ * the IP header is 32-bit aligned.
+ */
+ sp->s_rx_buffer_align =
+ SFXGE_CPU_CACHE_SIZE - (encp->enc_rx_prefix_size - 2);
+ sp->s_rx_buffer_size += sp->s_rx_buffer_align;
+ } else {
+ sp->s_rx_prefix_size = 0;
+
+ /*
+ * Place the start of the buffer 2 bytes after a cache line
+ * boundary so that the headers fit into the cache line and
+ * the IP header is 32-bit aligned.
+ */
+ hdrlen = sp->s_rx_prefix_size + sizeof (struct ether_header);
+
+ sp->s_rx_buffer_align = P2ROUNDUP(hdrlen, 4) - hdrlen;
+ sp->s_rx_buffer_size += sp->s_rx_buffer_align;
+ }
+
+ /* Align end of packet buffer for RX DMA end padding */
+ align = MAX(1, encp->enc_rx_buf_align_end);
+ EFSYS_ASSERT(ISP2(align));
+ sp->s_rx_buffer_size = P2ROUNDUP(sp->s_rx_buffer_size, align);
+
+ /* Initialize the receive module */
+ if ((rc = efx_rx_init(sp->s_enp)) != 0)
+ goto fail1;
+
+ mutex_exit(&(smp->sm_lock));
+
+ if ((rc = sfxge_rx_scale_start(sp)) != 0)
+ goto fail2;
+
+ /* Start the receive queue(s) */
+ sip = &(sp->s_intr);
+ for (index = 0; index < sip->si_nalloc; index++) {
+ if ((rc = sfxge_rx_qstart(sp, index)) != 0)
+ goto fail3;
+ }
+
+ ASSERT3U(sp->s_srp[0]->sr_state, ==, SFXGE_RXQ_STARTED);
+ /* It is sufficient to have Rx scale initialized */
+ ASSERT3U(sp->s_rx_scale.srs_state, ==, SFXGE_RX_SCALE_STARTED);
+ rc = efx_mac_filter_default_rxq_set(sp->s_enp, sp->s_srp[0]->sr_erp,
+ sp->s_rx_scale.srs_count > 1);
+ if (rc != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ DTRACE_PROBE(fail4);
+
+fail3:
+ DTRACE_PROBE(fail3);
+
+ /* Stop the receive queue(s) */
+ while (--index >= 0)
+ sfxge_rx_qstop(sp, index);
+
+ sfxge_rx_scale_stop(sp);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ mutex_enter(&(smp->sm_lock));
+
+ /* Tear down the receive module */
+ efx_rx_fini(sp->s_enp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(smp->sm_lock));
+
+ return (rc);
+}
+
+void
+sfxge_rx_coalesce_mode_get(sfxge_t *sp, sfxge_rx_coalesce_mode_t *modep)
+{
+ *modep = sp->s_rx_coalesce_mode;
+}
+
+int
+sfxge_rx_coalesce_mode_set(sfxge_t *sp, sfxge_rx_coalesce_mode_t mode)
+{
+ int rc;
+
+ switch (mode) {
+ case SFXGE_RX_COALESCE_OFF:
+ case SFXGE_RX_COALESCE_DISALLOW_PUSH:
+ case SFXGE_RX_COALESCE_ALLOW_PUSH:
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ sp->s_rx_coalesce_mode = mode;
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+void
+sfxge_rx_stop(sfxge_t *sp)
+{
+ sfxge_mac_t *smp = &(sp->s_mac);
+ sfxge_intr_t *sip = &(sp->s_intr);
+ efx_nic_t *enp = sp->s_enp;
+ int index;
+
+ ASSERT(mutex_owned(&(sp->s_state_lock)));
+
+ efx_mac_filter_default_rxq_clear(enp);
+
+ /* Stop the receive queue(s) */
+ index = sip->si_nalloc;
+ while (--index >= 0) {
+ /* TBD: Flush RXQs in parallel; HW has limit + may need retry */
+ sfxge_rx_qstop(sp, index);
+ }
+
+ sfxge_rx_scale_stop(sp);
+
+ mutex_enter(&(smp->sm_lock));
+
+ /* Tear down the receive module */
+ efx_rx_fini(enp);
+
+ sp->s_rx_buffer_align = 0;
+ sp->s_rx_prefix_size = 0;
+ sp->s_rx_buffer_size = 0;
+
+ mutex_exit(&(smp->sm_lock));
+}
+
+unsigned int
+sfxge_rx_loaned(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ int index;
+ unsigned int loaned;
+
+ ASSERT3U(sip->si_state, ==, SFXGE_INTR_INITIALIZED);
+
+ loaned = 0;
+ for (index = 0; index < sip->si_nalloc; index++) {
+ sfxge_rxq_t *srp = sp->s_srp[index];
+ sfxge_evq_t *sep = sp->s_sep[srp->sr_index];
+
+ mutex_enter(&(sep->se_lock));
+
+ loaned += sfxge_rx_qfpp_swizzle(srp);
+
+ mutex_exit(&(sep->se_lock));
+ }
+
+ return (loaned);
+}
+
+void
+sfxge_rx_fini(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ int index;
+
+ ASSERT3U(sip->si_state, ==, SFXGE_INTR_INITIALIZED);
+
+ sp->s_rx_coalesce_mode = SFXGE_RX_COALESCE_OFF;
+
+ /* Tear down the receive queue(s) */
+ index = sip->si_nalloc;
+ while (--index >= 0)
+ sfxge_rx_qfini(sp, index);
+
+ ASSERT3U(sp->s_rx_pkt_mem_alloc, ==, 0);
+
+ kmem_cache_destroy(sp->s_rqc);
+ sp->s_rqc = NULL;
+
+ kmem_cache_destroy(sp->s_rpc);
+ sp->s_rpc = NULL;
+
+ sfxge_rx_scale_fini(sp);
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_sram.c b/usr/src/uts/common/io/sfxge/sfxge_sram.c
new file mode 100644
index 0000000000..71690bd180
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_sram.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include "sfxge.h"
+
+void
+sfxge_sram_init(sfxge_t *sp)
+{
+ sfxge_sram_t *ssp = &(sp->s_sram);
+ dev_info_t *dip = sp->s_dip;
+ char name[MAXNAMELEN];
+
+ ASSERT3U(ssp->ss_state, ==, SFXGE_SRAM_UNINITIALIZED);
+
+ mutex_init(&(ssp->ss_lock), NULL, MUTEX_DRIVER, NULL);
+
+ (void) snprintf(name, MAXNAMELEN - 1, "%s%d_sram", ddi_driver_name(dip),
+ ddi_get_instance(dip));
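+
+	/*
+	 * Seed the buffer table resource map with addresses 1 to
+	 * EFX_BUF_TBL_SIZE - 1. Address 0 is left out because rmalloc()
+	 * returns 0 to indicate failure, so allocated IDs are stored offset
+	 * by one (see sfxge_sram_buf_tbl_alloc()).
+	 */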
+ ssp->ss_buf_tbl_map = rmallocmap_wait(EFX_BUF_TBL_SIZE);
+ rmfree(ssp->ss_buf_tbl_map, EFX_BUF_TBL_SIZE - 1, 1);
+ ssp->ss_state = SFXGE_SRAM_INITIALIZED;
+}
+
+int
+sfxge_sram_buf_tbl_alloc(sfxge_t *sp, size_t n, uint32_t *idp)
+{
+ sfxge_sram_t *ssp = &(sp->s_sram);
+ unsigned long id;
+ int rc;
+
+ mutex_enter(&(ssp->ss_lock));
+
+ ASSERT(ssp->ss_state != SFXGE_SRAM_UNINITIALIZED);
+
+ if ((id = rmalloc(ssp->ss_buf_tbl_map, n)) == 0) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ *idp = (uint32_t)id - 1;
+ mutex_exit(&(ssp->ss_lock));
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(ssp->ss_lock));
+
+ return (rc);
+}
+
+int
+sfxge_sram_start(sfxge_t *sp)
+{
+ sfxge_sram_t *ssp = &(sp->s_sram);
+
+ mutex_enter(&(ssp->ss_lock));
+
+ ASSERT3U(ssp->ss_state, ==, SFXGE_SRAM_INITIALIZED);
+ ASSERT3U(ssp->ss_count, ==, 0);
+
+ ssp->ss_state = SFXGE_SRAM_STARTED;
+
+ mutex_exit(&(ssp->ss_lock));
+
+ return (0);
+}
+
+int
+sfxge_sram_buf_tbl_set(sfxge_t *sp, uint32_t id, efsys_mem_t *esmp,
+ size_t n)
+{
+ sfxge_sram_t *ssp = &(sp->s_sram);
+ int rc;
+
+ mutex_enter(&(ssp->ss_lock));
+
+ ASSERT3U(ssp->ss_state, ==, SFXGE_SRAM_STARTED);
+
+ if ((rc = efx_sram_buf_tbl_set(sp->s_enp, id, esmp, n)) != 0)
+ goto fail1;
+
+ ssp->ss_count += n;
+
+ mutex_exit(&(ssp->ss_lock));
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(ssp->ss_lock));
+
+ return (rc);
+}
+
+void
+sfxge_sram_buf_tbl_clear(sfxge_t *sp, uint32_t id, size_t n)
+{
+ sfxge_sram_t *ssp = &(sp->s_sram);
+
+ mutex_enter(&(ssp->ss_lock));
+
+ ASSERT3U(ssp->ss_state, ==, SFXGE_SRAM_STARTED);
+
+ ASSERT3U(ssp->ss_count, >=, n);
+ ssp->ss_count -= n;
+
+ efx_sram_buf_tbl_clear(sp->s_enp, id, n);
+
+ mutex_exit(&(ssp->ss_lock));
+}
+
+void
+sfxge_sram_stop(sfxge_t *sp)
+{
+ sfxge_sram_t *ssp = &(sp->s_sram);
+
+ mutex_enter(&(ssp->ss_lock));
+
+ ASSERT3U(ssp->ss_state, ==, SFXGE_SRAM_STARTED);
+ ASSERT3U(ssp->ss_count, ==, 0);
+
+ ssp->ss_state = SFXGE_SRAM_INITIALIZED;
+
+ mutex_exit(&(ssp->ss_lock));
+}
+
+void
+sfxge_sram_buf_tbl_free(sfxge_t *sp, uint32_t id, size_t n)
+{
+ sfxge_sram_t *ssp = &(sp->s_sram);
+
+ mutex_enter(&(ssp->ss_lock));
+
+ ASSERT(ssp->ss_state != SFXGE_SRAM_UNINITIALIZED);
+
+ rmfree(ssp->ss_buf_tbl_map, n, (unsigned long)id + 1);
+
+ mutex_exit(&(ssp->ss_lock));
+}
+
+void
+sfxge_sram_fini(sfxge_t *sp)
+{
+ sfxge_sram_t *ssp = &(sp->s_sram);
+
+ ASSERT3U(ssp->ss_state, ==, SFXGE_SRAM_INITIALIZED);
+
+ rmfreemap(ssp->ss_buf_tbl_map);
+ ssp->ss_buf_tbl_map = NULL;
+
+ mutex_destroy(&(ssp->ss_lock));
+
+ ssp->ss_state = SFXGE_SRAM_UNINITIALIZED;
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_tcp.c b/usr/src/uts/common/io/sfxge/sfxge_tcp.c
new file mode 100644
index 0000000000..d7c4bf90fb
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_tcp.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/strsubr.h>
+#include <sys/pattr.h>
+
+#include <sys/ethernet.h>
+#include <inet/ip.h>
+
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+#include <netinet/sctp.h>
+
+#include "sfxge.h"
+
+#include "efx.h"
+
+
+/*
+ * Parse packet headers and return:
+ * etherhpp Ethernet MAC header
+ * iphpp IPv4 header (NULL for non-IPv4 packet)
+ * thpp TCP header (NULL for non-TCP packet)
+ *	offp		Offset to the TCP/UDP/SCTP payload
+ *	sizep		Size of the TCP/UDP/SCTP payload
+ * dportp TCP/UDP/SCTP dest. port (network order), otherwise zero
+ *	sportp		TCP/UDP/SCTP source port (network order), otherwise zero
+ */
+sfxge_packet_type_t
+sfxge_pkthdr_parse(mblk_t *mp, struct ether_header **etherhpp,
+ struct ip **iphpp, struct tcphdr **thpp,
+ size_t *offp, size_t *sizep,
+ uint16_t *sportp, uint16_t *dportp)
+{
+ struct ether_header *etherhp;
+ uint16_t ether_type;
+ size_t etherhs;
+ struct ip *iphp;
+ size_t iphs;
+ struct tcphdr *thp;
+ size_t len;
+ size_t ths;
+ size_t off;
+ size_t size;
+ uint16_t sport;
+ uint16_t dport;
+ sfxge_packet_type_t pkt_type = SFXGE_PACKET_TYPE_UNKNOWN;
+
+ etherhp = NULL;
+ iphp = NULL;
+ thp = NULL;
+ off = 0;
+ size = 0;
+ sport = 0;
+ dport = 0;
+
+ /* Grab the MAC header */
+ etherhs = sizeof (struct ether_header);
+ if ((MBLKL(mp) < etherhs) && (pullupmsg(mp, etherhs) == 0))
+ goto done;
+
+ /*LINTED*/
+ etherhp = (struct ether_header *)(mp->b_rptr);
+ ether_type = etherhp->ether_type;
+
+ if (ether_type == htons(ETHERTYPE_VLAN)) {
+ struct ether_vlan_header *ethervhp;
+
+ etherhs = sizeof (struct ether_vlan_header);
+ if ((MBLKL(mp) < etherhs) && (pullupmsg(mp, etherhs) == 0))
+ goto done;
+
+ /*LINTED*/
+ ethervhp = (struct ether_vlan_header *)(mp->b_rptr);
+ ether_type = ethervhp->ether_type;
+ }
+
+ if (ether_type != htons(ETHERTYPE_IP))
+ goto done;
+
+ /* Skip over the MAC header */
+ off += etherhs;
+
+ /* Grab the IP header */
+ len = off + sizeof (struct ip);
+ if ((MBLKL(mp) < len) && (pullupmsg(mp, len) == 0))
+ goto done;
+
+ /*LINTED*/
+ iphp = (struct ip *)(mp->b_rptr + off);
+ iphs = iphp->ip_hl * 4;
+
+ if (iphp->ip_v != IPV4_VERSION)
+ goto done;
+
+ /* Get the size of the packet */
+ size = ntohs(iphp->ip_len);
+
+ ASSERT3U(etherhs + size, <=, msgdsize(mp));
+
+ pkt_type = SFXGE_PACKET_TYPE_IPV4_OTHER;
+
+ /* Skip over the IP header */
+ off += iphs;
+ size -= iphs;
+
+ if (iphp->ip_p == IPPROTO_TCP) {
+ /* Grab the TCP header */
+ len = off + sizeof (struct tcphdr);
+ if ((MBLKL(mp) < len) && (pullupmsg(mp, len) == 0))
+ goto done;
+
+ /*LINTED*/
+ thp = (struct tcphdr *)(mp->b_rptr + off);
+ ths = thp->th_off * 4;
+
+ dport = thp->th_dport;
+ sport = thp->th_sport;
+
+ /* Skip over the TCP header */
+ off += ths;
+ size -= ths;
+
+ pkt_type = SFXGE_PACKET_TYPE_IPV4_TCP;
+
+ } else if (iphp->ip_p == IPPROTO_UDP) {
+ struct udphdr *uhp;
+
+ /* Grab the UDP header */
+ len = off + sizeof (struct udphdr);
+ if ((MBLKL(mp) < len) && (pullupmsg(mp, len) == 0))
+ goto done;
+
+ /*LINTED*/
+ uhp = (struct udphdr *)(mp->b_rptr + off);
+ dport = uhp->uh_dport;
+ sport = uhp->uh_sport;
+
+ /* Skip over the UDP header */
+ off += sizeof (struct udphdr);
+ size -= sizeof (struct udphdr);
+
+ pkt_type = SFXGE_PACKET_TYPE_IPV4_UDP;
+
+ } else if (iphp->ip_p == IPPROTO_SCTP) {
+ struct sctp_hdr *shp;
+
+ /* Grab the SCTP header */
+ len = off + sizeof (struct sctp_hdr);
+ if ((MBLKL(mp) < len) && (pullupmsg(mp, len) == 0))
+ goto done;
+
+ /*LINTED*/
+ shp = (struct sctp_hdr *)(mp->b_rptr + off);
+ dport = shp->sh_dport;
+ sport = shp->sh_sport;
+
+ /* Skip over the SCTP header */
+ off += sizeof (struct sctp_hdr);
+ size -= sizeof (struct sctp_hdr);
+
+ pkt_type = SFXGE_PACKET_TYPE_IPV4_SCTP;
+ }
+
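+	/* Make sure that all of the parsed headers are in the first mblk */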
+ if (MBLKL(mp) < off)
+ (void) pullupmsg(mp, off);
+
+done:
+ *etherhpp = etherhp;
+ *iphpp = iphp;
+ *thpp = thp;
+ *offp = off;
+ *sizep = size;
+ *sportp = sport;
+ *dportp = dport;
+
+ return (pkt_type);
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_tx.c b/usr/src/uts/common/io/sfxge/sfxge_tx.c
new file mode 100644
index 0000000000..9a237be74c
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_tx.c
@@ -0,0 +1,3100 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/atomic.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/strsubr.h>
+#include <sys/pattr.h>
+#include <sys/cpu.h>
+
+#include <sys/ethernet.h>
+#include <inet/ip.h>
+
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+
+#include "sfxge.h"
+
+#include "efx.h"
+
+/* TXQ flush response timeout (in microseconds) */
+#define SFXGE_TX_QFLUSH_USEC (2000000)
+
+/* See sfxge.conf.private for descriptions */
+#define SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT 4096
+#define SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT 256
+
+
+/* Transmit buffer DMA attributes */
+static ddi_device_acc_attr_t sfxge_tx_buffer_devacc = {
+
+ DDI_DEVICE_ATTR_V0, /* devacc_attr_version */
+ DDI_NEVERSWAP_ACC, /* devacc_attr_endian_flags */
+ DDI_STRICTORDER_ACC /* devacc_attr_dataorder */
+};
+
+static ddi_dma_attr_t sfxge_tx_buffer_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffffffffffull, /* dma_attr_addr_hi */
+ 0xffffffffffffffffull, /* dma_attr_count_max */
+ SFXGE_TX_BUFFER_SIZE, /* dma_attr_align */
+ 0xffffffff, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffffffffffull, /* dma_attr_maxxfer */
+ 0xffffffffffffffffull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+/* Transmit mapping DMA attributes */
+static ddi_dma_attr_t sfxge_tx_mapping_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffffffffffull, /* dma_attr_addr_hi */
+ 0xffffffffffffffffull, /* dma_attr_count_max */
+ 1, /* dma_attr_align */
+ 0xffffffff, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffffffffffull, /* dma_attr_maxxfer */
+ 0xffffffffffffffffull, /* dma_attr_seg */
+ 0x7fffffff, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+/* Transmit queue DMA attributes */
+static ddi_device_acc_attr_t sfxge_txq_devacc = {
+
+ DDI_DEVICE_ATTR_V0, /* devacc_attr_version */
+ DDI_NEVERSWAP_ACC, /* devacc_attr_endian_flags */
+ DDI_STRICTORDER_ACC /* devacc_attr_dataorder */
+};
+
+static ddi_dma_attr_t sfxge_txq_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffffffffffull, /* dma_attr_addr_hi */
+ 0xffffffffffffffffull, /* dma_attr_count_max */
+ EFX_BUF_SIZE, /* dma_attr_align */
+ 0xffffffff, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffffffffffull, /* dma_attr_maxxfer */
+ 0xffffffffffffffffull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+
+/*
+ * A call to sfxge_tx_qdpl_swizzle() can happen when the DPL get list is one
+ * packet under the limit and must move all packets from the DPL put list to
+ * the get list. Hence this is the real maximum length of the TX DPL get list.
+ */
+static int
+sfxge_tx_dpl_get_pkt_max(sfxge_txq_t *stp)
+{
+ sfxge_tx_dpl_t *stdp = &(stp->st_dpl);
+ return (stdp->get_pkt_limit + stdp->put_pkt_limit - 1);
+}
+
+
+static int
+sfxge_tx_packet_ctor(void *buf, void *arg, int kmflags)
+{
+ _NOTE(ARGUNUSED(arg, kmflags))
+
+ bzero(buf, sizeof (sfxge_tx_packet_t));
+
+ return (0);
+}
+
+static void
+sfxge_tx_packet_dtor(void *buf, void *arg)
+{
+ sfxge_tx_packet_t *stpp = buf;
+
+ _NOTE(ARGUNUSED(arg))
+
+ SFXGE_OBJ_CHECK(stpp, sfxge_tx_packet_t);
+}
+
+static int
+sfxge_tx_buffer_ctor(void *buf, void *arg, int kmflags)
+{
+ sfxge_tx_buffer_t *stbp = buf;
+ sfxge_t *sp = arg;
+ sfxge_dma_buffer_attr_t dma_attr;
+ int rc;
+
+ bzero(buf, sizeof (sfxge_tx_buffer_t));
+
+ dma_attr.sdba_dip = sp->s_dip;
+ dma_attr.sdba_dattrp = &sfxge_tx_buffer_dma_attr;
+ dma_attr.sdba_callback = ((kmflags == KM_SLEEP) ?
+ DDI_DMA_SLEEP : DDI_DMA_DONTWAIT);
+ dma_attr.sdba_length = SFXGE_TX_BUFFER_SIZE;
+ dma_attr.sdba_memflags = DDI_DMA_STREAMING;
+ dma_attr.sdba_devaccp = &sfxge_tx_buffer_devacc;
+ dma_attr.sdba_bindflags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
+ dma_attr.sdba_maxcookies = 1;
+ dma_attr.sdba_zeroinit = B_FALSE;
+
+ if ((rc = sfxge_dma_buffer_create(&(stbp->stb_esm), &dma_attr)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ SFXGE_OBJ_CHECK(stbp, sfxge_tx_buffer_t);
+
+ return (-1);
+}
+
+static void
+sfxge_tx_buffer_dtor(void *buf, void *arg)
+{
+ sfxge_tx_buffer_t *stbp = buf;
+
+ _NOTE(ARGUNUSED(arg))
+
+ sfxge_dma_buffer_destroy(&(stbp->stb_esm));
+
+ SFXGE_OBJ_CHECK(stbp, sfxge_tx_buffer_t);
+}
+
+static int
+sfxge_tx_mapping_ctor(void *buf, void *arg, int kmflags)
+{
+ sfxge_tx_mapping_t *stmp = buf;
+ sfxge_t *sp = arg;
+ dev_info_t *dip = sp->s_dip;
+ int rc;
+
+ bzero(buf, sizeof (sfxge_tx_mapping_t));
+
+ stmp->stm_sp = sp;
+
+ /* Allocate DMA handle */
+ rc = ddi_dma_alloc_handle(dip, &sfxge_tx_mapping_dma_attr,
+ (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
+ NULL, &(stmp->stm_dma_handle));
+ if (rc != DDI_SUCCESS)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ stmp->stm_sp = NULL;
+
+ SFXGE_OBJ_CHECK(stmp, sfxge_tx_mapping_t);
+
+ return (-1);
+}
+
+static void
+sfxge_tx_mapping_dtor(void *buf, void *arg)
+{
+ sfxge_tx_mapping_t *stmp = buf;
+
+ ASSERT3P(stmp->stm_sp, ==, arg);
+
+ /* Free the DMA handle */
+ ddi_dma_free_handle(&(stmp->stm_dma_handle));
+ stmp->stm_dma_handle = NULL;
+
+ stmp->stm_sp = NULL;
+
+ SFXGE_OBJ_CHECK(stmp, sfxge_tx_mapping_t);
+}
+
+static int
+sfxge_tx_qctor(void *buf, void *arg, int kmflags)
+{
+ sfxge_txq_t *stp = buf;
+ efsys_mem_t *esmp = &(stp->st_mem);
+ sfxge_t *sp = arg;
+ sfxge_dma_buffer_attr_t dma_attr;
+ sfxge_tx_dpl_t *stdp;
+ int rc;
+
+ /* Compile-time structure layout checks */
+ EFX_STATIC_ASSERT(sizeof (stp->__st_u1.__st_s1) <=
+ sizeof (stp->__st_u1.__st_pad));
+ EFX_STATIC_ASSERT(sizeof (stp->__st_u2.__st_s2) <=
+ sizeof (stp->__st_u2.__st_pad));
+ EFX_STATIC_ASSERT(sizeof (stp->__st_u3.__st_s3) <=
+ sizeof (stp->__st_u3.__st_pad));
+ EFX_STATIC_ASSERT(sizeof (stp->__st_u4.__st_s4) <=
+ sizeof (stp->__st_u4.__st_pad));
+
+ bzero(buf, sizeof (sfxge_txq_t));
+
+ stp->st_sp = sp;
+
+ dma_attr.sdba_dip = sp->s_dip;
+ dma_attr.sdba_dattrp = &sfxge_txq_dma_attr;
+ dma_attr.sdba_callback = DDI_DMA_SLEEP;
+ dma_attr.sdba_length = EFX_TXQ_SIZE(SFXGE_TX_NDESCS);
+ dma_attr.sdba_memflags = DDI_DMA_CONSISTENT;
+ dma_attr.sdba_devaccp = &sfxge_txq_devacc;
+ dma_attr.sdba_bindflags = DDI_DMA_READ | DDI_DMA_CONSISTENT;
+ dma_attr.sdba_maxcookies = EFX_TXQ_NBUFS(SFXGE_TX_NDESCS);
+ dma_attr.sdba_zeroinit = B_FALSE;
+
+ if ((rc = sfxge_dma_buffer_create(esmp, &dma_attr)) != 0)
+ goto fail1;
+
+ /* Allocate some buffer table entries */
+ if ((rc = sfxge_sram_buf_tbl_alloc(sp, EFX_TXQ_NBUFS(SFXGE_TX_NDESCS),
+ &(stp->st_id))) != 0)
+ goto fail2;
+
+ /* Allocate the descriptor array */
+ if ((stp->st_eb = kmem_zalloc(sizeof (efx_buffer_t) *
+ EFX_TXQ_LIMIT(SFXGE_TX_NDESCS), kmflags)) == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ /* Allocate the context arrays */
+ if ((stp->st_stmp = kmem_zalloc(sizeof (sfxge_tx_mapping_t *) *
+ SFXGE_TX_NDESCS, kmflags)) == NULL) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+
+ if ((stp->st_stbp = kmem_zalloc(sizeof (sfxge_tx_buffer_t *) *
+ SFXGE_TX_NDESCS, kmflags)) == NULL) {
+ rc = ENOMEM;
+ goto fail5;
+ }
+
+ if ((stp->st_mp = kmem_zalloc(sizeof (mblk_t *) *
+ SFXGE_TX_NDESCS, kmflags)) == NULL) {
+ rc = ENOMEM;
+ goto fail6;
+ }
+
+ /* Initialize the deferred packet list */
+ stdp = &(stp->st_dpl);
+ stdp->std_getp = &(stdp->std_get);
+
+ stp->st_unblock = SFXGE_TXQ_NOT_BLOCKED;
+
+ return (0);
+
+fail6:
+ DTRACE_PROBE(fail6);
+
+ kmem_free(stp->st_stbp, sizeof (sfxge_tx_buffer_t *) * SFXGE_TX_NDESCS);
+ stp->st_stbp = NULL;
+
+fail5:
+ DTRACE_PROBE(fail5);
+
+ kmem_free(stp->st_stmp,
+ sizeof (sfxge_tx_mapping_t *) * SFXGE_TX_NDESCS);
+ stp->st_stmp = NULL;
+
+fail4:
+ DTRACE_PROBE(fail4);
+
+ /* Free the descriptor array */
+ kmem_free(stp->st_eb, sizeof (efx_buffer_t) *
+ EFX_TXQ_LIMIT(SFXGE_TX_NDESCS));
+ stp->st_eb = NULL;
+
+fail3:
+ DTRACE_PROBE(fail3);
+
+ /* Free the buffer table entries */
+ sfxge_sram_buf_tbl_free(sp, stp->st_id, EFX_TXQ_NBUFS(SFXGE_TX_NDESCS));
+ stp->st_id = 0;
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Tear down DMA setup */
+ sfxge_dma_buffer_destroy(esmp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ stp->st_sp = NULL;
+
+ SFXGE_OBJ_CHECK(stp, sfxge_txq_t);
+
+ return (-1);
+}
+
+static void
+sfxge_tx_qdtor(void *buf, void *arg)
+{
+ sfxge_txq_t *stp = buf;
+ efsys_mem_t *esmp = &(stp->st_mem);
+ sfxge_t *sp = stp->st_sp;
+ sfxge_tx_dpl_t *stdp;
+
+ _NOTE(ARGUNUSED(arg))
+
+ stp->st_unblock = 0;
+
+ /* Tear down the deferred packet list */
+ stdp = &(stp->st_dpl);
+ ASSERT3P(stdp->std_getp, ==, &(stdp->std_get));
+ stdp->std_getp = NULL;
+
+ /* Free the context arrays */
+ kmem_free(stp->st_mp, sizeof (mblk_t *) * SFXGE_TX_NDESCS);
+ stp->st_mp = NULL;
+
+ kmem_free(stp->st_stbp, sizeof (sfxge_tx_buffer_t *) * SFXGE_TX_NDESCS);
+ stp->st_stbp = NULL;
+
+ kmem_free(stp->st_stmp,
+ sizeof (sfxge_tx_mapping_t *) * SFXGE_TX_NDESCS);
+ stp->st_stmp = NULL;
+
+ /* Free the descriptor array */
+ kmem_free(stp->st_eb, sizeof (efx_buffer_t) *
+ EFX_TXQ_LIMIT(SFXGE_TX_NDESCS));
+ stp->st_eb = NULL;
+
+ /* Free the buffer table entries */
+ sfxge_sram_buf_tbl_free(sp, stp->st_id, EFX_TXQ_NBUFS(SFXGE_TX_NDESCS));
+ stp->st_id = 0;
+
+ /* Tear down dma setup */
+ sfxge_dma_buffer_destroy(esmp);
+
+ stp->st_sp = NULL;
+
+ SFXGE_OBJ_CHECK(stp, sfxge_txq_t);
+}
+
+static void
+sfxge_tx_packet_destroy(sfxge_t *sp, sfxge_tx_packet_t *stpp)
+{
+ kmem_cache_free(sp->s_tpc, stpp);
+}
+
+static sfxge_tx_packet_t *
+sfxge_tx_packet_create(sfxge_t *sp)
+{
+ sfxge_tx_packet_t *stpp;
+
+ stpp = kmem_cache_alloc(sp->s_tpc, KM_NOSLEEP);
+
+ return (stpp);
+}
+
+static inline int
+sfxge_tx_qfpp_put(sfxge_txq_t *stp, sfxge_tx_packet_t *stpp)
+{
+ sfxge_tx_fpp_t *stfp = &(stp->st_fpp);
+
+ ASSERT(mutex_owned(&(stp->st_lock)));
+
+ ASSERT3P(stpp->stp_next, ==, NULL);
+ ASSERT3P(stpp->stp_mp, ==, NULL);
+ ASSERT3P(stpp->stp_etherhp, ==, NULL);
+ ASSERT3P(stpp->stp_iphp, ==, NULL);
+ ASSERT3P(stpp->stp_thp, ==, NULL);
+ ASSERT3U(stpp->stp_off, ==, 0);
+ ASSERT3U(stpp->stp_size, ==, 0);
+ ASSERT3U(stpp->stp_mss, ==, 0);
+ ASSERT3U(stpp->stp_dpl_put_len, ==, 0);
+
+ if (stfp->stf_count < SFXGE_TX_FPP_MAX) {
+ /* Add to the start of the list */
+ stpp->stp_next = stfp->stf_stpp;
+ stfp->stf_stpp = stpp;
+ stfp->stf_count++;
+
+ return (0);
+ }
+
+ DTRACE_PROBE(fpp_full);
+ return (ENOSPC);
+}
+
+static inline sfxge_tx_packet_t *
+sfxge_tx_qfpp_get(sfxge_txq_t *stp)
+{
+ sfxge_tx_packet_t *stpp;
+ sfxge_tx_fpp_t *stfp = &(stp->st_fpp);
+
+ ASSERT(mutex_owned(&(stp->st_lock)));
+
+ stpp = stfp->stf_stpp;
+ if (stpp == NULL) {
+ ASSERT3U(stfp->stf_count, ==, 0);
+ return (NULL);
+ }
+
+ /* Remove item from the head of the list */
+ stfp->stf_stpp = stpp->stp_next;
+ stpp->stp_next = NULL;
+
+ ASSERT3U(stfp->stf_count, >, 0);
+ stfp->stf_count--;
+
+ if (stfp->stf_count != 0) {
+ ASSERT(stfp->stf_stpp != NULL);
+ prefetch_read_many(stfp->stf_stpp);
+ }
+ return (stpp);
+}
+
+static void
+sfxge_tx_qfpp_empty(sfxge_txq_t *stp)
+{
+ sfxge_t *sp = stp->st_sp;
+ sfxge_tx_fpp_t *stfp = &(stp->st_fpp);
+ sfxge_tx_packet_t *stpp;
+
+ mutex_enter(&(stp->st_lock));
+
+ stpp = stfp->stf_stpp;
+ stfp->stf_stpp = NULL;
+
+ while (stpp != NULL) {
+ sfxge_tx_packet_t *next;
+
+ next = stpp->stp_next;
+ stpp->stp_next = NULL;
+
+ ASSERT3U(stfp->stf_count, >, 0);
+ stfp->stf_count--;
+
+ sfxge_tx_packet_destroy(sp, stpp);
+
+ stpp = next;
+ }
+ ASSERT3U(stfp->stf_count, ==, 0);
+
+ mutex_exit(&(stp->st_lock));
+}
+
+static inline void
+sfxge_tx_qfbp_put(sfxge_txq_t *stp, sfxge_tx_buffer_t *stbp)
+{
+ sfxge_tx_fbp_t *stfp = &(stp->st_fbp);
+
+ ASSERT3P(stbp->stb_next, ==, NULL);
+ ASSERT3U(stbp->stb_off, ==, 0);
+ ASSERT3U(stbp->stb_esm.esm_used, ==, 0);
+
+ stbp->stb_next = stfp->stf_stbp;
+ stfp->stf_stbp = stbp;
+ stfp->stf_count++;
+}
+
+
+static inline sfxge_tx_buffer_t *
+sfxge_tx_qfbp_get(sfxge_txq_t *stp)
+{
+ sfxge_tx_buffer_t *stbp;
+ sfxge_tx_fbp_t *stfp = &(stp->st_fbp);
+
+ stbp = stfp->stf_stbp;
+ if (stbp == NULL) {
+ ASSERT3U(stfp->stf_count, ==, 0);
+ return (NULL);
+ }
+
+ stfp->stf_stbp = stbp->stb_next;
+ stbp->stb_next = NULL;
+
+ ASSERT3U(stfp->stf_count, >, 0);
+ stfp->stf_count--;
+
+ if (stfp->stf_count != 0) {
+ ASSERT(stfp->stf_stbp != NULL);
+ prefetch_read_many(stfp->stf_stbp);
+ }
+
+ return (stbp);
+}
+
+static void
+sfxge_tx_qfbp_empty(sfxge_txq_t *stp)
+{
+ sfxge_t *sp = stp->st_sp;
+ sfxge_tx_fbp_t *stfp = &(stp->st_fbp);
+ sfxge_tx_buffer_t *stbp;
+
+ mutex_enter(&(stp->st_lock));
+
+ stbp = stfp->stf_stbp;
+ stfp->stf_stbp = NULL;
+
+ while (stbp != NULL) {
+ sfxge_tx_buffer_t *next;
+
+ next = stbp->stb_next;
+ stbp->stb_next = NULL;
+
+ ASSERT3U(stfp->stf_count, >, 0);
+ stfp->stf_count--;
+
+ kmem_cache_free(sp->s_tbc, stbp);
+
+ stbp = next;
+ }
+ ASSERT3U(stfp->stf_count, ==, 0);
+
+ mutex_exit(&(stp->st_lock));
+}
+
+static inline void
+sfxge_tx_qfmp_put(sfxge_txq_t *stp, sfxge_tx_mapping_t *stmp)
+{
+ sfxge_tx_fmp_t *stfp = &(stp->st_fmp);
+
+ ASSERT3P(stmp->stm_next, ==, NULL);
+ ASSERT3P(stmp->stm_mp, ==, NULL);
+ ASSERT3P(stmp->stm_base, ==, NULL);
+ ASSERT3U(stmp->stm_off, ==, 0);
+ ASSERT3U(stmp->stm_size, ==, 0);
+
+ stmp->stm_next = stfp->stf_stmp;
+ stfp->stf_stmp = stmp;
+ stfp->stf_count++;
+}
+
+static inline sfxge_tx_mapping_t *
+sfxge_tx_qfmp_get(sfxge_txq_t *stp)
+{
+ sfxge_tx_mapping_t *stmp;
+ sfxge_tx_fmp_t *stfp = &(stp->st_fmp);
+
+ stmp = stfp->stf_stmp;
+ if (stmp == NULL) {
+ ASSERT3U(stfp->stf_count, ==, 0);
+ return (NULL);
+ }
+
+ stfp->stf_stmp = stmp->stm_next;
+ stmp->stm_next = NULL;
+
+ ASSERT3U(stfp->stf_count, >, 0);
+ stfp->stf_count--;
+
+ if (stfp->stf_count != 0) {
+ ASSERT(stfp->stf_stmp != NULL);
+ prefetch_read_many(stfp->stf_stmp);
+ }
+ return (stmp);
+}
+
+static void
+sfxge_tx_qfmp_empty(sfxge_txq_t *stp)
+{
+ sfxge_t *sp = stp->st_sp;
+ sfxge_tx_fmp_t *stfp = &(stp->st_fmp);
+ sfxge_tx_mapping_t *stmp;
+
+ mutex_enter(&(stp->st_lock));
+
+ stmp = stfp->stf_stmp;
+ stfp->stf_stmp = NULL;
+
+ while (stmp != NULL) {
+ sfxge_tx_mapping_t *next;
+
+ next = stmp->stm_next;
+ stmp->stm_next = NULL;
+
+ ASSERT3U(stfp->stf_count, >, 0);
+ stfp->stf_count--;
+
+ kmem_cache_free(sp->s_tmc, stmp);
+
+ stmp = next;
+ }
+ ASSERT3U(stfp->stf_count, ==, 0);
+
+ mutex_exit(&(stp->st_lock));
+}
+
+static void
+sfxge_tx_msgb_unbind(sfxge_tx_mapping_t *stmp)
+{
+ bzero(stmp->stm_addr, sizeof (uint64_t) * SFXGE_TX_MAPPING_NADDR);
+ stmp->stm_off = 0;
+
+ (void) ddi_dma_unbind_handle(stmp->stm_dma_handle);
+
+ stmp->stm_size = 0;
+ stmp->stm_base = NULL;
+
+ stmp->stm_mp = NULL;
+}
+
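+/*
+ * DMA mappings are recorded as a set of 4KB-aligned chunk addresses plus an
+ * initial offset; each transmit fragment is confined to a single 4KB chunk.
+ */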
+#define SFXGE_TX_DESCSHIFT 12
+#define SFXGE_TX_DESCSIZE (1 << 12)
+
+#define SFXGE_TX_DESCOFFSET (SFXGE_TX_DESCSIZE - 1)
+#define SFXGE_TX_DESCMASK (~SFXGE_TX_DESCOFFSET)
+
+static int
+sfxge_tx_msgb_bind(mblk_t *mp, sfxge_tx_mapping_t *stmp)
+{
+ ddi_dma_cookie_t dmac;
+ unsigned int ncookies;
+ size_t size;
+ unsigned int n;
+ int rc;
+
+ ASSERT(mp != NULL);
+ ASSERT3U(DB_TYPE(mp), ==, M_DATA);
+
+ ASSERT(stmp->stm_mp == NULL);
+ stmp->stm_mp = mp;
+
+ stmp->stm_base = (caddr_t)(mp->b_rptr);
+ stmp->stm_size = MBLKL(mp);
+
+ /* Bind the STREAMS block to the mapping */
+ rc = ddi_dma_addr_bind_handle(stmp->stm_dma_handle, NULL,
+ stmp->stm_base, stmp->stm_size, DDI_DMA_WRITE | DDI_DMA_STREAMING,
+ DDI_DMA_DONTWAIT, NULL, &dmac, &ncookies);
+ if (rc != DDI_DMA_MAPPED)
+ goto fail1;
+
+ ASSERT3U(ncookies, <=, SFXGE_TX_MAPPING_NADDR);
+
+ /*
+ * Construct an array of addresses and an initial
+ * offset.
+ */
+ n = 0;
+ stmp->stm_addr[n++] = dmac.dmac_laddress & SFXGE_TX_DESCMASK;
+ DTRACE_PROBE1(addr, uint64_t, dmac.dmac_laddress & SFXGE_TX_DESCMASK);
+
+ stmp->stm_off = dmac.dmac_laddress & SFXGE_TX_DESCOFFSET;
+
+ size = MIN(SFXGE_TX_DESCSIZE - stmp->stm_off, dmac.dmac_size);
+ dmac.dmac_laddress += size;
+ dmac.dmac_size -= size;
+
+ for (;;) {
+ ASSERT3U(n, <, SFXGE_TX_MAPPING_NADDR);
+
+ if (dmac.dmac_size == 0) {
+ if (--ncookies == 0)
+ break;
+
+ ddi_dma_nextcookie(stmp->stm_dma_handle, &dmac);
+ }
+
+ ASSERT((dmac.dmac_laddress & SFXGE_TX_DESCMASK) != 0);
+ ASSERT((dmac.dmac_laddress & SFXGE_TX_DESCOFFSET) == 0);
+ stmp->stm_addr[n++] = dmac.dmac_laddress;
+ DTRACE_PROBE1(addr, uint64_t, dmac.dmac_laddress);
+
+ size = MIN(SFXGE_TX_DESCSIZE, dmac.dmac_size);
+ dmac.dmac_laddress += size;
+ dmac.dmac_size -= size;
+ }
+ ASSERT3U(n, <=, SFXGE_TX_MAPPING_NADDR);
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ stmp->stm_size = 0;
+ stmp->stm_base = NULL;
+
+ stmp->stm_mp = NULL;
+
+ return (-1);
+}
+
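+/* Return completed mappings and buffers to the per-queue free pools */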
+static void
+sfxge_tx_qreap(sfxge_txq_t *stp)
+{
+ unsigned int reaped;
+
+ ASSERT(mutex_owned(&(stp->st_lock)));
+
+ reaped = stp->st_reaped;
+ while (reaped != stp->st_completed) {
+ unsigned int id;
+ sfxge_tx_mapping_t *stmp;
+ sfxge_tx_buffer_t *stbp;
+
+ id = reaped++ & (SFXGE_TX_NDESCS - 1);
+
+ ASSERT3P(stp->st_mp[id], ==, NULL);
+
+ if ((stmp = stp->st_stmp[id]) != NULL) {
+ stp->st_stmp[id] = NULL;
+
+ /* Free all the mappings */
+ do {
+ sfxge_tx_mapping_t *next;
+
+ next = stmp->stm_next;
+ stmp->stm_next = NULL;
+
+ sfxge_tx_qfmp_put(stp, stmp);
+
+ stmp = next;
+ } while (stmp != NULL);
+ }
+
+ if ((stbp = stp->st_stbp[id]) != NULL) {
+ stp->st_stbp[id] = NULL;
+
+ /* Free all the buffers */
+ do {
+ sfxge_tx_buffer_t *next;
+
+ next = stbp->stb_next;
+ stbp->stb_next = NULL;
+
+ stbp->stb_esm.esm_used = 0;
+ stbp->stb_off = 0;
+
+ sfxge_tx_qfbp_put(stp, stbp);
+
+ stbp = next;
+ } while (stbp != NULL);
+ }
+ }
+ stp->st_reaped = reaped;
+}
+
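+/*
+ * Discard the current fragment list: recycle its mappings and buffers and
+ * free any associated message block.
+ */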
+static void
+sfxge_tx_qlist_abort(sfxge_txq_t *stp)
+{
+ unsigned int id;
+ sfxge_tx_mapping_t *stmp;
+ sfxge_tx_buffer_t *stbp;
+ mblk_t *mp;
+
+ ASSERT(mutex_owned(&(stp->st_lock)));
+
+ id = stp->st_added & (SFXGE_TX_NDESCS - 1);
+
+ /* Clear the completion information */
+ stmp = stp->st_stmp[id];
+ stp->st_stmp[id] = NULL;
+
+ /* Free any mappings that were used */
+ while (stmp != NULL) {
+ sfxge_tx_mapping_t *next;
+
+ next = stmp->stm_next;
+ stmp->stm_next = NULL;
+
+ if (stmp->stm_mp != NULL)
+ sfxge_tx_msgb_unbind(stmp);
+
+ sfxge_tx_qfmp_put(stp, stmp);
+
+ stmp = next;
+ }
+
+ stbp = stp->st_stbp[id];
+ stp->st_stbp[id] = NULL;
+
+ /* Free any buffers that were used */
+ while (stbp != NULL) {
+ sfxge_tx_buffer_t *next;
+
+ next = stbp->stb_next;
+ stbp->stb_next = NULL;
+
+ stbp->stb_off = 0;
+ stbp->stb_esm.esm_used = 0;
+
+ sfxge_tx_qfbp_put(stp, stbp);
+
+ stbp = next;
+ }
+
+ mp = stp->st_mp[id];
+ stp->st_mp[id] = NULL;
+
+ if (mp != NULL)
+ freemsg(mp);
+
+ /* Clear the fragment list */
+ stp->st_n = 0;
+}
+
+/* Push descriptors to the TX ring, blocking the queue if there is no space */
+static void
+sfxge_tx_qlist_post(sfxge_txq_t *stp)
+{
+ unsigned int id;
+ unsigned int level;
+ unsigned int available;
+ int rc;
+
+ ASSERT(mutex_owned(&(stp->st_lock)));
+
+ ASSERT(stp->st_n != 0);
+
+again:
+ level = stp->st_added - stp->st_reaped;
+ available = EFX_TXQ_LIMIT(SFXGE_TX_NDESCS) - level;
+
+ id = stp->st_added & (SFXGE_TX_NDESCS - 1);
+
+ if (available < stp->st_n) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ ASSERT3U(available, >=, stp->st_n);
+
+ /* Post the fragment list */
+ if ((rc = efx_tx_qpost(stp->st_etp, stp->st_eb, stp->st_n,
+ stp->st_reaped, &(stp->st_added))) != 0)
+ goto fail2;
+
+ /*
+ * If the list took more than a single descriptor then we need to
+	 * move the completion information so it is referenced by the last
+ * descriptor.
+ */
+ if (((stp->st_added - 1) & (SFXGE_TX_NDESCS - 1)) != id) {
+ sfxge_tx_mapping_t *stmp;
+ sfxge_tx_buffer_t *stbp;
+ mblk_t *mp;
+
+ stmp = stp->st_stmp[id];
+ stp->st_stmp[id] = NULL;
+
+ stbp = stp->st_stbp[id];
+ stp->st_stbp[id] = NULL;
+
+ mp = stp->st_mp[id];
+ stp->st_mp[id] = NULL;
+
+ id = (stp->st_added - 1) & (SFXGE_TX_NDESCS - 1);
+
+ ASSERT(stp->st_stmp[id] == NULL);
+ stp->st_stmp[id] = stmp;
+
+ ASSERT(stp->st_stbp[id] == NULL);
+ stp->st_stbp[id] = stbp;
+
+ ASSERT(stp->st_mp[id] == NULL);
+ stp->st_mp[id] = mp;
+ }
+
+ /* Clear the list */
+ stp->st_n = 0;
+
+ ASSERT3U(stp->st_unblock, ==, SFXGE_TXQ_NOT_BLOCKED);
+ return;
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ ASSERT(rc == ENOSPC);
+
+ level = stp->st_added - stp->st_completed;
+ available = EFX_TXQ_LIMIT(SFXGE_TX_NDESCS) - level;
+
+ /*
+ * If there would be enough space after we've reaped any completed
+ * mappings and buffers, and we gain sufficient queue space by doing
+ * so, then reap now and try posting again.
+ */
+ if (stp->st_n <= available &&
+ stp->st_completed - stp->st_reaped >= SFXGE_TX_BATCH) {
+ sfxge_tx_qreap(stp);
+
+ goto again;
+ }
+
+ /* Set the unblock level */
+ if (stp->st_unblock == SFXGE_TXQ_NOT_BLOCKED) {
+ stp->st_unblock = SFXGE_TXQ_UNBLOCK_LEVEL1;
+ } else {
+ ASSERT(stp->st_unblock == SFXGE_TXQ_UNBLOCK_LEVEL1);
+
+ stp->st_unblock = SFXGE_TXQ_UNBLOCK_LEVEL2;
+ }
+
+ /*
+ * Avoid a race with completion interrupt handling that could leave the
+ * queue blocked.
+ *
+ * NOTE: The use of st_pending rather than st_completed is intentional
+ * as st_pending is updated per-event rather than per-batch and
+ * therefore avoids needless deferring.
+ */
+ if (stp->st_pending == stp->st_added) {
+ sfxge_tx_qreap(stp);
+
+ stp->st_unblock = SFXGE_TXQ_NOT_BLOCKED;
+ goto again;
+ }
+
+ ASSERT(stp->st_unblock != SFXGE_TXQ_NOT_BLOCKED);
+}
+
+static int
+sfxge_tx_kstat_update(kstat_t *ksp, int rw)
+{
+ sfxge_txq_t *stp = ksp->ks_private;
+ sfxge_tx_dpl_t *stdp = &(stp->st_dpl);
+ kstat_named_t *knp;
+ int rc;
+
+ ASSERT(mutex_owned(&(stp->st_lock)));
+
+ if (rw != KSTAT_READ) {
+ rc = EACCES;
+ goto fail1;
+ }
+
+ if (stp->st_state != SFXGE_TXQ_STARTED)
+ goto done;
+
+ efx_tx_qstats_update(stp->st_etp, stp->st_stat);
+ knp = (kstat_named_t *)ksp->ks_data + TX_NQSTATS;
+ knp->value.ui64 = stdp->get_pkt_limit;
+ knp++;
+ knp->value.ui64 = stdp->put_pkt_limit;
+ knp++;
+ knp->value.ui64 = stdp->get_full_count;
+ knp++;
+ knp->value.ui64 = stdp->put_full_count;
+
+done:
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static int
+sfxge_tx_kstat_init(sfxge_txq_t *stp)
+{
+ sfxge_t *sp = stp->st_sp;
+ unsigned int index = stp->st_index;
+ dev_info_t *dip = sp->s_dip;
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ char name[MAXNAMELEN];
+ unsigned int id;
+ int rc;
+
+ /* Create the set */
+ (void) snprintf(name, MAXNAMELEN - 1, "%s_txq%04d",
+ ddi_driver_name(dip), index);
+
+ if ((ksp = kstat_create((char *)ddi_driver_name(dip),
+ ddi_get_instance(dip), name, "queue", KSTAT_TYPE_NAMED,
+ TX_NQSTATS + 4, 0)) == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ stp->st_ksp = ksp;
+
+ ksp->ks_update = sfxge_tx_kstat_update;
+ ksp->ks_private = stp;
+ ksp->ks_lock = &(stp->st_lock);
+
+ /* Initialise the named stats */
+ stp->st_stat = knp = ksp->ks_data;
+ for (id = 0; id < TX_NQSTATS; id++) {
+ kstat_named_init(knp, (char *)efx_tx_qstat_name(sp->s_enp, id),
+ KSTAT_DATA_UINT64);
+ knp++;
+ }
+ kstat_named_init(knp, "dpl_get_pkt_limit", KSTAT_DATA_UINT64);
+ knp++;
+ kstat_named_init(knp, "dpl_put_pkt_limit", KSTAT_DATA_UINT64);
+ knp++;
+ kstat_named_init(knp, "dpl_get_full_count", KSTAT_DATA_UINT64);
+ knp++;
+ kstat_named_init(knp, "dpl_put_full_count", KSTAT_DATA_UINT64);
+
+ kstat_install(ksp);
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static void
+sfxge_tx_kstat_fini(sfxge_txq_t *stp)
+{
+ /* Destroy the set */
+ kstat_delete(stp->st_ksp);
+ stp->st_ksp = NULL;
+ stp->st_stat = NULL;
+}
+
+static int
+sfxge_tx_qinit(sfxge_t *sp, unsigned int index, sfxge_txq_type_t type,
+ unsigned int evq)
+{
+ sfxge_txq_t *stp;
+ sfxge_tx_dpl_t *stdp;
+ int rc;
+
+ ASSERT3U(index, <, EFX_ARRAY_SIZE(sp->s_stp));
+ ASSERT3U(type, <, SFXGE_TXQ_NTYPES);
+ ASSERT3U(evq, <, EFX_ARRAY_SIZE(sp->s_sep));
+
+ if ((stp = kmem_cache_alloc(sp->s_tqc, KM_SLEEP)) == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+ ASSERT3U(stp->st_state, ==, SFXGE_TXQ_UNINITIALIZED);
+
+ stdp = &(stp->st_dpl);
+
+ stp->st_index = index;
+ stp->st_type = type;
+ stp->st_evq = evq;
+
+ mutex_init(&(stp->st_lock), NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(sp->s_intr.si_intr_pri));
+
+ /* Initialize the statistics */
+ if ((rc = sfxge_tx_kstat_init(stp)) != 0)
+ goto fail2;
+
+ stdp->get_pkt_limit = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "tx_dpl_get_pkt_limit",
+ SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT);
+
+ stdp->put_pkt_limit = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
+ DDI_PROP_DONTPASS, "tx_dpl_put_pkt_limit",
+ SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT);
+
+ /* Allocate a per-EVQ label for events from this TXQ */
+ if ((rc = sfxge_ev_txlabel_alloc(sp, evq, stp, &(stp->st_label))) != 0)
+ goto fail2;
+
+ stp->st_state = SFXGE_TXQ_INITIALIZED;
+
+ /* Attach the TXQ to the driver */
+ ASSERT3P(sp->s_stp[index], ==, NULL);
+ sp->s_stp[index] = stp;
+ sp->s_tx_qcount++;
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ sfxge_tx_kstat_fini(stp);
+
+
+ stp->st_evq = 0;
+ stp->st_type = 0;
+ stp->st_index = 0;
+
+ mutex_destroy(&(stp->st_lock));
+
+ kmem_cache_free(sp->s_tqc, stp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static int
+sfxge_tx_qstart(sfxge_t *sp, unsigned int index)
+{
+ sfxge_txq_t *stp = sp->s_stp[index];
+ efx_nic_t *enp = sp->s_enp;
+ efsys_mem_t *esmp;
+ sfxge_evq_t *sep;
+ unsigned int evq;
+ unsigned int flags;
+ unsigned int desc_index;
+ int rc;
+
+ mutex_enter(&(stp->st_lock));
+
+ esmp = &(stp->st_mem);
+ evq = stp->st_evq;
+ sep = sp->s_sep[evq];
+
+ ASSERT3U(stp->st_state, ==, SFXGE_TXQ_INITIALIZED);
+ ASSERT3U(sep->se_state, ==, SFXGE_EVQ_STARTED);
+
+ /* Zero the memory */
+ bzero(esmp->esm_base, EFX_TXQ_SIZE(SFXGE_TX_NDESCS));
+
+ /* Program the buffer table */
+ if ((rc = sfxge_sram_buf_tbl_set(sp, stp->st_id, esmp,
+ EFX_TXQ_NBUFS(SFXGE_TX_NDESCS))) != 0)
+ goto fail1;
+
+ switch (stp->st_type) {
+ case SFXGE_TXQ_NON_CKSUM:
+ flags = 0;
+ break;
+
+ case SFXGE_TXQ_IP_CKSUM:
+ flags = EFX_TXQ_CKSUM_IPV4;
+ break;
+
+ case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
+ flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
+ break;
+
+ default:
+ ASSERT(B_FALSE);
+
+ flags = 0;
+ break;
+ }
+
+ /* Create the transmit queue */
+ if ((rc = efx_tx_qcreate(enp, index, stp->st_label, esmp,
+ SFXGE_TX_NDESCS, stp->st_id, flags, sep->se_eep,
+ &(stp->st_etp), &desc_index)) != 0)
+ goto fail2;
+
+ /* Initialise queue descriptor indexes */
+ stp->st_added = desc_index;
+ stp->st_pending = desc_index;
+ stp->st_completed = desc_index;
+ stp->st_reaped = desc_index;
+
+ /* Enable the transmit queue */
+ efx_tx_qenable(stp->st_etp);
+
+ stp->st_state = SFXGE_TXQ_STARTED;
+
+ mutex_exit(&(stp->st_lock));
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ /* Clear entries from the buffer table */
+ sfxge_sram_buf_tbl_clear(sp, stp->st_id,
+ EFX_TXQ_NBUFS(SFXGE_TX_NDESCS));
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ mutex_exit(&(stp->st_lock));
+
+ return (rc);
+}
+
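+/*
+ * Add fragments for part of a DMA-mapped data block, splitting on 4KB chunk
+ * boundaries and consuming at most *limitp further bytes.
+ */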
+static inline int
+sfxge_tx_qmapping_add(sfxge_txq_t *stp, sfxge_tx_mapping_t *stmp,
+ size_t *offp, size_t *limitp)
+{
+ mblk_t *mp;
+ size_t mapping_off;
+ size_t mapping_size;
+ int rc;
+
+ ASSERT3U(*offp, <, stmp->stm_size);
+ ASSERT(*limitp != 0);
+
+ mp = stmp->stm_mp;
+
+ ASSERT3P(stmp->stm_base, ==, mp->b_rptr);
+ ASSERT3U(stmp->stm_size, ==, MBLKL(mp));
+
+ mapping_off = stmp->stm_off + *offp;
+ mapping_size = stmp->stm_size - *offp;
+
+ while (mapping_size != 0 && *limitp != 0) {
+ size_t page =
+ mapping_off >> SFXGE_TX_DESCSHIFT;
+ size_t page_off =
+ mapping_off & SFXGE_TX_DESCOFFSET;
+ size_t page_size =
+ SFXGE_TX_DESCSIZE - page_off;
+ efx_buffer_t *ebp;
+
+ ASSERT3U(page, <, SFXGE_TX_MAPPING_NADDR);
+ ASSERT((stmp->stm_addr[page] & SFXGE_TX_DESCMASK) != 0);
+
+ page_size = MIN(page_size, mapping_size);
+ page_size = MIN(page_size, *limitp);
+
+ ASSERT3U(stp->st_n, <=,
+ EFX_TXQ_LIMIT(SFXGE_TX_NDESCS));
+ if (stp->st_n ==
+ EFX_TXQ_LIMIT(SFXGE_TX_NDESCS)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ ebp = &(stp->st_eb[stp->st_n++]);
+ ebp->eb_addr = stmp->stm_addr[page] +
+ page_off;
+ ebp->eb_size = page_size;
+
+ *offp += page_size;
+ *limitp -= page_size;
+
+ mapping_off += page_size;
+ mapping_size -= page_size;
+
+ ebp->eb_eop = (*limitp == 0 ||
+ (mapping_size == 0 && mp->b_cont == NULL));
+
+ DTRACE_PROBE5(tx_mapping_add,
+ unsigned int, stp->st_index,
+ unsigned int, stp->st_n - 1,
+ uint64_t, ebp->eb_addr,
+ size_t, ebp->eb_size,
+ boolean_t, ebp->eb_eop);
+ }
+
+ ASSERT3U(*offp, <=, stmp->stm_size);
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
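+/* Add a fragment for the as yet unposted portion of a transmit buffer */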
+static inline int
+sfxge_tx_qbuffer_add(sfxge_txq_t *stp, sfxge_tx_buffer_t *stbp, boolean_t eop)
+{
+ efx_buffer_t *ebp;
+ int rc;
+
+ ASSERT3U(stp->st_n, <=,
+ EFX_TXQ_LIMIT(SFXGE_TX_NDESCS));
+ if (stp->st_n == EFX_TXQ_LIMIT(SFXGE_TX_NDESCS)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ ebp = &(stp->st_eb[stp->st_n++]);
+ ebp->eb_addr = stbp->stb_esm.esm_addr + stbp->stb_off;
+ ebp->eb_size = stbp->stb_esm.esm_used - stbp->stb_off;
+ ebp->eb_eop = eop;
+
+ (void) ddi_dma_sync(stbp->stb_esm.esm_dma_handle,
+ stbp->stb_off, ebp->eb_size,
+ DDI_DMA_SYNC_FORDEV);
+
+ stbp->stb_off = stbp->stb_esm.esm_used;
+
+ DTRACE_PROBE5(tx_buffer_add,
+ unsigned int, stp->st_index,
+ unsigned int, stp->st_n - 1,
+ uint64_t, ebp->eb_addr, size_t, ebp->eb_size,
+ boolean_t, ebp->eb_eop);
+
+ return (0);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
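+/*
+ * Copy as much of a data block as will fit into a transmit buffer; returns
+ * B_TRUE if the end of the payload being added has been reached.
+ */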
+static inline boolean_t
+sfxge_tx_msgb_copy(mblk_t *mp, sfxge_tx_buffer_t *stbp, size_t *offp,
+ size_t *limitp)
+{
+ size_t data_off;
+ size_t data_size;
+ size_t copy_off;
+ size_t copy_size;
+ boolean_t eop;
+
+ ASSERT3U(*offp, <=, MBLKL(mp));
+ ASSERT(*limitp != 0);
+
+ data_off = *offp;
+ data_size = MBLKL(mp) - *offp;
+
+ copy_off = stbp->stb_esm.esm_used;
+ copy_size = SFXGE_TX_BUFFER_SIZE - copy_off;
+
+ copy_size = MIN(copy_size, data_size);
+ copy_size = MIN(copy_size, *limitp);
+
+ bcopy(mp->b_rptr + data_off,
+ stbp->stb_esm.esm_base + copy_off, copy_size);
+
+ stbp->stb_esm.esm_used += copy_size;
+ ASSERT3U(stbp->stb_esm.esm_used, <=,
+ SFXGE_TX_BUFFER_SIZE);
+
+ *offp += copy_size;
+ *limitp -= copy_size;
+
+ data_off += copy_size;
+ data_size -= copy_size;
+
+ eop = (*limitp == 0 ||
+ (data_size == 0 && mp->b_cont == NULL));
+
+ ASSERT3U(*offp, <=, MBLKL(mp));
+
+ return (eop);
+}
+
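+/*
+ * Add up to 'size' bytes of payload from the message chain to the fragment
+ * list, binding data blocks for DMA where possible and copying the rest into
+ * transmit buffers.
+ */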
+static int
+sfxge_tx_qpayload_fragment(sfxge_txq_t *stp, unsigned int id, mblk_t **mpp,
+ size_t *offp, size_t size, boolean_t copy)
+{
+ sfxge_t *sp = stp->st_sp;
+ mblk_t *mp = *mpp;
+ size_t off = *offp;
+ sfxge_tx_buffer_t *stbp;
+ sfxge_tx_mapping_t *stmp;
+ int rc;
+
+ stbp = stp->st_stbp[id];
+ ASSERT(stbp == NULL || (stbp->stb_esm.esm_used == stbp->stb_off));
+
+ stmp = stp->st_stmp[id];
+
+ while (size != 0) {
+ boolean_t eop;
+
+ ASSERT(mp != NULL);
+
+ if (mp->b_cont != NULL)
+ prefetch_read_many(mp->b_cont);
+
+ ASSERT3U(off, <, MBLKL(mp));
+
+ if (copy)
+ goto copy;
+
+ /*
+ * Check whether we have already mapped this data block for
+ * DMA.
+ */
+ if (stmp == NULL || stmp->stm_mp != mp) {
+ /*
+ * If we are part way through copying a data block then
+ * there's no point in trying to map it for DMA.
+ */
+ if (off != 0)
+ goto copy;
+
+ /*
+ * If the data block is too short then the cost of
+ * mapping it for DMA would outweigh the cost of
+ * copying it.
+ */
+ if (MBLKL(mp) < SFXGE_TX_COPY_THRESHOLD)
+ goto copy;
+
+ /* Try to grab a transmit mapping from the pool */
+ stmp = sfxge_tx_qfmp_get(stp);
+ if (stmp == NULL) {
+ /*
+ * The pool was empty so allocate a new
+ * mapping.
+ */
+ if ((stmp = kmem_cache_alloc(sp->s_tmc,
+ KM_NOSLEEP)) == NULL)
+ goto copy;
+ }
+
+ /* Add the DMA mapping to the list */
+ stmp->stm_next = stp->st_stmp[id];
+ stp->st_stmp[id] = stmp;
+
+ /* Try to bind the data block to the mapping */
+ if (sfxge_tx_msgb_bind(mp, stmp) != 0)
+ goto copy;
+ }
+ ASSERT3P(stmp->stm_mp, ==, mp);
+
+ /*
+ * If we have a partially filled buffer then we must add it to
+ * the fragment list before adding the mapping.
+ */
+ if (stbp != NULL && (stbp->stb_esm.esm_used > stbp->stb_off)) {
+ rc = sfxge_tx_qbuffer_add(stp, stbp, B_FALSE);
+ if (rc != 0)
+ goto fail1;
+ }
+
+ /* Add the mapping to the fragment list */
+ rc = sfxge_tx_qmapping_add(stp, stmp, &off, &size);
+ if (rc != 0)
+ goto fail2;
+
+ ASSERT(off == MBLKL(mp) || size == 0);
+
+ /*
+		 * If the data block has been exhausted then skip over the
+ * control block and advance to the next data block.
+ */
+ if (off == MBLKL(mp)) {
+ mp = mp->b_cont;
+ off = 0;
+ }
+
+ continue;
+
+copy:
+ if (stbp == NULL ||
+ stbp->stb_esm.esm_used == SFXGE_TX_BUFFER_SIZE) {
+ /* Try to grab a buffer from the pool */
+ stbp = sfxge_tx_qfbp_get(stp);
+ if (stbp == NULL) {
+ /*
+ * The pool was empty so allocate a new
+ * buffer.
+ */
+ if ((stbp = kmem_cache_alloc(sp->s_tbc,
+ KM_NOSLEEP)) == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+ }
+
+ /* Add it to the list */
+ stbp->stb_next = stp->st_stbp[id];
+ stp->st_stbp[id] = stbp;
+ }
+
+ /* Copy as much of the data block as we can into the buffer */
+ eop = sfxge_tx_msgb_copy(mp, stbp, &off, &size);
+
+ ASSERT(off == MBLKL(mp) || size == 0 ||
+ stbp->stb_esm.esm_used == SFXGE_TX_BUFFER_SIZE);
+
+ /*
+ * If we have reached the end of the packet, or the buffer is
+ * full, then add the buffer to the fragment list.
+ */
+ if (stbp->stb_esm.esm_used == SFXGE_TX_BUFFER_SIZE || eop) {
+ rc = sfxge_tx_qbuffer_add(stp, stbp, eop);
+ if (rc != 0)
+ goto fail4;
+ }
+
+ /*
+		 * If the data block has been exhausted then advance to the next
+ * one.
+ */
+ if (off == MBLKL(mp)) {
+ mp = mp->b_cont;
+ off = 0;
+ }
+ }
+
+ *mpp = mp;
+ *offp = off;
+
+ return (0);
+
+fail4:
+ DTRACE_PROBE(fail4);
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
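+/*
+ * Fragment an LSO packet: for each MSS-sized segment, copy in adjusted
+ * Ethernet/IP/TCP headers (ip_id, ip_len, sequence number and flags) and
+ * then add that segment's payload to the fragment list.
+ */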
+static int
+sfxge_tx_qlso_fragment(sfxge_txq_t *stp, sfxge_tx_packet_t *stpp,
+ boolean_t copy)
+{
+ sfxge_t *sp = stp->st_sp;
+ mblk_t *mp = stpp->stp_mp;
+ struct ether_header *etherhp = stpp->stp_etherhp;
+ struct ip *iphp = stpp->stp_iphp;
+ struct tcphdr *thp = stpp->stp_thp;
+ size_t size = stpp->stp_size;
+ size_t off = stpp->stp_off;
+ size_t mss = stpp->stp_mss;
+ unsigned int id;
+ caddr_t hp;
+ size_t ehs, hs;
+ uint16_t start_len;
+ uint16_t start_id;
+ uint16_t ip_id;
+ uint8_t start_flags;
+ uint32_t start_seq;
+ uint32_t th_seq;
+ size_t lss;
+ sfxge_tx_buffer_t *stbp;
+ int rc;
+
+ ASSERT(mutex_owned(&(stp->st_lock)));
+
+ if ((DB_LSOFLAGS(mp) & HW_LSO) == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
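+	/*
+	 * Derive the descriptor ring index by masking the count of added
+	 * descriptors; this relies on SFXGE_TX_NDESCS being a power of two.
+	 */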
+ id = stp->st_added & (SFXGE_TX_NDESCS - 1);
+
+ ASSERT(stp->st_n == 0);
+ ASSERT(stp->st_stbp[id] == NULL);
+ ASSERT(stp->st_stmp[id] == NULL);
+
+ ehs = (etherhp->ether_type == htons(ETHERTYPE_VLAN)) ?
+ sizeof (struct ether_vlan_header) :
+ sizeof (struct ether_header);
+ if (msgdsize(mp) != ehs + ntohs(iphp->ip_len)) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /* The payload offset is equivalent to the size of the headers */
+ hp = (caddr_t)(mp->b_rptr);
+ hs = off;
+
+ /*
+ * If the initial data block only contains the headers then advance
+ * to the next one.
+ */
+ if (hs > MBLKL(mp)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ mp->b_rptr += hs;
+
+ if (MBLKL(mp) == 0)
+ mp = mp->b_cont;
+
+ off = 0;
+
+ /* Check IP and TCP headers are suitable for LSO */
+ if (((iphp->ip_off & ~htons(IP_DF)) != 0) ||
+ ((thp->th_flags & (TH_URG | TH_SYN)) != 0) ||
+ (thp->th_urp != 0)) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ if (size + (thp->th_off << 2) + (iphp->ip_hl << 2) !=
+ ntohs(iphp->ip_len)) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /*
+	 * Get the base IP id. The stack leaves enough of a gap in id space
+ * for us to increment this for each segment we send out.
+ */
+ start_len = ntohs(iphp->ip_len);
+ start_id = ip_id = ntohs(iphp->ip_id);
+
+ /* Get the base TCP sequence number and flags */
+ start_flags = thp->th_flags;
+ start_seq = th_seq = ntohl(thp->th_seq);
+
+ /* Adjust the header for interim segments */
+ iphp->ip_len = htons((iphp->ip_hl << 2) + (thp->th_off << 2) + mss);
+ thp->th_flags = start_flags & ~(TH_PUSH | TH_FIN);
+
+ lss = size;
+ if ((lss / mss) >= (EFX_TXQ_LIMIT(SFXGE_TX_NDESCS) / 2)) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ stbp = NULL;
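+	/*
+	 * Emit one TCP segment per iteration: copy the adjusted Ethernet,
+	 * IP and TCP headers into a TX buffer, then append up to mss bytes
+	 * of payload from the original mblk chain.
+	 */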
+ while (lss != 0) {
+ size_t ss = MIN(lss, mss);
+ boolean_t eol = (ss == lss);
+
+ /* Adjust the header for this segment */
+ iphp->ip_id = htons(ip_id);
+ ip_id++;
+
+ thp->th_seq = htonl(th_seq);
+ th_seq += ss;
+
+ /* If this is the final segment then do some extra adjustment */
+ if (eol) {
+ iphp->ip_len = htons((iphp->ip_hl << 2) +
+ (thp->th_off << 2) + ss);
+ thp->th_flags = start_flags;
+ }
+
+ if (stbp == NULL ||
+ stbp->stb_esm.esm_used + hs > SFXGE_TX_BUFFER_SIZE) {
+ /* Try to grab a buffer from the pool */
+ stbp = sfxge_tx_qfbp_get(stp);
+ if (stbp == NULL) {
+ /*
+ * The pool was empty so allocate a new
+ * buffer.
+ */
+ if ((stbp = kmem_cache_alloc(sp->s_tbc,
+ KM_NOSLEEP)) == NULL) {
+ rc = ENOMEM;
+ goto fail6;
+ }
+ }
+
+ /* Add it to the list */
+ stbp->stb_next = stp->st_stbp[id];
+ stp->st_stbp[id] = stbp;
+ }
+
+ /* Copy in the headers */
+ ASSERT3U(stbp->stb_off, ==, stbp->stb_esm.esm_used);
+ bcopy(hp, stbp->stb_esm.esm_base + stbp->stb_off, hs);
+ stbp->stb_esm.esm_used += hs;
+
+ /* Add the buffer to the fragment list */
+ rc = sfxge_tx_qbuffer_add(stp, stbp, B_FALSE);
+ if (rc != 0)
+ goto fail7;
+
+ /* Add the payload to the fragment list */
+ if ((rc = sfxge_tx_qpayload_fragment(stp, id, &mp, &off,
+ ss, copy)) != 0)
+ goto fail8;
+
+ lss -= ss;
+ }
+ ASSERT3U(off, ==, 0);
+ ASSERT3P(mp, ==, NULL);
+
+ ASSERT3U(th_seq - start_seq, ==, size);
+
+ /*
+ * If no part of the packet has been mapped for DMA then we can free
+ * it now, otherwise it can only be freed on completion.
+ */
+ if (stp->st_stmp[id] == NULL)
+ freemsg(stpp->stp_mp);
+ else
+ stp->st_mp[id] = stpp->stp_mp;
+
+ stpp->stp_mp = NULL;
+
+ return (0);
+
+fail8:
+ DTRACE_PROBE(fail8);
+fail7:
+ DTRACE_PROBE(fail7);
+fail6:
+ DTRACE_PROBE(fail6);
+fail5:
+ DTRACE_PROBE(fail5);
+
+ /* Restore the header */
+ thp->th_seq = htonl(start_seq);
+ thp->th_flags = start_flags;
+
+ iphp->ip_len = htons(start_len);
+ iphp->ip_id = htons(start_id);
+
+fail4:
+ DTRACE_PROBE(fail4);
+
+ mp = stpp->stp_mp;
+ mp->b_rptr -= hs;
+
+ ASSERT3U(((etherhp->ether_type == htons(ETHERTYPE_VLAN)) ?
+ sizeof (struct ether_vlan_header) :
+ sizeof (struct ether_header)) +
+ ntohs(iphp->ip_len), ==, msgdsize(mp));
+
+ ASSERT(stp->st_mp[id] == NULL);
+
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+static int
+sfxge_tx_qpacket_fragment(sfxge_txq_t *stp, sfxge_tx_packet_t *stpp,
+ boolean_t copy)
+{
+ sfxge_t *sp = stp->st_sp;
+ mblk_t *mp = stpp->stp_mp;
+ unsigned int id;
+ size_t off;
+ size_t size;
+ sfxge_tx_mapping_t *stmp;
+ sfxge_tx_buffer_t *stbp;
+ int rc;
+
+ ASSERT(mutex_owned(&(stp->st_lock)));
+
+ ASSERT(stp->st_n == 0);
+
+ id = stp->st_added & (SFXGE_TX_NDESCS - 1);
+
+ ASSERT(stp->st_stbp[id] == NULL);
+ ASSERT(stp->st_stmp[id] == NULL);
+
+ off = 0;
+ size = LONG_MAX; /* must be larger than the packet */
+
+ stbp = NULL;
+ stmp = NULL;
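+	/*
+	 * Walk the mblk chain, preferring to bind each data block for DMA;
+	 * blocks that are too short, or for which no mapping can be
+	 * obtained, are copied into pooled TX buffers instead.
+	 */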
+
+ while (mp != NULL) {
+ boolean_t eop;
+
+ ASSERT(mp != NULL);
+
+ if (mp->b_cont != NULL)
+ prefetch_read_many(mp->b_cont);
+
+ ASSERT(stmp == NULL || stmp->stm_mp != mp);
+
+ if (copy)
+ goto copy;
+
+ /*
+ * If we are part way through copying a data block then there's
+ * no point in trying to map it for DMA.
+ */
+ if (off != 0)
+ goto copy;
+
+ /*
+ * If the data block is too short then the cost of mapping it
+ * for DMA would outweigh the cost of copying it.
+ *
+ * TX copy break
+ */
+ if (MBLKL(mp) < SFXGE_TX_COPY_THRESHOLD)
+ goto copy;
+
+ /* Try to grab a transmit mapping from the pool */
+ stmp = sfxge_tx_qfmp_get(stp);
+ if (stmp == NULL) {
+ /*
+ * The pool was empty so allocate a new
+ * mapping.
+ */
+ if ((stmp = kmem_cache_alloc(sp->s_tmc,
+ KM_NOSLEEP)) == NULL)
+ goto copy;
+ }
+
+ /* Add the DMA mapping to the list */
+ stmp->stm_next = stp->st_stmp[id];
+ stp->st_stmp[id] = stmp;
+
+ /* Try to bind the data block to the mapping */
+ if (sfxge_tx_msgb_bind(mp, stmp) != 0)
+ goto copy;
+
+ /*
+ * If we have a partially filled buffer then we must add it to
+ * the fragment list before adding the mapping.
+ */
+ if (stbp != NULL && (stbp->stb_esm.esm_used > stbp->stb_off)) {
+ rc = sfxge_tx_qbuffer_add(stp, stbp, B_FALSE);
+ if (rc != 0)
+ goto fail1;
+ }
+
+ /* Add the mapping to the fragment list */
+ rc = sfxge_tx_qmapping_add(stp, stmp, &off, &size);
+ if (rc != 0)
+ goto fail2;
+
+ ASSERT3U(off, ==, MBLKL(mp));
+
+ /* Advance to the next data block */
+ mp = mp->b_cont;
+ off = 0;
+ continue;
+
+copy:
+ if (stbp == NULL ||
+ stbp->stb_esm.esm_used == SFXGE_TX_BUFFER_SIZE) {
+ /* Try to grab a buffer from the pool */
+ stbp = sfxge_tx_qfbp_get(stp);
+ if (stbp == NULL) {
+ /*
+ * The pool was empty so allocate a new
+ * buffer.
+ */
+ if ((stbp = kmem_cache_alloc(sp->s_tbc,
+ KM_NOSLEEP)) == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+ }
+
+ /* Add it to the list */
+ stbp->stb_next = stp->st_stbp[id];
+ stp->st_stbp[id] = stbp;
+ }
+
+ /* Copy as much of the data block as we can into the buffer */
+ eop = sfxge_tx_msgb_copy(mp, stbp, &off, &size);
+
+ ASSERT(off == MBLKL(mp) ||
+ stbp->stb_esm.esm_used == SFXGE_TX_BUFFER_SIZE);
+
+ /*
+ * If we have reached the end of the packet, or the buffer is
+ * full, then add the buffer to the fragment list.
+ */
+ if (stbp->stb_esm.esm_used == SFXGE_TX_BUFFER_SIZE || eop) {
+ rc = sfxge_tx_qbuffer_add(stp, stbp, eop);
+ if (rc != 0)
+ goto fail4;
+ }
+
+ /*
+		 * If the data block has been exhausted then advance to the next
+ * one.
+ */
+ if (off == MBLKL(mp)) {
+ mp = mp->b_cont;
+ off = 0;
+ }
+ }
+ ASSERT3U(off, ==, 0);
+ ASSERT3P(mp, ==, NULL);
+ ASSERT3U(size, !=, 0);
+
+ /*
+ * If no part of the packet has been mapped for DMA then we can free
+ * it now, otherwise it can only be freed on completion.
+ */
+ if (stp->st_stmp[id] == NULL)
+ freemsg(stpp->stp_mp);
+ else
+ stp->st_mp[id] = stpp->stp_mp;
+
+ stpp->stp_mp = NULL;
+
+ return (0);
+
+fail4:
+ DTRACE_PROBE(fail4);
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ ASSERT(stp->st_stmp[id] == NULL);
+
+ return (rc);
+}
+
+
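+/* True if packets are waiting on the deferred packet list's put list */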
+#define SFXGE_TX_QDPL_PUT_PENDING(_stp) \
+ ((_stp)->st_dpl.std_put != 0)
+
+static void
+sfxge_tx_qdpl_swizzle(sfxge_txq_t *stp)
+{
+ sfxge_tx_dpl_t *stdp = &(stp->st_dpl);
+ volatile uintptr_t *putp;
+ uintptr_t put;
+ sfxge_tx_packet_t *stpp;
+ sfxge_tx_packet_t *p;
+ sfxge_tx_packet_t **pp;
+ unsigned int count;
+
+ ASSERT(mutex_owned(&(stp->st_lock)));
+
+	/*
+	 * In-flight TX packets are guaranteed to cause more TX completions,
+	 * and hence more swizzles must happen.
+	 */
+ ASSERT3U(stdp->std_count, <=, sfxge_tx_dpl_get_pkt_max(stp));
+ if (stdp->std_count >= stdp->get_pkt_limit)
+ return;
+
+ /* Acquire the put list - replacing with an empty list */
+ putp = &(stdp->std_put);
+ put = atomic_swap_ulong(putp, 0);
+ stpp = (void *)put;
+
+ if (stpp == NULL)
+ return;
+
+ /* Reverse the list */
+ pp = &(stpp->stp_next);
+ p = NULL;
+
+ count = 0;
+ do {
+ sfxge_tx_packet_t *next;
+
+ next = stpp->stp_next;
+
+ stpp->stp_next = p;
+ p = stpp;
+
+ count++;
+ stpp = next;
+ } while (stpp != NULL);
+
+ /* Add it to the tail of the get list */
+ ASSERT3P(*pp, ==, NULL);
+
+ *(stdp->std_getp) = p;
+ stdp->std_getp = pp;
+ stdp->std_count += count;
+ ASSERT3U(stdp->std_count, <=, sfxge_tx_dpl_get_pkt_max(stp));
+
+ DTRACE_PROBE2(dpl_counts, int, stdp->std_count, int, count);
+}
+
+
+/*
+ * If the TXQ is locked, move the TX DPL put list and this packet onto the
+ * TX DPL get list. If the TXQ is unlocked, atomically add this packet to
+ * the TX DPL put list.
+ *
+ * The only possible error is ENOSPC (used for TX backpressure), returned
+ * when either the TX DPL put list or the TX DPL get list is full. In both
+ * cases there must be future TX completions (as represented by the packets
+ * already on the DPL get lists), which ensures that mac_tx_update() will
+ * eventually be called from sfxge_tx_qcomplete().
+ */
+static inline int
+sfxge_tx_qdpl_add(sfxge_txq_t *stp, sfxge_tx_packet_t *stpp, int locked)
+{
+ sfxge_tx_dpl_t *stdp = &stp->st_dpl;
+
+ ASSERT3P(stpp->stp_next, ==, NULL);
+
+ if (locked) {
+ ASSERT(mutex_owned(&stp->st_lock));
+
+ if (stdp->std_count >= stdp->get_pkt_limit) {
+ stdp->get_full_count++;
+ return (ENOSPC);
+ }
+
+ /* Reverse the put list onto the get list */
+ sfxge_tx_qdpl_swizzle(stp);
+
+ /* Add to the tail of the get list */
+ *(stdp->std_getp) = stpp;
+ stdp->std_getp = &stpp->stp_next;
+ stdp->std_count++;
+ ASSERT3U(stdp->std_count, <=, sfxge_tx_dpl_get_pkt_max(stp));
+
+ } else {
+ volatile uintptr_t *putp;
+ uintptr_t old;
+ uintptr_t new;
+ sfxge_tx_packet_t *old_pkt;
+
+ putp = &(stdp->std_put);
+ new = (uintptr_t)stpp;
+
+ /* Add to the head of the put list, keeping a list length */
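+		/*
+		 * The head packet's stp_dpl_put_len records the current put
+		 * list length; the compare-and-swap below retries if another
+		 * thread has changed the head in the meantime.
+		 */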
+ do {
+ old = *putp;
+ old_pkt = (sfxge_tx_packet_t *)old;
+
+ stpp->stp_dpl_put_len = old ?
+ old_pkt->stp_dpl_put_len + 1 : 1;
+
+ if (stpp->stp_dpl_put_len >= stdp->put_pkt_limit) {
+ stpp->stp_next = 0;
+ stpp->stp_dpl_put_len = 0;
+ stdp->put_full_count++;
+ return (ENOSPC);
+ }
+
+ stpp->stp_next = (void *)old;
+ } while (atomic_cas_ulong(putp, old, new) != old);
+ }
+ return (0);
+}
+
+
+/* Take all packets from DPL get list and try to send to HW */
+static void
+sfxge_tx_qdpl_drain(sfxge_txq_t *stp)
+{
+ sfxge_t *sp = stp->st_sp;
+ sfxge_tx_dpl_t *stdp = &(stp->st_dpl);
+ unsigned int pushed = stp->st_added;
+ sfxge_tx_packet_t *stpp;
+ unsigned int count;
+
+ ASSERT(mutex_owned(&(stp->st_lock)));
+
+ prefetch_read_many(sp->s_enp);
+ prefetch_read_many(stp->st_etp);
+
+ stpp = stdp->std_get;
+ count = stdp->std_count;
+
+ while (count != 0) {
+ sfxge_tx_packet_t *next;
+ boolean_t copy;
+ int rc;
+
+ ASSERT(stpp != NULL);
+
+ /* Split stpp off */
+ next = stpp->stp_next;
+ stpp->stp_next = NULL;
+
+ if (next != NULL)
+ prefetch_read_many(next);
+
+ if (stp->st_state != SFXGE_TXQ_STARTED)
+ goto reject;
+
+ copy = B_FALSE;
+
+again:
+ /* Fragment the packet */
+ if (stpp->stp_mss != 0) {
+ rc = sfxge_tx_qlso_fragment(stp, stpp, copy);
+ } else {
+ rc = sfxge_tx_qpacket_fragment(stp, stpp, copy);
+ }
+
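+		/*
+		 * On the first pass the packet is mapped for DMA where
+		 * possible; if the resulting fragment list does not fit
+		 * (ENOSPC) the packet is retried once with copy set,
+		 * flattening it into TX buffers.
+		 */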
+ switch (rc) {
+ case 0:
+ break;
+
+ case ENOSPC:
+ if (!copy)
+ goto copy;
+
+ /*FALLTHRU*/
+ default:
+ goto reject;
+ }
+
+ /* Free the packet structure */
+ stpp->stp_etherhp = NULL;
+ stpp->stp_iphp = NULL;
+ stpp->stp_thp = NULL;
+ stpp->stp_off = 0;
+ stpp->stp_size = 0;
+ stpp->stp_mss = 0;
+ stpp->stp_dpl_put_len = 0;
+
+ ASSERT3P(stpp->stp_mp, ==, NULL);
+
+ if (sfxge_tx_qfpp_put(stp, stpp) != 0) {
+ sfxge_tx_packet_destroy(sp, stpp);
+ stpp = NULL;
+ }
+
+ --count;
+ stpp = next;
+
+ /* Post the packet */
+ sfxge_tx_qlist_post(stp);
+
+ if (stp->st_unblock != SFXGE_TXQ_NOT_BLOCKED)
+ goto defer;
+
+ if (stp->st_added - pushed >= SFXGE_TX_BATCH) {
+ efx_tx_qpush(stp->st_etp, stp->st_added, pushed);
+ pushed = stp->st_added;
+ }
+
+ continue;
+
+copy:
+ /* Abort the current fragment list */
+ sfxge_tx_qlist_abort(stp);
+
+ /* Try copying the packet to flatten it */
+ ASSERT(!copy);
+ copy = B_TRUE;
+
+ goto again;
+
+reject:
+ /* Abort the current fragment list */
+ sfxge_tx_qlist_abort(stp);
+
+ /* Discard the packet */
+ freemsg(stpp->stp_mp);
+ stpp->stp_mp = NULL;
+
+ /* Free the packet structure */
+ stpp->stp_etherhp = NULL;
+ stpp->stp_iphp = NULL;
+ stpp->stp_thp = NULL;
+ stpp->stp_off = 0;
+ stpp->stp_size = 0;
+ stpp->stp_mss = 0;
+ stpp->stp_dpl_put_len = 0;
+
+ if (sfxge_tx_qfpp_put(stp, stpp) != 0) {
+ sfxge_tx_packet_destroy(sp, stpp);
+ stpp = NULL;
+ }
+
+ --count;
+ stpp = next;
+ continue;
+defer:
+ DTRACE_PROBE1(defer, unsigned int, stp->st_index);
+ break;
+ }
+
+ if (count == 0) {
+ /* New empty get list */
+ ASSERT3P(stpp, ==, NULL);
+ stdp->std_get = NULL;
+ stdp->std_count = 0;
+
+ stdp->std_getp = &(stdp->std_get);
+ } else {
+ /* shorten the list by moving the head */
+ stdp->std_get = stpp;
+ stdp->std_count = count;
+ ASSERT3U(stdp->std_count, <=, sfxge_tx_dpl_get_pkt_max(stp));
+ }
+
+ if (stp->st_added != pushed)
+ efx_tx_qpush(stp->st_etp, stp->st_added, pushed);
+
+ ASSERT(stp->st_unblock != SFXGE_TXQ_NOT_BLOCKED ||
+ stdp->std_count == 0);
+}
+
+/* Swizzle the deferred packet list and try to push it to the HW */
+static inline void
+sfxge_tx_qdpl_service(sfxge_txq_t *stp)
+{
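+	/*
+	 * Drain the get list under the lock, then drop the lock; if more
+	 * packets arrived on the put list in the meantime, loop again
+	 * provided the lock can be re-acquired without blocking.
+	 */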
+ do {
+ ASSERT(mutex_owned(&(stp->st_lock)));
+
+ if (SFXGE_TX_QDPL_PUT_PENDING(stp))
+ sfxge_tx_qdpl_swizzle(stp);
+
+ if (stp->st_unblock == SFXGE_TXQ_NOT_BLOCKED)
+ sfxge_tx_qdpl_drain(stp);
+
+ mutex_exit(&(stp->st_lock));
+
+ if (!SFXGE_TX_QDPL_PUT_PENDING(stp))
+ break;
+ } while (mutex_tryenter(&(stp->st_lock)));
+}
+
+static void
+sfxge_tx_qdpl_flush_locked(sfxge_txq_t *stp)
+{
+ sfxge_t *sp = stp->st_sp;
+ sfxge_tx_dpl_t *stdp = &(stp->st_dpl);
+ sfxge_tx_packet_t *stpp;
+ unsigned int count;
+
+ ASSERT(mutex_owned(&(stp->st_lock)));
+
+ /* Swizzle put list to the get list */
+ sfxge_tx_qdpl_swizzle(stp);
+
+ stpp = stdp->std_get;
+ count = stdp->std_count;
+
+ while (count != 0) {
+ sfxge_tx_packet_t *next;
+
+ next = stpp->stp_next;
+ stpp->stp_next = NULL;
+
+ /* Discard the packet */
+ freemsg(stpp->stp_mp);
+ stpp->stp_mp = NULL;
+
+ /* Free the packet structure */
+ stpp->stp_etherhp = NULL;
+ stpp->stp_iphp = NULL;
+ stpp->stp_thp = NULL;
+ stpp->stp_off = 0;
+ stpp->stp_size = 0;
+ stpp->stp_mss = 0;
+ stpp->stp_dpl_put_len = 0;
+
+ sfxge_tx_packet_destroy(sp, stpp);
+
+ --count;
+ stpp = next;
+ }
+
+ ASSERT3P(stpp, ==, NULL);
+
+ /* Empty list */
+ stdp->std_get = NULL;
+ stdp->std_count = 0;
+ stdp->std_getp = &(stdp->std_get);
+}
+
+
+void
+sfxge_tx_qdpl_flush(sfxge_txq_t *stp)
+{
+ mutex_enter(&(stp->st_lock));
+ sfxge_tx_qdpl_flush_locked(stp);
+ mutex_exit(&(stp->st_lock));
+}
+
+
+static void
+sfxge_tx_qunblock(sfxge_txq_t *stp)
+{
+ sfxge_t *sp = stp->st_sp;
+ unsigned int evq = stp->st_evq;
+ sfxge_evq_t *sep = sp->s_sep[evq];
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ mutex_enter(&(stp->st_lock));
+
+ if (stp->st_state != SFXGE_TXQ_STARTED) {
+ mutex_exit(&(stp->st_lock));
+ return;
+ }
+
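+	/*
+	 * If the queue was blocked waiting for descriptors, clear the block
+	 * once enough completions have freed space and re-post any pending
+	 * fragment list.
+	 */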
+ if (stp->st_unblock != SFXGE_TXQ_NOT_BLOCKED) {
+ unsigned int level;
+
+ level = stp->st_added - stp->st_completed;
+ if (level <= stp->st_unblock) {
+ stp->st_unblock = SFXGE_TXQ_NOT_BLOCKED;
+ sfxge_tx_qlist_post(stp);
+ }
+ }
+
+ sfxge_tx_qdpl_service(stp);
+ /* lock has been dropped */
+}
+
+void
+sfxge_tx_qcomplete(sfxge_txq_t *stp)
+{
+ sfxge_t *sp = stp->st_sp;
+ sfxge_tx_dpl_t *stdp = &(stp->st_dpl);
+ unsigned int evq = stp->st_evq;
+ sfxge_evq_t *sep = sp->s_sep[evq];
+ unsigned int completed;
+
+ ASSERT(mutex_owned(&(sep->se_lock)));
+
+ completed = stp->st_completed;
+ while (completed != stp->st_pending) {
+ unsigned int id;
+ sfxge_tx_mapping_t *stmp;
+
+ id = completed++ & (SFXGE_TX_NDESCS - 1);
+
+ if ((stmp = stp->st_stmp[id]) != NULL) {
+ mblk_t *mp;
+
+ /* Unbind all the mappings */
+ do {
+ ASSERT(stmp->stm_mp != NULL);
+ sfxge_tx_msgb_unbind(stmp);
+
+ stmp = stmp->stm_next;
+ } while (stmp != NULL);
+
+ /*
+ * Now that the packet is no longer mapped for DMA it
+ * can be freed.
+ */
+ mp = stp->st_mp[id];
+ stp->st_mp[id] = NULL;
+
+ ASSERT(mp != NULL);
+ freemsg(mp);
+ }
+ }
+ stp->st_completed = completed;
+
+ /* Check whether we need to unblock the queue */
+ if (stp->st_unblock != SFXGE_TXQ_NOT_BLOCKED) {
+ unsigned int level;
+
+ level = stp->st_added - stp->st_completed;
+ if (level <= stp->st_unblock)
+ sfxge_tx_qunblock(stp);
+ }
+
+	/* Release TX backpressure applied because the TX DPL put/get list was full */
+ if (stdp->std_count < stdp->get_pkt_limit)
+ mac_tx_update(sp->s_mh);
+}
+
+void
+sfxge_tx_qflush_done(sfxge_txq_t *stp)
+{
+ sfxge_t *sp = stp->st_sp;
+ boolean_t flush_pending = B_FALSE;
+
+ ASSERT(mutex_owned(&(sp->s_sep[stp->st_evq]->se_lock)));
+
+ mutex_enter(&(stp->st_lock));
+
+ switch (stp->st_state) {
+ case SFXGE_TXQ_INITIALIZED:
+ /* Ignore flush event after TxQ destroyed */
+ break;
+
+ case SFXGE_TXQ_FLUSH_PENDING:
+ flush_pending = B_TRUE;
+ stp->st_state = SFXGE_TXQ_FLUSH_DONE;
+ break;
+
+ case SFXGE_TXQ_FLUSH_FAILED:
+ /* MC may have rebooted before handling the flush request */
+ stp->st_state = SFXGE_TXQ_FLUSH_DONE;
+ break;
+
+ case SFXGE_TXQ_STARTED:
+ /*
+ * MC initiated flush on MC reboot or because of bad Tx
+ * descriptor
+ */
+ stp->st_state = SFXGE_TXQ_FLUSH_DONE;
+ break;
+
+ case SFXGE_TXQ_FLUSH_DONE:
+ /* Ignore unexpected extra flush event */
+ ASSERT(B_FALSE);
+ break;
+
+ default:
+ ASSERT(B_FALSE);
+ }
+
+
+ mutex_exit(&(stp->st_lock));
+
+ if (flush_pending == B_FALSE) {
+ /* Flush was not pending */
+ return;
+ }
+
+ mutex_enter(&(sp->s_tx_flush_lock));
+ sp->s_tx_flush_pending--;
+ if (sp->s_tx_flush_pending <= 0) {
+ /* All queues flushed: wakeup sfxge_tx_stop() */
+ cv_signal(&(sp->s_tx_flush_kv));
+ }
+ mutex_exit(&(sp->s_tx_flush_lock));
+}
+
+static void
+sfxge_tx_qflush(sfxge_t *sp, unsigned int index, boolean_t wait_for_flush)
+{
+ sfxge_txq_t *stp = sp->s_stp[index];
+ int rc;
+
+ ASSERT(mutex_owned(&(sp->s_state_lock)));
+ ASSERT(mutex_owned(&(sp->s_tx_flush_lock)));
+
+ mutex_enter(&(stp->st_lock));
+
+ /* Prepare to flush and stop the queue */
+ if (stp->st_state == SFXGE_TXQ_STARTED) {
+ /* Flush the transmit queue */
+ if ((rc = efx_tx_qflush(stp->st_etp)) == EALREADY) {
+ /* Already flushed, may be initiated by MC */
+ stp->st_state = SFXGE_TXQ_FLUSH_DONE;
+ } else if (rc != 0) {
+ /* Unexpected error */
+ stp->st_state = SFXGE_TXQ_FLUSH_FAILED;
+ } else if (wait_for_flush) {
+ stp->st_state = SFXGE_TXQ_FLUSH_PENDING;
+ sp->s_tx_flush_pending++;
+ } else {
+ /* Assume the flush is done */
+ stp->st_state = SFXGE_TXQ_FLUSH_DONE;
+ }
+ }
+
+ mutex_exit(&(stp->st_lock));
+}
+
+static void
+sfxge_tx_qstop(sfxge_t *sp, unsigned int index)
+{
+ sfxge_txq_t *stp = sp->s_stp[index];
+ unsigned int evq = stp->st_evq;
+ sfxge_evq_t *sep = sp->s_sep[evq];
+
+ mutex_enter(&(sep->se_lock));
+ mutex_enter(&(stp->st_lock));
+
+ if (stp->st_state == SFXGE_TXQ_INITIALIZED)
+ goto done;
+
+ ASSERT(stp->st_state == SFXGE_TXQ_FLUSH_PENDING ||
+ stp->st_state == SFXGE_TXQ_FLUSH_DONE ||
+ stp->st_state == SFXGE_TXQ_FLUSH_FAILED);
+
+ /* All queues should have been flushed */
+ if (stp->st_sp->s_tx_flush_pending != 0) {
+ dev_err(sp->s_dip, CE_NOTE,
+ SFXGE_CMN_ERR "txq[%d] stop with flush_pending=%d",
+ index, stp->st_sp->s_tx_flush_pending);
+ }
+ if (stp->st_state == SFXGE_TXQ_FLUSH_FAILED) {
+ dev_err(sp->s_dip, CE_NOTE,
+ SFXGE_CMN_ERR "txq[%d] flush failed", index);
+ }
+
+ /* Destroy the transmit queue */
+ efx_tx_qdestroy(stp->st_etp);
+ stp->st_etp = NULL;
+
+ /* Clear entries from the buffer table */
+ sfxge_sram_buf_tbl_clear(sp, stp->st_id,
+ EFX_TXQ_NBUFS(SFXGE_TX_NDESCS));
+
+ sfxge_tx_qlist_abort(stp);
+ ASSERT3U(stp->st_n, ==, 0);
+
+ stp->st_unblock = SFXGE_TXQ_NOT_BLOCKED;
+
+ stp->st_pending = stp->st_added;
+
+ sfxge_tx_qcomplete(stp);
+ ASSERT3U(stp->st_completed, ==, stp->st_pending);
+
+ sfxge_tx_qreap(stp);
+ ASSERT3U(stp->st_reaped, ==, stp->st_completed);
+
+ /*
+ * Ensure the deferred packet list is cleared
+ * Can race with sfxge_tx_packet_add() adding to the put list
+ */
+ sfxge_tx_qdpl_flush_locked(stp);
+
+ stp->st_added = 0;
+ stp->st_pending = 0;
+ stp->st_completed = 0;
+ stp->st_reaped = 0;
+
+ stp->st_state = SFXGE_TXQ_INITIALIZED;
+
+done:
+ mutex_exit(&(stp->st_lock));
+ mutex_exit(&(sep->se_lock));
+}
+
+static void
+sfxge_tx_qfini(sfxge_t *sp, unsigned int index)
+{
+ sfxge_txq_t *stp = sp->s_stp[index];
+ sfxge_tx_dpl_t *stdp = &(stp->st_dpl);
+
+ ASSERT3U(stp->st_state, ==, SFXGE_TXQ_INITIALIZED);
+ stp->st_state = SFXGE_TXQ_UNINITIALIZED;
+
+ /* Detach the TXQ from the driver */
+ sp->s_stp[index] = NULL;
+ ASSERT(sp->s_tx_qcount > 0);
+ sp->s_tx_qcount--;
+
+ /* Free the EVQ label for events from this TXQ */
+ (void) sfxge_ev_txlabel_free(sp, stp->st_evq, stp, stp->st_label);
+ stp->st_label = 0;
+
+ /* Tear down the statistics */
+ sfxge_tx_kstat_fini(stp);
+
+ /* Ensure the deferred packet list is empty */
+ ASSERT3U(stdp->std_count, ==, 0);
+ ASSERT3P(stdp->std_get, ==, NULL);
+ ASSERT3U(stdp->std_put, ==, 0);
+
+ /* Clear the free buffer pool */
+ sfxge_tx_qfbp_empty(stp);
+
+ /* Clear the free mapping pool */
+ sfxge_tx_qfmp_empty(stp);
+
+ /* Clear the free packet pool */
+ sfxge_tx_qfpp_empty(stp);
+
+ mutex_destroy(&(stp->st_lock));
+
+ stp->st_evq = 0;
+ stp->st_type = 0;
+ stp->st_index = 0;
+
+ kmem_cache_free(sp->s_tqc, stp);
+}
+
+int
+sfxge_tx_init(sfxge_t *sp)
+{
+ sfxge_intr_t *sip = &(sp->s_intr);
+ char name[MAXNAMELEN];
+ sfxge_txq_type_t qtype;
+ unsigned int txq, evq;
+ int index;
+ int rc;
+
+ (void) snprintf(name, MAXNAMELEN - 1, "%s%d_tx_packet_cache",
+ ddi_driver_name(sp->s_dip), ddi_get_instance(sp->s_dip));
+
+ sp->s_tpc = kmem_cache_create(name, sizeof (sfxge_tx_packet_t),
+ SFXGE_CPU_CACHE_SIZE, sfxge_tx_packet_ctor, sfxge_tx_packet_dtor,
+ NULL, sp, NULL, 0);
+ ASSERT(sp->s_tpc != NULL);
+
+ (void) snprintf(name, MAXNAMELEN - 1, "%s%d_tx_buffer_cache",
+ ddi_driver_name(sp->s_dip), ddi_get_instance(sp->s_dip));
+
+ sp->s_tbc = kmem_cache_create(name, sizeof (sfxge_tx_buffer_t),
+ SFXGE_CPU_CACHE_SIZE, sfxge_tx_buffer_ctor, sfxge_tx_buffer_dtor,
+ NULL, sp, NULL, 0);
+ ASSERT(sp->s_tbc != NULL);
+
+ (void) snprintf(name, MAXNAMELEN - 1, "%s%d_tx_mapping_cache",
+ ddi_driver_name(sp->s_dip), ddi_get_instance(sp->s_dip));
+
+ sp->s_tmc = kmem_cache_create(name, sizeof (sfxge_tx_mapping_t),
+ SFXGE_CPU_CACHE_SIZE, sfxge_tx_mapping_ctor, sfxge_tx_mapping_dtor,
+ NULL, sp, NULL, 0);
+ ASSERT(sp->s_tmc != NULL);
+
+ (void) snprintf(name, MAXNAMELEN - 1, "%s%d_txq_cache",
+ ddi_driver_name(sp->s_dip), ddi_get_instance(sp->s_dip));
+
+ sp->s_tqc = kmem_cache_create(name, sizeof (sfxge_txq_t),
+ SFXGE_CPU_CACHE_SIZE, sfxge_tx_qctor, sfxge_tx_qdtor, NULL, sp,
+ NULL, 0);
+ ASSERT(sp->s_tqc != NULL);
+
+ /* Initialize the transmit queues. */
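+	/*
+	 * One transmit queue per allocated interrupt for the no-checksum and
+	 * IP/TCP/UDP-checksum types; a single queue suffices for traffic that
+	 * needs only IP header checksum offload.
+	 */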
+ sp->s_tx_scale_max[SFXGE_TXQ_NON_CKSUM] = sip->si_nalloc;
+ sp->s_tx_scale_max[SFXGE_TXQ_IP_CKSUM] = 1;
+ sp->s_tx_scale_max[SFXGE_TXQ_IP_TCP_UDP_CKSUM] = sip->si_nalloc;
+
+ /* Ensure minimum queue counts required by sfxge_tx_packet_add(). */
+ if (sp->s_tx_scale_max[SFXGE_TXQ_NON_CKSUM] < 1)
+ sp->s_tx_scale_max[SFXGE_TXQ_NON_CKSUM] = 1;
+
+ if (sp->s_tx_scale_max[SFXGE_TXQ_IP_CKSUM] < 1)
+ sp->s_tx_scale_max[SFXGE_TXQ_IP_CKSUM] = 1;
+
+ txq = 0;
+ for (qtype = 0; qtype < SFXGE_TXQ_NTYPES; qtype++) {
+ unsigned int tx_scale = sp->s_tx_scale_max[qtype];
+
+ if (txq + tx_scale > EFX_ARRAY_SIZE(sp->s_stp)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ sp->s_tx_scale_base[qtype] = txq;
+
+ for (evq = 0; evq < tx_scale; evq++) {
+ if ((rc = sfxge_tx_qinit(sp, txq, qtype, evq)) != 0) {
+ goto fail2;
+ }
+ txq++;
+ }
+ ASSERT3U(txq, <=, EFX_ARRAY_SIZE(sp->s_stp));
+ }
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ index = EFX_ARRAY_SIZE(sp->s_stp);
+ while (--index >= 0) {
+ if (sp->s_stp[index] != NULL)
+ sfxge_tx_qfini(sp, index);
+ }
+
+ kmem_cache_destroy(sp->s_tqc);
+ sp->s_tqc = NULL;
+
+ kmem_cache_destroy(sp->s_tmc);
+ sp->s_tmc = NULL;
+
+ kmem_cache_destroy(sp->s_tbc);
+ sp->s_tbc = NULL;
+
+ kmem_cache_destroy(sp->s_tpc);
+ sp->s_tpc = NULL;
+
+ return (rc);
+}
+
+int
+sfxge_tx_start(sfxge_t *sp)
+{
+ efx_nic_t *enp = sp->s_enp;
+ int index;
+ int rc;
+
+ /* Initialize the transmit module */
+ if ((rc = efx_tx_init(enp)) != 0)
+ goto fail1;
+
+ for (index = 0; index < EFX_ARRAY_SIZE(sp->s_stp); index++) {
+ if (sp->s_stp[index] != NULL)
+ if ((rc = sfxge_tx_qstart(sp, index)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+
+ sfxge_tx_stop(sp);
+
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Add a packet to the TX Deferred Packet List and, if the TX queue lock
+ * can be acquired, call sfxge_tx_qdpl_service() to fragment it and push it
+ * to the H/W transmit descriptor ring.
+ *
+ * If ENOSPC is returned then the DPL is full or the packet creation
+ * failed, but the mblk isn't freed so that the caller can return it from
+ * mc_tx() to back-pressure the OS stack.
+ *
+ * For all other errors the mblk is freed.
+ */
+int
+sfxge_tx_packet_add(sfxge_t *sp, mblk_t *mp)
+{
+ struct ether_header *etherhp;
+ struct ip *iphp;
+ struct tcphdr *thp;
+ size_t off;
+ size_t size;
+ size_t mss;
+ sfxge_txq_t *stp;
+ unsigned int txq;
+ int index;
+ boolean_t locked;
+ sfxge_tx_packet_t *stpp;
+ sfxge_packet_type_t pkt_type;
+ uint16_t sport, dport;
+ int rc = 0;
+
+ ASSERT3P(mp->b_next, ==, NULL);
+ ASSERT(!(DB_CKSUMFLAGS(mp) & HCK_PARTIALCKSUM));
+
+ /*
+	 * Do not enqueue packets during startup/shutdown.
+ *
+ * NOTE: This access to the state is NOT protected by the state lock. It
+ * is an imperfect test and anything further getting onto the get/put
+ * deferred packet lists is cleaned up in (possibly repeated) calls to
+ * sfxge_can_destroy().
+ */
+ if (sp->s_state != SFXGE_STARTED) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ etherhp = NULL;
+ iphp = NULL;
+ thp = NULL;
+ off = 0;
+ size = 0;
+ mss = 0;
+
+ /* Check whether we need the header pointers for LSO segmentation */
+ if (DB_LSOFLAGS(mp) & HW_LSO) {
+ /* LSO segmentation relies on hardware checksum offload */
+ DB_CKSUMFLAGS(mp) |= HCK_FULLCKSUM;
+
+ if ((mss = DB_LSOMSS(mp)) == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ pkt_type = sfxge_pkthdr_parse(mp, &etherhp, &iphp, &thp,
+ &off, &size, &sport, &dport);
+
+ if (pkt_type != SFXGE_PACKET_TYPE_IPV4_TCP ||
+ etherhp == NULL ||
+ iphp == NULL ||
+ thp == NULL ||
+ off == 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ }
+
+	/* Choose the appropriate transmit queue */
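+	/*
+	 * Where RSS is active, pick a transmit queue whose event queue
+	 * matches the one used by the receive queue selected for this flow,
+	 * so that transmit completions share that event queue.
+	 */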
+ if (DB_CKSUMFLAGS(mp) & HCK_FULLCKSUM) {
+ sfxge_rx_scale_t *srsp = &(sp->s_rx_scale);
+
+ if (srsp->srs_state == SFXGE_RX_SCALE_STARTED) {
+ uint32_t hash;
+
+ if (srsp->srs_count > 1) {
+ /*
+ * If we have not already parsed the headers
+ * for LSO segmentation then we need to do it
+ * now so we can calculate the hash.
+ */
+ if (thp == NULL) {
+ (void) sfxge_pkthdr_parse(mp, &etherhp,
+ &iphp, &thp, &off, &size,
+ &sport, &dport);
+ }
+
+ if (thp != NULL) {
+ SFXGE_TCP_HASH(sp,
+ &iphp->ip_dst.s_addr,
+ thp->th_dport,
+ &iphp->ip_src.s_addr,
+ thp->th_sport, hash);
+
+ index = srsp->srs_tbl[hash %
+ SFXGE_RX_SCALE_MAX];
+ } else if (iphp != NULL) {
+ /*
+ * Calculate IPv4 4-tuple hash, with
+ * TCP/UDP/SCTP src/dest ports. Ports
+ * are zero for other IPv4 protocols.
+ */
+ SFXGE_IP_HASH(sp,
+ &iphp->ip_dst.s_addr, dport,
+ &iphp->ip_src.s_addr, sport, hash);
+
+ index = srsp->srs_tbl[hash %
+ SFXGE_RX_SCALE_MAX];
+ } else {
+ /*
+					 * Other traffic always goes to
+					 * the queue in the zero-th entry of
+ * the RSS table.
+ */
+ index = srsp->srs_tbl[0];
+ }
+ } else {
+ /*
+ * It does not matter what the hash is
+ * because all the RSS table entries will be
+ * the same.
+ */
+ index = srsp->srs_tbl[0];
+ }
+
+ /*
+ * Find the event queue corresponding to the hash in
+ * the RSS table.
+ */
+ txq = sp->s_tx_scale_base[SFXGE_TXQ_IP_TCP_UDP_CKSUM] +
+ index;
+ stp = sp->s_stp[txq];
+ ASSERT3U(stp->st_evq, ==, index);
+ } else {
+ index = 0;
+ txq = sp->s_tx_scale_base[SFXGE_TXQ_IP_TCP_UDP_CKSUM] +
+ index;
+ stp = sp->s_stp[txq];
+ }
+ } else if (DB_CKSUMFLAGS(mp) & HCK_IPV4_HDRCKSUM) {
+ ASSERT3U(sp->s_tx_scale_max[SFXGE_TXQ_IP_CKSUM], >=, 1);
+ index = 0;
+ txq = sp->s_tx_scale_base[SFXGE_TXQ_IP_CKSUM] + index;
+ stp = sp->s_stp[txq];
+ } else {
+ /*
+ * No hardware checksum offload requested.
+ */
+ sfxge_rx_scale_t *srsp = &(sp->s_rx_scale);
+
+ if (srsp->srs_state == SFXGE_RX_SCALE_STARTED) {
+ uint32_t hash = 0;
+
+ if (srsp->srs_count > 1) {
+ if (iphp == NULL) {
+ (void) sfxge_pkthdr_parse(mp, &etherhp,
+ &iphp, &thp, &off, &size,
+ &sport, &dport);
+ }
+
+ if (iphp != NULL) {
+ /*
+ * Calculate IPv4 4-tuple hash, with
+ * TCP/UDP/SCTP src/dest ports. Ports
+ * are zero for other IPv4 protocols.
+ */
+ SFXGE_IP_HASH(sp,
+ &iphp->ip_dst.s_addr, dport,
+ &iphp->ip_src.s_addr, sport, hash);
+
+ hash = hash % SFXGE_RX_SCALE_MAX;
+ }
+ }
+ index = srsp->srs_tbl[hash];
+
+ /*
+ * The RSS table (indexed by hash) gives the RXQ index,
+ * (mapped 1:1 with EVQs). Find the TXQ that results in
+ * using the same EVQ as for the RX data path.
+ */
+ ASSERT3U(sp->s_tx_scale_max[SFXGE_TXQ_NON_CKSUM],
+ >, index);
+ txq = sp->s_tx_scale_base[SFXGE_TXQ_NON_CKSUM] + index;
+ stp = sp->s_stp[txq];
+ ASSERT3U(stp->st_evq, ==, index);
+ } else {
+ ASSERT3U(sp->s_tx_scale_max[SFXGE_TXQ_NON_CKSUM], >, 0);
+ index = 0;
+ txq = sp->s_tx_scale_base[SFXGE_TXQ_NON_CKSUM] + index;
+ stp = sp->s_stp[txq];
+ }
+
+
+ }
+ ASSERT(stp != NULL);
+
+ ASSERT(mss == 0 || (DB_LSOFLAGS(mp) & HW_LSO));
+
+ /* Try to grab the lock */
+ locked = mutex_tryenter(&(stp->st_lock));
+
+ if (locked) {
+ /* Try to grab a packet from the pool */
+ stpp = sfxge_tx_qfpp_get(stp);
+ } else {
+ stpp = NULL;
+ }
+
+ if (stpp == NULL) {
+ /*
+ * Either the pool was empty or we don't have the lock so
+ * allocate a new packet.
+ */
+ if ((stpp = sfxge_tx_packet_create(sp)) == NULL) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+ }
+
+ stpp->stp_mp = mp;
+ stpp->stp_etherhp = etherhp;
+ stpp->stp_iphp = iphp;
+ stpp->stp_thp = thp;
+ stpp->stp_off = off;
+ stpp->stp_size = size;
+ stpp->stp_mss = mss;
+ stpp->stp_dpl_put_len = 0;
+
+ rc = sfxge_tx_qdpl_add(stp, stpp, locked);
+ if (rc != 0) {
+		/* ENOSPC can happen when the DPL get or put list is full */
+ ASSERT3U(rc, ==, ENOSPC);
+
+ /*
+		 * Note: if this is the unlocked DPL put list full case, there is
+		 * no need to worry about a race with a locked
+		 * sfxge_tx_qdpl_swizzle(), as we know that the TX DPL put list
+		 * was full and would have been swizzled onto the TX DPL get
+		 * list, hence guaranteeing future TX completions and calls
+		 * to mac_tx_update() via sfxge_tx_qcomplete().
+ */
+ goto fail4;
+ }
+
+ /* Try to grab the lock again */
+ if (!locked)
+ locked = mutex_tryenter(&(stp->st_lock));
+
+ if (locked) {
+ /* Try to service the list */
+ sfxge_tx_qdpl_service(stp);
+ /* lock has been dropped */
+ }
+
+ return (0);
+
+fail4:
+ DTRACE_PROBE(fail4);
+ sfxge_tx_packet_destroy(sp, stpp);
+fail3:
+ DTRACE_PROBE(fail3);
+ if (locked)
+ mutex_exit(&(stp->st_lock));
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ if (rc != ENOSPC)
+ freemsg(mp);
+ return (rc);
+}
+
+void
+sfxge_tx_stop(sfxge_t *sp)
+{
+ efx_nic_t *enp = sp->s_enp;
+ clock_t timeout;
+ boolean_t wait_for_flush;
+ int index;
+
+ ASSERT(mutex_owned(&(sp->s_state_lock)));
+
+ mutex_enter(&(sp->s_tx_flush_lock));
+
+ /* Flush all the queues */
+ if (sp->s_hw_err == SFXGE_HW_OK) {
+ wait_for_flush = B_TRUE;
+ } else {
+ /*
+ * Flag indicates possible hardware failure.
+ * Attempt flush but do not wait for it to complete.
+ */
+ wait_for_flush = B_FALSE;
+ }
+
+ /* Prepare queues to stop and flush the hardware ring */
+ index = EFX_ARRAY_SIZE(sp->s_stp);
+ while (--index >= 0) {
+ if (sp->s_stp[index] != NULL)
+ sfxge_tx_qflush(sp, index, wait_for_flush);
+ }
+
+ if (wait_for_flush == B_FALSE)
+ goto flush_done;
+
+	/* Wait up to 2 seconds for queue flushing to complete */
+ timeout = ddi_get_lbolt() + drv_usectohz(SFXGE_TX_QFLUSH_USEC);
+
+ while (sp->s_tx_flush_pending > 0) {
+ if (cv_timedwait(&(sp->s_tx_flush_kv), &(sp->s_tx_flush_lock),
+ timeout) < 0) {
+ /* Timeout waiting for queues to flush */
+ dev_info_t *dip = sp->s_dip;
+
+ DTRACE_PROBE(timeout);
+ dev_err(dip, CE_NOTE,
+ SFXGE_CMN_ERR "tx qflush timeout");
+ break;
+ }
+ }
+
+flush_done:
+ sp->s_tx_flush_pending = 0;
+ mutex_exit(&(sp->s_tx_flush_lock));
+
+ /* Stop all the queues */
+ index = EFX_ARRAY_SIZE(sp->s_stp);
+ while (--index >= 0) {
+ if (sp->s_stp[index] != NULL)
+ sfxge_tx_qstop(sp, index);
+ }
+
+ /* Tear down the transmit module */
+ efx_tx_fini(enp);
+}
+
+void
+sfxge_tx_fini(sfxge_t *sp)
+{
+ int index;
+
+ index = EFX_ARRAY_SIZE(sp->s_stp);
+ while (--index >= 0) {
+ if (sp->s_stp[index] != NULL)
+ sfxge_tx_qfini(sp, index);
+ }
+
+ kmem_cache_destroy(sp->s_tqc);
+ sp->s_tqc = NULL;
+
+ kmem_cache_destroy(sp->s_tmc);
+ sp->s_tmc = NULL;
+
+ kmem_cache_destroy(sp->s_tbc);
+ sp->s_tbc = NULL;
+
+ kmem_cache_destroy(sp->s_tpc);
+ sp->s_tpc = NULL;
+}
diff --git a/usr/src/uts/common/io/sfxge/sfxge_version.h b/usr/src/uts/common/io/sfxge/sfxge_version.h
new file mode 100644
index 0000000000..37a7351558
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_version.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_SFXGE_VERSION_H
+#define _SYS_SFXGE_VERSION_H
+
+/*
+ * Driver version.
+ * It is populated in the driver cfg class kstat and used to build
+ * the module link info (visible in modinfo -w output).
+ * It is updated by the mmaketag script when a new v5 tag is created.
+ */
+#define SFXGE_VERSION_STRING "v4.8.0.1027"
+
+#endif /* _SYS_SFXGE_VERSION_H */
diff --git a/usr/src/uts/common/io/sfxge/sfxge_vpd.c b/usr/src/uts/common/io/sfxge/sfxge_vpd.c
new file mode 100644
index 0000000000..3f1a285315
--- /dev/null
+++ b/usr/src/uts/common/io/sfxge/sfxge_vpd.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include "sfxge.h"
+
+
+static int
+sfxge_vpd_get_keyword(sfxge_t *sp, sfxge_vpd_ioc_t *svip)
+{
+ efx_nic_t *enp = sp->s_enp;
+ efx_vpd_value_t vpd;
+ size_t size;
+ void *buf;
+ int rc;
+
+ if ((rc = efx_vpd_size(enp, &size)) != 0)
+ goto fail1;
+
+ buf = kmem_zalloc(size, KM_NOSLEEP);
+ if (buf == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_read(enp, buf, size)) != 0)
+ goto fail2;
+
+ if ((rc = efx_vpd_verify(enp, buf, size)) != 0)
+ goto fail3;
+
+ vpd.evv_tag = svip->svi_tag;
+ vpd.evv_keyword = svip->svi_keyword;
+
+ if ((rc = efx_vpd_get(enp, buf, size, &vpd)) != 0)
+ goto fail4;
+
+ svip->svi_len = vpd.evv_length;
+ EFX_STATIC_ASSERT(sizeof (svip->svi_payload) == sizeof (vpd.evv_value));
+ bcopy(&vpd.evv_value[0], svip->svi_payload, sizeof (svip->svi_payload));
+
+ kmem_free(buf, size);
+
+ return (0);
+
+fail4:
+ DTRACE_PROBE(fail4);
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+ kmem_free(buf, size);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
+static int
+sfxge_vpd_set_keyword(sfxge_t *sp, sfxge_vpd_ioc_t *svip)
+{
+ efx_nic_t *enp = sp->s_enp;
+ efx_vpd_value_t vpd;
+ size_t size;
+ void *buf;
+ int rc;
+
+ /* restriction on writable tags is in efx_vpd_hunk_set() */
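+	/*
+	 * Read-modify-write cycle: read and verify the VPD image
+	 * (reinitializing it if verification fails), apply the new keyword
+	 * value, re-verify and write the image back to the hardware.
+	 */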
+
+ if ((rc = efx_vpd_size(enp, &size)) != 0)
+ goto fail1;
+
+ buf = kmem_zalloc(size, KM_NOSLEEP);
+ if (buf == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_read(enp, buf, size)) != 0)
+ goto fail2;
+
+ if ((rc = efx_vpd_verify(enp, buf, size)) != 0) {
+ if ((rc = efx_vpd_reinit(enp, buf, size)) != 0)
+ goto fail3;
+ if ((rc = efx_vpd_verify(enp, buf, size)) != 0)
+ goto fail4;
+ }
+
+ vpd.evv_tag = svip->svi_tag;
+ vpd.evv_keyword = svip->svi_keyword;
+ vpd.evv_length = svip->svi_len;
+
+ EFX_STATIC_ASSERT(sizeof (svip->svi_payload) == sizeof (vpd.evv_value));
+ bcopy(svip->svi_payload, &vpd.evv_value[0], sizeof (svip->svi_payload));
+
+ if ((rc = efx_vpd_set(enp, buf, size, &vpd)) != 0)
+ goto fail5;
+
+ if ((rc = efx_vpd_verify(enp, buf, size)) != 0)
+ goto fail6;
+
+ /* And write the VPD back to the hardware */
+ if ((rc = efx_vpd_write(enp, buf, size)) != 0)
+ goto fail7;
+
+ kmem_free(buf, size);
+
+ return (0);
+
+fail7:
+ DTRACE_PROBE(fail7);
+fail6:
+ DTRACE_PROBE(fail6);
+fail5:
+ DTRACE_PROBE(fail5);
+fail4:
+ DTRACE_PROBE(fail4);
+fail3:
+ DTRACE_PROBE(fail3);
+fail2:
+ DTRACE_PROBE(fail2);
+ kmem_free(buf, size);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
+
+
+int
+sfxge_vpd_ioctl(sfxge_t *sp, sfxge_vpd_ioc_t *svip)
+{
+ int rc;
+
+ switch (svip->svi_op) {
+ case SFXGE_VPD_OP_GET_KEYWORD:
+ if ((rc = sfxge_vpd_get_keyword(sp, svip)) != 0)
+ goto fail1;
+ break;
+ case SFXGE_VPD_OP_SET_KEYWORD:
+ if ((rc = sfxge_vpd_set_keyword(sp, svip)) != 0)
+ goto fail1;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ DTRACE_PROBE(fail2);
+fail1:
+ DTRACE_PROBE1(fail1, int, rc);
+
+ return (rc);
+}
diff --git a/usr/src/uts/intel/Makefile.intel b/usr/src/uts/intel/Makefile.intel
index 0f8a597050..bde282e81c 100644
--- a/usr/src/uts/intel/Makefile.intel
+++ b/usr/src/uts/intel/Makefile.intel
@@ -22,6 +22,7 @@
# Copyright (c) 2015 Nexenta Systems, Inc. All rights reserved.
# Copyright (c) 2013 Andrew Stormont. All rights reserved.
# Copyright (c) 2015, Joyent, Inc. All rights reserved.
+# Copyright 2016 Garrett D'Amore <garrett@damore.org>
#
# This makefile contains the common definitions for all intel
@@ -396,6 +397,7 @@ DRV_KMODS += pcn
DRV_KMODS += rge
DRV_KMODS += rtls
DRV_KMODS += sfe
+DRV_KMODS += sfxge
DRV_KMODS += amd8111s
DRV_KMODS += igb
DRV_KMODS += ipmi
diff --git a/usr/src/uts/intel/sfxge/Makefile b/usr/src/uts/intel/sfxge/Makefile
new file mode 100644
index 0000000000..628f4e1d5b
--- /dev/null
+++ b/usr/src/uts/intel/sfxge/Makefile
@@ -0,0 +1,79 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2016 Garrett D'Amore <garrett@damore.org>
+#
+
+UTSBASE = ../..
+
+MODULE = sfxge
+OBJECTS = $(SFXGE_OBJS:%=$(OBJS_DIR)/%)
+OBJECTS += $(SFXGE_SF_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(SFXGE_OBJS:%.o=$(LINTS_DIR)/%.ln)
+LINTS += $(SFXGE_SF_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+
+include $(UTSBASE)/intel/Makefile.intel
+
+#
+# Targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# Overrides
+#
+
+INC_PATH += -I$(UTSBASE)/common/io/sfxge -I$(UTSBASE)/common/io/sfxge/common
+
+#
+# TODO:
+# These are specific to this driver. We will unifdef these out later.
+# Some of them need further cleanup as well (e.g. we shouldn't bother with
+# supporting NDD directly.)
+#
+CPPFLAGS += -U_USE_MTU_UPDATE
+
+CFLAGS += $(CCVERBOSE)
+
+#
+# Driver depends on GLDv3 (mac)
+#
+LDFLAGS += -dy -N misc/mac
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ