author:    qs148142 <none@none>  2008-04-03 09:50:25 -0700
committer: qs148142 <none@none>  2008-04-03 09:50:25 -0700
commit:    3dec9fcdd56adf1b4a563137b4915c8f2d83b881
tree:      b4aff05bea4b81af67d4c710feb63c96140fc86d
parent:    72e1c0551e3c92bca8ab60d3f38afd96f35cc88c
PSARC/2008/063 Hydra Support for Solaris
6656720 Initial hxge driver
Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/pkgdefs/Makefile                          1
-rw-r--r--  usr/src/pkgdefs/SUNWhxge/Makefile                37
-rw-r--r--  usr/src/pkgdefs/SUNWhxge/pkginfo.tmpl            46
-rw-r--r--  usr/src/pkgdefs/SUNWhxge/postinstall             76
-rw-r--r--  usr/src/pkgdefs/SUNWhxge/postremove              38
-rw-r--r--  usr/src/pkgdefs/SUNWhxge/prototype_com           52
-rw-r--r--  usr/src/pkgdefs/SUNWhxge/prototype_i386          48
-rw-r--r--  usr/src/pkgdefs/SUNWhxge/prototype_sparc         48
-rw-r--r--  usr/src/uts/common/Makefile.files                 9
-rw-r--r--  usr/src/uts/common/Makefile.rules                 7
-rw-r--r--  usr/src/uts/common/io/hxge/hpi.c                104
-rw-r--r--  usr/src/uts/common/io/hxge/hpi.h                181
-rw-r--r--  usr/src/uts/common/io/hxge/hpi_pfc.c            962
-rw-r--r--  usr/src/uts/common/io/hxge/hpi_pfc.h            205
-rw-r--r--  usr/src/uts/common/io/hxge/hpi_rxdma.c          597
-rw-r--r--  usr/src/uts/common/io/hxge/hpi_rxdma.h          201
-rw-r--r--  usr/src/uts/common/io/hxge/hpi_txdma.c          487
-rw-r--r--  usr/src/uts/common/io/hxge/hpi_txdma.h          142
-rw-r--r--  usr/src/uts/common/io/hxge/hpi_vir.c            260
-rw-r--r--  usr/src/uts/common/io/hxge/hpi_vir.h            114
-rw-r--r--  usr/src/uts/common/io/hxge/hpi_vmac.c           282
-rw-r--r--  usr/src/uts/common/io/hxge/hpi_vmac.h            67
-rw-r--r--  usr/src/uts/common/io/hxge/hxge.conf            100
-rw-r--r--  usr/src/uts/common/io/hxge/hxge.h               591
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_classify.h       97
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_common.h        165
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_common_impl.h   274
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_defs.h          146
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_flow.h          182
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_fm.c            502
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_fm.h            129
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_fzc.c           295
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_fzc.h            62
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_hw.c            777
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_impl.h          487
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_kstats.c       1305
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_main.c         3773
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_ndd.c          1529
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_peu.h            48
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_peu_hw.h       5763
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_pfc.c          1306
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_pfc.h           332
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_pfc_hw.h        773
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_rdc_hw.h       1611
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_rxdma.c        3491
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_rxdma.h         484
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_send.c         1014
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_tdc_hw.h       1394
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_txdma.c        2900
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_txdma.h         248
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_txdma_hw.h      207
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_virtual.c      1109
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_virtual.h        55
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_vmac.c          399
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_vmac.h           89
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_vmac_hw.h       693
-rw-r--r--  usr/src/uts/intel/Makefile.intel.shared           1
-rw-r--r--  usr/src/uts/intel/hxge/Makefile                 129
58 files changed, 36424 insertions, 0 deletions
diff --git a/usr/src/pkgdefs/Makefile b/usr/src/pkgdefs/Makefile
index 3445b5c3bc..42532ba01c 100644
--- a/usr/src/pkgdefs/Makefile
+++ b/usr/src/pkgdefs/Makefile
@@ -231,6 +231,7 @@ COMMON_SUBDIRS= \
SUNWhea \
SUNWhermon \
SUNWhwdata \
+ SUNWhxge \
SUNWib \
SUNWibsdpu \
SUNWibsdp \
diff --git a/usr/src/pkgdefs/SUNWhxge/Makefile b/usr/src/pkgdefs/SUNWhxge/Makefile
new file mode 100644
index 0000000000..e426b333da
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWhxge/Makefile
@@ -0,0 +1,37 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+include ../Makefile.com
+
+DATAFILES += depend i.renamenew
+
+.KEEP_STATE:
+
+all: $(FILES) postinstall postremove
+install: all pkg
+
+include ../Makefile.targ
diff --git a/usr/src/pkgdefs/SUNWhxge/pkginfo.tmpl b/usr/src/pkgdefs/SUNWhxge/pkginfo.tmpl
new file mode 100644
index 0000000000..4e747358a8
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWhxge/pkginfo.tmpl
@@ -0,0 +1,46 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+PKG=SUNWhxge
+NAME=SUN 10Gb hxge NIC Driver
+ARCH="ISA"
+VERSION="ONVERS,REV=0.0.0"
+SUNW_PRODNAME="SunOS"
+SUNW_PRODVERS="RELEASE/VERSION"
+SUNW_PKGVERS="1.0"
+SUNW_PKGTYPE="root"
+MAXINST="1000"
+CATEGORY=system
+VENDOR="Sun Microsystems, Inc."
+DESC="SUN 10Gb hxge Ethernet Network Adapter Driver"
+CLASSES="none renamenew"
+HOTLINE="Please contact your local service provider"
+EMAIL=""
+BASEDIR=/
+SUNW_PKG_ALLZONES="true"
+SUNW_PKG_HOLLOW="true"
+SUNW_PKG_THISZONE="false"
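
Note on the zone attributes above: SUNW_PKG_ALLZONES="true" together with
SUNW_PKG_HOLLOW="true" marks SUNWhxge as a hollow package: it must be
installed in every Solaris zone, but its contents (the kernel module) are
delivered only in the global zone, and non-global zones see just the
package metadata. This is the usual pattern for kernel driver packages.
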
diff --git a/usr/src/pkgdefs/SUNWhxge/postinstall b/usr/src/pkgdefs/SUNWhxge/postinstall
new file mode 100644
index 0000000000..158fa6dee7
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWhxge/postinstall
@@ -0,0 +1,76 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+set -u
+
+PATH="/usr/bin:/usr/sbin:${PATH}"
+export PATH
+
+#
+# Driver info
+#
+DRV=hxge
+DRVALIAS=" \"pci108e,aaaa\" "
+
+DRVPERM='* 0600 root sys'
+# POLICY='read_priv_set=net_rawaccess write_priv_set=net_rawaccess'
+MAJORDEV=11
+
+#
+# Select the correct add_drv options to execute.
+#
+if [ "${BASEDIR}" = "/" ]; then
+ #
+	# Whether or not the hardware is present, don't attempt to
+	# attach the driver to it at install time. The driver will be
+	# attached when the interface is configured using ifconfig.
+ #
+ ADD_DRV="add_drv -n"
+else
+ #
+	# Installing to an alternate root: modify the system files
+	# under ${BASEDIR} only; the driver is picked up on the next
+	# reconfigure reboot of that image.
+ #
+ ADD_DRV="add_drv -b ${BASEDIR}"
+fi
+
+#
+# Make sure add_drv has *not* been previously executed
+# before attempting to add the driver.
+#
+grep -w "${DRV}" ${BASEDIR}/etc/name_to_major > /dev/null 2>&1
+if [ $? -eq 1 ]; then
+ ${ADD_DRV} -m "${DRVPERM}" -i "${DRVALIAS}" ${DRV}
+ if [ $? -ne 0 ]; then
+ echo "\nFailed add_drv!\n" >&2
+ exit 1
+ fi
+fi
+
+exit 0
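
Design note: the BASEDIR test above selects between two add_drv modes.
"add_drv -n" registers hxge in /etc/name_to_major and /etc/driver_aliases
on a live root without trying to load and attach the module, deferring
attach until the interface is first plumbed; "add_drv -b ${BASEDIR}"
only edits the corresponding files under an alternate root, and the
driver attaches when that image is next booted.
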
diff --git a/usr/src/pkgdefs/SUNWhxge/postremove b/usr/src/pkgdefs/SUNWhxge/postremove
new file mode 100644
index 0000000000..149b9ee9cc
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWhxge/postremove
@@ -0,0 +1,38 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+BD=${BASEDIR:-/}
+if grep -w hxge $BD/etc/name_to_major > /dev/null 2>&1
+then
+ rem_drv -b ${BD} hxge
+ if [ $? -ne 0 ]
+ then
+ exit 1
+ fi
+fi
+exit 0
diff --git a/usr/src/pkgdefs/SUNWhxge/prototype_com b/usr/src/pkgdefs/SUNWhxge/prototype_com
new file mode 100644
index 0000000000..4eca71a540
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWhxge/prototype_com
@@ -0,0 +1,52 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...> # where to find pkg objects
+#!include <filename> # include another 'prototype' file
+#!default <mode> <owner> <group> # default used if not specified on entry
+#!<param>=<value> # puts parameter in pkg environment
+
+#
+#
+i pkginfo
+i copyright
+i depend
+i postinstall
+i postremove
+i i.renamenew
+#
+#
+# SUN 10Gb hxge NIC driver
+d none kernel 0755 root sys
+d none kernel/drv 0755 root sys
+e renamenew kernel/drv/hxge.conf 0644 root sys
diff --git a/usr/src/pkgdefs/SUNWhxge/prototype_i386 b/usr/src/pkgdefs/SUNWhxge/prototype_i386
new file mode 100644
index 0000000000..742603b91c
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWhxge/prototype_i386
@@ -0,0 +1,48 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...> # where to find pkg objects
+#!include <filename> # include another 'prototype' file
+#!default <mode> <owner> <group> # default used if not specified on entry
+#!<param>=<value> # puts parameter in pkg environment
+
+#
+# Include ISA independent files (prototype_com)
+#
+!include prototype_com
+#
+#
+
+# SUN 10Gb hxge NIC driver
+d none kernel/drv/amd64 0755 root sys
+f none kernel/drv/amd64/hxge 0755 root sys
diff --git a/usr/src/pkgdefs/SUNWhxge/prototype_sparc b/usr/src/pkgdefs/SUNWhxge/prototype_sparc
new file mode 100644
index 0000000000..bdf23a372d
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWhxge/prototype_sparc
@@ -0,0 +1,48 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...> # where to find pkg objects
+#!include <filename> # include another 'prototype' file
+#!default <mode> <owner> <group> # default used if not specified on entry
+#!<param>=<value> # puts parameter in pkg environment
+
+#
+# Include ISA independent files (prototype_com)
+#
+!include prototype_com
+#
+#
+
+# SUN 10Gb hxge NIC driver
+d none kernel/drv/sparcv9 0755 root sys
+f none kernel/drv/sparcv9/hxge 0755 root sys
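
The prototype files divide the package contents by ISA: prototype_com
carries the shared entries (the install scripts and the editable
kernel/drv/hxge.conf, delivered through the renamenew class so that a
locally modified copy is preserved across upgrades), while
prototype_i386 and prototype_sparc !include it and add only the 64-bit
driver binary under kernel/drv/amd64 or kernel/drv/sparcv9.
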
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index cd0ec4c97d..002e2672b5 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -1607,3 +1607,12 @@ KICONV_TC_OBJS += kiconv_cck_common.o kiconv_tc.o
# AAC module
#
AAC_OBJS = aac.o aac_ioctl.o
+#
+# hxge 10G driver module
+#
+HXGE_OBJS = hxge_main.o hxge_vmac.o hxge_send.o \
+ hxge_txdma.o hxge_rxdma.o hxge_virtual.o \
+ hxge_fm.o hxge_fzc.o hxge_hw.o hxge_kstats.o \
+ hxge_ndd.o hxge_pfc.o \
+ hpi.o hpi_vmac.o hpi_rxdma.o hpi_txdma.o \
+ hpi_vir.o hpi_pfc.o
diff --git a/usr/src/uts/common/Makefile.rules b/usr/src/uts/common/Makefile.rules
index f5de112fb9..bed4f69db9 100644
--- a/usr/src/uts/common/Makefile.rules
+++ b/usr/src/uts/common/Makefile.rules
@@ -1072,6 +1072,10 @@ $(OBJS_DIR)/zlib_obj.o: $(ZLIB_OBJS:%=$(OBJS_DIR)/%)
$(ZLIB_OBJS:%=$(OBJS_DIR)/%)
$(CTFMERGE) -t -f -L VERSION -o $@ $(ZLIB_OBJS:%=$(OBJS_DIR)/%)
+$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/hxge/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
#
# SVM
#
@@ -1859,3 +1863,6 @@ $(LINTS_DIR)/zlib_obj.ln: $(ZLIB_OBJS:%.o=$(LINTS_DIR)/%.ln) \
$(UTSBASE)/common/zmod/zlib_lint.c
@($(LHEAD) $(LINT.c) -C $(LINTS_DIR)/zlib_obj \
$(UTSBASE)/common/zmod/zlib_lint.c $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/hxge/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
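
The two pattern rules above hook the new io/hxge sources into the common
kernel build: the first compiles any hxge .c file into the object
directory and runs CTF conversion on the result, and the second
generates the matching lint targets.
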
diff --git a/usr/src/uts/common/io/hxge/hpi.c b/usr/src/uts/common/io/hxge/hpi.c
new file mode 100644
index 0000000000..4f1dcc632e
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hpi.c
@@ -0,0 +1,104 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hpi.h>
+#include <hxge_impl.h>
+
+static hxge_os_mutex_t hpidebuglock;
+static int hpi_debug_init = 0;
+uint64_t hpi_debug_level = 0x0;
+
+void
+hpi_debug_msg(hpi_handle_function_t function, uint64_t level, char *fmt, ...)
+{
+ char msg_buffer[1024];
+ char prefix_buffer[32];
+ int cmn_level = CE_CONT;
+ va_list ap;
+
+ if ((level & hpi_debug_level) ||
+ (level & HPI_REG_CTL) || (level & HPI_ERR_CTL)) {
+
+ if (hpi_debug_init == 0) {
+ MUTEX_INIT(&hpidebuglock, NULL, MUTEX_DRIVER, NULL);
+ hpi_debug_init = 1;
+ }
+
+ MUTEX_ENTER(&hpidebuglock);
+
+ if (level & HPI_ERR_CTL) {
+ cmn_level = CE_WARN;
+ }
+
+ va_start(ap, fmt);
+ (void) vsprintf(msg_buffer, fmt, ap);
+ va_end(ap);
+
+ (void) sprintf(prefix_buffer, "%s%d(%d):", "hpi",
+ function.instance, function.function);
+
+ cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
+ MUTEX_EXIT(&hpidebuglock);
+ }
+}
+
+void
+hpi_rtrace_buf_init(rtrace_t *rt)
+{
+ int i;
+
+ rt->next_idx = 0;
+ rt->last_idx = MAX_RTRACE_ENTRIES - 1;
+ rt->wrapped = B_FALSE;
+ for (i = 0; i < MAX_RTRACE_ENTRIES; i++) {
+ rt->buf[i].ctl_addr = TRACE_CTL_INVALID;
+ rt->buf[i].val_l32 = 0;
+ rt->buf[i].val_h32 = 0;
+ }
+}
+
+void
+hpi_rtrace_update(hpi_handle_t handle, boolean_t wr, rtrace_t *rt,
+ uint32_t addr, uint64_t val)
+{
+ int idx;
+ idx = rt->next_idx;
+ if (wr == B_TRUE)
+ rt->buf[idx].ctl_addr = (addr & TRACE_ADDR_MASK) | TRACE_CTL_WR;
+ else
+ rt->buf[idx].ctl_addr = (addr & TRACE_ADDR_MASK);
+ rt->buf[idx].ctl_addr |= (((handle.function.function
+ << TRACE_FUNC_SHIFT) & TRACE_FUNC_MASK) |
+ ((handle.function.instance << TRACE_INST_SHIFT) & TRACE_INST_MASK));
+ rt->buf[idx].val_l32 = val & 0xFFFFFFFF;
+ rt->buf[idx].val_h32 = (val >> 32) & 0xFFFFFFFF;
+ rt->next_idx++;
+ if (rt->next_idx > rt->last_idx) {
+ rt->next_idx = 0;
+ rt->wrapped = B_TRUE;
+ }
+}
diff --git a/usr/src/uts/common/io/hxge/hpi.h b/usr/src/uts/common/io/hxge/hpi.h
new file mode 100644
index 0000000000..7b7bb3359b
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hpi.h
@@ -0,0 +1,181 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HPI_H
+#define _HPI_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <hxge_common_impl.h>
+#include <hxge_common.h>
+
+typedef uint32_t hpi_status_t;
+
+/* Common Block ID */
+#define VMAC_BLK_ID 0x1
+#define TXDMA_BLK_ID 0x2
+#define RXDMA_BLK_ID 0x3
+#define PFC_BLK_ID 0x4
+#define VIR_BLK_ID 0x5
+#define PEU_BLK_ID 0x6
+
+/* Common HW error code */
+/* HW unable to exit from reset state. */
+#define RESET_FAILED 0x81
+
+/* Write operation failed on indirect write. */
+#define WRITE_FAILED 0x82
+/* Read operation failed on indirect read. */
+#define READ_FAILED 0x83
+
+/* Common SW errors code */
+
+#define PORT_INVALID 0x41 /* Invalid port number */
+#define CHANNEL_INVALID 0x42 /* Invalid dma channel number */
+#define OPCODE_INVALID 0x43 /* Invalid opcode */
+#define REGISTER_INVALID 0x44 /* Invalid register number */
+#define COUNTER_INVALID 0x45 /* Invalid counter number */
+#define CONFIG_INVALID 0x46 /* Invalid config input */
+#define LOGICAL_PAGE_INVALID 0x47 /* Invalid logical page # */
+#define VLAN_INVALID 0x48 /* Invalid Vlan ID */
+#define RDC_TAB_INVALID 0x49 /* Invalid RDC Group Number */
+#define LOCATION_INVALID 0x4a /* Invalid Entry Location */
+
+#define HPI_SUCCESS 0 /* Operation succeed */
+#define HPI_FAILURE 0x80000000 /* Operation failed */
+
+/*
+ * Block identifier starts at bit 8.
+ */
+#define HPI_BLOCK_ID_SHIFT 8
+
+/*
+ * Port, channel and misc. information starts at bit 12.
+ */
+#define HPI_PORT_CHAN_SHIFT 12
+
+/*
+ * Software Block specific error codes start at 0x50.
+ */
+#define HPI_BK_ERROR_START 0x50
+
+/*
+ * Hardware block specific error codes start at 0x90.
+ */
+#define HPI_BK_HW_ER_START 0x90
+
+/* Structures for register tracing */
+
+typedef struct _rt_buf {
+ uint32_t ctl_addr;
+ uint32_t val_l32;
+ uint32_t val_h32;
+} rt_buf_t;
+
+/*
+ * Control Address field format
+ *
+ * Bit 0 - 23: Address
+ * Bit 24 - 25: Function Number
+ * Bit 26 - 29: Instance Number
+ * Bit 30: Read/Write Direction bit
+ * Bit 31: Invalid bit
+ */
+
+#define MAX_RTRACE_ENTRIES 1024
+#define MAX_RTRACE_IOC_ENTRIES 64
+#define TRACE_ADDR_MASK 0x00FFFFFF
+#define TRACE_FUNC_MASK 0x03000000
+#define TRACE_INST_MASK 0x3C000000
+#define TRACE_CTL_WR 0x40000000
+#define TRACE_CTL_INVALID 0x80000000
+#define TRACE_FUNC_SHIFT 24
+#define TRACE_INST_SHIFT 26
+#define MSG_BUF_SIZE 1024
+
+
+typedef struct _rtrace {
+ uint16_t next_idx;
+ uint16_t last_idx;
+ boolean_t wrapped;
+ rt_buf_t buf[MAX_RTRACE_ENTRIES];
+} rtrace_t;
+
+/* Configuration options */
+typedef enum config_op {
+ DISABLE = 0,
+ ENABLE,
+ INIT
+} config_op_t;
+
+/* I/O options */
+typedef enum io_op {
+ OP_SET = 0,
+ OP_GET,
+ OP_UPDATE,
+ OP_CLEAR
+} io_op_t;
+
+/* HPI Handle */
+typedef struct _hpi_handle_function {
+ uint16_t instance;
+ uint16_t function;
+} hpi_handle_function_t;
+
+/* HPI Handle */
+typedef struct _hpi_handle {
+ hpi_reg_handle_t regh;
+ hpi_reg_ptr_t regp;
+ boolean_t is_vraddr; /* virtualization region address */
+ hpi_handle_function_t function;
+ void *hxgep;
+} hpi_handle_t;
+
+extern rtrace_t hpi_rtracebuf;
+void hpi_rtrace_update(hpi_handle_t handle, boolean_t wr, rtrace_t *rt,
+ uint32_t addr, uint64_t val);
+void hpi_rtrace_buf_init(rtrace_t *rt);
+
+void hpi_debug_msg(hpi_handle_function_t function, uint64_t level,
+ char *fmt, ...);
+
+#ifdef HPI_DEBUG
+#define HPI_DEBUG_MSG(params) hpi_debug_msg params
+#else
+#define HPI_DEBUG_MSG(params)
+#endif
+
+#define HPI_ERROR_MSG(params) hpi_debug_msg params
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HPI_H */
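
The ctl_addr packing documented above (address in bits 0-23, function in
bits 24-25, instance in bits 26-29, with write and invalid flags on top)
is easiest to see in code. Below is a minimal standalone sketch, not
part of the driver, that decodes one trace entry using the masks from
this header; the function name rtrace_entry_print is hypothetical.

	#include <stdio.h>
	#include <stdint.h>

	#define	TRACE_ADDR_MASK		0x00FFFFFF
	#define	TRACE_FUNC_MASK		0x03000000
	#define	TRACE_INST_MASK		0x3C000000
	#define	TRACE_CTL_WR		0x40000000
	#define	TRACE_CTL_INVALID	0x80000000
	#define	TRACE_FUNC_SHIFT	24
	#define	TRACE_INST_SHIFT	26

	/* Decode one rt_buf_t-style entry into a readable line. */
	static void
	rtrace_entry_print(uint32_t ctl_addr, uint32_t val_l32,
	    uint32_t val_h32)
	{
		if (ctl_addr & TRACE_CTL_INVALID)
			return;	/* slot was never filled in */
		(void) printf("%s inst %u func %u addr 0x%06x "
		    "val 0x%08x%08x\n",
		    (ctl_addr & TRACE_CTL_WR) ? "WR" : "RD",
		    (ctl_addr & TRACE_INST_MASK) >> TRACE_INST_SHIFT,
		    (ctl_addr & TRACE_FUNC_MASK) >> TRACE_FUNC_SHIFT,
		    ctl_addr & TRACE_ADDR_MASK, val_h32, val_l32);
	}
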
diff --git a/usr/src/uts/common/io/hxge/hpi_pfc.c b/usr/src/uts/common/io/hxge/hpi_pfc.c
new file mode 100644
index 0000000000..b8d8d939a8
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hpi_pfc.c
@@ -0,0 +1,962 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hxge_impl.h>
+#include <hpi_pfc.h>
+
+#define TCAM_COMPLETION_TRY_COUNT 10
+#define HXGE_VLAN_TABLE_ENTRIES 128
+#define HXGE_PFC_INT_STATUS_CLEAR 0x7ULL
+
+static uint64_t
+hpi_pfc_tcam_check_completion(hpi_handle_t handle, tcam_op_t op_type)
+{
+ uint32_t try_counter, tcam_delay = 10;
+ pfc_tcam_ctrl_t tctl;
+
+ try_counter = TCAM_COMPLETION_TRY_COUNT;
+
+ switch (op_type) {
+ case TCAM_RWC_STAT:
+ READ_TCAM_REG_CTL(handle, &tctl.value);
+ while ((try_counter) &&
+ (tctl.bits.status != TCAM_CTL_RWC_RWC_STAT)) {
+ try_counter--;
+ HXGE_DELAY(tcam_delay);
+ READ_TCAM_REG_CTL(handle, &tctl.value);
+ }
+
+ if (!try_counter) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " TCAM RWC_STAT operation"
+ " failed to complete \n"));
+ return (HPI_PFC_TCAM_HW_ERROR);
+ }
+
+ tctl.value = 0;
+ break;
+ case TCAM_RWC_MATCH:
+ READ_TCAM_REG_CTL(handle, &tctl.value);
+
+ while ((try_counter) &&
+ (tctl.bits.match != TCAM_CTL_RWC_RWC_MATCH)) {
+ try_counter--;
+ HXGE_DELAY(tcam_delay);
+ READ_TCAM_REG_CTL(handle, &tctl.value);
+ }
+
+ if (!try_counter) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+			    " TCAM Match operation failed to find match \n"));
+ }
+
+ break;
+ default:
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " Invalid TCAM completion Request \n"));
+ return (HPI_PFC_ERROR | HPI_TCAM_ERROR | OPCODE_INVALID);
+ }
+
+ return (tctl.value);
+}
+
+hpi_status_t
+hpi_pfc_tcam_entry_read(hpi_handle_t handle, uint32_t location,
+ hxge_tcam_entry_t *tcam_ptr)
+{
+ pfc_tcam_ctrl_t tctl;
+ pfc_tcam_ctrl_t tctl_rv;
+
+ /*
+	 * Hydra doesn't allow TCAM entries to be read back directly;
+	 * use a compare (match) operation instead.
+ */
+ WRITE_TCAM_REG_MASK0(handle, tcam_ptr->mask0);
+ WRITE_TCAM_REG_MASK1(handle, tcam_ptr->mask1);
+
+ WRITE_TCAM_REG_KEY0(handle, tcam_ptr->key0);
+ WRITE_TCAM_REG_KEY1(handle, tcam_ptr->key1);
+
+ tctl.value = 0;
+ tctl.bits.addr = location;
+ tctl.bits.cmd = TCAM_CTL_RWC_TCAM_CMP;
+
+ WRITE_TCAM_REG_CTL(handle, tctl.value);
+
+ tctl_rv.value = hpi_pfc_tcam_check_completion(handle, TCAM_RWC_MATCH);
+
+ if (tctl_rv.bits.match)
+ return (HPI_SUCCESS);
+ else
+ return (HPI_FAILURE);
+}
+
+hpi_status_t
+hpi_pfc_tcam_asc_ram_entry_read(hpi_handle_t handle,
+ uint32_t location, uint64_t *ram_data)
+{
+ uint64_t tcam_stat;
+ pfc_tcam_ctrl_t tctl;
+
+ tctl.value = 0;
+ tctl.bits.addr = location;
+ tctl.bits.cmd = TCAM_CTL_RWC_RAM_RD;
+
+ WRITE_TCAM_REG_CTL(handle, tctl.value);
+
+ tcam_stat = hpi_pfc_tcam_check_completion(handle, TCAM_RWC_STAT);
+
+ if (tcam_stat & HPI_FAILURE) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "TCAM RAM read failed loc %d \n", location));
+ return (HPI_PFC_ASC_RAM_RD_ERROR);
+ }
+
+ READ_TCAM_REG_KEY0(handle, ram_data);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_tcam_asc_ram_entry_write(hpi_handle_t handle, uint32_t location,
+ uint64_t ram_data)
+{
+ uint64_t tcam_stat = 0;
+ pfc_tcam_ctrl_t tctl;
+
+ WRITE_TCAM_REG_KEY0(handle, ram_data);
+
+ tctl.value = 0;
+ tctl.bits.addr = location;
+ tctl.bits.cmd = TCAM_CTL_RWC_RAM_WR;
+
+ HPI_DEBUG_MSG((handle.function, HPI_PFC_CTL,
+ " tcam ascr write: location %x data %llx ctl value %llx \n",
+ location, ram_data, tctl.value));
+ WRITE_TCAM_REG_CTL(handle, tctl.value);
+ tcam_stat = hpi_pfc_tcam_check_completion(handle, TCAM_RWC_STAT);
+
+ if (tcam_stat & HPI_FAILURE) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "TCAM RAM write failed loc %d \n", location));
+ return (HPI_PFC_ASC_RAM_WR_ERROR);
+ }
+
+ return (HPI_SUCCESS);
+}
+
+static hpi_status_t
+hpi_pfc_set_config(hpi_handle_t handle, pfc_config_t config)
+{
+ uint64_t offset;
+
+ offset = PFC_CONFIG;
+ REG_PIO_WRITE64(handle, offset, config.value);
+
+ return (HPI_SUCCESS);
+}
+
+static hpi_status_t
+hpi_pfc_get_config(hpi_handle_t handle, pfc_config_t *configp)
+{
+ uint64_t offset;
+
+ offset = PFC_CONFIG;
+ REG_PIO_READ64(handle, offset, &configp->value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_tcam_enable(hpi_handle_t handle, boolean_t tcam)
+{
+ pfc_config_t config;
+
+ /*
+ * Read the register first.
+ */
+ (void) hpi_pfc_get_config(handle, &config);
+
+ if (tcam)
+ config.bits.tcam_en = 1;
+ else
+ config.bits.tcam_en = 0;
+
+ return (hpi_pfc_set_config(handle, config));
+}
+
+hpi_status_t
+hpi_pfc_set_l2_hash(hpi_handle_t handle, boolean_t l2_hash)
+{
+ pfc_config_t config;
+
+ /*
+ * Read the register first.
+ */
+ (void) hpi_pfc_get_config(handle, &config);
+
+ if (l2_hash)
+ config.bits.l2_hash_en = 1;
+ else
+ config.bits.l2_hash_en = 0;
+
+ return (hpi_pfc_set_config(handle, config));
+}
+
+hpi_status_t
+hpi_pfc_set_tcp_cksum(hpi_handle_t handle, boolean_t cksum)
+{
+ pfc_config_t config;
+
+ /*
+ * Read the register first.
+ */
+ (void) hpi_pfc_get_config(handle, &config);
+
+ if (cksum)
+ config.bits.tcp_cs_en = 1;
+ else
+ config.bits.tcp_cs_en = 0;
+
+ return (hpi_pfc_set_config(handle, config));
+}
+
+hpi_status_t
+hpi_pfc_set_default_dma(hpi_handle_t handle, uint32_t dma_channel_no)
+{
+ pfc_config_t config;
+
+ (void) hpi_pfc_get_config(handle, &config);
+
+ if (dma_channel_no > PFC_MAX_DMA_CHANNELS)
+ return (HPI_FAILURE);
+
+ config.bits.default_dma = dma_channel_no;
+
+ return (hpi_pfc_set_config(handle, config));
+}
+
+hpi_status_t
+hpi_pfc_mac_addr_enable(hpi_handle_t handle, uint32_t slot)
+{
+ pfc_config_t config;
+ uint32_t bit;
+
+ if (slot >= PFC_N_MAC_ADDRESSES) {
+ return (HPI_FAILURE);
+ }
+
+ (void) hpi_pfc_get_config(handle, &config);
+
+ bit = 1 << slot;
+ config.bits.mac_addr_en = config.bits.mac_addr_en | bit;
+
+ return (hpi_pfc_set_config(handle, config));
+}
+
+hpi_status_t
+hpi_pfc_mac_addr_disable(hpi_handle_t handle, uint32_t slot)
+{
+ pfc_config_t config;
+ uint32_t bit;
+
+ if (slot >= PFC_N_MAC_ADDRESSES) {
+ return (HPI_FAILURE);
+ }
+
+ (void) hpi_pfc_get_config(handle, &config);
+
+ bit = 1 << slot;
+ config.bits.mac_addr_en = config.bits.mac_addr_en & ~bit;
+
+ return (hpi_pfc_set_config(handle, config));
+}
+
+hpi_status_t
+hpi_pfc_set_force_csum(hpi_handle_t handle, boolean_t force)
+{
+ pfc_config_t config;
+
+ (void) hpi_pfc_get_config(handle, &config);
+
+ if (force)
+ config.bits.force_cs_en = 1;
+ else
+ config.bits.force_cs_en = 0;
+
+ return (hpi_pfc_set_config(handle, config));
+}
+
+hpi_status_t
+hpi_pfc_cfg_vlan_table_clear(hpi_handle_t handle)
+{
+ int i;
+ int offset;
+ int step = 8;
+ pfc_vlan_table_t table_entry;
+
+ table_entry.value = 0;
+ for (i = 0; i < HXGE_VLAN_TABLE_ENTRIES; i++) {
+ table_entry.bits.member = 0;
+ offset = PFC_VLAN_TABLE + i * step;
+ REG_PIO_WRITE64(handle, offset, table_entry.value);
+ }
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_cfg_vlan_table_entry_clear(hpi_handle_t handle, vlan_id_t vlan_id)
+{
+ uint64_t offset;
+ pfc_vlan_table_t vlan_tbl_entry;
+ uint64_t bit;
+
+ /*
+ * Assumes that the hardware will generate the new parity
+ * data.
+ */
+ offset = PFC_VLAN_REG_OFFSET(vlan_id);
+ REG_PIO_READ64(handle, offset, (uint64_t *)&vlan_tbl_entry.value);
+
+ bit = PFC_VLAN_BIT_OFFSET(vlan_id);
+ bit = 1 << bit;
+ vlan_tbl_entry.bits.member = vlan_tbl_entry.bits.member & ~bit;
+
+ REG_PIO_WRITE64(handle, offset, vlan_tbl_entry.value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_cfg_vlan_table_entry_set(hpi_handle_t handle, vlan_id_t vlan_id)
+{
+ uint64_t offset;
+ pfc_vlan_table_t vlan_tbl_entry;
+ uint64_t bit;
+
+ /*
+ * Assumes that the hardware will generate the new parity
+ * data.
+ */
+ offset = PFC_VLAN_REG_OFFSET(vlan_id);
+ REG_PIO_READ64(handle, offset, (uint64_t *)&vlan_tbl_entry.value);
+
+ bit = PFC_VLAN_BIT_OFFSET(vlan_id);
+ bit = 1 << bit;
+ vlan_tbl_entry.bits.member = vlan_tbl_entry.bits.member | bit;
+
+ REG_PIO_WRITE64(handle, offset, vlan_tbl_entry.value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_cfg_vlan_control_set(hpi_handle_t handle, boolean_t parity,
+ boolean_t valid, vlan_id_t vlan_id)
+{
+ pfc_vlan_ctrl_t vlan_control;
+
+ vlan_control.value = 0;
+
+ if (parity)
+ vlan_control.bits.par_en = 1;
+ else
+ vlan_control.bits.par_en = 0;
+
+ if (valid)
+ vlan_control.bits.valid = 1;
+ else
+ vlan_control.bits.valid = 0;
+
+ vlan_control.bits.id = vlan_id;
+
+ REG_PIO_WRITE64(handle, PFC_VLAN_CTRL, vlan_control.value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_get_vlan_parity_log(hpi_handle_t handle, pfc_vlan_par_err_log_t *logp)
+{
+ uint64_t offset;
+
+ offset = PFC_VLAN_PAR_ERR_LOG;
+ REG_PIO_READ64(handle, offset, &logp->value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_mac_address(hpi_handle_t handle, uint32_t slot, uint64_t address)
+{
+ uint64_t offset;
+ uint64_t moffset;
+ pfc_mac_addr_mask_t mask;
+ pfc_mac_addr_t addr;
+
+ if (slot >= PFC_N_MAC_ADDRESSES)
+ return (HPI_FAILURE);
+
+ offset = PFC_MAC_ADDRESS(slot);
+ moffset = PFC_MAC_ADDRESS_MASK(slot);
+
+ addr.bits.addr = address;
+ mask.bits.mask = 0x0;
+
+ REG_PIO_WRITE64(handle, offset, addr.value);
+ REG_PIO_WRITE64(handle, moffset, mask.value);
+
+ return (hpi_pfc_mac_addr_enable(handle, slot));
+}
+
+hpi_status_t
+hpi_pfc_clear_mac_address(hpi_handle_t handle, uint32_t slot)
+{
+ uint64_t offset, moffset;
+ uint64_t zaddr = 0x0ULL;
+ uint64_t zmask = 0x0ULL;
+
+ if (slot >= PFC_N_MAC_ADDRESSES)
+ return (HPI_FAILURE);
+
+ (void) hpi_pfc_mac_addr_disable(handle, slot);
+
+ offset = PFC_MAC_ADDRESS(slot);
+ moffset = PFC_MAC_ADDRESS_MASK(slot);
+
+ REG_PIO_WRITE64(handle, offset, zaddr);
+ REG_PIO_WRITE64(handle, moffset, zmask);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_clear_multicast_hash_table(hpi_handle_t handle, uint32_t slot)
+{
+ uint64_t offset;
+
+ if (slot >= PFC_N_MAC_ADDRESSES)
+ return (HPI_FAILURE);
+
+ offset = PFC_HASH_ADDR(slot);
+ REG_PIO_WRITE64(handle, offset, 0ULL);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_multicast_hash_table(hpi_handle_t handle, uint32_t slot,
+ uint64_t address)
+{
+ uint64_t offset;
+
+ if (slot >= PFC_N_MAC_ADDRESSES)
+ return (HPI_FAILURE);
+
+ offset = PFC_HASH_ADDR(slot);
+ REG_PIO_WRITE64(handle, offset, address);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_l2_class_slot(hpi_handle_t handle, uint16_t etype, boolean_t valid,
+ int slot)
+{
+ pfc_l2_class_config_t l2_config;
+ uint64_t offset;
+
+ if (slot >= PFC_N_MAC_ADDRESSES)
+ return (HPI_FAILURE);
+
+ l2_config.value = 0;
+
+ if (valid)
+ l2_config.bits.valid = 1;
+ else
+ l2_config.bits.valid = 0;
+
+ l2_config.bits.etype = etype;
+ l2_config.bits.rsrvd = 0;
+
+ offset = PFC_L2_CONFIG(slot);
+ REG_PIO_WRITE64(handle, offset, l2_config.value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_l3_class_config(hpi_handle_t handle, tcam_class_t slot,
+ tcam_key_cfg_t cfg)
+{
+ pfc_l3_class_config_t l3_config;
+ uint64_t offset;
+
+ if (slot >= PFC_N_MAC_ADDRESSES)
+ return (HPI_FAILURE);
+
+ l3_config.value = 0;
+
+ if (cfg.lookup_enable)
+ l3_config.bits.tsel = 1;
+ else
+ l3_config.bits.tsel = 0;
+
+ if (cfg.discard)
+ l3_config.bits.discard = 1;
+ else
+ l3_config.bits.discard = 0;
+
+ offset = PFC_L3_CONFIG(slot);
+ REG_PIO_WRITE64(handle, offset, l3_config.value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_get_l3_class_config(hpi_handle_t handle, tcam_class_t slot,
+ tcam_key_cfg_t *cfg)
+{
+ pfc_l3_class_config_t l3_config;
+ uint64_t offset;
+
+ if (slot >= PFC_N_MAC_ADDRESSES)
+ return (HPI_FAILURE);
+
+ offset = PFC_L3_CONFIG(slot);
+ REG_PIO_READ64(handle, offset, &l3_config.value);
+
+ if (l3_config.bits.tsel)
+ cfg->lookup_enable = 1;
+ else
+ cfg->lookup_enable = 0;
+
+ if (l3_config.bits.discard)
+ cfg->discard = 1;
+ else
+ cfg->discard = 0;
+
+ return (HPI_SUCCESS);
+}
+
+static hpi_status_t
+hpi_pfc_set_tcam_control(hpi_handle_t handle, pfc_tcam_ctrl_t *tcontrolp)
+{
+ uint64_t offset;
+
+ offset = PFC_TCAM_CTRL;
+ REG_PIO_WRITE64(handle, offset, tcontrolp->value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_tcam_entry_invalidate(hpi_handle_t handle, uint32_t location)
+{
+ hxge_tcam_entry_t tcam_ptr;
+
+ (void) memset(&tcam_ptr, 0, sizeof (hxge_tcam_entry_t));
+ (void) hpi_pfc_tcam_entry_write(handle, location, &tcam_ptr);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_tcam_invalidate_all(hpi_handle_t handle)
+{
+ int i;
+ pfc_tcam_ctrl_t tcontrol;
+
+ tcontrol.value = 0;
+ for (i = 0; i < PFC_N_TCAM_ENTRIES; i++) {
+ (void) hpi_pfc_set_tcam_control(handle, &tcontrol);
+ (void) hpi_pfc_tcam_entry_invalidate(handle, i);
+ }
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_tcam_entry_write(hpi_handle_t handle, uint32_t location,
+ hxge_tcam_entry_t *tcam_ptr)
+{
+ uint64_t tcam_stat;
+ pfc_tcam_ctrl_t tctl;
+
+ WRITE_TCAM_REG_MASK0(handle, tcam_ptr->mask0);
+ WRITE_TCAM_REG_MASK1(handle, tcam_ptr->mask1);
+
+ WRITE_TCAM_REG_KEY0(handle, tcam_ptr->key0);
+ WRITE_TCAM_REG_KEY1(handle, tcam_ptr->key1);
+
+ HPI_DEBUG_MSG((handle.function, HPI_PFC_CTL,
+ " tcam write: location %x\n key: %llx %llx\n mask: %llx %llx\n",
+ location, tcam_ptr->key0, tcam_ptr->key1,
+ tcam_ptr->mask0, tcam_ptr->mask1));
+
+ tctl.value = 0;
+ tctl.bits.addr = location;
+ tctl.bits.cmd = TCAM_CTL_RWC_TCAM_WR;
+
+ HPI_DEBUG_MSG((handle.function, HPI_PFC_CTL,
+ " tcam write: ctl value %llx \n", tctl.value));
+
+ WRITE_TCAM_REG_CTL(handle, tctl.value);
+
+ tcam_stat = hpi_pfc_tcam_check_completion(handle, TCAM_RWC_STAT);
+
+ if (tcam_stat & HPI_FAILURE) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "TCAM Write failed loc %d \n", location));
+ return (HPI_PFC_TCAM_WR_ERROR);
+ }
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_get_tcam_parity_log(hpi_handle_t handle, pfc_tcam_par_err_log_t *logp)
+{
+ uint64_t offset;
+
+ offset = PFC_TCAM_PAR_ERR_LOG;
+ REG_PIO_READ64(handle, offset, &logp->value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_get_tcam_auto_init(hpi_handle_t handle, pfc_auto_init_t *autoinitp)
+{
+ uint64_t offset;
+
+ offset = PFC_AUTO_INIT;
+ REG_PIO_READ64(handle, offset, &autoinitp->value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_tcp_control_discard(hpi_handle_t handle, boolean_t discard)
+{
+ uint64_t offset;
+ tcp_ctrl_mask_t tcp;
+
+ tcp.value = 0;
+
+ offset = TCP_CTRL_MASK;
+ REG_PIO_READ64(handle, offset, &tcp.value);
+
+ if (discard)
+ tcp.bits.discard = 1;
+ else
+ tcp.bits.discard = 0;
+
+ REG_PIO_WRITE64(handle, offset, tcp.value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_tcp_control_fin(hpi_handle_t handle, boolean_t fin)
+{
+ uint64_t offset;
+ tcp_ctrl_mask_t tcp;
+
+ tcp.value = 0;
+
+ offset = TCP_CTRL_MASK;
+ REG_PIO_READ64(handle, offset, &tcp.value);
+
+ if (fin)
+ tcp.bits.fin = 1;
+ else
+ tcp.bits.fin = 0;
+
+ REG_PIO_WRITE64(handle, offset, tcp.value);
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_tcp_control_syn(hpi_handle_t handle, boolean_t syn)
+{
+ uint64_t offset;
+ tcp_ctrl_mask_t tcp;
+
+ tcp.value = 0;
+
+ offset = TCP_CTRL_MASK;
+ REG_PIO_READ64(handle, offset, &tcp.value);
+
+ if (syn)
+ tcp.bits.syn = 1;
+ else
+ tcp.bits.syn = 0;
+
+ REG_PIO_WRITE64(handle, offset, tcp.value);
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_tcp_control_rst(hpi_handle_t handle, boolean_t rst)
+{
+ uint64_t offset;
+ tcp_ctrl_mask_t tcp;
+
+ tcp.value = 0;
+
+ offset = TCP_CTRL_MASK;
+ REG_PIO_READ64(handle, offset, &tcp.value);
+
+ if (rst)
+ tcp.bits.rst = 1;
+ else
+ tcp.bits.rst = 0;
+
+ REG_PIO_WRITE64(handle, offset, tcp.value);
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_tcp_control_psh(hpi_handle_t handle, boolean_t push)
+{
+ uint64_t offset;
+ tcp_ctrl_mask_t tcp;
+
+ tcp.value = 0;
+
+ offset = TCP_CTRL_MASK;
+ REG_PIO_READ64(handle, offset, &tcp.value);
+
+ if (push)
+ tcp.bits.psh = 1;
+ else
+ tcp.bits.psh = 0;
+
+ REG_PIO_WRITE64(handle, offset, tcp.value);
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_tcp_control_ack(hpi_handle_t handle, boolean_t ack)
+{
+ uint64_t offset;
+ tcp_ctrl_mask_t tcp;
+
+ tcp.value = 0;
+
+ offset = TCP_CTRL_MASK;
+ REG_PIO_READ64(handle, offset, &tcp.value);
+
+ if (ack)
+ tcp.bits.ack = 1;
+ else
+ tcp.bits.ack = 0;
+
+ REG_PIO_WRITE64(handle, offset, tcp.value);
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_hash_seed_value(hpi_handle_t handle, uint32_t seed)
+{
+ uint64_t offset;
+ src_hash_val_t src_hash_seed;
+
+ src_hash_seed.value = 0;
+ src_hash_seed.bits.seed = seed;
+
+ offset = SRC_HASH_VAL;
+ REG_PIO_WRITE64(handle, offset, src_hash_seed.value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_get_interrupt_status(hpi_handle_t handle, pfc_int_status_t *statusp)
+{
+ uint64_t offset;
+
+ offset = PFC_INT_STATUS;
+ REG_PIO_READ64(handle, offset, &statusp->value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_clear_interrupt_status(hpi_handle_t handle)
+{
+ uint64_t offset;
+
+ offset = PFC_INT_STATUS;
+ REG_PIO_WRITE64(handle, offset, HXGE_PFC_INT_STATUS_CLEAR);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_interrupt_mask(hpi_handle_t handle, boolean_t drop,
+ boolean_t tcam_parity_error, boolean_t vlan_parity_error)
+{
+ pfc_int_mask_t mask;
+ uint64_t offset;
+
+ mask.value = 0;
+
+ if (drop)
+ mask.bits.pkt_drop_mask = 1;
+ else
+ mask.bits.pkt_drop_mask = 0;
+
+ if (tcam_parity_error)
+ mask.bits.tcam_parity_err_mask = 1;
+ else
+ mask.bits.tcam_parity_err_mask = 0;
+
+ if (vlan_parity_error)
+ mask.bits.vlan_parity_err_mask = 1;
+ else
+ mask.bits.vlan_parity_err_mask = 0;
+
+ offset = PFC_INT_MASK;
+ REG_PIO_WRITE64(handle, offset, mask.value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_get_drop_log(hpi_handle_t handle, pfc_drop_log_t *logp)
+{
+ uint64_t offset;
+
+ offset = PFC_DROP_LOG;
+ REG_PIO_READ64(handle, offset, &logp->value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_drop_log_mask(hpi_handle_t handle, boolean_t vlan_drop,
+ boolean_t tcam_drop, boolean_t class_code_drop, boolean_t l2_addr_drop,
+ boolean_t tcp_ctrl_drop)
+{
+ uint64_t offset;
+ pfc_drop_log_mask_t log;
+
+ log.value = 0;
+
+ if (vlan_drop)
+ log.bits.vlan_drop_mask = 1;
+ if (tcam_drop)
+ log.bits.tcam_drop_mask = 1;
+ if (class_code_drop)
+ log.bits.class_code_drop_mask = 1;
+ if (l2_addr_drop)
+ log.bits.l2_addr_drop_mask = 1;
+ if (tcp_ctrl_drop)
+ log.bits.tcp_ctrl_drop_mask = 1;
+
+ offset = PFC_DROP_LOG_MASK;
+ REG_PIO_WRITE64(handle, offset, log.value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_get_bad_csum_counter(hpi_handle_t handle, uint64_t *countp)
+{
+ uint64_t offset;
+
+ offset = PFC_BAD_CS_COUNTER;
+ REG_PIO_READ64(handle, offset, countp);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_get_drop_counter(hpi_handle_t handle, uint64_t *countp)
+{
+ uint64_t offset;
+
+ offset = PFC_DROP_COUNTER;
+ REG_PIO_READ64(handle, offset, countp);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_get_number_mac_addrs(hpi_handle_t handle, uint32_t *n_of_addrs)
+{
+ HXGE_REG_RD32(handle, HCR_REG + HCR_N_MAC_ADDRS, n_of_addrs);
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_mac_addr_get_i(hpi_handle_t handle, uint8_t *data, int slot)
+{
+ uint32_t step = sizeof (uint32_t);
+ uint32_t addr_hi = 0, addr_lo = 0;
+
+ if (slot >= PFC_N_MAC_ADDRESSES)
+ return (HPI_FAILURE);
+
+ /*
+ * Read the MAC address out of the SPROM at the blade's
+ * specific location.
+ */
+ HXGE_REG_RD32(handle, HCR_REG + HCR_ADDR_LO + slot * step, &addr_lo);
+ HXGE_REG_RD32(handle, HCR_REG + HCR_ADDR_HI + slot * step, &addr_hi);
+
+ data[0] = addr_lo & 0x000000ff;
+ data[1] = (addr_lo & 0x0000ff00) >> 8;
+ data[2] = (addr_lo & 0x00ff0000) >> 16;
+ data[3] = (addr_lo & 0xff000000) >> 24;
+	data[4] = (addr_hi & 0x0000ff00) >> 8;
+	data[5] = (addr_hi & 0x000000ff);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_num_macs_get(hpi_handle_t handle, uint8_t *data)
+{
+ uint8_t addr[6];
+ uint8_t num = 0;
+ int i;
+
+	for (i = 0; i < PFC_N_MAC_ADDRESSES; i++) {
+ (void) hpi_pfc_mac_addr_get_i(handle, addr, i);
+ if (addr[0] || addr[1] || addr[2] ||
+ addr[3] || addr[4] || addr[5])
+ num++;
+ }
+
+ *data = num;
+
+ return (HPI_SUCCESS);
+}
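
Taken together, programming a classification entry is a two-step
sequence: write the key/mask pair into a TCAM location, then store the
associated action word into the ASC RAM at the same location. A minimal
sketch under that assumption follows (pfc_program_slot is a hypothetical
caller name; error handling is abbreviated):

	/* Program TCAM slot 'location' and its associated RAM word. */
	static hpi_status_t
	pfc_program_slot(hpi_handle_t hdl, uint32_t location,
	    hxge_tcam_entry_t *entry, uint64_t action)
	{
		hpi_status_t rs;

		rs = hpi_pfc_tcam_entry_write(hdl, location, entry);
		if (rs != HPI_SUCCESS)
			return (rs);

		/* The ASC RAM is indexed by the same TCAM location. */
		return (hpi_pfc_tcam_asc_ram_entry_write(hdl, location,
		    action));
	}
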
diff --git a/usr/src/uts/common/io/hxge/hpi_pfc.h b/usr/src/uts/common/io/hxge/hpi_pfc.h
new file mode 100644
index 0000000000..f9163d4ea1
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hpi_pfc.h
@@ -0,0 +1,205 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HPI_PFC_H
+#define _HPI_PFC_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hpi.h>
+#include <hxge_common.h>
+#include <hxge_pfc_hw.h>
+#include <hxge_pfc.h>
+
+typedef enum _tcam_op {
+ TCAM_RWC_STAT = 0x1,
+ TCAM_RWC_MATCH = 0x2
+} tcam_op_t;
+
+/*
+ * HPI PFC ERROR Codes
+ */
+#define	HPI_PFC_BLK_CODE	(PFC_BLK_ID << 8)
+#define HPI_PFC_ERROR (HPI_FAILURE | HPI_PFC_BLK_CODE)
+#define HPI_TCAM_ERROR 0x10
+#define HPI_FCRAM_ERROR 0x20
+#define HPI_GEN_PFC 0x30
+#define HPI_PFC_SW_PARAM_ERROR 0x40
+#define HPI_PFC_HW_ERROR 0x80
+
+#define HPI_PFC_RESET_ERROR (HPI_PFC_ERROR | HPI_GEN_PFC | RESET_FAILED)
+#define HPI_PFC_TCAM_WR_ERROR \
+ (HPI_PFC_ERROR | HPI_TCAM_ERROR | WRITE_FAILED)
+#define HPI_PFC_ASC_RAM_RD_ERROR \
+ (HPI_PFC_ERROR | HPI_TCAM_ERROR | READ_FAILED)
+#define HPI_PFC_ASC_RAM_WR_ERROR \
+ (HPI_PFC_ERROR | HPI_TCAM_ERROR | WRITE_FAILED)
+
+#define TCAM_CLASS_INVALID \
+ (HPI_PFC_SW_PARAM_ERROR | 0xb)
+/* have only 0xc, 0xd, 0xe and 0xf left for sw error codes */
+#define HPI_PFC_TCAM_HW_ERROR \
+ (HPI_PFC_ERROR | HPI_PFC_HW_ERROR | HPI_TCAM_ERROR)
+
+#define PFC_N_VLAN_MEMBERS 0x20
+
+#define PFC_N_MAC_ADDRESSES 16
+#define PFC_MAX_DMA_CHANNELS 4
+#define PFC_MAC_ADDR_STEP 8
+
+#define PFC_HASH_STEP 0x08
+
+#define PFC_L2_CLASS_CONFIG_STEP 0x08
+
+#define PFC_L3_CLASS_CONFIG_STEP 0x08
+
+#define PFC_N_TCAM_ENTRIES 42
+
+#define PFC_VLAN_REG_OFFSET(vlan_id) \
+ ((((vlan_id_t)(vlan_id / PFC_N_VLAN_MEMBERS)) * 8) + PFC_VLAN_TABLE)
+#define PFC_VLAN_BIT_OFFSET(vlan_id) \
+ (vlan_id % PFC_N_VLAN_MEMBERS)
+#define PFC_MAC_ADDRESS(slot) \
+ ((slot * PFC_MAC_ADDR_STEP) + PFC_MAC_ADDR)
+#define PFC_MAC_ADDRESS_MASK(slot) \
+ ((slot * PFC_MAC_ADDR_STEP) + PFC_MAC_ADDR_MASK)
+#define PFC_HASH_ADDR(slot) \
+ ((slot * PFC_HASH_STEP) + PFC_HASH_TABLE)
+
+#define PFC_L2_CONFIG(slot) \
+ ((slot * PFC_L2_CLASS_CONFIG_STEP) + PFC_L2_CLASS_CONFIG)
+#define PFC_L3_CONFIG(slot) \
+ (((slot - TCAM_CLASS_TCP_IPV4) * PFC_L3_CLASS_CONFIG_STEP) + \
+ PFC_L3_CLASS_CONFIG)
+
+typedef uint16_t vlan_id_t;
+
+/*
+ * PFC Control Register Functions
+ */
+hpi_status_t hpi_pfc_set_tcam_enable(hpi_handle_t, boolean_t);
+hpi_status_t hpi_pfc_set_l2_hash(hpi_handle_t, boolean_t);
+hpi_status_t hpi_pfc_set_tcp_cksum(hpi_handle_t, boolean_t);
+hpi_status_t hpi_pfc_set_default_dma(hpi_handle_t, uint32_t);
+hpi_status_t hpi_pfc_mac_addr_enable(hpi_handle_t, uint32_t);
+hpi_status_t hpi_pfc_mac_addr_disable(hpi_handle_t, uint32_t);
+hpi_status_t hpi_pfc_set_force_csum(hpi_handle_t, boolean_t);
+
+/*
+ * PFC vlan Functions
+ */
+hpi_status_t hpi_pfc_cfg_vlan_table_clear(hpi_handle_t);
+hpi_status_t hpi_pfc_cfg_vlan_table_entry_clear(hpi_handle_t, vlan_id_t);
+hpi_status_t hpi_pfc_cfg_vlan_table_entry_set(hpi_handle_t, vlan_id_t);
+hpi_status_t hpi_pfc_cfg_vlan_control_set(hpi_handle_t, boolean_t,
+ boolean_t, vlan_id_t);
+hpi_status_t hpi_pfc_get_vlan_parity_log(hpi_handle_t,
+ pfc_vlan_par_err_log_t *);
+
+/*
+ * PFC Mac Address Functions
+ */
+hpi_status_t hpi_pfc_set_mac_address(hpi_handle_t, uint32_t, uint64_t);
+hpi_status_t hpi_pfc_clear_mac_address(hpi_handle_t, uint32_t);
+hpi_status_t hpi_pfc_clear_multicast_hash_table(hpi_handle_t, uint32_t);
+hpi_status_t hpi_pfc_set_multicast_hash_table(hpi_handle_t, uint32_t,
+ uint64_t);
+
+/*
+ * PFC L2 and L3 Config Functions.
+ */
+hpi_status_t hpi_pfc_set_l2_class_slot(hpi_handle_t, uint16_t, boolean_t, int);
+hpi_status_t hpi_pfc_get_l3_class_config(hpi_handle_t handle, tcam_class_t slot,
+ tcam_key_cfg_t *cfg);
+hpi_status_t hpi_pfc_set_l3_class_config(hpi_handle_t handle, tcam_class_t slot,
+ tcam_key_cfg_t cfg);
+
+/*
+ * PFC TCAM Functions
+ */
+hpi_status_t hpi_pfc_tcam_invalidate_all(hpi_handle_t);
+hpi_status_t hpi_pfc_tcam_entry_invalidate(hpi_handle_t, uint32_t);
+hpi_status_t hpi_pfc_tcam_entry_write(hpi_handle_t, uint32_t,
+ hxge_tcam_entry_t *);
+hpi_status_t hpi_pfc_tcam_entry_read(hpi_handle_t, uint32_t,
+ hxge_tcam_entry_t *);
+hpi_status_t hpi_pfc_tcam_asc_ram_entry_read(hpi_handle_t handle,
+ uint32_t location, uint64_t *ram_data);
+hpi_status_t hpi_pfc_tcam_asc_ram_entry_write(hpi_handle_t handle,
+ uint32_t location, uint64_t ram_data);
+hpi_status_t hpi_pfc_get_tcam_parity_log(hpi_handle_t,
+ pfc_tcam_par_err_log_t *);
+hpi_status_t hpi_pfc_get_tcam_auto_init(hpi_handle_t,
+ pfc_auto_init_t *);
+
+/*
+ * PFC TCP Control
+ */
+hpi_status_t hpi_pfc_set_tcp_control_discard(hpi_handle_t, boolean_t);
+hpi_status_t hpi_pfc_set_tcp_control_fin(hpi_handle_t, boolean_t);
+hpi_status_t hpi_pfc_set_tcp_control_syn(hpi_handle_t, boolean_t);
+hpi_status_t hpi_pfc_set_tcp_control_rst(hpi_handle_t, boolean_t);
+hpi_status_t hpi_pfc_set_tcp_control_psh(hpi_handle_t, boolean_t);
+hpi_status_t hpi_pfc_set_tcp_control_ack(hpi_handle_t, boolean_t);
+hpi_status_t hpi_pfc_set_tcp_control_urg(hpi_handle_t, boolean_t);
+
+/*
+ * PFC Hash Seed Value
+ */
+hpi_status_t hpi_pfc_set_hash_seed_value(hpi_handle_t, uint32_t);
+
+/*
+ * PFC Interrupt Management Functions
+ */
+hpi_status_t hpi_pfc_get_interrupt_status(hpi_handle_t, pfc_int_status_t *);
+hpi_status_t hpi_pfc_clear_interrupt_status(hpi_handle_t);
+hpi_status_t hpi_pfc_set_interrupt_mask(hpi_handle_t, boolean_t,
+ boolean_t, boolean_t);
+
+/*
+ * PFC Packet Logs
+ */
+hpi_status_t hpi_pfc_get_drop_log(hpi_handle_t, pfc_drop_log_t *);
+hpi_status_t hpi_pfc_set_drop_log_mask(hpi_handle_t, boolean_t,
+ boolean_t, boolean_t, boolean_t, boolean_t);
+hpi_status_t hpi_pfc_get_bad_csum_counter(hpi_handle_t, uint64_t *);
+hpi_status_t hpi_pfc_get_drop_counter(hpi_handle_t, uint64_t *);
+
+hpi_status_t hpi_pfc_get_number_mac_addrs(hpi_handle_t handle,
+ uint32_t *n_of_addrs);
+hpi_status_t hpi_pfc_mac_addr_get_i(hpi_handle_t handle, uint8_t *data,
+ int slot);
+hpi_status_t hpi_pfc_num_macs_get(hpi_handle_t handle, uint8_t *data);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_HPI_PFC_H */
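
The HPI status codes compose by OR-ing layers: the HPI_FAILURE flag, the
block id shifted into bits 8-11, a sub-block error class, and one of the
common error codes from hpi.h. A worked expansion of
HPI_PFC_TCAM_WR_ERROR under those definitions:

	/*
	 * HPI_FAILURE       = 0x80000000		(hpi.h)
	 * HPI_PFC_BLK_CODE  = PFC_BLK_ID << 8 = 0x400
	 * HPI_TCAM_ERROR    = 0x10
	 * WRITE_FAILED      = 0x82			(hpi.h)
	 *
	 * HPI_PFC_TCAM_WR_ERROR
	 *   = 0x80000000 | 0x400 | 0x10 | 0x82 = 0x80000492
	 */
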
diff --git a/usr/src/uts/common/io/hxge/hpi_rxdma.c b/usr/src/uts/common/io/hxge/hpi_rxdma.c
new file mode 100644
index 0000000000..516c4a9582
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hpi_rxdma.c
@@ -0,0 +1,597 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hpi_rxdma.h>
+#include <hxge_common.h>
+
+#define RXDMA_RESET_TRY_COUNT 5
+#define RXDMA_RESET_DELAY 5
+
+#define RXDMA_OP_DISABLE 0
+#define RXDMA_OP_ENABLE 1
+#define RXDMA_OP_RESET 2
+
+#define RCR_TIMEOUT_ENABLE 1
+#define RCR_TIMEOUT_DISABLE 2
+#define RCR_THRESHOLD 4
+
+hpi_status_t
+hpi_rxdma_cfg_logical_page_handle(hpi_handle_t handle, uint8_t rdc,
+ uint64_t page_handle)
+{
+ rdc_page_handle_t page_hdl;
+
+ if (!RXDMA_CHANNEL_VALID(rdc)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "rxdma_cfg_logical_page_handle"
+ " Illegal RDC number %d \n", rdc));
+ return (HPI_RXDMA_RDC_INVALID);
+ }
+
+ page_hdl.value = 0;
+ page_hdl.bits.handle = (uint32_t)page_handle;
+
+ RXDMA_REG_WRITE64(handle, RDC_PAGE_HANDLE, rdc, page_hdl.value);
+
+ return (HPI_SUCCESS);
+}
+
+
+/* RX DMA functions */
+static hpi_status_t
+hpi_rxdma_cfg_rdc_ctl(hpi_handle_t handle, uint8_t rdc, uint8_t op)
+{
+ rdc_rx_cfg1_t cfg;
+ uint32_t count = RXDMA_RESET_TRY_COUNT;
+ uint32_t delay_time = RXDMA_RESET_DELAY;
+ uint32_t error = HPI_RXDMA_ERROR_ENCODE(HPI_RXDMA_RESET_ERR, rdc);
+
+ if (!RXDMA_CHANNEL_VALID(rdc)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "hpi_rxdma_cfg_rdc_ctl Illegal RDC number %d \n", rdc));
+ return (HPI_RXDMA_RDC_INVALID);
+ }
+
+ switch (op) {
+ case RXDMA_OP_ENABLE:
+ RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
+ cfg.bits.enable = 1;
+ RXDMA_REG_WRITE64(handle, RDC_RX_CFG1, rdc, cfg.value);
+
+ HXGE_DELAY(delay_time);
+
+ break;
+ case RXDMA_OP_DISABLE:
+ RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
+ cfg.bits.enable = 0;
+ RXDMA_REG_WRITE64(handle, RDC_RX_CFG1, rdc, cfg.value);
+
+ HXGE_DELAY(delay_time);
+ RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
+
+ while ((count--) && (cfg.bits.qst == 0)) {
+ HXGE_DELAY(delay_time);
+ RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
+ }
+ if (cfg.bits.qst == 0) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_rxdma_cfg_rdc_ctl"
+ " RXDMA_OP_DISABLE Failed for RDC %d \n",
+ rdc));
+ return (error);
+ }
+
+ break;
+ case RXDMA_OP_RESET:
+ cfg.value = 0;
+ cfg.bits.reset = 1;
+ RXDMA_REG_WRITE64(handle, RDC_RX_CFG1, rdc, cfg.value);
+ HXGE_DELAY(delay_time);
+ RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
+
+ while ((count--) && (cfg.bits.qst == 0)) {
+ HXGE_DELAY(delay_time);
+ RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
+ }
+		if (cfg.bits.qst == 0) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_rxdma_cfg_rdc_ctl"
+ " Reset Failed for RDC %d \n", rdc));
+ return (error);
+ }
+
+ break;
+ default:
+ return (HPI_RXDMA_SW_PARAM_ERROR);
+ }
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_rxdma_cfg_rdc_enable(hpi_handle_t handle, uint8_t rdc)
+{
+ return (hpi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_ENABLE));
+}
+
+hpi_status_t
+hpi_rxdma_cfg_rdc_disable(hpi_handle_t handle, uint8_t rdc)
+{
+ return (hpi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_DISABLE));
+}
+
+hpi_status_t
+hpi_rxdma_cfg_rdc_reset(hpi_handle_t handle, uint8_t rdc)
+{
+ return (hpi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_RESET));
+}
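
The three wrappers above all funnel into hpi_rxdma_cfg_rdc_ctl(), which writes RDC_RX_CFG1 and, for the disable and reset ops, polls the qst (quiescent state) bit until the channel has drained. A minimal caller-side sketch of the usual quiesce-and-restart order, assuming the driver context these HPI calls normally run in (the handle and channel come from the attach path and are placeholders here):

    /* Sketch: quiesce, reset, and re-arm an RX DMA channel. */
    static hpi_status_t
    example_rdc_restart(hpi_handle_t handle, uint8_t rdc)
    {
    	/* Disable first so the channel reaches its quiescent state. */
    	if (hpi_rxdma_cfg_rdc_disable(handle, rdc) != HPI_SUCCESS)
    		return (HPI_FAILURE);
    	/* Reset clears the RW configuration bits; reconfigure after it. */
    	if (hpi_rxdma_cfg_rdc_reset(handle, rdc) != HPI_SUCCESS)
    		return (HPI_FAILURE);
    	/* ... ring and mailbox configuration would go here ... */
    	return (hpi_rxdma_cfg_rdc_enable(handle, rdc));
    }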
+
+static hpi_status_t
+hpi_rxdma_cfg_rdc_rcr_ctl(hpi_handle_t handle, uint8_t rdc,
+ uint8_t op, uint16_t param)
+{
+ rdc_rcr_cfg_b_t rcr_cfgb;
+
+ if (!RXDMA_CHANNEL_VALID(rdc)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "rxdma_cfg_rdc_rcr_ctl Illegal RDC number %d \n", rdc));
+ return (HPI_RXDMA_RDC_INVALID);
+ }
+
+ RXDMA_REG_READ64(handle, RDC_RCR_CFG_B, rdc, &rcr_cfgb.value);
+
+ switch (op) {
+ case RCR_TIMEOUT_ENABLE:
+ rcr_cfgb.bits.timeout = (uint8_t)param;
+ rcr_cfgb.bits.entout = 1;
+ break;
+
+ case RCR_THRESHOLD:
+ rcr_cfgb.bits.pthres = param;
+ break;
+
+ case RCR_TIMEOUT_DISABLE:
+ rcr_cfgb.bits.entout = 0;
+ break;
+
+ default:
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "rxdma_cfg_rdc_rcr_ctl Illegal opcode %x \n", op));
+ return (HPI_RXDMA_OPCODE_INVALID(rdc));
+ }
+
+ RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B, rdc, rcr_cfgb.value);
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_rxdma_cfg_rdc_rcr_threshold(hpi_handle_t handle, uint8_t rdc,
+ uint16_t rcr_threshold)
+{
+ return (hpi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
+ RCR_THRESHOLD, rcr_threshold));
+}
+
+hpi_status_t
+hpi_rxdma_cfg_rdc_rcr_timeout(hpi_handle_t handle, uint8_t rdc,
+ uint8_t rcr_timeout)
+{
+ return (hpi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
+ RCR_TIMEOUT_ENABLE, rcr_timeout));
+}
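
Together these two wrappers set the RCR interrupt-moderation knobs: pthres fires a completion interrupt after that many packets, and timeout (gated by entout) fires one after that many ticks of quiet. A hedged pairing sketch, with illustrative values only; both must pass the RXDMA_RCR_THRESH_VALID/RXDMA_RCR_TO_VALID checks in hpi_rxdma.h:

    /* Sketch: coalesce RX completions; values are illustrative. */
    (void) hpi_rxdma_cfg_rdc_rcr_threshold(handle, rdc, 0x20); /* 32 pkts */
    (void) hpi_rxdma_cfg_rdc_rcr_timeout(handle, rdc, 8);      /* 8 ticks */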
+
+/*
+ * Configure the RDC channel Rcv Buffer Ring.
+ */
+hpi_status_t
+hpi_rxdma_cfg_rdc_ring(hpi_handle_t handle, uint8_t rdc,
+ rdc_desc_cfg_t *rdc_desc_cfg)
+{
+ rdc_rbr_cfg_a_t cfga;
+ rdc_rbr_cfg_b_t cfgb;
+ rdc_rx_cfg1_t cfg1;
+ rdc_rx_cfg2_t cfg2;
+ rdc_rcr_cfg_a_t rcr_cfga;
+ rdc_rcr_cfg_b_t rcr_cfgb;
+ rdc_page_handle_t page_handle;
+
+ if (!RXDMA_CHANNEL_VALID(rdc)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "rxdma_cfg_rdc_ring Illegal RDC number %d \n", rdc));
+ return (HPI_RXDMA_RDC_INVALID);
+ }
+
+ cfga.value = 0;
+ cfgb.value = 0;
+ cfg1.value = 0;
+ cfg2.value = 0;
+ page_handle.value = 0;
+
+ if (rdc_desc_cfg->mbox_enable == 1) {
+ cfg1.bits.mbaddr_h = (rdc_desc_cfg->mbox_addr >> 32) & 0xfff;
+ cfg2.bits.mbaddr_l = ((rdc_desc_cfg->mbox_addr &
+ RXDMA_CFIG2_MBADDR_L_MASK) >> RXDMA_CFIG2_MBADDR_L_SHIFT);
+
+		/*
+		 * Enable the RDC only after all of the configuration
+		 * is in place; otherwise a fatal configuration error
+		 * is returned (especially if the Hypervisor set up
+		 * the logical pages with non-zero values). This HPI
+		 * function only sets up the configuration; call the
+		 * enable function to enable the RDMC.
+		 */
+ }
+
+ if (rdc_desc_cfg->full_hdr == 1)
+ cfg2.bits.full_hdr = 1;
+
+ if (RXDMA_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
+ cfg2.bits.offset = rdc_desc_cfg->offset;
+ } else {
+ cfg2.bits.offset = SW_OFFSET_NO_OFFSET;
+ }
+
+ /* rbr config */
+ cfga.value = (rdc_desc_cfg->rbr_addr &
+ (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK));
+
+ /* The remaining 20 bits in the DMA address form the handle */
+	page_handle.bits.handle = (rdc_desc_cfg->rbr_addr >> 44) & 0xfffff;
+
+ /*
+	 * The RBR ring size must be a multiple of 64.
+ */
+ if ((rdc_desc_cfg->rbr_len < RBR_DEFAULT_MIN_LEN) ||
+ (rdc_desc_cfg->rbr_len > RBR_DEFAULT_MAX_LEN) ||
+ (rdc_desc_cfg->rbr_len % 64)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "hpi_rxdma_cfg_rdc_ring Illegal RBR Queue Length %d \n",
+ rdc_desc_cfg->rbr_len));
+ return (HPI_RXDMA_ERROR_ENCODE(HPI_RXDMA_RBRSZIE_INVALID, rdc));
+ }
+
+ /*
+ * The lower 6 bits are hardcoded to 0 and the higher 10 bits are
+ * stored in len.
+ */
+ cfga.bits.len = rdc_desc_cfg->rbr_len >> 6;
+ HPI_DEBUG_MSG((handle.function, HPI_RDC_CTL,
+ "hpi_rxdma_cfg_rdc_ring CFGA 0x%llx len %d (RBR LEN %d)\n",
+ cfga.value, cfga.bits.len, rdc_desc_cfg->rbr_len));
+
+ /*
+ * bksize is 1 bit
+ * Buffer Block Size. b0 - 4K; b1 - 8K.
+ */
+ if (rdc_desc_cfg->page_size == SIZE_4KB)
+ cfgb.bits.bksize = RBR_BKSIZE_4K;
+ else if (rdc_desc_cfg->page_size == SIZE_8KB)
+ cfgb.bits.bksize = RBR_BKSIZE_8K;
+ else {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "rxdma_cfg_rdc_ring blksize: Illegal buffer size %d \n",
+ rdc_desc_cfg->page_size));
+ return (HPI_RXDMA_BUFSZIE_INVALID);
+ }
+
+ /*
+ * Size 0 of packet buffer. b00 - 256; b01 - 512; b10 - 1K; b11 - resvd.
+ */
+ if (rdc_desc_cfg->valid0) {
+ if (rdc_desc_cfg->size0 == SIZE_256B)
+ cfgb.bits.bufsz0 = RBR_BUFSZ0_256B;
+ else if (rdc_desc_cfg->size0 == SIZE_512B)
+ cfgb.bits.bufsz0 = RBR_BUFSZ0_512B;
+ else if (rdc_desc_cfg->size0 == SIZE_1KB)
+ cfgb.bits.bufsz0 = RBR_BUFSZ0_1K;
+ else {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " rxdma_cfg_rdc_ring"
+ " blksize0: Illegal buffer size %x \n",
+ rdc_desc_cfg->size0));
+ return (HPI_RXDMA_BUFSZIE_INVALID);
+ }
+ cfgb.bits.vld0 = 1;
+ } else {
+ cfgb.bits.vld0 = 0;
+ }
+
+ /*
+ * Size 1 of packet buffer. b0 - 1K; b1 - 2K.
+ */
+ if (rdc_desc_cfg->valid1) {
+ if (rdc_desc_cfg->size1 == SIZE_1KB)
+ cfgb.bits.bufsz1 = RBR_BUFSZ1_1K;
+ else if (rdc_desc_cfg->size1 == SIZE_2KB)
+ cfgb.bits.bufsz1 = RBR_BUFSZ1_2K;
+ else {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " rxdma_cfg_rdc_ring"
+ " blksize1: Illegal buffer size %x \n",
+ rdc_desc_cfg->size1));
+ return (HPI_RXDMA_BUFSZIE_INVALID);
+ }
+ cfgb.bits.vld1 = 1;
+ } else {
+ cfgb.bits.vld1 = 0;
+ }
+
+ /*
+ * Size 2 of packet buffer. b0 - 2K; b1 - 4K.
+ */
+ if (rdc_desc_cfg->valid2) {
+ if (rdc_desc_cfg->size2 == SIZE_2KB)
+ cfgb.bits.bufsz2 = RBR_BUFSZ2_2K;
+ else if (rdc_desc_cfg->size2 == SIZE_4KB)
+ cfgb.bits.bufsz2 = RBR_BUFSZ2_4K;
+ else {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " rxdma_cfg_rdc_ring"
+ " blksize2: Illegal buffer size %x \n",
+ rdc_desc_cfg->size2));
+ return (HPI_RXDMA_BUFSZIE_INVALID);
+ }
+ cfgb.bits.vld2 = 1;
+ } else {
+ cfgb.bits.vld2 = 0;
+ }
+
+ rcr_cfga.value = (rdc_desc_cfg->rcr_addr &
+ (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK));
+
+ /*
+	 * The RCR len must be a multiple of 32.
+ */
+ if ((rdc_desc_cfg->rcr_len < RCR_DEFAULT_MIN_LEN) ||
+ (rdc_desc_cfg->rcr_len > HXGE_RCR_MAX) ||
+ (rdc_desc_cfg->rcr_len % 32)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " rxdma_cfg_rdc_ring Illegal RCR Queue Length %d \n",
+ rdc_desc_cfg->rcr_len));
+ return (HPI_RXDMA_ERROR_ENCODE(HPI_RXDMA_RCRSZIE_INVALID, rdc));
+ }
+
+ /*
+ * Bits 15:5 of the maximum number of 8B entries in RCR. Bits 4:0 are
+ * hard-coded to zero. The maximum size is 2^16 - 32.
+ */
+ rcr_cfga.bits.len = rdc_desc_cfg->rcr_len >> 5;
+
+ rcr_cfgb.value = 0;
+ if (rdc_desc_cfg->rcr_timeout_enable == 1) {
+ /* check if the rcr timeout value is valid */
+
+ if (RXDMA_RCR_TO_VALID(rdc_desc_cfg->rcr_timeout)) {
+ rcr_cfgb.bits.timeout = rdc_desc_cfg->rcr_timeout;
+ rcr_cfgb.bits.entout = 1;
+ } else {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " rxdma_cfg_rdc_ring"
+ " Illegal RCR Timeout value %d \n",
+ rdc_desc_cfg->rcr_timeout));
+ rcr_cfgb.bits.entout = 0;
+ }
+ } else {
+ rcr_cfgb.bits.entout = 0;
+ }
+
+ /* check if the rcr threshold value is valid */
+ if (RXDMA_RCR_THRESH_VALID(rdc_desc_cfg->rcr_threshold)) {
+ rcr_cfgb.bits.pthres = rdc_desc_cfg->rcr_threshold;
+ } else {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " rxdma_cfg_rdc_ring Illegal RCR Threshold value %d \n",
+ rdc_desc_cfg->rcr_threshold));
+ rcr_cfgb.bits.pthres = 1;
+ }
+
+ /* now do the actual HW configuration */
+ RXDMA_REG_WRITE64(handle, RDC_RX_CFG1, rdc, cfg1.value);
+ RXDMA_REG_WRITE64(handle, RDC_RX_CFG2, rdc, cfg2.value);
+
+ RXDMA_REG_WRITE64(handle, RDC_RBR_CFG_A, rdc, cfga.value);
+ RXDMA_REG_WRITE64(handle, RDC_RBR_CFG_B, rdc, cfgb.value);
+
+ RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_A, rdc, rcr_cfga.value);
+ RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B, rdc, rcr_cfgb.value);
+
+ RXDMA_REG_WRITE64(handle, RDC_PAGE_HANDLE, rdc, page_handle.value);
+
+ return (HPI_SUCCESS);
+}
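
Everything hpi_rxdma_cfg_rdc_ring() programs comes from one rdc_desc_cfg_t (declared in hpi_rxdma.h below). A minimal sketch of a caller, assuming DMA-bound ring and mailbox addresses obtained elsewhere; the addresses, lengths, and buffer sizes here are placeholders that merely satisfy the validity checks above:

    rdc_desc_cfg_t cfg;

    bzero(&cfg, sizeof (cfg));
    cfg.mbox_enable = 1;
    cfg.mbox_addr = mbox_ioaddr;	/* placeholder DMA address */
    cfg.offset = SW_OFFSET_NO_OFFSET;
    cfg.page_size = SIZE_4KB;		/* selects RBR_BKSIZE_4K */
    cfg.valid0 = 1;
    cfg.size0 = SIZE_256B;
    cfg.rbr_addr = rbr_ioaddr;		/* placeholder DMA address */
    cfg.rbr_len = 1024;			/* must be a multiple of 64 */
    cfg.rcr_addr = rcr_ioaddr;		/* placeholder DMA address */
    cfg.rcr_len = 512;			/* must be a multiple of 32 */
    cfg.rcr_timeout_enable = 1;
    cfg.rcr_timeout = 8;
    cfg.rcr_threshold = 0x20;

    if (hpi_rxdma_cfg_rdc_ring(handle, rdc, &cfg) != HPI_SUCCESS)
    	return (HPI_FAILURE);	/* configuration rejected */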
+
+hpi_status_t
+hpi_rxdma_ring_perr_stat_get(hpi_handle_t handle,
+ rdc_pref_par_log_t *pre_log, rdc_pref_par_log_t *sha_log)
+{
+ /*
+	 * Hydra does not provide details about these errors;
+	 * it only provides the addresses at which they occurred.
+ */
+ HXGE_REG_RD64(handle, RDC_PREF_PAR_LOG, &pre_log->value);
+ HXGE_REG_RD64(handle, RDC_SHADOW_PAR_LOG, &sha_log->value);
+
+ return (HPI_SUCCESS);
+}
+
+
+/* system wide conf functions */
+
+hpi_status_t
+hpi_rxdma_cfg_clock_div_set(hpi_handle_t handle, uint16_t count)
+{
+ uint64_t offset;
+ rdc_clock_div_t clk_div;
+
+ offset = RDC_CLOCK_DIV;
+
+ clk_div.value = 0;
+ clk_div.bits.count = count;
+ HPI_DEBUG_MSG((handle.function, HPI_RDC_CTL,
+	    " hpi_rxdma_cfg_clock_div_set: addr 0x%llx "
+ "handle 0x%llx value 0x%llx",
+ handle.regp, handle.regh, clk_div.value));
+
+ HXGE_REG_WR64(handle, offset, clk_div.value);
+
+ return (HPI_SUCCESS);
+}
+
+
+hpi_status_t
+hpi_rxdma_rdc_rbr_stat_get(hpi_handle_t handle, uint8_t rdc,
+ rdc_rbr_qlen_t *rbr_stat)
+{
+ if (!RXDMA_CHANNEL_VALID(rdc)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " rxdma_rdc_rbr_stat_get Illegal RDC Number %d \n", rdc));
+ return (HPI_RXDMA_RDC_INVALID);
+ }
+
+ RXDMA_REG_READ64(handle, RDC_RBR_QLEN, rdc, &rbr_stat->value);
+ return (HPI_SUCCESS);
+}
+
+
+hpi_status_t
+hpi_rxdma_rdc_rcr_qlen_get(hpi_handle_t handle, uint8_t rdc,
+ uint16_t *rcr_qlen)
+{
+ rdc_rcr_qlen_t stats;
+
+ if (!RXDMA_CHANNEL_VALID(rdc)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " rxdma_rdc_rcr_qlen_get Illegal RDC Number %d \n", rdc));
+ return (HPI_RXDMA_RDC_INVALID);
+ }
+
+ RXDMA_REG_READ64(handle, RDC_RCR_QLEN, rdc, &stats.value);
+ *rcr_qlen = stats.bits.qlen;
+ HPI_DEBUG_MSG((handle.function, HPI_RDC_CTL,
+ " rxdma_rdc_rcr_qlen_get RDC %d qlen %x qlen %x\n",
+ rdc, *rcr_qlen, stats.bits.qlen));
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_rxdma_channel_rbr_empty_clear(hpi_handle_t handle, uint8_t channel)
+{
+ rdc_stat_t cs;
+
+ if (!RXDMA_CHANNEL_VALID(channel)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+		    " hpi_rxdma_channel_rbr_empty_clear Invalid Input:"
+		    " channel <0x%x>", channel));
+ return (HPI_FAILURE | HPI_RXDMA_CHANNEL_INVALID(channel));
+ }
+
+ RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
+ cs.bits.rbr_empty = 1;
+ RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
+
+ return (HPI_SUCCESS);
+}
+
+/*
+ * This function is called to operate on the control and status register.
+ */
+hpi_status_t
+hpi_rxdma_control_status(hpi_handle_t handle, io_op_t op_mode, uint8_t channel,
+ rdc_stat_t *cs_p)
+{
+ int status = HPI_SUCCESS;
+ rdc_stat_t cs;
+
+ if (!RXDMA_CHANNEL_VALID(channel)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+		    "hpi_rxdma_control_status Invalid Input: channel <0x%x>",
+		    channel));
+ return (HPI_FAILURE | HPI_RXDMA_CHANNEL_INVALID(channel));
+ }
+
+ switch (op_mode) {
+ case OP_GET:
+ RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs_p->value);
+ break;
+
+ case OP_SET:
+ RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_p->value);
+ break;
+
+ case OP_UPDATE:
+ RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
+ RXDMA_REG_WRITE64(handle, RDC_STAT, channel,
+ cs_p->value | cs.value);
+ break;
+
+ default:
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+		    "hpi_rxdma_control_status Invalid Input: control <0x%x>",
+		    op_mode));
+ return (HPI_FAILURE | HPI_RXDMA_OPCODE_INVALID(channel));
+ }
+
+ return (status);
+}
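
RDC_STAT carries the per-channel event bits; OP_UPDATE ORs the caller's bits into the current register contents. Since status bits of this kind are typically write-one-to-clear, a common acknowledge pattern is a read followed by a write-back, sketched here under that assumption:

    rdc_stat_t cs;

    /* Read the pending event bits ... */
    (void) hpi_rxdma_control_status(handle, OP_GET, channel, &cs);
    /* ... and write them back to acknowledge (assumed write-one-to-clear). */
    (void) hpi_rxdma_control_status(handle, OP_SET, channel, &cs);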
+
+/*
+ * This function is called to operate on the event mask
+ * register which is used for generating interrupts.
+ */
+hpi_status_t
+hpi_rxdma_event_mask(hpi_handle_t handle, io_op_t op_mode, uint8_t channel,
+ rdc_int_mask_t *mask_p)
+{
+ int status = HPI_SUCCESS;
+ rdc_int_mask_t mask;
+
+ if (!RXDMA_CHANNEL_VALID(channel)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+		    "hpi_rxdma_event_mask Invalid Input: channel <0x%x>",
+		    channel));
+ return (HPI_FAILURE | HPI_RXDMA_CHANNEL_INVALID(channel));
+ }
+
+ switch (op_mode) {
+ case OP_GET:
+ RXDMA_REG_READ64(handle, RDC_INT_MASK, channel, &mask_p->value);
+ break;
+
+ case OP_SET:
+ RXDMA_REG_WRITE64(handle, RDC_INT_MASK, channel, mask_p->value);
+ break;
+
+ case OP_UPDATE:
+ RXDMA_REG_READ64(handle, RDC_INT_MASK, channel, &mask.value);
+ RXDMA_REG_WRITE64(handle, RDC_INT_MASK, channel,
+ mask_p->value | mask.value);
+ break;
+
+ default:
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+		    "hpi_rxdma_event_mask Invalid Input: eventmask <0x%x>",
+		    op_mode));
+ return (HPI_FAILURE | HPI_RXDMA_OPCODE_INVALID(channel));
+ }
+
+ return (status);
+}
diff --git a/usr/src/uts/common/io/hxge/hpi_rxdma.h b/usr/src/uts/common/io/hxge/hpi_rxdma.h
new file mode 100644
index 0000000000..2645bd427f
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hpi_rxdma.h
@@ -0,0 +1,201 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HPI_RXDMA_H
+#define _HPI_RXDMA_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hpi.h>
+#include <hxge_defs.h>
+#include <hxge_pfc.h>
+#include <hxge_pfc_hw.h>
+#include <hxge_rdc_hw.h>
+
+#define RXDMA_CFIG2_MBADDR_L_SHIFT 6 /* bit 31:6 */
+#define RXDMA_CFIG2_MBADDR_L_MASK 0x00000000ffffffc0ULL
+
+#define RBR_CFIG_A_STDADDR_MASK 0x000000000003ffc0ULL
+#define RBR_CFIG_A_STDADDR_BASE_MASK 0x00000ffffffc0000ULL
+
+#define RCRCFIG_A_STADDR_SHIFT 6 /* bit 18:6 */
+#define RCRCFIG_A_STADDR_MASK 0x000000000007FFC0ULL
+#define RCRCFIG_A_STADDR_BASE_SHIF 19 /* bit 43:19 */
+#define RCRCFIG_A_STADDR_BASE_MASK 0x00000FFFFFF80000ULL
+#define RCRCFIG_A_LEN_SHIF 48 /* bit 63:48 */
+#define RCRCFIG_A_LEN_MASK 0xFFFF000000000000ULL
+
+#define RCR_FLSH_SHIFT 0 /* RW, bit 0:0 */
+#define RCR_FLSH_SET 0x0000000000000001ULL
+#define RCR_FLSH_MASK 0x0000000000000001ULL
+
+#define RBR_CFIG_A_LEN_SHIFT 48 /* bits 63:48 */
+#define RBR_CFIG_A_LEN_MASK 0xFFFF000000000000ULL
+
+/*
+ * Buffer block descriptor
+ */
+typedef struct _rx_desc_t {
+ uint32_t block_addr;
+} rx_desc_t, *p_rx_desc_t;
+
+typedef enum _bsize {
+ SIZE_0B = 0x0,
+ SIZE_64B,
+ SIZE_128B,
+ SIZE_192B,
+ SIZE_256B,
+ SIZE_512B,
+ SIZE_1KB,
+ SIZE_2KB,
+ SIZE_4KB,
+ SIZE_8KB,
+ SIZE_16KB,
+ SIZE_32KB
+} bsize_t;
+
+typedef struct _rdc_desc_cfg_t {
+	uint8_t mbox_enable;	/* Enable mailbox update */
+ uint8_t full_hdr; /* Enable full (18b) header */
+ uint8_t offset; /* 64 byte offsets */
+ uint8_t valid2; /* size 2 is valid */
+ bsize_t size2; /* Size 2 length */
+ uint8_t valid1; /* size 1 is valid */
+ bsize_t size1; /* Size 1 length */
+ uint8_t valid0; /* size 0 is valid */
+	bsize_t size0;		/* Size 0 length */
+ bsize_t page_size; /* Page or buffer Size */
+ uint8_t rcr_timeout_enable;
+ uint8_t rcr_timeout;
+ uint16_t rcr_threshold;
+	uint16_t rcr_len;	/* RCR Descriptor size (entries) */
+	uint16_t rbr_len;	/* RBR Descriptor size (entries) */
+	uint64_t mbox_addr;	/* Mailbox Address */
+	uint64_t rcr_addr;	/* RCR Address */
+	uint64_t rbr_addr;	/* RBR Address */
+} rdc_desc_cfg_t;
+
+
+/*
+ * Register offset (0x800 bytes for each channel) for receive ring registers.
+ */
+#define HXGE_RXDMA_OFFSET(x, v, channel) (x + \
+ (!v ? DMC_OFFSET(channel) : \
+ RDMC_PIOVADDR_OFFSET(channel)))
+
+#define RXDMA_REG_READ64(handle, reg, channel, data_p) {\
+ HXGE_REG_RD64(handle, (HXGE_RXDMA_OFFSET(reg, handle.is_vraddr,\
+ channel)), (data_p))\
+}
+
+#define RXDMA_REG_READ32(handle, reg, channel) \
+ HXGE_HPI_PIO_READ32(handle, (HXGE_RXDMA_OFFSET(reg, handle.is_vraddr,\
+ channel)))
+
+#define RXDMA_REG_WRITE64(handle, reg, channel, data) {\
+ HXGE_REG_WR64(handle, (HXGE_RXDMA_OFFSET(reg, handle.is_vraddr,\
+ channel)), (data))\
+}
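
HXGE_RXDMA_OFFSET() selects between the direct register space and the per-VR virtual window based on handle.is_vraddr. Assuming DMC_OFFSET() applies the 0x800-byte-per-channel stride noted above, a worked example for channel 3 (illustrative, not driver code):

    /*
     * DMC_OFFSET(3) = 3 * 0x800 = 0x1800, so
     * RXDMA_REG_READ64(handle, RDC_RBR_QLEN, 3, &val)
     * reads from RDC_RBR_QLEN + 0x1800 when is_vraddr is false.
     */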
+
+/*
+ * RX HPI error codes
+ */
+#define RXDMA_ER_ST (RXDMA_BLK_ID << HPI_BLOCK_ID_SHIFT)
+#define RXDMA_ID_SHIFT(n) (n << HPI_PORT_CHAN_SHIFT)
+
+#define HPI_RXDMA_ERROR RXDMA_ER_ST
+
+#define HPI_RXDMA_SW_PARAM_ERROR (HPI_RXDMA_ERROR | 0x40)
+#define HPI_RXDMA_HW_ERROR (HPI_RXDMA_ERROR | 0x80)
+
+#define HPI_RXDMA_RDC_INVALID (HPI_RXDMA_ERROR | CHANNEL_INVALID)
+#define HPI_RXDMA_RESET_ERR (HPI_RXDMA_HW_ERROR | RESET_FAILED)
+#define HPI_RXDMA_BUFSZIE_INVALID (HPI_RXDMA_SW_PARAM_ERROR | 0x0000b)
+#define HPI_RXDMA_RBRSZIE_INVALID (HPI_RXDMA_SW_PARAM_ERROR | 0x0000c)
+#define HPI_RXDMA_RCRSZIE_INVALID (HPI_RXDMA_SW_PARAM_ERROR | 0x0000d)
+
+#define HPI_RXDMA_CHANNEL_INVALID(n) (RXDMA_ID_SHIFT(n) | \
+ HPI_RXDMA_ERROR | CHANNEL_INVALID)
+#define HPI_RXDMA_OPCODE_INVALID(n) (RXDMA_ID_SHIFT(n) | \
+ HPI_RXDMA_ERROR | OPCODE_INVALID)
+
+#define HPI_RXDMA_ERROR_ENCODE(err, rdc) \
+ (RXDMA_ID_SHIFT(rdc) | RXDMA_ER_ST | err)
+
+#define RXDMA_CHANNEL_VALID(rdc) \
+ ((rdc < HXGE_MAX_RDCS))
+
+#define RXDMA_BUFF_OFFSET_VALID(offset) \
+ ((offset == SW_OFFSET_NO_OFFSET) || \
+ (offset == SW_OFFSET_64) || \
+ (offset == SW_OFFSET_128))
+
+#define RXDMA_RCR_TO_VALID(tov) ((tov) && (tov < 64))
+#define RXDMA_RCR_THRESH_VALID(thresh) ((thresh) && (thresh < 512))
+
+#define hpi_rxdma_rdc_rcr_flush(handle, rdc) \
+ RXDMA_REG_WRITE64(handle, RDC_RCR_FLUSH, rdc, \
+ (RCR_FLSH_SET << RCR_FLSH_SHIFT))
+#define hpi_rxdma_rdc_rbr_kick(handle, rdc, num_buffers) \
+ RXDMA_REG_WRITE64(handle, RDC_RBR_KICK, rdc, num_buffers)
+
+hpi_status_t hpi_rxdma_cfg_rdc_ring(hpi_handle_t handle, uint8_t rdc,
+ rdc_desc_cfg_t *rdc_desc_params);
+hpi_status_t hpi_rxdma_cfg_clock_div_set(hpi_handle_t handle, uint16_t count);
+hpi_status_t hpi_rxdma_cfg_logical_page_handle(hpi_handle_t handle, uint8_t rdc,
+ uint64_t pg_handle);
+
+hpi_status_t hpi_rxdma_rdc_rbr_stat_get(hpi_handle_t handle, uint8_t rdc,
+ rdc_rbr_qlen_t *rbr_stat);
+hpi_status_t hpi_rxdma_cfg_rdc_reset(hpi_handle_t handle, uint8_t rdc);
+hpi_status_t hpi_rxdma_cfg_rdc_enable(hpi_handle_t handle, uint8_t rdc);
+hpi_status_t hpi_rxdma_cfg_rdc_disable(hpi_handle_t handle, uint8_t rdc);
+hpi_status_t hpi_rxdma_cfg_rdc_rcr_timeout(hpi_handle_t handle, uint8_t rdc,
+ uint8_t rcr_timeout);
+
+hpi_status_t hpi_rxdma_cfg_rdc_rcr_threshold(hpi_handle_t handle, uint8_t rdc,
+ uint16_t rcr_threshold);
+hpi_status_t hpi_rxdma_rdc_rcr_qlen_get(hpi_handle_t handle,
+ uint8_t rdc, uint16_t *qlen);
+
+hpi_status_t hpi_rxdma_ring_perr_stat_get(hpi_handle_t handle,
+ rdc_pref_par_log_t *pre_log, rdc_pref_par_log_t *sha_log);
+
+hpi_status_t hpi_rxdma_control_status(hpi_handle_t handle, io_op_t op_mode,
+ uint8_t channel, rdc_stat_t *cs_p);
+hpi_status_t hpi_rxdma_event_mask(hpi_handle_t handle, io_op_t op_mode,
+ uint8_t channel, rdc_int_mask_t *mask_p);
+hpi_status_t hpi_rxdma_channel_rbr_empty_clear(hpi_handle_t handle,
+ uint8_t channel);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HPI_RXDMA_H */
diff --git a/usr/src/uts/common/io/hxge/hpi_txdma.c b/usr/src/uts/common/io/hxge/hpi_txdma.c
new file mode 100644
index 0000000000..83802673c4
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hpi_txdma.c
@@ -0,0 +1,487 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hpi_txdma.h>
+
+#define TXDMA_WAIT_LOOP 10000
+#define TXDMA_WAIT_MSEC 5
+
+static hpi_status_t hpi_txdma_control_reset_wait(hpi_handle_t handle,
+ uint8_t channel);
+static hpi_status_t hpi_txdma_control_stop_wait(hpi_handle_t handle,
+ uint8_t channel);
+
+hpi_status_t
+hpi_txdma_log_page_handle_set(hpi_handle_t handle, uint8_t channel,
+ tdc_page_handle_t *hdl_p)
+{
+ int status = HPI_SUCCESS;
+
+ if (!TXDMA_CHANNEL_VALID(channel)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_log_page_handle_set"
+ " Invalid Input: channel <0x%x>", channel));
+ return (HPI_FAILURE | HPI_TXDMA_CHANNEL_INVALID(channel));
+ }
+
+ TXDMA_REG_WRITE64(handle, TDC_PAGE_HANDLE, channel, hdl_p->value);
+
+ return (status);
+}
+
+hpi_status_t
+hpi_txdma_channel_reset(hpi_handle_t handle, uint8_t channel)
+{
+ HPI_DEBUG_MSG((handle.function, HPI_TDC_CTL,
+	    " hpi_txdma_channel_reset RESETTING channel %d", channel));
+ return (hpi_txdma_channel_control(handle, TXDMA_RESET, channel));
+}
+
+hpi_status_t
+hpi_txdma_channel_init_enable(hpi_handle_t handle, uint8_t channel)
+{
+ return (hpi_txdma_channel_control(handle, TXDMA_INIT_START, channel));
+}
+
+hpi_status_t
+hpi_txdma_channel_enable(hpi_handle_t handle, uint8_t channel)
+{
+ return (hpi_txdma_channel_control(handle, TXDMA_START, channel));
+}
+
+hpi_status_t
+hpi_txdma_channel_disable(hpi_handle_t handle, uint8_t channel)
+{
+ return (hpi_txdma_channel_control(handle, TXDMA_STOP, channel));
+}
+
+hpi_status_t
+hpi_txdma_channel_mbox_enable(hpi_handle_t handle, uint8_t channel)
+{
+ return (hpi_txdma_channel_control(handle, TXDMA_MBOX_ENABLE, channel));
+}
+
+hpi_status_t
+hpi_txdma_channel_control(hpi_handle_t handle, txdma_cs_cntl_t control,
+ uint8_t channel)
+{
+ int status = HPI_SUCCESS;
+ tdc_stat_t cs;
+ tdc_tdr_cfg_t cfg;
+
+ if (!TXDMA_CHANNEL_VALID(channel)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_channel_control"
+ " Invalid Input: channel <0x%x>", channel));
+ return (HPI_FAILURE | HPI_TXDMA_CHANNEL_INVALID(channel));
+ }
+
+ switch (control) {
+ case TXDMA_INIT_RESET:
+ cfg.value = 0;
+ TXDMA_REG_READ64(handle, TDC_TDR_CFG, channel, &cfg.value);
+ cfg.bits.reset = 1;
+ TXDMA_REG_WRITE64(handle, TDC_TDR_CFG, channel, cfg.value);
+ return (hpi_txdma_control_reset_wait(handle, channel));
+
+ case TXDMA_INIT_START:
+ cfg.value = 0;
+ TXDMA_REG_READ64(handle, TDC_TDR_CFG, channel, &cfg.value);
+ cfg.bits.enable = 1;
+ TXDMA_REG_WRITE64(handle, TDC_TDR_CFG, channel, cfg.value);
+ break;
+
+ case TXDMA_RESET:
+ /*
+		 * Set the reset bit only (hardware will reset all the RW
+		 * bits but leave the RO bits alone).
+ */
+ cfg.value = 0;
+ cfg.bits.reset = 1;
+ TXDMA_REG_WRITE64(handle, TDC_TDR_CFG, channel, cfg.value);
+ return (hpi_txdma_control_reset_wait(handle, channel));
+
+ case TXDMA_START:
+ /* Enable the DMA channel */
+ TXDMA_REG_READ64(handle, TDC_TDR_CFG, channel, &cfg.value);
+ cfg.bits.enable = 1;
+ TXDMA_REG_WRITE64(handle, TDC_TDR_CFG, channel, cfg.value);
+ break;
+
+ case TXDMA_STOP:
+ /* Disable the DMA channel */
+ TXDMA_REG_READ64(handle, TDC_TDR_CFG, channel, &cfg.value);
+ cfg.bits.enable = 0;
+ TXDMA_REG_WRITE64(handle, TDC_TDR_CFG, channel, cfg.value);
+ status = hpi_txdma_control_stop_wait(handle, channel);
+ if (status) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "Cannot stop channel %d (TXC hung!)", channel));
+ }
+ break;
+
+ case TXDMA_MBOX_ENABLE:
+ /*
+ * Write 1 to MB bit to enable mailbox update (cleared to 0 by
+ * hardware after update).
+ */
+ TXDMA_REG_READ64(handle, TDC_STAT, channel, &cs.value);
+ cs.bits.mb = 1;
+ TXDMA_REG_WRITE64(handle, TDC_STAT, channel, cs.value);
+ break;
+
+ default:
+ status = (HPI_FAILURE | HPI_TXDMA_OPCODE_INVALID(channel));
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_channel_control"
+ " Invalid Input: control <0x%x>", control));
+ }
+
+ return (status);
+}
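
hpi_txdma_channel_control() is the single dispatch point for the TX channel state machine; TXDMA_STOP and the reset ops wait on the qst bit through the static helpers at the bottom of this file. A hedged teardown sketch built from the wrappers above:

    /* Sketch: quiesce a TX channel before teardown. */
    if (hpi_txdma_channel_disable(handle, channel) != HPI_SUCCESS) {
    	/* The stop timed out; fall back to a full channel reset. */
    	(void) hpi_txdma_channel_reset(handle, channel);
    }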
+
+hpi_status_t
+hpi_txdma_control_status(hpi_handle_t handle, io_op_t op_mode, uint8_t channel,
+ tdc_stat_t *cs_p)
+{
+ int status = HPI_SUCCESS;
+ tdc_stat_t txcs;
+
+ if (!TXDMA_CHANNEL_VALID(channel)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_control_status"
+ " Invalid Input: channel <0x%x>", channel));
+ return (HPI_FAILURE | HPI_TXDMA_CHANNEL_INVALID(channel));
+ }
+ switch (op_mode) {
+ case OP_GET:
+ TXDMA_REG_READ64(handle, TDC_STAT, channel, &cs_p->value);
+ break;
+
+ case OP_SET:
+ TXDMA_REG_WRITE64(handle, TDC_STAT, channel, cs_p->value);
+ break;
+
+ case OP_UPDATE:
+ TXDMA_REG_READ64(handle, TDC_STAT, channel, &txcs.value);
+ TXDMA_REG_WRITE64(handle, TDC_STAT, channel,
+ cs_p->value | txcs.value);
+ break;
+
+ default:
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_control_status"
+ " Invalid Input: control <0x%x>", op_mode));
+ return (HPI_FAILURE | HPI_TXDMA_OPCODE_INVALID(channel));
+ }
+
+ return (status);
+}
+
+hpi_status_t
+hpi_txdma_event_mask(hpi_handle_t handle, io_op_t op_mode, uint8_t channel,
+ tdc_int_mask_t *mask_p)
+{
+ int status = HPI_SUCCESS;
+ tdc_int_mask_t mask;
+
+ if (!TXDMA_CHANNEL_VALID(channel)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_event_mask Invalid Input: channel <0x%x>",
+ channel));
+ return (HPI_FAILURE | HPI_TXDMA_CHANNEL_INVALID(channel));
+ }
+ switch (op_mode) {
+ case OP_GET:
+ TXDMA_REG_READ64(handle, TDC_INT_MASK, channel, &mask_p->value);
+ break;
+
+ case OP_SET:
+ TXDMA_REG_WRITE64(handle, TDC_INT_MASK, channel, mask_p->value);
+ break;
+
+ case OP_UPDATE:
+ TXDMA_REG_READ64(handle, TDC_INT_MASK, channel, &mask.value);
+ TXDMA_REG_WRITE64(handle, TDC_INT_MASK, channel,
+ mask_p->value | mask.value);
+ break;
+
+ default:
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_event_mask Invalid Input: eventmask <0x%x>",
+ op_mode));
+ return (HPI_FAILURE | HPI_TXDMA_OPCODE_INVALID(channel));
+ }
+
+ return (status);
+}
+
+hpi_status_t
+hpi_txdma_ring_config(hpi_handle_t handle, io_op_t op_mode,
+ uint8_t channel, uint64_t *reg_data)
+{
+ int status = HPI_SUCCESS;
+
+ if (!TXDMA_CHANNEL_VALID(channel)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_ring_config"
+ " Invalid Input: channel <0x%x>", channel));
+ return (HPI_FAILURE | HPI_TXDMA_CHANNEL_INVALID(channel));
+ }
+ switch (op_mode) {
+ case OP_GET:
+ TXDMA_REG_READ64(handle, TDC_TDR_CFG, channel, reg_data);
+ break;
+
+ case OP_SET:
+ TXDMA_REG_WRITE64(handle, TDC_TDR_CFG, channel, *reg_data);
+ break;
+
+ default:
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_ring_config"
+ " Invalid Input: ring_config <0x%x>", op_mode));
+ return (HPI_FAILURE | HPI_TXDMA_OPCODE_INVALID(channel));
+ }
+
+ return (status);
+}
+
+hpi_status_t
+hpi_txdma_mbox_config(hpi_handle_t handle, io_op_t op_mode,
+ uint8_t channel, uint64_t *mbox_addr)
+{
+ int status = HPI_SUCCESS;
+ tdc_mbh_t mh;
+ tdc_mbl_t ml;
+
+ if (!TXDMA_CHANNEL_VALID(channel)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_mbox_config Invalid Input: channel <0x%x>",
+ channel));
+ return (HPI_FAILURE | HPI_TXDMA_CHANNEL_INVALID(channel));
+ }
+
+ mh.value = ml.value = 0;
+
+ switch (op_mode) {
+ case OP_GET:
+ TXDMA_REG_READ64(handle, TDC_MBH, channel, &mh.value);
+ TXDMA_REG_READ64(handle, TDC_MBL, channel, &ml.value);
+ *mbox_addr = ml.value;
+ *mbox_addr |= (mh.value << TDC_MBH_ADDR_SHIFT);
+
+ break;
+
+ case OP_SET:
+ ml.bits.mbaddr = ((*mbox_addr & TDC_MBL_MASK) >> TDC_MBL_SHIFT);
+ TXDMA_REG_WRITE64(handle, TDC_MBL, channel, ml.value);
+ mh.bits.mbaddr = ((*mbox_addr >> TDC_MBH_ADDR_SHIFT) &
+ TDC_MBH_MASK);
+ TXDMA_REG_WRITE64(handle, TDC_MBH, channel, mh.value);
+ break;
+
+ default:
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_mbox_config Invalid Input: mbox <0x%x>",
+ op_mode));
+ return (HPI_FAILURE | HPI_TXDMA_OPCODE_INVALID(channel));
+ }
+
+ return (status);
+}
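
The 64-bit mailbox DMA address is split across two registers: TDC_MBL holds the 64-byte-aligned low bits and TDC_MBH the bits above TDC_MBH_ADDR_SHIFT. A short round-trip sketch (the address is a placeholder for a real DMA binding):

    uint64_t mbox = mbox_ioaddr;	/* placeholder, 64-byte aligned */

    /* Program the split low/high halves ... */
    (void) hpi_txdma_mbox_config(handle, OP_SET, channel, &mbox);
    /* ... then read them back, reassembled into one 64-bit address. */
    (void) hpi_txdma_mbox_config(handle, OP_GET, channel, &mbox);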
+
+/*
+ * This function is called to set up a transmit descriptor entry.
+ */
+hpi_status_t
+hpi_txdma_desc_gather_set(hpi_handle_t handle, p_tx_desc_t desc_p,
+ uint8_t gather_index, boolean_t mark, uint8_t ngathers,
+ uint64_t dma_ioaddr, uint32_t transfer_len)
+{
+ int status;
+
+ status = HPI_TXDMA_GATHER_INDEX(gather_index);
+ if (status) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_desc_gather_set"
+ " Invalid Input: gather_index <0x%x>", gather_index));
+ return (status);
+ }
+ if (transfer_len > TX_MAX_TRANSFER_LENGTH) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_desc_gather_set"
+ " Invalid Input: tr_len <0x%x>", transfer_len));
+ return (HPI_FAILURE | HPI_TXDMA_XFER_LEN_INVALID);
+ }
+ if (gather_index == 0) {
+ desc_p->bits.sop = 1;
+ desc_p->bits.mark = mark;
+ desc_p->bits.num_ptr = ngathers;
+ HPI_DEBUG_MSG((handle.function, HPI_TDC_CTL,
+ "hpi_txdma_gather_set: SOP len %d (%d)",
+ desc_p->bits.tr_len, transfer_len));
+ }
+ desc_p->bits.tr_len = transfer_len;
+ desc_p->bits.sad = dma_ioaddr;
+
+ HPI_DEBUG_MSG((handle.function, HPI_TDC_CTL,
+ "hpi_txdma_gather_set: xfer len %d to set (%d)",
+ desc_p->bits.tr_len, transfer_len));
+
+ HXGE_MEM_PIO_WRITE64(handle, desc_p->value);
+
+ return (status);
+}
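
A packet with several DMA cookies takes one descriptor per gather pointer, and only gather index 0 carries the SOP bit, the mark, and the pointer count for the whole chain. A hedged two-gather sketch (buffer addresses and lengths are placeholders):

    tx_desc_t desc0, desc1;

    desc0.value = 0;
    desc1.value = 0;
    /* Gather 0: SOP set; num_ptr counts the whole two-entry chain. */
    (void) hpi_txdma_desc_gather_set(handle, &desc0, 0, B_TRUE, 2,
        hdr_ioaddr, hdr_len);
    /* Gather 1: payload only; SOP/mark/num_ptr are left untouched. */
    (void) hpi_txdma_desc_gather_set(handle, &desc1, 1, B_FALSE, 2,
        payload_ioaddr, payload_len);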
+
+hpi_status_t
+hpi_txdma_desc_set_zero(hpi_handle_t handle, uint16_t entries)
+{
+ uint32_t offset;
+ int i;
+
+	/*
+	 * Assume no wrap-around; the descriptors are contiguous.
+	 */
+	offset = 0;
+	for (i = 0; i < entries; i++) {
+		HXGE_REG_WR64(handle, offset, 0);
+		offset += sizeof (tx_desc_t);
+	}
+
+ return (HPI_SUCCESS);
+}
+
+/*
+ * This function is called to get the transmit ring head index.
+ */
+hpi_status_t
+hpi_txdma_ring_head_get(hpi_handle_t handle, uint8_t channel,
+ tdc_tdr_head_t *hdl_p)
+{
+ int status = HPI_SUCCESS;
+
+ if (!TXDMA_CHANNEL_VALID(channel)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_txdma_ring_head_get"
+ " Invalid Input: channel <0x%x>", channel));
+ return (HPI_FAILURE | HPI_TXDMA_CHANNEL_INVALID(channel));
+ }
+ TXDMA_REG_READ64(handle, TDC_TDR_HEAD, channel, &hdl_p->value);
+
+ return (status);
+}
+
+/*
+ * Dumps the contents of transmit descriptors.
+ */
+/*ARGSUSED*/
+void
+hpi_txdma_dump_desc_one(hpi_handle_t handle, p_tx_desc_t desc_p, int desc_index)
+{
+ tx_desc_t desc, *desp;
+
+#ifdef HXGE_DEBUG
+ uint64_t sad;
+ int xfer_len;
+#endif
+
+ HPI_DEBUG_MSG((handle.function, HPI_TDC_CTL,
+ "\n==> hpi_txdma_dump_desc_one: dump "
+ " desc_p $%p descriptor entry %d\n", desc_p, desc_index));
+ desc.value = 0;
+ desp = ((desc_p != NULL) ? desc_p : (p_tx_desc_t)&desc);
+ desp->value = HXGE_MEM_PIO_READ64(handle);
+#ifdef HXGE_DEBUG
+ sad = desp->bits.sad;
+ xfer_len = desp->bits.tr_len;
+#endif
+ HPI_DEBUG_MSG((handle.function, HPI_TDC_CTL, "\n\t: value 0x%llx\n"
+ "\t\tsad $%p\ttr_len %d len %d\tnptrs %d\tmark %d sop %d\n",
+ desp->value, sad, desp->bits.tr_len, xfer_len,
+ desp->bits.num_ptr, desp->bits.mark, desp->bits.sop));
+
+ HPI_DEBUG_MSG((handle.function, HPI_TDC_CTL,
+ "\n<== hpi_txdma_dump_desc_one: Done \n"));
+}
+
+/*
+ * Static functions start here.
+ */
+static hpi_status_t
+hpi_txdma_control_reset_wait(hpi_handle_t handle, uint8_t channel)
+{
+ tdc_tdr_cfg_t txcs;
+ int loop = 0;
+
+ txcs.value = 0;
+ do {
+ HXGE_DELAY(TXDMA_WAIT_MSEC);
+ TXDMA_REG_READ64(handle, TDC_TDR_CFG, channel, &txcs.value);
+
+ /*
+		 * Reset completes when this bit is set to 1 by hardware.
+ */
+ if (txcs.bits.qst) {
+ return (HPI_SUCCESS);
+ }
+ loop++;
+ } while (loop < TXDMA_WAIT_LOOP);
+
+ if (loop == TXDMA_WAIT_LOOP) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "hpi_txdma_control_reset_wait: RST bit not "
+ "cleared to 0 txcs.bits 0x%llx", txcs.value));
+ return (HPI_FAILURE | HPI_TXDMA_RESET_FAILED);
+ }
+ return (HPI_SUCCESS);
+}
+
+static hpi_status_t
+hpi_txdma_control_stop_wait(hpi_handle_t handle, uint8_t channel)
+{
+ tdc_tdr_cfg_t txcs;
+ int loop = 0;
+
+ do {
+ txcs.value = 0;
+ HXGE_DELAY(TXDMA_WAIT_MSEC);
+ TXDMA_REG_READ64(handle, TDC_TDR_CFG, channel, &txcs.value);
+ if (txcs.bits.qst) {
+ return (HPI_SUCCESS);
+ }
+ loop++;
+ } while (loop < TXDMA_WAIT_LOOP);
+
+ if (loop == TXDMA_WAIT_LOOP) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ "hpi_txdma_control_stop_wait: SNG_STATE not "
+ "set to 1 txcs.bits 0x%llx", txcs.value));
+ return (HPI_FAILURE | HPI_TXDMA_STOP_FAILED);
+ }
+ return (HPI_SUCCESS);
+}
diff --git a/usr/src/uts/common/io/hxge/hpi_txdma.h b/usr/src/uts/common/io/hxge/hpi_txdma.h
new file mode 100644
index 0000000000..9fbf846f8f
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hpi_txdma.h
@@ -0,0 +1,142 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HPI_TXDMA_H
+#define _HPI_TXDMA_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hpi.h>
+#include <hxge_txdma_hw.h>
+#include <hxge_tdc_hw.h>
+
+typedef enum _txdma_cs_cntl_e {
+ TXDMA_INIT_RESET = 0x1,
+ TXDMA_INIT_START = 0x2,
+ TXDMA_START = 0x3,
+ TXDMA_RESET = 0x4,
+ TXDMA_STOP = 0x5,
+ TXDMA_MBOX_ENABLE = 0x6
+} txdma_cs_cntl_t;
+
+#define HXGE_TXDMA_OFFSET(x, v, channel) (x + \
+ (!v ? DMC_OFFSET(channel) : TDMC_PIOVADDR_OFFSET(channel)))
+/*
+ * PIO macros to read and write the transmit registers.
+ */
+#define TXDMA_REG_READ64(handle, reg, channel, val_p) \
+ HXGE_REG_RD64(handle, \
+ (HXGE_TXDMA_OFFSET(reg, handle.is_vraddr, channel)), val_p)
+
+#define TXDMA_REG_WRITE64(handle, reg, channel, data) \
+ HXGE_REG_WR64(handle, \
+ HXGE_TXDMA_OFFSET(reg, handle.is_vraddr, channel), data)
+
+#define HPI_TXDMA_GATHER_INDEX(index) \
+	(((index) <= TX_MAX_GATHER_POINTERS) ? HPI_SUCCESS : \
+	(HPI_TXDMA_GATHER_INVALID))
+
+/*
+ * Transmit HPI error codes
+ */
+#define TXDMA_ER_ST (TXDMA_BLK_ID << HPI_BLOCK_ID_SHIFT)
+#define TXDMA_ID_SHIFT(n) (n << HPI_PORT_CHAN_SHIFT)
+
+#define TXDMA_HW_STOP_FAILED (HPI_BK_HW_ER_START | 0x1)
+#define TXDMA_HW_RESUME_FAILED (HPI_BK_HW_ER_START | 0x2)
+
+#define TXDMA_GATHER_INVALID (HPI_BK_ERROR_START | 0x1)
+#define TXDMA_XFER_LEN_INVALID (HPI_BK_ERROR_START | 0x2)
+
+#define HPI_TXDMA_OPCODE_INVALID(n) (TXDMA_ID_SHIFT(n) | \
+ TXDMA_ER_ST | OPCODE_INVALID)
+
+#define HPI_TXDMA_FUNC_INVALID(n) (TXDMA_ID_SHIFT(n) | \
+ TXDMA_ER_ST | PORT_INVALID)
+#define HPI_TXDMA_CHANNEL_INVALID(n) (TXDMA_ID_SHIFT(n) | \
+ TXDMA_ER_ST | CHANNEL_INVALID)
+
+#define HPI_TXDMA_PAGE_INVALID(n) (TXDMA_ID_SHIFT(n) | \
+ TXDMA_ER_ST | LOGICAL_PAGE_INVALID)
+
+#define HPI_TXDMA_REGISTER_INVALID (TXDMA_ER_ST | REGISTER_INVALID)
+#define HPI_TXDMA_COUNTER_INVALID (TXDMA_ER_ST | COUNTER_INVALID)
+#define HPI_TXDMA_CONFIG_INVALID (TXDMA_ER_ST | CONFIG_INVALID)
+
+
+#define HPI_TXDMA_GATHER_INVALID (TXDMA_ER_ST | TXDMA_GATHER_INVALID)
+#define HPI_TXDMA_XFER_LEN_INVALID (TXDMA_ER_ST | TXDMA_XFER_LEN_INVALID)
+
+#define HPI_TXDMA_RESET_FAILED (TXDMA_ER_ST | RESET_FAILED)
+#define HPI_TXDMA_STOP_FAILED (TXDMA_ER_ST | TXDMA_HW_STOP_FAILED)
+#define HPI_TXDMA_RESUME_FAILED (TXDMA_ER_ST | TXDMA_HW_RESUME_FAILED)
+
+/*
+ * Transmit DMA Channel HPI Prototypes.
+ */
+hpi_status_t hpi_txdma_log_page_handle_set(hpi_handle_t handle,
+ uint8_t channel, tdc_page_handle_t *hdl_p);
+hpi_status_t hpi_txdma_channel_reset(hpi_handle_t handle, uint8_t channel);
+hpi_status_t hpi_txdma_channel_init_enable(hpi_handle_t handle,
+ uint8_t channel);
+hpi_status_t hpi_txdma_channel_enable(hpi_handle_t handle, uint8_t channel);
+hpi_status_t hpi_txdma_channel_disable(hpi_handle_t handle, uint8_t channel);
+hpi_status_t hpi_txdma_channel_mbox_enable(hpi_handle_t handle,
+ uint8_t channel);
+hpi_status_t hpi_txdma_channel_control(hpi_handle_t handle,
+ txdma_cs_cntl_t control, uint8_t channel);
+hpi_status_t hpi_txdma_control_status(hpi_handle_t handle, io_op_t op_mode,
+ uint8_t channel, tdc_stat_t *cs_p);
+
+hpi_status_t hpi_txdma_event_mask(hpi_handle_t handle, io_op_t op_mode,
+ uint8_t channel, tdc_int_mask_t *mask_p);
+
+hpi_status_t hpi_txdma_ring_config(hpi_handle_t handle, io_op_t op_mode,
+ uint8_t channel, uint64_t *reg_data);
+hpi_status_t hpi_txdma_mbox_config(hpi_handle_t handle, io_op_t op_mode,
+ uint8_t channel, uint64_t *mbox_addr);
+hpi_status_t hpi_txdma_desc_gather_set(hpi_handle_t handle,
+ p_tx_desc_t desc_p, uint8_t gather_index,
+ boolean_t mark, uint8_t ngathers,
+ uint64_t dma_ioaddr, uint32_t transfer_len);
+
+hpi_status_t hpi_txdma_desc_set_xfer_len(hpi_handle_t handle,
+ p_tx_desc_t desc_p, uint32_t transfer_len);
+
+hpi_status_t hpi_txdma_desc_set_zero(hpi_handle_t handle, uint16_t entries);
+hpi_status_t hpi_txdma_ring_head_get(hpi_handle_t handle, uint8_t channel,
+ tdc_tdr_head_t *hdl_p);
+void hpi_txdma_dump_desc_one(hpi_handle_t handle, p_tx_desc_t desc_p,
+ int desc_index);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HPI_TXDMA_H */
diff --git a/usr/src/uts/common/io/hxge/hpi_vir.c b/usr/src/uts/common/io/hxge/hpi_vir.c
new file mode 100644
index 0000000000..c21177f654
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hpi_vir.c
@@ -0,0 +1,260 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hpi_vir.h>
+#include <hxge_defs.h>
+
+/*
+ * Set up a logical group number that a logical device belongs to.
+ */
+hpi_status_t
+hpi_fzc_ldg_num_set(hpi_handle_t handle, uint8_t ld, uint8_t ldg)
+{
+ ld_grp_ctrl_t gnum;
+
+ if (!LD_VALID(ld)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_fzc_ldg_num_set ld <0x%x>", ld));
+ return (HPI_FAILURE | HPI_VIR_LD_INVALID(ld));
+ }
+
+ if (!LDG_VALID(ldg)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_fzc_ldg_num_set ldg <0x%x>", ldg));
+		return (HPI_FAILURE | HPI_VIR_LDG_INVALID(ldg));
+ }
+
+ gnum.value = 0;
+ gnum.bits.num = ldg;
+
+ HXGE_REG_WR32(handle, LD_GRP_CTRL + LD_NUM_OFFSET(ld), gnum.value);
+
+ return (HPI_SUCCESS);
+}
+
+/*
+ * Get device state vectors.
+ */
+hpi_status_t
+hpi_ldsv_ldfs_get(hpi_handle_t handle, uint8_t ldg, uint32_t *vector0_p,
+ uint32_t *vector1_p)
+{
+ int status;
+
+ if ((status = hpi_ldsv_get(handle, ldg, VECTOR0, vector0_p))) {
+ return (status);
+ }
+ if ((status = hpi_ldsv_get(handle, ldg, VECTOR1, vector1_p))) {
+ return (status);
+ }
+
+ return (HPI_SUCCESS);
+}
+
+/*
+ * Get device state vectors.
+ */
+hpi_status_t
+hpi_ldsv_get(hpi_handle_t handle, uint8_t ldg, ldsv_type_t vector,
+ uint32_t *ldf_p)
+{
+ uint32_t offset;
+
+ if (!LDG_VALID(ldg)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_ldsv_get Invalid Input ldg <0x%x>", ldg));
+ return (HPI_FAILURE | HPI_VIR_LDG_INVALID(ldg));
+ }
+
+ switch (vector) {
+ case VECTOR0:
+ offset = LDSV0 + LDSV_OFFSET(ldg);
+ break;
+
+ case VECTOR1:
+ offset = LDSV1 + LDSV_OFFSET(ldg);
+ break;
+
+ default:
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_ldsv_get Invalid Input: ldsv type <0x%x>", vector));
+ return (HPI_FAILURE | HPI_VIR_LDSV_INVALID(vector));
+ }
+
+ HXGE_REG_RD32(handle, offset, ldf_p);
+
+ return (HPI_SUCCESS);
+}
+
+/*
+ * Set the mask bits for both ldf0 and ldf1.
+ */
+hpi_status_t
+hpi_intr_mask_set(hpi_handle_t handle, uint8_t ld, uint8_t ldf_mask)
+{
+ uint32_t offset;
+
+ if (!LD_VALID(ld)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_intr_mask_set ld", ld));
+ return (HPI_FAILURE | HPI_VIR_LD_INVALID(ld));
+ }
+
+ ldf_mask &= LD_IM_MASK;
+ offset = LDSV_OFFSET_MASK(ld);
+
+ HPI_DEBUG_MSG((handle.function, HPI_VIR_CTL,
+ "hpi_intr_mask_set: ld %d offset 0x%0x mask 0x%x",
+ ld, offset, ldf_mask));
+
+ HXGE_REG_WR32(handle, offset, (uint32_t)ldf_mask);
+
+ return (HPI_SUCCESS);
+}
+
+/*
+ * Set interrupt timer and arm bit.
+ */
+hpi_status_t
+hpi_intr_ldg_mgmt_set(hpi_handle_t handle, uint8_t ldg, boolean_t arm,
+ uint8_t timer)
+{
+ ld_intr_mgmt_t mgm;
+
+ if (!LDG_VALID(ldg)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_intr_ldg_mgmt_set Invalid Input: ldg <0x%x>", ldg));
+ return (HPI_FAILURE | HPI_VIR_LDG_INVALID(ldg));
+ }
+
+ if (!LD_INTTIMER_VALID(timer)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_intr_ldg_mgmt_set Invalid Input"
+ " timer <0x%x>", timer));
+ return (HPI_FAILURE | HPI_VIR_INTM_TM_INVALID(ldg));
+ }
+
+	mgm.value = 0;
+	if (arm) {
+		mgm.bits.arm = 1;
+	} else {
+		HXGE_REG_RD32(handle, LD_INTR_MGMT + LDSV_OFFSET(ldg),
+		    &mgm.value);
+	}
+
+ mgm.bits.timer = timer;
+ HXGE_REG_WR32(handle, LD_INTR_MGMT + LDSV_OFFSET(ldg), mgm.value);
+
+ HPI_DEBUG_MSG((handle.function, HPI_VIR_CTL,
+ " hpi_intr_ldg_mgmt_set: ldg %d reg offset 0x%x",
+ ldg, LD_INTR_MGMT + LDSV_OFFSET(ldg)));
+
+ return (HPI_SUCCESS);
+}
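
An interrupt handler typically reads both state vectors for its group and then re-arms the group with a fresh countdown. A minimal hedged sketch (the 4-tick timer is illustrative):

    uint32_t v0, v1;

    /* Read the group's state vectors, then re-arm with the timer. */
    (void) hpi_ldsv_ldfs_get(handle, ldg, &v0, &v1);
    (void) hpi_intr_ldg_mgmt_set(handle, ldg, B_TRUE, 4);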
+
+/*
+ * Set the timer resolution.
+ */
+hpi_status_t
+hpi_fzc_ldg_timer_res_set(hpi_handle_t handle, uint32_t res)
+{
+ ld_intr_tim_res_t tm;
+
+ if (res > LDGTITMRES_RES_MASK) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_fzc_ldg_timer_res_set Invalid Input: res <0x%x>",
+ res));
+ return (HPI_FAILURE | HPI_VIR_TM_RES_INVALID);
+ }
+
+ tm.value = 0;
+ tm.bits.res = res;
+
+ HXGE_REG_WR32(handle, LD_INTR_TIM_RES, tm.value);
+
+ return (HPI_SUCCESS);
+}
+
+/*
+ * Set the system interrupt data.
+ */
+hpi_status_t
+hpi_fzc_sid_set(hpi_handle_t handle, fzc_sid_t sid)
+{
+ sid_t sd;
+
+ if (!LDG_VALID(sid.ldg)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_fzc_sid_set Invalid Input: ldg <0x%x>", sid.ldg));
+ return (HPI_FAILURE | HPI_VIR_LDG_INVALID(sid.ldg));
+ }
+
+ if (!SID_VECTOR_VALID(sid.vector)) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_fzc_sid_set Invalid Input: vector <0x%x>",
+ sid.vector));
+
+ return (HPI_FAILURE | HPI_VIR_SID_VEC_INVALID(sid.vector));
+ }
+
+ sd.value = 0;
+ sd.bits.data = sid.vector;
+ HXGE_REG_WR32(handle, SID + LDG_SID_OFFSET(sid.ldg), sd.value);
+
+ return (HPI_SUCCESS);
+}
+
+/*
+ * Mask/Unmask the device error mask bits.
+ */
+hpi_status_t
+hpi_fzc_sys_err_mask_set(hpi_handle_t handle, boolean_t mask)
+{
+ dev_err_mask_t dev_mask;
+
+ dev_mask.value = 0;
+ if (mask) {
+ dev_mask.bits.tdc_mask0 = 1;
+ dev_mask.bits.rdc_mask0 = 1;
+ dev_mask.bits.vnm_pio_mask1 = 1;
+ dev_mask.bits.tdc_mask1 = 1;
+ dev_mask.bits.rdc_mask1 = 1;
+ dev_mask.bits.peu_mask1 = 1;
+ }
+
+ HXGE_REG_WR32(handle, DEV_ERR_MASK, dev_mask.value);
+ return (HPI_SUCCESS);
+}
+
+/*
+ * Get the system error stats.
+ */
+hpi_status_t
+hpi_fzc_sys_err_stat_get(hpi_handle_t handle, dev_err_stat_t *statp)
+{
+ HXGE_REG_RD32(handle, DEV_ERR_STAT, &statp->value);
+ return (HPI_SUCCESS);
+}
diff --git a/usr/src/uts/common/io/hxge/hpi_vir.h b/usr/src/uts/common/io/hxge/hpi_vir.h
new file mode 100644
index 0000000000..69eaa35c6a
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hpi_vir.h
@@ -0,0 +1,114 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HPI_VIR_H
+#define _HPI_VIR_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hpi.h>
+#include <hxge_peu_hw.h>
+
+/*
+ * Virtualization and Logical devices HPI error codes
+ */
+#define VIR_ERR_ST (VIR_BLK_ID << HPI_BLOCK_ID_SHIFT)
+#define VIR_ID_SHIFT(n) (n << HPI_PORT_CHAN_SHIFT)
+
+#define VIR_LD_INVALID (HPI_BK_ERROR_START | 0x30)
+#define VIR_LDG_INVALID (HPI_BK_ERROR_START | 0x31)
+#define VIR_LDSV_INVALID (HPI_BK_ERROR_START | 0x32)
+
+#define VIR_INTM_TM_INVALID (HPI_BK_ERROR_START | 0x33)
+#define VIR_TM_RES_INVALID (HPI_BK_ERROR_START | 0x34)
+#define VIR_SID_VEC_INVALID (HPI_BK_ERROR_START | 0x35)
+
+/*
+ * Error codes of logical devices and groups functions.
+ */
+#define HPI_VIR_LD_INVALID(n) (VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_LD_INVALID)
+#define HPI_VIR_LDG_INVALID(n) (VIR_ID_SHIFT(n) | VIR_ERR_ST | VIR_LDG_INVALID)
+#define HPI_VIR_LDSV_INVALID(n) (VIR_ID_SHIFT(n) | \
+ VIR_ERR_ST | VIR_LDSV_INVALID)
+#define HPI_VIR_INTM_TM_INVALID(n) (VIR_ID_SHIFT(n) | \
+ VIR_ERR_ST | VIR_INTM_TM_INVALID)
+#define HPI_VIR_TM_RES_INVALID (VIR_ERR_ST | VIR_TM_RES_INVALID)
+#define HPI_VIR_SID_VEC_INVALID(n) (VIR_ID_SHIFT(n) | \
+				VIR_ERR_ST | VIR_SID_VEC_INVALID)
+
+/*
+ * Logical device definitions.
+ */
+#define LDG_NUM_STEP 4
+#define LD_NUM_OFFSET(ld) (ld * LDG_NUM_STEP)
+
+#define LDSV_STEP 8192
+#define LDSVG_OFFSET(ldg) (ldg * LDSV_STEP)
+#define LDSV_OFFSET(ldv) (ldv * LDSV_STEP)
+#define LDSV_OFFSET_MASK(ld) (LD_INTR_MASK + LDSV_OFFSET(ld))
+
+#define LDG_SID_STEP 8192
+#define LDG_SID_OFFSET(ldg) (ldg * LDG_SID_STEP)
+
+typedef enum {
+ VECTOR0,
+	VECTOR1
+} ldsv_type_t;
+
+/*
+ * Definitions for the system interrupt data.
+ */
+typedef struct _fzc_sid {
+ uint8_t ldg;
+ uint8_t vector;
+} fzc_sid_t, *p_fzc_sid_t;
+
+/*
+ * Virtualization and Interrupt Prototypes.
+ */
+hpi_status_t hpi_fzc_ldg_num_set(hpi_handle_t handle, uint8_t ld, uint8_t ldg);
+hpi_status_t hpi_ldsv_ldfs_get(hpi_handle_t handle, uint8_t ldg,
+	uint32_t *vector0_p, uint32_t *vector1_p);
+hpi_status_t hpi_ldsv_get(hpi_handle_t handle, uint8_t ldg, ldsv_type_t vector,
+ uint32_t *ldf_p);
+hpi_status_t hpi_intr_mask_set(hpi_handle_t handle, uint8_t ld,
+ uint8_t ldf_mask);
+hpi_status_t hpi_intr_ldg_mgmt_set(hpi_handle_t handle, uint8_t ldg,
+ boolean_t arm, uint8_t timer);
+hpi_status_t hpi_fzc_ldg_timer_res_set(hpi_handle_t handle, uint32_t res);
+hpi_status_t hpi_fzc_sid_set(hpi_handle_t handle, fzc_sid_t sid);
+hpi_status_t hpi_fzc_sys_err_mask_set(hpi_handle_t handle, boolean_t mask);
+hpi_status_t hpi_fzc_sys_err_stat_get(hpi_handle_t handle,
+ dev_err_stat_t *statp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HPI_VIR_H */
diff --git a/usr/src/uts/common/io/hxge/hpi_vmac.c b/usr/src/uts/common/io/hxge/hpi_vmac.c
new file mode 100644
index 0000000000..a1844e65fc
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hpi_vmac.c
@@ -0,0 +1,282 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hpi_vmac.h>
+
+#define HXGE_VMAC_RX_STAT_CLEAR 0x1ffULL
+#define HXGE_VMAC_TX_STAT_CLEAR 0x7ULL
+#define HXGE_VMAC_RX_MASK_OVERFLOW 0x1fe
+#define HXGE_VMAC_RX_MASK_FRAME 0x1
+
+hpi_status_t
+hpi_tx_vmac_reset(hpi_handle_t handle)
+{
+ vmac_rst_t reset;
+
+ HXGE_REG_RD64(handle, VMAC_RST, &(reset.value));
+
+ reset.bits.tx_reset = 1;
+
+ HXGE_REG_WR64(handle, VMAC_RST, reset.value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_rx_vmac_reset(hpi_handle_t handle)
+{
+ vmac_rst_t reset;
+
+ HXGE_REG_RD64(handle, VMAC_RST, &(reset.value));
+
+ reset.bits.rx_reset = 1;
+
+ HXGE_REG_WR64(handle, VMAC_RST, reset.value);
+
+ return (HPI_SUCCESS);
+}
+
+
+hpi_status_t
+hpi_vmac_tx_config(hpi_handle_t handle, config_op_t op, uint64_t config,
+ uint16_t max_frame_length)
+{
+ vmac_tx_cfg_t cfg;
+
+ if (config == 0) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_vmac_tx_config Invalid Input: config <0x%x>",
+ config));
+ return (HPI_FAILURE);
+ }
+
+ HXGE_REG_RD64(handle, VMAC_TX_CFG, &cfg.value);
+
+ switch (op) {
+ case ENABLE:
+ if (config & CFG_VMAC_TX_EN)
+ cfg.bits.tx_en = 1;
+ if (config & CFG_VMAC_TX_CRC_INSERT)
+ cfg.bits.crc_insert = 1;
+ if (config & CFG_VMAC_TX_PAD)
+ cfg.bits.tx_pad = 1;
+ if (max_frame_length)
+ cfg.bits.tx_max_frame_length = max_frame_length;
+ break;
+ case DISABLE:
+ if (config & CFG_VMAC_TX_EN)
+ cfg.bits.tx_en = 0;
+ if (config & CFG_VMAC_TX_CRC_INSERT)
+ cfg.bits.crc_insert = 0;
+ if (config & CFG_VMAC_TX_PAD)
+ cfg.bits.tx_pad = 0;
+ break;
+ case INIT:
+ if (config & CFG_VMAC_TX_EN)
+ cfg.bits.tx_en = 1;
+ else
+ cfg.bits.tx_en = 0;
+
+ if (config & CFG_VMAC_TX_CRC_INSERT)
+ cfg.bits.crc_insert = 1;
+ else
+ cfg.bits.crc_insert = 0;
+
+ if (config & CFG_VMAC_TX_PAD)
+ cfg.bits.tx_pad = 1;
+ else
+ cfg.bits.tx_pad = 0;
+
+ if (max_frame_length)
+ cfg.bits.tx_max_frame_length = max_frame_length;
+
+ break;
+ default:
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_vmac_tx_config Invalid Input: op <0x%x>", op));
+ return (HPI_FAILURE);
+ }
+
+ HXGE_REG_WR64(handle, VMAC_TX_CFG, cfg.value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_vmac_rx_config(hpi_handle_t handle, config_op_t op, uint64_t config,
+ uint16_t max_frame_length)
+{
+ vmac_rx_cfg_t cfg;
+
+ if (config == 0) {
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_vmac_rx_config Invalid Input: config <0x%x>",
+ config));
+ return (HPI_FAILURE);
+ }
+
+ HXGE_REG_RD64(handle, VMAC_RX_CFG, &cfg.value);
+
+ switch (op) {
+ case ENABLE:
+ if (config & CFG_VMAC_RX_EN)
+ cfg.bits.rx_en = 1;
+ if (config & CFG_VMAC_RX_CRC_CHECK_DISABLE)
+ cfg.bits.crc_check_disable = 1;
+ if (config & CFG_VMAC_RX_STRIP_CRC)
+ cfg.bits.strip_crc = 1;
+ if (config & CFG_VMAC_RX_PASS_FLOW_CTRL_FR)
+ cfg.bits.pass_flow_ctrl_fr = 1;
+ if (config & CFG_VMAC_RX_PROMIXCUOUS_GROUP)
+ cfg.bits.promiscuous_group = 1;
+ if (config & CFG_VMAC_RX_PROMISCUOUS_MODE)
+ cfg.bits.promiscuous_mode = 1;
+ if (config & CFG_VMAC_RX_LOOP_BACK)
+ cfg.bits.loopback = 1;
+ break;
+ case DISABLE:
+ if (config & CFG_VMAC_RX_EN)
+ cfg.bits.rx_en = 0;
+ if (config & CFG_VMAC_RX_CRC_CHECK_DISABLE)
+ cfg.bits.crc_check_disable = 0;
+ if (config & CFG_VMAC_RX_STRIP_CRC)
+ cfg.bits.strip_crc = 0;
+ if (config & CFG_VMAC_RX_PASS_FLOW_CTRL_FR)
+ cfg.bits.pass_flow_ctrl_fr = 0;
+ if (config & CFG_VMAC_RX_PROMIXCUOUS_GROUP)
+ cfg.bits.promiscuous_group = 0;
+ if (config & CFG_VMAC_RX_PROMISCUOUS_MODE)
+ cfg.bits.promiscuous_mode = 0;
+ if (config & CFG_VMAC_RX_LOOP_BACK)
+ cfg.bits.loopback = 0;
+ break;
+ case INIT:
+ if (config & CFG_VMAC_RX_EN)
+ cfg.bits.rx_en = 1;
+ else
+ cfg.bits.rx_en = 0;
+ if (config & CFG_VMAC_RX_CRC_CHECK_DISABLE)
+ cfg.bits.crc_check_disable = 1;
+ else
+ cfg.bits.crc_check_disable = 0;
+ if (config & CFG_VMAC_RX_STRIP_CRC)
+ cfg.bits.strip_crc = 1;
+ else
+ cfg.bits.strip_crc = 0;
+ if (config & CFG_VMAC_RX_PASS_FLOW_CTRL_FR)
+ cfg.bits.pass_flow_ctrl_fr = 1;
+ else
+ cfg.bits.pass_flow_ctrl_fr = 0;
+ if (config & CFG_VMAC_RX_PROMIXCUOUS_GROUP)
+ cfg.bits.promiscuous_group = 1;
+ else
+ cfg.bits.promiscuous_group = 0;
+ if (config & CFG_VMAC_RX_PROMISCUOUS_MODE)
+ cfg.bits.promiscuous_mode = 1;
+ else
+ cfg.bits.promiscuous_mode = 0;
+ if (config & CFG_VMAC_RX_LOOP_BACK)
+ cfg.bits.loopback = 1;
+ else
+ cfg.bits.loopback = 0;
+
+ break;
+ default:
+ HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
+ " hpi_vmac_rx_config Invalid Input: op <0x%x>", op));
+ return (HPI_FAILURE);
+ }
+
+ if (max_frame_length)
+ cfg.bits.rx_max_frame_length = max_frame_length;
+
+ HXGE_REG_WR64(handle, VMAC_RX_CFG, cfg.value);
+
+ return (HPI_SUCCESS);
+}
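
The INIT op rewrites every RX policy bit from the config word, while ENABLE and DISABLE touch only the bits that are set in it; max_frame_length is applied whenever it is non-zero. A hedged bring-up sketch (the frame length is illustrative):

    /* Sketch: full RX VMAC init with CRC stripping enabled. */
    (void) hpi_vmac_rx_config(handle, INIT,
        CFG_VMAC_RX_EN | CFG_VMAC_RX_STRIP_CRC, 0x2400);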
+
+hpi_status_t
+hpi_vmac_clear_rx_int_stat(hpi_handle_t handle)
+{
+ uint64_t offset;
+
+ offset = VMAC_RX_STAT;
+ REG_PIO_WRITE64(handle, offset, HXGE_VMAC_RX_STAT_CLEAR);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_vmac_clear_tx_int_stat(hpi_handle_t handle)
+{
+ uint64_t offset;
+
+ offset = VMAC_TX_STAT;
+ REG_PIO_WRITE64(handle, offset, HXGE_VMAC_TX_STAT_CLEAR);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_rx_int_stat_mask(hpi_handle_t handle, boolean_t overflow_cnt,
+ boolean_t frame_cnt)
+{
+ uint64_t offset;
+ uint64_t value = 0;
+
+ if (overflow_cnt)
+ value |= HXGE_VMAC_RX_MASK_OVERFLOW;
+
+ if (frame_cnt)
+ value |= HXGE_VMAC_RX_MASK_FRAME;
+
+ offset = VMAC_RX_MSK;
+ REG_PIO_WRITE64(handle, offset, value);
+
+ return (HPI_SUCCESS);
+}
+
+hpi_status_t
+hpi_pfc_set_tx_int_stat_mask(hpi_handle_t handle, boolean_t overflow_cnt,
+ boolean_t frame_cnt)
+{
+ uint64_t offset;
+ uint64_t value = 0;
+ uint64_t overflow_mask = 0x6;
+ uint64_t frame_mask = 0x1;
+
+ if (overflow_cnt)
+ value |= overflow_mask;
+
+ if (frame_cnt)
+ value |= frame_mask;
+
+ offset = VMAC_TX_MSK;
+ REG_PIO_WRITE64(handle, offset, value);
+
+ return (HPI_SUCCESS);
+}
diff --git a/usr/src/uts/common/io/hxge/hpi_vmac.h b/usr/src/uts/common/io/hxge/hpi_vmac.h
new file mode 100644
index 0000000000..a17a37a276
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hpi_vmac.h
@@ -0,0 +1,67 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HPI_MAC_H
+#define _HPI_MAC_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hpi.h>
+#include <hxge_vmac_hw.h>
+
+hpi_status_t hpi_tx_vmac_reset(hpi_handle_t handle);
+hpi_status_t hpi_rx_vmac_reset(hpi_handle_t handle);
+hpi_status_t hpi_vmac_tx_config(hpi_handle_t handle, config_op_t op,
+ uint64_t config, uint16_t max_frame_length);
+hpi_status_t hpi_vmac_rx_config(hpi_handle_t handle, config_op_t op,
+ uint64_t config, uint16_t max_frame_length);
+hpi_status_t hpi_vmac_clear_rx_int_stat(hpi_handle_t handle);
+hpi_status_t hpi_vmac_clear_tx_int_stat(hpi_handle_t handle);
+hpi_status_t hpi_pfc_set_rx_int_stat_mask(hpi_handle_t handle,
+ boolean_t overflow_cnt, boolean_t frame_cnt);
+hpi_status_t hpi_pfc_set_tx_int_stat_mask(hpi_handle_t handle,
+ boolean_t overflow_cnt, boolean_t frame_cnt);
+
+#define CFG_VMAC_TX_EN 0x00000001
+#define CFG_VMAC_TX_CRC_INSERT 0x00000002
+#define CFG_VMAC_TX_PAD 0x00000004
+
+#define CFG_VMAC_RX_EN 0x00000001
+#define CFG_VMAC_RX_CRC_CHECK_DISABLE 0x00000002
+#define CFG_VMAC_RX_STRIP_CRC 0x00000004
+#define CFG_VMAC_RX_PASS_FLOW_CTRL_FR 0x00000008
+#define CFG_VMAC_RX_PROMIXCUOUS_GROUP 0x00000010
+#define CFG_VMAC_RX_PROMISCUOUS_MODE 0x00000020
+#define CFG_VMAC_RX_LOOP_BACK 0x00000040
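+
+/*
+ * Illustrative use of the flags above (a sketch, not a mandated calling
+ * sequence): enabling receive with CRC stripping would look like
+ *
+ *	(void) hpi_vmac_rx_config(handle, INIT,
+ *	    CFG_VMAC_RX_EN | CFG_VMAC_RX_STRIP_CRC, 0);
+ *
+ * A max_frame_length of 0 leaves the current rx_max_frame_length
+ * field untouched (see hpi_vmac_rx_config() in hpi_vmac.c).
+ */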
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HPI_MAC_H */
diff --git a/usr/src/uts/common/io/hxge/hxge.conf b/usr/src/uts/common/io/hxge/hxge.conf
new file mode 100644
index 0000000000..91242f6fa0
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge.conf
@@ -0,0 +1,100 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#########################################################################
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+#
+# driver.conf file for Sun 10Gb Ethernet Driver (hxge)
+#
+#
+#------- Jumbo frame support ---------------------------------
+# To enable jumbo support,
+# accept-jumbo = 1;
+#
+# To disable jumbo support,
+# accept-jumbo = 0;
+#
+# Default is 0.
+#
+#
+#------- Receive DMA Configuration ----------------------------
+#
+# rxdma-intr-time
+# Interrupt after this number of NIU hardware ticks has
+# elapsed since the last packet was received.
+# A value of zero means no time blanking (Default = 8).
+#
+# rxdma-intr-pkts
+# Interrupt after this number of packets have arrived since
+# the last packet was serviced. A value of zero indicates
+# no packet blanking (Default = 0x20).
+#
+# Default Interrupt Blanking parameters.
+#
+# rxdma-intr-time = 0x8;
+# rxdma-intr-pkts = 0x20;
+#
+#
+#------- Classification and Load Distribution Configuration ------
+#
+# class-opt-****-***
+# These variables define how each IP class is configured.
+# Configuration options include whether TCAM lookup
+# is enabled and whether to discard packets of this class.
+#
+# supported classes:
+# class-opt-ipv4-tcp class-opt-ipv4-udp class-opt-ipv4-sctp
+# class-opt-ipv4-ah class-opt-ipv6-tcp class-opt-ipv6-udp
+# class-opt-ipv6-sctp class-opt-ipv6-ah
+#
+# Configuration bits (the driver decodes the following
+# values as hexadecimal).
+#
+# 0x10000: TCAM lookup for this IP class
+# 0x20000: Discard packets of this IP class
+#
+# class-opt-ipv4-tcp = 0x10000;
+# class-opt-ipv4-udp = 0x10000;
+# class-opt-ipv4-sctp = 0x10000;
+# class-opt-ipv4-ah = 0x10000;
+# class-opt-ipv6-tcp = 0x10000;
+# class-opt-ipv6-udp = 0x10000;
+# class-opt-ipv6-sctp = 0x10000;
+# class-opt-ipv6-ah = 0x10000;
+#
+#
+#------- FMA Capabilities ---------------------------------
+#
+# Change FMA capabilities to non-default
+#
+# DDI_FM_NOT_CAPABLE 0x00000000
+# DDI_FM_EREPORT_CAPABLE 0x00000001
+# DDI_FM_ACCCHK_CAPABLE 0x00000002
+# DDI_FM_DMACHK_CAPABLE 0x00000004
+# DDI_FM_ERRCB_CAPABLE 0x00000008
+#
+# fm-capable = 0xF;
+#
+# default is DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE = 0x5
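+#
+#------- Example ---------------------------------------------
+#
+# An illustrative combination of the settings described above
+# (example values only, not recommendations): jumbo frames on,
+# wider receive interrupt blanking, and all four FMA
+# capabilities enabled.
+#
+# accept-jumbo = 1;
+# rxdma-intr-time = 0x10;
+# rxdma-intr-pkts = 0x40;
+# fm-capable = 0xF;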
diff --git a/usr/src/uts/common/io/hxge/hxge.h b/usr/src/uts/common/io/hxge/hxge.h
new file mode 100644
index 0000000000..79dc287737
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge.h
@@ -0,0 +1,591 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_H
+#define _SYS_HXGE_HXGE_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hxge_vmac.h>
+#include <hxge_pfc.h>
+#include <hxge_classify.h>
+
+/*
+ * HXGE diagnostics IOCTLS.
+ */
+#define HXGE_IOC ((((('N' << 8) + 'X') << 8) + 'G') << 8)
+
+#define HXGE_GET64 (HXGE_IOC|1)
+#define HXGE_PUT64 (HXGE_IOC|2)
+#define HXGE_GET_TX_RING_SZ (HXGE_IOC|3)
+#define HXGE_GET_TX_DESC (HXGE_IOC|4)
+#define HXGE_GLOBAL_RESET (HXGE_IOC|5)
+#define HXGE_TX_SIDE_RESET (HXGE_IOC|6)
+#define HXGE_RX_SIDE_RESET (HXGE_IOC|7)
+#define HXGE_RESET_MAC (HXGE_IOC|8)
+#define HXGE_RTRACE (HXGE_IOC|9)
+#define HXGE_GET_TCAM (HXGE_IOC|10)
+#define HXGE_PUT_TCAM (HXGE_IOC|11)
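+
+/*
+ * Illustrative only (a hypothetical diagnostic utility, not part of
+ * the driver): these codes are issued with ioctl(2) against an open
+ * hxge instance, e.g.
+ *
+ *	if (ioctl(fd, HXGE_GLOBAL_RESET) < 0)
+ *		...
+ *
+ * where fd is a descriptor for the hxge device node.
+ */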
+
+#define HXGE_OK 0
+#define HXGE_ERROR 0x40000000
+#define HXGE_DDI_FAILED 0x20000000
+
+/*
+ * Definitions for module_info.
+ */
+#define HXGE_DRIVER_NAME "hxge" /* module name */
+#define HXGE_CHECK_TIMER (5000)
+
+typedef enum {
+ param_instance,
+
+ param_accept_jumbo,
+ param_rxdma_rbr_size,
+ param_rxdma_rcr_size,
+ param_rxdma_intr_time,
+ param_rxdma_intr_pkts,
+ param_vlan_ids,
+ param_implicit_vlan_id,
+ param_tcam_enable,
+
+ param_hash_init_value,
+ param_class_cfg_ether_usr1,
+ param_class_cfg_ether_usr2,
+ param_class_opt_ipv4_tcp,
+ param_class_opt_ipv4_udp,
+ param_class_opt_ipv4_ah,
+ param_class_opt_ipv4_sctp,
+ param_class_opt_ipv6_tcp,
+ param_class_opt_ipv6_udp,
+ param_class_opt_ipv6_ah,
+ param_class_opt_ipv6_sctp,
+ param_hxge_debug_flag,
+ param_hpi_debug_flag,
+ param_dump_ptrs,
+ param_end
+} hxge_param_index_t;
+
+
+#define HXGE_PARAM_READ 0x00000001ULL
+#define HXGE_PARAM_WRITE 0x00000002ULL
+#define HXGE_PARAM_SHARED 0x00000004ULL
+#define HXGE_PARAM_PRIV 0x00000008ULL
+#define HXGE_PARAM_RW (HXGE_PARAM_READ | HXGE_PARAM_WRITE)
+#define HXGE_PARAM_RWS (HXGE_PARAM_RW | HXGE_PARAM_SHARED)
+#define HXGE_PARAM_RWP (HXGE_PARAM_RW | HXGE_PARAM_PRIV)
+
+#define HXGE_PARAM_RXDMA 0x00000010ULL
+#define HXGE_PARAM_TXDMA 0x00000020ULL
+#define HXGE_PARAM_MAC 0x00000040ULL
+
+#define HXGE_PARAM_CMPLX 0x00010000ULL
+#define HXGE_PARAM_NDD_WR_OK 0x00020000ULL
+#define HXGE_PARAM_INIT_ONLY 0x00040000ULL
+#define HXGE_PARAM_INIT_CONFIG 0x00080000ULL
+
+#define HXGE_PARAM_READ_PROP 0x00100000ULL
+#define HXGE_PARAM_PROP_ARR32 0x00200000ULL
+#define HXGE_PARAM_PROP_ARR64 0x00400000ULL
+#define HXGE_PARAM_PROP_STR 0x00800000ULL
+
+#define HXGE_PARAM_DONT_SHOW 0x80000000ULL
+
+#define HXGE_PARAM_ARRAY_CNT_MASK 0x0000ffff00000000ULL
+#define HXGE_PARAM_ARRAY_CNT_SHIFT 32ULL
+#define HXGE_PARAM_ARRAY_ALLOC_MASK 0xffff000000000000ULL
+#define HXGE_PARAM_ARRAY_ALLOC_SHIFT 48ULL
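+
+/*
+ * Example (illustrative): for array parameters, the element count is
+ * carried in the type word itself and can be recovered with
+ *
+ *	cnt = (uint32_t)((pa->type & HXGE_PARAM_ARRAY_CNT_MASK) >>
+ *	    HXGE_PARAM_ARRAY_CNT_SHIFT);
+ *
+ * where pa is a p_hxge_param_t (defined below).
+ */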
+
+typedef struct _hxge_param_t {
+ int (*getf)();
+ int (*setf)(); /* null for read only */
+ uint64_t type; /* R/W, Common/Port, ... */
+ uint64_t minimum;
+ uint64_t maximum;
+ uint64_t value; /* for array params, pointer to value array */
+ uint64_t old_value; /* for array params, pointer to old_value array */
+ char *fcode_name;
+ char *name;
+} hxge_param_t, *p_hxge_param_t;
+
+
+typedef enum {
+ hxge_lb_normal,
+ hxge_lb_mac10g
+} hxge_lb_t;
+
+enum hxge_mac_state {
+ HXGE_MAC_STOPPED = 0,
+ HXGE_MAC_STARTED
+};
+
+typedef struct _filter_t {
+ uint32_t all_phys_cnt;
+ uint32_t all_multicast_cnt;
+ uint32_t all_sap_cnt;
+} filter_t, *p_filter_t;
+
+typedef struct _hxge_port_stats_t {
+ hxge_lb_t lb_mode;
+ uint32_t poll_mode;
+} hxge_port_stats_t, *p_hxge_port_stats_t;
+
+
+typedef struct _hxge_peu_sys_stats {
+ uint32_t spc_acc_err;
+ uint32_t tdc_pioacc_err;
+ uint32_t rdc_pioacc_err;
+ uint32_t pfc_pioacc_err;
+ uint32_t vmac_pioacc_err;
+ uint32_t cpl_hdrq_parerr;
+ uint32_t cpl_dataq_parerr;
+ uint32_t retryram_xdlh_parerr;
+ uint32_t retrysotram_xdlh_parerr;
+ uint32_t p_hdrq_parerr;
+ uint32_t p_dataq_parerr;
+ uint32_t np_hdrq_parerr;
+ uint32_t np_dataq_parerr;
+ uint32_t eic_msix_parerr;
+ uint32_t hcr_parerr;
+} hxge_peu_sys_stats_t, *p_hxge_peu_sys_stats_t;
+
+
+typedef struct _hxge_stats_t {
+ /*
+ * Overall structure size
+ */
+ size_t stats_size;
+
+ kstat_t *ksp;
+ kstat_t *rdc_ksp[HXGE_MAX_RDCS];
+ kstat_t *tdc_ksp[HXGE_MAX_TDCS];
+ kstat_t *rdc_sys_ksp;
+ kstat_t *tdc_sys_ksp;
+ kstat_t *pfc_ksp;
+ kstat_t *vmac_ksp;
+ kstat_t *port_ksp;
+ kstat_t *mmac_ksp;
+ kstat_t *peu_sys_ksp;
+
+ hxge_mac_stats_t mac_stats;
+ hxge_vmac_stats_t vmac_stats; /* VMAC Statistics */
+
+ hxge_rx_ring_stats_t rdc_stats[HXGE_MAX_RDCS]; /* per rdc stats */
+ hxge_rdc_sys_stats_t rdc_sys_stats; /* RDC system stats */
+
+ hxge_tx_ring_stats_t tdc_stats[HXGE_MAX_TDCS]; /* per tdc stats */
+ hxge_tdc_sys_stats_t tdc_sys_stats; /* TDC system stats */
+
+ hxge_pfc_stats_t pfc_stats; /* pfc stats */
+ hxge_port_stats_t port_stats; /* port stats */
+ hxge_mmac_stats_t mmac_stats; /* Multi mac. stats */
+
+ hxge_peu_sys_stats_t peu_sys_stats; /* PEU system stats */
+} hxge_stats_t, *p_hxge_stats_t;
+
+typedef struct _hxge_intr_t {
+ boolean_t intr_registered; /* interrupts are registered */
+ boolean_t intr_enabled; /* interrupts are enabled */
+ boolean_t niu_msi_enable; /* debug or configurable? */
+ uint8_t nldevs; /* # of logical devices */
+ int intr_types; /* interrupt types supported */
+ int intr_type; /* interrupt type to add */
+ int msi_intx_cnt; /* # msi/intx ints returned */
+ int intr_added; /* # ints actually needed */
+ int intr_cap; /* interrupt capabilities */
+ size_t intr_size; /* size of array to allocate */
+ ddi_intr_handle_t *htable; /* For array of interrupts */
+ /* Interrupt priority */
+ int pri;
+} hxge_intr_t, *p_hxge_intr_t;
+
+typedef struct _hxge_ldgv_t {
+ uint8_t ndma_ldvs;
+ uint8_t nldvs;
+ uint8_t start_ldg;
+ uint8_t maxldgs;
+ uint8_t maxldvs;
+ uint8_t ldg_intrs;
+ uint32_t tmres;
+ p_hxge_ldg_t ldgp;
+ p_hxge_ldv_t ldvp;
+ p_hxge_ldv_t ldvp_syserr;
+} hxge_ldgv_t, *p_hxge_ldgv_t;
+
+/*
+ * Hydra Device instance state information.
+ * Each instance is dynamically allocated on first attach.
+ */
+struct _hxge_t {
+ dev_info_t *dip; /* device instance */
+ dev_info_t *p_dip; /* Parent's device instance */
+ int instance; /* instance number */
+ uint32_t drv_state; /* driver state bit flags */
+ uint64_t hxge_debug_level; /* driver debug level flags */
+ kmutex_t genlock[1];
+ enum hxge_mac_state hxge_mac_state;
+ ddi_softintr_t resched_id; /* reschedule callback */
+ boolean_t resched_needed;
+ boolean_t resched_running;
+
+ p_dev_regs_t dev_regs;
+ hpi_handle_t hpi_handle;
+ hpi_handle_t hpi_pci_handle;
+ hpi_handle_t hpi_reg_handle;
+ hpi_handle_t hpi_msi_handle;
+
+ hxge_vmac_t vmac;
+ hxge_classify_t classifier;
+
+ mac_handle_t mach; /* mac module handle */
+
+ p_hxge_stats_t statsp;
+ uint32_t param_count;
+ p_hxge_param_t param_arr;
+ hxge_hw_list_t *hxge_hw_p; /* pointer to per-Hydra data */
+ uint8_t nrdc;
+ uint8_t rdc[HXGE_MAX_RDCS];
+ uint8_t ntdc;
+ uint8_t tdc[HXGE_MAX_TDCS];
+
+ hxge_intr_t hxge_intr_type;
+ hxge_dma_pt_cfg_t pt_config;
+ hxge_class_pt_cfg_t class_config;
+
+ /* Logical device and group data structures. */
+ p_hxge_ldgv_t ldgvp;
+
+ caddr_t param_list; /* Parameter list */
+
+ ether_addr_st factaddr; /* factory mac address */
+ ether_addr_st ouraddr; /* individual address */
+ kmutex_t ouraddr_lock; /* lock to protect ouraddr */
+
+ ddi_iblock_cookie_t interrupt_cookie;
+
+ /*
+ * Blocks of memory may be pre-allocated by the
+ * partition manager or the driver. They may include
+ * blocks for configuration and buffers. The idea is
+ * to preallocate big blocks of contiguous areas in
+ * system memory (i.e. with IOMMU). These blocks will
+ * then be broken up into a fixed number of blocks of
+ * equal size (4K, 8K, 16K or
+ * 32K) in the case of buffer blocks. For systems that
+ * do not support DVMA, more than one big block will be
+ * allocated.
+ */
+ uint32_t rx_default_block_size;
+ hxge_rx_block_size_t rx_bksize_code;
+
+ p_hxge_dma_pool_t rx_buf_pool_p;
+ p_hxge_dma_pool_t rx_cntl_pool_p;
+
+ p_hxge_dma_pool_t tx_buf_pool_p;
+ p_hxge_dma_pool_t tx_cntl_pool_p;
+
+ /* Receive buffer block ring and completion ring. */
+ p_rx_rbr_rings_t rx_rbr_rings;
+ p_rx_rcr_rings_t rx_rcr_rings;
+ p_rx_mbox_areas_t rx_mbox_areas_p;
+
+ uint32_t start_rdc;
+ uint32_t max_rdcs;
+
+ /* Transmit descriptors rings */
+ p_tx_rings_t tx_rings;
+ p_tx_mbox_areas_t tx_mbox_areas_p;
+
+ uint32_t start_tdc;
+ uint32_t max_tdcs;
+ uint32_t tdc_mask;
+
+ ddi_dma_handle_t dmasparehandle;
+
+ ulong_t sys_page_sz;
+ ulong_t sys_page_mask;
+ int suspended;
+
+ filter_t filter; /* Current instance filter */
+ p_hash_filter_t hash_filter; /* Multicast hash filter. */
+ krwlock_t filter_lock; /* Lock to protect filters. */
+
+ ulong_t sys_burst_sz;
+ timeout_id_t hxge_timerid;
+ uint8_t msg_min;
+
+ uint16_t intr_timeout;
+ uint16_t intr_threshold;
+
+ rtrace_t rtrace;
+ int fm_capabilities; /* FMA capabilities */
+
+ uint32_t hxge_port_rbr_size;
+ uint32_t hxge_port_rcr_size;
+ uint32_t hxge_port_tx_ring_size;
+ hxge_mmac_t hxge_mmac_info;
+};
+
+/*
+ * Driver state flags.
+ */
+#define STATE_REGS_MAPPED 0x000000001 /* device registers mapped */
+#define STATE_KSTATS_SETUP 0x000000002 /* kstats allocated */
+#define STATE_NODE_CREATED 0x000000004 /* device node created */
+#define STATE_HW_CONFIG_CREATED 0x000000008 /* hardware properties */
+#define STATE_HW_INITIALIZED 0x000000010 /* hardware initialized */
+
+typedef struct _hxge_port_kstat_t {
+ /*
+ * Transceiver state information.
+ */
+ kstat_named_t cap_autoneg;
+ kstat_named_t cap_10gfdx;
+
+ /*
+ * Link partner capabilities.
+ */
+ kstat_named_t lp_cap_autoneg;
+ kstat_named_t lp_cap_10gfdx;
+
+ /*
+ * Shared link setup.
+ */
+ kstat_named_t link_speed;
+ kstat_named_t link_duplex;
+ kstat_named_t link_up;
+
+ /*
+ * Loopback mode currently in use by the port.
+ */
+ kstat_named_t lb_mode;
+
+ kstat_named_t tx_max_pend;
+ kstat_named_t rx_jumbo_pkts;
+
+ /*
+ * Misc MAC statistics.
+ */
+ kstat_named_t ifspeed;
+ kstat_named_t promisc;
+} hxge_port_kstat_t, *p_hxge_port_kstat_t;
+
+typedef struct _hxge_rdc_kstat {
+ /*
+ * Receive DMA channel statistics.
+ * This structure needs to be consistent with hxge_rdc_stat_index_t
+ * in hxge_kstats.c
+ */
+ kstat_named_t ipackets;
+ kstat_named_t rbytes;
+ kstat_named_t errors;
+ kstat_named_t jumbo_pkts;
+
+ kstat_named_t rcr_unknown_err;
+ kstat_named_t rcr_sha_par_err;
+ kstat_named_t rbr_pre_par_err;
+ kstat_named_t rbr_pre_emty;
+
+ kstat_named_t rcr_shadow_full;
+ kstat_named_t rbr_tmout;
+ kstat_named_t peu_resp_err;
+
+ kstat_named_t ctrl_fifo_ecc_err;
+ kstat_named_t data_fifo_ecc_err;
+
+ kstat_named_t rcrfull;
+ kstat_named_t rbr_empty;
+ kstat_named_t rbrfull;
+
+ kstat_named_t rcr_to;
+ kstat_named_t rcr_thresh;
+} hxge_rdc_kstat_t, *p_hxge_rdc_kstat_t;
+
+typedef struct _hxge_rdc_sys_kstat {
+ /*
+ * Receive DMA system statistics.
+ * This structure needs to be consistent with hxge_rdc_sys_stat_idx_t
+ * in hxge_kstats.c
+ */
+ kstat_named_t ctrl_fifo_sec;
+ kstat_named_t ctrl_fifo_ded;
+ kstat_named_t data_fifo_sec;
+ kstat_named_t data_fifo_ded;
+} hxge_rdc_sys_kstat_t, *p_hxge_rdc_sys_kstat_t;
+
+typedef struct _hxge_tdc_kstat {
+ /*
+ * Transmit DMA channel statistics.
+ * This structure needs to be consistent with hxge_tdc_stats_index_t
+ * in hxge_kstats.c
+ */
+ kstat_named_t opackets;
+ kstat_named_t obytes;
+ kstat_named_t obytes_with_pad;
+ kstat_named_t oerrors;
+ kstat_named_t tx_inits;
+ kstat_named_t tx_no_buf;
+
+ kstat_named_t peu_resp_err;
+ kstat_named_t pkt_size_err;
+ kstat_named_t tx_rng_oflow;
+ kstat_named_t pkt_size_hdr_err;
+ kstat_named_t runt_pkt_drop_err;
+ kstat_named_t pref_par_err;
+ kstat_named_t tdr_pref_cpl_to;
+ kstat_named_t pkt_cpl_to;
+ kstat_named_t invalid_sop;
+ kstat_named_t unexpected_sop;
+
+ kstat_named_t count_hdr_size_err;
+ kstat_named_t count_runt;
+ kstat_named_t count_abort;
+
+ kstat_named_t tx_starts;
+ kstat_named_t tx_no_desc;
+ kstat_named_t tx_dma_bind_fail;
+ kstat_named_t tx_hdr_pkts;
+ kstat_named_t tx_ddi_pkts;
+ kstat_named_t tx_jumbo_pkts;
+ kstat_named_t tx_max_pend;
+ kstat_named_t tx_marks;
+} hxge_tdc_kstat_t, *p_hxge_tdc_kstat_t;
+
+typedef struct _hxge_tdc_sys_kstat {
+ /*
+ * Transmit DMA system statistics.
+ * This structure needs to be consistent with hxge_tdc_sys_stat_idx_t
+ * in hxge_kstats.c
+ */
+ kstat_named_t reord_tbl_par_err;
+ kstat_named_t reord_buf_ded_err;
+ kstat_named_t reord_buf_sec_err;
+} hxge_tdc_sys_kstat_t, *p_hxge_tdc_sys_kstat_t;
+
+typedef struct _hxge_vmac_kstat {
+ /*
+ * VMAC statistics.
+ * This structure needs to be consistent with hxge_vmac_stat_index_t
+ * in hxge_kstats.c
+ */
+ kstat_named_t tx_frame_cnt;
+ kstat_named_t tx_byte_cnt;
+
+ kstat_named_t rx_frame_cnt;
+ kstat_named_t rx_byte_cnt;
+ kstat_named_t rx_drop_frame_cnt;
+ kstat_named_t rx_drop_byte_cnt;
+ kstat_named_t rx_crc_cnt;
+ kstat_named_t rx_pause_cnt;
+ kstat_named_t rx_bcast_fr_cnt;
+ kstat_named_t rx_mcast_fr_cnt;
+} hxge_vmac_kstat_t, *p_hxge_vmac_kstat_t;
+
+typedef struct _hxge_pfc_kstat {
+ /*
+ * This structure needs to be consistent with hxge_pfc_stat_index_t
+ * in hxge_kstats.c
+ */
+ kstat_named_t pfc_pkt_drop;
+ kstat_named_t pfc_tcam_parity_err;
+ kstat_named_t pfc_vlan_parity_err;
+ kstat_named_t pfc_bad_cs_count;
+ kstat_named_t pfc_drop_count;
+ kstat_named_t pfc_tcp_ctrl_drop;
+ kstat_named_t pfc_l2_addr_drop;
+ kstat_named_t pfc_class_code_drop;
+ kstat_named_t pfc_tcam_drop;
+ kstat_named_t pfc_vlan_drop;
+} hxge_pfc_kstat_t, *p_hxge_pfc_kstat_t;
+
+typedef struct _hxge_mmac_kstat {
+ /*
+ * This structure needs to be consistent with hxge_mmac_stat_index_t
+ * in hxge_kstats.c
+ */
+ kstat_named_t mmac_max_addr_cnt;
+ kstat_named_t mmac_avail_addr_cnt;
+ kstat_named_t mmac_addr1;
+ kstat_named_t mmac_addr2;
+ kstat_named_t mmac_addr3;
+ kstat_named_t mmac_addr4;
+ kstat_named_t mmac_addr5;
+ kstat_named_t mmac_addr6;
+ kstat_named_t mmac_addr7;
+ kstat_named_t mmac_addr8;
+ kstat_named_t mmac_addr9;
+ kstat_named_t mmac_addr10;
+ kstat_named_t mmac_addr11;
+ kstat_named_t mmac_addr12;
+ kstat_named_t mmac_addr13;
+ kstat_named_t mmac_addr14;
+ kstat_named_t mmac_addr15;
+ kstat_named_t mmac_addr16;
+} hxge_mmac_kstat_t, *p_hxge_mmac_kstat_t;
+
+typedef struct _hxge_peu_sys_kstat {
+ /*
+ * This structure needs to be consistent with hxge_peu_sys_stat_idx_t
+ * in hxge_kstats.c
+ */
+ kstat_named_t spc_acc_err;
+ kstat_named_t tdc_pioacc_err;
+ kstat_named_t rdc_pioacc_err;
+ kstat_named_t pfc_pioacc_err;
+ kstat_named_t vmac_pioacc_err;
+ kstat_named_t cpl_hdrq_parerr;
+ kstat_named_t cpl_dataq_parerr;
+ kstat_named_t retryram_xdlh_parerr;
+ kstat_named_t retrysotram_xdlh_parerr;
+ kstat_named_t p_hdrq_parerr;
+ kstat_named_t p_dataq_parerr;
+ kstat_named_t np_hdrq_parerr;
+ kstat_named_t np_dataq_parerr;
+ kstat_named_t eic_msix_parerr;
+ kstat_named_t hcr_parerr;
+} hxge_peu_sys_kstat_t, *p_hxge_peu_sys_kstat_t;
+
+/*
+ * Prototype definitions.
+ */
+hxge_status_t hxge_init(p_hxge_t);
+void hxge_uninit(p_hxge_t);
+void hxge_get64(p_hxge_t hxgep, p_mblk_t mp);
+void hxge_put64(p_hxge_t hxgep, p_mblk_t mp);
+
+typedef void (*fptrv_t)();
+timeout_id_t hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec);
+void hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid);
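+
+/*
+ * Illustrative use of the timer helpers above (hxge_check_hw_state is
+ * a hypothetical callback name):
+ *
+ *	hxgep->hxge_timerid = hxge_start_timer(hxgep,
+ *	    (fptrv_t)hxge_check_hw_state, HXGE_CHECK_TIMER);
+ *	...
+ *	hxge_stop_timer(hxgep, hxgep->hxge_timerid);
+ */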
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_classify.h b/usr/src/uts/common/io/hxge/hxge_classify.h
new file mode 100644
index 0000000000..dc6c296898
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_classify.h
@@ -0,0 +1,97 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HXGE_CLASSIFY_H
+#define _HXGE_CLASSIFY_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hxge_pfc.h>
+#include <hxge_pfc_hw.h>
+#include <hpi_pfc.h>
+
+
+/*
+ * The following are the user configurable ether types. Refer to
+ * /usr/include/sys/ethernet.h
+ *
+ * ETHERTYPE_PUP (0x0200)
+ * ETHERTYPE_802_MIN (0x0600)
+ * ETHERTYPE_IP (0x0800)
+ * ETHERTYPE_ARP (0x0806)
+ * ETHERTYPE_REVARP (0x8035)
+ * ETHERTYPE_AT (0x809b)
+ * ETHERTYPE_AARP (0x80f3)
+ * ETHERTYPE_IPV6 (0x86dd)
+ * ETHERTYPE_SLOW (0x8809)
+ * ETHERTYPE_PPPOED (0x8863)
+ * ETHERTYPE_PPPOES (0x8864)
+ * ETHERTYPE_MAX (0xffff)
+ */
+
+/*
+ * Used for ip class tcam key config
+ */
+#define HXGE_CLASS_TCAM_LOOKUP 0x10000
+#define HXGE_CLASS_DISCARD 0x20000
+#define HXGE_CLASS_VALID 0x40000
+#define HXGE_CLASS_ETHER_TYPE_MASK 0x0FFFF
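+
+/*
+ * Example (illustrative): a class_cfg word marking a valid user class
+ * for ETHERTYPE_IP (0x0800) with TCAM lookup enabled would be
+ *
+ *	HXGE_CLASS_VALID | HXGE_CLASS_TCAM_LOOKUP | 0x0800 = 0x50800
+ *
+ * The low 16 bits carry the ether type (HXGE_CLASS_ETHER_TYPE_MASK).
+ */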
+
+typedef struct _tcam_flow_spec {
+ hxge_tcam_entry_t tce;
+ uint64_t flags;
+ uint64_t user_info;
+} tcam_flow_spec_t, *p_tcam_flow_spec_t;
+
+typedef struct {
+ uint16_t ether_type;
+ int count; /* Number of TCAM entries using this class. */
+} hxge_class_usage_t;
+
+#define HXGE_PFC_HW_RESET 0x1
+#define HXGE_PFC_HW_INIT 0x2
+#define HXGE_PFC_SW_INIT 0x4
+
+typedef struct _hxge_classify {
+ uint32_t tcam_size;
+ uint32_t n_used;
+ uint32_t state;
+ p_hxge_pfc_stats_t pfc_stats;
+
+ tcam_flow_spec_t *tcam_entries;
+ uint8_t tcam_location;
+ hxge_class_usage_t class_usage[TCAM_CLASS_MAX];
+} hxge_classify_t, *p_hxge_classify_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HXGE_CLASSIFY_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_common.h b/usr/src/uts/common/io/hxge/hxge_common.h
new file mode 100644
index 0000000000..4f3000582f
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_common.h
@@ -0,0 +1,165 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_COMMON_H
+#define _SYS_HXGE_HXGE_COMMON_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <hxge_defs.h>
+#include <hxge_pfc.h>
+#include <hxge_common_impl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define HXGE_DMA_START B_TRUE
+#define HXGE_DMA_STOP B_FALSE
+#define HXGE_TIMER_RESO 2
+#define HXGE_TIMER_LDG 2
+
+/*
+ * Receive and Transmit DMA definitions
+ */
+#ifdef _DMA_USES_VIRTADDR
+#define HXGE_DMA_BLOCK 1
+#else
+#define HXGE_DMA_BLOCK (64 * 64)
+#endif
+
+#define HXGE_RBR_RBB_MIN (128)
+#define HXGE_RBR_RBB_MAX (64 * 128 - 1)
+#define HXGE_RBR_RBB_DEFAULT (64 * 16) /* 1024 entries */
+#define HXGE_RCR_MIN (HXGE_RBR_RBB_MIN * 2)
+#define HXGE_RCR_MAX (65504) /* 2^16 - 32 */
+
+#if defined(_BIG_ENDIAN)
+#define HXGE_RCR_DEFAULT (HXGE_RBR_RBB_DEFAULT * 8)
+#else /* _BIG_ENDIAN */
+#ifdef USE_RX_BIG_BUF
+#define HXGE_RCR_DEFAULT (HXGE_RBR_RBB_DEFAULT * 8)
+#else
+#define HXGE_RCR_DEFAULT (HXGE_RBR_RBB_DEFAULT * 4)
+#endif
+#endif /* _BIG_ENDIAN */
+
+#define HXGE_TX_RING_DEFAULT (1024)
+#define HXGE_TX_RING_MAX (64 * 128 - 1)
+
+#define RBR_BKSIZE_4K 0
+#define RBR_BKSIZE_8K 1
+#define RBR_BKSIZE_4K_BYTES (4 * 1024)
+
+#define RBR_BUFSZ2_2K 0
+#define RBR_BUFSZ2_4K 1
+#define RBR_BUFSZ2_2K_BYTES (2 * 1024)
+#define RBR_BUFSZ2_4K_BYTES (4 * 1024)
+
+#define RBR_BUFSZ1_1K 0
+#define RBR_BUFSZ1_2K 1
+#define RBR_BUFSZ1_1K_BYTES 1024
+#define RBR_BUFSZ1_2K_BYTES (2 * 1024)
+
+#define RBR_BUFSZ0_256B 0
+#define RBR_BUFSZ0_512B 1
+#define RBR_BUFSZ0_1K 2
+#define RBR_BUFSZ0_256_BYTES 256
+#define RBR_BUFSZ0_512B_BYTES 512
+#define RBR_BUFSZ0_1K_BYTES (1024)
+
+/*
+ * VLAN table configuration
+ */
+typedef struct hxge_mv_cfg {
+ uint8_t flag; /* 0: unconfigured, 1: configured */
+} hxge_mv_cfg_t, *p_hxge_mv_cfg_t;
+
+typedef struct hxge_param_map {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd2:2; /* [30:31] rsrvd */
+ uint32_t remove:1; /* [29] Remove */
+ uint32_t pref:1; /* [28] preference */
+ uint32_t rsrv:4; /* [27:24] reserved */
+ uint32_t map_to:8; /* [23:16] map to resource */
+ uint32_t param_id:16; /* [15:0] Param ID */
+#else
+ uint32_t param_id:16; /* [15:0] Param ID */
+ uint32_t map_to:8; /* [23:16] map to resource */
+ uint32_t rsrv:4; /* [27:24] reserved */
+ uint32_t pref:1; /* [28] preference */
+ uint32_t remove:1; /* [29] Remove */
+ uint32_t rsrvd2:2; /* [30:31] rsrvd */
+#endif
+} hxge_param_map_t, *p_hxge_param_map_t;
+
+typedef struct hxge_hw_pt_cfg {
+ uint32_t start_tdc; /* start TDC (0 - 3) */
+ uint32_t max_tdcs; /* max TDC in sequence */
+ uint32_t start_rdc; /* start RDC (0 - 3) */
+ uint32_t max_rdcs; /* max rdc in sequence */
+ uint32_t rx_full_header; /* select the header flag */
+ uint32_t start_ldg; /* starting logical group # */
+ uint32_t max_ldgs; /* max logical device group */
+ uint32_t max_ldvs; /* max logical devices */
+} hxge_hw_pt_cfg_t, *p_hxge_hw_pt_cfg_t;
+
+/* per port configuration */
+typedef struct hxge_dma_pt_cfg {
+ hxge_hw_pt_cfg_t hw_config; /* hardware configuration */
+
+ uint32_t alloc_buf_size;
+ uint32_t rbr_size;
+ uint32_t rcr_size;
+} hxge_dma_pt_cfg_t, *p_hxge_dma_pt_cfg_t;
+
+/* classification configuration */
+typedef struct hxge_class_pt_cfg {
+ /* VLAN table */
+ hxge_mv_cfg_t vlan_tbl[VLAN_ID_MAX + 1];
+ /* class config value */
+ uint32_t init_hash;
+ uint32_t class_cfg[TCAM_CLASS_MAX];
+} hxge_class_pt_cfg_t, *p_hxge_class_pt_cfg_t;
+
+typedef struct hxge_hw_list {
+ struct hxge_hw_list *next;
+ hxge_os_mutex_t hxge_cfg_lock;
+ hxge_os_mutex_t hxge_tcam_lock;
+ hxge_os_mutex_t hxge_vlan_lock;
+
+ hxge_dev_info_t *parent_devp;
+ struct _hxge_t *hxge_p;
+ uint32_t ndevs;
+ uint32_t flags;
+ uint32_t magic;
+} hxge_hw_list_t, *p_hxge_hw_list_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_COMMON_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_common_impl.h b/usr/src/uts/common/io/hxge/hxge_common_impl.h
new file mode 100644
index 0000000000..39ad0edf85
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_common_impl.h
@@ -0,0 +1,274 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_COMMON_IMPL_H
+#define _SYS_HXGE_HXGE_COMMON_IMPL_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define HPI_REGH(hpi_handle) (hpi_handle.regh)
+#define HPI_REGP(hpi_handle) (hpi_handle.regp)
+
+#define NO_DEBUG 0x0000000000000000ULL
+#define RX_CTL 0x0000000000000001ULL
+#define TX_CTL 0x0000000000000002ULL
+#define OBP_CTL 0x0000000000000004ULL
+#define VPD_CTL 0x0000000000000008ULL
+#define DDI_CTL 0x0000000000000010ULL
+#define MEM_CTL 0x0000000000000020ULL
+#define IOC_CTL 0x0000000000000040ULL
+#define MOD_CTL 0x0000000000000080ULL
+#define DMA_CTL 0x0000000000000100ULL
+#define STR_CTL 0x0000000000000200ULL
+#define INT_CTL 0x0000000000000400ULL
+#define SYSERR_CTL 0x0000000000000800ULL
+#define KST_CTL 0x0000000000001000ULL
+#define FCRAM_CTL 0x0000000000002000ULL
+#define MAC_CTL 0x0000000000004000ULL
+#define DMA2_CTL 0x0000000000008000ULL
+#define RX2_CTL 0x0000000000010000ULL
+#define TX2_CTL 0x0000000000020000ULL
+#define MEM2_CTL 0x0000000000040000ULL
+#define MEM3_CTL 0x0000000000080000ULL
+#define NEMO_CTL 0x0000000000100000ULL
+#define NDD_CTL 0x0000000000200000ULL
+#define NDD2_CTL 0x0000000000400000ULL
+#define PFC_CTL 0x0000000000800000ULL
+#define CFG_CTL 0x0000000001000000ULL
+#define CFG2_CTL 0x0000000002000000ULL
+#define VIR_CTL 0x0000000004000000ULL
+#define VIR2_CTL 0x0000000008000000ULL
+#define HXGE_NOTE 0x0000000010000000ULL
+#define HXGE_ERR_CTL 0x0000000020000000ULL
+#define MAC_INT_CTL 0x0000000040000000ULL
+#define RX_INT_CTL 0x0000000080000000ULL
+#define TX_ERR_CTL 0x0000000100000000ULL
+#define DDI_INT_CTL 0x0000000200000000ULL
+#define DUMP_ALWAYS 0x2000000000000000ULL
+
+/* HPI Debug and Error defines */
+#define HPI_RDC_CTL 0x0000000000000001ULL
+#define HPI_TDC_CTL 0x0000000000000002ULL
+#define HPI_VMAC_CTL 0x0000000000000004ULL
+#define HPI_PFC_CTL 0x0000000000000008ULL
+#define HPI_VIR_CTL 0x0000000000000010ULL
+#define HPI_PIO_CTL 0x0000000000000020ULL
+#define HPI_VIO_CTL 0x0000000000000040ULL
+#define HPI_REG_CTL 0x0000000000000080ULL
+#define HPI_ERR_CTL 0x0000000000000100ULL
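+
+/*
+ * Example (illustrative): the masks above OR together, so a debug
+ * level of (RX_CTL | TX_CTL | DDI_CTL) = 0x13 traces only the
+ * receive, transmit and DDI paths, while HPI tracing is enabled
+ * separately, e.g. (HPI_VMAC_CTL | HPI_ERR_CTL) = 0x104.
+ */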
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/dditypes.h>
+#include <sys/ethernet.h>
+
+#ifdef HXGE_DEBUG
+#define HXGE_DEBUG_MSG(params) hxge_debug_msg params
+#else
+#define HXGE_DEBUG_MSG(params)
+#endif
+
+#define HXGE_ERROR_MSG(params) hxge_debug_msg params
+
+typedef kmutex_t hxge_os_mutex_t;
+typedef krwlock_t hxge_os_rwlock_t;
+
+typedef dev_info_t hxge_dev_info_t;
+typedef ddi_iblock_cookie_t hxge_intr_cookie_t;
+
+typedef ddi_acc_handle_t hxge_os_acc_handle_t;
+typedef hxge_os_acc_handle_t hpi_reg_handle_t;
+typedef uint64_t hpi_reg_ptr_t;
+
+typedef ddi_dma_handle_t hxge_os_dma_handle_t;
+typedef struct _hxge_dma_common_t hxge_os_dma_common_t;
+typedef struct _hxge_block_mv_t hxge_os_block_mv_t;
+typedef frtn_t hxge_os_frtn_t;
+
+#define HXGE_MUTEX_DRIVER MUTEX_DRIVER
+#define MUTEX_INIT(lock, name, type, arg) \
+ mutex_init(lock, name, type, arg)
+#define MUTEX_ENTER(lock) mutex_enter(lock)
+#define MUTEX_TRY_ENTER(lock) mutex_tryenter(lock)
+#define MUTEX_EXIT(lock) mutex_exit(lock)
+#define MUTEX_DESTROY(lock) mutex_destroy(lock)
+
+#define RW_INIT(lock, name, type, arg) rw_init(lock, name, type, arg)
+#define RW_ENTER_WRITER(lock) rw_enter(lock, RW_WRITER)
+#define RW_ENTER_READER(lock) rw_enter(lock, RW_READER)
+#define RW_TRY_ENTER(lock, type) rw_tryenter(lock, type)
+#define RW_EXIT(lock) rw_exit(lock)
+#define RW_DESTROY(lock) rw_destroy(lock)
+#define KMEM_ALLOC(size, flag) kmem_alloc(size, flag)
+#define KMEM_ZALLOC(size, flag) kmem_zalloc(size, flag)
+#define KMEM_FREE(buf, size) kmem_free(buf, size)
+
+#define HXGE_DELAY(microseconds) (drv_usecwait(microseconds))
+
+#define HXGE_PIO_READ8(handle, devaddr, offset) \
+ (ddi_get8(handle, (uint8_t *)((caddr_t)devaddr + offset)))
+
+#define HXGE_PIO_READ16(handle, devaddr, offset) \
+ (ddi_get16(handle, (uint16_t *)((caddr_t)devaddr + offset)))
+
+#define HXGE_PIO_READ32(handle, devaddr, offset) \
+ (ddi_get32(handle, (uint32_t *)((caddr_t)devaddr + offset)))
+
+#define HXGE_PIO_READ64(handle, devaddr, offset) \
+ (ddi_get64(handle, (uint64_t *)((caddr_t)devaddr + offset)))
+
+#define HXGE_PIO_WRITE8(handle, devaddr, offset, data) \
+ (ddi_put8(handle, (uint8_t *)((caddr_t)devaddr + offset), data))
+
+#define HXGE_PIO_WRITE16(handle, devaddr, offset, data) \
+ (ddi_put16(handle, (uint16_t *)((caddr_t)devaddr + offset), data))
+
+#define HXGE_PIO_WRITE32(handle, devaddr, offset, data) \
+ (ddi_put32(handle, (uint32_t *)((caddr_t)devaddr + offset), data))
+
+#define HXGE_PIO_WRITE64(handle, devaddr, offset, data) \
+ (ddi_put64(handle, (uint64_t *)((caddr_t)devaddr + offset), data))
+
+#define HXGE_HPI_PIO_READ8(hpi_handle, offset) \
+ (ddi_get8(HPI_REGH(hpi_handle), \
+ (uint8_t *)(HPI_REGP(hpi_handle) + offset)))
+
+#define HXGE_HPI_PIO_READ16(hpi_handle, offset) \
+ (ddi_get16(HPI_REGH(hpi_handle), \
+ (uint16_t *)(HPI_REGP(hpi_handle) + offset)))
+
+#define HXGE_HPI_PIO_READ32(hpi_handle, offset) \
+ (ddi_get32(HPI_REGH(hpi_handle), \
+ (uint32_t *)(HPI_REGP(hpi_handle) + offset)))
+
+#define HXGE_HPI_PIO_READ64(hpi_handle, offset) \
+ (ddi_get64(HPI_REGH(hpi_handle), \
+ (uint64_t *)(HPI_REGP(hpi_handle) + offset)))
+
+#define HXGE_HPI_PIO_WRITE8(hpi_handle, offset, data) \
+ (ddi_put8(HPI_REGH(hpi_handle), \
+ (uint8_t *)(HPI_REGP(hpi_handle) + offset), data))
+
+#define HXGE_HPI_PIO_WRITE16(hpi_handle, offset, data) \
+ (ddi_put16(HPI_REGH(hpi_handle), \
+ (uint16_t *)(HPI_REGP(hpi_handle) + offset), data))
+
+#define HXGE_HPI_PIO_WRITE32(hpi_handle, offset, data) \
+ (ddi_put32(HPI_REGH(hpi_handle), \
+ (uint32_t *)(HPI_REGP(hpi_handle) + offset), data))
+
+#define HXGE_HPI_PIO_WRITE64(hpi_handle, offset, data) \
+ (ddi_put64(HPI_REGH(hpi_handle), \
+ (uint64_t *)(HPI_REGP(hpi_handle) + offset), data))
+
+#define HXGE_MEM_PIO_READ8(hpi_handle) \
+ (ddi_get8(HPI_REGH(hpi_handle), (uint8_t *)HPI_REGP(hpi_handle)))
+
+#define HXGE_MEM_PIO_READ16(hpi_handle) \
+ (ddi_get16(HPI_REGH(hpi_handle), (uint16_t *)HPI_REGP(hpi_handle)))
+
+#define HXGE_MEM_PIO_READ32(hpi_handle) \
+ (ddi_get32(HPI_REGH(hpi_handle), (uint32_t *)HPI_REGP(hpi_handle)))
+
+#define HXGE_MEM_PIO_READ64(hpi_handle) \
+ (ddi_get64(HPI_REGH(hpi_handle), (uint64_t *)HPI_REGP(hpi_handle)))
+
+#define HXGE_MEM_PIO_WRITE8(hpi_handle, data) \
+ (ddi_put8(HPI_REGH(hpi_handle), (uint8_t *)HPI_REGP(hpi_handle), data))
+
+#define HXGE_MEM_PIO_WRITE16(hpi_handle, data) \
+ (ddi_put16(HPI_REGH(hpi_handle), \
+ (uint16_t *)HPI_REGP(hpi_handle), data))
+
+#define HXGE_MEM_PIO_WRITE32(hpi_handle, data) \
+ (ddi_put32(HPI_REGH(hpi_handle), \
+ (uint32_t *)HPI_REGP(hpi_handle), data))
+
+#define HXGE_MEM_PIO_WRITE64(hpi_handle, data) \
+ (ddi_put64(HPI_REGH(hpi_handle), \
+ (uint64_t *)HPI_REGP(hpi_handle), data))
+
+#define FM_SERVICE_RESTORED(hxgep) \
+ if (DDI_FM_EREPORT_CAP(hxgep->fm_capabilities)) \
+ ddi_fm_service_impact(hxgep->dip, DDI_SERVICE_RESTORED)
+#define HXGE_FM_REPORT_ERROR(hxgep, chan, ereport_id) \
+ if (DDI_FM_EREPORT_CAP(hxgep->fm_capabilities)) \
+ hxge_fm_report_error(hxgep, chan, ereport_id)
+
+
+#if defined(REG_TRACE)
+#define HXGE_REG_RD64(handle, offset, val_p) {\
+ *(val_p) = HXGE_HPI_PIO_READ64(handle, offset);\
+ hpi_rtrace_update(handle, B_FALSE, &hpi_rtracebuf, (uint32_t)offset, \
+ (uint64_t)(*(val_p)));\
+}
+#define HXGE_REG_WR64(handle, offset, val) {\
+ HXGE_HPI_PIO_WRITE64(handle, (offset), (val));\
+ hpi_rtrace_update(handle, B_TRUE, &hpi_rtracebuf, (uint32_t)offset,\
+ (uint64_t)(val));\
+}
+#elif defined(REG_SHOW)
+ /*
+ * Send 0xbadbad to tell rt_show_reg that we do not have
+ * a valid RTBUF index to pass
+ */
+#define HXGE_REG_RD64(handle, offset, val_p) {\
+ *(val_p) = HXGE_HPI_PIO_READ64(handle, offset);\
+ rt_show_reg(0xbadbad, B_FALSE, (uint32_t)offset, (uint64_t)(*(val_p)));\
+}
+/*
+ * Send 0xbadbad to tell rt_show_reg that we do not have
+ * a valid RTBUF index to pass
+ */
+#define HXGE_REG_WR64(handle, offset, val) {\
+ HXGE_HPI_PIO_WRITE64(handle, offset, (val));\
+ rt_show_reg(0xbadbad, B_TRUE, (uint32_t)offset, (uint64_t)(val));\
+}
+#else
+
+#define HXGE_REG_RD64(handle, offset, val_p) {\
+ *(val_p) = HXGE_HPI_PIO_READ64(handle, offset);\
+}
+#define HXGE_REG_RD32(handle, offset, val_p) {\
+ *(val_p) = HXGE_HPI_PIO_READ32(handle, offset);\
+}
+#define HXGE_REG_WR64(handle, offset, val) {\
+ HXGE_HPI_PIO_WRITE64(handle, (offset), (val));\
+}
+#define HXGE_REG_WR32(handle, offset, val) {\
+ HXGE_HPI_PIO_WRITE32(handle, (offset), (val));\
+}
+#endif
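+
+/*
+ * Example (illustrative): a read-modify-write through the macros
+ * above; VMAC_RX_CFG is one of the register offsets defined in the
+ * hardware headers.
+ *
+ *	uint64_t val;
+ *
+ *	HXGE_REG_RD64(handle, VMAC_RX_CFG, &val);
+ *	HXGE_REG_WR64(handle, VMAC_RX_CFG, val | 1ULL);
+ */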
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_COMMON_IMPL_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_defs.h b/usr/src/uts/common/io/hxge/hxge_defs.h
new file mode 100644
index 0000000000..075904ff0b
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_defs.h
@@ -0,0 +1,146 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_DEFS_H
+#define _SYS_HXGE_HXGE_DEFS_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) && \
+ !defined(__BIG_ENDIAN) && !defined(__LITTLE_ENDIAN)
+#error Host endianness not defined
+#endif
+
+#if !defined(_BIT_FIELDS_HTOL) && !defined(_BIT_FIELDS_LTOH) && \
+ !defined(__BIT_FIELDS_HTOL) && !defined(__BIT_FIELDS_LTOH)
+#error Bit ordering not defined
+#endif
+
+/* RDC/TDC CSR size */
+#define DMA_CSR_SIZE 2048
+
+/*
+ * Define the Default RBR, RCR
+ */
+#define RBR_DEFAULT_MAX_BLKS 4096 /* each entry (16 blockaddr/64B) */
+#define RBR_NBLK_PER_LINE 16 /* 16 block addresses per 64 B line */
+#define RBR_DEFAULT_MAX_LEN 65472 /* 2^16 - 64 */
+#define RBR_DEFAULT_MIN_LEN 64 /* multiple of 64 */
+
+#define SW_OFFSET_NO_OFFSET 0
+#define SW_OFFSET_64 1 /* 64 bytes */
+#define SW_OFFSET_128 2 /* 128 bytes */
+#define SW_OFFSET_INVALID 3
+
+/*
+ * RBR block descriptor is 32 bits (bits [43:12])
+ */
+#define RBR_BKADDR_SHIFT 12
+#define RCR_DEFAULT_MAX_BLKS 4096 /* each entry (8 blockaddr/64B) */
+#define RCR_NBLK_PER_LINE 8 /* 8 block addresses per 64 B line */
+#define RCR_DEFAULT_MAX_LEN (RCR_DEFAULT_MAX_BLKS)
+#define RCR_DEFAULT_MIN_LEN 32
+
+/* DMA Channels. */
+#define HXGE_MAX_DMCS (HXGE_MAX_RDCS + HXGE_MAX_TDCS)
+#define HXGE_MAX_RDCS 4
+#define HXGE_MAX_TDCS 4
+
+#define VLAN_ETHERTYPE (0x8100)
+
+/* 256 total, each blade gets 42 */
+#define TCAM_HXGE_TCAM_MAX_ENTRY 42
+
+/*
+ * Locate the DMA channel start offset (PIO_VADDR)
+ * (DMA virtual address space of the PIO block)
+ */
+/* TX_RNG_CFIG is not used since we are not using VADDR. */
+#define TX_RNG_CFIG 0x1000000
+#define TDMC_PIOVADDR_OFFSET(channel) (2 * DMA_CSR_SIZE * channel)
+#define RDMC_PIOVADDR_OFFSET(channel) (TDMC_OFFSET(channel) + DMA_CSR_SIZE)
+
+/*
+ * PIO access using the DMC block directly (DMC)
+ */
+#define DMC_OFFSET(channel) (DMA_CSR_SIZE * channel)
+#define TDMC_OFFSET(channel) (TX_RNG_CFIG + DMA_CSR_SIZE * channel)
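+
+/*
+ * Example (illustrative): with DMA_CSR_SIZE at 2048, TDC channel 2
+ * sits at TDMC_OFFSET(2) = 0x1000000 + 2 * 2048 = 0x1001000, and
+ * RDC channel 2 at DMC_OFFSET(2) = 0x1000.
+ */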
+
+#ifdef SOLARIS
+#ifndef i386
+#define _BIT_FIELDS_BIG_ENDIAN _BIT_FIELDS_HTOL
+#else
+#define _BIT_FIELDS_LITTLE_ENDIAN _BIT_FIELDS_LTOH
+#endif
+#else
+#define _BIT_FIELDS_LITTLE_ENDIAN _LITTLE_ENDIAN_BITFIELD
+#endif
+
+/*
+ * The following macros expect unsigned input values.
+ */
+#define TXDMA_CHANNEL_VALID(cn) (cn < HXGE_MAX_TDCS)
+
+/*
+ * Logical device definitions.
+ */
+#define HXGE_INT_MAX_LD 32
+#define HXGE_INT_MAX_LDG 32
+
+#define HXGE_RDMA_LD_START 0 /* 0 - 3 with 4 - 7 reserved */
+#define HXGE_TDMA_LD_START 8 /* 8 - 11 with 12 - 15 reserved */
+#define HXGE_VMAC_LD 16
+#define HXGE_PFC_LD 17
+#define HXGE_NMAC_LD 18
+#define HXGE_MBOX_LD_START 20 /* 20 - 23 for SW Mbox */
+#define HXGE_SYS_ERROR_LD 31
+
+#define LDG_VALID(n) (n < HXGE_INT_MAX_LDG)
+#define LD_VALID(n) (n < HXGE_INT_MAX_LD)
+#define LD_RXDMA_LD_VALID(n) (n < HXGE_MAX_RDCS)
+#define LD_TXDMA_LD_VALID(n) (n >= HXGE_MAX_RDCS && \
+ ((n - HXGE_MAX_RDCS) < HXGE_MAX_TDCS))
+
+#define LD_TIMER_MAX 0x3f
+#define LD_INTTIMER_VALID(n) (n <= LD_TIMER_MAX)
+
+/* System Interrupt Data */
+#define SID_VECTOR_MAX 0x1f
+#define SID_VECTOR_VALID(n) (n <= SID_VECTOR_MAX)
+
+#define LD_IM_MASK 0x00000003ULL
+#define LDGTITMRES_RES_MASK 0x000FFFFFULL
+
+#define STD_FRAME_SIZE 1522 /* 1518 + 4 (VLAN tag) */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_DEFS_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_flow.h b/usr/src/uts/common/io/hxge/hxge_flow.h
new file mode 100644
index 0000000000..3cc74619d8
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_flow.h
@@ -0,0 +1,182 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_FLOW_H
+#define _SYS_HXGE_HXGE_FLOW_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <netinet/in.h>
+#define S6_addr32 _S6_un._S6_u32
+
+typedef struct tcpip4_spec_s {
+ in_addr_t ip4src;
+ in_addr_t ip4dst;
+ in_port_t psrc;
+ in_port_t pdst;
+} tcpip4_spec_t;
+
+typedef struct tcpip6_spec_s {
+ struct in6_addr ip6src;
+ struct in6_addr ip6dst;
+ in_port_t psrc;
+ in_port_t pdst;
+} tcpip6_spec_t;
+
+typedef struct udpip4_spec_s {
+ in_addr_t ip4src;
+ in_addr_t ip4dst;
+ in_port_t psrc;
+ in_port_t pdst;
+} udpip4_spec_t;
+
+typedef struct udpip6_spec_s {
+ struct in6_addr ip6src;
+ struct in6_addr ip6dst;
+ in_port_t psrc;
+ in_port_t pdst;
+} udpip6_spec_t;
+
+typedef struct ahip4_spec_s {
+ in_addr_t ip4src;
+ in_addr_t ip4dst;
+ uint32_t spi;
+} ahip4_spec_t;
+
+typedef struct ahip6_spec_s {
+ struct in6_addr ip6src;
+ struct in6_addr ip6dst;
+ uint32_t spi;
+} ahip6_spec_t;
+
+typedef ahip4_spec_t espip4_spec_t;
+typedef ahip6_spec_t espip6_spec_t;
+
+typedef struct rawip4_spec_s {
+ struct in6_addr ip4src;
+ struct in6_addr ip4dst;
+ uint8_t hdata[64];
+} rawip4_spec_t;
+
+typedef struct rawip6_spec_s {
+ struct in6_addr ip6src;
+ struct in6_addr ip6dst;
+ uint8_t hdata[64];
+} rawip6_spec_t;
+
+
+typedef struct ether_spec_s {
+ uint16_t ether_type;
+ uint8_t frame_size;
+ uint8_t eframe[16];
+} ether_spec_t;
+
+
+typedef struct ip_user_spec_s {
+ uint8_t id;
+ uint8_t ip_ver;
+ uint8_t proto;
+ uint8_t tos_mask;
+ uint8_t tos;
+} ip_user_spec_t;
+
+typedef ether_spec_t arpip_spec_t;
+typedef ether_spec_t ether_user_spec_t;
+
+typedef struct flow_spec_s {
+ uint32_t flow_type;
+ union {
+ tcpip4_spec_t tcpip4spec;
+ tcpip6_spec_t tcpip6spec;
+ udpip4_spec_t udpip4spec;
+ udpip6_spec_t udpip6spec;
+ arpip_spec_t arpipspec;
+ ahip4_spec_t ahip4spec;
+ ahip6_spec_t ahip6spec;
+ espip4_spec_t espip4spec;
+ espip6_spec_t espip6spec;
+ rawip4_spec_t rawip4spec;
+ rawip6_spec_t rawip6spec;
+ ether_spec_t etherspec;
+ ip_user_spec_t ip_usr_spec;
+ uint8_t hdata[64];
+ } uh, um; /* entry, mask */
+} flow_spec_t;
+
+#define FSPEC_TCPIP4 0x1 /* TCP/IPv4 Flow */
+#define FSPEC_TCPIP6 0x2 /* TCP/IPv6 */
+#define FSPEC_UDPIP4 0x3 /* UDP/IPv4 */
+#define FSPEC_UDPIP6 0x4 /* UDP/IPv6 */
+#define FSPEC_ARPIP 0x5 /* ARP/IPv4 */
+#define FSPEC_AHIP4 0x6 /* AH/IP4 */
+#define FSPEC_AHIP6 0x7 /* AH/IP6 */
+#define FSPEC_ESPIP4 0x8 /* ESP/IP4 */
+#define FSPEC_ESPIP6 0x9 /* ESP/IP6 */
+#define FSPEC_SCTPIP4 0xA /* SCTP/IP4 */
+#define FSPEC_SCTPIP6 0xB /* SCTP/IP6 */
+#define FSPEC_RAW4 0xC /* RAW/IP4 */
+#define FSPEC_RAW6 0xD /* RAW/IP6 */
+#define FSPEC_ETHER 0xE /* ETHER Programmable */
+#define FSPEC_IP_USR 0xF /* IP Programmable */
+#define FSPEC_HDATA 0x10 /* Pkt Headers eth-da,sa,etype,ip,tcp(Bitmap) */
+
+
+#define TCAM_IPV6_ADDR(m32, ip6addr) { \
+ m32[0] = ip6addr.S6_addr32[0]; \
+ m32[1] = ip6addr.S6_addr32[1]; \
+ m32[2] = ip6addr.S6_addr32[2]; \
+ m32[3] = ip6addr.S6_addr32[3]; \
+ }
+
+
+#define TCAM_IPV4_ADDR(m32, ip4addr) (m32 = ip4addr)
+#define TCAM_IP_PORTS(port32, dp, sp) (port32 = dp | (sp << 16))
+#define TCAM_IP_CLASS(key, mask, class) { \
+ key = class; \
+ mask = 0x1f; \
+ }
+
+#define TCAM_IP_PROTO(key, mask, proto) { \
+ key = proto; \
+ mask = 0xff; \
+ }
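+
+/*
+ * Example (illustrative): filling the entry half of a TCP/IPv4 flow
+ * spec (fs, srcaddr, dstaddr, sport and dport are hypothetical
+ * locals):
+ *
+ *	fs.flow_type = FSPEC_TCPIP4;
+ *	TCAM_IPV4_ADDR(fs.uh.tcpip4spec.ip4src, srcaddr);
+ *	TCAM_IPV4_ADDR(fs.uh.tcpip4spec.ip4dst, dstaddr);
+ *	fs.uh.tcpip4spec.psrc = sport;
+ *	fs.uh.tcpip4spec.pdst = dport;
+ *
+ * The um half is filled the same way with the corresponding masks.
+ */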
+
+
+typedef struct flow_resource_s {
+ uint64_t channel_cookie;
+ uint64_t flow_cookie;
+ uint8_t tcam_location;
+ flow_spec_t flow_spec;
+} flow_resource_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_FLOW_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_fm.c b/usr/src/uts/common/io/hxge/hxge_fm.c
new file mode 100644
index 0000000000..c19c7ff2b2
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_fm.c
@@ -0,0 +1,502 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hxge_impl.h>
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/fm/io/ddi.h>
+
+static hxge_fm_ereport_attr_t
+*hxge_fm_get_ereport_attr(hxge_fm_ereport_id_t ereport_id);
+
+static int
+hxge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data);
+
+hxge_fm_ereport_attr_t hxge_fm_ereport_vmac[] = {
+ {HXGE_FM_EREPORT_VMAC_LINK_DOWN, "10g_link_down",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_LOST}
+};
+
+hxge_fm_ereport_attr_t hxge_fm_ereport_pfc[] = {
+ /*
+ * The following are part of LDF 0, non-fatal
+ */
+ {HXGE_FM_EREPORT_PFC_TCAM_PAR_ERR, "classifier_tcam_par_err",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_UNAFFECTED},
+ {HXGE_FM_EREPORT_PFC_VLAN_PAR_ERR, "classifier_vlan_par_err",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_UNAFFECTED},
+ {HXGE_FM_EREPORT_PFC_PKT_DROP, "classifier_pkt_drop_err",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_UNAFFECTED}
+};
+
+hxge_fm_ereport_attr_t hxge_fm_ereport_rdmc[] = {
+ /*
+ * The following are part of LDF1, fatal
+ */
+ {HXGE_FM_EREPORT_RDMC_RBR_CPL_TO, "rxdma_rbr_cpl_to",
+ DDI_FM_DEVICE_NO_RESPONSE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR, "rxdma_peu_resp_err",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR, "rxdma_rcr_sha_par_err",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR, "rxdma_rbr_pre_par_err",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY, "rxdma_rbr_pre_empty_err",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL, "rxdma_rcr_sha_full",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_RDMC_RCRFULL, "rxdma_rcr_full",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_RDMC_RBR_EMPTY, "rxdma_rbr_empty",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_RDMC_RBRFULL, "rxdma_rbr_full",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_RDMC_RCR_ERR, "rxdma_completion_err",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_DEGRADED},
+ /*
+ * Control/Data RAM received an ECC double-bit error.
+ * Fatal error. Part of Device Error 1.
+ */
+ {HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED, "rxdma_ctrl_fifo_ded",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED, "rxdma_data_fifo_ded",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_DEGRADED},
+ /*
+ * Control/Data RAM received an ECC single-bit error.
+ * Non-fatal error. Part of Device Error 0.
+ */
+ {HXGE_FM_EREPORT_RDMC_CTRL_FIFO_SEC, "rxdma_ctrl_fifo_sec",
+ DDI_FM_DEVICE_INTERN_CORR,
+ DDI_SERVICE_UNAFFECTED},
+ {HXGE_FM_EREPORT_RDMC_DATA_FIFO_SEC, "rxdma_data_fifo_sec",
+ DDI_FM_DEVICE_INTERN_CORR,
+ DDI_SERVICE_UNAFFECTED}
+};
+
+hxge_fm_ereport_attr_t hxge_fm_ereport_tdmc[] = {
+ {HXGE_FM_EREPORT_TDMC_PEU_RESP_ERR, "txdma_peu_resp_err",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_TDMC_PKT_SIZE_HDR_ERR, "txdma_pkt_size_hdr_err",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_TDMC_RUNT_PKT_DROP_ERR, "txdma_runt_pkt_drop_err",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR, "txdma_pkt_size_err",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_TDMC_TX_RNG_OFLOW, "txdma_tx_rng_oflow",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_TDMC_PREF_PAR_ERR, "txdma_pref_par_err",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_TDMC_TDR_PREF_CPL_TO, "txdma_tdr_pref_cpl_to",
+ DDI_FM_DEVICE_NO_RESPONSE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_TDMC_PKT_CPL_TO, "txdma_pkt_cpl_to",
+ DDI_FM_DEVICE_NO_RESPONSE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_TDMC_INVALID_SOP, "txdma_invalid_sop",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_TDMC_UNEXPECTED_SOP, "txdma_unexpected_sop",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_TDMC_REORD_TBL_PAR, "txdma_reord_tbl_par_err",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_DEGRADED},
+ {HXGE_FM_EREPORT_TDMC_REORD_BUF_DED, "txdma_reord_buf_ded_err",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_DEGRADED}
+};
+
+hxge_fm_ereport_attr_t hxge_fm_ereport_peu[] = {
+ {HXGE_FM_EREPORT_PEU_ERR, "peu_peu_err",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_LOST},
+ {HXGE_FM_EREPORT_PEU_VNM_PIO_ERR, "peu_vnm_pio_err",
+ DDI_FM_DEVICE_INTERN_UNCORR,
+ DDI_SERVICE_LOST}
+};
+
+hxge_fm_ereport_attr_t hxge_fm_ereport_sw[] = {
+ {HXGE_FM_EREPORT_SW_INVALID_CHAN_NUM, "invalid_chan_num",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_LOST},
+ {HXGE_FM_EREPORT_SW_INVALID_PARAM, "invalid_param",
+ DDI_FM_DEVICE_INVAL_STATE,
+ DDI_SERVICE_LOST}
+};
+
+void
+hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
+ ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr)
+{
+ ddi_iblock_cookie_t iblk;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_fm_init"));
+
+ /* fm-capable in hxge.conf can be used to set fm_capabilities. */
+ hxgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, hxgep->dip,
+ DDI_PROP_DONTPASS, "fm-capable",
+ DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE);
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "FM capable = %d\n", hxgep->fm_capabilities));
+
+ /*
+ * Register capabilities with IO Fault Services. The capabilities
+ * set above may not be supported by the parent nexus; in that case
+ * some capability bits may be cleared.
+ */
+ if (hxgep->fm_capabilities)
+ ddi_fm_init(hxgep->dip, &hxgep->fm_capabilities, &iblk);
+
+ /*
+ * Initialize pci ereport capabilities if ereport capable
+ */
+ if (DDI_FM_EREPORT_CAP(hxgep->fm_capabilities) ||
+ DDI_FM_ERRCB_CAP(hxgep->fm_capabilities)) {
+ pci_ereport_setup(hxgep->dip);
+ }
+
+ /* Register error callback if error callback capable */
+ if (DDI_FM_ERRCB_CAP(hxgep->fm_capabilities)) {
+ ddi_fm_handler_register(hxgep->dip,
+ hxge_fm_error_cb, (void *) hxgep);
+ }
+
+ /*
+ * DDI_FLAGERR_ACC indicates:
+ * o Driver will check its access handle(s) for faults on
+ * a regular basis by calling ddi_fm_acc_err_get
+ * o Driver is able to cope with incorrect results of I/O
+ * operations resulting from an I/O fault
+ */
+ if (DDI_FM_ACC_ERR_CAP(hxgep->fm_capabilities)) {
+ reg_attr->devacc_attr_access = DDI_FLAGERR_ACC;
+ desc_attr->devacc_attr_access = DDI_FLAGERR_ACC;
+ } else {
+ reg_attr->devacc_attr_access = DDI_DEFAULT_ACC;
+ desc_attr->devacc_attr_access = DDI_DEFAULT_ACC;
+ }
+
+ /*
+ * DDI_DMA_FLAGERR indicates:
+ * o Driver will check its DMA handle(s) for faults on a
+ * regular basis using ddi_fm_dma_err_get
+ * o Driver is able to cope with incorrect results of DMA
+ * operations resulting from an I/O fault
+ */
+ if (DDI_FM_DMA_ERR_CAP(hxgep->fm_capabilities))
+ dma_attr->dma_attr_flags |= DDI_DMA_FLAGERR;
+ else
+ dma_attr->dma_attr_flags &= ~DDI_DMA_FLAGERR;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_fm_init"));
+}
+
+void
+hxge_fm_fini(p_hxge_t hxgep)
+{
+ /* Only unregister FMA capabilities if we registered some */
+ if (hxgep->fm_capabilities) {
+ /*
+ * Release any resources allocated by pci_ereport_setup()
+ */
+ if (DDI_FM_EREPORT_CAP(hxgep->fm_capabilities) ||
+ DDI_FM_ERRCB_CAP(hxgep->fm_capabilities))
+ pci_ereport_teardown(hxgep->dip);
+
+ /*
+ * Un-register error callback if error callback capable
+ */
+ if (DDI_FM_ERRCB_CAP(hxgep->fm_capabilities))
+ ddi_fm_handler_unregister(hxgep->dip);
+
+ /* Unregister from IO Fault Services */
+ ddi_fm_fini(hxgep->dip);
+ }
+}
+
+
+/*
+ * Simply call pci_ereport_post which generates ereports for errors
+ * that occur in the PCI local bus configuration status registers.
+ */
+/*ARGSUSED*/
+static int
+hxge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
+ const void *impl_data)
+{
+ pci_ereport_post(dip, err, NULL);
+ return (err->fme_status);
+}
+
+
+static hxge_fm_ereport_attr_t *
+hxge_fm_get_ereport_attr(hxge_fm_ereport_id_t ereport_id)
+{
+ hxge_fm_ereport_attr_t *attr;
+ uint8_t blk_id;
+ uint8_t index;
+
+ /* Extract the block id and the index within the block */
+ blk_id = ((ereport_id >> EREPORT_FM_ID_SHIFT) & EREPORT_FM_ID_MASK);
+ index = (ereport_id & EREPORT_INDEX_MASK);
+
+ /* Return the appropriate structure of type hxge_fm_ereport_attr_t */
+ switch (blk_id) {
+ case FM_SW_ID:
+ attr = &hxge_fm_ereport_sw[index];
+ break;
+ case FM_VMAC_ID:
+ attr = &hxge_fm_ereport_vmac[index];
+ break;
+ case FM_PFC_ID:
+ attr = &hxge_fm_ereport_pfc[index];
+ break;
+ case FM_RXDMA_ID:
+ attr = &hxge_fm_ereport_rdmc[index];
+ break;
+ case FM_TXDMA_ID:
+ attr = &hxge_fm_ereport_tdmc[index];
+ break;
+ case FM_PEU_ID:
+ attr = &hxge_fm_ereport_peu[index];
+ break;
+ default:
+ attr = NULL;
+ }
+
+ return (attr);
+}
+
+static void
+hxge_fm_ereport(p_hxge_t hxgep, uint8_t err_chan,
+ hxge_fm_ereport_attr_t *ereport)
+{
+ uint64_t ena;
+ char eclass[FM_MAX_CLASS];
+ char *err_str;
+ p_hxge_stats_t statsp;
+
+ (void) snprintf(eclass, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE,
+ ereport->eclass);
+
+ err_str = ereport->str;
+ ena = fm_ena_generate(0, FM_ENA_FMT1);
+ statsp = hxgep->statsp;
+
+ switch (ereport->index) {
+ case HXGE_FM_EREPORT_VMAC_LINK_DOWN:
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ NULL);
+ break;
+ case HXGE_FM_EREPORT_PFC_TCAM_PAR_ERR:
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ ERNAME_PFC_TCAM_ERR, DATA_TYPE_UINT32,
+ statsp->pfc_stats.tcam_parity_err,
+ NULL);
+ break;
+ case HXGE_FM_EREPORT_PFC_VLAN_PAR_ERR:
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ ERNAME_PFC_VLAN_ERR, DATA_TYPE_UINT32,
+ statsp->pfc_stats.vlan_parity_err,
+ NULL);
+ break;
+ case HXGE_FM_EREPORT_PFC_PKT_DROP:
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ ERNAME_PFC_PKT_DROP, DATA_TYPE_UINT32,
+ statsp->pfc_stats.pkt_drop,
+ NULL);
+ break;
+ case HXGE_FM_EREPORT_RDMC_RBR_CPL_TO:
+ case HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR:
+ case HXGE_FM_EREPORT_RDMC_RCRFULL:
+ case HXGE_FM_EREPORT_RDMC_RBR_EMPTY:
+ case HXGE_FM_EREPORT_RDMC_RBRFULL:
+ case HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY:
+ case HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL:
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
+ NULL);
+ break;
+ case HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
+ case HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: {
+ uint32_t err_log;
+ hxge_rx_ring_stats_t *rdc_statsp;
+
+ rdc_statsp = &statsp->rdc_stats[err_chan];
+ if (ereport->index == HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
+ err_log = (uint32_t)
+ rdc_statsp->errlog.pre_par.value;
+ else
+ err_log = (uint32_t)
+ rdc_statsp->errlog.sha_par.value;
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
+ ERNAME_RDMC_PAR_ERR_LOG, DATA_TYPE_UINT32, err_log,
+ NULL);
+ }
+ break;
+ case HXGE_FM_EREPORT_RDMC_RCR_ERR: {
+ uint8_t err_type;
+ err_type = statsp->rdc_stats[err_chan].errlog.compl_err_type;
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
+ ERNAME_RDC_ERR_TYPE, DATA_TYPE_UINT8, err_type,
+ NULL);
+ }
+ break;
+ case HXGE_FM_EREPORT_RDMC_CTRL_FIFO_SEC:
+ case HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED:
+ case HXGE_FM_EREPORT_RDMC_DATA_FIFO_SEC:
+ case HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED:
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ NULL);
+ break;
+
+ case HXGE_FM_EREPORT_TDMC_PEU_RESP_ERR:
+ case HXGE_FM_EREPORT_TDMC_TX_RNG_OFLOW:
+ case HXGE_FM_EREPORT_TDMC_PKT_SIZE_HDR_ERR:
+ case HXGE_FM_EREPORT_TDMC_RUNT_PKT_DROP_ERR:
+ case HXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
+ case HXGE_FM_EREPORT_TDMC_TDR_PREF_CPL_TO:
+ case HXGE_FM_EREPORT_TDMC_PKT_CPL_TO:
+ case HXGE_FM_EREPORT_TDMC_INVALID_SOP:
+ case HXGE_FM_EREPORT_TDMC_UNEXPECTED_SOP:
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
+ NULL);
+ break;
+
+ case HXGE_FM_EREPORT_TDMC_PREF_PAR_ERR:
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ ERNAME_ERR_DCHAN, DATA_TYPE_UINT8, err_chan,
+ ERNAME_TDC_PREF_PAR_LOG, DATA_TYPE_UINT32,
+ statsp->tdc_stats[err_chan].errlog.value, NULL);
+ break;
+ case HXGE_FM_EREPORT_TDMC_REORD_TBL_PAR:
+ case HXGE_FM_EREPORT_TDMC_REORD_BUF_DED:
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ NULL);
+ break;
+
+ case HXGE_FM_EREPORT_PEU_ERR:
+ case HXGE_FM_EREPORT_PEU_VNM_PIO_ERR:
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ NULL);
+ break;
+
+ case HXGE_FM_EREPORT_SW_INVALID_CHAN_NUM:
+ case HXGE_FM_EREPORT_SW_INVALID_PARAM:
+ ddi_fm_ereport_post(hxgep->dip, eclass, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ ERNAME_DETAILED_ERR_TYPE, DATA_TYPE_STRING, err_str,
+ NULL);
+ break;
+ }
+}
+
+void
+hxge_fm_report_error(p_hxge_t hxgep, uint8_t err_chan,
+ hxge_fm_ereport_id_t fm_ereport_id)
+{
+ hxge_fm_ereport_attr_t *fm_ereport_attr;
+
+ fm_ereport_attr = hxge_fm_get_ereport_attr(fm_ereport_id);
+
+ if (fm_ereport_attr != NULL &&
+ (DDI_FM_EREPORT_CAP(hxgep->fm_capabilities))) {
+ hxge_fm_ereport(hxgep, err_chan, fm_ereport_attr);
+ ddi_fm_service_impact(hxgep->dip, fm_ereport_attr->impact);
+ }
+}
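+
+/*
+ * Usage sketch (illustrative; the channel number is hypothetical): a
+ * receive DMA handler that detects a completion ring error on channel 3
+ * would post the ereport and let FMA assess the service impact with
+ *
+ *	hxge_fm_report_error(hxgep, 3, HXGE_FM_EREPORT_RDMC_RCR_ERR);
+ *
+ * The ereport is only posted if DDI_FM_EREPORT_CAPABLE survived the
+ * capability negotiation done in hxge_fm_init().
+ */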
+
+int
+fm_check_acc_handle(ddi_acc_handle_t handle)
+{
+ ddi_fm_error_t err;
+
+ ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
+ ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
+
+ return (err.fme_status);
+}
+
+int
+fm_check_dma_handle(ddi_dma_handle_t handle)
+{
+ ddi_fm_error_t err;
+
+ ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
+ return (err.fme_status);
+}
diff --git a/usr/src/uts/common/io/hxge/hxge_fm.h b/usr/src/uts/common/io/hxge/hxge_fm.h
new file mode 100644
index 0000000000..b2ec039e52
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_fm.h
@@ -0,0 +1,129 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_FM_H
+#define _SYS_HXGE_HXGE_FM_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/ddi.h>
+
+#define ERNAME_DETAILED_ERR_TYPE "detailed error type"
+#define ERNAME_ERR_DCHAN "dma channel number"
+#define ERNAME_PFC_TCAM_ERR "pfc tcam error"
+#define ERNAME_PFC_VLAN_ERR "pfc vlan table error"
+#define ERNAME_PFC_PKT_DROP "pfc pkt drop error"
+#define ERNAME_RDMC_PAR_ERR_LOG "rdmc parity error log"
+#define ERNAME_RDC_ERR_TYPE "completion error type"
+#define ERNAME_TDC_PREF_PAR_LOG "tdc pref par log"
+
+#define EREPORT_FM_ID_SHIFT 16
+#define EREPORT_FM_ID_MASK 0xFF
+#define EREPORT_INDEX_MASK 0xFF
+#define HXGE_FM_EREPORT_UNKNOWN 0
+
+#define FM_SW_ID 0xFF
+#define FM_VMAC_ID VMAC_BLK_ID
+#define FM_TXDMA_ID TXDMA_BLK_ID
+#define FM_RXDMA_ID RXDMA_BLK_ID
+#define FM_PFC_ID PFC_BLK_ID
+#define FM_PEU_ID PEU_BLK_ID
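+
+/*
+ * An ereport id encodes its block id in bits 23:16 and its index within
+ * the block in the low byte.  Decoding sketch (this mirrors
+ * hxge_fm_get_ereport_attr() in hxge_fm.c):
+ *
+ *	blk_id = (id >> EREPORT_FM_ID_SHIFT) & EREPORT_FM_ID_MASK;
+ *	index = id & EREPORT_INDEX_MASK;
+ */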
+
+typedef uint32_t hxge_fm_ereport_id_t;
+
+typedef struct _hxge_fm_ereport_attr {
+ uint32_t index;
+ char *str;
+ char *eclass;
+ ddi_fault_impact_t impact;
+} hxge_fm_ereport_attr_t;
+
+/* VMAC ereports */
+typedef enum {
+ HXGE_FM_EREPORT_VMAC_LINK_DOWN = (FM_VMAC_ID << EREPORT_FM_ID_SHIFT),
+} hxge_fm_ereport_vmac_t;
+
+/* PFC ereports */
+typedef enum {
+ HXGE_FM_EREPORT_PFC_TCAM_PAR_ERR = (FM_PFC_ID << EREPORT_FM_ID_SHIFT),
+ HXGE_FM_EREPORT_PFC_VLAN_PAR_ERR,
+ HXGE_FM_EREPORT_PFC_PKT_DROP
+} hxge_fm_ereport_pfc_t;
+
+/* RDMC ereports */
+typedef enum {
+ HXGE_FM_EREPORT_RDMC_RBR_CPL_TO = (FM_RXDMA_ID << EREPORT_FM_ID_SHIFT),
+ HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR,
+ HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR,
+ HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR,
+ HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY,
+ HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL,
+ HXGE_FM_EREPORT_RDMC_RCRFULL,
+ HXGE_FM_EREPORT_RDMC_RBR_EMPTY,
+ HXGE_FM_EREPORT_RDMC_RBRFULL,
+ HXGE_FM_EREPORT_RDMC_RCR_ERR,
+ HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED,
+ HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED,
+ HXGE_FM_EREPORT_RDMC_CTRL_FIFO_SEC,
+ HXGE_FM_EREPORT_RDMC_DATA_FIFO_SEC
+} hxge_fm_ereport_rdmc_t;
+
+/* TDMC ereports */
+typedef enum {
+ HXGE_FM_EREPORT_TDMC_PEU_RESP_ERR =
+ (FM_TXDMA_ID << EREPORT_FM_ID_SHIFT),
+ HXGE_FM_EREPORT_TDMC_PKT_SIZE_HDR_ERR,
+ HXGE_FM_EREPORT_TDMC_RUNT_PKT_DROP_ERR,
+ HXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR,
+ HXGE_FM_EREPORT_TDMC_TX_RNG_OFLOW,
+ HXGE_FM_EREPORT_TDMC_PREF_PAR_ERR,
+ HXGE_FM_EREPORT_TDMC_TDR_PREF_CPL_TO,
+ HXGE_FM_EREPORT_TDMC_PKT_CPL_TO,
+ HXGE_FM_EREPORT_TDMC_INVALID_SOP,
+ HXGE_FM_EREPORT_TDMC_UNEXPECTED_SOP,
+ HXGE_FM_EREPORT_TDMC_REORD_TBL_PAR,
+ HXGE_FM_EREPORT_TDMC_REORD_BUF_DED
+} hxge_fm_ereport_attr_tdmc_t;
+
+/* PEU ereports */
+typedef enum {
+ HXGE_FM_EREPORT_PEU_ERR = (FM_PEU_ID << EREPORT_FM_ID_SHIFT),
+ HXGE_FM_EREPORT_PEU_VNM_PIO_ERR
+} hxge_fm_ereport_peu_t;
+
+/* SW ereports */
+typedef enum {
+ HXGE_FM_EREPORT_SW_INVALID_CHAN_NUM = (FM_SW_ID << EREPORT_FM_ID_SHIFT),
+ HXGE_FM_EREPORT_SW_INVALID_PARAM
+} hxge_fm_ereport_sw_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_FM_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_fzc.c b/usr/src/uts/common/io/hxge/hxge_fzc.c
new file mode 100644
index 0000000000..7168be0072
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_fzc.c
@@ -0,0 +1,295 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hxge_impl.h>
+#include <hpi_vmac.h>
+#include <hpi_rxdma.h>
+
+/*
+ * System interrupt registers that are under function zero management.
+ */
+hxge_status_t
+hxge_fzc_intr_init(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_fzc_intr_init"));
+
+ /* Configure the initial timer resolution */
+ if ((status = hxge_fzc_intr_tmres_set(hxgep)) != HXGE_OK) {
+ return (status);
+ }
+
+ /*
+ * Set up the logical device group's logical devices that
+ * the group owns.
+ */
+ if ((status = hxge_fzc_intr_ldg_num_set(hxgep)) != HXGE_OK) {
+ return (status);
+ }
+
+ /* Configure the system interrupt data */
+ if ((status = hxge_fzc_intr_sid_set(hxgep)) != HXGE_OK) {
+ return (status);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_fzc_intr_init"));
+
+ return (status);
+}
+
+hxge_status_t
+hxge_fzc_intr_ldg_num_set(p_hxge_t hxgep)
+{
+ p_hxge_ldg_t ldgp;
+ p_hxge_ldv_t ldvp;
+ hpi_handle_t handle;
+ int i, j;
+ hpi_status_t rs = HPI_SUCCESS;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_fzc_intr_ldg_num_set"));
+
+ if (hxgep->ldgvp == NULL) {
+ return (HXGE_ERROR);
+ }
+
+ ldgp = hxgep->ldgvp->ldgp;
+ ldvp = hxgep->ldgvp->ldvp;
+ if (ldgp == NULL || ldvp == NULL) {
+ return (HXGE_ERROR);
+ }
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ for (i = 0; i < hxgep->ldgvp->ldg_intrs; i++, ldgp++) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_fzc_intr_ldg_num_set "
+ "<== hxge_f(Hydra): # ldv %d in group %d", ldgp->nldvs,
+ ldgp->ldg));
+
+ for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
+ rs = hpi_fzc_ldg_num_set(handle, ldvp->ldv,
+ ldvp->ldg_assigned);
+ if (rs != HPI_SUCCESS) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "<== hxge_fzc_intr_ldg_num_set failed "
+ " rs 0x%x ldv %d ldg %d",
+ rs, ldvp->ldv, ldvp->ldg_assigned));
+ return (HXGE_ERROR | rs);
+ }
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "<== hxge_fzc_intr_ldg_num_set OK ldv %d ldg %d",
+ ldvp->ldv, ldvp->ldg_assigned));
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_fzc_intr_ldg_num_set"));
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_fzc_intr_tmres_set(p_hxge_t hxgep)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_fzc_intr_tmres_set"));
+ if (hxgep->ldgvp == NULL) {
+ return (HXGE_ERROR);
+ }
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ if ((rs = hpi_fzc_ldg_timer_res_set(handle, hxgep->ldgvp->tmres))) {
+ return (HXGE_ERROR | rs);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_fzc_intr_tmres_set"));
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_fzc_intr_sid_set(p_hxge_t hxgep)
+{
+ hpi_handle_t handle;
+ p_hxge_ldg_t ldgp;
+ fzc_sid_t sid;
+ int i;
+ hpi_status_t rs = HPI_SUCCESS;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_fzc_intr_sid_set"));
+ if (hxgep->ldgvp == NULL) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "<== hxge_fzc_intr_sid_set: no ldg"));
+ return (HXGE_ERROR);
+ }
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ ldgp = hxgep->ldgvp->ldgp;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_fzc_intr_sid_set: #int %d", hxgep->ldgvp->ldg_intrs));
+ for (i = 0; i < hxgep->ldgvp->ldg_intrs; i++, ldgp++) {
+ sid.ldg = ldgp->ldg;
+ sid.vector = ldgp->vector;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_fzc_intr_sid_set(%d): group %d vector %d",
+ i, sid.ldg, sid.vector));
+ rs = hpi_fzc_sid_set(handle, sid);
+ if (rs != HPI_SUCCESS) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "<== hxge_fzc_intr_sid_set:failed 0x%x", rs));
+ return (HXGE_ERROR | rs);
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_fzc_intr_sid_set"));
+ return (HXGE_OK);
+}
+
+/*
+ * Receive DMA registers that are under function zero management.
+ */
+/*ARGSUSED*/
+hxge_status_t
+hxge_init_fzc_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_init_fzc_rxdma_channel"));
+
+ /* Initialize the RXDMA logical pages */
+ status = hxge_init_fzc_rxdma_channel_pages(hxgep, channel, rbr_p);
+ if (status != HXGE_OK)
+ return (status);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_init_fzc_rxdma_channel"));
+ return (status);
+}
+
+/*ARGSUSED*/
+hxge_status_t
+hxge_init_fzc_rxdma_channel_pages(p_hxge_t hxgep,
+ uint16_t channel, p_rx_rbr_ring_t rbrp)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "==> hxge_init_fzc_rxdma_channel_pages"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ /* Initialize the page handle */
+ rs = hpi_rxdma_cfg_logical_page_handle(handle, channel,
+ rbrp->page_hdl.bits.handle);
+ if (rs != HPI_SUCCESS)
+ return (HXGE_ERROR | rs);
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "<== hxge_init_fzc_rxdma_channel_pages"));
+ return (HXGE_OK);
+}
+
+/*ARGSUSED*/
+hxge_status_t
+hxge_init_fzc_txdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_tx_ring_t tx_ring_p, p_tx_mbox_t mbox_p)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_init_fzc_txdma_channel"));
+
+ /* Initialize the TXDMA logical pages */
+ (void) hxge_init_fzc_txdma_channel_pages(hxgep, channel, tx_ring_p);
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_init_fzc_txdma_channel"));
+ return (status);
+}
+
+hxge_status_t
+hxge_init_fzc_rx_common(p_hxge_t hxgep)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_init_fzc_rx_common"));
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ /*
+ * Configure the rxdma clock divider.  This is the granularity
+ * counter based on the hardware system clock (i.e. 300 MHz),
+ * which ticks roughly every 3 nanoseconds.  Setting the clock
+ * divider counter to 1000 gives a tick of about 3 microseconds
+ * (1000 / 300 MHz), so a 3 microsecond timeout is programmed
+ * as a count of 1.
+ */
+ rs = hpi_rxdma_cfg_clock_div_set(handle, RXDMA_CK_DIV_DEFAULT);
+ if (rs != HPI_SUCCESS)
+ return (HXGE_ERROR | rs);
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "<== hxge_init_fzc_rx_common:status 0x%08x", status));
+ return (status);
+}
+
+hxge_status_t
+hxge_init_fzc_txdma_channel_pages(p_hxge_t hxgep, uint16_t channel,
+ p_tx_ring_t tx_ring_p)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "==> hxge_init_fzc_txdma_channel_pages"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ /* Initialize the page handle */
+ rs = hpi_txdma_log_page_handle_set(handle, channel,
+ &tx_ring_p->page_hdl);
+
+ if (rs == HPI_SUCCESS)
+ return (HXGE_OK);
+ else
+ return (HXGE_ERROR | rs);
+}
+
+hxge_status_t
+hxge_fzc_sys_err_mask_set(p_hxge_t hxgep, boolean_t mask)
+{
+ hpi_status_t rs = HPI_SUCCESS;
+ hpi_handle_t handle;
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ rs = hpi_fzc_sys_err_mask_set(handle, mask);
+ if (rs == HPI_SUCCESS)
+ return (HXGE_OK);
+ else
+ return (HXGE_ERROR | rs);
+}
diff --git a/usr/src/uts/common/io/hxge/hxge_fzc.h b/usr/src/uts/common/io/hxge/hxge_fzc.h
new file mode 100644
index 0000000000..83c7b1441e
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_fzc.h
@@ -0,0 +1,62 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_FZC_H
+#define _SYS_HXGE_HXGE_FZC_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hpi_vir.h>
+
+hxge_status_t hxge_fzc_intr_init(p_hxge_t hxgep);
+hxge_status_t hxge_fzc_intr_ldg_num_set(p_hxge_t hxgep);
+hxge_status_t hxge_fzc_intr_tmres_set(p_hxge_t hxgep);
+hxge_status_t hxge_fzc_intr_sid_set(p_hxge_t hxgep);
+
+hxge_status_t hxge_init_fzc_txdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_tx_ring_t tx_ring_p, p_tx_mbox_t mbox_p);
+
+hxge_status_t hxge_init_fzc_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p);
+
+hxge_status_t hxge_init_fzc_rx_common(p_hxge_t hxgep);
+
+hxge_status_t hxge_init_fzc_rxdma_channel_pages(p_hxge_t hxgep,
+ uint16_t channel, p_rx_rbr_ring_t rbr_p);
+
+hxge_status_t hxge_init_fzc_txdma_channel_pages(p_hxge_t hxgep,
+ uint16_t channel, p_tx_ring_t tx_ring_p);
+
+hxge_status_t hxge_fzc_sys_err_mask_set(p_hxge_t hxgep, boolean_t mask);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_FZC_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_hw.c b/usr/src/uts/common/io/hxge/hxge_hw.c
new file mode 100644
index 0000000000..3d88827f17
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_hw.c
@@ -0,0 +1,777 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hxge_impl.h>
+
+lb_property_t lb_normal = {normal, "normal", hxge_lb_normal};
+lb_property_t lb_mac10g = {internal, "mac10g", hxge_lb_mac10g};
+
+uint32_t hxge_lb_dbg = 1;
+
+extern uint32_t hxge_jumbo_mtu;
+extern boolean_t hxge_jumbo_enable;
+
+static void hxge_rtrace_ioctl(p_hxge_t, queue_t *, mblk_t *, struct iocblk *);
+
+void
+hxge_global_reset(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_global_reset"));
+
+ (void) hxge_intr_hw_disable(hxgep);
+
+ if (hxgep->suspended)
+ (void) hxge_link_init(hxgep);
+
+ (void) hxge_vmac_init(hxgep);
+
+ (void) hxge_intr_hw_enable(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_global_reset"));
+}
+
+
+void
+hxge_hw_id_init(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_id_init"));
+
+ /*
+ * Set up the required initial hardware parameters, such as the
+ * MAC MTU size.
+ */
+ hxgep->vmac.is_jumbo = B_FALSE;
+ /* 1518 + 4 + 16 */
+ hxgep->vmac.maxframesize = STD_FRAME_SIZE + TX_PKT_HEADER_SIZE;
+ if (hxgep->param_arr[param_accept_jumbo].value || hxge_jumbo_enable) {
+ hxgep->vmac.maxframesize = (uint16_t)hxge_jumbo_mtu;
+ hxgep->vmac.is_jumbo = B_TRUE;
+ }
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_id_init: maxframesize %d",
+ hxgep->vmac.maxframesize));
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_id_init"));
+}
+
+void
+hxge_hw_init_niu_common(p_hxge_t hxgep)
+{
+ p_hxge_hw_list_t hw_p;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_init_niu_common"));
+
+ if ((hw_p = hxgep->hxge_hw_p) == NULL) {
+ return;
+ }
+
+ MUTEX_ENTER(&hw_p->hxge_cfg_lock);
+ if (hw_p->flags & COMMON_INIT_DONE) {
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL, "hxge_hw_init_niu_common"
+ " already done for dip $%p exiting", hw_p->parent_devp));
+ MUTEX_EXIT(&hw_p->hxge_cfg_lock);
+ return;
+ }
+
+ hw_p->flags = COMMON_INIT_START;
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL,
+ "hxge_hw_init_niu_common Started for device id %x",
+ hw_p->parent_devp));
+
+ (void) hxge_pfc_hw_reset(hxgep);
+ hw_p->flags = COMMON_INIT_DONE;
+ MUTEX_EXIT(&hw_p->hxge_cfg_lock);
+
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL,
+ "hxge_hw_init_niu_common Done for device id %x",
+ hw_p->parent_devp));
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_init_niu_common"));
+}
+
+uint_t
+hxge_intr(caddr_t arg1, caddr_t arg2)
+{
+ p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
+ p_hxge_t hxgep = (p_hxge_t)arg2;
+ uint_t serviced = DDI_INTR_UNCLAIMED;
+ uint8_t ldv;
+ hpi_handle_t handle;
+ p_hxge_ldgv_t ldgvp;
+ p_hxge_ldg_t ldgp, t_ldgp;
+ p_hxge_ldv_t t_ldvp;
+ uint32_t vector0 = 0, vector1 = 0;
+ int i, j, nldvs, nintrs = 1;
+ hpi_status_t rs = HPI_SUCCESS;
+
+ /*
+ * The DDI framework may pass NULL as the second argument.
+ */
+ if ((arg2 == NULL) || ((void *) ldvp->hxgep != arg2)) {
+ hxgep = ldvp->hxgep;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr"));
+
+ if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
+ HXGE_ERROR_MSG((hxgep, INT_CTL,
+ "<== hxge_intr: not initialized 0x%x", serviced));
+ return (serviced);
+ }
+
+ ldgvp = hxgep->ldgvp;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: ldgvp $%p", ldgvp));
+
+ if (ldvp == NULL && ldgvp) {
+ t_ldvp = ldvp = ldgvp->ldvp;
+ }
+
+ if (ldvp) {
+ ldgp = t_ldgp = ldvp->ldgp;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
+ "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
+
+ if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
+ HXGE_ERROR_MSG((hxgep, INT_CTL, "==> hxge_intr: "
+ "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
+ HXGE_ERROR_MSG((hxgep, INT_CTL, "<== hxge_intr: not ready"));
+ return (DDI_INTR_UNCLAIMED);
+ }
+
+ /*
+ * This interrupt handler will have to go through
+ * all the logical devices to find out which
+ * logical device interrupts us and then call
+ * its handler to process the events.
+ */
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ t_ldgp = ldgp;
+ t_ldvp = ldgp->ldvp;
+
+ nldvs = ldgp->nldvs;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: #ldvs %d #intrs %d",
+ nldvs, ldgvp->ldg_intrs));
+
+ serviced = DDI_INTR_CLAIMED;
+ for (i = 0; i < nintrs; i++, t_ldgp++) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr(%d): #ldvs %d "
+ " #intrs %d", i, nldvs, nintrs));
+
+ /* Get this group's flag bits. */
+ t_ldgp->interrupted = B_FALSE;
+ rs = hpi_ldsv_ldfs_get(handle, t_ldgp->ldg, &vector0, &vector1);
+ if (rs) {
+ continue;
+ }
+
+ if (!vector0 && !vector1) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
+ "no interrupts on group %d", t_ldgp->ldg));
+ continue;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
+ "vector0 0x%llx vector1 0x%llx", vector0, vector1));
+
+ t_ldgp->interrupted = B_TRUE;
+ nldvs = t_ldgp->nldvs;
+
+ for (j = 0; j < nldvs; j++, t_ldvp++) {
+ /*
+ * Call device's handler if flag bits are on.
+ */
+ ldv = t_ldvp->ldv;
+ if (LDV_ON(ldv, vector0) | LDV_ON(ldv, vector1)) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr: calling device %d"
+ " #ldvs %d #intrs %d", j, nldvs, nintrs));
+ (void) (t_ldvp->ldv_intr_handler)(
+ (caddr_t)t_ldvp, arg2);
+ }
+ }
+ }
+
+ t_ldgp = ldgp;
+ for (i = 0; i < nintrs; i++, t_ldgp++) {
+ /* rearm group interrupts */
+ if (t_ldgp->interrupted) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr: arm group %d", t_ldgp->ldg));
+ (void) hpi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
+ t_ldgp->arm, t_ldgp->ldg_timer);
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr: serviced 0x%x",
+ serviced));
+ return (serviced);
+}
+
+hxge_status_t
+hxge_peu_handle_sys_errors(p_hxge_t hxgep)
+{
+ hpi_handle_t handle;
+ p_hxge_peu_sys_stats_t statsp;
+ peu_intr_stat_t stat;
+
+ handle = hxgep->hpi_handle;
+ statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;
+
+ HXGE_REG_RD64(handle, PEU_INTR_STAT, &stat.value);
+
+ /*
+ * The PCIE errors are unrecoverable and cannot be cleared.
+ * The only thing we can do here is to mask them off to prevent
+ * continued interrupts.
+ */
+ HXGE_REG_WR64(handle, PEU_INTR_MASK, 0xffffffff);
+
+ if (stat.bits.spc_acc_err) {
+ statsp->spc_acc_err++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: spc_acc_err"));
+ }
+
+ if (stat.bits.tdc_pioacc_err) {
+ statsp->tdc_pioacc_err++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: tdc_pioacc_err"));
+ }
+
+ if (stat.bits.rdc_pioacc_err) {
+ statsp->rdc_pioacc_err++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: rdc_pioacc_err"));
+ }
+
+ if (stat.bits.pfc_pioacc_err) {
+ statsp->pfc_pioacc_err++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: pfc_pioacc_err"));
+ }
+
+ if (stat.bits.vmac_pioacc_err) {
+ statsp->vmac_pioacc_err++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: vmac_pioacc_err"));
+ }
+
+ if (stat.bits.cpl_hdrq_parerr) {
+ statsp->cpl_hdrq_parerr++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: cpl_hdrq_parerr"));
+ }
+
+ if (stat.bits.cpl_dataq_parerr) {
+ statsp->cpl_dataq_parerr++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: cpl_dataq_parerr"));
+ }
+
+ if (stat.bits.retryram_xdlh_parerr) {
+ statsp->retryram_xdlh_parerr++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: retryram_xdlh_parerr"));
+ }
+
+ if (stat.bits.retrysotram_xdlh_parerr) {
+ statsp->retrysotram_xdlh_parerr++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: retrysotram_xdlh_parerr"));
+ }
+
+ if (stat.bits.p_hdrq_parerr) {
+ statsp->p_hdrq_parerr++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: p_hdrq_parerr"));
+ }
+
+ if (stat.bits.p_dataq_parerr) {
+ statsp->p_dataq_parerr++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: p_dataq_parerr"));
+ }
+
+ if (stat.bits.np_hdrq_parerr) {
+ statsp->np_hdrq_parerr++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: np_hdrq_parerr"));
+ }
+
+ if (stat.bits.np_dataq_parerr) {
+ statsp->np_dataq_parerr++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: np_dataq_parerr"));
+ }
+
+ if (stat.bits.eic_msix_parerr) {
+ statsp->eic_msix_parerr++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: eic_msix_parerr"));
+ }
+
+ if (stat.bits.hcr_parerr) {
+ statsp->hcr_parerr++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_peu_handle_sys_errors: hcr_parerr"));
+ }
+
+ return (HXGE_OK);
+}
+
+/*ARGSUSED*/
+uint_t
+hxge_syserr_intr(caddr_t arg1, caddr_t arg2)
+{
+ p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
+ p_hxge_t hxgep = (p_hxge_t)arg2;
+ p_hxge_ldg_t ldgp = NULL;
+ hpi_handle_t handle;
+ dev_err_stat_t estat;
+ uint_t serviced = DDI_INTR_UNCLAIMED;
+
+ if ((arg1 == NULL) && (arg2 == NULL)) {
+ return (serviced);
+ }
+
+ if ((arg2 == NULL) ||
+ ((ldvp != NULL) && ((void *)ldvp->hxgep != arg2))) {
+ if (ldvp != NULL) {
+ hxgep = ldvp->hxgep;
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, SYSERR_CTL,
+ "==> hxge_syserr_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
+
+ if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
+ ldgp = ldvp->ldgp;
+ if (ldgp == NULL) {
+ HXGE_ERROR_MSG((hxgep, SYSERR_CTL,
+ "<== hxge_syserrintr(no logical group): "
+ "arg2 $%p arg1 $%p", hxgep, ldvp));
+ return (DDI_INTR_UNCLAIMED);
+ }
+ /*
+ * Get the logical device state if the function uses interrupts.
+ */
+ }
+
+ /* This interrupt handler is for system error interrupts. */
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ estat.value = 0;
+ (void) hpi_fzc_sys_err_stat_get(handle, &estat);
+ HXGE_DEBUG_MSG((hxgep, SYSERR_CTL,
+ "==> hxge_syserr_intr: device error 0x%016llx", estat.value));
+
+ if (estat.bits.tdc_err0 || estat.bits.tdc_err1) {
+ /* TDMC */
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_syserr_intr: device error - TDMC"));
+ (void) hxge_txdma_handle_sys_errors(hxgep);
+ } else if (estat.bits.rdc_err0 || estat.bits.rdc_err1) {
+ /* RDMC */
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_syserr_intr: device error - RDMC"));
+ (void) hxge_rxdma_handle_sys_errors(hxgep);
+ } else if (estat.bits.vnm_pio_err1 || estat.bits.peu_err1) {
+ /* PCI-E */
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_syserr_intr: device error - PCI-E"));
+
+ /* kstats are updated here */
+ (void) hxge_peu_handle_sys_errors(hxgep);
+
+ if (estat.bits.peu_err1)
+ HXGE_FM_REPORT_ERROR(hxgep, NULL,
+ HXGE_FM_EREPORT_PEU_ERR);
+
+ if (estat.bits.vnm_pio_err1)
+ HXGE_FM_REPORT_ERROR(hxgep, NULL,
+ HXGE_FM_EREPORT_PEU_VNM_PIO_ERR);
+ } else if (estat.value != 0) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_syserr_intr: device error - unknown"));
+ }
+
+ serviced = DDI_INTR_CLAIMED;
+
+ if ((ldgp != NULL) && (ldvp != NULL) &&
+ (ldgp->nldvs == 1) && !ldvp->use_timer) {
+ (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
+ B_TRUE, ldgp->ldg_timer);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_syserr_intr"));
+ return (serviced);
+}
+
+void
+hxge_intr_hw_enable(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_hw_enable"));
+
+ (void) hxge_intr_mask_mgmt_set(hxgep, B_TRUE);
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_hw_enable"));
+}
+
+void
+hxge_intr_hw_disable(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_hw_disable"));
+
+ (void) hxge_intr_mask_mgmt_set(hxgep, B_FALSE);
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_hw_disable"));
+}
+
+void
+hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
+{
+ p_hxge_t hxgep = (p_hxge_t)arg;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_hw_blank"));
+
+ /*
+ * Replace current ticks and counts for later
+ * processing by the receive packet interrupt routines.
+ */
+ hxgep->intr_timeout = (uint16_t)ticks;
+ hxgep->intr_threshold = (uint16_t)count;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_hw_blank"));
+}
+
+void
+hxge_hw_stop(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_stop"));
+
+ (void) hxge_tx_vmac_disable(hxgep);
+ (void) hxge_rx_vmac_disable(hxgep);
+ (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
+ (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_stop"));
+}
+
+void
+hxge_hw_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
+{
+ int cmd;
+
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_hw_ioctl"));
+
+ if (hxgep == NULL) {
+ miocnak(wq, mp, 0, EINVAL);
+ return;
+ }
+
+ iocp->ioc_error = 0;
+ cmd = iocp->ioc_cmd;
+
+ switch (cmd) {
+ default:
+ miocnak(wq, mp, 0, EINVAL);
+ return;
+
+ case HXGE_GET64:
+ hxge_get64(hxgep, mp->b_cont);
+ miocack(wq, mp, sizeof (uint32_t), 0);
+ break;
+
+ case HXGE_PUT64:
+ hxge_put64(hxgep, mp->b_cont);
+ miocack(wq, mp, 0, 0);
+ break;
+
+ case HXGE_PUT_TCAM:
+ hxge_put_tcam(hxgep, mp->b_cont);
+ miocack(wq, mp, 0, 0);
+ break;
+
+ case HXGE_GET_TCAM:
+ hxge_get_tcam(hxgep, mp->b_cont);
+ miocack(wq, mp, 0, 0);
+ break;
+
+ case HXGE_RTRACE:
+ hxge_rtrace_ioctl(hxgep, wq, mp, iocp);
+ break;
+ }
+}
+
+/*
+ * 10G is the only loopback mode for Hydra.
+ */
+void
+hxge_loopback_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
+ struct iocblk *iocp)
+{
+ p_lb_property_t lb_props;
+ size_t size;
+ int i;
+
+ if (mp->b_cont == NULL) {
+ /* No payload to operate on; nak the request and bail out. */
+ miocnak(wq, mp, 0, EINVAL);
+ return;
+ }
+
+ switch (iocp->ioc_cmd) {
+ case LB_GET_MODE:
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_MODE command"));
+ if (hxgep != NULL) {
+ *(lb_info_sz_t *)mp->b_cont->b_rptr =
+ hxgep->statsp->port_stats.lb_mode;
+ miocack(wq, mp, sizeof (hxge_lb_t), 0);
+ } else
+ miocnak(wq, mp, 0, EINVAL);
+ break;
+
+ case LB_SET_MODE:
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_SET_LB_MODE command"));
+ if (iocp->ioc_count != sizeof (uint32_t)) {
+ miocack(wq, mp, 0, 0);
+ break;
+ }
+ if ((hxgep != NULL) && hxge_set_lb(hxgep, wq, mp->b_cont)) {
+ miocack(wq, mp, 0, 0);
+ } else {
+ miocnak(wq, mp, 0, EPROTO);
+ }
+ break;
+
+ case LB_GET_INFO_SIZE:
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
+ if (hxgep != NULL) {
+ size = sizeof (lb_normal) + sizeof (lb_mac10g);
+
+ *(lb_info_sz_t *)mp->b_cont->b_rptr = size;
+
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL,
+ "HXGE_GET_LB_INFO command: size %d", size));
+ miocack(wq, mp, sizeof (lb_info_sz_t), 0);
+ } else
+ miocnak(wq, mp, 0, EINVAL);
+ break;
+
+ case LB_GET_INFO:
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_INFO command"));
+ if (hxgep != NULL) {
+ size = sizeof (lb_normal) + sizeof (lb_mac10g);
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL,
+ "HXGE_GET_LB_INFO command: size %d", size));
+ if (size == iocp->ioc_count) {
+ i = 0;
+ lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
+ lb_props[i++] = lb_normal;
+ lb_props[i++] = lb_mac10g;
+
+ miocack(wq, mp, size, 0);
+ } else
+ miocnak(wq, mp, 0, EINVAL);
+ } else {
+ miocnak(wq, mp, 0, EINVAL);
+ cmn_err(CE_NOTE, "hxge_hw_ioctl: invalid command 0x%x",
+ iocp->ioc_cmd);
+ }
+
+ break;
+ }
+}
+
+/*ARGSUSED*/
+boolean_t
+hxge_set_lb(p_hxge_t hxgep, queue_t *wq, p_mblk_t mp)
+{
+ boolean_t status = B_TRUE;
+ uint32_t lb_mode;
+ lb_property_t *lb_info;
+
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL, "<== hxge_set_lb"));
+ lb_mode = hxgep->statsp->port_stats.lb_mode;
+ if (lb_mode == *(uint32_t *)mp->b_rptr) {
+ cmn_err(CE_NOTE,
+ "hxge%d: Loopback mode already set (lb_mode %d).\n",
+ hxgep->instance, lb_mode);
+ status = B_FALSE;
+ goto hxge_set_lb_exit;
+ }
+
+ lb_mode = *(uint32_t *)mp->b_rptr;
+ lb_info = NULL;
+
+ /* 10G is the only loopback mode for Hydra */
+ if (lb_mode == lb_normal.value)
+ lb_info = &lb_normal;
+ else if (lb_mode == lb_mac10g.value)
+ lb_info = &lb_mac10g;
+ else {
+ cmn_err(CE_NOTE,
+ "hxge%d: Loopback mode not supported(mode %d).\n",
+ hxgep->instance, lb_mode);
+ status = B_FALSE;
+ goto hxge_set_lb_exit;
+ }
+
+ if (lb_mode == hxge_lb_normal) {
+ if (hxge_lb_dbg) {
+ cmn_err(CE_NOTE,
+ "!hxge%d: Returning to normal operation",
+ hxgep->instance);
+ }
+
+ hxgep->statsp->port_stats.lb_mode = hxge_lb_normal;
+ hxge_global_reset(hxgep);
+
+ goto hxge_set_lb_exit;
+ }
+
+ hxgep->statsp->port_stats.lb_mode = lb_mode;
+
+ if (hxge_lb_dbg)
+ cmn_err(CE_NOTE, "!hxge%d: Adapter now in %s loopback mode",
+ hxgep->instance, lb_info->key);
+
+ if (lb_info->lb_type == internal) {
+ if (hxgep->statsp->port_stats.lb_mode == hxge_lb_mac10g)
+ hxgep->statsp->mac_stats.link_speed = 10000;
+ else {
+ cmn_err(CE_NOTE,
+ "hxge%d: Loopback mode not supported(mode %d).\n",
+ hxgep->instance, lb_mode);
+ status = B_FALSE;
+ goto hxge_set_lb_exit;
+ }
+ hxgep->statsp->mac_stats.link_duplex = 2;
+ hxgep->statsp->mac_stats.link_up = 1;
+ }
+
+ hxge_global_reset(hxgep);
+
+hxge_set_lb_exit:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "<== hxge_set_lb status = 0x%08x", status));
+
+ return (status);
+}
+
+void
+hxge_check_hw_state(p_hxge_t hxgep)
+{
+ p_hxge_ldgv_t ldgvp;
+ p_hxge_ldv_t t_ldvp;
+
+ HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "==> hxge_check_hw_state"));
+
+ MUTEX_ENTER(hxgep->genlock);
+
+ hxgep->hxge_timerid = 0;
+ if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
+ goto hxge_check_hw_state_exit;
+ }
+
+ hxge_check_tx_hang(hxgep);
+
+ ldgvp = hxgep->ldgvp;
+ if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
+ HXGE_ERROR_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state: "
+ "NULL ldgvp (interrupt not ready)."));
+ goto hxge_check_hw_state_exit;
+ }
+
+ t_ldvp = ldgvp->ldvp_syserr;
+ if (!t_ldvp->use_timer) {
+ HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state: "
+ "ldgvp $%p t_ldvp $%p use_timer flag %d",
+ ldgvp, t_ldvp, t_ldvp->use_timer));
+ goto hxge_check_hw_state_exit;
+ }
+
+ if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Bad register acc handle"));
+ }
+
+ (void) hxge_syserr_intr((caddr_t)t_ldvp, (caddr_t)hxgep);
+
+ hxgep->hxge_timerid = hxge_start_timer(hxgep, hxge_check_hw_state,
+ HXGE_CHECK_TIMER);
+
+hxge_check_hw_state_exit:
+ MUTEX_EXIT(hxgep->genlock);
+
+ HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state"));
+}
+
+/*ARGSUSED*/
+static void
+hxge_rtrace_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
+ struct iocblk *iocp)
+{
+ ssize_t size;
+ rtrace_t *rtp;
+ mblk_t *nmp;
+ uint32_t i, j;
+ uint32_t start_blk;
+ uint32_t base_entry;
+ uint32_t num_entries;
+
+ HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_rtrace_ioctl"));
+
+ size = 1024;
+ if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
+ HXGE_DEBUG_MSG((hxgep, STR_CTL,
+ "malformed M_IOCTL MBLKL = %d size = %d",
+ MBLKL(mp->b_cont), size));
+ miocnak(wq, mp, 0, EINVAL);
+ return;
+ }
+
+ nmp = mp->b_cont;
+ rtp = (rtrace_t *)nmp->b_rptr;
+ start_blk = rtp->next_idx;
+ num_entries = rtp->last_idx;
+ base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;
+
+ HXGE_DEBUG_MSG((hxgep, STR_CTL, "start_blk = %d\n", start_blk));
+ HXGE_DEBUG_MSG((hxgep, STR_CTL, "num_entries = %d\n", num_entries));
+ HXGE_DEBUG_MSG((hxgep, STR_CTL, "base_entry = %d\n", base_entry));
+
+ rtp->next_idx = hpi_rtracebuf.next_idx;
+ rtp->last_idx = hpi_rtracebuf.last_idx;
+ rtp->wrapped = hpi_rtracebuf.wrapped;
+ for (i = 0, j = base_entry; i < num_entries; i++, j++) {
+ rtp->buf[i].ctl_addr = hpi_rtracebuf.buf[j].ctl_addr;
+ rtp->buf[i].val_l32 = hpi_rtracebuf.buf[j].val_l32;
+ rtp->buf[i].val_h32 = hpi_rtracebuf.buf[j].val_h32;
+ }
+
+ nmp->b_wptr = nmp->b_rptr + size;
+ HXGE_DEBUG_MSG((hxgep, STR_CTL, "<== hxge_rtrace_ioctl"));
+ miocack(wq, mp, (int)size, 0);
+}
diff --git a/usr/src/uts/common/io/hxge/hxge_impl.h b/usr/src/uts/common/io/hxge/hxge_impl.h
new file mode 100644
index 0000000000..d36518e90f
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_impl.h
@@ -0,0 +1,487 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_IMPL_H
+#define _SYS_HXGE_HXGE_IMPL_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef _ASM
+#include <sys/types.h>
+#include <sys/byteorder.h>
+#include <sys/debug.h>
+#include <sys/stropts.h>
+#include <sys/stream.h>
+#include <sys/strlog.h>
+#include <sys/strsubr.h>
+#include <sys/cmn_err.h>
+#include <sys/vtrace.h>
+#include <sys/kmem.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/strsun.h>
+#include <sys/stat.h>
+#include <sys/cpu.h>
+#include <sys/kstat.h>
+#include <inet/common.h>
+#include <inet/ip.h>
+#include <inet/ip6.h>
+#include <sys/dlpi.h>
+#include <inet/nd.h>
+#include <netinet/in.h>
+#include <sys/ethernet.h>
+#include <sys/vlan.h>
+#include <sys/pci.h>
+#include <sys/taskq.h>
+#include <sys/atomic.h>
+
+#include <hxge_defs.h>
+#include <hxge_peu.h>
+#include <hxge_pfc.h>
+#include <hxge_pfc_hw.h>
+#include <hxge_vmac.h>
+#include <hxge_fm.h>
+#include <sys/netlb.h>
+#include <sys/ddi_intr.h>
+
+#include <sys/mac.h>
+#include <sys/mac_impl.h>
+#include <sys/mac_ether.h>
+
+/*
+ * Handy macros (taken from bge driver)
+ */
+#define RBR_SIZE 4
+#define DMA_COMMON_VPTR(area) ((area.kaddrp))
+#define DMA_COMMON_HANDLE(area) ((area.dma_handle))
+#define DMA_COMMON_ACC_HANDLE(area) ((area.acc_handle))
+#define DMA_COMMON_IOADDR(area) ((area.dma_cookie.dmac_laddress))
+#define DMA_COMMON_SYNC(area, flag) ((void) ddi_dma_sync((area).dma_handle,\
+ (area).offset, (area).alength, \
+ (flag)))
+#define DMA_COMMON_SYNC_OFFSET(area, bufoffset, len, flag) \
+ ((void) ddi_dma_sync((area).dma_handle,\
+ (area.offset + bufoffset), len, \
+ (flag)))
+
+#define NEXT_ENTRY(index, wrap) ((index + 1) & wrap)
+#define NEXT_ENTRY_PTR(ptr, first, last) \
+ ((ptr == last) ? first : (ptr + 1))
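+
+/*
+ * Example (illustrative): for a power-of-two ring of 8 entries, wrap is
+ * the ring size minus one, so NEXT_ENTRY(7, 7) evaluates to
+ * (7 + 1) & 7 == 0 and the index wraps back to the start of the ring.
+ */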
+
+/*
+ * HPI related macros
+ */
+#define HXGE_DEV_HPI_HANDLE(hxgep) (hxgep->hpi_handle)
+
+#define HPI_PCI_ACC_HANDLE_SET(hxgep, ah) (hxgep->hpi_pci_handle.regh = ah)
+#define HPI_PCI_ADD_HANDLE_SET(hxgep, ap) (hxgep->hpi_pci_handle.regp = ap)
+
+#define HPI_ACC_HANDLE_SET(hxgep, ah) (hxgep->hpi_handle.regh = ah)
+#define HPI_ADD_HANDLE_SET(hxgep, ap) \
+ hxgep->hpi_handle.is_vraddr = B_FALSE; \
+ hxgep->hpi_handle.function.instance = hxgep->instance; \
+ hxgep->hpi_handle.function.function = 0; \
+ hxgep->hpi_handle.hxgep = (void *) hxgep; \
+ hxgep->hpi_handle.regp = ap;
+
+#define HPI_REG_ACC_HANDLE_SET(hxgep, ah) (hxgep->hpi_reg_handle.regh = ah)
+#define HPI_REG_ADD_HANDLE_SET(hxgep, ap) \
+ hxgep->hpi_reg_handle.is_vraddr = B_FALSE; \
+ hxgep->hpi_handle.function.instance = hxgep->instance; \
+ hxgep->hpi_handle.function.function = 0; \
+ hxgep->hpi_reg_handle.hxgep = (void *) hxgep; \
+ hxgep->hpi_reg_handle.regp = ap;
+
+#define HPI_MSI_ACC_HANDLE_SET(hxgep, ah) (hxgep->hpi_msi_handle.regh = ah)
+#define HPI_MSI_ADD_HANDLE_SET(hxgep, ap) (hxgep->hpi_msi_handle.regp = ap)
+
+#define HPI_DMA_ACC_HANDLE_SET(dmap, ah) (dmap->hpi_handle.regh = ah)
+#define HPI_DMA_ACC_HANDLE_GET(dmap) (dmap->hpi_handle.regh)
+
+#define LDV_ON(ldv, vector) ((vector >> ldv) & 0x1)
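+
+/*
+ * Example (illustrative): with a flag vector of 0x9 (binary 1001),
+ * LDV_ON(0, 0x9) and LDV_ON(3, 0x9) are both 1, so logical devices 0
+ * and 3 have pending flags; hxge_intr() uses this test to decide which
+ * per-device interrupt handlers to call.
+ */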
+
+typedef uint32_t hxge_status_t;
+
+typedef enum {
+ DVMA,
+ DMA,
+ SDMA
+} dma_method_t;
+
+typedef enum {
+ BKSIZE_4K,
+ BKSIZE_8K,
+ BKSIZE_16K,
+ BKSIZE_32K
+} hxge_rx_block_size_t;
+
+#ifdef TX_ONE_BUF
+#define TX_BCOPY_MAX 1514
+#else
+#define TX_BCOPY_MAX 2048
+#define TX_BCOPY_SIZE 2048
+#endif
+
+#define TX_STREAM_MIN 512
+#define TX_FASTDVMA_MIN 1024
+
+#define HXGE_RDC_RCR_THRESHOLD_MAX 256
+#define HXGE_RDC_RCR_TIMEOUT_MAX 64
+#define HXGE_RDC_RCR_THRESHOLD_MIN 1
+#define HXGE_RDC_RCR_TIMEOUT_MIN 1
+
+#define HXGE_IS_VLAN_PACKET(ptr) \
+ ((((struct ether_vlan_header *)ptr)->ether_tpid) == \
+ htons(VLAN_ETHERTYPE))
+
+typedef enum {
+ USE_NONE,
+ USE_BCOPY,
+ USE_DVMA,
+ USE_DMA,
+ USE_SDMA
+} dma_type_t;
+
+struct _hxge_block_mv_t {
+ uint32_t msg_type;
+ dma_type_t dma_type;
+};
+
+typedef struct _hxge_block_mv_t hxge_block_mv_t, *p_hxge_block_mv_t;
+
+typedef struct ether_addr ether_addr_st, *p_ether_addr_t;
+typedef struct ether_header ether_header_t, *p_ether_header_t;
+typedef queue_t *p_queue_t;
+typedef mblk_t *p_mblk_t;
+
+/*
+ * Common DMA data elements.
+ */
+struct _hxge_dma_common_t {
+ uint16_t dma_channel;
+ void *kaddrp;
+ void *ioaddr_pp;
+ ddi_dma_cookie_t dma_cookie;
+ uint32_t ncookies;
+
+ ddi_dma_handle_t dma_handle;
+ hxge_os_acc_handle_t acc_handle;
+ hpi_handle_t hpi_handle;
+
+ size_t block_size;
+ uint32_t nblocks;
+ size_t alength;
+ uint_t offset;
+ uint_t dma_chunk_index;
+ void *orig_ioaddr_pp;
+ uint64_t orig_vatopa;
+ void *orig_kaddrp;
+ size_t orig_alength;
+ boolean_t contig_alloc_type;
+};
+
+typedef struct _hxge_t hxge_t, *p_hxge_t;
+typedef struct _hxge_dma_common_t hxge_dma_common_t, *p_hxge_dma_common_t;
+
+typedef struct _hxge_dma_pool_t {
+ p_hxge_dma_common_t *dma_buf_pool_p;
+ uint32_t ndmas;
+ uint32_t *num_chunks;
+ boolean_t buf_allocated;
+} hxge_dma_pool_t, *p_hxge_dma_pool_t;
+
+/*
+ * Each logical device (69):
+ * - LDG #
+ * - flag bits
+ * - masks.
+ * - interrupt handler function.
+ *
+ * Generic system interrupt handler with two arguments:
+ * (hxge_sys_intr_t)
+ * Per device instance data structure
+ * Logical group data structure.
+ *
+ * Logical device interrupt handler with two arguments:
+ * (hxge_ldv_intr_t)
+ * Per device instance data structure
+ * Logical device number
+ */
+typedef struct _hxge_ldg_t hxge_ldg_t, *p_hxge_ldg_t;
+typedef struct _hxge_ldv_t hxge_ldv_t, *p_hxge_ldv_t;
+typedef uint_t (*hxge_sys_intr_t)(caddr_t arg1, caddr_t arg2);
+typedef uint_t (*hxge_ldv_intr_t)(caddr_t arg1, caddr_t arg2);
+
+/*
+ * Each logical device Group (64) needs to have the following
+ * configurations:
+ * - timer counter (6 bits)
+ * - timer resolution (20 bits, number of system clocks)
+ * - system data (7 bits)
+ */
+struct _hxge_ldg_t {
+ uint8_t ldg; /* logical group number */
+ uint8_t vldg_index;
+ boolean_t arm;
+ boolean_t interrupted;
+ uint16_t ldg_timer; /* counter */
+ uint8_t vector;
+ uint8_t nldvs;
+ p_hxge_ldv_t ldvp;
+ hxge_sys_intr_t sys_intr_handler;
+ p_hxge_t hxgep;
+};
+
+struct _hxge_ldv_t {
+ uint8_t ldg_assigned;
+ uint8_t ldv;
+ boolean_t is_rxdma;
+ boolean_t is_txdma;
+ boolean_t is_vmac;
+ boolean_t is_syserr;
+ boolean_t is_pfc;
+ boolean_t use_timer;
+ uint8_t channel;
+ uint8_t vdma_index;
+ p_hxge_ldg_t ldgp;
+ uint8_t ldv_ldf_masks;
+ hxge_ldv_intr_t ldv_intr_handler;
+ p_hxge_t hxgep;
+};
+
+typedef struct _pci_cfg_t {
+ uint16_t vendorid;
+ uint16_t devid;
+ uint16_t command;
+ uint16_t status;
+ uint8_t revid;
+ uint8_t res0;
+ uint16_t junk1;
+ uint8_t cache_line;
+ uint8_t latency;
+ uint8_t header;
+ uint8_t bist;
+ uint32_t base;
+ uint32_t base14;
+ uint32_t base18;
+ uint32_t base1c;
+ uint32_t base20;
+ uint32_t base24;
+ uint32_t base28;
+ uint32_t base2c;
+ uint32_t base30;
+ uint32_t res1[2];
+ uint8_t int_line;
+ uint8_t int_pin;
+ uint8_t min_gnt;
+ uint8_t max_lat;
+} pci_cfg_t, *p_pci_cfg_t;
+
+typedef struct _dev_regs_t {
+ hxge_os_acc_handle_t hxge_pciregh; /* PCI config DDI IO handle */
+ p_pci_cfg_t hxge_pciregp; /* mapped PCI registers */
+
+ hxge_os_acc_handle_t hxge_regh; /* device DDI IO (BAR 0) */
+ void *hxge_regp; /* mapped device registers */
+
+ hxge_os_acc_handle_t hxge_msix_regh; /* MSI/X DDI handle (BAR 2) */
+ void *hxge_msix_regp; /* MSI/X register */
+
+ hxge_os_acc_handle_t hxge_romh; /* fcode rom handle */
+ unsigned char *hxge_romp; /* fcode pointer */
+} dev_regs_t, *p_dev_regs_t;
+
+typedef struct _hxge_mac_addr_t {
+ ether_addr_t addr;
+ uint_t flags;
+} hxge_mac_addr_t;
+
+/*
+ * Driver alternate mac address structure.
+ */
+typedef struct _hxge_mmac_t {
+ uint8_t total_factory_macs;
+ uint8_t num_mmac;
+ uint8_t num_factory_mmac;
+ hxge_mac_addr_t mac_pool[16];
+ ether_addr_t factory_mac_pool[16];
+ uint8_t naddrfree; /* number of alt mac addr available */
+} hxge_mmac_t;
+
+/*
+ * mmac stats structure
+ */
+typedef struct _hxge_mmac_stats_t {
+ uint8_t mmac_max_cnt;
+ uint8_t mmac_avail_cnt;
+ struct ether_addr mmac_avail_pool[16];
+} hxge_mmac_stats_t, *p_hxge_mmac_stats_t;
+
+#include <hxge_common_impl.h>
+#include <hxge_common.h>
+#include <hxge_rxdma.h>
+#include <hxge_txdma.h>
+#include <hxge_fzc.h>
+#include <hxge_flow.h>
+#include <hxge_virtual.h>
+#include <hxge.h>
+#include <sys/modctl.h>
+#include <sys/pattr.h>
+#include <hpi_vir.h>
+
+/*
+ * Reconfiguring the network devices requires the net_config privilege
+ * in Solaris 10+. Prior to this, root privilege is required. In order
+ * that the driver binary can run on both S10+ and earlier versions, we
+ * make the decision as to which to use at runtime. These declarations
+ * allow for either (or both) to exist ...
+ */
+extern int secpolicy_net_config(const cred_t *, boolean_t);
+extern void hxge_fm_report_error(p_hxge_t hxgep,
+ uint8_t err_chan, hxge_fm_ereport_id_t fm_ereport_id);
+extern int fm_check_acc_handle(ddi_acc_handle_t);
+extern int fm_check_dma_handle(ddi_dma_handle_t);
+
+#pragma weak secpolicy_net_config
+
+hxge_status_t hxge_classify_init(p_hxge_t hxgep);
+hxge_status_t hxge_classify_uninit(p_hxge_t hxgep);
+void hxge_put_tcam(p_hxge_t hxgep, p_mblk_t mp);
+void hxge_get_tcam(p_hxge_t hxgep, p_mblk_t mp);
+
+hxge_status_t hxge_classify_init_hw(p_hxge_t hxgep);
+hxge_status_t hxge_classify_init_sw(p_hxge_t hxgep);
+hxge_status_t hxge_classify_exit_sw(p_hxge_t hxgep);
+hxge_status_t hxge_pfc_ip_class_config_all(p_hxge_t hxgep);
+hxge_status_t hxge_pfc_ip_class_config(p_hxge_t hxgep, tcam_class_t l3_class,
+ uint32_t class_config);
+hxge_status_t hxge_pfc_ip_class_config_get(p_hxge_t hxgep,
+ tcam_class_t l3_class, uint32_t *class_config);
+
+hxge_status_t hxge_pfc_set_hash(p_hxge_t, uint32_t);
+hxge_status_t hxge_pfc_config_tcam_enable(p_hxge_t);
+hxge_status_t hxge_pfc_config_tcam_disable(p_hxge_t);
+hxge_status_t hxge_pfc_ip_class_config(p_hxge_t, tcam_class_t, uint32_t);
+hxge_status_t hxge_pfc_ip_class_config_get(p_hxge_t, tcam_class_t, uint32_t *);
+hxge_status_t hxge_pfc_num_macs_get(p_hxge_t, uint32_t *);
+hxge_status_t hxge_pfc_mac_addrs_get(p_hxge_t hxgep);
+
+
+hxge_status_t hxge_pfc_hw_reset(p_hxge_t hxgep);
+hxge_status_t hxge_pfc_handle_sys_errors(p_hxge_t hxgep);
+
+/* hxge_kstats.c */
+void hxge_init_statsp(p_hxge_t);
+void hxge_setup_kstats(p_hxge_t);
+void hxge_destroy_kstats(p_hxge_t);
+int hxge_port_kstat_update(kstat_t *, int);
+
+int hxge_m_stat(void *arg, uint_t stat, uint64_t *val);
+
+/* hxge_hw.c */
+void hxge_hw_ioctl(p_hxge_t, queue_t *, mblk_t *, struct iocblk *);
+void hxge_loopback_ioctl(p_hxge_t, queue_t *, mblk_t *, struct iocblk *);
+uint_t hxge_intr(caddr_t arg1, caddr_t arg2);
+void hxge_intr_enable(p_hxge_t hxgep);
+void hxge_intr_disable(p_hxge_t hxgep);
+void hxge_hw_id_init(p_hxge_t hxgep);
+void hxge_hw_init_niu_common(p_hxge_t hxgep);
+void hxge_intr_hw_enable(p_hxge_t hxgep);
+void hxge_intr_hw_disable(p_hxge_t hxgep);
+void hxge_hw_stop(p_hxge_t hxgep);
+void hxge_global_reset(p_hxge_t hxgep);
+void hxge_check_hw_state(p_hxge_t hxgep);
+
+/* hxge_send.c. */
+uint_t hxge_reschedule(caddr_t arg);
+
+/* hxge_ndd.c */
+void hxge_get_param_soft_properties(p_hxge_t);
+void hxge_setup_param(p_hxge_t);
+void hxge_init_param(p_hxge_t);
+void hxge_destroy_param(p_hxge_t);
+boolean_t hxge_check_rxdma_port_member(p_hxge_t, uint8_t);
+boolean_t hxge_check_txdma_port_member(p_hxge_t, uint8_t);
+int hxge_param_get_generic(p_hxge_t, queue_t *, mblk_t *, caddr_t);
+int hxge_param_set_generic(p_hxge_t, queue_t *, mblk_t *, char *, caddr_t);
+int hxge_get_default(p_hxge_t, queue_t *, p_mblk_t, caddr_t);
+int hxge_set_default(p_hxge_t, queue_t *, p_mblk_t, char *, caddr_t);
+int hxge_nd_get_names(p_hxge_t, queue_t *, p_mblk_t, caddr_t);
+int hxge_mk_mblk_tail_space(p_mblk_t mp, p_mblk_t *nmp, size_t size);
+void hxge_param_ioctl(p_hxge_t hxgep, queue_t *, mblk_t *, struct iocblk *);
+boolean_t hxge_nd_load(caddr_t *, char *, pfi_t, pfi_t, caddr_t);
+void hxge_nd_free(caddr_t *);
+int hxge_nd_getset(p_hxge_t, queue_t *, caddr_t, p_mblk_t);
+boolean_t hxge_set_lb(p_hxge_t, queue_t *wq, p_mblk_t mp);
+
+/* hxge_virtual.c */
+hxge_status_t hxge_get_config_properties(p_hxge_t);
+hxge_status_t hxge_init_fzc_txdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_tx_ring_t tx_ring_p, p_tx_mbox_t mbox_p);
+hxge_status_t hxge_init_fzc_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p);
+hxge_status_t hxge_init_fzc_rx_common(p_hxge_t hxgep);
+hxge_status_t hxge_init_fzc_rxdma_channel_pages(p_hxge_t hxgep,
+ uint16_t channel, p_rx_rbr_ring_t rbr_p);
+hxge_status_t hxge_init_fzc_txdma_channel_pages(p_hxge_t hxgep,
+ uint16_t channel, p_tx_ring_t tx_ring_p);
+hxge_status_t hxge_intr_mask_mgmt_set(p_hxge_t hxgep, boolean_t on);
+
+/* MAC functions */
+hxge_status_t hxge_vmac_init(p_hxge_t hxgep);
+hxge_status_t hxge_link_init(p_hxge_t hxgep);
+hxge_status_t hxge_tx_vmac_init(p_hxge_t hxgep);
+hxge_status_t hxge_rx_vmac_init(p_hxge_t hxgep);
+hxge_status_t hxge_tx_vmac_enable(p_hxge_t hxgep);
+hxge_status_t hxge_tx_vmac_disable(p_hxge_t hxgep);
+hxge_status_t hxge_rx_vmac_enable(p_hxge_t hxgep);
+hxge_status_t hxge_rx_vmac_disable(p_hxge_t hxgep);
+hxge_status_t hxge_tx_vmac_reset(p_hxge_t hxgep);
+hxge_status_t hxge_rx_vmac_reset(p_hxge_t hxgep);
+hxge_status_t hxge_add_mcast_addr(p_hxge_t, struct ether_addr *);
+hxge_status_t hxge_del_mcast_addr(p_hxge_t, struct ether_addr *);
+hxge_status_t hxge_set_mac_addr(p_hxge_t hxgep, struct ether_addr *addr);
+hxge_status_t hxge_set_promisc(p_hxge_t hxgep, boolean_t on);
+void hxge_save_cntrs(p_hxge_t hxgep);
+
+void hxge_debug_msg(p_hxge_t, uint64_t, char *, ...);
+
+#ifdef HXGE_DEBUG
+char *hxge_dump_packet(char *addr, int size);
+#endif
+
+#endif /* !_ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_IMPL_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_kstats.c b/usr/src/uts/common/io/hxge/hxge_kstats.c
new file mode 100644
index 0000000000..08de219c52
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_kstats.c
@@ -0,0 +1,1305 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hxge_impl.h>
+#include <inet/mi.h>
+#include <sys/cmn_err.h>
+
+#define RDC_NAME_FORMAT1 "RDC_"
+#define TDC_NAME_FORMAT1 "TDC_"
+#define CH_NAME_FORMAT "%d"
+
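+/*
+ * Per-channel kstats are named by pasting one of the prefixes above onto
+ * a decimal channel number (e.g. "RDC_0", "TDC_3").  The per-channel
+ * update callbacks below recover the channel by skipping the prefix and
+ * parsing the remainder with mi_strtol().
+ */
+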
+void
+hxge_init_statsp(p_hxge_t hxgep)
+{
+ size_t stats_size;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_init_statsp"));
+
+ stats_size = sizeof (hxge_stats_t);
+ hxgep->statsp = KMEM_ZALLOC(stats_size, KM_SLEEP);
+ hxgep->statsp->stats_size = stats_size;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, " <== hxge_init_statsp"));
+}
+
+typedef struct {
+ uint8_t index;
+ uint8_t type;
+ char *name;
+} hxge_kstat_index_t;
+
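+/*
+ * Each statistics table below pairs an index with a kstat data type and a
+ * name.  hxge_setup_local_kstat() walks a table in order, calling
+ * kstat_named_init() for each entry until it reaches the terminating NULL
+ * name, so the array order (not the index field) determines the kstat
+ * layout.  A minimal sketch with a hypothetical "foo" counter:
+ *
+ *	hxge_kstat_index_t foo_stats[] = {
+ *		{FOO_COUNT, KSTAT_DATA_UINT64, "foo_count"},
+ *		{FOO_END, NULL, NULL}
+ *	};
+ */
+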
+typedef enum {
+ RDC_STAT_PACKETS = 0,
+ RDC_STAT_BYTES,
+ RDC_STAT_ERRORS,
+ RDC_STAT_JUMBO_PKTS,
+ RDC_STAT_RCR_UNKNOWN_ERR,
+ RDC_STAT_RCR_SHA_PAR_ERR,
+ RDC_STAT_RBR_PRE_PAR_ERR,
+ RDC_STAT_RBR_PRE_EMTY,
+ RDC_STAT_RCR_SHADOW_FULL,
+ RDC_STAT_RBR_TMOUT,
+ RDC_STAT_PEU_RESP_ERR,
+ RDC_STAT_CTRL_FIFO_ECC_ERR,
+ RDC_STAT_DATA_FIFO_ECC_ERR,
+ RDC_STAT_RCRFULL,
+ RDC_STAT_RBR_EMPTY,
+ RDC_STAT_RBR_FULL,
+ RDC_STAT_RCRTO,
+ RDC_STAT_RCRTHRES,
+ RDC_STAT_END
+} hxge_rdc_stat_index_t;
+
+hxge_kstat_index_t hxge_rdc_stats[] = {
+ {RDC_STAT_PACKETS, KSTAT_DATA_UINT64, "rdc_packets"},
+ {RDC_STAT_BYTES, KSTAT_DATA_UINT64, "rdc_bytes"},
+ {RDC_STAT_JUMBO_PKTS, KSTAT_DATA_ULONG, "rdc_jumbo_pkts"},
+ {RDC_STAT_RCR_UNKNOWN_ERR, KSTAT_DATA_ULONG, "rdc_rcr_unknown_err"},
+ {RDC_STAT_ERRORS, KSTAT_DATA_ULONG, "rdc_errors"},
+ {RDC_STAT_RCR_SHA_PAR_ERR, KSTAT_DATA_ULONG, "rdc_rcr_sha_par_err"},
+ {RDC_STAT_RBR_PRE_PAR_ERR, KSTAT_DATA_ULONG, "rdc_rbr_pre_par_err"},
+ {RDC_STAT_RBR_PRE_EMTY, KSTAT_DATA_ULONG, "rdc_rbr_pre_empty"},
+ {RDC_STAT_RCR_SHADOW_FULL, KSTAT_DATA_ULONG, "rdc_rcr_shadow_full"},
+ {RDC_STAT_RBR_TMOUT, KSTAT_DATA_ULONG, "rdc_rbr_tmout"},
+ {RDC_STAT_PEU_RESP_ERR, KSTAT_DATA_ULONG, "peu_resp_err"},
+ {RDC_STAT_CTRL_FIFO_ECC_ERR, KSTAT_DATA_ULONG, "ctrl_fifo_ecc_err"},
+ {RDC_STAT_DATA_FIFO_ECC_ERR, KSTAT_DATA_ULONG, "data_fifo_ecc_err"},
+ {RDC_STAT_RCRFULL, KSTAT_DATA_ULONG, "rdc_rcrfull"},
+ {RDC_STAT_RBR_EMPTY, KSTAT_DATA_ULONG, "rdc_rbr_empty"},
+ {RDC_STAT_RBR_FULL, KSTAT_DATA_ULONG, "rdc_rbrfull"},
+ {RDC_STAT_RCRTO, KSTAT_DATA_ULONG, "rdc_rcrto"},
+ {RDC_STAT_RCRTHRES, KSTAT_DATA_ULONG, "rdc_rcrthres"},
+ {RDC_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+ RDC_SYS_STAT_CTRL_FIFO_SEC = 0,
+ RDC_SYS_STAT_CTRL_FIFO_DED,
+ RDC_SYS_STAT_DATA_FIFO_SEC,
+ RDC_SYS_STAT_DATA_FIFO_DED,
+ RDC_SYS_STAT_END
+} hxge_rdc_sys_stat_idx_t;
+
+hxge_kstat_index_t hxge_rdc_sys_stats[] = {
+ {RDC_SYS_STAT_CTRL_FIFO_SEC, KSTAT_DATA_UINT64, "rdc_ctrl_fifo_sec"},
+ {RDC_SYS_STAT_CTRL_FIFO_DED, KSTAT_DATA_UINT64, "rdc_ctrl_fifo_ded"},
+ {RDC_SYS_STAT_DATA_FIFO_SEC, KSTAT_DATA_UINT64, "rdc_data_fifo_sec"},
+	{RDC_SYS_STAT_DATA_FIFO_DED, KSTAT_DATA_UINT64, "rdc_data_fifo_ded"},
+ {RDC_SYS_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+ TDC_STAT_PACKETS = 0,
+ TDC_STAT_BYTES,
+ TDC_STAT_BYTES_WITH_PAD,
+ TDC_STAT_ERRORS,
+ TDC_STAT_TX_INITS,
+ TDC_STAT_TX_NO_BUF,
+ TDC_STAT_PEU_RESP_ERR,
+ TDC_STAT_PKT_SIZE_ERR,
+ TDC_STAT_TX_RNG_OFLOW,
+ TDC_STAT_PKT_SIZE_HDR_ERR,
+ TDC_STAT_RUNT_PKT_DROP_ERR,
+ TDC_STAT_PREF_PAR_ERR,
+ TDC_STAT_TDR_PREF_CPL_TO,
+ TDC_STAT_PKT_CPL_TO,
+ TDC_STAT_INVALID_SOP,
+ TDC_STAT_UNEXPECTED_SOP,
+ TDC_STAT_COUNT_HDR_SIZE_ERR,
+ TDC_STAT_COUNT_RUNT,
+ TDC_STAT_COUNT_ABORT,
+ TDC_STAT_TX_STARTS,
+ TDC_STAT_TX_NO_DESC,
+ TDC_STAT_TX_DMA_BIND_FAIL,
+ TDC_STAT_TX_HDR_PKTS,
+ TDC_STAT_TX_DDI_PKTS,
+ TDC_STAT_TX_JUMBO_PKTS,
+ TDC_STAT_TX_MAX_PEND,
+ TDC_STAT_TX_MARKS,
+ TDC_STAT_END
+} hxge_tdc_stats_index_t;
+
+hxge_kstat_index_t hxge_tdc_stats[] = {
+ {TDC_STAT_PACKETS, KSTAT_DATA_UINT64, "tdc_packets"},
+ {TDC_STAT_BYTES, KSTAT_DATA_UINT64, "tdc_bytes"},
+ {TDC_STAT_BYTES_WITH_PAD, KSTAT_DATA_UINT64, "tdc_bytes_with_pad"},
+ {TDC_STAT_ERRORS, KSTAT_DATA_UINT64, "tdc_errors"},
+ {TDC_STAT_TX_INITS, KSTAT_DATA_ULONG, "tdc_tx_inits"},
+ {TDC_STAT_TX_NO_BUF, KSTAT_DATA_ULONG, "tdc_tx_no_buf"},
+
+ {TDC_STAT_PEU_RESP_ERR, KSTAT_DATA_ULONG, "tdc_peu_resp_err"},
+ {TDC_STAT_PKT_SIZE_ERR, KSTAT_DATA_ULONG, "tdc_pkt_size_err"},
+ {TDC_STAT_TX_RNG_OFLOW, KSTAT_DATA_ULONG, "tdc_tx_rng_oflow"},
+ {TDC_STAT_PKT_SIZE_HDR_ERR, KSTAT_DATA_ULONG, "tdc_pkt_size_hdr_err"},
+ {TDC_STAT_RUNT_PKT_DROP_ERR, KSTAT_DATA_ULONG, "tdc_runt_pkt_drop_err"},
+ {TDC_STAT_PREF_PAR_ERR, KSTAT_DATA_ULONG, "tdc_pref_par_err"},
+ {TDC_STAT_TDR_PREF_CPL_TO, KSTAT_DATA_ULONG, "tdc_tdr_pref_cpl_to"},
+ {TDC_STAT_PKT_CPL_TO, KSTAT_DATA_ULONG, "tdc_pkt_cpl_to"},
+ {TDC_STAT_INVALID_SOP, KSTAT_DATA_ULONG, "tdc_invalid_sop"},
+ {TDC_STAT_UNEXPECTED_SOP, KSTAT_DATA_ULONG, "tdc_unexpected_sop"},
+
+ {TDC_STAT_COUNT_HDR_SIZE_ERR, KSTAT_DATA_ULONG,
+ "tdc_count_hdr_size_err"},
+ {TDC_STAT_COUNT_RUNT, KSTAT_DATA_ULONG, "tdc_count_runt"},
+ {TDC_STAT_COUNT_ABORT, KSTAT_DATA_ULONG, "tdc_count_abort"},
+
+ {TDC_STAT_TX_STARTS, KSTAT_DATA_ULONG, "tdc_tx_starts"},
+ {TDC_STAT_TX_NO_DESC, KSTAT_DATA_ULONG, "tdc_tx_no_desc"},
+ {TDC_STAT_TX_DMA_BIND_FAIL, KSTAT_DATA_ULONG, "tdc_tx_dma_bind_fail"},
+ {TDC_STAT_TX_HDR_PKTS, KSTAT_DATA_ULONG, "tdc_tx_hdr_pkts"},
+ {TDC_STAT_TX_DDI_PKTS, KSTAT_DATA_ULONG, "tdc_tx_ddi_pkts"},
+ {TDC_STAT_TX_JUMBO_PKTS, KSTAT_DATA_ULONG, "tdc_tx_jumbo_pkts"},
+ {TDC_STAT_TX_MAX_PEND, KSTAT_DATA_ULONG, "tdc_tx_max_pend"},
+ {TDC_STAT_TX_MARKS, KSTAT_DATA_ULONG, "tdc_tx_marks"},
+ {TDC_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+ REORD_TBL_PAR_ERR = 0,
+ REORD_BUF_DED_ERR,
+ REORD_BUF_SEC_ERR,
+ TDC_SYS_STAT_END
+} hxge_tdc_sys_stat_idx_t;
+
+hxge_kstat_index_t hxge_tdc_sys_stats[] = {
+ {REORD_TBL_PAR_ERR, KSTAT_DATA_UINT64, "reord_tbl_par_err"},
+ {REORD_BUF_DED_ERR, KSTAT_DATA_UINT64, "reord_buf_ded_err"},
+ {REORD_BUF_SEC_ERR, KSTAT_DATA_UINT64, "reord_buf_sec_err"},
+ {TDC_SYS_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+ VMAC_STAT_TX_FRAME_CNT, /* vmac_tx_frame_cnt_t */
+ VMAC_STAT_TX_BYTE_CNT, /* vmac_tx_byte_cnt_t */
+
+ VMAC_STAT_RX_FRAME_CNT, /* vmac_rx_frame_cnt_t */
+ VMAC_STAT_RX_BYTE_CNT, /* vmac_rx_byte_cnt_t */
+ VMAC_STAT_RX_DROP_FRAME_CNT, /* vmac_rx_drop_fr_cnt_t */
+ VMAC_STAT_RX_DROP_BYTE_CNT, /* vmac_rx_drop_byte_cnt_t */
+ VMAC_STAT_RX_CRC_CNT, /* vmac_rx_crc_cnt_t */
+ VMAC_STAT_RX_PAUSE_CNT, /* vmac_rx_pause_cnt_t */
+ VMAC_STAT_RX_BCAST_FR_CNT, /* vmac_rx_bcast_fr_cnt_t */
+ VMAC_STAT_RX_MCAST_FR_CNT, /* vmac_rx_mcast_fr_cnt_t */
+ VMAC_STAT_END
+} hxge_vmac_stat_index_t;
+
+hxge_kstat_index_t hxge_vmac_stats[] = {
+ {VMAC_STAT_TX_FRAME_CNT, KSTAT_DATA_ULONG, "vmac_tx_frame_cnt"},
+ {VMAC_STAT_TX_BYTE_CNT, KSTAT_DATA_ULONG, "vmac_tx_byte_cnt"},
+
+ {VMAC_STAT_RX_FRAME_CNT, KSTAT_DATA_ULONG, "vmac_rx_frame_cnt"},
+ {VMAC_STAT_RX_BYTE_CNT, KSTAT_DATA_ULONG, "vmac_rx_byte_cnt"},
+ {VMAC_STAT_RX_DROP_FRAME_CNT, KSTAT_DATA_ULONG,
+ "vmac_rx_drop_frame_cnt"},
+ {VMAC_STAT_RX_DROP_BYTE_CNT, KSTAT_DATA_ULONG, "vmac_rx_drop_byte_cnt"},
+ {VMAC_STAT_RX_CRC_CNT, KSTAT_DATA_ULONG, "vmac_rx_crc_cnt"},
+ {VMAC_STAT_RX_PAUSE_CNT, KSTAT_DATA_ULONG, "vmac_rx_pause_cnt"},
+ {VMAC_STAT_RX_BCAST_FR_CNT, KSTAT_DATA_ULONG, "vmac_rx_bcast_fr_cnt"},
+ {VMAC_STAT_RX_MCAST_FR_CNT, KSTAT_DATA_ULONG, "vmac_rx_mcast_fr_cnt"},
+ {VMAC_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+ PFC_STAT_PKT_DROP,
+ PFC_STAT_TCAM_PARITY_ERR,
+ PFC_STAT_VLAN_PARITY_ERR,
+ PFC_STAT_BAD_CS_COUNT,
+ PFC_STAT_DROP_COUNT,
+ PFC_STAT_TCP_CTRL_DROP,
+ PFC_STAT_L2_ADDR_DROP,
+ PFC_STAT_CLASS_CODE_DROP,
+ PFC_STAT_TCAM_DROP,
+ PFC_STAT_VLAN_DROP,
+ PFC_STAT_END
+} hxge_pfc_stat_index_t;
+
+hxge_kstat_index_t hxge_pfc_stats[] = {
+ {PFC_STAT_PKT_DROP, KSTAT_DATA_ULONG, "pfc_pkt_drop"},
+ {PFC_STAT_TCAM_PARITY_ERR, KSTAT_DATA_ULONG, "pfc_tcam_parity_err"},
+ {PFC_STAT_VLAN_PARITY_ERR, KSTAT_DATA_ULONG, "pfc_vlan_parity_err"},
+ {PFC_STAT_BAD_CS_COUNT, KSTAT_DATA_ULONG, "pfc_bad_cs_count"},
+ {PFC_STAT_DROP_COUNT, KSTAT_DATA_ULONG, "pfc_drop_count"},
+	{PFC_STAT_TCP_CTRL_DROP, KSTAT_DATA_ULONG, "pfc_pkt_drop_tcp_ctrl"},
+	{PFC_STAT_L2_ADDR_DROP, KSTAT_DATA_ULONG, "pfc_pkt_drop_l2_addr"},
+	{PFC_STAT_CLASS_CODE_DROP, KSTAT_DATA_ULONG,
+	    "pfc_pkt_drop_class_code"},
+	{PFC_STAT_TCAM_DROP, KSTAT_DATA_ULONG, "pfc_pkt_drop_tcam"},
+	{PFC_STAT_VLAN_DROP, KSTAT_DATA_ULONG, "pfc_pkt_drop_vlan"},
+ {PFC_STAT_END, NULL, NULL}
+};
+
+typedef enum {
+ MMAC_MAX_ADDR,
+ MMAC_AVAIL_ADDR,
+ MMAC_ADDR_POOL1,
+ MMAC_ADDR_POOL2,
+ MMAC_ADDR_POOL3,
+ MMAC_ADDR_POOL4,
+ MMAC_ADDR_POOL5,
+ MMAC_ADDR_POOL6,
+ MMAC_ADDR_POOL7,
+ MMAC_ADDR_POOL8,
+ MMAC_ADDR_POOL9,
+ MMAC_ADDR_POOL10,
+ MMAC_ADDR_POOL11,
+ MMAC_ADDR_POOL12,
+ MMAC_ADDR_POOL13,
+ MMAC_ADDR_POOL14,
+ MMAC_ADDR_POOL15,
+ MMAC_ADDR_POOL16,
+ MMAC_STATS_END
+} hxge_mmac_stat_index_t;
+
+hxge_kstat_index_t hxge_mmac_stats[] = {
+ {MMAC_MAX_ADDR, KSTAT_DATA_UINT64, "max_mmac_addr"},
+ {MMAC_AVAIL_ADDR, KSTAT_DATA_UINT64, "avail_mmac_addr"},
+ {MMAC_ADDR_POOL1, KSTAT_DATA_UINT64, "mmac_addr_1"},
+ {MMAC_ADDR_POOL2, KSTAT_DATA_UINT64, "mmac_addr_2"},
+ {MMAC_ADDR_POOL3, KSTAT_DATA_UINT64, "mmac_addr_3"},
+ {MMAC_ADDR_POOL4, KSTAT_DATA_UINT64, "mmac_addr_4"},
+ {MMAC_ADDR_POOL5, KSTAT_DATA_UINT64, "mmac_addr_5"},
+ {MMAC_ADDR_POOL6, KSTAT_DATA_UINT64, "mmac_addr_6"},
+ {MMAC_ADDR_POOL7, KSTAT_DATA_UINT64, "mmac_addr_7"},
+ {MMAC_ADDR_POOL8, KSTAT_DATA_UINT64, "mmac_addr_8"},
+ {MMAC_ADDR_POOL9, KSTAT_DATA_UINT64, "mmac_addr_9"},
+ {MMAC_ADDR_POOL10, KSTAT_DATA_UINT64, "mmac_addr_10"},
+ {MMAC_ADDR_POOL11, KSTAT_DATA_UINT64, "mmac_addr_11"},
+ {MMAC_ADDR_POOL12, KSTAT_DATA_UINT64, "mmac_addr_12"},
+ {MMAC_ADDR_POOL13, KSTAT_DATA_UINT64, "mmac_addr_13"},
+ {MMAC_ADDR_POOL14, KSTAT_DATA_UINT64, "mmac_addr_14"},
+ {MMAC_ADDR_POOL15, KSTAT_DATA_UINT64, "mmac_addr_15"},
+ {MMAC_ADDR_POOL16, KSTAT_DATA_UINT64, "mmac_addr_16"},
+ {MMAC_STATS_END, NULL, NULL},
+};
+
+typedef enum {
+ SPC_ACC_ERR = 0,
+ TDC_PIOACC_ERR,
+ RDC_PIOACC_ERR,
+ PFC_PIOACC_ERR,
+ VMAC_PIOACC_ERR,
+ CPL_HDRQ_PARERR,
+ CPL_DATAQ_PARERR,
+ RETRYRAM_XDLH_PARERR,
+ RETRYSOTRAM_XDLH_PARERR,
+ P_HDRQ_PARERR,
+ P_DATAQ_PARERR,
+ NP_HDRQ_PARERR,
+ NP_DATAQ_PARERR,
+ EIC_MSIX_PARERR,
+ HCR_PARERR,
+ PEU_SYS_STAT_END
+} hxge_peu_sys_stat_idx_t;
+
+hxge_kstat_index_t hxge_peu_sys_stats[] = {
+ {SPC_ACC_ERR, KSTAT_DATA_UINT64, "spc_acc_err"},
+ {TDC_PIOACC_ERR, KSTAT_DATA_UINT64, "tdc_pioacc_err"},
+ {RDC_PIOACC_ERR, KSTAT_DATA_UINT64, "rdc_pioacc_err"},
+ {PFC_PIOACC_ERR, KSTAT_DATA_UINT64, "pfc_pioacc_err"},
+ {VMAC_PIOACC_ERR, KSTAT_DATA_UINT64, "vmac_pioacc_err"},
+ {CPL_HDRQ_PARERR, KSTAT_DATA_UINT64, "cpl_hdrq_parerr"},
+ {CPL_DATAQ_PARERR, KSTAT_DATA_UINT64, "cpl_dataq_parerr"},
+ {RETRYRAM_XDLH_PARERR, KSTAT_DATA_UINT64, "retryram_xdlh_parerr"},
+ {RETRYSOTRAM_XDLH_PARERR, KSTAT_DATA_UINT64, "retrysotram_xdlh_parerr"},
+ {P_HDRQ_PARERR, KSTAT_DATA_UINT64, "p_hdrq_parerr"},
+ {P_DATAQ_PARERR, KSTAT_DATA_UINT64, "p_dataq_parerr"},
+ {NP_HDRQ_PARERR, KSTAT_DATA_UINT64, "np_hdrq_parerr"},
+ {NP_DATAQ_PARERR, KSTAT_DATA_UINT64, "np_dataq_parerr"},
+ {EIC_MSIX_PARERR, KSTAT_DATA_UINT64, "eic_msix_parerr"},
+ {HCR_PARERR, KSTAT_DATA_UINT64, "hcr_parerr"},
+	{PEU_SYS_STAT_END, NULL, NULL}
+};
+
+/* ARGSUSED */
+int
+hxge_tdc_stat_update(kstat_t *ksp, int rw)
+{
+ p_hxge_t hxgep;
+ p_hxge_tdc_kstat_t tdc_kstatsp;
+ p_hxge_tx_ring_stats_t statsp;
+ int channel;
+ char *ch_name, *end;
+
+ hxgep = (p_hxge_t)ksp->ks_private;
+ if (hxgep == NULL)
+ return (-1);
+	HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_tdc_stat_update"));
+
+ ch_name = ksp->ks_name;
+ ch_name += strlen(TDC_NAME_FORMAT1);
+ channel = mi_strtol(ch_name, &end, 10);
+
+ tdc_kstatsp = (p_hxge_tdc_kstat_t)ksp->ks_data;
+ statsp = (p_hxge_tx_ring_stats_t)&hxgep->statsp->tdc_stats[channel];
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL,
+ "hxge_tdc_stat_update data $%p statsp $%p channel %d",
+ ksp->ks_data, statsp, channel));
+
+ tdc_kstatsp->opackets.value.ull = statsp->opackets;
+ tdc_kstatsp->obytes.value.ull = statsp->obytes;
+ tdc_kstatsp->obytes_with_pad.value.ull = statsp->obytes_with_pad;
+ tdc_kstatsp->oerrors.value.ull = statsp->oerrors;
+ tdc_kstatsp->tx_hdr_pkts.value.ull = statsp->tx_hdr_pkts;
+ tdc_kstatsp->tx_ddi_pkts.value.ull = statsp->tx_ddi_pkts;
+ tdc_kstatsp->tx_jumbo_pkts.value.ull = statsp->tx_jumbo_pkts;
+ tdc_kstatsp->tx_max_pend.value.ull = statsp->tx_max_pend;
+ tdc_kstatsp->peu_resp_err.value.ul = statsp->peu_resp_err;
+ tdc_kstatsp->pkt_size_err.value.ul = statsp->pkt_size_err;
+ tdc_kstatsp->tx_rng_oflow.value.ul = statsp->tx_rng_oflow;
+ tdc_kstatsp->pkt_size_hdr_err.value.ul = statsp->pkt_size_hdr_err;
+ tdc_kstatsp->runt_pkt_drop_err.value.ul = statsp->runt_pkt_drop_err;
+ tdc_kstatsp->pref_par_err.value.ul = statsp->pref_par_err;
+ tdc_kstatsp->tdr_pref_cpl_to.value.ul = statsp->tdr_pref_cpl_to;
+ tdc_kstatsp->pkt_cpl_to.value.ul = statsp->pkt_cpl_to;
+ tdc_kstatsp->invalid_sop.value.ul = statsp->invalid_sop;
+ tdc_kstatsp->unexpected_sop.value.ul = statsp->unexpected_sop;
+ tdc_kstatsp->tx_starts.value.ul = statsp->tx_starts;
+ tdc_kstatsp->tx_no_desc.value.ul = statsp->tx_no_desc;
+ tdc_kstatsp->tx_dma_bind_fail.value.ul = statsp->tx_dma_bind_fail;
+
+ tdc_kstatsp->count_hdr_size_err.value.ul =
+ statsp->count_hdr_size_err;
+ tdc_kstatsp->count_runt.value.ul = statsp->count_runt;
+ tdc_kstatsp->count_abort.value.ul = statsp->count_abort;
+ tdc_kstatsp->tx_marks.value.ul = statsp->tx_marks;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, " <== hxge_tdc_stat_update"));
+ return (0);
+}
+
+/* ARGSUSED */
+int
+hxge_tdc_sys_stat_update(kstat_t *ksp, int rw)
+{
+ p_hxge_t hxgep;
+ p_hxge_tdc_sys_kstat_t tdc_sys_kstatsp;
+ p_hxge_tdc_sys_stats_t statsp;
+
+ hxgep = (p_hxge_t)ksp->ks_private;
+ if (hxgep == NULL)
+ return (-1);
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_tdc_sys_stat_update"));
+
+ tdc_sys_kstatsp = (p_hxge_tdc_sys_kstat_t)ksp->ks_data;
+ statsp = (p_hxge_tdc_sys_stats_t)&hxgep->statsp->tdc_sys_stats;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "hxge_tdc_sys_stat_update %llx",
+ ksp->ks_data));
+
+ tdc_sys_kstatsp->reord_tbl_par_err.value.ul =
+ statsp->reord_tbl_par_err;
+ tdc_sys_kstatsp->reord_buf_ded_err.value.ul =
+ statsp->reord_buf_ded_err;
+ tdc_sys_kstatsp->reord_buf_sec_err.value.ul =
+ statsp->reord_buf_sec_err;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, " <== hxge_tdc_sys_stat_update"));
+ return (0);
+}
+
+/* ARGSUSED */
+int
+hxge_rdc_stat_update(kstat_t *ksp, int rw)
+{
+ p_hxge_t hxgep;
+ p_hxge_rdc_kstat_t rdc_kstatsp;
+ p_hxge_rx_ring_stats_t statsp;
+ int channel;
+ char *ch_name, *end;
+
+ hxgep = (p_hxge_t)ksp->ks_private;
+ if (hxgep == NULL)
+ return (-1);
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_rdc_stat_update"));
+
+ ch_name = ksp->ks_name;
+ ch_name += strlen(RDC_NAME_FORMAT1);
+ channel = mi_strtol(ch_name, &end, 10);
+
+ rdc_kstatsp = (p_hxge_rdc_kstat_t)ksp->ks_data;
+ statsp = (p_hxge_rx_ring_stats_t)&hxgep->statsp->rdc_stats[channel];
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL,
+ "hxge_rdc_stat_update $%p statsp $%p channel %d",
+ ksp->ks_data, statsp, channel));
+
+ rdc_kstatsp->ipackets.value.ull = statsp->ipackets;
+ rdc_kstatsp->rbytes.value.ull = statsp->ibytes;
+ rdc_kstatsp->jumbo_pkts.value.ul = statsp->jumbo_pkts;
+ rdc_kstatsp->rcr_unknown_err.value.ul = statsp->rcr_unknown_err;
+ rdc_kstatsp->errors.value.ul = statsp->ierrors;
+ rdc_kstatsp->rcr_sha_par_err.value.ul = statsp->rcr_sha_par;
+ rdc_kstatsp->rbr_pre_par_err.value.ul = statsp->rbr_pre_par;
+ rdc_kstatsp->rbr_pre_emty.value.ul = statsp->rbr_pre_empty;
+ rdc_kstatsp->rcr_shadow_full.value.ul = statsp->rcr_shadow_full;
+ rdc_kstatsp->rbr_tmout.value.ul = statsp->rbr_tmout;
+ rdc_kstatsp->peu_resp_err.value.ul = statsp->peu_resp_err;
+ rdc_kstatsp->ctrl_fifo_ecc_err.value.ul = statsp->ctrl_fifo_ecc_err;
+ rdc_kstatsp->data_fifo_ecc_err.value.ul = statsp->data_fifo_ecc_err;
+ rdc_kstatsp->rcrfull.value.ul = statsp->rcrfull;
+ rdc_kstatsp->rbr_empty.value.ul = statsp->rbr_empty;
+ rdc_kstatsp->rbrfull.value.ul = statsp->rbrfull;
+ rdc_kstatsp->rcr_to.value.ul = statsp->rcr_to;
+ rdc_kstatsp->rcr_thresh.value.ul = statsp->rcr_thres;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, " <== hxge_rdc_stat_update"));
+ return (0);
+}
+
+/* ARGSUSED */
+int
+hxge_rdc_sys_stat_update(kstat_t *ksp, int rw)
+{
+ p_hxge_t hxgep;
+ p_hxge_rdc_sys_kstat_t rdc_sys_kstatsp;
+ p_hxge_rdc_sys_stats_t statsp;
+
+ hxgep = (p_hxge_t)ksp->ks_private;
+ if (hxgep == NULL)
+ return (-1);
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_rdc_sys_stat_update"));
+
+ rdc_sys_kstatsp = (p_hxge_rdc_sys_kstat_t)ksp->ks_data;
+ statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "hxge_rdc_sys_stat_update %llx",
+ ksp->ks_data));
+
+ rdc_sys_kstatsp->ctrl_fifo_sec.value.ul = statsp->ctrl_fifo_sec;
+ rdc_sys_kstatsp->ctrl_fifo_ded.value.ul = statsp->ctrl_fifo_ded;
+ rdc_sys_kstatsp->data_fifo_sec.value.ul = statsp->data_fifo_sec;
+ rdc_sys_kstatsp->data_fifo_ded.value.ul = statsp->data_fifo_ded;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, " <== hxge_rdc_sys_stat_update"));
+ return (0);
+}
+
+/* ARGSUSED */
+int
+hxge_vmac_stat_update(kstat_t *ksp, int rw)
+{
+ p_hxge_t hxgep;
+ p_hxge_vmac_kstat_t vmac_kstatsp;
+ p_hxge_vmac_stats_t statsp;
+
+ hxgep = (p_hxge_t)ksp->ks_private;
+ if (hxgep == NULL)
+ return (-1);
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_vmac_stat_update"));
+
+ hxge_save_cntrs(hxgep);
+
+ vmac_kstatsp = (p_hxge_vmac_kstat_t)ksp->ks_data;
+ statsp = (p_hxge_vmac_stats_t)&hxgep->statsp->vmac_stats;
+
+ vmac_kstatsp->tx_frame_cnt.value.ul = statsp->tx_frame_cnt;
+ vmac_kstatsp->tx_byte_cnt.value.ul = statsp->tx_byte_cnt;
+
+ vmac_kstatsp->rx_frame_cnt.value.ul = statsp->rx_frame_cnt;
+ vmac_kstatsp->rx_byte_cnt.value.ul = statsp->rx_byte_cnt;
+ vmac_kstatsp->rx_drop_frame_cnt.value.ul = statsp->rx_drop_frame_cnt;
+ vmac_kstatsp->rx_drop_byte_cnt.value.ul = statsp->rx_drop_byte_cnt;
+ vmac_kstatsp->rx_crc_cnt.value.ul = statsp->rx_crc_cnt;
+ vmac_kstatsp->rx_pause_cnt.value.ul = statsp->rx_pause_cnt;
+ vmac_kstatsp->rx_bcast_fr_cnt.value.ul = statsp->rx_bcast_fr_cnt;
+ vmac_kstatsp->rx_mcast_fr_cnt.value.ul = statsp->rx_mcast_fr_cnt;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "<== hxge_vmac_stat_update"));
+ return (0);
+}
+
+/* ARGSUSED */
+int
+hxge_pfc_stat_update(kstat_t *ksp, int rw)
+{
+ p_hxge_t hxgep;
+ p_hxge_pfc_kstat_t kstatsp;
+ p_hxge_pfc_stats_t statsp;
+
+ hxgep = (p_hxge_t)ksp->ks_private;
+ if (hxgep == NULL)
+ return (-1);
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_pfc_stat_update"));
+
+ kstatsp = (p_hxge_pfc_kstat_t)ksp->ks_data;
+ statsp = (p_hxge_pfc_stats_t)&hxgep->statsp->pfc_stats;
+
+ kstatsp->pfc_pkt_drop.value.ul = statsp->pkt_drop;
+ kstatsp->pfc_tcam_parity_err.value.ul = statsp->tcam_parity_err;
+ kstatsp->pfc_vlan_parity_err.value.ul = statsp->vlan_parity_err;
+ kstatsp->pfc_bad_cs_count.value.ul = statsp->bad_cs_count;
+ kstatsp->pfc_drop_count.value.ul = statsp->drop_count;
+ kstatsp->pfc_tcp_ctrl_drop.value.ul = statsp->errlog.tcp_ctrl_drop;
+ kstatsp->pfc_l2_addr_drop.value.ul = statsp->errlog.l2_addr_drop;
+ kstatsp->pfc_class_code_drop.value.ul = statsp->errlog.class_code_drop;
+ kstatsp->pfc_tcam_drop.value.ul = statsp->errlog.tcam_drop;
+ kstatsp->pfc_vlan_drop.value.ul = statsp->errlog.vlan_drop;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "<== hxge_pfc_stat_update"));
+ return (0);
+}
+
+static uint64_t
+hxge_mac_octet_to_u64(struct ether_addr addr)
+{
+ int i;
+ uint64_t addr64 = 0;
+
+ for (i = ETHERADDRL - 1; i >= 0; i--) {
+ addr64 <<= 8;
+ addr64 |= addr.ether_addr_octet[i];
+ }
+ return (addr64);
+}
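+
+/*
+ * For example, the address 00:11:22:33:44:55 (octet[0] = 0x00) packs to
+ * 0x0000554433221100; octet[0] lands in the least significant byte.
+ */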
+
+/* ARGSUSED */
+int
+hxge_mmac_stat_update(kstat_t *ksp, int rw)
+{
+ p_hxge_t hxgep;
+ p_hxge_mmac_kstat_t mmac_kstatsp;
+ p_hxge_mmac_stats_t statsp;
+
+ hxgep = (p_hxge_t)ksp->ks_private;
+ if (hxgep == NULL)
+ return (-1);
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_mmac_stat_update"));
+
+ mmac_kstatsp = (p_hxge_mmac_kstat_t)ksp->ks_data;
+ statsp = (p_hxge_mmac_stats_t)&hxgep->statsp->mmac_stats;
+
+ mmac_kstatsp->mmac_max_addr_cnt.value.ul = statsp->mmac_max_cnt;
+ mmac_kstatsp->mmac_avail_addr_cnt.value.ul = statsp->mmac_avail_cnt;
+ mmac_kstatsp->mmac_addr1.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[0]);
+ mmac_kstatsp->mmac_addr2.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[1]);
+ mmac_kstatsp->mmac_addr3.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[2]);
+ mmac_kstatsp->mmac_addr4.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[3]);
+ mmac_kstatsp->mmac_addr5.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[4]);
+ mmac_kstatsp->mmac_addr6.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[5]);
+ mmac_kstatsp->mmac_addr7.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[6]);
+ mmac_kstatsp->mmac_addr8.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[7]);
+ mmac_kstatsp->mmac_addr9.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[8]);
+ mmac_kstatsp->mmac_addr10.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[9]);
+ mmac_kstatsp->mmac_addr11.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[10]);
+ mmac_kstatsp->mmac_addr12.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[11]);
+ mmac_kstatsp->mmac_addr13.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[12]);
+ mmac_kstatsp->mmac_addr14.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[13]);
+ mmac_kstatsp->mmac_addr15.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[14]);
+ mmac_kstatsp->mmac_addr16.value.ul =
+ hxge_mac_octet_to_u64(statsp->mmac_avail_pool[15]);
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "<== hxge_mmac_stat_update"));
+ return (0);
+}
+
+/* ARGSUSED */
+int
+hxge_peu_sys_stat_update(kstat_t *ksp, int rw)
+{
+ p_hxge_t hxgep;
+ p_hxge_peu_sys_kstat_t peu_kstatsp;
+ p_hxge_peu_sys_stats_t statsp;
+
+ hxgep = (p_hxge_t)ksp->ks_private;
+ if (hxgep == NULL)
+ return (-1);
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_peu_sys_stat_update"));
+
+ peu_kstatsp = (p_hxge_peu_sys_kstat_t)ksp->ks_data;
+ statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;
+
+ peu_kstatsp->spc_acc_err.value.ul = statsp->spc_acc_err;
+ peu_kstatsp->tdc_pioacc_err.value.ul = statsp->tdc_pioacc_err;
+ peu_kstatsp->rdc_pioacc_err.value.ul = statsp->rdc_pioacc_err;
+ peu_kstatsp->pfc_pioacc_err.value.ul = statsp->pfc_pioacc_err;
+ peu_kstatsp->vmac_pioacc_err.value.ul = statsp->vmac_pioacc_err;
+ peu_kstatsp->cpl_hdrq_parerr.value.ul = statsp->cpl_hdrq_parerr;
+ peu_kstatsp->cpl_dataq_parerr.value.ul = statsp->cpl_dataq_parerr;
+ peu_kstatsp->retryram_xdlh_parerr.value.ul =
+ statsp->retryram_xdlh_parerr;
+ peu_kstatsp->retrysotram_xdlh_parerr.value.ul =
+ statsp->retrysotram_xdlh_parerr;
+ peu_kstatsp->p_hdrq_parerr.value.ul = statsp->p_hdrq_parerr;
+ peu_kstatsp->p_dataq_parerr.value.ul = statsp->p_dataq_parerr;
+ peu_kstatsp->np_hdrq_parerr.value.ul = statsp->np_hdrq_parerr;
+ peu_kstatsp->np_dataq_parerr.value.ul = statsp->np_dataq_parerr;
+ peu_kstatsp->eic_msix_parerr.value.ul = statsp->eic_msix_parerr;
+ peu_kstatsp->hcr_parerr.value.ul = statsp->hcr_parerr;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "<== hxge_peu_sys_stat_update"));
+ return (0);
+}
+
+static kstat_t *
+hxge_setup_local_kstat(p_hxge_t hxgep, int instance, char *name,
+ const hxge_kstat_index_t *ksip, size_t count,
+ int (*update) (kstat_t *, int))
+{
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ int i;
+
+ ksp = kstat_create(HXGE_DRIVER_NAME, instance, name, "net",
+ KSTAT_TYPE_NAMED, count, 0);
+ if (ksp == NULL)
+ return (NULL);
+
+ ksp->ks_private = (void *) hxgep;
+ ksp->ks_update = update;
+ knp = ksp->ks_data;
+
+ for (i = 0; ksip[i].name != NULL; i++) {
+ kstat_named_init(&knp[i], ksip[i].name, ksip[i].type);
+ }
+
+ kstat_install(ksp);
+
+ return (ksp);
+}
+
+void
+hxge_setup_kstats(p_hxge_t hxgep)
+{
+ struct kstat *ksp;
+ p_hxge_port_kstat_t hxgekp;
+ size_t hxge_kstat_sz;
+ char stat_name[64];
+ int i;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_setup_kstats"));
+
+ /* Setup RDC statistics */
+ for (i = 0; i < hxgep->nrdc; i++) {
+ (void) sprintf(stat_name, "%s"CH_NAME_FORMAT,
+ RDC_NAME_FORMAT1, i);
+ hxgep->statsp->rdc_ksp[i] = hxge_setup_local_kstat(hxgep,
+ hxgep->instance, stat_name, &hxge_rdc_stats[0],
+ RDC_STAT_END, hxge_rdc_stat_update);
+ if (hxgep->statsp->rdc_ksp[i] == NULL)
+ cmn_err(CE_WARN,
+ "kstat_create failed for rdc channel %d", i);
+ }
+
+ /* Setup RDC System statistics */
+ hxgep->statsp->rdc_sys_ksp = hxge_setup_local_kstat(hxgep,
+ hxgep->instance, "RDC_system", &hxge_rdc_sys_stats[0],
+ RDC_SYS_STAT_END, hxge_rdc_sys_stat_update);
+ if (hxgep->statsp->rdc_sys_ksp == NULL)
+ cmn_err(CE_WARN, "kstat_create failed for rdc_sys_ksp");
+
+ /* Setup TDC statistics */
+ for (i = 0; i < hxgep->ntdc; i++) {
+ (void) sprintf(stat_name, "%s"CH_NAME_FORMAT,
+ TDC_NAME_FORMAT1, i);
+ hxgep->statsp->tdc_ksp[i] = hxge_setup_local_kstat(hxgep,
+ hxgep->instance, stat_name, &hxge_tdc_stats[0],
+ TDC_STAT_END, hxge_tdc_stat_update);
+ if (hxgep->statsp->tdc_ksp[i] == NULL)
+ cmn_err(CE_WARN,
+ "kstat_create failed for tdc channel %d", i);
+ }
+
+ /* Setup TDC System statistics */
+ hxgep->statsp->tdc_sys_ksp = hxge_setup_local_kstat(hxgep,
+ hxgep->instance, "TDC_system", &hxge_tdc_sys_stats[0],
+	    TDC_SYS_STAT_END, hxge_tdc_sys_stat_update);
+ if (hxgep->statsp->tdc_sys_ksp == NULL)
+ cmn_err(CE_WARN, "kstat_create failed for tdc_sys_ksp");
+
+ /* Setup PFC statistics */
+ hxgep->statsp->pfc_ksp = hxge_setup_local_kstat(hxgep,
+ hxgep->instance, "PFC", &hxge_pfc_stats[0],
+ PFC_STAT_END, hxge_pfc_stat_update);
+ if (hxgep->statsp->pfc_ksp == NULL)
+ cmn_err(CE_WARN, "kstat_create failed for pfc");
+
+ /* Setup VMAC statistics */
+ hxgep->statsp->vmac_ksp = hxge_setup_local_kstat(hxgep,
+ hxgep->instance, "VMAC", &hxge_vmac_stats[0],
+ VMAC_STAT_END, hxge_vmac_stat_update);
+ if (hxgep->statsp->vmac_ksp == NULL)
+ cmn_err(CE_WARN, "kstat_create failed for vmac");
+
+ /* Setup MMAC statistics */
+ hxgep->statsp->mmac_ksp = hxge_setup_local_kstat(hxgep,
+ hxgep->instance, "MMAC",
+ &hxge_mmac_stats[0], MMAC_STATS_END, hxge_mmac_stat_update);
+ if (hxgep->statsp->mmac_ksp == NULL)
+ cmn_err(CE_WARN, "kstat_create failed for mmac");
+
+ /* Setup PEU System statistics */
+ hxgep->statsp->peu_sys_ksp = hxge_setup_local_kstat(hxgep,
+ hxgep->instance, "PEU", &hxge_peu_sys_stats[0],
+ PEU_SYS_STAT_END, hxge_peu_sys_stat_update);
+ if (hxgep->statsp->peu_sys_ksp == NULL)
+ cmn_err(CE_WARN, "kstat_create failed for peu sys");
+
+ /* Port stats */
+ hxge_kstat_sz = sizeof (hxge_port_kstat_t);
+
+ if ((ksp = kstat_create(HXGE_DRIVER_NAME, hxgep->instance,
+ "Port", "net", KSTAT_TYPE_NAMED,
+ hxge_kstat_sz / sizeof (kstat_named_t), 0)) == NULL) {
+ cmn_err(CE_WARN, "kstat_create failed for port stat");
+ return;
+ }
+
+ hxgekp = (p_hxge_port_kstat_t)ksp->ks_data;
+
+ kstat_named_init(&hxgekp->cap_10gfdx, "cap_10gfdx", KSTAT_DATA_ULONG);
+
+ /*
+ * Link partner capabilities.
+ */
+ kstat_named_init(&hxgekp->lp_cap_10gfdx, "lp_cap_10gfdx",
+ KSTAT_DATA_ULONG);
+
+ /*
+ * Shared link setup.
+ */
+ kstat_named_init(&hxgekp->link_speed, "link_speed", KSTAT_DATA_ULONG);
+ kstat_named_init(&hxgekp->link_duplex, "link_duplex", KSTAT_DATA_CHAR);
+ kstat_named_init(&hxgekp->link_up, "link_up", KSTAT_DATA_ULONG);
+
+ /*
+ * Loopback statistics.
+ */
+ kstat_named_init(&hxgekp->lb_mode, "lb_mode", KSTAT_DATA_ULONG);
+
+ /* General MAC statistics */
+
+ kstat_named_init(&hxgekp->ifspeed, "ifspeed", KSTAT_DATA_UINT64);
+ kstat_named_init(&hxgekp->promisc, "promisc", KSTAT_DATA_CHAR);
+
+ ksp->ks_update = hxge_port_kstat_update;
+ ksp->ks_private = (void *) hxgep;
+ kstat_install(ksp);
+ hxgep->statsp->port_ksp = ksp;
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "<== hxge_setup_kstats"));
+}
+
+void
+hxge_destroy_kstats(p_hxge_t hxgep)
+{
+ int channel;
+ p_hxge_dma_pt_cfg_t p_dma_cfgp;
+ p_hxge_hw_pt_cfg_t p_cfgp;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_destroy_kstats"));
+ if (hxgep->statsp == NULL)
+ return;
+
+ if (hxgep->statsp->ksp)
+ kstat_delete(hxgep->statsp->ksp);
+
+ p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
+ p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+ for (channel = 0; channel < p_cfgp->max_rdcs; channel++) {
+ if (hxgep->statsp->rdc_ksp[channel]) {
+ kstat_delete(hxgep->statsp->rdc_ksp[channel]);
+ }
+ }
+
+ for (channel = 0; channel < p_cfgp->max_tdcs; channel++) {
+ if (hxgep->statsp->tdc_ksp[channel]) {
+ kstat_delete(hxgep->statsp->tdc_ksp[channel]);
+ }
+ }
+
+ if (hxgep->statsp->rdc_sys_ksp)
+ kstat_delete(hxgep->statsp->rdc_sys_ksp);
+
+ if (hxgep->statsp->tdc_sys_ksp)
+ kstat_delete(hxgep->statsp->tdc_sys_ksp);
+
+ if (hxgep->statsp->peu_sys_ksp)
+ kstat_delete(hxgep->statsp->peu_sys_ksp);
+
+ if (hxgep->statsp->mmac_ksp)
+ kstat_delete(hxgep->statsp->mmac_ksp);
+
+ if (hxgep->statsp->pfc_ksp)
+ kstat_delete(hxgep->statsp->pfc_ksp);
+
+ if (hxgep->statsp->vmac_ksp)
+ kstat_delete(hxgep->statsp->vmac_ksp);
+
+ if (hxgep->statsp->port_ksp)
+ kstat_delete(hxgep->statsp->port_ksp);
+
+	KMEM_FREE(hxgep->statsp, hxgep->statsp->stats_size);
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "<== hxge_destroy_kstats"));
+}
+
+/* ARGSUSED */
+int
+hxge_port_kstat_update(kstat_t *ksp, int rw)
+{
+ p_hxge_t hxgep;
+ p_hxge_stats_t statsp;
+ p_hxge_port_kstat_t hxgekp;
+ p_hxge_port_stats_t psp;
+
+ hxgep = (p_hxge_t)ksp->ks_private;
+ if (hxgep == NULL)
+ return (-1);
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_port_kstat_update"));
+ statsp = (p_hxge_stats_t)hxgep->statsp;
+ hxgekp = (p_hxge_port_kstat_t)ksp->ks_data;
+ psp = &statsp->port_stats;
+
+ if (hxgep->filter.all_phys_cnt)
+ (void) strcpy(hxgekp->promisc.value.c, "phys");
+ else if (hxgep->filter.all_multicast_cnt)
+ (void) strcpy(hxgekp->promisc.value.c, "multi");
+ else
+ (void) strcpy(hxgekp->promisc.value.c, "off");
+	hxgekp->ifspeed.value.ull = statsp->mac_stats.link_speed * 1000000ULL;
+
+	/*
+	 * Transceiver state information.
+	 */
+ hxgekp->cap_10gfdx.value.ul = statsp->mac_stats.cap_10gfdx;
+
+ /*
+ * Link partner capabilities.
+ */
+ hxgekp->lp_cap_10gfdx.value.ul = statsp->mac_stats.lp_cap_10gfdx;
+
+ /*
+ * Physical link statistics.
+ */
+ hxgekp->link_speed.value.ul = statsp->mac_stats.link_speed;
+ if (statsp->mac_stats.link_duplex == 2)
+ (void) strcpy(hxgekp->link_duplex.value.c, "full");
+ else
+ (void) strcpy(hxgekp->link_duplex.value.c, "unknown");
+ hxgekp->link_up.value.ul = statsp->mac_stats.link_up;
+
+ /*
+ * Loopback statistics.
+ */
+ hxgekp->lb_mode.value.ul = psp->lb_mode;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "<== hxge_port_kstat_update"));
+ return (0);
+}
+
+int
+hxge_m_stat(void *arg, uint_t stat, uint64_t *value)
+{
+ p_hxge_t hxgep = (p_hxge_t)arg;
+ p_hxge_stats_t statsp;
+ hxge_tx_ring_stats_t *tx_stats;
+ uint64_t val = 0;
+ int channel;
+
+ HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_m_stat"));
+ statsp = (p_hxge_stats_t)hxgep->statsp;
+
+ switch (stat) {
+ case MAC_STAT_IFSPEED:
+ val = statsp->mac_stats.link_speed * 1000000ull;
+ break;
+
+ case MAC_STAT_MULTIRCV:
+ val = 0;
+ break;
+
+ case MAC_STAT_BRDCSTRCV:
+ val = 0;
+ break;
+
+ case MAC_STAT_MULTIXMT:
+ val = 0;
+ break;
+
+ case MAC_STAT_BRDCSTXMT:
+ val = 0;
+ break;
+
+ case MAC_STAT_NORCVBUF:
+ val = 0;
+ break;
+
+ case MAC_STAT_IERRORS:
+ case ETHER_STAT_MACRCV_ERRORS:
+ val = 0;
+ for (channel = 0; channel < hxgep->nrdc; channel++) {
+ val += statsp->rdc_stats[channel].ierrors;
+ }
+ break;
+
+ case MAC_STAT_NOXMTBUF:
+ val = 0;
+ break;
+
+ case MAC_STAT_OERRORS:
+ for (channel = 0; channel < hxgep->ntdc; channel++) {
+ val += statsp->tdc_stats[channel].oerrors;
+ }
+ break;
+
+ case MAC_STAT_COLLISIONS:
+ val = 0;
+ break;
+
+ case MAC_STAT_RBYTES:
+ for (channel = 0; channel < hxgep->nrdc; channel++) {
+ val += statsp->rdc_stats[channel].ibytes;
+ }
+ break;
+
+ case MAC_STAT_IPACKETS:
+ for (channel = 0; channel < hxgep->nrdc; channel++) {
+ val += statsp->rdc_stats[channel].ipackets;
+ }
+ break;
+
+ case MAC_STAT_OBYTES:
+ for (channel = 0; channel < hxgep->ntdc; channel++) {
+ val += statsp->tdc_stats[channel].obytes;
+ }
+ break;
+
+ case MAC_STAT_OPACKETS:
+ for (channel = 0; channel < hxgep->ntdc; channel++) {
+ val += statsp->tdc_stats[channel].opackets;
+ }
+ break;
+
+ case MAC_STAT_UNKNOWNS:
+ val = 0;
+ break;
+
+ case MAC_STAT_UNDERFLOWS:
+ val = 0;
+ break;
+
+ case MAC_STAT_OVERFLOWS:
+ val = 0;
+ break;
+
+ case MAC_STAT_LINK_STATE:
+ val = statsp->mac_stats.link_duplex;
+ break;
+ case MAC_STAT_LINK_UP:
+ val = statsp->mac_stats.link_up;
+ break;
+ case MAC_STAT_PROMISC:
+ val = statsp->mac_stats.promisc;
+ break;
+ case ETHER_STAT_SQE_ERRORS:
+ val = 0;
+ break;
+
+ case ETHER_STAT_ALIGN_ERRORS:
+ /*
+ * No similar error in Hydra receive channels
+ */
+ val = 0;
+ break;
+
+ case ETHER_STAT_FCS_ERRORS:
+ /*
+ * No similar error in Hydra receive channels
+ */
+ val = 0;
+ break;
+
+ case ETHER_STAT_FIRST_COLLISIONS:
+ val = 0;
+ break;
+
+ case ETHER_STAT_MULTI_COLLISIONS:
+ val = 0;
+ break;
+
+ case ETHER_STAT_TX_LATE_COLLISIONS:
+ val = 0;
+ break;
+
+ case ETHER_STAT_EX_COLLISIONS:
+ val = 0;
+ break;
+
+ case ETHER_STAT_DEFER_XMTS:
+ val = 0;
+ break;
+
+ case ETHER_STAT_MACXMT_ERRORS:
+ /*
+ * A count of frames for which transmission on a
+ * particular interface fails due to an internal
+ * MAC sublayer transmit error
+ */
+ for (channel = 0; channel < hxgep->ntdc; channel++) {
+ tx_stats = &statsp->tdc_stats[channel];
+ val += tx_stats->pkt_size_hdr_err +
+ tx_stats->pkt_size_err +
+ tx_stats->tx_rng_oflow +
+ tx_stats->peu_resp_err +
+ tx_stats->runt_pkt_drop_err +
+ tx_stats->pref_par_err +
+ tx_stats->tdr_pref_cpl_to +
+ tx_stats->pkt_cpl_to +
+ tx_stats->invalid_sop +
+ tx_stats->unexpected_sop;
+ }
+ break;
+
+ case ETHER_STAT_CARRIER_ERRORS:
+ /*
+ * The number of times that the carrier sense
+ * condition was lost or never asserted when
+ * attempting to transmit a frame on a particular interface
+ */
+ for (channel = 0; channel < hxgep->ntdc; channel++) {
+ tx_stats = &statsp->tdc_stats[channel];
+ val += tx_stats->tdr_pref_cpl_to + tx_stats->pkt_cpl_to;
+ }
+ break;
+
+ case ETHER_STAT_TOOLONG_ERRORS:
+ /*
+ * A count of frames received on a particular
+ * interface that exceed the maximum permitted frame size
+ */
+ for (channel = 0; channel < hxgep->ntdc; channel++) {
+ tx_stats = &statsp->tdc_stats[channel];
+ val += tx_stats->pkt_size_err;
+ }
+ break;
+
+ case ETHER_STAT_XCVR_ADDR:
+ val = 0;
+ break;
+ case ETHER_STAT_XCVR_ID:
+ val = 0;
+ break;
+
+ case ETHER_STAT_XCVR_INUSE:
+ val = 0;
+ break;
+
+ case ETHER_STAT_CAP_1000FDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_CAP_1000HDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_CAP_100FDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_CAP_100HDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_CAP_10FDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_CAP_10HDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_CAP_ASMPAUSE:
+ val = 0;
+ break;
+
+ case ETHER_STAT_CAP_PAUSE:
+ val = 0;
+ break;
+
+ case ETHER_STAT_CAP_AUTONEG:
+ val = 0;
+ break;
+
+ case ETHER_STAT_ADV_CAP_1000FDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_ADV_CAP_1000HDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_ADV_CAP_100FDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_ADV_CAP_100HDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_ADV_CAP_10FDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_ADV_CAP_10HDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_ADV_CAP_ASMPAUSE:
+ val = 0;
+ break;
+
+ case ETHER_STAT_ADV_CAP_PAUSE:
+ val = 0;
+ break;
+
+ case ETHER_STAT_ADV_CAP_AUTONEG:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LP_CAP_1000FDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LP_CAP_1000HDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LP_CAP_100FDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LP_CAP_100HDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LP_CAP_10FDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LP_CAP_10HDX:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LP_CAP_ASMPAUSE:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LP_CAP_PAUSE:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LP_CAP_AUTONEG:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LINK_ASMPAUSE:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LINK_PAUSE:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LINK_AUTONEG:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LINK_DUPLEX:
+ val = statsp->mac_stats.link_duplex;
+ break;
+
+ case ETHER_STAT_TOOSHORT_ERRORS:
+ val = 0;
+ break;
+
+ case ETHER_STAT_CAP_REMFAULT:
+ val = 0;
+ break;
+
+ case ETHER_STAT_ADV_REMFAULT:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LP_REMFAULT:
+ val = 0;
+ break;
+
+ case ETHER_STAT_JABBER_ERRORS:
+ val = 0;
+ break;
+
+ case ETHER_STAT_CAP_100T4:
+ val = 0;
+ break;
+
+ case ETHER_STAT_ADV_CAP_100T4:
+ val = 0;
+ break;
+
+ case ETHER_STAT_LP_CAP_100T4:
+ val = 0;
+ break;
+
+ default:
+ /*
+ * Shouldn't reach here...
+ */
+ cmn_err(CE_WARN,
+ "hxge_m_stat: unrecognized parameter value = 0x%x", stat);
+
+ return (ENOTSUP);
+ }
+ *value = val;
+ return (0);
+}
diff --git a/usr/src/uts/common/io/hxge/hxge_main.c b/usr/src/uts/common/io/hxge/hxge_main.c
new file mode 100644
index 0000000000..ec3a7422f6
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_main.c
@@ -0,0 +1,3773 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
+ */
+#include <hxge_impl.h>
+#include <hxge_pfc.h>
+
+/*
+ * PSARC/2007/453 MSI-X interrupt limit override
+ * (This PSARC case is limited to MSI-X vectors
+ * and SPARC platforms only).
+ */
+#if defined(_BIG_ENDIAN)
+uint32_t hxge_msi_enable = 2;
+#else
+uint32_t hxge_msi_enable = 1;
+#endif
+
+/*
+ * Globals: tunable parameters (/etc/system or adb).
+ */
+uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
+uint32_t hxge_rbr_spare_size = 0;
+uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
+uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
+uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
+uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
+uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
+uint32_t hxge_jumbo_mtu = TX_JUMBO_MTU;
+boolean_t hxge_jumbo_enable = B_FALSE;
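+
+/*
+ * For example, a line such as the following in /etc/system (the value is
+ * illustrative only) overrides the default receive block ring size at
+ * boot:
+ *
+ *	set hxge:hxge_rbr_size = 2048
+ */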
+
+static hxge_os_mutex_t hxgedebuglock;
+static int hxge_debug_init = 0;
+
+/*
+ * Debugging flags:
+ *	hxge_no_tx_lb	 : set to 1 to disable transmit load balancing
+ * hxge_tx_lb_policy: 0 - TCP/UDP port (default)
+ * 1 - From the Stack
+ * 2 - Destination IP Address
+ */
+uint32_t hxge_no_tx_lb = 0;
+uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
+
+/*
+ * Tunable to limit the number of packets processed per receive
+ * interrupt, reducing the time spent in the ISR doing Rx processing.
+ */
+uint32_t hxge_max_rx_pkts = 1024;
+
+/*
+ * Tunables to manage the receive buffer blocks.
+ *
+ * hxge_rx_threshold_hi: copy all buffers.
+ * hxge_rx_bcopy_size_type: receive buffer block size type.
+ * hxge_rx_threshold_lo: copy only up to tunable block size type.
+ */
+hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
+hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
+hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_3;
+
+rtrace_t hpi_rtracebuf;
+
+/*
+ * Function Prototypes
+ */
+static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
+static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
+static void hxge_unattach(p_hxge_t);
+
+static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
+
+static hxge_status_t hxge_setup_mutexes(p_hxge_t);
+static void hxge_destroy_mutexes(p_hxge_t);
+
+static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
+static void hxge_unmap_regs(p_hxge_t hxgep);
+
+hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
+static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep);
+static void hxge_remove_intrs(p_hxge_t hxgep);
+static void hxge_remove_soft_intrs(p_hxge_t hxgep);
+static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
+static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
+static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
+void hxge_intrs_enable(p_hxge_t hxgep);
+static void hxge_intrs_disable(p_hxge_t hxgep);
+static void hxge_suspend(p_hxge_t);
+static hxge_status_t hxge_resume(p_hxge_t);
+hxge_status_t hxge_setup_dev(p_hxge_t);
+static void hxge_destroy_dev(p_hxge_t);
+hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
+static void hxge_free_mem_pool(p_hxge_t);
+static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
+static void hxge_free_rx_mem_pool(p_hxge_t);
+static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
+static void hxge_free_tx_mem_pool(p_hxge_t);
+static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
+ struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
+ p_hxge_dma_common_t);
+static void hxge_dma_mem_free(p_hxge_dma_common_t);
+static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
+ p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
+static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
+static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
+ p_hxge_dma_common_t *, size_t);
+static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
+static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
+ p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
+static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
+static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
+ p_hxge_dma_common_t *, size_t);
+static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
+static int hxge_init_common_dev(p_hxge_t);
+static void hxge_uninit_common_dev(p_hxge_t);
+
+/*
+ * The next declarations are for the GLDv3 interface.
+ */
+static int hxge_m_start(void *);
+static void hxge_m_stop(void *);
+static int hxge_m_unicst(void *, const uint8_t *);
+static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
+static int hxge_m_promisc(void *, boolean_t);
+static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
+static void hxge_m_resources(void *);
+static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
+
+static int hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
+static int hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
+static int hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
+static int hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
+static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
+
+#define HXGE_MAGIC 0x4E584745UL
+#define MAX_DUMP_SZ 256
+
+#define HXGE_M_CALLBACK_FLAGS (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
+
+extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp);
+extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
+
+static mac_callbacks_t hxge_m_callbacks = {
+ HXGE_M_CALLBACK_FLAGS,
+ hxge_m_stat,
+ hxge_m_start,
+ hxge_m_stop,
+ hxge_m_promisc,
+ hxge_m_multicst,
+ hxge_m_unicst,
+ hxge_m_tx,
+ hxge_m_resources,
+ hxge_m_ioctl,
+ hxge_m_getcapab
+};
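+
+/*
+ * A condensed sketch of the GLDv3 registration these callbacks feed into
+ * (per the mac_alloc(9F)/mac_register(9F) interfaces; error handling and
+ * fields such as m_src_addr and the SDU limits are omitted):
+ *
+ *	mac_register_t *macp = mac_alloc(MAC_VERSION);
+ *	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
+ *	macp->m_driver = hxgep;
+ *	macp->m_dip = hxgep->dip;
+ *	macp->m_callbacks = &hxge_m_callbacks;
+ *	(void) mac_register(macp, &hxgep->mach);
+ */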
+
+/* Enable debug messages as necessary. */
+uint64_t hxge_debug_level = 0x0;
+
+/*
+ * This list contains the instance structures for the Hydra
+ * devices present in the system. The lock exists to guarantee
+ * mutually exclusive access to the list.
+ */
+void *hxge_list = NULL;
+void *hxge_hw_list = NULL;
+hxge_os_mutex_t hxge_common_lock;
+
+extern uint64_t hpi_debug_level;
+
+extern hxge_status_t hxge_ldgv_init();
+extern hxge_status_t hxge_ldgv_uninit();
+extern hxge_status_t hxge_intr_ldgv_init();
+extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
+ ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
+extern void hxge_fm_fini(p_hxge_t hxgep);
+
+/*
+ * Count used to maintain the number of buffers being used
+ * by Hydra instances and loaned up to the upper layers.
+ */
+uint32_t hxge_mblks_pending = 0;
+
+/*
+ * Device register access attributes for PIO.
+ */
+static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC,
+};
+
+/*
+ * Device descriptor access attributes for DMA.
+ */
+static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+/*
+ * Device buffer access attributes for DMA.
+ */
+static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_BE_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+ddi_dma_attr_t hxge_desc_dma_attr = {
+ DMA_ATTR_V0, /* version number. */
+ 0, /* low address */
+ 0xffffffffffffffff, /* high address */
+ 0xffffffffffffffff, /* address counter max */
+ 0x100000, /* alignment */
+ 0xfc00fc, /* dlim_burstsizes */
+ 0x1, /* minimum transfer size */
+ 0xffffffffffffffff, /* maximum transfer size */
+ 0xffffffffffffffff, /* maximum segment size */
+ 1, /* scatter/gather list length */
+ (unsigned int)1, /* granularity */
+ 0 /* attribute flags */
+};
+
+ddi_dma_attr_t hxge_tx_dma_attr = {
+ DMA_ATTR_V0, /* version number. */
+ 0, /* low address */
+ 0xffffffffffffffff, /* high address */
+ 0xffffffffffffffff, /* address counter max */
+#if defined(_BIG_ENDIAN)
+ 0x2000, /* alignment */
+#else
+ 0x1000, /* alignment */
+#endif
+ 0xfc00fc, /* dlim_burstsizes */
+ 0x1, /* minimum transfer size */
+ 0xffffffffffffffff, /* maximum transfer size */
+ 0xffffffffffffffff, /* maximum segment size */
+ 5, /* scatter/gather list length */
+ (unsigned int)1, /* granularity */
+ 0 /* attribute flags */
+};
+
+ddi_dma_attr_t hxge_rx_dma_attr = {
+ DMA_ATTR_V0, /* version number. */
+ 0, /* low address */
+ 0xffffffffffffffff, /* high address */
+ 0xffffffffffffffff, /* address counter max */
+ 0x10000, /* alignment */
+ 0xfc00fc, /* dlim_burstsizes */
+ 0x1, /* minimum transfer size */
+ 0xffffffffffffffff, /* maximum transfer size */
+ 0xffffffffffffffff, /* maximum segment size */
+ 1, /* scatter/gather list length */
+ (unsigned int)1, /* granularity */
+ DDI_DMA_RELAXED_ORDERING /* attribute flags */
+};
+
+ddi_dma_lim_t hxge_dma_limits = {
+ (uint_t)0, /* dlim_addr_lo */
+ (uint_t)0xffffffff, /* dlim_addr_hi */
+ (uint_t)0xffffffff, /* dlim_cntr_max */
+ (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
+ 0x1, /* dlim_minxfer */
+ 1024 /* dlim_speed */
+};
+
+dma_method_t hxge_force_dma = DVMA;
+
+/*
+ * DMA chunk sizes.
+ *
+ * Try to allocate the largest possible size so that fewer DMA chunks
+ * need to be managed.
+ */
+size_t alloc_sizes[] = {
+ 0x1000, 0x2000, 0x4000, 0x8000,
+ 0x10000, 0x20000, 0x40000, 0x80000,
+ 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
+};
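+
+/*
+ * A minimal sketch of how an allocator might descend this table to pick
+ * the largest chunk size that still fits (illustrative only; the actual
+ * selection logic lives in the buffer allocation routines):
+ *
+ *	for (i = sizeof (alloc_sizes) / sizeof (size_t) - 1; i >= 0; i--) {
+ *		if (alloc_sizes[i] <= bytes_remaining)
+ *			break;
+ *	}
+ */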
+
+/*
+ * hxge_attach - attach this instance of the device.  Handles the
+ * DDI_ATTACH, DDI_RESUME and DDI_PM_RESUME commands.
+ */
+static int
+hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ p_hxge_t hxgep = NULL;
+ int instance;
+ int status = DDI_SUCCESS;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
+
+ /*
+ * Get the device instance since we'll need to setup or retrieve a soft
+	 * Get the device instance, since we'll need to set up or retrieve a
+	 * soft state for this instance.
+ instance = ddi_get_instance(dip);
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
+ break;
+
+ case DDI_RESUME:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
+ hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
+ if (hxgep == NULL) {
+ status = DDI_FAILURE;
+ break;
+ }
+ if (hxgep->dip != dip) {
+ status = DDI_FAILURE;
+ break;
+ }
+ if (hxgep->suspended == DDI_PM_SUSPEND) {
+ status = ddi_dev_is_needed(hxgep->dip, 0, 1);
+ } else {
+ (void) hxge_resume(hxgep);
+ }
+ goto hxge_attach_exit;
+
+ case DDI_PM_RESUME:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
+ hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
+ if (hxgep == NULL) {
+ status = DDI_FAILURE;
+ break;
+ }
+ if (hxgep->dip != dip) {
+ status = DDI_FAILURE;
+ break;
+ }
+ (void) hxge_resume(hxgep);
+ goto hxge_attach_exit;
+
+ default:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
+ status = DDI_FAILURE;
+ goto hxge_attach_exit;
+ }
+
+ if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
+ status = DDI_FAILURE;
+ HXGE_ERROR_MSG((hxgep, DDI_CTL,
+ "ddi_soft_state_zalloc failed"));
+ goto hxge_attach_exit;
+ }
+
+ hxgep = ddi_get_soft_state(hxge_list, instance);
+ if (hxgep == NULL) {
+ status = HXGE_ERROR;
+ HXGE_ERROR_MSG((hxgep, DDI_CTL,
+ "ddi_get_soft_state failed"));
+ goto hxge_attach_fail2;
+ }
+
+ hxgep->drv_state = 0;
+ hxgep->dip = dip;
+ hxgep->instance = instance;
+ hxgep->p_dip = ddi_get_parent(dip);
+ hxgep->hxge_debug_level = hxge_debug_level;
+ hpi_debug_level = hxge_debug_level;
+
+ hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
+ &hxge_rx_dma_attr);
+
+ status = hxge_map_regs(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
+ goto hxge_attach_fail3;
+ }
+
+ status = hxge_init_common_dev(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_init_common_dev failed"));
+ goto hxge_attach_fail4;
+ }
+
+ /*
+	 * Set up the ndd parameters for this instance.
+ */
+ hxge_init_param(hxgep);
+
+ /*
+	 * Set up the register tracing buffer.
+ */
+ hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
+
+	/* Initialize the statistics pointer. */
+ hxge_init_statsp(hxgep);
+
+ status = hxge_get_config_properties(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
+ goto hxge_attach_fail;
+ }
+
+ /*
+	 * Set up the kstats for the driver.
+ */
+ hxge_setup_kstats(hxgep);
+ hxge_setup_param(hxgep);
+
+ status = hxge_setup_system_dma_pages(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
+ goto hxge_attach_fail;
+ }
+
+ hxge_hw_id_init(hxgep);
+ hxge_hw_init_niu_common(hxgep);
+
+ status = hxge_setup_mutexes(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
+ goto hxge_attach_fail;
+ }
+
+ status = hxge_setup_dev(hxgep);
+ if (status != DDI_SUCCESS) {
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
+ goto hxge_attach_fail;
+ }
+
+ status = hxge_add_intrs(hxgep);
+ if (status != DDI_SUCCESS) {
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
+ goto hxge_attach_fail;
+ }
+
+ status = hxge_add_soft_intrs(hxgep);
+ if (status != DDI_SUCCESS) {
+ HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed"));
+ goto hxge_attach_fail;
+ }
+
+ /*
+ * Enable interrupts.
+ */
+ hxge_intrs_enable(hxgep);
+
+ if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "unable to register to mac layer (%d)", status));
+ goto hxge_attach_fail;
+ }
+ mac_link_update(hxgep->mach, LINK_STATE_UP);
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
+ instance));
+
+ goto hxge_attach_exit;
+
+hxge_attach_fail:
+ hxge_unattach(hxgep);
+ goto hxge_attach_fail1;
+
+hxge_attach_fail5:
+ /*
+	 * Tear down the ndd parameter setup.
+ */
+ hxge_destroy_param(hxgep);
+
+ /*
+ * Tear down the kstat setup.
+ */
+ hxge_destroy_kstats(hxgep);
+
+hxge_attach_fail4:
+ if (hxgep->hxge_hw_p) {
+ hxge_uninit_common_dev(hxgep);
+ hxgep->hxge_hw_p = NULL;
+ }
+hxge_attach_fail3:
+ /*
+ * Unmap the register setup.
+ */
+ hxge_unmap_regs(hxgep);
+
+ hxge_fm_fini(hxgep);
+
+hxge_attach_fail2:
+ ddi_soft_state_free(hxge_list, hxgep->instance);
+
+hxge_attach_fail1:
+ if (status != HXGE_OK)
+ status = (HXGE_ERROR | HXGE_DDI_FAILED);
+ hxgep = NULL;
+
+hxge_attach_exit:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
+ status));
+
+ return (status);
+}
+
+static int
+hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int status = DDI_SUCCESS;
+ int instance;
+ p_hxge_t hxgep = NULL;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
+ instance = ddi_get_instance(dip);
+ hxgep = ddi_get_soft_state(hxge_list, instance);
+ if (hxgep == NULL) {
+ status = DDI_FAILURE;
+ goto hxge_detach_exit;
+ }
+
+ switch (cmd) {
+ case DDI_DETACH:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
+ break;
+
+ case DDI_PM_SUSPEND:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
+ hxgep->suspended = DDI_PM_SUSPEND;
+ hxge_suspend(hxgep);
+ break;
+
+ case DDI_SUSPEND:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
+ if (hxgep->suspended != DDI_PM_SUSPEND) {
+ hxgep->suspended = DDI_SUSPEND;
+ hxge_suspend(hxgep);
+ }
+ break;
+
+ default:
+ status = DDI_FAILURE;
+ break;
+ }
+
+ if (cmd != DDI_DETACH)
+ goto hxge_detach_exit;
+
+ /*
+ * Stop the xcvr polling.
+ */
+ hxgep->suspended = cmd;
+
+ if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_detach status = 0x%08X", status));
+ return (DDI_FAILURE);
+ }
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "<== hxge_detach (mac_unregister) status = 0x%08X", status));
+
+ hxge_unattach(hxgep);
+ hxgep = NULL;
+
+hxge_detach_exit:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
+ status));
+
+ return (status);
+}
+
+static void
+hxge_unattach(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
+
+ if (hxgep == NULL || hxgep->dev_regs == NULL) {
+ return;
+ }
+
+ if (hxgep->hxge_hw_p) {
+ hxge_uninit_common_dev(hxgep);
+ hxgep->hxge_hw_p = NULL;
+ }
+
+ if (hxgep->hxge_timerid) {
+ hxge_stop_timer(hxgep, hxgep->hxge_timerid);
+ hxgep->hxge_timerid = 0;
+ }
+
+ /* Stop any further interrupts. */
+ hxge_remove_intrs(hxgep);
+
+	/* Remove soft interrupts. */
+ hxge_remove_soft_intrs(hxgep);
+
+ /* Stop the device and free resources. */
+ hxge_destroy_dev(hxgep);
+
+	/* Tear down the ndd parameter setup. */
+ hxge_destroy_param(hxgep);
+
+ /* Tear down the kstat setup. */
+ hxge_destroy_kstats(hxgep);
+
+ /* Destroy all mutexes. */
+ hxge_destroy_mutexes(hxgep);
+
+ /*
+	 * Remove the list of ndd parameters which were set up during attach.
+ */
+ if (hxgep->dip) {
+ HXGE_DEBUG_MSG((hxgep, OBP_CTL,
+ " hxge_unattach: remove all properties"));
+ (void) ddi_prop_remove_all(hxgep->dip);
+ }
+
+ /*
+ * Unmap the register setup.
+ */
+ hxge_unmap_regs(hxgep);
+
+ hxge_fm_fini(hxgep);
+
+ /*
+ * Free the soft state data structures allocated with this instance.
+ */
+ ddi_soft_state_free(hxge_list, hxgep->instance);
+
+ HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
+}
+
+static hxge_status_t
+hxge_map_regs(p_hxge_t hxgep)
+{
+ int ddi_status = DDI_SUCCESS;
+ p_dev_regs_t dev_regs;
+
+#ifdef HXGE_DEBUG
+ char *sysname;
+#endif
+
+ off_t regsize;
+ hxge_status_t status = HXGE_OK;
+ int nregs;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
+
+ if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
+ return (HXGE_ERROR);
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
+
+ hxgep->dev_regs = NULL;
+ dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
+ dev_regs->hxge_regh = NULL;
+ dev_regs->hxge_pciregh = NULL;
+ dev_regs->hxge_msix_regh = NULL;
+
+ (void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "hxge_map_regs: pci config size 0x%x", regsize));
+
+ ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
+ (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
+ &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
+ if (ddi_status != DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "ddi_map_regs, hxge bus config regs failed"));
+ goto hxge_map_regs_fail0;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
+ dev_regs->hxge_pciregp,
+ dev_regs->hxge_pciregh));
+
+ (void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "hxge_map_regs: pio size 0x%x", regsize));
+
+ /* set up the device mapped register */
+ ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
+ (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
+ &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
+
+ if (ddi_status != DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "ddi_map_regs for Hydra global reg failed"));
+ goto hxge_map_regs_fail1;
+ }
+
+ /* set up the msi/msi-x mapped register */
+ (void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "hxge_map_regs: msix size 0x%x", regsize));
+
+ ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
+ (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
+ &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
+
+ if (ddi_status != DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "ddi_map_regs for msi reg failed"));
+ goto hxge_map_regs_fail2;
+ }
+
+ hxgep->dev_regs = dev_regs;
+
+ HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
+ HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
+ HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
+ HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
+
+ HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
+ HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
+
+ HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
+ HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
+ " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
+
+ goto hxge_map_regs_exit;
+
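+	/*
+	 * Failure cleanup: the labels below cascade so that each entry
+	 * point falls through and undoes every mapping set up before
+	 * the point of failure.
+	 */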
+hxge_map_regs_fail3:
+ if (dev_regs->hxge_msix_regh) {
+ ddi_regs_map_free(&dev_regs->hxge_msix_regh);
+ }
+
+hxge_map_regs_fail2:
+ if (dev_regs->hxge_regh) {
+ ddi_regs_map_free(&dev_regs->hxge_regh);
+ }
+
+hxge_map_regs_fail1:
+ if (dev_regs->hxge_pciregh) {
+ ddi_regs_map_free(&dev_regs->hxge_pciregh);
+ }
+
+hxge_map_regs_fail0:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
+ kmem_free(dev_regs, sizeof (dev_regs_t));
+
+hxge_map_regs_exit:
+ if (ddi_status != DDI_SUCCESS)
+ status |= (HXGE_ERROR | HXGE_DDI_FAILED);
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
+ return (status);
+}
+
+static void
+hxge_unmap_regs(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
+ if (hxgep->dev_regs) {
+ if (hxgep->dev_regs->hxge_pciregh) {
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "==> hxge_unmap_regs: bus"));
+ ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
+ hxgep->dev_regs->hxge_pciregh = NULL;
+ }
+
+ if (hxgep->dev_regs->hxge_regh) {
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "==> hxge_unmap_regs: device registers"));
+ ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
+ hxgep->dev_regs->hxge_regh = NULL;
+ }
+
+ if (hxgep->dev_regs->hxge_msix_regh) {
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "==> hxge_unmap_regs: device interrupts"));
+ ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
+ hxgep->dev_regs->hxge_msix_regh = NULL;
+ }
+ kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
+ hxgep->dev_regs = NULL;
+ }
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
+}
+
+static hxge_status_t
+hxge_setup_mutexes(p_hxge_t hxgep)
+{
+ int ddi_status = DDI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
+
+	/*
+	 * Get the interrupt cookie so that the mutexes can be initialized.
+	 */
+ ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
+ &hxgep->interrupt_cookie);
+
+ if (ddi_status != DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
+ goto hxge_setup_mutexes_exit;
+ }
+
+	/*
+	 * Initialize the mutexes for this device.
+	 */
+ MUTEX_INIT(hxgep->genlock, NULL,
+ MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
+ MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
+ MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
+ RW_INIT(&hxgep->filter_lock, NULL,
+ RW_DRIVER, (void *) hxgep->interrupt_cookie);
+
+hxge_setup_mutexes_exit:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "<== hxge_setup_mutexes status = %x", status));
+
+ if (ddi_status != DDI_SUCCESS)
+ status |= (HXGE_ERROR | HXGE_DDI_FAILED);
+
+ return (status);
+}
+
+static void
+hxge_destroy_mutexes(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
+ RW_DESTROY(&hxgep->filter_lock);
+ MUTEX_DESTROY(&hxgep->ouraddr_lock);
+ MUTEX_DESTROY(hxgep->genlock);
+
+ if (hxge_debug_init == 1) {
+ MUTEX_DESTROY(&hxgedebuglock);
+ hxge_debug_init = 0;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
+}
+
+hxge_status_t
+hxge_init(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
+
+ if (hxgep->drv_state & STATE_HW_INITIALIZED) {
+ return (status);
+ }
+
+ /*
+ * Allocate system memory for the receive/transmit buffer blocks and
+ * receive/transmit descriptor rings.
+ */
+ status = hxge_alloc_mem_pool(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
+ goto hxge_init_fail1;
+ }
+
+ /*
+ * Initialize and enable TXDMA channels.
+ */
+ status = hxge_init_txdma_channels(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
+ goto hxge_init_fail3;
+ }
+
+ /*
+ * Initialize and enable RXDMA channels.
+ */
+ status = hxge_init_rxdma_channels(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
+ goto hxge_init_fail4;
+ }
+
+ /*
+ * Initialize TCAM
+ */
+ status = hxge_classify_init(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
+ goto hxge_init_fail5;
+ }
+
+ /*
+ * Initialize the VMAC block.
+ */
+ status = hxge_vmac_init(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
+ goto hxge_init_fail5;
+ }
+
+	/* Bringup: this may be unnecessary once PXE and FCODE are available. */
+ status = hxge_pfc_set_default_mac_addr(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Default Address Failure\n"));
+ goto hxge_init_fail5;
+ }
+
+ hxge_intrs_enable(hxgep);
+
+ /*
+ * Enable hardware interrupts.
+ */
+ hxge_intr_hw_enable(hxgep);
+ hxgep->drv_state |= STATE_HW_INITIALIZED;
+
+ goto hxge_init_exit;
+
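+	/*
+	 * Failure cleanup: the labels cascade so that each entry point
+	 * also runs every cleanup step below it.
+	 */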
+hxge_init_fail5:
+ hxge_uninit_rxdma_channels(hxgep);
+hxge_init_fail4:
+ hxge_uninit_txdma_channels(hxgep);
+hxge_init_fail3:
+ hxge_free_mem_pool(hxgep);
+hxge_init_fail1:
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_init status (failed) = 0x%08x", status));
+ return (status);
+
+hxge_init_exit:
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
+ status));
+
+ return (status);
+}
+
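+/*
+ * hxge_start_timer() -- arm a timeout.  The timer is only started when
+ * the instance is not suspended (or has just resumed); otherwise NULL
+ * is returned and no timeout is pending.
+ */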
+timeout_id_t
+hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
+{
+ if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
+ return (timeout(func, (caddr_t)hxgep,
+ drv_usectohz(1000 * msec)));
+ }
+ return (NULL);
+}
+
+/*ARGSUSED*/
+void
+hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
+{
+ if (timerid) {
+ (void) untimeout(timerid);
+ }
+}
+
+void
+hxge_uninit(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
+
+ if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "==> hxge_uninit: not initialized"));
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
+ return;
+ }
+
+ /* Stop timer */
+ if (hxgep->hxge_timerid) {
+ hxge_stop_timer(hxgep, hxgep->hxge_timerid);
+ hxgep->hxge_timerid = 0;
+ }
+
+ (void) hxge_intr_hw_disable(hxgep);
+
+ /* Reset the receive VMAC side. */
+ (void) hxge_rx_vmac_disable(hxgep);
+
+ /* Free classification resources */
+ (void) hxge_classify_uninit(hxgep);
+
+ /* Reset the transmit/receive DMA side. */
+ (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
+ (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
+
+ hxge_uninit_txdma_channels(hxgep);
+ hxge_uninit_rxdma_channels(hxgep);
+
+ /* Reset the transmit VMAC side. */
+ (void) hxge_tx_vmac_disable(hxgep);
+
+ hxge_free_mem_pool(hxgep);
+
+ hxgep->drv_state &= ~STATE_HW_INITIALIZED;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
+}
+
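+/*
+ * hxge_get64() -- ioctl helper: read a 64-bit register.  The register
+ * offset is taken from the first 8 bytes of the mblk and the value
+ * read is copied back over those same 8 bytes.
+ */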
+void
+hxge_get64(p_hxge_t hxgep, p_mblk_t mp)
+{
+#if defined(__i386)
+ size_t reg;
+#else
+ uint64_t reg;
+#endif
+ uint64_t regdata;
+ int i, retry;
+
+ bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
+ regdata = 0;
+ retry = 1;
+
+ for (i = 0; i < retry; i++) {
+ HXGE_REG_RD64(hxgep->hpi_handle, reg, &regdata);
+ }
+ bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
+}
+
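+/*
+ * hxge_put64() -- ioctl helper: write a 64-bit register.  The mblk
+ * carries two 64-bit words: the register offset followed by the
+ * value to write.
+ */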
+void
+hxge_put64(p_hxge_t hxgep, p_mblk_t mp)
+{
+#if defined(__i386)
+ size_t reg;
+#else
+ uint64_t reg;
+#endif
+ uint64_t buf[2];
+
+ bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
+#if defined(__i386)
+ reg = (size_t)buf[0];
+#else
+ reg = buf[0];
+#endif
+
+ HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]);
+}
+
+/*ARGSUSED*/
+/*VARARGS*/
+void
+hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
+{
+ char msg_buffer[1048];
+ char prefix_buffer[32];
+ int instance;
+ uint64_t debug_level;
+ int cmn_level = CE_CONT;
+ va_list ap;
+
+ debug_level = (hxgep == NULL) ? hxge_debug_level :
+ hxgep->hxge_debug_level;
+
+ if ((level & debug_level) || (level == HXGE_NOTE) ||
+ (level == HXGE_ERR_CTL)) {
+ /* do the msg processing */
+ if (hxge_debug_init == 0) {
+ MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
+ hxge_debug_init = 1;
+ }
+
+ MUTEX_ENTER(&hxgedebuglock);
+
+ if ((level & HXGE_NOTE)) {
+ cmn_level = CE_NOTE;
+ }
+
+ if (level & HXGE_ERR_CTL) {
+ cmn_level = CE_WARN;
+ }
+
+ va_start(ap, fmt);
+ (void) vsprintf(msg_buffer, fmt, ap);
+ va_end(ap);
+
+ if (hxgep == NULL) {
+ instance = -1;
+ (void) sprintf(prefix_buffer, "%s :", "hxge");
+ } else {
+ instance = hxgep->instance;
+ (void) sprintf(prefix_buffer,
+ "%s%d :", "hxge", instance);
+ }
+
+ MUTEX_EXIT(&hxgedebuglock);
+ cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
+ }
+}
+
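+/*
+ * hxge_dump_packet() -- format up to MAX_DUMP_SZ bytes of a packet as
+ * colon-separated hex.  For larger packets the leading and trailing
+ * MAX_DUMP_SZ/2 bytes are shown with a run of dots in between.
+ * Note: the result is built in a static buffer, so this routine is
+ * not reentrant.
+ */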
+char *
+hxge_dump_packet(char *addr, int size)
+{
+ uchar_t *ap = (uchar_t *)addr;
+ int i;
+ static char etherbuf[1024];
+ char *cp = etherbuf;
+ char digits[] = "0123456789abcdef";
+
+ if (!size)
+ size = 60;
+
+ if (size > MAX_DUMP_SZ) {
+ /* Dump the leading bytes */
+ for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
+ if (*ap > 0x0f)
+ *cp++ = digits[*ap >> 4];
+ *cp++ = digits[*ap++ & 0xf];
+ *cp++ = ':';
+ }
+ for (i = 0; i < 20; i++)
+ *cp++ = '.';
+ /* Dump the last MAX_DUMP_SZ/2 bytes */
+ ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
+ for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
+ if (*ap > 0x0f)
+ *cp++ = digits[*ap >> 4];
+ *cp++ = digits[*ap++ & 0xf];
+ *cp++ = ':';
+ }
+ } else {
+ for (i = 0; i < size; i++) {
+ if (*ap > 0x0f)
+ *cp++ = digits[*ap >> 4];
+ *cp++ = digits[*ap++ & 0xf];
+ *cp++ = ':';
+ }
+ }
+ *--cp = 0;
+ return (etherbuf);
+}
+
+static void
+hxge_suspend(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
+
+ hxge_intrs_disable(hxgep);
+ hxge_destroy_dev(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
+}
+
+static hxge_status_t
+hxge_resume(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
+ hxgep->suspended = DDI_RESUME;
+
+ (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
+ (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
+
+ (void) hxge_rx_vmac_enable(hxgep);
+ (void) hxge_tx_vmac_enable(hxgep);
+
+ hxge_intrs_enable(hxgep);
+
+ hxgep->suspended = 0;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "<== hxge_resume status = 0x%x", status));
+
+ return (status);
+}
+
+hxge_status_t
+hxge_setup_dev(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
+
+ status = hxge_link_init(hxgep);
+ if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Bad register acc handle"));
+ status = HXGE_ERROR;
+ }
+
+ if (status != HXGE_OK) {
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL,
+ " hxge_setup_dev status (link init 0x%08x)", status));
+ goto hxge_setup_dev_exit;
+ }
+
+hxge_setup_dev_exit:
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "<== hxge_setup_dev status = 0x%08x", status));
+
+ return (status);
+}
+
+static void
+hxge_destroy_dev(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
+
+ (void) hxge_hw_stop(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
+}
+
+static hxge_status_t
+hxge_setup_system_dma_pages(p_hxge_t hxgep)
+{
+ int ddi_status = DDI_SUCCESS;
+ uint_t count;
+ ddi_dma_cookie_t cookie;
+ uint_t iommu_pagesize;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
+
+ hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
+ iommu_pagesize = dvma_pagesize(hxgep->dip);
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
+ " default_block_size %d iommu_pagesize %d",
+ hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
+ hxgep->rx_default_block_size, iommu_pagesize));
+
+ if (iommu_pagesize != 0) {
+ if (hxgep->sys_page_sz == iommu_pagesize) {
+			/* Hydra supports pages of up to 8K */
+ if (iommu_pagesize > 0x2000)
+ hxgep->sys_page_sz = 0x2000;
+ } else {
+ if (hxgep->sys_page_sz > iommu_pagesize)
+ hxgep->sys_page_sz = iommu_pagesize;
+ }
+ }
+
+ hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
+ "default_block_size %d page mask %d",
+ hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
+ hxgep->rx_default_block_size, hxgep->sys_page_mask));
+
+ switch (hxgep->sys_page_sz) {
+ default:
+ hxgep->sys_page_sz = 0x1000;
+ hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
+ hxgep->rx_default_block_size = 0x1000;
+ hxgep->rx_bksize_code = RBR_BKSIZE_4K;
+ break;
+ case 0x1000:
+ hxgep->rx_default_block_size = 0x1000;
+ hxgep->rx_bksize_code = RBR_BKSIZE_4K;
+ break;
+ case 0x2000:
+ hxgep->rx_default_block_size = 0x2000;
+ hxgep->rx_bksize_code = RBR_BKSIZE_8K;
+ break;
+ }
+
+ hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
+ hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
+ hxge_desc_dma_attr.dma_attr_align = hxgep->sys_page_sz;
+
+	/*
+	 * Get the system DMA burst size: a throwaway handle is allocated
+	 * and bound to its own storage purely so that ddi_dma_burstsizes()
+	 * can be queried; it is unbound and freed again below.
+	 */
+ ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
+ DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
+ if (ddi_status != DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
+ goto hxge_get_soft_properties_exit;
+ }
+
+ ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
+ (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
+ &cookie, &count);
+ if (ddi_status != DDI_DMA_MAPPED) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Binding spare handle to find system burstsize failed."));
+ ddi_status = DDI_FAILURE;
+ goto hxge_get_soft_properties_fail1;
+ }
+
+ hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
+ (void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
+
+hxge_get_soft_properties_fail1:
+ ddi_dma_free_handle(&hxgep->dmasparehandle);
+
+hxge_get_soft_properties_exit:
+
+ if (ddi_status != DDI_SUCCESS)
+ status |= (HXGE_ERROR | HXGE_DDI_FAILED);
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "<== hxge_setup_system_dma_pages status = 0x%08x", status));
+
+ return (status);
+}
+
+hxge_status_t
+hxge_alloc_mem_pool(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
+
+ status = hxge_alloc_rx_mem_pool(hxgep);
+ if (status != HXGE_OK) {
+ return (HXGE_ERROR);
+ }
+
+ status = hxge_alloc_tx_mem_pool(hxgep);
+ if (status != HXGE_OK) {
+ hxge_free_rx_mem_pool(hxgep);
+ return (HXGE_ERROR);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
+ return (HXGE_OK);
+}
+
+static void
+hxge_free_mem_pool(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
+
+ hxge_free_rx_mem_pool(hxgep);
+ hxge_free_tx_mem_pool(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
+}
+
+static hxge_status_t
+hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
+{
+ int i, j;
+ uint32_t ndmas, st_rdc;
+ p_hxge_dma_pt_cfg_t p_all_cfgp;
+ p_hxge_hw_pt_cfg_t p_cfgp;
+ p_hxge_dma_pool_t dma_poolp;
+ p_hxge_dma_common_t *dma_buf_p;
+ p_hxge_dma_pool_t dma_cntl_poolp;
+ p_hxge_dma_common_t *dma_cntl_p;
+ size_t rx_buf_alloc_size;
+ size_t rx_cntl_alloc_size;
+ uint32_t *num_chunks; /* per dma */
+ hxge_status_t status = HXGE_OK;
+
+ uint32_t hxge_port_rbr_size;
+ uint32_t hxge_port_rbr_spare_size;
+ uint32_t hxge_port_rcr_size;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
+
+ p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
+ p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
+ st_rdc = p_cfgp->start_rdc;
+ ndmas = p_cfgp->max_rdcs;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
+
+ /*
+ * Allocate memory for each receive DMA channel.
+ */
+ dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
+ KM_SLEEP);
+ dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
+ sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
+
+ dma_cntl_poolp = (p_hxge_dma_pool_t)
+ KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
+ dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
+ sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
+
+ num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
+ KM_SLEEP);
+
+	/*
+	 * Assume that each DMA channel will be configured with the default
+	 * block size.  RBR block counts must be a multiple of the post
+	 * batch count (16).
+	 */
+ hxge_port_rbr_size = p_all_cfgp->rbr_size;
+ hxge_port_rcr_size = p_all_cfgp->rcr_size;
+
+ if (!hxge_port_rbr_size) {
+ hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
+ }
+
+ if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
+ hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
+ (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
+ }
+
+ p_all_cfgp->rbr_size = hxge_port_rbr_size;
+ hxge_port_rbr_spare_size = hxge_rbr_spare_size;
+
+ if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
+ hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
+ (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
+ }
+
+ rx_buf_alloc_size = (hxgep->rx_default_block_size *
+ (hxge_port_rbr_size + hxge_port_rbr_spare_size));
+
+	/*
+	 * The addresses of the receive block ring, the receive completion
+	 * ring and the mailbox must all be cache-aligned (64 bytes).
+	 */
+ rx_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
+ rx_cntl_alloc_size *= (sizeof (rx_desc_t));
+ rx_cntl_alloc_size += (sizeof (rcr_entry_t) * hxge_port_rcr_size);
+ rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
+ "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
+ "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
+ hxge_port_rbr_size, hxge_port_rbr_spare_size,
+ hxge_port_rcr_size, rx_cntl_alloc_size));
+
+ hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
+ hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
+
+	/*
+	 * Allocate memory for receive buffers and descriptor rings.
+	 * Replace the allocation functions with interface functions
+	 * provided by the partition manager when it becomes available.
+	 *
+	 * First, allocate memory for the receive buffer blocks.
+	 */
+ for (i = 0; i < ndmas; i++) {
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ " hxge_alloc_rx_mem_pool to alloc mem: "
+ " dma %d dma_buf_p %llx &dma_buf_p %llx",
+ i, dma_buf_p[i], &dma_buf_p[i]));
+
+ num_chunks[i] = 0;
+
+ status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
+ rx_buf_alloc_size, hxgep->rx_default_block_size,
+ &num_chunks[i]);
+ if (status != HXGE_OK) {
+ break;
+ }
+
+ st_rdc++;
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ " hxge_alloc_rx_mem_pool DONE alloc mem: "
+ "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
+ dma_buf_p[i], &dma_buf_p[i]));
+ }
+
+ if (i < ndmas) {
+ goto hxge_alloc_rx_mem_fail1;
+ }
+
+ /*
+ * Allocate memory for descriptor rings and mailbox.
+ */
+ st_rdc = p_cfgp->start_rdc;
+ for (j = 0; j < ndmas; j++) {
+ status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc, &dma_cntl_p[j],
+ rx_cntl_alloc_size);
+ if (status != HXGE_OK) {
+ break;
+ }
+ st_rdc++;
+ }
+
+ if (j < ndmas) {
+ goto hxge_alloc_rx_mem_fail2;
+ }
+
+ dma_poolp->ndmas = ndmas;
+ dma_poolp->num_chunks = num_chunks;
+ dma_poolp->buf_allocated = B_TRUE;
+ hxgep->rx_buf_pool_p = dma_poolp;
+ dma_poolp->dma_buf_pool_p = dma_buf_p;
+
+ dma_cntl_poolp->ndmas = ndmas;
+ dma_cntl_poolp->buf_allocated = B_TRUE;
+ hxgep->rx_cntl_pool_p = dma_cntl_poolp;
+ dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
+
+ goto hxge_alloc_rx_mem_pool_exit;
+
+hxge_alloc_rx_mem_fail2:
+ /* Free control buffers */
+ j--;
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
+ for (; j >= 0; j--) {
+ hxge_free_rx_cntl_dma(hxgep,
+ (p_hxge_dma_common_t)dma_cntl_p[j]);
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
+ }
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
+
+hxge_alloc_rx_mem_fail1:
+ /* Free data buffers */
+ i--;
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
+ for (; i >= 0; i--) {
+ hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
+ num_chunks[i]);
+ }
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
+
+ KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
+ KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
+ KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
+ KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
+ KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
+
+hxge_alloc_rx_mem_pool_exit:
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
+
+ return (status);
+}
+
+static void
+hxge_free_rx_mem_pool(p_hxge_t hxgep)
+{
+ uint32_t i, ndmas;
+ p_hxge_dma_pool_t dma_poolp;
+ p_hxge_dma_common_t *dma_buf_p;
+ p_hxge_dma_pool_t dma_cntl_poolp;
+ p_hxge_dma_common_t *dma_cntl_p;
+ uint32_t *num_chunks;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
+
+ dma_poolp = hxgep->rx_buf_pool_p;
+ if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
+ "(null rx buf pool or buf not allocated"));
+ return;
+ }
+
+ dma_cntl_poolp = hxgep->rx_cntl_pool_p;
+ if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "<== hxge_free_rx_mem_pool "
+ "(null rx cntl buf pool or cntl buf not allocated"));
+ return;
+ }
+
+ dma_buf_p = dma_poolp->dma_buf_pool_p;
+ num_chunks = dma_poolp->num_chunks;
+
+ dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
+ ndmas = dma_cntl_poolp->ndmas;
+
+ for (i = 0; i < ndmas; i++) {
+ hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
+ }
+
+ for (i = 0; i < ndmas; i++) {
+ hxge_free_rx_cntl_dma(hxgep, dma_cntl_p[i]);
+ }
+
+ for (i = 0; i < ndmas; i++) {
+ KMEM_FREE(dma_buf_p[i],
+ sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
+ KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
+ }
+
+ KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
+ KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
+ KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
+ KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
+ KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
+
+ hxgep->rx_buf_pool_p = NULL;
+ hxgep->rx_cntl_pool_p = NULL;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
+}
+
+static hxge_status_t
+hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
+ p_hxge_dma_common_t *dmap,
+ size_t alloc_size, size_t block_size, uint32_t *num_chunks)
+{
+ p_hxge_dma_common_t rx_dmap;
+ hxge_status_t status = HXGE_OK;
+ size_t total_alloc_size;
+ size_t allocated = 0;
+ int i, size_index, array_size;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
+
+ rx_dmap = (p_hxge_dma_common_t)
+ KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
+ dma_channel, alloc_size, block_size, dmap));
+
+ total_alloc_size = alloc_size;
+
+ i = 0;
+ size_index = 0;
+ array_size = sizeof (alloc_sizes) / sizeof (size_t);
+ while ((alloc_sizes[size_index] < alloc_size) &&
+ (size_index < array_size))
+ size_index++;
+ if (size_index >= array_size) {
+ size_index = array_size - 1;
+ }
+
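+	/*
+	 * Allocate the pool in chunks: start with the smallest entry of
+	 * alloc_sizes[] that covers the request (or the largest entry if
+	 * none does) and fall back to smaller chunk sizes whenever an
+	 * allocation fails, until the total is satisfied or the chunk
+	 * limit (HXGE_DMA_BLOCK) is reached.
+	 */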
+ while ((allocated < total_alloc_size) &&
+ (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
+ rx_dmap[i].dma_chunk_index = i;
+ rx_dmap[i].block_size = block_size;
+ rx_dmap[i].alength = alloc_sizes[size_index];
+ rx_dmap[i].orig_alength = rx_dmap[i].alength;
+ rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
+ rx_dmap[i].dma_channel = dma_channel;
+ rx_dmap[i].contig_alloc_type = B_FALSE;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
+ "i %d nblocks %d alength %d",
+ dma_channel, i, &rx_dmap[i], block_size,
+ i, rx_dmap[i].nblocks, rx_dmap[i].alength));
+ status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
+ &hxge_rx_dma_attr, rx_dmap[i].alength,
+ &hxge_dev_buf_dma_acc_attr,
+ DDI_DMA_READ | DDI_DMA_STREAMING,
+ (p_hxge_dma_common_t)(&rx_dmap[i]));
+ if (status != HXGE_OK) {
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ " hxge_alloc_rx_buf_dma: Alloc Failed: "
+ " for size: %d", alloc_sizes[size_index]));
+ size_index--;
+ } else {
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ " alloc_rx_buf_dma allocated rdc %d "
+ "chunk %d size %x dvma %x bufp %llx ",
+ dma_channel, i, rx_dmap[i].alength,
+ rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
+ i++;
+ allocated += alloc_sizes[size_index];
+ }
+ }
+
+ if (allocated < total_alloc_size) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_alloc_rx_buf_dma failed due to"
+ " allocated(%d) < required(%d)",
+ allocated, total_alloc_size));
+ goto hxge_alloc_rx_mem_fail1;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
+
+ *num_chunks = i;
+ *dmap = rx_dmap;
+
+ goto hxge_alloc_rx_mem_exit;
+
+hxge_alloc_rx_mem_fail1:
+ KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
+
+hxge_alloc_rx_mem_exit:
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static void
+hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
+ uint32_t num_chunks)
+{
+ int i;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
+
+ for (i = 0; i < num_chunks; i++) {
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
+ hxge_dma_mem_free(dmap++);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
+}
+
+/*ARGSUSED*/
+static hxge_status_t
+hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
+ p_hxge_dma_common_t *dmap, size_t size)
+{
+ p_hxge_dma_common_t rx_dmap;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
+
+ rx_dmap = (p_hxge_dma_common_t)
+ KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
+
+ rx_dmap->contig_alloc_type = B_FALSE;
+
+ status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
+ &hxge_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_alloc_rx_cntl_dma: Alloc Failed: "
+ " for size: %d", size));
+ goto hxge_alloc_rx_cntl_dma_fail1;
+ }
+
+ *dmap = rx_dmap;
+
+ goto hxge_alloc_rx_cntl_dma_exit;
+
+hxge_alloc_rx_cntl_dma_fail1:
+ KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
+
+hxge_alloc_rx_cntl_dma_exit:
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static void
+hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
+{
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
+
+ hxge_dma_mem_free(dmap);
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
+}
+
+static hxge_status_t
+hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+ int i, j;
+ uint32_t ndmas, st_tdc;
+ p_hxge_dma_pt_cfg_t p_all_cfgp;
+ p_hxge_hw_pt_cfg_t p_cfgp;
+ p_hxge_dma_pool_t dma_poolp;
+ p_hxge_dma_common_t *dma_buf_p;
+ p_hxge_dma_pool_t dma_cntl_poolp;
+ p_hxge_dma_common_t *dma_cntl_p;
+ size_t tx_buf_alloc_size;
+ size_t tx_cntl_alloc_size;
+ uint32_t *num_chunks; /* per dma */
+
+ HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
+
+ p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
+ p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
+ st_tdc = p_cfgp->start_tdc;
+ ndmas = p_cfgp->max_tdcs;
+
+ HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
+ "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
+ p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
+ /*
+ * Allocate memory for each transmit DMA channel.
+ */
+ dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
+ KM_SLEEP);
+ dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
+ sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
+
+ dma_cntl_poolp = (p_hxge_dma_pool_t)
+ KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
+ dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
+ sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
+
+ hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
+
+	/*
+	 * Assume that each DMA channel will be configured with the default
+	 * transmit buffer size for copying transmit data.  (Packets with
+	 * payloads over this limit are not copied.)
+	 */
+ tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
+
+	/*
+	 * The addresses of the transmit descriptor ring and the mailbox
+	 * must all be cache-aligned (64 bytes).
+	 */
+ tx_cntl_alloc_size = hxge_tx_ring_size;
+ tx_cntl_alloc_size *= (sizeof (tx_desc_t));
+ tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
+
+ num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
+ KM_SLEEP);
+
+ /*
+ * Allocate memory for transmit buffers and descriptor rings. Replace
+ * allocation functions with interface functions provided by the
+ * partition manager when it is available.
+ *
+ * Allocate memory for the transmit buffer pool.
+ */
+ for (i = 0; i < ndmas; i++) {
+ num_chunks[i] = 0;
+ status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
+ tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
+ if (status != HXGE_OK) {
+ break;
+ }
+ st_tdc++;
+ }
+
+ if (i < ndmas) {
+ goto hxge_alloc_tx_mem_pool_fail1;
+ }
+
+ st_tdc = p_cfgp->start_tdc;
+
+ /*
+ * Allocate memory for descriptor rings and mailbox.
+ */
+ for (j = 0; j < ndmas; j++) {
+ status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
+ tx_cntl_alloc_size);
+ if (status != HXGE_OK) {
+ break;
+ }
+ st_tdc++;
+ }
+
+ if (j < ndmas) {
+ goto hxge_alloc_tx_mem_pool_fail2;
+ }
+
+ dma_poolp->ndmas = ndmas;
+ dma_poolp->num_chunks = num_chunks;
+ dma_poolp->buf_allocated = B_TRUE;
+ dma_poolp->dma_buf_pool_p = dma_buf_p;
+ hxgep->tx_buf_pool_p = dma_poolp;
+
+ dma_cntl_poolp->ndmas = ndmas;
+ dma_cntl_poolp->buf_allocated = B_TRUE;
+ dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
+ hxgep->tx_cntl_pool_p = dma_cntl_poolp;
+
+ HXGE_DEBUG_MSG((hxgep, MEM_CTL,
+ "==> hxge_alloc_tx_mem_pool: start_tdc %d "
+ "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
+
+ goto hxge_alloc_tx_mem_pool_exit;
+
+hxge_alloc_tx_mem_pool_fail2:
+ /* Free control buffers */
+ j--;
+ for (; j >= 0; j--) {
+ hxge_free_tx_cntl_dma(hxgep,
+ (p_hxge_dma_common_t)dma_cntl_p[j]);
+ }
+
+hxge_alloc_tx_mem_pool_fail1:
+ /* Free data buffers */
+ i--;
+ for (; i >= 0; i--) {
+ hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
+ num_chunks[i]);
+ }
+
+ KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
+ KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
+ KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
+ KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
+ KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
+
+hxge_alloc_tx_mem_pool_exit:
+ HXGE_DEBUG_MSG((hxgep, MEM_CTL,
+ "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
+
+ return (status);
+}
+
+static hxge_status_t
+hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
+ p_hxge_dma_common_t *dmap, size_t alloc_size,
+ size_t block_size, uint32_t *num_chunks)
+{
+ p_hxge_dma_common_t tx_dmap;
+ hxge_status_t status = HXGE_OK;
+ size_t total_alloc_size;
+ size_t allocated = 0;
+ int i, size_index, array_size;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
+
+ tx_dmap = (p_hxge_dma_common_t)
+ KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
+
+ total_alloc_size = alloc_size;
+ i = 0;
+ size_index = 0;
+ array_size = sizeof (alloc_sizes) / sizeof (size_t);
+ while ((alloc_sizes[size_index] < alloc_size) &&
+ (size_index < array_size))
+ size_index++;
+ if (size_index >= array_size) {
+ size_index = array_size - 1;
+ }
+
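+	/*
+	 * Same chunked allocation strategy as hxge_alloc_rx_buf_dma():
+	 * fall back to smaller chunk sizes on failure until the total
+	 * is satisfied or HXGE_DMA_BLOCK chunks are in use.
+	 */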
+ while ((allocated < total_alloc_size) &&
+ (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
+ tx_dmap[i].dma_chunk_index = i;
+ tx_dmap[i].block_size = block_size;
+ tx_dmap[i].alength = alloc_sizes[size_index];
+ tx_dmap[i].orig_alength = tx_dmap[i].alength;
+ tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
+ tx_dmap[i].dma_channel = dma_channel;
+ tx_dmap[i].contig_alloc_type = B_FALSE;
+
+ status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
+ &hxge_tx_dma_attr, tx_dmap[i].alength,
+ &hxge_dev_buf_dma_acc_attr,
+ DDI_DMA_WRITE | DDI_DMA_STREAMING,
+ (p_hxge_dma_common_t)(&tx_dmap[i]));
+ if (status != HXGE_OK) {
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ " hxge_alloc_tx_buf_dma: Alloc Failed: "
+ " for size: %d", alloc_sizes[size_index]));
+ size_index--;
+ } else {
+ i++;
+ allocated += alloc_sizes[size_index];
+ }
+ }
+
+ if (allocated < total_alloc_size) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_alloc_tx_buf_dma: failed due to"
+ " allocated(%d) < required(%d)",
+ allocated, total_alloc_size));
+ goto hxge_alloc_tx_mem_fail1;
+ }
+
+ *num_chunks = i;
+ *dmap = tx_dmap;
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
+ *dmap, i));
+ goto hxge_alloc_tx_mem_exit;
+
+hxge_alloc_tx_mem_fail1:
+ KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
+
+hxge_alloc_tx_mem_exit:
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static void
+hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
+ uint32_t num_chunks)
+{
+ int i;
+
+ HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
+
+ for (i = 0; i < num_chunks; i++) {
+ hxge_dma_mem_free(dmap++);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
+}
+
+/*ARGSUSED*/
+static hxge_status_t
+hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
+ p_hxge_dma_common_t *dmap, size_t size)
+{
+ p_hxge_dma_common_t tx_dmap;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
+
+ tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
+ KM_SLEEP);
+
+ tx_dmap->contig_alloc_type = B_FALSE;
+
+ status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
+ &hxge_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_alloc_tx_cntl_dma: Alloc Failed: "
+ " for size: %d", size));
+ goto hxge_alloc_tx_cntl_dma_fail1;
+ }
+
+ *dmap = tx_dmap;
+
+ goto hxge_alloc_tx_cntl_dma_exit;
+
+hxge_alloc_tx_cntl_dma_fail1:
+ KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
+
+hxge_alloc_tx_cntl_dma_exit:
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static void
+hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
+{
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
+
+ hxge_dma_mem_free(dmap);
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
+}
+
+static void
+hxge_free_tx_mem_pool(p_hxge_t hxgep)
+{
+ uint32_t i, ndmas;
+ p_hxge_dma_pool_t dma_poolp;
+ p_hxge_dma_common_t *dma_buf_p;
+ p_hxge_dma_pool_t dma_cntl_poolp;
+ p_hxge_dma_common_t *dma_cntl_p;
+ uint32_t *num_chunks;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
+
+ dma_poolp = hxgep->tx_buf_pool_p;
+ if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_free_tx_mem_pool "
+ "(null rx buf pool or buf not allocated"));
+ return;
+ }
+
+ dma_cntl_poolp = hxgep->tx_cntl_pool_p;
+ if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_free_tx_mem_pool "
+ "(null tx cntl buf pool or cntl buf not allocated"));
+ return;
+ }
+
+ dma_buf_p = dma_poolp->dma_buf_pool_p;
+ num_chunks = dma_poolp->num_chunks;
+
+ dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
+ ndmas = dma_cntl_poolp->ndmas;
+
+ for (i = 0; i < ndmas; i++) {
+ hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
+ }
+
+ for (i = 0; i < ndmas; i++) {
+ hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
+ }
+
+ for (i = 0; i < ndmas; i++) {
+ KMEM_FREE(dma_buf_p[i],
+ sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
+ KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
+ }
+
+ KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
+ KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
+ KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
+ KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
+ KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
+
+ hxgep->tx_buf_pool_p = NULL;
+ hxgep->tx_cntl_pool_p = NULL;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
+}
+
+/*ARGSUSED*/
+static hxge_status_t
+hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
+ struct ddi_dma_attr *dma_attrp,
+ size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
+ p_hxge_dma_common_t dma_p)
+{
+ caddr_t kaddrp;
+ int ddi_status = DDI_SUCCESS;
+
+ dma_p->dma_handle = NULL;
+ dma_p->acc_handle = NULL;
+ dma_p->kaddrp = NULL;
+
+ ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
+ DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
+ if (ddi_status != DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
+ xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
+ &dma_p->acc_handle);
+ if (ddi_status != DDI_SUCCESS) {
+ /* The caller will decide whether it is fatal */
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
+ ddi_dma_free_handle(&dma_p->dma_handle);
+ dma_p->dma_handle = NULL;
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ if (dma_p->alength < length) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
+ ddi_dma_mem_free(&dma_p->acc_handle);
+ ddi_dma_free_handle(&dma_p->dma_handle);
+ dma_p->acc_handle = NULL;
+ dma_p->dma_handle = NULL;
+ return (HXGE_ERROR);
+ }
+
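+	/*
+	 * Bind the buffer.  A single DMA cookie (physically contiguous
+	 * mapping) is required; a bind that yields more than one cookie
+	 * is torn down and treated as a failure below.
+	 */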
+ ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
+ kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
+ &dma_p->dma_cookie, &dma_p->ncookies);
+ if (ddi_status != DDI_DMA_MAPPED) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_dma_mem_alloc:di_dma_addr_bind failed "
+ "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
+ if (dma_p->acc_handle) {
+ ddi_dma_mem_free(&dma_p->acc_handle);
+ dma_p->acc_handle = NULL;
+ }
+ ddi_dma_free_handle(&dma_p->dma_handle);
+ dma_p->dma_handle = NULL;
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ if (dma_p->ncookies != 1) {
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
+ "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
+ if (dma_p->acc_handle) {
+ ddi_dma_mem_free(&dma_p->acc_handle);
+ dma_p->acc_handle = NULL;
+ }
+ (void) ddi_dma_unbind_handle(dma_p->dma_handle);
+ ddi_dma_free_handle(&dma_p->dma_handle);
+ dma_p->dma_handle = NULL;
+ return (HXGE_ERROR);
+ }
+
+ dma_p->kaddrp = kaddrp;
+#if defined(__i386)
+ dma_p->ioaddr_pp =
+ (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
+#else
+ dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
+#endif
+
+ HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
+ "dma buffer allocated: dma_p $%p "
+ "return dmac_ladress from cookie $%p dmac_size %d "
+ "dma_p->ioaddr_p $%p "
+ "dma_p->orig_ioaddr_p $%p "
+ "orig_vatopa $%p "
+ "alength %d (0x%x) "
+ "kaddrp $%p "
+ "length %d (0x%x)",
+ dma_p,
+ dma_p->dma_cookie.dmac_laddress,
+ dma_p->dma_cookie.dmac_size,
+ dma_p->ioaddr_pp,
+ dma_p->orig_ioaddr_pp,
+ dma_p->orig_vatopa,
+ dma_p->alength, dma_p->alength,
+ kaddrp,
+ length, length));
+
+ return (HXGE_OK);
+}
+
+static void
+hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
+{
+ if (dma_p->dma_handle != NULL) {
+ if (dma_p->ncookies) {
+ (void) ddi_dma_unbind_handle(dma_p->dma_handle);
+ dma_p->ncookies = 0;
+ }
+ ddi_dma_free_handle(&dma_p->dma_handle);
+ dma_p->dma_handle = NULL;
+ }
+ if (dma_p->acc_handle != NULL) {
+ ddi_dma_mem_free(&dma_p->acc_handle);
+ dma_p->acc_handle = NULL;
+ HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
+ }
+ dma_p->kaddrp = NULL;
+	dma_p->alength = 0;
+}
+
+/*
+ * hxge_m_start() -- start transmitting and receiving.
+ *
+ * This function is called by the MAC layer when the first
+ * stream is open to prepare the hardware ready for sending
+ * and transmitting packets.
+ */
+static int
+hxge_m_start(void *arg)
+{
+ p_hxge_t hxgep = (p_hxge_t)arg;
+
+ HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
+
+ MUTEX_ENTER(hxgep->genlock);
+
+	if (hxge_init(hxgep) != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_m_start: initialization failed"));
+ MUTEX_EXIT(hxgep->genlock);
+ return (EIO);
+ }
+
+ if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
+ /*
+ * Start timer to check the system error and tx hangs
+ */
+ hxgep->hxge_timerid = hxge_start_timer(hxgep,
+ hxge_check_hw_state, HXGE_CHECK_TIMER);
+
+ hxgep->hxge_mac_state = HXGE_MAC_STARTED;
+ }
+
+ MUTEX_EXIT(hxgep->genlock);
+
+ HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
+
+ return (0);
+}
+
+/*
+ * hxge_m_stop(): stop transmitting and receiving.
+ */
+static void
+hxge_m_stop(void *arg)
+{
+ p_hxge_t hxgep = (p_hxge_t)arg;
+
+ HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
+
+ if (hxgep->hxge_timerid) {
+ hxge_stop_timer(hxgep, hxgep->hxge_timerid);
+ hxgep->hxge_timerid = 0;
+ }
+
+ MUTEX_ENTER(hxgep->genlock);
+
+ hxge_uninit(hxgep);
+
+ hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
+
+ MUTEX_EXIT(hxgep->genlock);
+
+ HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
+}
+
+static int
+hxge_m_unicst(void *arg, const uint8_t *macaddr)
+{
+ p_hxge_t hxgep = (p_hxge_t)arg;
+ struct ether_addr addrp;
+ hxge_status_t status;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst"));
+
+ bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
+
+ status = hxge_set_mac_addr(hxgep, &addrp);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_m_unicst: set unitcast failed"));
+ return (EINVAL);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst"));
+
+ return (0);
+}
+
+static int
+hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
+{
+ p_hxge_t hxgep = (p_hxge_t)arg;
+ struct ether_addr addrp;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
+
+ bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
+
+ if (add) {
+ if (hxge_add_mcast_addr(hxgep, &addrp)) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_m_multicst: add multicast failed"));
+ return (EINVAL);
+ }
+ } else {
+ if (hxge_del_mcast_addr(hxgep, &addrp)) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_m_multicst: del multicast failed"));
+ return (EINVAL);
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
+
+ return (0);
+}
+
+static int
+hxge_m_promisc(void *arg, boolean_t on)
+{
+ p_hxge_t hxgep = (p_hxge_t)arg;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
+
+ if (hxge_set_promisc(hxgep, on)) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_m_promisc: set promisc failed"));
+ return (EINVAL);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
+
+ return (0);
+}
+
+static void
+hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
+{
+ p_hxge_t hxgep = (p_hxge_t)arg;
+ struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
+ boolean_t need_privilege;
+ int err;
+ int cmd;
+
+ HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
+
+ iocp->ioc_error = 0;
+ need_privilege = B_TRUE;
+ cmd = iocp->ioc_cmd;
+
+ HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
+ switch (cmd) {
+ default:
+ miocnak(wq, mp, 0, EINVAL);
+ HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
+ return;
+
+ case LB_GET_INFO_SIZE:
+ case LB_GET_INFO:
+ case LB_GET_MODE:
+ need_privilege = B_FALSE;
+ break;
+
+ case LB_SET_MODE:
+ break;
+
+ case ND_GET:
+ need_privilege = B_FALSE;
+ break;
+ case ND_SET:
+ break;
+
+ case HXGE_GET64:
+ case HXGE_PUT64:
+ case HXGE_GET_TX_RING_SZ:
+ case HXGE_GET_TX_DESC:
+ case HXGE_TX_SIDE_RESET:
+ case HXGE_RX_SIDE_RESET:
+ case HXGE_GLOBAL_RESET:
+ case HXGE_RESET_MAC:
+ case HXGE_PUT_TCAM:
+ case HXGE_GET_TCAM:
+ case HXGE_RTRACE:
+
+ need_privilege = B_FALSE;
+ break;
+ }
+
+ if (need_privilege) {
+ err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
+ if (err != 0) {
+ miocnak(wq, mp, 0, err);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_m_ioctl: no priv"));
+ return;
+ }
+ }
+
+ switch (cmd) {
+ case ND_GET:
+ HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
+ case ND_SET:
+ HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
+ hxge_param_ioctl(hxgep, wq, mp, iocp);
+ break;
+
+ case LB_GET_MODE:
+ case LB_SET_MODE:
+ case LB_GET_INFO_SIZE:
+ case LB_GET_INFO:
+ hxge_loopback_ioctl(hxgep, wq, mp, iocp);
+ break;
+
+ case HXGE_PUT_TCAM:
+ case HXGE_GET_TCAM:
+ case HXGE_GET64:
+ case HXGE_PUT64:
+ case HXGE_GET_TX_RING_SZ:
+ case HXGE_GET_TX_DESC:
+ case HXGE_TX_SIDE_RESET:
+ case HXGE_RX_SIDE_RESET:
+ case HXGE_GLOBAL_RESET:
+ case HXGE_RESET_MAC:
+ HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
+ "==> hxge_m_ioctl: cmd 0x%x", cmd));
+ hxge_hw_ioctl(hxgep, wq, mp, iocp);
+ break;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
+}
+
+extern void hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
+
+static void
+hxge_m_resources(void *arg)
+{
+ p_hxge_t hxgep = arg;
+ mac_rx_fifo_t mrf;
+ p_rx_rcr_rings_t rcr_rings;
+ p_rx_rcr_ring_t *rcr_p;
+ p_rx_rcr_ring_t rcrp;
+ uint32_t i, ndmas;
+ int status;
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources"));
+
+ MUTEX_ENTER(hxgep->genlock);
+
+ if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
+ status = hxge_init(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources: "
+ "hxge_init failed"));
+ MUTEX_EXIT(hxgep->genlock);
+ return;
+ }
+ }
+
+ mrf.mrf_type = MAC_RX_FIFO;
+ mrf.mrf_blank = hxge_rx_hw_blank;
+
+ mrf.mrf_normal_blank_time = RXDMA_RCR_PTHRES_DEFAULT;
+ mrf.mrf_normal_pkt_count = RXDMA_RCR_TO_DEFAULT;
+
+ rcr_rings = hxgep->rx_rcr_rings;
+ rcr_p = rcr_rings->rcr_rings;
+ ndmas = rcr_rings->ndmas;
+
+ /*
+ * Export our receive resources to the MAC layer.
+ */
+ for (i = 0; i < ndmas; i++) {
+		rcrp = rcr_p[i];
+ mrf.mrf_arg = rcrp;
+ rcrp->rcr_mac_handle =
+ mac_resource_add(hxgep->mach, (mac_resource_t *)&mrf);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_m_resources: vdma %d dma %d "
+ "rcrptr 0x%016llx mac_handle 0x%016llx",
+ i, rcrp->rdc, rcr_p[i], rcrp->rcr_mac_handle));
+ }
+
+ MUTEX_EXIT(hxgep->genlock);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_m_resources"));
+}
+
+/*
+ * Set an alternate MAC address
+ */
+static int
+hxge_altmac_set(p_hxge_t hxgep, uint8_t *maddr, mac_addr_slot_t slot)
+{
+ uint64_t address;
+ uint64_t tmp;
+ hpi_status_t status;
+ uint8_t addrn;
+ int i;
+
+	/*
+	 * Convert the byte array to a 48-bit value.
+	 * Check endianness here if in doubt.
+	 */
+ address = 0;
+ for (i = 0; i < ETHERADDRL; i++) {
+ tmp = maddr[i];
+ address <<= 8;
+ address |= tmp;
+ }
+
+ addrn = (uint8_t)slot;
+ status = hpi_pfc_set_mac_address(hxgep->hpi_handle, addrn, address);
+ if (status != HPI_SUCCESS)
+ return (EIO);
+
+ return (0);
+}
+
+static void
+hxge_mmac_kstat_update(p_hxge_t hxgep, mac_addr_slot_t slot)
+{
+ p_hxge_mmac_stats_t mmac_stats;
+ int i;
+ hxge_mmac_t *mmac_info;
+
+ mmac_info = &hxgep->hxge_mmac_info;
+ mmac_stats = &hxgep->statsp->mmac_stats;
+ mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
+ mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
+
+ for (i = 0; i < ETHERADDRL; i++) {
+ mmac_stats->mmac_avail_pool[slot].ether_addr_octet[i] =
+ mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
+ }
+}
+
+/*
+ * Find an unused address slot, set the address value to the one specified,
+ * enable the port to start filtering on the new MAC address.
+ * Returns: 0 on success.
+ */
+int
+hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
+{
+ p_hxge_t hxgep = arg;
+ mac_addr_slot_t slot;
+ hxge_mmac_t *mmac_info;
+ int err;
+ hxge_status_t status;
+
+ mutex_enter(hxgep->genlock);
+
+	/*
+	 * Make sure that hxge is initialized in case _start()
+	 * has not been called.
+	 */
+ if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
+ status = hxge_init(hxgep);
+ if (status != HXGE_OK) {
+ mutex_exit(hxgep->genlock);
+ return (ENXIO);
+ }
+ }
+
+ mmac_info = &hxgep->hxge_mmac_info;
+ if (mmac_info->naddrfree == 0) {
+ mutex_exit(hxgep->genlock);
+ return (ENOSPC);
+ }
+
+ if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
+ maddr->mma_addrlen)) {
+ mutex_exit(hxgep->genlock);
+ return (EINVAL);
+ }
+
+	/*
+	 * Search for the first available slot.  Because naddrfree
+	 * is not zero, we are guaranteed to find one.
+	 * Slot 0 is for the unique (primary) MAC; the first alternate
+	 * MAC slot is slot 1.
+	 */
+ for (slot = 1; slot < mmac_info->num_mmac; slot++) {
+ if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
+ break;
+ }
+
+ ASSERT(slot < mmac_info->num_mmac);
+ if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, slot)) != 0) {
+ mutex_exit(hxgep->genlock);
+ return (err);
+ }
+ bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
+ mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
+ mmac_info->naddrfree--;
+ hxge_mmac_kstat_update(hxgep, slot);
+
+ maddr->mma_slot = slot;
+
+ mutex_exit(hxgep->genlock);
+ return (0);
+}
+
+/*
+ * Remove the specified mac address and update
+ * the h/w not to filter the mac address anymore.
+ * Returns: 0, on success.
+ */
+int
+hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
+{
+ p_hxge_t hxgep = arg;
+ hxge_mmac_t *mmac_info;
+ int err = 0;
+ hxge_status_t status;
+
+ mutex_enter(hxgep->genlock);
+
+	/*
+	 * Make sure that hxge is initialized in case _start()
+	 * has not been called.
+	 */
+ if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
+ status = hxge_init(hxgep);
+ if (status != HXGE_OK) {
+ mutex_exit(hxgep->genlock);
+ return (ENXIO);
+ }
+ }
+
+ mmac_info = &hxgep->hxge_mmac_info;
+ if (slot <= 0 || slot >= mmac_info->num_mmac) {
+ mutex_exit(hxgep->genlock);
+ return (EINVAL);
+ }
+
+ if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
+ if (hpi_pfc_mac_addr_disable(hxgep->hpi_handle, slot) ==
+ HPI_SUCCESS) {
+ mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
+ mmac_info->naddrfree++;
+			/*
+			 * Clear mac_pool[slot].addr so that kstat shows a
+			 * zero alternate MAC address when the slot is not
+			 * used.
+			 */
+ bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
+ hxge_mmac_kstat_update(hxgep, slot);
+ } else {
+ err = EIO;
+ }
+ } else {
+ err = EINVAL;
+ }
+
+ mutex_exit(hxgep->genlock);
+ return (err);
+}
+
+/*
+ * Modify a mac address added by hxge_mmac_add().
+ * Returns: 0, on success.
+ */
+int
+hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
+{
+ p_hxge_t hxgep = arg;
+ mac_addr_slot_t slot;
+ hxge_mmac_t *mmac_info;
+ int err = 0;
+ hxge_status_t status;
+
+ if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
+ maddr->mma_addrlen))
+ return (EINVAL);
+
+ slot = maddr->mma_slot;
+
+ mutex_enter(hxgep->genlock);
+
+	/*
+	 * Make sure that hxge is initialized in case _start()
+	 * has not been called.
+	 */
+ if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
+ status = hxge_init(hxgep);
+ if (status != HXGE_OK) {
+ mutex_exit(hxgep->genlock);
+ return (ENXIO);
+ }
+ }
+
+ mmac_info = &hxgep->hxge_mmac_info;
+ if (slot <= 0 || slot >= mmac_info->num_mmac) {
+ mutex_exit(hxgep->genlock);
+ return (EINVAL);
+ }
+
+ if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
+ if ((err = hxge_altmac_set(hxgep, maddr->mma_addr,
+ slot)) == 0) {
+ bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
+ ETHERADDRL);
+ hxge_mmac_kstat_update(hxgep, slot);
+ }
+ } else {
+ err = EINVAL;
+ }
+
+ mutex_exit(hxgep->genlock);
+ return (err);
+}
+
+/*
+ * hxge_m_mmac_get() - Get the MAC address and other information
+ * related to the slot.  mma_flags should be set to 0 in the call.
+ * Note: although kstat shows the MAC address as zero when a slot is
+ * not used, Crossbow expects hxge_m_mmac_get to copy the factory MAC
+ * to the caller as long as the slot is not using a user MAC address.
+ * The following table shows the rules:
+ *
+ * USED VENDOR mma_addr
+ * ------------------------------------------------------------
+ * (1) Slot uses a user MAC: yes no user MAC
+ * (2) Slot uses a factory MAC: yes yes factory MAC
+ * (3) Slot is not used but is
+ * factory MAC capable: no yes factory MAC
+ * (4) Slot is not used and is
+ * not factory MAC capable: no no 0
+ * ------------------------------------------------------------
+ */
+int
+hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
+{
+ hxge_t *hxgep = arg;
+ mac_addr_slot_t slot;
+ hxge_mmac_t *mmac_info;
+ hxge_status_t status;
+
+ slot = maddr->mma_slot;
+
+ mutex_enter(hxgep->genlock);
+
+	/*
+	 * Make sure that hxge is initialized in case _start()
+	 * has not been called.
+	 */
+ if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
+ status = hxge_init(hxgep);
+ if (status != HXGE_OK) {
+ mutex_exit(hxgep->genlock);
+ return (ENXIO);
+ }
+ }
+
+ mmac_info = &hxgep->hxge_mmac_info;
+ if (slot <= 0 || slot >= mmac_info->num_mmac) {
+ mutex_exit(hxgep->genlock);
+ return (EINVAL);
+ }
+
+ maddr->mma_flags = 0;
+ if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
+ maddr->mma_flags |= MMAC_SLOT_USED;
+ bcopy(mmac_info->mac_pool[slot].addr,
+ maddr->mma_addr, ETHERADDRL);
+ maddr->mma_addrlen = ETHERADDRL;
+ }
+
+ mutex_exit(hxgep->genlock);
+ return (0);
+}
+
+/*ARGSUSED*/
+boolean_t
+hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
+{
+ p_hxge_t hxgep = (p_hxge_t)arg;
+ uint32_t *txflags = cap_data;
+ multiaddress_capab_t *mmacp = cap_data;
+
+ switch (cap) {
+ case MAC_CAPAB_HCKSUM:
+ *txflags = HCKSUM_INET_PARTIAL;
+ break;
+
+ case MAC_CAPAB_POLL:
+		/*
+		 * There's nothing for us to fill in; simply returning
+		 * B_TRUE to state that we support polling is sufficient.
+		 */
+ break;
+
+ case MAC_CAPAB_MULTIADDRESS:
+ /*
+ * The number of MAC addresses made available by
+ * this capability is one less than the total as
+ * the primary address in slot 0 is counted in
+ * the total.
+ */
+ mmacp->maddr_naddr = PFC_N_MAC_ADDRESSES - 1;
+ mmacp->maddr_naddrfree = hxgep->hxge_mmac_info.naddrfree;
+ mmacp->maddr_flag = 0; /* No multiple factory macs */
+ mmacp->maddr_handle = hxgep;
+ mmacp->maddr_add = hxge_m_mmac_add;
+ mmacp->maddr_remove = hxge_m_mmac_remove;
+ mmacp->maddr_modify = hxge_m_mmac_modify;
+ mmacp->maddr_get = hxge_m_mmac_get;
+ mmacp->maddr_reserve = NULL; /* No multiple factory macs */
+ break;
+ default:
+ return (B_FALSE);
+ }
+ return (B_TRUE);
+}
+
+/*
+ * Module loading and removing entry points.
+ */
+DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
+ nodev, NULL, D_MP, NULL);
+
+extern struct mod_ops mod_driverops;
+
+#define HXGE_DESC_VER "HXGE 10Gb Ethernet Driver"
+
+/*
+ * Module linkage information for the kernel.
+ */
+static struct modldrv hxge_modldrv = {
+ &mod_driverops,
+ HXGE_DESC_VER,
+ &hxge_dev_ops
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *) &hxge_modldrv, NULL
+};
+
+int
+_init(void)
+{
+ int status;
+
+ HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
+ mac_init_ops(&hxge_dev_ops, "hxge");
+ status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
+ if (status != 0) {
+ HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
+ "failed to init device soft state"));
+ mac_fini_ops(&hxge_dev_ops);
+ goto _init_exit;
+ }
+
+ status = mod_install(&modlinkage);
+ if (status != 0) {
+ ddi_soft_state_fini(&hxge_list);
+ HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
+ goto _init_exit;
+ }
+
+ MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
+
+_init_exit:
+ HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
+
+ return (status);
+}
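+
+/*
+ * Illustration, not part of the driver: _init() runs when the module
+ * is loaded.  Installation typically wires this up with something like
+ *	add_drv -i '"<pci alias>"' hxge
+ * (the actual alias comes from the driver's packaging scripts and is
+ * elided here), after which attach of a matching device invokes
+ * hxge_attach() via the dev_ops registered above.
+ */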
+
+int
+_fini(void)
+{
+ int status;
+
+ HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
+
+ HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
+
+ if (hxge_mblks_pending)
+ return (EBUSY);
+
+ status = mod_remove(&modlinkage);
+ if (status != DDI_SUCCESS) {
+ HXGE_DEBUG_MSG((NULL, MOD_CTL,
+ "Module removal failed 0x%08x", status));
+ goto _fini_exit;
+ }
+
+ mac_fini_ops(&hxge_dev_ops);
+
+ ddi_soft_state_fini(&hxge_list);
+
+ MUTEX_DESTROY(&hxge_common_lock);
+
+_fini_exit:
+ HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
+
+ return (status);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ int status;
+
+ HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
+ status = mod_info(&modlinkage, modinfop);
+ HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+hxge_status_t
+hxge_add_intrs(p_hxge_t hxgep)
+{
+ int intr_types;
+ int type = 0;
+ int ddi_status = DDI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
+
+ hxgep->hxge_intr_type.intr_registered = B_FALSE;
+ hxgep->hxge_intr_type.intr_enabled = B_FALSE;
+ hxgep->hxge_intr_type.msi_intx_cnt = 0;
+ hxgep->hxge_intr_type.intr_added = 0;
+ hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
+ hxgep->hxge_intr_type.intr_type = 0;
+
+ if (hxge_msi_enable) {
+ hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
+ }
+
+ /* Get the supported interrupt types */
+ if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
+ != DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
+ "ddi_intr_get_supported_types failed: status 0x%08x",
+ ddi_status));
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ hxgep->hxge_intr_type.intr_types = intr_types;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
+ "ddi_intr_get_supported_types: 0x%08x", intr_types));
+
+	/*
+	 * Pick the interrupt type to use based on hxge_msi_enable:
+	 *	1      - prefer MSI, then MSI-X, then FIXED
+	 *	2      - prefer MSI-X, then MSI, then FIXED
+	 *	others - FIXED (INTx emulation)
+	 */
+ switch (hxge_msi_enable) {
+ default:
+ type = DDI_INTR_TYPE_FIXED;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
+ "use fixed (intx emulation) type %08x", type));
+ break;
+
+ case 2:
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
+ "ddi_intr_get_supported_types: 0x%08x", intr_types));
+ if (intr_types & DDI_INTR_TYPE_MSIX) {
+ type = DDI_INTR_TYPE_MSIX;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_add_intrs: "
+ "ddi_intr_get_supported_types: MSIX 0x%08x", type));
+ } else if (intr_types & DDI_INTR_TYPE_MSI) {
+ type = DDI_INTR_TYPE_MSI;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_add_intrs: "
+ "ddi_intr_get_supported_types: MSI 0x%08x", type));
+ } else if (intr_types & DDI_INTR_TYPE_FIXED) {
+ type = DDI_INTR_TYPE_FIXED;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
+			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
+ }
+ break;
+
+ case 1:
+ if (intr_types & DDI_INTR_TYPE_MSI) {
+ type = DDI_INTR_TYPE_MSI;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_add_intrs: "
+ "ddi_intr_get_supported_types: MSI 0x%08x", type));
+ } else if (intr_types & DDI_INTR_TYPE_MSIX) {
+ type = DDI_INTR_TYPE_MSIX;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_add_intrs: "
+ "ddi_intr_get_supported_types: MSIX 0x%08x", type));
+ } else if (intr_types & DDI_INTR_TYPE_FIXED) {
+ type = DDI_INTR_TYPE_FIXED;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_add_intrs: "
+			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
+ }
+ }
+
+ hxgep->hxge_intr_type.intr_type = type;
+ if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
+ type == DDI_INTR_TYPE_FIXED) &&
+ hxgep->hxge_intr_type.niu_msi_enable) {
+ if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_add_intrs: "
+ " hxge_add_intrs_adv failed: status 0x%08x",
+ status));
+ return (status);
+ } else {
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
+ "interrupts registered : type %d", type));
+ hxgep->hxge_intr_type.intr_registered = B_TRUE;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+			    "\nAdded advanced interrupts via hxge_add_intrs_adv, "
+ "intr type 0x%x\n", type));
+
+ return (status);
+ }
+ }
+
+ if (!hxgep->hxge_intr_type.intr_registered) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_add_intrs: failed to register interrupts"));
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
+
+ return (status);
+}
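+
+/*
+ * Note: hxge_add_intrs() only allocates the interrupt handles and
+ * registers the handlers; the vectors do not fire until
+ * hxge_intrs_enable() is called as a separate step in the bring-up
+ * sequence.
+ */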
+
+/*ARGSUSED*/
+static hxge_status_t
+hxge_add_soft_intrs(p_hxge_t hxgep)
+{
+ int ddi_status = DDI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs"));
+
+ hxgep->resched_id = NULL;
+ hxgep->resched_running = B_FALSE;
+ ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW,
+ &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep);
+ if (ddi_status != DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: "
+		    "ddi_add_softintr failed: status 0x%08x", ddi_status));
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_add_soft_intrs"));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static hxge_status_t
+hxge_add_intrs_adv(p_hxge_t hxgep)
+{
+ int intr_type;
+ p_hxge_intr_t intrp;
+ hxge_status_t status;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
+
+ intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
+ intr_type = intrp->intr_type;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
+ intr_type));
+
+ switch (intr_type) {
+ case DDI_INTR_TYPE_MSI: /* 0x2 */
+ case DDI_INTR_TYPE_MSIX: /* 0x4 */
+ status = hxge_add_intrs_adv_type(hxgep, intr_type);
+ break;
+
+ case DDI_INTR_TYPE_FIXED: /* 0x1 */
+ status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
+ break;
+
+ default:
+ status = HXGE_ERROR;
+ break;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static hxge_status_t
+hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
+{
+ dev_info_t *dip = hxgep->dip;
+ p_hxge_ldg_t ldgp;
+ p_hxge_intr_t intrp;
+ uint_t *inthandler;
+ void *arg1, *arg2;
+ int behavior;
+ int nintrs, navail;
+ int nactual, nrequired;
+ int inum = 0;
+ int loop = 0;
+ int x, y;
+ int ddi_status = DDI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
+
+ intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
+
+ ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
+ if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
+		    "nintrs: %d", ddi_status, nintrs));
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
+ if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+		    "ddi_intr_get_navail() failed, status: 0x%x, "
+		    "navail: %d", ddi_status, navail));
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
+ int_type, nintrs, navail));
+
+ if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
+		/*
+		 * The MSI vector count must be a power of two; round
+		 * navail down to one.  The bit tests below only handle
+		 * values below 32, which is sufficient here.
+		 */
+ if ((navail & 16) == 16) {
+ navail = 16;
+ } else if ((navail & 8) == 8) {
+ navail = 8;
+ } else if ((navail & 4) == 4) {
+ navail = 4;
+ } else if ((navail & 2) == 2) {
+ navail = 2;
+ } else {
+ navail = 1;
+ }
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
+ "navail %d", nintrs, navail));
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "requesting: intr type %d nintrs %d, navail %d",
+ int_type, nintrs, navail));
+
+ behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
+ DDI_INTR_ALLOC_NORMAL);
+ intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
+ intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
+
+ ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
+ navail, &nactual, behavior);
+ if (ddi_status != DDI_SUCCESS || nactual == 0) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " ddi_intr_alloc() failed: %d", ddi_status));
+ kmem_free(intrp->htable, intrp->intr_size);
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "ddi_intr_alloc() returned: navail %d nactual %d",
+ navail, nactual));
+
+ if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
+ (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " ddi_intr_get_pri() failed: %d", ddi_status));
+ /* Free already allocated interrupts */
+ for (y = 0; y < nactual; y++) {
+ (void) ddi_intr_free(intrp->htable[y]);
+ }
+
+ kmem_free(intrp->htable, intrp->intr_size);
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ nrequired = 0;
+ status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+		    "hxge_add_intrs_adv_type: hxge_ldgv_init "
+ "failed: 0x%x", status));
+ /* Free already allocated interrupts */
+ for (y = 0; y < nactual; y++) {
+ (void) ddi_intr_free(intrp->htable[y]);
+ }
+
+ kmem_free(intrp->htable, intrp->intr_size);
+ return (status);
+ }
+
+ ldgp = hxgep->ldgvp->ldgp;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
+
+ if (nactual < nrequired)
+ loop = nactual;
+ else
+ loop = nrequired;
+
+ for (x = 0; x < loop; x++, ldgp++) {
+ ldgp->vector = (uint8_t)x;
+ arg1 = ldgp->ldvp;
+ arg2 = hxgep;
+ if (ldgp->nldvs == 1) {
+ inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
+ "1-1 int handler (entry %d)\n",
+ arg1, arg2, x));
+ } else if (ldgp->nldvs > 1) {
+ inthandler = (uint_t *)ldgp->sys_intr_handler;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
+			    "nldvs %d int handler (entry %d)\n",
+ arg1, arg2, ldgp->nldvs, x));
+ }
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
+ "htable 0x%llx", x, intrp->htable[x]));
+
+ if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
+ (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
+ DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_add_intrs_adv_type: failed #%d "
+ "status 0x%x", x, ddi_status));
+ for (y = 0; y < intrp->intr_added; y++) {
+ (void) ddi_intr_remove_handler(
+ intrp->htable[y]);
+ }
+
+ /* Free already allocated intr */
+ for (y = 0; y < nactual; y++) {
+ (void) ddi_intr_free(intrp->htable[y]);
+ }
+ kmem_free(intrp->htable, intrp->intr_size);
+
+ (void) hxge_ldgv_uninit(hxgep);
+
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ intrp->intr_added++;
+ }
+ intrp->msi_intx_cnt = nactual;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
+ navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
+
+ (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
+ (void) hxge_intr_ldgv_init(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static hxge_status_t
+hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
+{
+ dev_info_t *dip = hxgep->dip;
+ p_hxge_ldg_t ldgp;
+ p_hxge_intr_t intrp;
+ uint_t *inthandler;
+ void *arg1, *arg2;
+ int behavior;
+ int nintrs, navail;
+ int nactual, nrequired;
+ int inum = 0;
+ int x, y;
+ int ddi_status = DDI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
+ intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
+
+ ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
+ if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
+		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
+		    "nintrs: %d", ddi_status, nintrs));
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
+ if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+		    "ddi_intr_get_navail() failed, status: 0x%x, "
+		    "navail: %d", ddi_status, navail));
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
+ nintrs, navail));
+
+ behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
+ DDI_INTR_ALLOC_NORMAL);
+ intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
+ intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
+ ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
+ navail, &nactual, behavior);
+ if (ddi_status != DDI_SUCCESS || nactual == 0) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " ddi_intr_alloc() failed: %d", ddi_status));
+ kmem_free(intrp->htable, intrp->intr_size);
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
+ (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " ddi_intr_get_pri() failed: %d", ddi_status));
+ /* Free already allocated interrupts */
+ for (y = 0; y < nactual; y++) {
+ (void) ddi_intr_free(intrp->htable[y]);
+ }
+
+ kmem_free(intrp->htable, intrp->intr_size);
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+
+ nrequired = 0;
+ status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
+ "failed: 0x%x", status));
+ /* Free already allocated interrupts */
+ for (y = 0; y < nactual; y++) {
+ (void) ddi_intr_free(intrp->htable[y]);
+ }
+
+ kmem_free(intrp->htable, intrp->intr_size);
+ return (status);
+ }
+
+ ldgp = hxgep->ldgvp->ldgp;
+ for (x = 0; x < nrequired; x++, ldgp++) {
+ ldgp->vector = (uint8_t)x;
+ arg1 = ldgp->ldvp;
+ arg2 = hxgep;
+ if (ldgp->nldvs == 1) {
+ inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "hxge_add_intrs_adv_type_fix: "
+ "1-1 int handler(%d) ldg %d ldv %d "
+ "arg1 $%p arg2 $%p\n",
+ x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
+ } else if (ldgp->nldvs > 1) {
+ inthandler = (uint_t *)ldgp->sys_intr_handler;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "hxge_add_intrs_adv_type_fix: "
+ "shared ldv %d int handler(%d) ldv %d ldg %d"
+ "arg1 0x%016llx arg2 0x%016llx\n",
+ x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
+ arg1, arg2));
+ }
+
+ if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
+ (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
+ DDI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_add_intrs_adv_type_fix: failed #%d "
+ "status 0x%x", x, ddi_status));
+ for (y = 0; y < intrp->intr_added; y++) {
+ (void) ddi_intr_remove_handler(
+ intrp->htable[y]);
+ }
+ for (y = 0; y < nactual; y++) {
+ (void) ddi_intr_free(intrp->htable[y]);
+ }
+ /* Free already allocated intr */
+ kmem_free(intrp->htable, intrp->intr_size);
+
+ (void) hxge_ldgv_uninit(hxgep);
+
+ return (HXGE_ERROR | HXGE_DDI_FAILED);
+ }
+ intrp->intr_added++;
+ }
+
+ intrp->msi_intx_cnt = nactual;
+
+ (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
+
+ status = hxge_intr_ldgv_init(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static void
+hxge_remove_intrs(p_hxge_t hxgep)
+{
+ int i, inum;
+ p_hxge_intr_t intrp;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
+ intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
+ if (!intrp->intr_registered) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "<== hxge_remove_intrs: interrupts not registered"));
+ return;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
+
+ if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
+ (void) ddi_intr_block_disable(intrp->htable,
+ intrp->intr_added);
+ } else {
+ for (i = 0; i < intrp->intr_added; i++) {
+ (void) ddi_intr_disable(intrp->htable[i]);
+ }
+ }
+
+ for (inum = 0; inum < intrp->intr_added; inum++) {
+ if (intrp->htable[inum]) {
+ (void) ddi_intr_remove_handler(intrp->htable[inum]);
+ }
+ }
+
+ for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
+ if (intrp->htable[inum]) {
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "hxge_remove_intrs: ddi_intr_free inum %d "
+ "msi_intx_cnt %d intr_added %d",
+ inum, intrp->msi_intx_cnt, intrp->intr_added));
+
+ (void) ddi_intr_free(intrp->htable[inum]);
+ }
+ }
+
+ kmem_free(intrp->htable, intrp->intr_size);
+ intrp->intr_registered = B_FALSE;
+ intrp->intr_enabled = B_FALSE;
+ intrp->msi_intx_cnt = 0;
+ intrp->intr_added = 0;
+
+ (void) hxge_ldgv_uninit(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
+}
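+
+/*
+ * The teardown above is the reverse of the setup in
+ * hxge_add_intrs_adv_type(): disable the vectors, remove the handlers,
+ * free the interrupt handles, then release the logical-device-group
+ * state with hxge_ldgv_uninit().
+ */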
+
+/*ARGSUSED*/
+static void
+hxge_remove_soft_intrs(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs"));
+
+ if (hxgep->resched_id) {
+ ddi_remove_softintr(hxgep->resched_id);
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_remove_soft_intrs: removed"));
+ hxgep->resched_id = NULL;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs"));
+}
+
+/*ARGSUSED*/
+void
+hxge_intrs_enable(p_hxge_t hxgep)
+{
+ p_hxge_intr_t intrp;
+ int i;
+ int status;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
+
+ intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
+
+ if (!intrp->intr_registered) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
+ "interrupts are not registered"));
+ return;
+ }
+
+ if (intrp->intr_enabled) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "<== hxge_intrs_enable: already enabled"));
+ return;
+ }
+
+ if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
+ status = ddi_intr_block_enable(intrp->htable,
+ intrp->intr_added);
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
+ "block enable - status 0x%x total inums #%d\n",
+ status, intrp->intr_added));
+ } else {
+ for (i = 0; i < intrp->intr_added; i++) {
+ status = ddi_intr_enable(intrp->htable[i]);
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
+ "ddi_intr_enable:enable - status 0x%x "
+ "total inums %d enable inum #%d\n",
+ status, intrp->intr_added, i));
+ if (status == DDI_SUCCESS) {
+ intrp->intr_enabled = B_TRUE;
+ }
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
+}
+
+/*ARGSUSED*/
+static void
+hxge_intrs_disable(p_hxge_t hxgep)
+{
+ p_hxge_intr_t intrp;
+ int i;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
+
+ intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
+
+ if (!intrp->intr_registered) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
+ "interrupts are not registered"));
+ return;
+ }
+
+ if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
+ (void) ddi_intr_block_disable(intrp->htable,
+ intrp->intr_added);
+ } else {
+ for (i = 0; i < intrp->intr_added; i++) {
+ (void) ddi_intr_disable(intrp->htable[i]);
+ }
+ }
+
+ intrp->intr_enabled = B_FALSE;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
+}
+
+static hxge_status_t
+hxge_mac_register(p_hxge_t hxgep)
+{
+ mac_register_t *macp;
+ int status;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
+
+ if ((macp = mac_alloc(MAC_VERSION)) == NULL)
+ return (HXGE_ERROR);
+
+ macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
+ macp->m_driver = hxgep;
+ macp->m_dip = hxgep->dip;
+ macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
+ macp->m_src_addr[0],
+ macp->m_src_addr[1],
+ macp->m_src_addr[2],
+ macp->m_src_addr[3],
+ macp->m_src_addr[4],
+ macp->m_src_addr[5]));
+
+ macp->m_callbacks = &hxge_m_callbacks;
+ macp->m_min_sdu = 0;
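+
+	/*
+	 * The maximum SDU is the VMAC frame limit less the Ethernet
+	 * header, the 4-byte FCS, a further 4-byte allowance
+	 * (presumably for a VLAN tag), and the Hydra-internal TX
+	 * packet header prepended to every frame.
+	 */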
+ macp->m_max_sdu = hxgep->vmac.maxframesize -
+ sizeof (struct ether_header) - ETHERFCSL - 4 - TX_PKT_HEADER_SIZE;
+
+ status = mac_register(macp, &hxgep->mach);
+ mac_free(macp);
+
+ if (status != 0) {
+ cmn_err(CE_WARN,
+ "hxge_mac_register failed (status %d instance %d)",
+ status, hxgep->instance);
+ return (HXGE_ERROR);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
+ "(instance %d)", hxgep->instance));
+
+ return (HXGE_OK);
+}
+
+static int
+hxge_init_common_dev(p_hxge_t hxgep)
+{
+ p_hxge_hw_list_t hw_p;
+ dev_info_t *p_dip;
+
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
+
+ p_dip = hxgep->p_dip;
+ MUTEX_ENTER(&hxge_common_lock);
+
+	/*
+	 * Loop through the existing per-Hydra hardware list.
+	 */
+ for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL,
+ "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
+ hw_p, p_dip));
+ if (hw_p->parent_devp == p_dip) {
+ hxgep->hxge_hw_p = hw_p;
+ hw_p->ndevs++;
+ hw_p->hxge_p = hxgep;
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL,
+			    "==> hxge_init_common_dev: "
+ "hw_p $%p parent dip $%p ndevs %d (found)",
+ hw_p, p_dip, hw_p->ndevs));
+ break;
+ }
+ }
+
+ if (hw_p == NULL) {
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL,
+ "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
+ hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
+ hw_p->parent_devp = p_dip;
+ hw_p->magic = HXGE_MAGIC;
+ hxgep->hxge_hw_p = hw_p;
+ hw_p->ndevs++;
+ hw_p->hxge_p = hxgep;
+ hw_p->next = hxge_hw_list;
+
+ MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
+ MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
+ MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
+
+ hxge_hw_list = hw_p;
+ }
+ MUTEX_EXIT(&hxge_common_lock);
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL,
+ "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
+
+ return (HXGE_OK);
+}
+
+static void
+hxge_uninit_common_dev(p_hxge_t hxgep)
+{
+ p_hxge_hw_list_t hw_p, h_hw_p;
+ dev_info_t *p_dip;
+
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
+ if (hxgep->hxge_hw_p == NULL) {
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL,
+ "<== hxge_uninit_common_dev (no common)"));
+ return;
+ }
+
+ MUTEX_ENTER(&hxge_common_lock);
+ h_hw_p = hxge_hw_list;
+ for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
+ p_dip = hw_p->parent_devp;
+ if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
+ hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
+ hw_p->magic == HXGE_MAGIC) {
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL,
+ "==> hxge_uninit_common_dev: "
+ "hw_p $%p parent dip $%p ndevs %d (found)",
+ hw_p, p_dip, hw_p->ndevs));
+
+ hxgep->hxge_hw_p = NULL;
+ if (hw_p->ndevs) {
+ hw_p->ndevs--;
+ }
+ hw_p->hxge_p = NULL;
+ if (!hw_p->ndevs) {
+ MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
+ MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
+ MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL,
+ "==> hxge_uninit_common_dev: "
+ "hw_p $%p parent dip $%p ndevs %d (last)",
+ hw_p, p_dip, hw_p->ndevs));
+
+ if (hw_p == hxge_hw_list) {
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL,
+ "==> hxge_uninit_common_dev:"
+ "remove head "
+ "hw_p $%p parent dip $%p "
+ "ndevs %d (head)",
+ hw_p, p_dip, hw_p->ndevs));
+ hxge_hw_list = hw_p->next;
+ } else {
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL,
+ "==> hxge_uninit_common_dev:"
+ "remove middle "
+ "hw_p $%p parent dip $%p "
+ "ndevs %d (middle)",
+ hw_p, p_dip, hw_p->ndevs));
+ h_hw_p->next = hw_p->next;
+ }
+
+ KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
+ }
+ break;
+ } else {
+ h_hw_p = hw_p;
+ }
+ }
+
+ MUTEX_EXIT(&hxge_common_lock);
+ HXGE_DEBUG_MSG((hxgep, MOD_CTL,
+ "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
+
+	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
+}
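+
+/*
+ * In the walk above, h_hw_p trails hw_p by one node so that an entry
+ * in the middle of hxge_hw_list can be unlinked; removal of the list
+ * head is handled as a separate case.
+ */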
diff --git a/usr/src/uts/common/io/hxge/hxge_ndd.c b/usr/src/uts/common/io/hxge/hxge_ndd.c
new file mode 100644
index 0000000000..838697bd15
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_ndd.c
@@ -0,0 +1,1529 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hxge_impl.h>
+#include <inet/common.h>
+#include <inet/mi.h>
+#include <inet/nd.h>
+
+extern uint64_t hpi_debug_level;
+
+#define HXGE_PARAM_MAC_RW \
+ HXGE_PARAM_RW | HXGE_PARAM_MAC | \
+ HXGE_PARAM_NDD_WR_OK | HXGE_PARAM_READ_PROP
+
+#define HXGE_PARAM_RXDMA_RW HXGE_PARAM_RWP | HXGE_PARAM_RXDMA | \
+ HXGE_PARAM_NDD_WR_OK | HXGE_PARAM_READ_PROP
+
+#define HXGE_PARAM_L2CLASS_CFG \
+ HXGE_PARAM_RW | HXGE_PARAM_PROP_ARR32 | \
+ HXGE_PARAM_READ_PROP | HXGE_PARAM_NDD_WR_OK
+
+#define HXGE_PARAM_CLASS_RWS \
+ HXGE_PARAM_RWS | HXGE_PARAM_READ_PROP
+
+#define HXGE_PARAM_ARRAY_INIT_SIZE 0x20ULL
+
+#define BASE_ANY 0
+#define BASE_BINARY 2
+#define BASE_HEX 16
+#define BASE_DECIMAL 10
+#define ALL_FF_64 0xFFFFFFFFFFFFFFFFULL
+#define ALL_FF_32 0xFFFFFFFFUL
+
+#define HXGE_NDD_INFODUMP_BUFF_SIZE 2048 /* is 2k enough? */
+#define HXGE_NDD_INFODUMP_BUFF_8K 8192
+#define	HXGE_NDD_INFODUMP_BUFF_16K	0x4000
+#define	HXGE_NDD_INFODUMP_BUFF_64K	0x10000
+
+#define PARAM_OUTOF_RANGE(vptr, eptr, rval, pa) \
+ ((vptr == eptr) || (rval < pa->minimum) || (rval > pa->maximum))
+
+#define ADVANCE_PRINT_BUFFER(pmp, plen, rlen) { \
+ ((mblk_t *)pmp)->b_wptr += plen; \
+ rlen -= plen; \
+}
+
+static int hxge_param_rx_intr_pkts(p_hxge_t hxgep, queue_t *,
+ mblk_t *, char *, caddr_t);
+static int hxge_param_rx_intr_time(p_hxge_t hxgep, queue_t *,
+ mblk_t *, char *, caddr_t);
+static int hxge_param_set_mac(p_hxge_t, queue_t *,
+ mblk_t *, char *, caddr_t);
+static int hxge_param_set_ether_usr(p_hxge_t hxgep, queue_t *, mblk_t *,
+ char *, caddr_t);
+static int hxge_param_set_ip_opt(p_hxge_t hxgep,
+ queue_t *, mblk_t *, char *, caddr_t);
+static int hxge_param_pfc_hash_init(p_hxge_t hxgep,
+ queue_t *, mblk_t *, char *, caddr_t);
+static int hxge_param_tcam_enable(p_hxge_t hxgep, queue_t *,
+ mblk_t *, char *, caddr_t);
+static int hxge_param_get_rxdma_info(p_hxge_t hxgep, queue_t *q,
+ p_mblk_t mp, caddr_t cp);
+static int hxge_param_set_vlan_ids(p_hxge_t hxgep, queue_t *q,
+ mblk_t *mp, char *value, caddr_t cp);
+static int hxge_param_get_vlan_ids(p_hxge_t hxgep, queue_t *q,
+ p_mblk_t mp, caddr_t cp);
+static int hxge_param_get_ip_opt(p_hxge_t hxgep,
+ queue_t *, mblk_t *, caddr_t);
+static int hxge_param_get_mac(p_hxge_t hxgep, queue_t *q, p_mblk_t mp,
+ caddr_t cp);
+static int hxge_param_get_debug_flag(p_hxge_t hxgep, queue_t *q,
+ p_mblk_t mp, caddr_t cp);
+static int hxge_param_set_hxge_debug_flag(p_hxge_t hxgep,
+ queue_t *, mblk_t *, char *, caddr_t);
+static int hxge_param_set_hpi_debug_flag(p_hxge_t hxgep,
+ queue_t *, mblk_t *, char *, caddr_t);
+static int hxge_param_dump_ptrs(p_hxge_t hxgep, queue_t *q,
+ p_mblk_t mp, caddr_t cp);
+
+/*
+ * Global array of Hydra changeable parameters.
+ * This array is initialized to correspond to the default Hydra
+ * configuration.  It is copied into each instance's parameter
+ * structure and then modified according to the fcode and hxge.conf
+ * configuration.  At least some of the parameters are later exported
+ * to ndd for display and run-time configuration.
+ */
+
+static hxge_param_t hxge_param_arr[] = {
+ /* min max value old hw-name conf-name */
+ {hxge_param_get_generic, NULL, HXGE_PARAM_READ,
+ 0, 999, 1000, 0, "instance", "instance"},
+
+ /* MTU cannot be propagated to the stack from here, so don't show it */
+ {hxge_param_get_mac, hxge_param_set_mac,
+ HXGE_PARAM_MAC_RW | HXGE_PARAM_DONT_SHOW,
+ 0, 1, 0, 0, "accept-jumbo", "accept_jumbo"},
+
+ {hxge_param_get_rxdma_info, NULL,
+ HXGE_PARAM_READ | HXGE_PARAM_DONT_SHOW,
+ HXGE_RBR_RBB_MIN, HXGE_RBR_RBB_MAX, HXGE_RBR_RBB_DEFAULT, 0,
+ "rx-rbr-size", "rx_rbr_size"},
+
+ {hxge_param_get_rxdma_info, NULL,
+ HXGE_PARAM_READ | HXGE_PARAM_DONT_SHOW,
+ HXGE_RCR_MIN, HXGE_RCR_MAX, HXGE_RCR_DEFAULT, 0,
+ "rx-rcr-size", "rx_rcr_size"},
+
+ {hxge_param_get_generic, hxge_param_rx_intr_time,
+ HXGE_PARAM_RXDMA_RW,
+ HXGE_RDC_RCR_TIMEOUT_MIN, HXGE_RDC_RCR_TIMEOUT_MAX,
+ RXDMA_RCR_TO_DEFAULT, 0, "rxdma-intr-time", "rxdma_intr_time"},
+
+ {hxge_param_get_generic, hxge_param_rx_intr_pkts,
+ HXGE_PARAM_RXDMA_RW,
+ HXGE_RDC_RCR_THRESHOLD_MIN, HXGE_RDC_RCR_THRESHOLD_MAX,
+ RXDMA_RCR_PTHRES_DEFAULT, 0,
+ "rxdma-intr-pkts", "rxdma_intr_pkts"},
+
+ /* Hardware VLAN is not used currently, so don't show it */
+ {hxge_param_get_vlan_ids, hxge_param_set_vlan_ids,
+ HXGE_PARAM_L2CLASS_CFG | HXGE_PARAM_DONT_SHOW,
+ VLAN_ID_MIN, VLAN_ID_MAX, 0, 0, "vlan-ids", "vlan_ids"},
+
+ /* Hardware VLAN is not used currently, so don't show it */
+ {hxge_param_get_generic, hxge_param_set_generic,
+ HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
+ VLAN_ID_MIN, VLAN_ID_MAX, VLAN_ID_IMPLICIT, VLAN_ID_IMPLICIT,
+ "implicit-vlan-id", "implicit_vlan_id"},
+
+ {hxge_param_get_generic, hxge_param_tcam_enable,
+ HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
+ 0, 0x1, 0x0, 0, "tcam-enable", "tcam_enable"},
+
+ {hxge_param_get_generic, hxge_param_pfc_hash_init,
+ HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
+ 0, ALL_FF_32, ALL_FF_32, 0,
+ "hash-init-value", "hash_init_value"},
+
+ {hxge_param_get_generic, hxge_param_set_ether_usr,
+ HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
+ 0, ALL_FF_32, 0x0, 0,
+ "class-cfg-ether-usr1", "class_cfg_ether_usr1"},
+
+ {hxge_param_get_generic, hxge_param_set_ether_usr,
+ HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
+ 0, ALL_FF_32, 0x0, 0,
+ "class-cfg-ether-usr2", "class_cfg_ether_usr2"},
+
+ {hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
+ 0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
+ "class-opt-ipv4-tcp", "class_opt_ipv4_tcp"},
+
+ {hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
+ 0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
+ "class-opt-ipv4-udp", "class_opt_ipv4_udp"},
+
+ {hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
+ 0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
+ "class-opt-ipv4-ah", "class_opt_ipv4_ah"},
+
+ {hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
+ 0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
+ "class-opt-ipv4-sctp", "class_opt_ipv4_sctp"},
+
+ {hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
+ 0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
+ "class-opt-ipv6-tcp", "class_opt_ipv6_tcp"},
+
+ {hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
+ 0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
+ "class-opt-ipv6-udp", "class_opt_ipv6_udp"},
+
+ {hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
+ 0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
+ "class-opt-ipv6-ah", "class_opt_ipv6_ah"},
+
+ {hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
+ 0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
+ "class-opt-ipv6-sctp", "class_opt_ipv6_sctp"},
+
+ {hxge_param_get_debug_flag, hxge_param_set_hxge_debug_flag,
+ HXGE_PARAM_RW | HXGE_PARAM_DONT_SHOW,
+ 0ULL, ALL_FF_64, 0ULL, 0ULL,
+ "hxge-debug-flag", "hxge_debug_flag"},
+
+ {hxge_param_get_debug_flag, hxge_param_set_hpi_debug_flag,
+ HXGE_PARAM_RW | HXGE_PARAM_DONT_SHOW,
+ 0ULL, ALL_FF_64, 0ULL, 0ULL,
+ "hpi-debug-flag", "hpi_debug_flag"},
+
+ {hxge_param_dump_ptrs, NULL, HXGE_PARAM_READ | HXGE_PARAM_DONT_SHOW,
+ 0, 0x0fffffff, 0x0fffffff, 0, "dump-ptrs", "dump_ptrs"},
+
+ {NULL, NULL, HXGE_PARAM_READ | HXGE_PARAM_DONT_SHOW,
+ 0, 0x0fffffff, 0x0fffffff, 0, "end", "end"},
+};
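+
+/*
+ * Illustration, not part of the driver: parameters that are exported
+ * (i.e. not marked HXGE_PARAM_DONT_SHOW) are visible through ndd(1M),
+ * e.g.
+ *	# ndd -get /dev/hxge rxdma_intr_pkts
+ *	# ndd -set /dev/hxge rxdma_intr_pkts 16
+ * The device path shown is an assumption; consult the driver's man
+ * page for the node actually created.
+ */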
+
+extern void *hxge_list;
+
+/*
+ * Update the NDD array from the soft properties.
+ */
+void
+hxge_get_param_soft_properties(p_hxge_t hxgep)
+{
+ p_hxge_param_t param_arr;
+ uint_t prop_len;
+ int i, j;
+ uint32_t param_count;
+ uint32_t *int_prop_val;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, " ==> hxge_get_param_soft_properties"));
+
+ param_arr = hxgep->param_arr;
+ param_count = hxgep->param_count;
+ for (i = 0; i < param_count; i++) {
+
+ if ((param_arr[i].type & HXGE_PARAM_READ_PROP) == 0)
+ continue;
+
+ if ((param_arr[i].type & HXGE_PARAM_PROP_STR))
+ continue;
+
+ if ((param_arr[i].type & HXGE_PARAM_PROP_ARR32) ||
+ (param_arr[i].type & HXGE_PARAM_PROP_ARR64)) {
+
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
+ hxgep->dip, 0, param_arr[i].fcode_name,
+ (int **)&int_prop_val, (uint_t *)&prop_len) ==
+ DDI_PROP_SUCCESS) {
+ uint64_t *cfg_value;
+ uint64_t prop_count;
+
+ if (prop_len > HXGE_PARAM_ARRAY_INIT_SIZE)
+ prop_len = HXGE_PARAM_ARRAY_INIT_SIZE;
+#if defined(__i386)
+ cfg_value =
+ (uint64_t *)(int32_t)param_arr[i].value;
+#else
+ cfg_value = (uint64_t *)param_arr[i].value;
+#endif
+ for (j = 0; j < prop_len; j++) {
+ cfg_value[j] = int_prop_val[j];
+ }
+ prop_count = prop_len;
+ param_arr[i].type |=
+ (prop_count << HXGE_PARAM_ARRAY_CNT_SHIFT);
+
+ ddi_prop_free(int_prop_val);
+ }
+ continue;
+ }
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0,
+ param_arr[i].fcode_name, (int **)&int_prop_val,
+ &prop_len) == DDI_PROP_SUCCESS) {
+ if ((*int_prop_val >= param_arr[i].minimum) &&
+ (*int_prop_val <= param_arr[i].maximum))
+ param_arr[i].value = *int_prop_val;
+ ddi_prop_free(int_prop_val);
+ }
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0,
+ param_arr[i].name, (int **)&int_prop_val, &prop_len) ==
+ DDI_PROP_SUCCESS) {
+ if ((*int_prop_val >= param_arr[i].minimum) &&
+ (*int_prop_val <= param_arr[i].maximum))
+ param_arr[i].value = *int_prop_val;
+ ddi_prop_free(int_prop_val);
+ }
+ }
+}
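+
+/*
+ * Note the lookup order above for scalar parameters: the fcode name is
+ * consulted first and the hxge.conf name second, so a value set in
+ * hxge.conf overrides one supplied by firmware.
+ */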
+
+static int
+hxge_private_param_register(p_hxge_t hxgep, p_hxge_param_t param_arr)
+{
+ int status = B_TRUE;
+ int channel;
+ char *prop_name;
+ char *end;
+ uint32_t name_chars;
+
+ HXGE_DEBUG_MSG((hxgep, NDD2_CTL, " hxge_private_param_register %s",
+ param_arr->name));
+
+ if ((param_arr->type & HXGE_PARAM_PRIV) != HXGE_PARAM_PRIV)
+ return (B_TRUE);
+ prop_name = param_arr->name;
+ if (param_arr->type & HXGE_PARAM_RXDMA) {
+ if (strncmp("rxdma_intr", prop_name, 10) == 0)
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+ }
+
+ if (param_arr->type & HXGE_PARAM_TXDMA) {
+ name_chars = strlen("txdma");
+ if (strncmp("txdma", prop_name, name_chars) == 0) {
+ prop_name += name_chars;
+ channel = mi_strtol(prop_name, &end, 10);
+			/* now check if this tdc is in the configuration */
+ HXGE_DEBUG_MSG((hxgep, NDD2_CTL,
+ " hxge_private_param_register: %d", channel));
+ return (hxge_check_txdma_port_member(hxgep, channel));
+ }
+ return (B_FALSE);
+ }
+
+ status = B_FALSE;
+ HXGE_DEBUG_MSG((hxgep, NDD2_CTL, "<== hxge_private_param_register"));
+
+ return (status);
+}
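+
+/*
+ * A B_TRUE return from hxge_private_param_register() means the private
+ * parameter applies to this instance and should be exported; B_FALSE
+ * causes hxge_setup_param() to suppress its get/set handlers.
+ */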
+
+void
+hxge_setup_param(p_hxge_t hxgep)
+{
+ p_hxge_param_t param_arr;
+ int i;
+ pfi_t set_pfi;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_setup_param"));
+ /*
+ * Make sure the param_instance is set to a valid device instance.
+ */
+ if (hxge_param_arr[param_instance].value == 1000)
+ hxge_param_arr[param_instance].value = hxgep->instance;
+
+ param_arr = hxgep->param_arr;
+ param_arr[param_instance].value = hxgep->instance;
+
+ for (i = 0; i < hxgep->param_count; i++) {
+ if ((param_arr[i].type & HXGE_PARAM_PRIV) &&
+ (hxge_private_param_register(hxgep, &param_arr[i]) ==
+ B_FALSE)) {
+ param_arr[i].setf = NULL;
+ param_arr[i].getf = NULL;
+ }
+ if (param_arr[i].type & HXGE_PARAM_CMPLX)
+ param_arr[i].setf = NULL;
+
+ if (param_arr[i].type & HXGE_PARAM_DONT_SHOW) {
+ param_arr[i].setf = NULL;
+ param_arr[i].getf = NULL;
+ }
+ set_pfi = (pfi_t)param_arr[i].setf;
+
+ if ((set_pfi) && (param_arr[i].type & HXGE_PARAM_INIT_ONLY)) {
+ set_pfi = NULL;
+ }
+ if (!hxge_nd_load(&hxgep->param_list, param_arr[i].name,
+ (pfi_t)param_arr[i].getf, set_pfi,
+ (caddr_t)&param_arr[i])) {
+ (void) hxge_nd_free(&hxgep->param_list);
+ break;
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_setup_param"));
+}
+
+/*
+ * Called from the attach path; allocates memory for the parameter
+ * array and for its array-typed members.
+ */
+void
+hxge_init_param(p_hxge_t hxgep)
+{
+ p_hxge_param_t param_arr;
+ int i, alloc_size;
+ uint64_t alloc_count;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_init_param"));
+ /*
+ * Make sure the param_instance is set to a valid device instance.
+ */
+ if (hxge_param_arr[param_instance].value == 1000)
+ hxge_param_arr[param_instance].value = hxgep->instance;
+
+ param_arr = hxgep->param_arr;
+ if (param_arr == NULL) {
+ param_arr = (p_hxge_param_t)KMEM_ZALLOC(
+ sizeof (hxge_param_arr), KM_SLEEP);
+ }
+ for (i = 0; i < sizeof (hxge_param_arr) / sizeof (hxge_param_t); i++) {
+ param_arr[i] = hxge_param_arr[i];
+ if ((param_arr[i].type & HXGE_PARAM_PROP_ARR32) ||
+ (param_arr[i].type & HXGE_PARAM_PROP_ARR64)) {
+ alloc_count = HXGE_PARAM_ARRAY_INIT_SIZE;
+ alloc_size = alloc_count * sizeof (uint64_t);
+#if defined(__i386)
+ param_arr[i].value =
+ (uint64_t)(uint32_t)KMEM_ZALLOC(alloc_size,
+ KM_SLEEP);
+ param_arr[i].old_value =
+ (uint64_t)(uint32_t)KMEM_ZALLOC(alloc_size,
+ KM_SLEEP);
+#else
+ param_arr[i].value =
+ (uint64_t)KMEM_ZALLOC(alloc_size, KM_SLEEP);
+ param_arr[i].old_value =
+ (uint64_t)KMEM_ZALLOC(alloc_size, KM_SLEEP);
+#endif
+ param_arr[i].type |=
+ (alloc_count << HXGE_PARAM_ARRAY_ALLOC_SHIFT);
+ }
+ }
+
+ hxgep->param_arr = param_arr;
+ hxgep->param_count = sizeof (hxge_param_arr) / sizeof (hxge_param_t);
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init_param: count %d",
+ hxgep->param_count));
+}
+
+/*
+ * Called from the detach path; frees the memory allocated for the
+ * parameter array.
+ */
+void
+hxge_destroy_param(p_hxge_t hxgep)
+{
+ int i;
+ uint64_t free_size, free_count;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_param"));
+ /*
+ * Make sure the param_instance is set to a valid device instance.
+ */
+ if (hxge_param_arr[param_instance].value == hxgep->instance) {
+ for (i = 0; i <= hxge_param_arr[param_instance].maximum; i++) {
+ if ((ddi_get_soft_state(hxge_list, i) != NULL) &&
+ (i != hxgep->instance))
+ break;
+ }
+ hxge_param_arr[param_instance].value = i;
+ }
+ if (hxgep->param_list)
+ hxge_nd_free(&hxgep->param_list);
+ for (i = 0; i < hxgep->param_count; i++) {
+ if ((hxgep->param_arr[i].type & HXGE_PARAM_PROP_ARR32) ||
+ (hxgep->param_arr[i].type & HXGE_PARAM_PROP_ARR64)) {
+			/*
+			 * Array parameters are always allocated with
+			 * HXGE_PARAM_ARRAY_INIT_SIZE entries (see
+			 * hxge_init_param), so free that many; the
+			 * alloc-count bits in 'type' are ignored here.
+			 */
+			free_count = HXGE_PARAM_ARRAY_INIT_SIZE;
+ free_size = sizeof (uint64_t) * free_count;
+#if defined(__i386)
+ KMEM_FREE((void *)(uint32_t)
+ hxgep->param_arr[i].value, free_size);
+ KMEM_FREE((void *)(uint32_t)
+ hxgep->param_arr[i].old_value, free_size);
+#else
+ KMEM_FREE((void *) hxgep->param_arr[i].value,
+ free_size);
+ KMEM_FREE((void *) hxgep->param_arr[i].old_value,
+ free_size);
+#endif
+ }
+ }
+
+ KMEM_FREE(hxgep->param_arr, sizeof (hxge_param_arr));
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_param"));
+}
+
+/*
+ * Extracts the value from the 'hxge' parameter array and prints the
+ * parameter value. cp points to the required parameter.
+ */
+/* ARGSUSED */
+int
+hxge_param_get_generic(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, " ==> hxge_param_get_generic name %s ",
+ pa->name));
+
+ if (pa->value > 0xffffffff)
+ (void) mi_mpprintf(mp, "%x%x", (int)(pa->value >> 32),
+ (int)(pa->value & 0xffffffff));
+ else
+ (void) mi_mpprintf(mp, "%x", (int)pa->value);
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_generic"));
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_get_mac(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_mac"));
+
+ (void) mi_mpprintf(mp, "%d", (uint32_t)pa->value);
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_mac"));
+ return (0);
+}
+
+/* ARGSUSED */
+int
+hxge_param_get_rxdma_info(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+ uint_t print_len, buf_len;
+ p_mblk_t np;
+ int rdc;
+ p_hxge_dma_pt_cfg_t p_dma_cfgp;
+ p_hxge_hw_pt_cfg_t p_cfgp;
+ int buff_alloc_size = HXGE_NDD_INFODUMP_BUFF_SIZE;
+
+ p_rx_rcr_rings_t rx_rcr_rings;
+ p_rx_rcr_ring_t *rcr_rings;
+ p_rx_rbr_rings_t rx_rbr_rings;
+ p_rx_rbr_ring_t *rbr_rings;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_rxdma_info"));
+
+ (void) mi_mpprintf(mp, "RXDMA Information\n");
+
+ if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
+ /* The following may work even if we cannot get a large buf. */
+ (void) mi_mpprintf(mp, "%s\n", "out of buffer");
+ return (0);
+ }
+ buf_len = buff_alloc_size;
+
+ mp->b_cont = np;
+
+ p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
+ p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+ rx_rcr_rings = hxgep->rx_rcr_rings;
+ rcr_rings = rx_rcr_rings->rcr_rings;
+ rx_rbr_rings = hxgep->rx_rbr_rings;
+ rbr_rings = rx_rbr_rings->rbr_rings;
+
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+ "Total RDCs\t %d\n", p_cfgp->max_rdcs);
+ ((mblk_t *)np)->b_wptr += print_len;
+ buf_len -= print_len;
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+	    "RDC\t HW RDC\t RBR ptr \t chunks\t RCR ptr\n");
+ ((mblk_t *)np)->b_wptr += print_len;
+ buf_len -= print_len;
+ for (rdc = 0; rdc < p_cfgp->max_rdcs; rdc++) {
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+ " %d\t %d\t $%p\t 0x%x\t $%p\n",
+ rdc, hxgep->rdc[rdc], rbr_rings[rdc],
+ rbr_rings[rdc]->num_blocks, rcr_rings[rdc]);
+ ((mblk_t *)np)->b_wptr += print_len;
+ buf_len -= print_len;
+ }
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_rxdma_info"));
+ return (0);
+}
+
+int
+hxge_mk_mblk_tail_space(p_mblk_t mp, p_mblk_t *nmp, size_t size)
+{
+ p_mblk_t tmp;
+
+ tmp = mp;
+ while (tmp->b_cont)
+ tmp = tmp->b_cont;
+ if ((tmp->b_wptr + size) >= tmp->b_datap->db_lim) {
+ tmp->b_cont = allocb(1024, BPRI_HI);
+ tmp = tmp->b_cont;
+ if (!tmp)
+ return (ENOMEM);
+ }
+ *nmp = tmp;
+ return (0);
+}
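+
+/*
+ * Note: when the current block lacks tail room, the helper above
+ * extends the chain with a fixed 1024-byte block, so callers should
+ * not ask for more tail space than that.
+ */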
+
+/*
+ * Sets a generic parameter to the value supplied via ndd.  The
+ * parameter entry (pointed to by cp) was registered with the named
+ * dispatch table by hxge_nd_load().
+ */
+/* ARGSUSED */
+int
+hxge_param_set_generic(p_hxge_t hxgep, queue_t *q, mblk_t *mp,
+ char *value, caddr_t cp)
+{
+ char *end;
+ uint32_t new_value;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL, " ==> hxge_param_set_generic"));
+ new_value = (uint32_t)mi_strtol(value, &end, 10);
+ if (end == value || new_value < pa->minimum ||
+ new_value > pa->maximum) {
+ return (EINVAL);
+ }
+ pa->value = new_value;
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL, " <== hxge_param_set_generic"));
+ return (0);
+}
+
+/* ARGSUSED */
+int
+hxge_param_set_mac(p_hxge_t hxgep, queue_t *q, mblk_t *mp,
+ char *value, caddr_t cp)
+{
+ char *end;
+ uint32_t new_value;
+ int status = 0;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_mac"));
+ new_value = (uint32_t)mi_strtol(value, &end, BASE_DECIMAL);
+ if (PARAM_OUTOF_RANGE(value, end, new_value, pa)) {
+ return (EINVAL);
+ }
+
+ if (pa->value != new_value) {
+ pa->old_value = pa->value;
+ pa->value = new_value;
+ }
+
+ if (pa->value != pa->old_value) {
+ RW_ENTER_WRITER(&hxgep->filter_lock);
+ (void) hxge_rx_vmac_disable(hxgep);
+ (void) hxge_tx_vmac_disable(hxgep);
+
+ /*
+ * Apply the new jumbo parameter here.
+ * The order of the following two calls is important.
+ */
+ (void) hxge_tx_vmac_enable(hxgep);
+ (void) hxge_rx_vmac_enable(hxgep);
+ RW_EXIT(&hxgep->filter_lock);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_mac"));
+ return (status);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_rx_intr_pkts(p_hxge_t hxgep, queue_t *q,
+ mblk_t *mp, char *value, caddr_t cp)
+{
+ char *end;
+ uint32_t cfg_value;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_rx_intr_pkts"));
+
+ cfg_value = (uint32_t)mi_strtol(value, &end, BASE_ANY);
+
+ if ((cfg_value > HXGE_RDC_RCR_THRESHOLD_MAX) ||
+ (cfg_value < HXGE_RDC_RCR_THRESHOLD_MIN)) {
+ return (EINVAL);
+ }
+
+ if ((pa->value != cfg_value)) {
+ pa->old_value = pa->value;
+ pa->value = cfg_value;
+ hxgep->intr_threshold = pa->value;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_rx_intr_pkts"));
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_rx_intr_time(p_hxge_t hxgep, queue_t *q,
+ mblk_t *mp, char *value, caddr_t cp)
+{
+ char *end;
+ uint32_t cfg_value;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_rx_intr_time"));
+
+ cfg_value = (uint32_t)mi_strtol(value, &end, BASE_ANY);
+
+ if ((cfg_value > HXGE_RDC_RCR_TIMEOUT_MAX) ||
+ (cfg_value < HXGE_RDC_RCR_TIMEOUT_MIN)) {
+ return (EINVAL);
+ }
+
+ if ((pa->value != cfg_value)) {
+ pa->old_value = pa->value;
+ pa->value = cfg_value;
+ hxgep->intr_timeout = pa->value;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_rx_intr_time"));
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_set_vlan_ids(p_hxge_t hxgep, queue_t *q, mblk_t *mp, char *value,
+ caddr_t cp)
+{
+ char *end;
+ uint32_t status = 0, cfg_value;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+ uint32_t cfg_it = B_FALSE;
+ uint32_t *val_ptr, *old_val_ptr;
+ hxge_param_map_t *vmap, *old_map;
+ p_hxge_class_pt_cfg_t p_class_cfgp;
+ uint64_t cfgd_vlans;
+ int i, inc = 0, cfg_position;
+ hxge_mv_cfg_t *vlan_tbl;
+ hpi_handle_t handle;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_vlan_ids "));
+
+ p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
+ vlan_tbl = (hxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
+ handle = hxgep->hpi_reg_handle;
+
+ cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
+
+ /* now do decoding */
+ cfgd_vlans = ((pa->type & HXGE_PARAM_ARRAY_CNT_MASK) >>
+ HXGE_PARAM_ARRAY_CNT_SHIFT);
+
+ if (cfgd_vlans >= HXGE_PARAM_ARRAY_INIT_SIZE) {
+		/*
+		 * For now, we process only up to
+		 * HXGE_PARAM_ARRAY_INIT_SIZE parameters.  In the
+		 * future, we may want to expand the storage array
+		 * and continue.
+		 */
+ return (EINVAL);
+ }
+
+ vmap = (hxge_param_map_t *)&cfg_value;
+ if ((vmap->param_id == 0) || (vmap->param_id > VLAN_ID_MAX)) {
+ return (EINVAL);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, " hxge_param_set_vlan_ids id %d",
+ vmap->param_id));
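+
+	/*
+	 * The ndd value encodes the VLAN id in the param_id field of an
+	 * hxge_param_map_t overlaid on cfg_value; val_ptr/old_val_ptr
+	 * point at the per-parameter storage array that
+	 * hxge_init_param() allocated for array-typed parameters.
+	 */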
+#if defined(__i386)
+ val_ptr = (uint32_t *)(uint32_t)pa->value;
+ old_val_ptr = (uint32_t *)(uint32_t)pa->old_value;
+#else
+ val_ptr = (uint32_t *)pa->value;
+ old_val_ptr = (uint32_t *)pa->old_value;
+#endif
+
+ /* Search to see if this vlan id is already configured */
+ for (i = 0; i < cfgd_vlans; i++) {
+ old_map = (hxge_param_map_t *)&val_ptr[i];
+ if ((old_map->param_id == 0) ||
+ (vmap->param_id == old_map->param_id) ||
+ (vlan_tbl[vmap->param_id].flag)) {
+ cfg_position = i;
+ break;
+ }
+ }
+
+ if (cfgd_vlans == 0) {
+ cfg_position = 0;
+ inc++;
+ }
+
+ if (i == cfgd_vlans) {
+ cfg_position = i;
+ inc++;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, NDD2_CTL,
+ " set_vlan_ids mapping i %d cfgd_vlans %llx position %d ",
+ i, cfgd_vlans, cfg_position));
+
+ if (val_ptr[cfg_position] != cfg_value) {
+ old_val_ptr[cfg_position] = val_ptr[cfg_position];
+ val_ptr[cfg_position] = cfg_value;
+ vlan_tbl[vmap->param_id].flag = 1;
+ cfg_it = B_TRUE;
+ if (inc) {
+ cfgd_vlans++;
+ pa->type &= ~HXGE_PARAM_ARRAY_CNT_MASK;
+ pa->type |= (cfgd_vlans << HXGE_PARAM_ARRAY_CNT_SHIFT);
+
+ }
+
+ HXGE_DEBUG_MSG((hxgep, NDD2_CTL,
+ " after: param_set_vlan_ids cfg_vlans %llx position %d \n",
+ cfgd_vlans, cfg_position));
+ }
+
+ if (cfg_it == B_TRUE) {
+ status = hpi_pfc_cfg_vlan_table_entry_set(handle,
+ (vlan_id_t)vmap->param_id);
+ if (status != HPI_SUCCESS)
+ return (EINVAL);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_vlan_ids"));
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_get_vlan_ids(p_hxge_t hxgep, queue_t *q, mblk_t *mp, caddr_t cp)
+{
+ uint_t print_len, buf_len;
+ p_mblk_t np;
+ int i;
+ uint32_t *val_ptr;
+ hxge_param_map_t *vmap;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+ p_hxge_class_pt_cfg_t p_class_cfgp;
+ uint64_t cfgd_vlans = 0;
+ int buff_alloc_size = HXGE_NDD_INFODUMP_BUFF_SIZE * 32;
+
+	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_vlan_ids "));
+ (void) mi_mpprintf(mp, "VLAN Information\n");
+
+ if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
+ (void) mi_mpprintf(mp, "%s\n", "out of buffer");
+ return (0);
+ }
+
+ buf_len = buff_alloc_size;
+ mp->b_cont = np;
+ cfgd_vlans = (pa->type & HXGE_PARAM_ARRAY_CNT_MASK) >>
+ HXGE_PARAM_ARRAY_CNT_SHIFT;
+
+ i = (int)cfgd_vlans;
+ p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+ "Configured VLANs %d\n VLAN ID\n", i);
+ ((mblk_t *)np)->b_wptr += print_len;
+ buf_len -= print_len;
+
+#if defined(__i386)
+ val_ptr = (uint32_t *)(uint32_t)pa->value;
+#else
+ val_ptr = (uint32_t *)pa->value;
+#endif
+
+ for (i = 0; i < cfgd_vlans; i++) {
+ vmap = (hxge_param_map_t *)&val_ptr[i];
+ if (p_class_cfgp->vlan_tbl[vmap->param_id].flag) {
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
+ buf_len, " %d\n", vmap->param_id);
+ ((mblk_t *)np)->b_wptr += print_len;
+ buf_len -= print_len;
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_vlan_ids"));
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_tcam_enable(p_hxge_t hxgep, queue_t *q,
+ mblk_t *mp, char *value, caddr_t cp)
+{
+ uint32_t status = 0, cfg_value;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+ uint32_t cfg_it = B_FALSE;
+ char *end;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_tcam_enable"));
+
+ cfg_value = (uint32_t)mi_strtol(value, &end, BASE_BINARY);
+ if (pa->value != cfg_value) {
+ pa->old_value = pa->value;
+ pa->value = cfg_value;
+ cfg_it = B_TRUE;
+ }
+ if (cfg_it == B_TRUE) {
+ if (pa->value)
+ status = hxge_pfc_config_tcam_enable(hxgep);
+ else
+ status = hxge_pfc_config_tcam_disable(hxgep);
+ if (status != HXGE_OK)
+ return (EINVAL);
+ }
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, " <== hxge_param_tcam_enable"));
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_set_ether_usr(p_hxge_t hxgep, queue_t *q,
+ mblk_t *mp, char *value, caddr_t cp)
+{
+ char *end;
+ uint32_t status = 0, cfg_value;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_ether_usr"));
+
+ cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
+ if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+ return (EINVAL);
+ }
+ if (pa->value != cfg_value) {
+ pa->old_value = pa->value;
+ pa->value = cfg_value;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_ether_usr"));
+ return (status);
+}
+
+static int
+hxge_class_name_2value(p_hxge_t hxgep, char *name)
+{
+ int i;
+ int class_instance = param_class_opt_ipv4_tcp;
+ p_hxge_param_t param_arr;
+
+ param_arr = hxgep->param_arr;
+ for (i = TCAM_CLASS_TCP_IPV4; i <= TCAM_CLASS_SCTP_IPV6; i++) {
+ if (strcmp(param_arr[class_instance].name, name) == 0)
+ return (i);
+ class_instance++;
+ }
+ return (-1);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_set_ip_opt(p_hxge_t hxgep, queue_t *q,
+ mblk_t *mp, char *value, caddr_t cp)
+{
+ char *end;
+ uint32_t status, cfg_value;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+ tcam_class_t class;
+ uint32_t cfg_it = B_FALSE;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_ip_opt"));
+
+ cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
+ if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+ return (EINVAL);
+ }
+ if (pa->value != cfg_value) {
+ pa->old_value = pa->value;
+ pa->value = cfg_value;
+ cfg_it = B_TRUE;
+ }
+ if (cfg_it == B_TRUE) {
+ /* do the actual hw setup */
+ class = hxge_class_name_2value(hxgep, pa->name);
+ if (class == -1)
+ return (EINVAL);
+
+ status = hxge_pfc_ip_class_config(hxgep, class, pa->value);
+ if (status != HXGE_OK)
+ return (EINVAL);
+ }
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_ip_opt"));
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_get_ip_opt(p_hxge_t hxgep, queue_t *q, mblk_t *mp, caddr_t cp)
+{
+ uint32_t status, cfg_value;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+ tcam_class_t class;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_ip_opt"));
+
+	/* map the parameter name to its TCAM class */
+ class = hxge_class_name_2value(hxgep, pa->name);
+ if (class == -1)
+ return (EINVAL);
+ cfg_value = 0;
+ status = hxge_pfc_ip_class_config_get(hxgep, class, &cfg_value);
+ if (status != HXGE_OK)
+ return (EINVAL);
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL,
+ "hxge_param_get_ip_opt_get %x ", cfg_value));
+ pa->value = cfg_value;
+
+ (void) mi_mpprintf(mp, "%x", cfg_value);
+	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_ip_opt"));
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_pfc_hash_init(p_hxge_t hxgep, queue_t *q, mblk_t *mp,
+ char *value, caddr_t cp)
+{
+ char *end;
+ uint32_t status, cfg_value;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+ uint32_t cfg_it = B_FALSE;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_pfc_hash_init"));
+
+ cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
+ if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+ return (EINVAL);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL,
+ " hxge_param_pfc_hash_init value %x", cfg_value));
+ if (pa->value != cfg_value) {
+ pa->old_value = pa->value;
+ pa->value = cfg_value;
+ cfg_it = B_TRUE;
+ }
+
+ if (cfg_it == B_TRUE) {
+ status = hxge_pfc_set_hash(hxgep, (uint32_t)pa->value);
+ if (status != HXGE_OK)
+ return (EINVAL);
+ }
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, " <== hxge_param_pfc_hash_init"));
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_set_hxge_debug_flag(p_hxge_t hxgep, queue_t *q,
+ mblk_t *mp, char *value, caddr_t cp)
+{
+ char *end;
+ uint32_t status = 0;
+ uint64_t cfg_value = 0;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+ uint32_t cfg_it = B_FALSE;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_hxge_debug_flag"));
+ cfg_value = mi_strtol(value, &end, BASE_HEX);
+
+ if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL,
+ " hxge_param_set_hxge_debug_flag"
+		    " out of range %llx", cfg_value));
+ return (EINVAL);
+ }
+ if (pa->value != cfg_value) {
+ pa->old_value = pa->value;
+ pa->value = cfg_value;
+ cfg_it = B_TRUE;
+ }
+ if (cfg_it == B_TRUE)
+ hxgep->hxge_debug_level = pa->value;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_hxge_debug_flag"));
+ return (status);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_get_debug_flag(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+ int status = 0;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_debug_flag"));
+
+ if (pa->value > 0xffffffff)
+ (void) mi_mpprintf(mp, "%x%x", (int)(pa->value >> 32),
+ (int)(pa->value & 0xffffffff));
+ else
+ (void) mi_mpprintf(mp, "%x", (int)pa->value);
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_debug_flag"));
+ return (status);
+}
+
+/* ARGSUSED */
+static int
+hxge_param_set_hpi_debug_flag(p_hxge_t hxgep, queue_t *q,
+ mblk_t *mp, char *value, caddr_t cp)
+{
+ char *end;
+ uint32_t status = 0;
+ uint64_t cfg_value = 0;
+ p_hxge_param_t pa = (p_hxge_param_t)cp;
+ uint32_t cfg_it = B_FALSE;
+
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_hpi_debug_flag"));
+ cfg_value = mi_strtol(value, &end, BASE_HEX);
+
+ if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
+ HXGE_DEBUG_MSG((hxgep, NDD_CTL, " hxge_param_set_hpi_debug_flag"
+		    " out of range %llx", cfg_value));
+ return (EINVAL);
+ }
+ if (pa->value != cfg_value) {
+ pa->old_value = pa->value;
+ pa->value = cfg_value;
+ cfg_it = B_TRUE;
+ }
+ if (cfg_it == B_TRUE) {
+ hpi_debug_level = pa->value;
+ }
+	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_hpi_debug_flag"));
+ return (status);
+}
+
+typedef struct block_info {
+ char *name;
+ uint32_t offset;
+} block_info_t;
+
+block_info_t reg_block[] = {
+ {"PIO", PIO_BASE_ADDR},
+ {"PIO_LDSV", PIO_LDSV_BASE_ADDR},
+ {"PIO_LDMASK", PIO_LDMASK_BASE_ADDR},
+ {"PFC", PFC_BASE_ADDR},
+ {"RDC", RDC_BASE_ADDR},
+ {"TDC", TDC_BASE_ADDR},
+ {"VMAC", VMAC_BASE_ADDR},
+ {"END", ALL_FF_32},
+};
+
+/* ARGSUSED */
+static int
+hxge_param_dump_ptrs(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
+{
+ uint_t print_len, buf_len;
+ p_mblk_t np;
+ int rdc, tdc, block;
+ uint64_t base;
+ p_hxge_dma_pt_cfg_t p_dma_cfgp;
+ p_hxge_hw_pt_cfg_t p_cfgp;
+ int buff_alloc_size = HXGE_NDD_INFODUMP_BUFF_8K;
+ p_tx_ring_t *tx_rings;
+ p_rx_rcr_rings_t rx_rcr_rings;
+ p_rx_rcr_ring_t *rcr_rings;
+ p_rx_rbr_rings_t rx_rbr_rings;
+ p_rx_rbr_ring_t *rbr_rings;
+
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_param_dump_ptrs"));
+
+ (void) mi_mpprintf(mp, "ptr information\n");
+
+ if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
+ /* The following may work even if we cannot get a large buf. */
+ (void) mi_mpprintf(mp, "%s\n", "out of buffer");
+ return (0);
+ }
+ buf_len = buff_alloc_size;
+
+ mp->b_cont = np;
+ p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
+ p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+ rx_rcr_rings = hxgep->rx_rcr_rings;
+ rcr_rings = rx_rcr_rings->rcr_rings;
+ rx_rbr_rings = hxgep->rx_rbr_rings;
+ rbr_rings = rx_rbr_rings->rbr_rings;
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+ "hxgep (hxge_t) $%p\n dev_regs (dev_regs_t) $%p\n",
+ hxgep, hxgep->dev_regs);
+
+ ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+ /* do register pointers */
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+ "reg base (hpi_reg_ptr_t) $%p\t pci reg (hpi_reg_ptr_t) $%p\n",
+ hxgep->dev_regs->hxge_regp, hxgep->dev_regs->hxge_pciregp);
+
+ ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+ "\nBlock \t Offset \n");
+
+ ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+ block = 0;
+#if defined(__i386)
+ base = (uint64_t)(uint32_t)hxgep->dev_regs->hxge_regp;
+#else
+ base = (uint64_t)hxgep->dev_regs->hxge_regp;
+#endif
+ while (reg_block[block].offset != ALL_FF_32) {
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+ "%9s\t 0x%llx\n", reg_block[block].name,
+ (unsigned long long) (reg_block[block].offset + base));
+ ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+ block++;
+ }
+
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+ "\nRDC\t rcrp (rx_rcr_ring_t)\t rbrp (rx_rbr_ring_t)\n");
+
+ ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+
+ for (rdc = 0; rdc < p_cfgp->max_rdcs; rdc++) {
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+ " %d\t $%p\t\t $%p\n",
+ rdc, rcr_rings[rdc], rbr_rings[rdc]);
+ ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+ }
+
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+ "\nTDC\t tdcp (tx_ring_t)\n");
+
+ ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+ tx_rings = hxgep->tx_rings->rings;
+ for (tdc = 0; tdc < p_cfgp->max_tdcs; tdc++) {
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
+ " %d\t $%p\n", tdc, tx_rings[tdc]);
+ ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+ }
+
+ print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len, "\n\n");
+
+ ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL, "<== hxge_param_dump_ptrs"));
+ return (0);
+}
+
+/*
+ * Load 'name' into the named dispatch table pointed to by 'ndp'.
+ * 'ndp' should be the address of a char pointer cell. If the table
+ * does not exist (*ndp == 0), a new table is allocated and 'ndp'
+ * is stuffed. If there is not enough space in the table for a new
+ * entry, more space is allocated.
+ */
+boolean_t
+hxge_nd_load(caddr_t *pparam, char *name,
+ pfi_t get_pfi, pfi_t set_pfi, caddr_t data)
+{
+ ND *nd;
+ NDE *nde;
+
+ HXGE_DEBUG_MSG((NULL, NDD2_CTL, " ==> hxge_nd_load: %s", name));
+ if (!pparam)
+ return (B_FALSE);
+	if ((nd = (ND *)*pparam) == NULL) {
+ if ((nd = (ND *) KMEM_ZALLOC(sizeof (ND), KM_NOSLEEP)) == NULL)
+ return (B_FALSE);
+ *pparam = (caddr_t)nd;
+ }
+ if (nd->nd_tbl) {
+ for (nde = nd->nd_tbl; nde->nde_name; nde++) {
+ if (strcmp(name, nde->nde_name) == 0)
+ goto fill_it;
+ }
+ }
+ if (nd->nd_free_count <= 1) {
+ if ((nde = (NDE *) KMEM_ZALLOC(nd->nd_size +
+ NDE_ALLOC_SIZE, KM_NOSLEEP)) == NULL)
+ return (B_FALSE);
+ nd->nd_free_count += NDE_ALLOC_COUNT;
+ if (nd->nd_tbl) {
+ bcopy((char *)nd->nd_tbl, (char *)nde, nd->nd_size);
+ KMEM_FREE((char *)nd->nd_tbl, nd->nd_size);
+ } else {
+ nd->nd_free_count--;
+ nde->nde_name = "?";
+ nde->nde_get_pfi = hxge_nd_get_names;
+ nde->nde_set_pfi = hxge_set_default;
+ }
+ nde->nde_data = (caddr_t)nd;
+ nd->nd_tbl = nde;
+ nd->nd_size += NDE_ALLOC_SIZE;
+ }
+ for (nde = nd->nd_tbl; nde->nde_name; nde++)
+ noop;
+ nd->nd_free_count--;
+fill_it:
+ nde->nde_name = name;
+ nde->nde_get_pfi = get_pfi;
+ nde->nde_set_pfi = set_pfi;
+ nde->nde_data = data;
+ HXGE_DEBUG_MSG((NULL, NDD2_CTL, " <== hxge_nd_load"));
+
+ return (B_TRUE);
+}
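+
+/*
+ * A minimal usage sketch for the dispatch table above (illustrative
+ * only: the parameter entry and the generic get/set handlers are
+ * stand-ins for whatever a caller actually registers):
+ *
+ *	p_hxge_param_t pa = &hxgep->param_arr[param_accept_jumbo];
+ *
+ *	if (!hxge_nd_load(&hxgep->param_list, pa->name,
+ *	    (pfi_t)hxge_param_get_generic,
+ *	    (pfi_t)hxge_param_set_generic, (caddr_t)pa))
+ *		hxge_nd_free(&hxgep->param_list);
+ */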
+
+/*
+ * Free the table pointed to by 'pparam'
+ */
+void
+hxge_nd_free(caddr_t *pparam)
+{
+ ND *nd;
+
+ if ((nd = (ND *)*pparam) != NULL) {
+ if (nd->nd_tbl)
+ KMEM_FREE((char *)nd->nd_tbl, nd->nd_size);
+ KMEM_FREE((char *)nd, sizeof (ND));
+ *pparam = nil(caddr_t);
+ }
+}
+
+int
+hxge_nd_getset(p_hxge_t hxgep, queue_t *q, caddr_t param, p_mblk_t mp)
+{
+ int err;
+ IOCP iocp;
+ p_mblk_t mp1, mp2;
+ ND *nd;
+ NDE *nde;
+ char *valp;
+
+ size_t avail;
+
+ if (!param) {
+ return (B_FALSE);
+ }
+ nd = (ND *) param;
+ iocp = (IOCP) mp->b_rptr;
+ if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
+ mp->b_datap->db_type = M_IOCACK;
+ iocp->ioc_count = 0;
+ iocp->ioc_error = EINVAL;
+ return (B_FALSE);
+ }
+ /*
+ * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
+ * However, existing code sends in some big buffers.
+ */
+ avail = iocp->ioc_count;
+ if (mp1->b_cont) {
+ freemsg(mp1->b_cont);
+ mp1->b_cont = NULL;
+ }
+ mp1->b_datap->db_lim[-1] = '\0'; /* Force null termination */
+ for (valp = (char *)mp1->b_rptr; *valp != '\0'; valp++) {
+ if (*valp == '-')
+ *valp = '_';
+ }
+
+ valp = (char *)mp1->b_rptr;
+
+ for (nde = nd->nd_tbl; /* */; nde++) {
+ if (!nde->nde_name)
+ return (B_FALSE);
+ if (strcmp(nde->nde_name, valp) == 0)
+ break;
+ }
+ err = EINVAL;
+ while (*valp++)
+ noop;
+ if (!*valp || valp >= (char *)mp1->b_wptr)
+ valp = nilp(char);
+ switch (iocp->ioc_cmd) {
+ case ND_GET:
+ /*
+ * (temporary) hack: "*valp" is size of user buffer for
+ * copyout. If result of action routine is too big, free excess
+ * and return ioc_rval as buffer size needed. Return as many
+ * mblocks as will fit, free the rest. For backward
+ * compatibility, assume size of original ioctl buffer if
+ * "*valp" bad or not given.
+ */
+ if (valp)
+ avail = mi_strtol(valp, (char **)0, 10);
+ /*
+ * We overwrite the name/value with the reply data
+ */
+ mp2 = mp1;
+ while (mp2) {
+ mp2->b_wptr = mp2->b_rptr;
+ mp2 = mp2->b_cont;
+ }
+
+ err = (*nde->nde_get_pfi) (hxgep, q, mp1, nde->nde_data);
+
+ if (!err) {
+ size_t size_out = 0;
+ ssize_t excess;
+
+ iocp->ioc_rval = 0;
+
+ /* Tack on the null */
+ err = hxge_mk_mblk_tail_space(mp1, &mp2, 1);
+ if (!err) {
+ *mp2->b_wptr++ = '\0';
+ size_out = msgdsize(mp1);
+ excess = size_out - avail;
+ if (excess > 0) {
+ iocp->ioc_rval = (int)size_out;
+ size_out -= excess;
+ (void) adjmsg(mp1, -(excess + 1));
+ err = hxge_mk_mblk_tail_space(
+ mp1, &mp2, 1);
+ if (!err)
+ *mp2->b_wptr++ = '\0';
+ else
+ size_out = 0;
+ }
+ } else
+ size_out = 0;
+ iocp->ioc_count = size_out;
+ }
+ break;
+
+ case ND_SET:
+ if (valp) {
+ if (nde->nde_set_pfi) {
+ err = (*nde->nde_set_pfi) (hxgep, q, mp1, valp,
+ nde->nde_data);
+ iocp->ioc_count = 0;
+ freemsg(mp1);
+ mp->b_cont = NULL;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+ iocp->ioc_error = err;
+ mp->b_datap->db_type = M_IOCACK;
+ return (B_TRUE);
+}
+
+/* ARGSUSED */
+int
+hxge_nd_get_names(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t param)
+{
+ ND *nd;
+ NDE *nde;
+ char *rwtag;
+ boolean_t get_ok, set_ok;
+ size_t param_len;
+ int status = 0;
+
+ nd = (ND *) param;
+ if (!nd)
+ return (ENOENT);
+
+ for (nde = nd->nd_tbl; nde->nde_name; nde++) {
+ get_ok = (nde->nde_get_pfi != hxge_get_default) &&
+ (nde->nde_get_pfi != NULL);
+ set_ok = (nde->nde_set_pfi != hxge_set_default) &&
+ (nde->nde_set_pfi != NULL);
+ if (get_ok) {
+ if (set_ok)
+ rwtag = "read and write";
+ else
+ rwtag = "read only";
+ } else if (set_ok)
+ rwtag = "write only";
+ else {
+ continue;
+ }
+ param_len = strlen(rwtag);
+ param_len += strlen(nde->nde_name);
+ param_len += 4;
+
+ (void) mi_mpprintf(mp, "%s (%s)", nde->nde_name, rwtag);
+ }
+ return (status);
+}
+
+/* ARGSUSED */
+int
+hxge_get_default(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t data)
+{
+ return (EACCES);
+}
+
+/* ARGSUSED */
+int
+hxge_set_default(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, char *value,
+ caddr_t data)
+{
+ return (EACCES);
+}
+
+void
+hxge_param_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
+{
+ int cmd;
+ int status = B_FALSE;
+
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_param_ioctl"));
+ cmd = iocp->ioc_cmd;
+ switch (cmd) {
+ default:
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL,
+ "hxge_param_ioctl: bad cmd 0x%0x", cmd));
+ break;
+
+ case ND_GET:
+ case ND_SET:
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL,
+ "hxge_param_ioctl: cmd 0x%0x", cmd));
+ if (!hxge_nd_getset(hxgep, wq, hxgep->param_list, mp)) {
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL,
+ "false ret from hxge_nd_getset"));
+ break;
+ }
+ status = B_TRUE;
+ break;
+ }
+
+ if (status) {
+ qreply(wq, mp);
+ } else {
+ miocnak(wq, mp, 0, EINVAL);
+ }
+ HXGE_DEBUG_MSG((hxgep, IOC_CTL, "<== hxge_param_ioctl"));
+}
diff --git a/usr/src/uts/common/io/hxge/hxge_peu.h b/usr/src/uts/common/io/hxge/hxge_peu.h
new file mode 100644
index 0000000000..942366824d
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_peu.h
@@ -0,0 +1,48 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_PEU_H
+#define _SYS_HXGE_HXGE_PEU_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Offsets in the SPROM MAP.
+ */
+#define HCR_MAX_FRAME_SZ 0x00
+#define HCR_N_MAC_ADDRS 0x8
+#define HCR_ADDR_LO 0xC
+#define HCR_ADDR_HI 0x10
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_SYS_HXGE_HXGE_PEU_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_peu_hw.h b/usr/src/uts/common/io/hxge/hxge_peu_hw.h
new file mode 100644
index 0000000000..cf8807021d
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_peu_hw.h
@@ -0,0 +1,5763 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HXGE_PEU_HW_H
+#define _HXGE_PEU_HW_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PIO_LDSV_BASE_ADDR 0X800000
+#define PIO_BASE_ADDR 0X000000
+#define PIO_LDMASK_BASE_ADDR 0XA00000
+
+#define DEVICE_VENDOR_ID (PIO_BASE_ADDR + 0x0)
+#define STATUS_COMMAND (PIO_BASE_ADDR + 0x4)
+#define CLASSCODE_REV_ID (PIO_BASE_ADDR + 0x8)
+#define BIST_HDRTYP_LATTMR_CASHLSZ (PIO_BASE_ADDR + 0xC)
+#define PIO_BAR0 (PIO_BASE_ADDR + 0x10)
+#define PIO_BAR1 (PIO_BASE_ADDR + 0x14)
+#define MSIX_BAR0 (PIO_BASE_ADDR + 0x18)
+#define MSIX_BAR1 (PIO_BASE_ADDR + 0x1C)
+#define VIRT_BAR0 (PIO_BASE_ADDR + 0x20)
+#define VIRT_BAR1 (PIO_BASE_ADDR + 0x24)
+#define CIS_PTR (PIO_BASE_ADDR + 0x28)
+#define SUB_VENDOR_ID (PIO_BASE_ADDR + 0x2C)
+#define EXP_ROM_BAR (PIO_BASE_ADDR + 0x30)
+#define CAP_PTR (PIO_BASE_ADDR + 0x34)
+#define INT_LINE (PIO_BASE_ADDR + 0x3C)
+#define PM_CAP (PIO_BASE_ADDR + 0x40)
+#define PM_CTRL_STAT (PIO_BASE_ADDR + 0x44)
+#define MSI_CAP (PIO_BASE_ADDR + 0x50)
+#define MSI_LO_ADDR (PIO_BASE_ADDR + 0x54)
+#define MSI_HI_ADDR (PIO_BASE_ADDR + 0x58)
+#define MSI_DATA (PIO_BASE_ADDR + 0x5C)
+#define MSI_MASK (PIO_BASE_ADDR + 0x60)
+#define MSI_PEND (PIO_BASE_ADDR + 0x64)
+#define MSIX_CAP (PIO_BASE_ADDR + 0x70)
+#define MSIX_TAB_OFF (PIO_BASE_ADDR + 0x74)
+#define MSIX_PBA_OFF (PIO_BASE_ADDR + 0x78)
+#define PCIE_CAP (PIO_BASE_ADDR + 0x80)
+#define DEV_CAP (PIO_BASE_ADDR + 0x84)
+#define DEV_STAT_CTRL (PIO_BASE_ADDR + 0x88)
+#define LNK_CAP (PIO_BASE_ADDR + 0x8C)
+#define LNK_STAT_CTRL (PIO_BASE_ADDR + 0x90)
+#define VEN_CAP_HDR (PIO_BASE_ADDR + 0x94)
+#define VEN_CTRL (PIO_BASE_ADDR + 0x98)
+#define VEN_PRT_HDR (PIO_BASE_ADDR + 0x9C)
+#define ACKLAT_REPLAY (PIO_BASE_ADDR + 0xA0)
+#define OTH_MSG (PIO_BASE_ADDR + 0xA4)
+#define FORCE_LINK (PIO_BASE_ADDR + 0xA8)
+#define ACK_FREQ (PIO_BASE_ADDR + 0xAC)
+#define LINK_CTRL (PIO_BASE_ADDR + 0xB0)
+#define LANE_SKEW (PIO_BASE_ADDR + 0xB4)
+#define SYMBOL_NUM (PIO_BASE_ADDR + 0xB8)
+#define SYMB_TIM_RADM_FLT1 (PIO_BASE_ADDR + 0xBC)
+#define RADM_FLT2 (PIO_BASE_ADDR + 0xC0)
+#define CASCADE_DEB_REG0 (PIO_BASE_ADDR + 0xC8)
+#define CASCADE_DEB_REG1 (PIO_BASE_ADDR + 0xCC)
+#define TXP_FC_CREDIT_STAT (PIO_BASE_ADDR + 0xD0)
+#define TXNP_FC_CREDIT_STAT (PIO_BASE_ADDR + 0xD4)
+#define TXCPL_FC_CREDIT_STAT (PIO_BASE_ADDR + 0xD8)
+#define QUEUE_STAT (PIO_BASE_ADDR + 0xDC)
+#define GBT_DEBUG0 (PIO_BASE_ADDR + 0xE0)
+#define GBT_DEBUG1 (PIO_BASE_ADDR + 0xE4)
+#define GBT_DEBUG2 (PIO_BASE_ADDR + 0xE8)
+#define GBT_DEBUG3 (PIO_BASE_ADDR + 0xEC)
+#define PIPE_DEBUG0 (PIO_BASE_ADDR + 0xF0)
+#define PIPE_DEBUG1 (PIO_BASE_ADDR + 0xF4)
+#define PIPE_DEBUG2 (PIO_BASE_ADDR + 0xF8)
+#define PIPE_DEBUG3 (PIO_BASE_ADDR + 0xFC)
+#define PCIE_ENH_CAP_HDR (PIO_BASE_ADDR + 0x100)
+#define UNC_ERR_STAT (PIO_BASE_ADDR + 0x104)
+#define UNC_ERR_MASK (PIO_BASE_ADDR + 0x108)
+#define UNC_ERR_SVRTY (PIO_BASE_ADDR + 0x10C)
+#define CORR_ERR_STAT (PIO_BASE_ADDR + 0x110)
+#define CORR_ERR_MASK (PIO_BASE_ADDR + 0x114)
+#define ADV_CAP_CTRL (PIO_BASE_ADDR + 0x118)
+#define HDR_LOG0 (PIO_BASE_ADDR + 0x11C)
+#define HDR_LOG1 (PIO_BASE_ADDR + 0x120)
+#define HDR_LOG2 (PIO_BASE_ADDR + 0x124)
+#define HDR_LOG3 (PIO_BASE_ADDR + 0x128)
+#define PIPE_RX_TX_CONTROL (PIO_BASE_ADDR + 0x1000)
+#define PIPE_RX_TX_STATUS (PIO_BASE_ADDR + 0x1004)
+#define PIPE_RX_TX_PWR_CNTL (PIO_BASE_ADDR + 0x1008)
+#define PIPE_RX_TX_PARAM (PIO_BASE_ADDR + 0x1010)
+#define PIPE_RX_TX_CLOCK (PIO_BASE_ADDR + 0x1014)
+#define PIPE_GLUE_CNTL0 (PIO_BASE_ADDR + 0x1018)
+#define PIPE_GLUE_CNTL1 (PIO_BASE_ADDR + 0x101C)
+#define HCR_REG (PIO_BASE_ADDR + 0x2000)
+#define BLOCK_RESET (PIO_BASE_ADDR + 0x8000)
+#define TIMEOUT_CFG (PIO_BASE_ADDR + 0x8004)
+#define HEART_CFG (PIO_BASE_ADDR + 0x8008)
+#define HEART_TIMER (PIO_BASE_ADDR + 0x800C)
+#define CIP_GP_CTRL (PIO_BASE_ADDR + 0x8010)
+#define CIP_STATUS (PIO_BASE_ADDR + 0x8014)
+#define CIP_LINK_STAT (PIO_BASE_ADDR + 0x801C)
+#define EPC_STAT (PIO_BASE_ADDR + 0x8020)
+#define EPC_DATA (PIO_BASE_ADDR + 0x8024)
+#define SPC_STAT (PIO_BASE_ADDR + 0x8030)
+#define HOST2SPI_INDACC_ADDR (PIO_BASE_ADDR + 0x8050)
+#define HOST2SPI_INDACC_CTRL (PIO_BASE_ADDR + 0x8054)
+#define HOST2SPI_INDACC_DATA (PIO_BASE_ADDR + 0x8058)
+#define BT_CTRL0 (PIO_BASE_ADDR + 0x8080)
+#define BT_DATA0 (PIO_BASE_ADDR + 0x8084)
+#define BT_INTMASK0 (PIO_BASE_ADDR + 0x8088)
+#define BT_CTRL1 (PIO_BASE_ADDR + 0x8090)
+#define BT_DATA1 (PIO_BASE_ADDR + 0x8094)
+#define BT_INTMASK1 (PIO_BASE_ADDR + 0x8098)
+#define BT_CTRL2 (PIO_BASE_ADDR + 0x80A0)
+#define BT_DATA2 (PIO_BASE_ADDR + 0x80A4)
+#define BT_INTMASK2 (PIO_BASE_ADDR + 0x80A8)
+#define BT_CTRL3 (PIO_BASE_ADDR + 0x80B0)
+#define BT_DATA3 (PIO_BASE_ADDR + 0x80B4)
+#define BT_INTMASK3 (PIO_BASE_ADDR + 0x80B8)
+#define DEBUG_SEL (PIO_BASE_ADDR + 0x80C0)
+#define INDACC_MEM0_CTRL (PIO_BASE_ADDR + 0x80C4)
+#define INDACC_MEM0_DATA0 (PIO_BASE_ADDR + 0x80C8)
+#define INDACC_MEM0_DATA1 (PIO_BASE_ADDR + 0x80CC)
+#define INDACC_MEM0_DATA2 (PIO_BASE_ADDR + 0x80D0)
+#define INDACC_MEM0_DATA3 (PIO_BASE_ADDR + 0x80D4)
+#define INDACC_MEM0_PRTY (PIO_BASE_ADDR + 0x80D8)
+#define INDACC_MEM1_CTRL (PIO_BASE_ADDR + 0x80DC)
+#define INDACC_MEM1_DATA0 (PIO_BASE_ADDR + 0x80E0)
+#define INDACC_MEM1_DATA1 (PIO_BASE_ADDR + 0x80E4)
+#define INDACC_MEM1_DATA2 (PIO_BASE_ADDR + 0x80E8)
+#define INDACC_MEM1_DATA3 (PIO_BASE_ADDR + 0x80EC)
+#define INDACC_MEM1_PRTY (PIO_BASE_ADDR + 0x80F0)
+#define PHY_DEBUG_TRAINING_VEC (PIO_BASE_ADDR + 0x80F4)
+#define PEU_DEBUG_TRAINING_VEC (PIO_BASE_ADDR + 0x80F8)
+#define PIPE_CFG0 (PIO_BASE_ADDR + 0x8120)
+#define PIPE_CFG1 (PIO_BASE_ADDR + 0x8124)
+#define CIP_BAR_MASK_CFG (PIO_BASE_ADDR + 0x8134)
+#define CIP_BAR_MASK (PIO_BASE_ADDR + 0x8138)
+#define CIP_LDSV0_STAT (PIO_BASE_ADDR + 0x8140)
+#define CIP_LDSV1_STAT (PIO_BASE_ADDR + 0x8144)
+#define PEU_INTR_STAT (PIO_BASE_ADDR + 0x8148)
+#define PEU_INTR_MASK (PIO_BASE_ADDR + 0x814C)
+#define PEU_INTR_STAT_MIRROR (PIO_BASE_ADDR + 0x8150)
+#define CPL_HDRQ_PERR_LOC (PIO_BASE_ADDR + 0x8154)
+#define CPL_DATAQ_PERR_LOC (PIO_BASE_ADDR + 0x8158)
+#define RETR_PERR_LOC (PIO_BASE_ADDR + 0x815C)
+#define RETR_SOT_PERR_LOC (PIO_BASE_ADDR + 0x8160)
+#define P_HDRQ_PERR_LOC (PIO_BASE_ADDR + 0x8164)
+#define P_DATAQ_PERR_LOC (PIO_BASE_ADDR + 0x8168)
+#define NP_HDRQ_PERR_LOC (PIO_BASE_ADDR + 0x816C)
+#define NP_DATAQ_PERR_LOC (PIO_BASE_ADDR + 0x8170)
+#define MSIX_PERR_LOC (PIO_BASE_ADDR + 0x8174)
+#define HCR_PERR_LOC (PIO_BASE_ADDR + 0x8178)
+#define TDC_PIOACC_ERR_LOG (PIO_BASE_ADDR + 0x8180)
+#define RDC_PIOACC_ERR_LOG (PIO_BASE_ADDR + 0x8184)
+#define PFC_PIOACC_ERR_LOG (PIO_BASE_ADDR + 0x8188)
+#define VMAC_PIOACC_ERR_LOG (PIO_BASE_ADDR + 0x818C)
+#define LD_GRP_CTRL (PIO_BASE_ADDR + 0x8300)
+#define DEV_ERR_STAT (PIO_BASE_ADDR + 0x8380)
+#define DEV_ERR_MASK (PIO_BASE_ADDR + 0x8384)
+#define LD_INTR_TIM_RES (PIO_BASE_ADDR + 0x8390)
+#define LDSV0 (PIO_LDSV_BASE_ADDR + 0x0)
+#define LDSV1 (PIO_LDSV_BASE_ADDR + 0x4)
+#define LD_INTR_MASK (PIO_LDMASK_BASE_ADDR + 0x0)
+#define LD_INTR_MGMT (PIO_LDMASK_BASE_ADDR + 0x4)
+#define SID (PIO_LDMASK_BASE_ADDR + 0x8)
+
+
+/*
+ * Register: DeviceVendorId
+ * Device ID and Vendor ID
+ * Description: Device ID/Vendor ID
+ * Fields:
+ * Device ID Register: dbi writeable
+ * Vendor ID Register (Sun Microsystem): dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t device_id:16;
+ uint32_t vendor_id:16;
+#else
+ uint32_t vendor_id:16;
+ uint32_t device_id:16;
+#endif
+ } bits;
+} device_vendor_id_t;
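+
+/*
+ * These overlays are used by reading the raw 32-bit register into
+ * .value and picking fields out of .bits. A minimal sketch, assuming
+ * a hypothetical rd32() PIO accessor (not defined in this header);
+ * 0x108e is the Sun Microsystems PCI vendor ID:
+ *
+ *	device_vendor_id_t id;
+ *
+ *	id.value = rd32(DEVICE_VENDOR_ID);
+ *	if (id.bits.vendor_id != 0x108e)
+ *		return (HXGE_ERROR);
+ */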
+
+
+/*
+ * Register: StatusCommand
+ * Status and Command
+ * Description: Status/Command
+ * Fields:
+ * The device detected a parity error. The device detects
+ * Poisoned TLP received regardless of Command Register Parity
+ * Error Enable/Response bit.
+ * The device signaled a system error with SERR#. The device
+ * detects a UE, is about to send a F/NF error message; and if
+ * the Command Register SERR# enable is set.
+ * A transaction initiated by this device was terminated due to a
+ * Master Abort (i.e. Unsupported Request Completion Status was
+ * received).
+ * A transaction initiated by this device was terminated due to a
+ * Target Abort (i.e. Completer Abort Completion Status was
+ * received).
+ * Set when Completer Abort Completion Status is sent back to the
+ * RC. The request violated hydra's programming rules.
+ * The slowest DEVSEL# timing for this target device (N/A in
+ * PCIE)
+ * Master Data Parity Error - set if all the following conditions
+ * are true: received a poisoned TLP header or sending a poisoned
+ * write request; and the parity error response bit in the
+ * command register is set.
+ * Fast Back-to-Back Capable (N/A in PCIE)
+ * 66 MHz Capable (N/A in PCIE)
+ * Capabilities List - presence of extended capability item.
+ * INTx Status
+ * INTx Assertion Disable
+ * Fast Back-to-Back Enable (N/A in PCIE)
+ * This device can drive the SERR# line.
+ * IDSEL Stepping/Wait Cycle Control (N/A in PCIE)
+ * This device can drive the PERR# line.
+ * VGA Palette Snoop (N/A in PCIE)
+ * The device can issue Memory Write-and-Invalidate commands (N/A
+ * in PCIE)
+ * This device monitors for PCI Special Cycles (N/A in PCIE)
+ * This device's bus master capability is enabled.
+ * This device responds to PCI memory accesses.
+ * This device responds to PCI IO accesses (No I/O space used in
+ * Hydra)
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t det_par_err:1;
+ uint32_t sig_serr:1;
+ uint32_t rcv_mstr_abrt:1;
+ uint32_t rcv_tgt_abrt:1;
+ uint32_t sig_tgt_abrt:1;
+ uint32_t devsel_timing:2;
+ uint32_t mstr_dpe:1;
+ uint32_t fast_b2b_cap:1;
+ uint32_t rsrvd:1;
+ uint32_t mhz_cap:1;
+ uint32_t cap_list:1;
+ uint32_t intx_stat:1;
+ uint32_t rsrvd1:3;
+ uint32_t rsrvd2:5;
+ uint32_t intx_dis:1;
+ uint32_t fast_b2b_en:1;
+ uint32_t serr_en:1;
+ uint32_t idsel_step:1;
+ uint32_t par_err_en:1;
+ uint32_t vga_snoop:1;
+ uint32_t mwi_en:1;
+ uint32_t special_cycle:1;
+ uint32_t bm_en:1;
+ uint32_t mem_sp_en:1;
+ uint32_t io_sp_en:1;
+#else
+ uint32_t io_sp_en:1;
+ uint32_t mem_sp_en:1;
+ uint32_t bm_en:1;
+ uint32_t special_cycle:1;
+ uint32_t mwi_en:1;
+ uint32_t vga_snoop:1;
+ uint32_t par_err_en:1;
+ uint32_t idsel_step:1;
+ uint32_t serr_en:1;
+ uint32_t fast_b2b_en:1;
+ uint32_t intx_dis:1;
+ uint32_t rsrvd2:5;
+ uint32_t rsrvd1:3;
+ uint32_t intx_stat:1;
+ uint32_t cap_list:1;
+ uint32_t mhz_cap:1;
+ uint32_t rsrvd:1;
+ uint32_t fast_b2b_cap:1;
+ uint32_t mstr_dpe:1;
+ uint32_t devsel_timing:2;
+ uint32_t sig_tgt_abrt:1;
+ uint32_t rcv_tgt_abrt:1;
+ uint32_t rcv_mstr_abrt:1;
+ uint32_t sig_serr:1;
+ uint32_t det_par_err:1;
+#endif
+ } bits;
+} status_command_t;
+
+
+/*
+ * Register: ClasscodeRevId
+ * Class Code, and Revision ID
+ * Description: Class Code/Revision ID
+ * Fields:
+ * Base Class (Network Controller): dbi writeable
+ * Sub Class (Ethernet Controller): dbi writeable
+ * Programming Interface: dbi writeable
+ * Revision ID: dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t base_class:8;
+ uint32_t sub_class:8;
+ uint32_t prog_if:8;
+ uint32_t rev_id:8;
+#else
+ uint32_t rev_id:8;
+ uint32_t prog_if:8;
+ uint32_t sub_class:8;
+ uint32_t base_class:8;
+#endif
+ } bits;
+} classcode_rev_id_t;
+
+
+/*
+ * Register: BistHdrtypLattmrCashlsz
+ * BIST, Header Type, Latency Timer, and Cache Line Size
+ * Description: BIST, Latency Timer etc
+ * Fields:
+ * BIST is not supported. Header Type Fields
+ * Multi-Function Device: dbi writeable
+ * Configuration Header Format. 0 = Type 0.
+ * Master Latency Timer. (N/A in PCIE)
+ * Cache line size for legacy compatibility (N/A in PCIE)
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t value:8;
+ uint32_t mult_func_dev:1;
+ uint32_t cfg_hdr_fmt:7;
+ uint32_t timer:8;
+ uint32_t cache_line_sz:8;
+#else
+ uint32_t cache_line_sz:8;
+ uint32_t timer:8;
+ uint32_t cfg_hdr_fmt:7;
+ uint32_t mult_func_dev:1;
+ uint32_t value:8;
+#endif
+ } bits;
+} bist_hdrtyp_lattmr_cashlsz_t;
+
+
+/*
+ * Register: PioBar0
+ * PIO BAR0
+ * Description: PIO BAR0 - For Hydra PIO space. PIO BAR1 & PIO BAR0
+ * are together configured as a 64b BAR register (Synopsys core
+ * implementation dependent) where PIO BAR1 handles the upper address
+ * bits and PIO BAR0 handles the lower address bits.
+ * Fields:
+ * Base Address Relocation : indirect dbi writeable via bar0Mask
+ * register in EP core
+ * Base Address for PIO (16MB space) : indirect dbi writeable via
+ * bar0Mask register in EP core
+ * Prefetchable if memory BAR (PIOs not prefetchable): dbi
+ * writeable
+ * If memory BAR, then 32 or 64 bit BAR (00 = 32 bit, 10 = 64
+ * bit): dbi writeable
+ * I/O or Memory space indicator (0 = memory BAR): dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t base_addr_rel_lo:8;
+ uint32_t base_addr:20;
+ uint32_t pftch:1;
+ uint32_t type:2;
+ uint32_t mem_sp_ind:1;
+#else
+ uint32_t mem_sp_ind:1;
+ uint32_t type:2;
+ uint32_t pftch:1;
+ uint32_t base_addr:20;
+ uint32_t base_addr_rel_lo:8;
+#endif
+ } bits;
+} pio_bar0_t;
+
+
+/*
+ * Register: PioBar1
+ * PIO BAR1
+ * Description: PIO BAR1
+ * Fields:
+ * Base Address Relocation : indirect dbi writeable via bar0Mask
+ * register in EP core
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t base_addr_rel_hi:32;
+#else
+ uint32_t base_addr_rel_hi:32;
+#endif
+ } bits;
+} pio_bar1_t;
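+
+/*
+ * Since PIO BAR1:BAR0 form a single 64b BAR, the programmed PIO base
+ * address is recovered by concatenating the two registers and masking
+ * the low-order attribute bits (memSpInd, type, pftch). A sketch,
+ * again assuming a hypothetical rd32() accessor:
+ *
+ *	pio_bar0_t bar0;
+ *	pio_bar1_t bar1;
+ *	uint64_t base;
+ *
+ *	bar0.value = rd32(PIO_BAR0);
+ *	bar1.value = rd32(PIO_BAR1);
+ *	base = ((uint64_t)bar1.value << 32) | (bar0.value & 0xfffffff0U);
+ */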
+
+
+/*
+ * Register: MsixBar0
+ * MSIX BAR0
+ * Description: MSIX BAR0 - For MSI-X Tables and PBA. MSIX BAR1 & MSIX
+ * BAR0 are together configured as a 64b BAR register (Synopsys core
+ * implementation dependent) where MSIX BAR1 handles the upper
+ * address bits and MSIX BAR0 handles the lower address bits.
+ * Fields:
+ * Base Address Relocation : indirect dbi writeable via bar2Mask
+ * register in EP core
+ * Base Address for MSIX (16KB space) : indirect dbi writeable
+ * via bar2Mask register in EP core
+ * Prefetchable if memory BAR (Not prefetchable) : dbi writeable
+ * If memory BAR, then 32 or 64 bit BAR (00 = 32 bit, 10 = 64
+ * bit): dbi writeable
+ * I/O or Memory space indicator (0 = memory BAR) : dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t base_addr_rel_lo:18;
+ uint32_t base_addr:10;
+ uint32_t pftch:1;
+ uint32_t type:2;
+ uint32_t mem_sp_ind:1;
+#else
+ uint32_t mem_sp_ind:1;
+ uint32_t type:2;
+ uint32_t pftch:1;
+ uint32_t base_addr:10;
+ uint32_t base_addr_rel_lo:18;
+#endif
+ } bits;
+} msix_bar0_t;
+
+
+/*
+ * Register: MsixBar1
+ * MSIX BAR1
+ * Description: MSIX BAR1
+ * Fields:
+ * Base Address Relocation : indirect dbi writeable via bar2Mask
+ * register in EP core
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t base_addr_rel_hi:32;
+#else
+ uint32_t base_addr_rel_hi:32;
+#endif
+ } bits;
+} msix_bar1_t;
+
+
+/*
+ * Register: VirtBar0
+ * Virtualization BAR0
+ * Description: Virtualization BAR0 - Previously for Hydra
+ * Virtualization space. This BAR is no longer enabled and is not dbi
+ * writeable. VIRT BAR1 & VIRT BAR0 could be configured as a 64b BAR
+ * register (Synopsys core implementation dependent), but this is not
+ * used in hydra.
+ * Fields:
+ * Base Address Relocation
+ * Base Address for Virtualization (64KB space)
+ * Prefetchable if memory BAR (Not prefetchable)
+ * If memory BAR, then 32 or 64 bit BAR (00 = 32 bit, 10 = 64
+ * bit)
+ * I/O or Memory space indicator (0 = memory BAR)
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t base_addr_rel_lo:17;
+ uint32_t base_addr:11;
+ uint32_t pftch:1;
+ uint32_t type:2;
+ uint32_t mem_sp_ind:1;
+#else
+ uint32_t mem_sp_ind:1;
+ uint32_t type:2;
+ uint32_t pftch:1;
+ uint32_t base_addr:11;
+ uint32_t base_addr_rel_lo:17;
+#endif
+ } bits;
+} virt_bar0_t;
+
+
+/*
+ * Register: VirtBar1
+ * Virtualization BAR1
+ * Description: Previously for Virtualization BAR1 This bar is no
+ * longer enabled and is not dbi writeable.
+ * Fields:
+ * Base Address Relocation
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t base_addr_rel_hi:32;
+#else
+ uint32_t base_addr_rel_hi:32;
+#endif
+ } bits;
+} virt_bar1_t;
+
+
+/*
+ * Register: CisPtr
+ * CardBus CIS Pointer
+ * Description: CardBus CIS Pointer
+ * Fields:
+ * CardBus CIS Pointer: dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t cis_ptr:32;
+#else
+ uint32_t cis_ptr:32;
+#endif
+ } bits;
+} cis_ptr_t;
+
+
+/*
+ * Register: SubVendorId
+ * Subsystem ID and Vendor ID
+ * Description: Subsystem ID and Vendor ID
+ * Fields:
+ * Subsystem ID as assigned by PCI-SIG : dbi writeable
+ * Subsystem Vendor ID as assigned by PCI-SIG : dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t dev_id:16;
+ uint32_t vendor_id:16;
+#else
+ uint32_t vendor_id:16;
+ uint32_t dev_id:16;
+#endif
+ } bits;
+} sub_vendor_id_t;
+
+
+/*
+ * Register: ExpRomBar
+ * Expansion ROM BAR
+ * Description: Expansion ROM BAR - For Hydra EEPROM space
+ * Fields:
+ * Base Address Relocatable : indirect dbi writeable via
+ * romBarMask register in EP core
+ * Base Address for ROM (2MB) : indirect dbi writeable via
+ * romBarMask register in EP core
+ * ROM Enable: dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t base_addr_rel:11;
+ uint32_t base_addr:10;
+ uint32_t rsrvd:10;
+ uint32_t rom_en:1;
+#else
+ uint32_t rom_en:1;
+ uint32_t rsrvd:10;
+ uint32_t base_addr:10;
+ uint32_t base_addr_rel:11;
+#endif
+ } bits;
+} exp_rom_bar_t;
+
+
+/*
+ * Register: CapPtr
+ * Capabilities Pointer
+ * Description: Capabilities Pointer
+ * Fields:
+ * Pointer to PM Capability structure : dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t pm_ptr:8;
+#else
+ uint32_t pm_ptr:8;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} cap_ptr_t;
+
+
+/*
+ * Register: IntLine
+ * Interrupt Line
+ * Description: Interrupt Line
+ * Fields:
+ * Max Latency (N/A in PCIE)
+ * Minimum Grant (N/A in PCIE)
+ * Interrupt pin: dbi writeable
+ * Interrupt Line
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t max_lat:8;
+ uint32_t min_gnt:8;
+ uint32_t int_pin:8;
+ uint32_t int_line:8;
+#else
+ uint32_t int_line:8;
+ uint32_t int_pin:8;
+ uint32_t min_gnt:8;
+ uint32_t max_lat:8;
+#endif
+ } bits;
+} int_line_t;
+
+
+/*
+ * Register: PmCap
+ * Power Management Capability
+ * Description: Power Management Capability
+ * Fields:
+ * PME Support (N/A in Hydra): dbi writeable
+ * D2 Support (N/A in Hydra): dbi writeable
+ * D1 Support (N/A in Hydra): dbi writeable
+ * Aux Current (N/A in Hydra): dbi writeable
+ * Device Specific Initialization: dbi writeable
+ * PME Clock (N/A in PCIE)
+ * PM Spec Version: dbi writeable
+ * Next Capability Pointer: dbi writeable
+ * Power Management Capability ID: dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t pme_supt:5;
+ uint32_t d2_supt:1;
+ uint32_t d1_supt:1;
+ uint32_t aux_curr:3;
+ uint32_t dev_spec_init:1;
+ uint32_t rsrvd:1;
+ uint32_t pme_clk:1;
+ uint32_t pm_ver:3;
+ uint32_t nxt_cap_ptr:8;
+ uint32_t pm_id:8;
+#else
+ uint32_t pm_id:8;
+ uint32_t nxt_cap_ptr:8;
+ uint32_t pm_ver:3;
+ uint32_t pme_clk:1;
+ uint32_t rsrvd:1;
+ uint32_t dev_spec_init:1;
+ uint32_t aux_curr:3;
+ uint32_t d1_supt:1;
+ uint32_t d2_supt:1;
+ uint32_t pme_supt:5;
+#endif
+ } bits;
+} pm_cap_t;
+
+
+/*
+ * Register: PmCtrlStat
+ * Power Management Control and Status
+ * Description: Power Management Control and Status
+ * Fields:
+ * Data for additional info (N/A)
+ * Bus Power and Clock Control Enable (N/A in PCIE)
+ * B2/B3 Support (N/A in PCIE)
+ * Indicates if PME event occurred
+ * Data Scale (N/A)
+ * Data Select (N/A)
+ * PME Enable (Sticky)
+ * Power State
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t pwr_data:8;
+ uint32_t pwr_clk_en:1;
+ uint32_t b2_b3_supt:1;
+ uint32_t rsrvd:6;
+ uint32_t pme_stat:1;
+ uint32_t data_scale:2;
+ uint32_t data_sel:4;
+ uint32_t pme_en:1;
+ uint32_t rsrvd1:6;
+ uint32_t pwr_st:2;
+#else
+ uint32_t pwr_st:2;
+ uint32_t rsrvd1:6;
+ uint32_t pme_en:1;
+ uint32_t data_sel:4;
+ uint32_t data_scale:2;
+ uint32_t pme_stat:1;
+ uint32_t rsrvd:6;
+ uint32_t b2_b3_supt:1;
+ uint32_t pwr_clk_en:1;
+ uint32_t pwr_data:8;
+#endif
+ } bits;
+} pm_ctrl_stat_t;
+
+
+/*
+ * Register: MsiCap
+ * MSI Capability
+ * Description: MSI Capability
+ * Fields:
+ * Mask and Pending bits available
+ * 64-bit Address Capable
+ * Multiple Messages Enabled
+ * Multiple Message Capable (32 messages = 0x5)
+ * MSI Enabled (if enabled, INTx must be disabled)
+ * Next Capability Pointer: dbi writeable
+ * MSI Capability ID
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:7;
+ uint32_t vect_mask:1;
+ uint32_t msi64_en:1;
+ uint32_t mult_msg_en:3;
+ uint32_t mult_msg_cap:3;
+ uint32_t msi_en:1;
+ uint32_t nxt_cap_ptr:8;
+ uint32_t msi_cap_id:8;
+#else
+ uint32_t msi_cap_id:8;
+ uint32_t nxt_cap_ptr:8;
+ uint32_t msi_en:1;
+ uint32_t mult_msg_cap:3;
+ uint32_t mult_msg_en:3;
+ uint32_t msi64_en:1;
+ uint32_t vect_mask:1;
+ uint32_t rsrvd:7;
+#endif
+ } bits;
+} msi_cap_t;
+
+
+/*
+ * Register: MsiLoAddr
+ * MSI Low Address
+ * Description: MSI Low Address
+ * Fields:
+ * Lower 32 bit Address
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t lo_addr:30;
+ uint32_t rsrvd:2;
+#else
+ uint32_t rsrvd:2;
+ uint32_t lo_addr:30;
+#endif
+ } bits;
+} msi_lo_addr_t;
+
+
+/*
+ * Register: MsiHiAddr
+ * MSI High Address
+ * Description: MSI High Address
+ * Fields:
+ * Upper 32 bit Address (only if msi64En = 1)
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t hi_addr:32;
+#else
+ uint32_t hi_addr:32;
+#endif
+ } bits;
+} msi_hi_addr_t;
+
+
+/*
+ * Register: MsiData
+ * MSI Data
+ * Description: MSI Data
+ * Fields:
+ * MSI Data. Depending on the value for multMsgEn in the MSI
+ * Capability Register which determines the number of allocated
+ * vectors, bits [4:0] may be replaced with msiVector[4:0] bits
+ * to generate up to 32 MSI messages.
+ *
+ *	# allocated vectors	Actual messageData[4:0]
+ *	-------------------	-----------------------
+ *	1			DATA[4:0] (no replacement)
+ *	2			{DATA[4:1], msiVector[0]}
+ *	4			{DATA[4:2], msiVector[1:0]}
+ *	8			{DATA[4:3], msiVector[2:0]}
+ *	16			{DATA[4], msiVector[3:0]}
+ *	32			msiVector[4:0] (full replacement)
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:16;
+ uint32_t data:16;
+#else
+ uint32_t data:16;
+ uint32_t rsrvd:16;
+#endif
+ } bits;
+} msi_data_t;
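+
+/*
+ * Worked example of the vector replacement described above: with
+ * multMsgEn granting 8 vectors, only data bits [2:0] are replaced by
+ * the vector number, so the message data sent for msiVector 5 is
+ *
+ *	(data & ~0x7) | (5 & 0x7)
+ *
+ * i.e. {DATA[15:3], msiVector[2:0]}; with 32 vectors all five low
+ * bits come from msiVector.
+ */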
+
+
+/*
+ * Register: MsiMask
+ * MSI Mask
+ * Description: MSI Mask
+ * Fields:
+ * per vector MSI Mask bits
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mask:32;
+#else
+ uint32_t mask:32;
+#endif
+ } bits;
+} msi_mask_t;
+
+
+/*
+ * Register: MsiPend
+ * MSI Pending
+ * Description: MSI Pending
+ * Fields:
+ * per vector MSI Pending bits
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t pend:32;
+#else
+ uint32_t pend:32;
+#endif
+ } bits;
+} msi_pend_t;
+
+
+/*
+ * Register: MsixCap
+ * MSIX Capability
+ * Description: MSIX Capability
+ * Fields:
+ * MSIX Enable (if enabled, MSI and INTx must be disabled)
+ * Function Mask (1 = all vectors masked regardless of per vector
+ * mask, 0 = each vector's own mask bit applies)
+ * Table Size (0x1F = 32 entries): dbi writeable
+ * Next Capability Pointer: dbi writeable
+ * MSIX Capability ID
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t msix_en:1;
+ uint32_t func_mask:1;
+ uint32_t rsrvd:3;
+ uint32_t tab_sz:11;
+ uint32_t nxt_cap_ptr:8;
+ uint32_t msix_cap_id:8;
+#else
+ uint32_t msix_cap_id:8;
+ uint32_t nxt_cap_ptr:8;
+ uint32_t tab_sz:11;
+ uint32_t rsrvd:3;
+ uint32_t func_mask:1;
+ uint32_t msix_en:1;
+#endif
+ } bits;
+} msix_cap_t;
+
+
+/*
+ * Register: MsixTabOff
+ * MSIX Table Offset
+ * Description: MSIX Table Offset
+ * Fields:
+ * Table Offset (Base address of MSIX Table = msixTabBir.BAR +
+ * msixTabOff) : dbi writeable
+ * Table BAR Indicator (0x2 = BAR2 at loc 0x18) : dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t msix_tab_off:29;
+ uint32_t msix_tab_bir:3;
+#else
+ uint32_t msix_tab_bir:3;
+ uint32_t msix_tab_off:29;
+#endif
+ } bits;
+} msix_tab_off_t;
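+
+/*
+ * The MSI-X table base is the BAR selected by msixTabBir plus the
+ * offset field shifted back into place (the offset occupies bits
+ * [31:3], so the byte offset is the field value << 3). A sketch,
+ * assuming a hypothetical rd32() accessor and msix_base holding the
+ * already-mapped BAR2 base:
+ *
+ *	msix_tab_off_t off;
+ *	uint64_t tab_addr;
+ *
+ *	off.value = rd32(MSIX_TAB_OFF);
+ *	tab_addr = msix_base + ((uint64_t)off.bits.msix_tab_off << 3);
+ */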
+
+
+/*
+ * Register: MsixPbaOff
+ * MSIX PBA Offset
+ * Description: MSIX PBA Offset
+ * Fields:
+ * Pending Bit Array (PBA) Offset (Base address of MSIX PBA =
+ * msixPbaBir.BAR + msixPbaOff); msixPbaOff is quad-aligned, i.e.
+ * starts at 0x2000 (half-way into MSI-X BAR space) : dbi writeable
+ * Pending Bit Array (PBA) BAR Indicator (0x2 = BAR2 at loc 0x18)
+ * : dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t msix_pba_off:29;
+ uint32_t msix_pba_bir:3;
+#else
+ uint32_t msix_pba_bir:3;
+ uint32_t msix_pba_off:29;
+#endif
+ } bits;
+} msix_pba_off_t;
+
+
+/*
+ * Register: PcieCap
+ * PCIE Capability
+ * Description: PCIE Capability
+ * Fields:
+ * Interrupt Message Number (updated by HW)
+ * Slot Implemented (Endpoint must be 0)
+ * PCIE Express Device Port Type (Endpoint)
+ * PCIE Express Capability Version
+ * Next Capability Pointer: dbi writeable
+ * PCI Express Capability ID
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:2;
+ uint32_t int_msg_num:5;
+ uint32_t pcie_slt_imp:1;
+ uint32_t pcie_dev_type:4;
+ uint32_t pcie_cap_ver:4;
+ uint32_t nxt_cap_ptr:8;
+ uint32_t pcie_cap_id:8;
+#else
+ uint32_t pcie_cap_id:8;
+ uint32_t nxt_cap_ptr:8;
+ uint32_t pcie_cap_ver:4;
+ uint32_t pcie_dev_type:4;
+ uint32_t pcie_slt_imp:1;
+ uint32_t int_msg_num:5;
+ uint32_t rsrvd:2;
+#endif
+ } bits;
+} pcie_cap_t;
+
+
+/*
+ * Register: DevCap
+ * Device Capability
+ * Description: Device Capability
+ * Fields:
+ * Slot Power Limit Scale (Msg from RC) Hydra can capture
+ * Received setSlotPowerLimit message; values in this field are
+ * ignored as no power scaling is possible.
+ * Slot Power Limit Value (Msg from RC) Hydra can capture
+ * Received setSlotPowerLimit message; values in this field are
+ * ignored as no power scaling is possible.
+ * Introduced in PCIe 1.1 specification. : dbi writeable
+ * L1 Acceptable Latency (4 - 8 us) : dbi writeable
+ * LOs Acceptable Latency (2 - 4 us) : dbi writeable
+ * Extended Tag Field Support (N/A) : dbi writeable
+ * Phantom Function Supported (N/A) : dbi writeable
+ * Maximum Payload Size supported (Hydra = 1KB) : dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:4;
+ uint32_t slt_pwr_lmt_scle:2;
+ uint32_t slt_pwr_lmt_val:8;
+ uint32_t rsrvd1:2;
+ uint32_t role_based_err:1;
+ uint32_t rsrvd2:3;
+ uint32_t l1_lat:3;
+ uint32_t los_lat:3;
+ uint32_t ext_tag:1;
+ uint32_t phant_func:2;
+ uint32_t max_mtu:3;
+#else
+ uint32_t max_mtu:3;
+ uint32_t phant_func:2;
+ uint32_t ext_tag:1;
+ uint32_t los_lat:3;
+ uint32_t l1_lat:3;
+ uint32_t rsrvd2:3;
+ uint32_t role_based_err:1;
+ uint32_t rsrvd1:2;
+ uint32_t slt_pwr_lmt_val:8;
+ uint32_t slt_pwr_lmt_scle:2;
+ uint32_t rsrvd:4;
+#endif
+ } bits;
+} dev_cap_t;
+
+
+/*
+ * Register: DevStatCtrl
+ * Device Status and Control
+ * Description: Device Control
+ * Fields:
+ * Transaction Pending (1 if NP request not completed)
+ * Auxiliary Power Detected (1 if detected)
+ * Unsupported Request Detect
+ * Fatal Error Detected
+ * Non-Fatal Error Detected
+ * Correctable Error Detected ----- Control Fields
+ * Introduced in PCIe 1.1 specification.
+ * Maximum Read Request Size (default = 512B) for the device as a
+ * requester. 3'b000: 128 Bytes 3'b001: 256 Bytes 3'b010: 512
+ * Bytes 3'b011: 1K Bytes 3'b100: 2K Bytes 3'b101: 4K Bytes
+ * 3'b110: Reserved 3'b111: Reserved
+ * No Snoop Enable This bit indicates the device "could", not
+ * that it does. Both this bit and the hydra specific peuCip
+ * register bit must be set for the value of this bit to impact
+ * the TLP header no snoop attribute. When both are set, hydra
+ * sets the no snoop attribute on all initiated TLPs. Software
+ * must guarantee the No Snoop attribute is used in the system
+ * correctly.
+ * Auxiliary Power PM Enable
+ * Phantom Function enable
+ * Extended Tag Field Enable
+ * Maximum Payload Size. 3-bit value has the same encodings as
+ * the maxRdSz field.
+ * Relaxed Ordering Enable This bit indicates the device "could",
+ * not that it does. Both this bit and the hydra specific peuCip
+ * register bit must be set for the value of this bit to impact
+ * the TLP header relaxed ordering attribute. When both are set,
+ * packet operations set the relaxed ordering attribute. Mailbox
+ * updates always set the relaxed ordering attribute to 0,
+ * regardless of this bit. When this bit is 0, the default
+ * Sun4u/Sun4v ordering model is used.
+ * Unsupported Request Report Enable
+ * Fatal Error Report Enable
+ * Non-Fatal Error Report Enable
+ * Correctable Error Report Enable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:10;
+ uint32_t trans_pend:1;
+ uint32_t aux_pwr_det:1;
+ uint32_t unsup_req_det:1;
+ uint32_t fat_err_det:1;
+ uint32_t nf_err_det:1;
+ uint32_t corr_err_det:1;
+ uint32_t pcie2pcix_brdg:1;
+ uint32_t max_rd_sz:3;
+ uint32_t no_snoop_en:1;
+ uint32_t aux_pwr_pm_en:1;
+ uint32_t phant_func_en:1;
+ uint32_t ext_tag_en:1;
+ uint32_t max_pld_sz:3;
+ uint32_t rlx_ord_en:1;
+ uint32_t unsup_req_en:1;
+ uint32_t fat_err_en:1;
+ uint32_t nf_err_en:1;
+ uint32_t corr_err_en:1;
+#else
+ uint32_t corr_err_en:1;
+ uint32_t nf_err_en:1;
+ uint32_t fat_err_en:1;
+ uint32_t unsup_req_en:1;
+ uint32_t rlx_ord_en:1;
+ uint32_t max_pld_sz:3;
+ uint32_t ext_tag_en:1;
+ uint32_t phant_func_en:1;
+ uint32_t aux_pwr_pm_en:1;
+ uint32_t no_snoop_en:1;
+ uint32_t max_rd_sz:3;
+ uint32_t pcie2pcix_brdg:1;
+ uint32_t corr_err_det:1;
+ uint32_t nf_err_det:1;
+ uint32_t fat_err_det:1;
+ uint32_t unsup_req_det:1;
+ uint32_t aux_pwr_det:1;
+ uint32_t trans_pend:1;
+ uint32_t rsrvd:10;
+#endif
+ } bits;
+} dev_stat_ctrl_t;
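+
+/*
+ * The 3-bit maxRdSz and maxPldSz encodings above are powers of two
+ * starting at 128 bytes, so a field value decodes as 128 << field;
+ * e.g. the default maxRdSz of 3'b010 gives 128 << 2 = 512 bytes.
+ * A sketch, assuming a hypothetical rd32() accessor:
+ *
+ *	dev_stat_ctrl_t dsc;
+ *	uint32_t max_read_bytes;
+ *
+ *	dsc.value = rd32(DEV_STAT_CTRL);
+ *	max_read_bytes = 128 << dsc.bits.max_rd_sz;
+ */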
+
+
+/*
+ * Register: LnkCap
+ * Link Capability
+ * Description: Link Capability
+ * Fields:
+ * Port Number : dbi writeable
+ * Introduced in PCIe 1.1 specification.
+ * Introduced in PCIe 1.1 specification.
+ * Default Clock Power Management (N/A) Introduced in PCIe 1.1
+ * specification. : dbi writeable
+ * Default L1 Exit Latency (32us to 64us => 0x6) : dbi writeable
+ * Default L0s Exit Latency (1us to 2us => 0x5) : dbi writeable
+ * Active Link PM Support (only L0s = 1) : dbi writeable
+ * Maximum Link Width (x8) : dbi writeable
+ * Maximum Link Speed (2.5 Gbps = 1) : dbi writeable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t prt_num:8;
+ uint32_t rsrvd:3;
+ uint32_t def_dll_act_rptg:1;
+ uint32_t def_surpise_down:1;
+ uint32_t def_clk_pm_cap:1;
+ uint32_t def_l1_lat:3;
+ uint32_t def_l0s_lat:3;
+ uint32_t as_lnk_pm_supt:2;
+ uint32_t max_lnk_wid:6;
+ uint32_t max_lnk_spd:4;
+#else
+ uint32_t max_lnk_spd:4;
+ uint32_t max_lnk_wid:6;
+ uint32_t as_lnk_pm_supt:2;
+ uint32_t def_l0s_lat:3;
+ uint32_t def_l1_lat:3;
+ uint32_t def_clk_pm_cap:1;
+ uint32_t def_surpise_down:1;
+ uint32_t def_dll_act_rptg:1;
+ uint32_t rsrvd:3;
+ uint32_t prt_num:8;
+#endif
+ } bits;
+} lnk_cap_t;
+
+
+/*
+ * Register: LnkStatCtrl
+ * Link Status and Control
+ * Description: Link Control
+ * Fields:
+ * Slot Clock Configuration (0 = using independent clock; pg 266
+ * PCIe 1.1) : dbi writeable
+ * Link Training (N/A for EP)
+ * Training Error (N/A for EP)
+ * Negotiated Link Width (Max negotiated: x8)
+ * Negotiated Link Speed (Max negotiated: 1 = 2.5 Gbps) -----
+ * Control Fields
+ * Introduced in PCIe 1.1.
+ * Extended Synch
+ * Common Clock Configuration
+ * Retrain Link (N/A for EP)
+ * Link Disable (N/A for EP)
+ * Read Completion Boundary (128B)
+ * Active State Link PM Control
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:2;
+ uint32_t dll_active:1;
+ uint32_t slt_clk_cfg:1;
+ uint32_t lnk_train:1;
+ uint32_t train_err:1;
+ uint32_t lnk_wid:6;
+ uint32_t lnk_spd:4;
+ uint32_t rsrvd1:7;
+ uint32_t en_clkpwr_mg:1;
+ uint32_t ext_sync:1;
+ uint32_t com_clk_cfg:1;
+ uint32_t retrain_lnk:1;
+ uint32_t lnk_dis:1;
+ uint32_t rd_cmpl_bndy:1;
+ uint32_t rsrvd2:1;
+ uint32_t aspm_ctrl:2;
+#else
+ uint32_t aspm_ctrl:2;
+ uint32_t rsrvd2:1;
+ uint32_t rd_cmpl_bndy:1;
+ uint32_t lnk_dis:1;
+ uint32_t retrain_lnk:1;
+ uint32_t com_clk_cfg:1;
+ uint32_t ext_sync:1;
+ uint32_t en_clkpwr_mg:1;
+ uint32_t rsrvd1:7;
+ uint32_t lnk_spd:4;
+ uint32_t lnk_wid:6;
+ uint32_t train_err:1;
+ uint32_t lnk_train:1;
+ uint32_t slt_clk_cfg:1;
+ uint32_t dll_active:1;
+ uint32_t rsrvd:2;
+#endif
+ } bits;
+} lnk_stat_ctrl_t;
+
+
+/*
+ * Register: VenCapHdr
+ * Vendor Specific Capability Header
+ * Description: Vendor Specific Capability Header
+ * Fields:
+ * Length
+ * Next Capability Pointer
+ * Vendor Specific Capability ID
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:8;
+ uint32_t len:8;
+ uint32_t nxt_cap_ptr:8;
+ uint32_t ven_cap_id:8;
+#else
+ uint32_t ven_cap_id:8;
+ uint32_t nxt_cap_ptr:8;
+ uint32_t len:8;
+ uint32_t rsrvd:8;
+#endif
+ } bits;
+} ven_cap_hdr_t;
+
+
+/*
+ * Register: VenCtrl
+ * Vendor Specific Control
+ * Description: Vendor Specific Control
+ * Fields:
+ * PCIe spec absolute minimum is 50usec (likely ~10ms). PCIe
+ * spec absolute max is 50msec. Default set for 22.2 msec by
+ * adding time as follows:
+ *	Bit 23: 3.21 secs	<---POR 0
+ *	Bit 22: 201.3 msec	<---POR 0
+ *	Bit 21: 100.8 msec	<---POR 0
+ *	Bit 20: 25.2 msec	<---POR 0
+ *	Bit 19: 12.6 msec	<---POR 1
+ *	Bit 18: 6.3 msec	<---POR 1
+ *	Bit 17: 3.3 msec	<---POR 1
+ *	Bit 16: if 0: Baseline0 = 50usec; else
+ *		Baseline1 (use for simulation only) = 804nsec
+ * Interrupt Control Mode (00 = Reserved, 01 = INTx emulation, 10
+ * = Reserved [Neptune INTx pins], 11 = Reserved [Neptune INTx
+ * emulation + pins]
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:8;
+ uint32_t eic_xtd_cpl_timout:8;
+ uint32_t rsrvd1:14;
+ uint32_t legacy_int_ctrl:2;
+#else
+ uint32_t legacy_int_ctrl:2;
+ uint32_t rsrvd1:14;
+ uint32_t eic_xtd_cpl_timout:8;
+ uint32_t rsrvd:8;
+#endif
+ } bits;
+} ven_ctrl_t;
+
+
+/*
+ * Register: VenPrtHdr
+ * Vendor Specific Port Logic Header
+ * Description: Vendor Specific Port Logic Header
+ * Fields:
+ * Length
+ * Next Capability Pointer (END, no more)
+ * Vendor Specific Capability ID
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:8;
+ uint32_t len:8;
+ uint32_t nxt_cap_ptr:8;
+ uint32_t ven_cap_id:8;
+#else
+ uint32_t ven_cap_id:8;
+ uint32_t nxt_cap_ptr:8;
+ uint32_t len:8;
+ uint32_t rsrvd:8;
+#endif
+ } bits;
+} ven_prt_hdr_t;
+
+
+/*
+ * Register: AcklatReplay
+ * Ack Latency and Replay Timer register
+ * Description: Ack Latency/Replay Timer
+ * Fields:
+ * Replay Time limit = 16'd12429/`cxNb where cxNb=1.
+ * Round Trip Latency Time limit = 9'd4143/`cxNb where cxNb=1.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rep_tim:16;
+ uint32_t ack_tim:16;
+#else
+ uint32_t ack_tim:16;
+ uint32_t rep_tim:16;
+#endif
+ } bits;
+} acklat_replay_t;
+
+
+/*
+ * Register: OthMsg
+ * Other Message Register
+ * Description: Other Message Register
+ * Fields:
+ * Message to send/Data to corrupt LCRC
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t oth_msg:32;
+#else
+ uint32_t oth_msg:32;
+#endif
+ } bits;
+} oth_msg_t;
+
+
+/*
+ * Register: ForceLink
+ * Port Force Link
+ * Description: Other Message Register
+ * Fields:
+ * LinkState that the EP core will be forced to when ForceLink
+ * (bit[15]) is set
+ * Forces Link to the specified LinkState field below. Write this
+ * bit to generate a pulse to the ltssm. It clears itself once
+ * the pulse is generated. Read will always return 0.
+ * Link Number - N/A for Endpoint
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:10;
+ uint32_t link_state:6;
+ uint32_t force_link:1;
+ uint32_t rsrvd1:7;
+ uint32_t link_num:8;
+#else
+ uint32_t link_num:8;
+ uint32_t rsrvd1:7;
+ uint32_t force_link:1;
+ uint32_t link_state:6;
+ uint32_t rsrvd:10;
+#endif
+ } bits;
+} force_link_t;
+
+
+/*
+ * Register: AckFreq
+ * ACK Frequency Register
+ * Description: ACK Frequency Register
+ * Fields:
+ * NFTS = 115.
+ * NFTS = 115.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:2;
+ uint32_t l1_entr_latency:3;
+ uint32_t los_entr_latency:3;
+ uint32_t cx_comm_nfts:8;
+ uint32_t nfts:8;
+ uint32_t def_ack_freq:8;
+#else
+ uint32_t def_ack_freq:8;
+ uint32_t nfts:8;
+ uint32_t cx_comm_nfts:8;
+ uint32_t los_entr_latency:3;
+ uint32_t l1_entr_latency:3;
+ uint32_t rsrvd:2;
+#endif
+ } bits;
+} ack_freq_t;
+
+
+/*
+ * Register: LinkCtrl
+ * Port Link Control
+ * Description: Port Link Control
+ * Fields:
+ * 8 lanes
+ * When set, this bit is only set for 1 cycle. A write of 0 has
+ * no effect.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:4;
+ uint32_t rsrvd1:2;
+ uint32_t corrupt_lcrc:1;
+ uint32_t rsrvd2:1;
+ uint32_t rsrvd3:2;
+ uint32_t link_mode_en:6;
+ uint32_t rsrvd4:4;
+ uint32_t rsrvd5:4;
+ uint32_t fast_link_mode:1;
+ uint32_t rsrvd6:1;
+ uint32_t dll_link_en:1;
+ uint32_t rsrvd7:1;
+ uint32_t reset_assert:1;
+ uint32_t lpbk_en:1;
+ uint32_t scram_dis:1;
+ uint32_t oth_msg_req:1;
+#else
+ uint32_t oth_msg_req:1;
+ uint32_t scram_dis:1;
+ uint32_t lpbk_en:1;
+ uint32_t reset_assert:1;
+ uint32_t rsrvd7:1;
+ uint32_t dll_link_en:1;
+ uint32_t rsrvd6:1;
+ uint32_t fast_link_mode:1;
+ uint32_t rsrvd5:4;
+ uint32_t rsrvd4:4;
+ uint32_t link_mode_en:6;
+ uint32_t rsrvd3:2;
+ uint32_t rsrvd2:1;
+ uint32_t corrupt_lcrc:1;
+ uint32_t rsrvd1:2;
+ uint32_t rsrvd:4;
+#endif
+ } bits;
+} link_ctrl_t;
+
+
+/*
+ * Register: LaneSkew
+ * Lane Skew Register
+ * Description: Lane Skew Register
+ * Fields:
+ * prevents EP core from sending Ack/Nack DLLPs
+ * prevents EP core from sending FC DLLPs
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t dis_lane_to_lane_deskew:1;
+ uint32_t rsrvd:5;
+ uint32_t ack_nack_dis:1;
+ uint32_t flow_control_dis:1;
+ uint32_t tx_lane_skew:24;
+#else
+ uint32_t tx_lane_skew:24;
+ uint32_t flow_control_dis:1;
+ uint32_t ack_nack_dis:1;
+ uint32_t rsrvd:5;
+ uint32_t dis_lane_to_lane_deskew:1;
+#endif
+ } bits;
+} lane_skew_t;
+
+
+/*
+ * Register: SymbolNum
+ * Symbol Number Register
+ * Description: Symbol Number Register
+ * Fields:
+ * Timer modifier for Flow control Watch Dog timer
+ * Timer modifier for Ack/Nack latency timer
+ * Timer modifier for Replay timer
+ * Note: rtl uses defaultNSkipSymbols
+ * Note: rtl initialized using defaultNTs1Symbols
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:3;
+ uint32_t fc_wdog_tim_mod:5;
+ uint32_t ack_nack_tim_mod:5;
+ uint32_t rep_tim_mod:5;
+ uint32_t rsrvd1:3;
+ uint32_t num_skip_symb:3;
+ uint32_t rsrvd2:4;
+ uint32_t num_ts_symb:4;
+#else
+ uint32_t num_ts_symb:4;
+ uint32_t rsrvd2:4;
+ uint32_t num_skip_symb:3;
+ uint32_t rsrvd1:3;
+ uint32_t rep_tim_mod:5;
+ uint32_t ack_nack_tim_mod:5;
+ uint32_t fc_wdog_tim_mod:5;
+ uint32_t rsrvd:3;
+#endif
+ } bits;
+} symbol_num_t;
+
+
+/*
+ * Register: SymbTimRadmFlt1
+ * Symbol Timer Register / RADM Filter Mask Register 1
+ * Description: Symbol Timer / RADM Filter Mask 1
+ * Fields:
+ * No masking errors while filtering
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mask_radm_flt:16;
+ uint32_t dis_fc_wdog:1;
+ uint32_t rsrvd:4;
+ uint32_t skip_interval:11;
+#else
+ uint32_t skip_interval:11;
+ uint32_t rsrvd:4;
+ uint32_t dis_fc_wdog:1;
+ uint32_t mask_radm_flt:16;
+#endif
+ } bits;
+} symb_tim_radm_flt1_t;
+
+
+/*
+ * Register: RadmFlt2
+ * RADM Filter Mask Register 2
+ * Description: RADM Filter Mask Register 2
+ * Fields:
+ * [31:2] = Reserved [1]=0=Vendor MSG Type0 dropped & treated as
+ * UR, [0]=0=Vendor MSG Type1 silently dropped.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mask_radm_flt:32;
+#else
+ uint32_t mask_radm_flt:32;
+#endif
+ } bits;
+} radm_flt2_t;
+
+
+/*
+ * Register: CascadeDebReg0
+ * Cascade core (EP) Debug Register 0
+ * Description: Debug Register 0 EP Core SII Interface bus :
+ * cxplDebugInfo[31:0]
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rmlh_ts_link_ctrl:4;
+ uint32_t rmlh_ts_lane_num_is_k237:1;
+ uint32_t rmlh_ts_link_num_is_k237:1;
+ uint32_t rmlh_rcvd_idle_bit0:1;
+ uint32_t rmlh_rcvd_idle_bit1:1;
+ uint32_t mac_phy_txdata:16;
+ uint32_t mac_phy_txdatak:2;
+ uint32_t rsrvd:1;
+ uint32_t xmlh_ltssm_state:5;
+#else
+ uint32_t xmlh_ltssm_state:5;
+ uint32_t rsrvd:1;
+ uint32_t mac_phy_txdatak:2;
+ uint32_t mac_phy_txdata:16;
+ uint32_t rmlh_rcvd_idle_bit1:1;
+ uint32_t rmlh_rcvd_idle_bit0:1;
+ uint32_t rmlh_ts_link_num_is_k237:1;
+ uint32_t rmlh_ts_lane_num_is_k237:1;
+ uint32_t rmlh_ts_link_ctrl:4;
+#endif
+ } bits;
+} cascade_deb_reg0_t;
+
+
+/*
+ * Register: CascadeDebReg1
+ * Cascade Core (EP) Debug Register 1
+ * Description: Debug Register 1 EP Core SII Interface bus :
+ * cxplDebugInfo[63:32]
+ * Fields:
+ * PCIe Link status. 0=down, 1=up
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t xmlh_scrambler_disable:1;
+ uint32_t xmlh_link_disable:1;
+ uint32_t xmlh_link_in_training:1;
+ uint32_t xmlh_rcvr_revrs_pol_en:1;
+ uint32_t xmlh_training_rst_n:1;
+ uint32_t rsrvd:4;
+ uint32_t mac_phy_txdetectrx_loopback:1;
+ uint32_t mac_phy_txelecidle_bit0:1;
+ uint32_t mac_phy_txcompliance_bit0:1;
+ uint32_t app_init_rst:1;
+ uint32_t rsrvd1:3;
+ uint32_t rmlh_rs_link_num:8;
+ uint32_t rmlh_link_mode:3;
+ uint32_t xmlh_link_up:1;
+ uint32_t rmlh_inskip_rcv:1;
+ uint32_t rmlh_ts1_rcvd:1;
+ uint32_t rmlh_ts2_rcvd:1;
+ uint32_t rmlh_rcvd_lane_rev:1;
+#else
+ uint32_t rmlh_rcvd_lane_rev:1;
+ uint32_t rmlh_ts2_rcvd:1;
+ uint32_t rmlh_ts1_rcvd:1;
+ uint32_t rmlh_inskip_rcv:1;
+ uint32_t xmlh_link_up:1;
+ uint32_t rmlh_link_mode:3;
+ uint32_t rmlh_rs_link_num:8;
+ uint32_t rsrvd1:3;
+ uint32_t app_init_rst:1;
+ uint32_t mac_phy_txcompliance_bit0:1;
+ uint32_t mac_phy_txelecidle_bit0:1;
+ uint32_t mac_phy_txdetectrx_loopback:1;
+ uint32_t rsrvd:4;
+ uint32_t xmlh_training_rst_n:1;
+ uint32_t xmlh_rcvr_revrs_pol_en:1;
+ uint32_t xmlh_link_in_training:1;
+ uint32_t xmlh_link_disable:1;
+ uint32_t xmlh_scrambler_disable:1;
+#endif
+ } bits;
+} cascade_deb_reg1_t;
+
+
+/*
+ * Register: TxpFcCreditStat
+ * Transmit Posted FC Credit Status
+ * Description: Transmit Posted FC Credit Status
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:12;
+ uint32_t txp_fc_hdr_credit_stat:8;
+ uint32_t txp_fc_data_credit_stat:12;
+#else
+ uint32_t txp_fc_data_credit_stat:12;
+ uint32_t txp_fc_hdr_credit_stat:8;
+ uint32_t rsrvd:12;
+#endif
+ } bits;
+} txp_fc_credit_stat_t;
+
+
+/*
+ * Register: TxnpFcCreditStat
+ * Transmit Non-Posted FC Credit Status
+ * Description: Transmit Non-Posted FC Credit Status
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:12;
+ uint32_t txnp_fc_hdr_credit_stat:8;
+ uint32_t txnp_fc_data_credit_stat:12;
+#else
+ uint32_t txnp_fc_data_credit_stat:12;
+ uint32_t txnp_fc_hdr_credit_stat:8;
+ uint32_t rsrvd:12;
+#endif
+ } bits;
+} txnp_fc_credit_stat_t;
+
+
+/*
+ * Register: TxcplFcCreditStat
+ * Transmit Completion FC Credit Status
+ * Description: Transmit Completion FC Credit Status
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:12;
+ uint32_t txcpl_fc_hdr_credit_stat:8;
+ uint32_t txcpl_fc_data_credit_stat:12;
+#else
+ uint32_t txcpl_fc_data_credit_stat:12;
+ uint32_t txcpl_fc_hdr_credit_stat:8;
+ uint32_t rsrvd:12;
+#endif
+ } bits;
+} txcpl_fc_credit_stat_t;
+
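+/*
+ * Editorial usage sketch: the three transmit FC credit status registers
+ * above share one layout (12-bit data credits, 8-bit header credits),
+ * so one helper can report both counts. hxge_peu_rd32() and "off" are
+ * hypothetical stand-ins for the driver's PIO read and the offset of
+ * whichever credit status register is being sampled.
+ */
+extern uint32_t hxge_peu_rd32(uint32_t off);	/* hypothetical accessor */
+
+static void
+txp_fc_credits(uint32_t off, uint32_t *hdr, uint32_t *data)
+{
+	txp_fc_credit_stat_t stat;
+
+	stat.value = hxge_peu_rd32(off);
+	*hdr = stat.bits.txp_fc_hdr_credit_stat;
+	*data = stat.bits.txp_fc_data_credit_stat;
+}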
+
+/*
+ * Register: QueueStat
+ * Queue Status
+ * Description: Queue Status
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:29;
+ uint32_t rx_queue_not_empty:1;
+ uint32_t tx_rbuf_not_empty:1;
+ uint32_t tx_fc_credit_not_ret:1;
+#else
+ uint32_t tx_fc_credit_not_ret:1;
+ uint32_t tx_rbuf_not_empty:1;
+ uint32_t rx_queue_not_empty:1;
+ uint32_t rsrvd:29;
+#endif
+ } bits;
+} queue_stat_t;
+
+
+/*
+ * Register: GbtDebug0
+ * GBT Debug, Status register
+ * Description: This register returns bits [31:0] of the PIPE core's
+ * gbtDebug bus
+ * Fields:
+ * [6] & [22] = rxclkO will always read 1'b0
+ * [7] & [23] = tbcout10O will always read 1'b0
+ * The value specified here is the Power On Reset value as given
+ * in spec. except for the clock bits which are hardwired to
+ * 1'b0.
+ * The gbtDebug[0:15] bus is provided for each lane as an output
+ * from the pcieGbtopWrapper.v module. These signals are not
+ * required for manufacturing test and may be left unconnected.
+ * The cw00041130PipeParam.vh bus width is the number of lanes
+ * multiplied by 16. lane0 is bits[15:0], lane1 is bits[31:16],
+ * lane2 is bits[47:32], lane3 is bits[63:48], lane4 is
+ * bits[79:64], lane5 is bits[95:80], lane6 is bits[111:96],
+ * lane7 is bits[127:112].
+ * Refer to section 4.2.2.4, Gigablaze Debug Signals section.
+ * (pgs 4.27 - 4.28) in the following document :
+ * /home/cadtools/cores/lsi/cw000411/cw00041131/prod/docs/manuals/
+ * cw000411TechMan.pdf
+ * lane0 is bits[15:0], which is gbtDebug0[15:0] lane1 is
+ * bits[31:16], which is gbtDebug0[31:16]
+ *
+ * -------------------------------------------------------------------------
+ * Signal           Bit             [Bit]   Reset  Description
+ * -------------------------------------------------------------------------
+ * gbtResetRbcI     [16n+15]        [15]    1      Reset receiver bit clock
+ * gbtResetTbc20I   [16n+14]        [14]    1      Reset transmit 20-bit clock
+ * gbtResetRI       [16n+13]        [13]    1      Reset receiver logic
+ * gbtResetTI       [16n+12]        [12]    1      Reset transmit logic
+ * reserved         [16n+11:16n+8]  [11:8]  0      reserved
+ * gbtTbcout10      [16n+7]         [7]     1      transmit clock 10-bit
+ * gbtRxclkO        [16n+6]         [6]     0      receiver PLL clock
+ * gbtRxpresO       [16n+5]         [5]     0      receiver detect present
+ * gbtRxpresvalidO  [16n+4]         [4]     0      gbtRxpresO is valid
+ * gbtRxlosO        [16n+3]         [3]     1      raw receiver loss-of-signal
+ * gbtPassnO        [16n+2]         [2]     1      GigaBlaze BIST pass,
+ *                                                 active low
+ * reserved         [16n+1]         [1]     0      reserved
+ * reserved         [16n]           [0]     0      reserved
+ * -------------------------------------------------------------------------
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} gbt_debug0_t;
+
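+/*
+ * Editorial usage sketch: each GbtDebug word packs two 16-bit per-lane
+ * debug fields (low half = even lane, high half = odd lane). The bit
+ * decode follows the table above; GBT_RXLOS_BIT names bit 3 (gbtRxlosO)
+ * and is introduced here purely for the example.
+ */
+#define	GBT_RXLOS_BIT	3	/* gbtRxlosO: raw receiver loss-of-signal */
+
+static int
+gbt_lane_rxlos(gbt_debug0_t dbg, int odd_lane)
+{
+	uint32_t lane16 = odd_lane ? (dbg.bits.data >> 16) :
+	    (dbg.bits.data & 0xffff);
+
+	return ((lane16 >> GBT_RXLOS_BIT) & 0x1);
+}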
+
+/*
+ * Register: GbtDebug1
+ * GBT Debug, Status register
+ * Description: This register returns bits [63:32] of the PIPE core's
+ * gbtDebug bus
+ * Fields:
+ * [6] & [22] = rxclkO will always read 1'b0
+ * [7] & [23] = tbcout10O will always read 1'b0
+ * The value specified here is the Power On Reset value as given
+ * in spec. except for the clock bits which are hardwired to
+ * 1'b0.
+ * The gbtDebug[0:15] bus is provided for each lane as an output
+ * from the pcieGbtopWrapper.v module. These signals are not
+ * required for manufacturing test and may be left unconnected.
+ * The cw00041130PipeParam.vh bus width is the number of lanes
+ * multiplied by 16.
+ * Refer to section 4.2.2.4, Gigablaze Debug Signals section.
+ * (pgs 4.27 - 4.28) in the following document :
+ * /home/cadtools/cores/lsi/cw000411/cw00041131/prod/docs/manuals/
+ * cw000411TechMan.pdf
+ * lane2 is bits[47:32], which is gbtDebug1[15:0] lane3 is
+ * bits[63:48], which is gbtDebug1[31:16]
+ *
+ * -------------------------------------------------------------------------
+ * Signal           Bit             [Bit]   Reset  Description
+ * -------------------------------------------------------------------------
+ * gbtResetRbcI     [16n+15]        [15]    1      Reset receiver bit clock
+ * gbtResetTbc20I   [16n+14]        [14]    1      Reset transmit 20-bit clock
+ * gbtResetRI       [16n+13]        [13]    1      Reset receiver logic
+ * gbtResetTI       [16n+12]        [12]    1      Reset transmit logic
+ * reserved         [16n+11:16n+8]  [11:8]  0      reserved
+ * gbtTbcout10      [16n+7]         [7]     1      transmit clock 10-bit
+ * gbtRxclkO        [16n+6]         [6]     0      receiver PLL clock
+ * gbtRxpresO       [16n+5]         [5]     0      receiver detect present
+ * gbtRxpresvalidO  [16n+4]         [4]     0      gbtRxpresO is valid
+ * gbtRxlosO        [16n+3]         [3]     1      raw receiver loss-of-signal
+ * gbtPassnO        [16n+2]         [2]     1      GigaBlaze BIST pass,
+ *                                                 active low
+ * reserved         [16n+1]         [1]     0      reserved
+ * reserved         [16n]           [0]     0      reserved
+ * -------------------------------------------------------------------------
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} gbt_debug1_t;
+
+
+/*
+ * Register: GbtDebug2
+ * GBT Debug, Status register
+ * Description: This register returns bits [95:64] of the PIPE core's
+ * gbtDebug bus
+ * Fields:
+ * [6] & [22] = rxclkO will always read 1'b0
+ * [7] & [23] = tbcout10O will always read 1'b0
+ * The value specified here is the Power On Reset value as given
+ * in spec. except for the clock bits which are hardwired to
+ * 1'b0.
+ * The gbtDebug[0:15] bus is provided for each lane as an output
+ * from the pcieGbtopWrapper.v module. These signals are not
+ * required for manufacturing test and may be left unconnected.
+ * The cw00041130PipeParam.vh bus width is the number of lanes
+ * multiplied by 16.
+ * Refer to section 4.2.2.4, Gigablaze Debug Signals section.
+ * (pgs 4.27 - 4.28) in the following document :
+ * /home/cadtools/cores/lsi/cw000411/cw00041131/prod/docs/manuals/
+ * cw000411TechMan.pdf
+ * lane4 is bits[79:64], which is gbtDebug2[15:0] lane5 is
+ * bits[95:80], which is gbtDebug2[31:16]
+ *
+ * -------------------------------------------------------------------------
+ * Signal           Bit             [Bit]   Reset  Description
+ * -------------------------------------------------------------------------
+ * gbtResetRbcI     [16n+15]        [15]    1      Reset receiver bit clock
+ * gbtResetTbc20I   [16n+14]        [14]    1      Reset transmit 20-bit clock
+ * gbtResetRI       [16n+13]        [13]    1      Reset receiver logic
+ * gbtResetTI       [16n+12]        [12]    1      Reset transmit logic
+ * reserved         [16n+11:16n+8]  [11:8]  0      reserved
+ * gbtTbcout10      [16n+7]         [7]     1      transmit clock 10-bit
+ * gbtRxclkO        [16n+6]         [6]     0      receiver PLL clock
+ * gbtRxpresO       [16n+5]         [5]     0      receiver detect present
+ * gbtRxpresvalidO  [16n+4]         [4]     0      gbtRxpresO is valid
+ * gbtRxlosO        [16n+3]         [3]     1      raw receiver loss-of-signal
+ * gbtPassnO        [16n+2]         [2]     1      GigaBlaze BIST pass,
+ *                                                 active low
+ * reserved         [16n+1]         [1]     0      reserved
+ * reserved         [16n]           [0]     0      reserved
+ * -------------------------------------------------------------------------
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} gbt_debug2_t;
+
+
+/*
+ * Register: GbtDebug3
+ * GBT Debug, Status register
+ * Description: This register returns bits [127:96] of the PIPE
+ * core's gbtDebug bus
+ * Fields:
+ * [6] & [22] = rxclkO will always read 1'b0
+ * [7] & [23] = tbcout10O will always read 1'b0
+ * The value specified here is the Power On Reset value as given
+ * in spec. except for the clock bits which are hardwired to
+ * 1'b0.
+ * The gbtDebug[0:15] bus is provided for each lane as an output
+ * from the pcieGbtopWrapper.v module. These signals are not
+ * required for manufacturing test and may be left unconnected.
+ * The cw00041130PipeParam.vh bus width is the number of lanes
+ * multiplied by 16.
+ * Refer to section 4.2.2.4, Gigablaze Debug Signals section.
+ * (pgs 4.27 - 4.28) in the following document :
+ * /home/cadtools/cores/lsi/cw000411/cw00041131/prod/docs/manuals/
+ * cw000411TechMan.pdf
+ * lane6 is bits[111:96], which is gbtDebug3[15:0] lane7 is
+ * bits[127:112], which is gbtDebug3[31:16]
+ *
+ * -------------------------------------------------------------------------
+ * Signal           Bit             [Bit]   Reset  Description
+ * -------------------------------------------------------------------------
+ * gbtResetRbcI     [16n+15]        [15]    1      Reset receiver bit clock
+ * gbtResetTbc20I   [16n+14]        [14]    1      Reset transmit 20-bit clock
+ * gbtResetRI       [16n+13]        [13]    1      Reset receiver logic
+ * gbtResetTI       [16n+12]        [12]    1      Reset transmit logic
+ * reserved         [16n+11:16n+8]  [11:8]  0      reserved
+ * gbtTbcout10      [16n+7]         [7]     1      transmit clock 10-bit
+ * gbtRxclkO        [16n+6]         [6]     0      receiver PLL clock
+ * gbtRxpresO       [16n+5]         [5]     0      receiver detect present
+ * gbtRxpresvalidO  [16n+4]         [4]     0      gbtRxpresO is valid
+ * gbtRxlosO        [16n+3]         [3]     1      raw receiver loss-of-signal
+ * gbtPassnO        [16n+2]         [2]     1      GigaBlaze BIST pass,
+ *                                                 active low
+ * reserved         [16n+1]         [1]     0      reserved
+ * reserved         [16n]           [0]     0      reserved
+ * -------------------------------------------------------------------------
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} gbt_debug3_t;
+
+
+/*
+ * Register: PipeDebug0
+ * PIPE Debug, status register
+ * Description: This register returns bits [31:0] of the PIPE core's
+ * pipeDebug bus
+ * Fields:
+ * The value specified here is the Power On Reset value as given
+ * in spec.
+ * This 16-bit debug bus reports operating conditions for the
+ * PIPE. The pipeDebug[0:15] bus is provided for each lane. lane0
+ * is bits[15:0], lane1 is bits[31:16], lane2 is bits[47:32],
+ * lane3 is bits[63:48], lane4 is bits[79:64], lane5 is
+ * bits[95:80], lane6 is bits[111:96], lane7 is bits[127:112].
+ * Refer to section 4.2.1.5 Single-Lane PIPE Debug Signals in
+ * the following document :
+ * /home/cadtools/cores/lsi/cw000411/cw00041131/prod/docs/manuals/
+ * cw000411TechMan.pdf
+ * lane0 is bit[15:0], which is pipeDebug0[15:0] lane1 is
+ * bit[31:16], which is pipeDebug0[31:16]
+ *
+ * -------------------------------------------------------------------------
+ * pipeDebug  Signal or Condition    Description                       Reset
+ * -------------------------------------------------------------------------
+ * [15]       efifoOverflow or       EFIFO overflow or EFIFO           0
+ *            efifoUnderflow         underflow occurred
+ * [14]       skipInsert or          EFIFO skip inserted or deleted    0
+ *            skipDelete
+ * [13]       fifordData[12] ==      Skip flag read by EFIFO. Used     0
+ *            skipFlag               with skipcharflag to verify
+ *                                   EFIFO depth.
+ * [12]       skipcharflag           Skip flag written by EFIFO        0
+ * [11:8]     efifoDepth[3:0]        Indicates EFIFO depth             0000
+ * [7]        efifoEios              Detected EFIFO electrical-idle    0
+ *                                   ordered-set output
+ * [6]        efifoBytesync          EFIFO output byte                 0
+ *                                   synchronization
+ * [5]        rxinvalid              8b/10b error or code violation    0
+ * [4]        rxinitdone             Receiver bit-init done.           0
+ *                                   Synchronous with pipeClk.
+ * [3]        txinitdone             Transmitter-bit init done.        0
+ *                                   Synchronous with pipeClk.
+ * [2]        filteredrxlos          Filtered loss of signal used to   1
+ *                                   generate p2lRxelectidle.
+ *                                   Synchronous with pipeClk.
+ * [1]        rxdetectInt            Receiver detected                 0
+ * [0]        pipeMasterDoneOut      Receiver detection valid          0
+ *
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} pipe_debug0_t;
+
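+/*
+ * Editorial usage sketch: pulling one lane's EFIFO depth (pipeDebug
+ * bits [11:8] in the table above) out of a PipeDebug word. As with
+ * GbtDebug, the low 16 bits are the even lane and the high 16 bits the
+ * odd lane of the register pair.
+ */
+static uint32_t
+pipe_lane_efifo_depth(pipe_debug0_t dbg, int odd_lane)
+{
+	uint32_t lane16 = odd_lane ? (dbg.bits.data >> 16) :
+	    (dbg.bits.data & 0xffff);
+
+	return ((lane16 >> 8) & 0xf);	/* efifoDepth[3:0] */
+}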
+
+/*
+ * Register: PipeDebug1
+ * PIPE Debug, status register
+ * Description: This register returns bits [63:32] of the PIPE core's
+ * pipeDebug bus
+ * Fields:
+ * The value specified here is the Power On Reset value as given
+ * in spec.
+ * This 16-bit debug bus reports operating conditions for the
+ * PIPE. The pipeDebug[0:15] bus is provided for each lane. lane0
+ * is bits[15:0], lane1 is bits[31:16], lane2 is bits[47:32],
+ * lane3 is bits[63:48], lane4 is bits[79:64], lane5 is
+ * bits[95:80], lane6 is bits[111:96], lane7 is bits[127:112].
+ * Refer to section 4.2.1.5 Single-Lane PIPE Debug Signals in
+ * the following document :
+ * /home/cadtools/cores/lsi/cw000411/cw00041131/prod/docs/manuals/
+ * cw000411TechMan.pdf
+ * lane2 is bits[47:32], which is pipeDebug1[15:0] lane3 is
+ * bits[63:48], which is pipeDebug1[31:16]
+ *
+ * -------------------------------------------------------------------------
+ * pipeDebug  Signal or Condition    Description                       Reset
+ * -------------------------------------------------------------------------
+ * [15]       efifoOverflow or       EFIFO overflow or EFIFO           0
+ *            efifoUnderflow         underflow occurred
+ * [14]       skipInsert or          EFIFO skip inserted or deleted    0
+ *            skipDelete
+ * [13]       fifordData[12] ==      Skip flag read by EFIFO. Used     0
+ *            skipFlag               with skipcharflag to verify
+ *                                   EFIFO depth.
+ * [12]       skipcharflag           Skip flag written by EFIFO        0
+ * [11:8]     efifoDepth[3:0]        Indicates EFIFO depth             0000
+ * [7]        efifoEios              Detected EFIFO electrical-idle    0
+ *                                   ordered-set output
+ * [6]        efifoBytesync          EFIFO output byte                 0
+ *                                   synchronization
+ * [5]        rxinvalid              8b/10b error or code violation    0
+ * [4]        rxinitdone             Receiver bit-init done.           0
+ *                                   Synchronous with pipeClk.
+ * [3]        txinitdone             Transmitter-bit init done.        0
+ *                                   Synchronous with pipeClk.
+ * [2]        filteredrxlos          Filtered loss of signal used to   1
+ *                                   generate p2lRxelectidle.
+ *                                   Synchronous with pipeClk.
+ * [1]        rxdetectInt            Receiver detected                 0
+ * [0]        pipeMasterDoneOut      Receiver detection valid          0
+ *
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} pipe_debug1_t;
+
+
+/*
+ * Register: PipeDebug2
+ * PIPE Debug, status register
+ * Description: This register returns bits [95:64] of the PIPE core's
+ * pipeDebug bus
+ * Fields:
+ * The value specified here is the Power On Reset value as given
+ * in spec.
+ * This 16-bit debug bus reports operating conditions for the
+ * PIPE. The pipeDebug[0:15] bus is provided for each lane. lane0
+ * is bits[15:0], lane1 is bits[31:16], lane2 is bits[47:32],
+ * lane3 is bits[63:48], lane4 is bits[79:64], lane5 is
+ * bits[95:80], lane6 is bits[111:96], lane7 is bits[127:112].
+ * Refer to section 4.2.1.5 Single-Lane PIPE Debug Signals in
+ * the following document :
+ * /home/cadtools/cores/lsi/cw000411/cw00041131/prod/docs/manuals/
+ * cw000411TechMan.pdf
+ * lane4 is bits[79:64], which is pipeDebug2[15:0] lane5 is
+ * bits[95:80], which is pipeDebug2[31:16]
+ *
+ * -------------------------------------------------------------------------
+ * pipeDebug  Signal or Condition    Description                       Reset
+ * -------------------------------------------------------------------------
+ * [15]       efifoOverflow or       EFIFO overflow or EFIFO           0
+ *            efifoUnderflow         underflow occurred
+ * [14]       skipInsert or          EFIFO skip inserted or deleted    0
+ *            skipDelete
+ * [13]       fifordData[12] ==      Skip flag read by EFIFO. Used     0
+ *            skipFlag               with skipcharflag to verify
+ *                                   EFIFO depth.
+ * [12]       skipcharflag           Skip flag written by EFIFO        0
+ * [11:8]     efifoDepth[3:0]        Indicates EFIFO depth             0000
+ * [7]        efifoEios              Detected EFIFO electrical-idle    0
+ *                                   ordered-set output
+ * [6]        efifoBytesync          EFIFO output byte                 0
+ *                                   synchronization
+ * [5]        rxinvalid              8b/10b error or code violation    0
+ * [4]        rxinitdone             Receiver bit-init done.           0
+ *                                   Synchronous with pipeClk.
+ * [3]        txinitdone             Transmitter-bit init done.        0
+ *                                   Synchronous with pipeClk.
+ * [2]        filteredrxlos          Filtered loss of signal used to   1
+ *                                   generate p2lRxelectidle.
+ *                                   Synchronous with pipeClk.
+ * [1]        rxdetectInt            Receiver detected                 0
+ * [0]        pipeMasterDoneOut      Receiver detection valid          0
+ *
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} pipe_debug2_t;
+
+
+/*
+ * Register: PipeDebug3
+ * PIPE Debug, status register
+ * Description: This register returns bits [127:96] of the PIPE
+ * core's pipeDebug bus
+ * Fields:
+ * The value specified here is the Power On Reset value as given
+ * in spec.
+ * This 16-bit debug bus reports operating conditions for the
+ * PIPE. The pipeDebug[0:15] bus is provided for each lane. lane0
+ * is bits[15:0], lane1 is bits[31:16], lane2 is bits[47:32],
+ * lane3 is bits[63:48], lane4 is bits[79:64], lane5 is
+ * bits[95:80], lane6 is bits[111:96], lane7 is bits[127:112].
+ * Refer to section 4.2.1.5 Single-Lane PIPE Debug Signals in
+ * the following document :
+ * /home/cadtools/cores/lsi/cw000411/cw00041131/prod/docs/manuals/
+ * cw000411TechMan.pdf
+ * lane6 is bits[111:96], which is pipeDebug3[15:0] lane7 is
+ * bits[127:112], which is pipeDebug3[31:16]
+ *
+ * -------------------------------------------------------------------------
+ * pipeDebug  Signal or Condition    Description                       Reset
+ * -------------------------------------------------------------------------
+ * [15]       efifoOverflow or       EFIFO overflow or EFIFO           0
+ *            efifoUnderflow         underflow occurred
+ * [14]       skipInsert or          EFIFO skip inserted or deleted    0
+ *            skipDelete
+ * [13]       fifordData[12] ==      Skip flag read by EFIFO. Used     0
+ *            skipFlag               with skipcharflag to verify
+ *                                   EFIFO depth.
+ * [12]       skipcharflag           Skip flag written by EFIFO        0
+ * [11:8]     efifoDepth[3:0]        Indicates EFIFO depth             0000
+ * [7]        efifoEios              Detected EFIFO electrical-idle    0
+ *                                   ordered-set output
+ * [6]        efifoBytesync          EFIFO output byte                 0
+ *                                   synchronization
+ * [5]        rxinvalid              8b/10b error or code violation    0
+ * [4]        rxinitdone             Receiver bit-init done.           0
+ *                                   Synchronous with pipeClk.
+ * [3]        txinitdone             Transmitter-bit init done.        0
+ *                                   Synchronous with pipeClk.
+ * [2]        filteredrxlos          Filtered loss of signal used to   1
+ *                                   generate p2lRxelectidle.
+ *                                   Synchronous with pipeClk.
+ * [1]        rxdetectInt            Receiver detected                 0
+ * [0]        pipeMasterDoneOut      Receiver detection valid          0
+ *
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} pipe_debug3_t;
+
+
+/*
+ * Register: PcieEnhCapHdr
+ * PCIE Enhanced Capability Header
+ * Description: PCIE Enhanced Capability Header
+ * Fields:
+ * Next Capability Offset (END, no more)
+ * Capability Version
+ * PCI Express Enhanced Capability ID (0x1 = Advanced Error
+ * Reporting)
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t nxt_cap_offset:12;
+ uint32_t cap_ver:4;
+ uint32_t pcie_enh_cap_id:16;
+#else
+ uint32_t pcie_enh_cap_id:16;
+ uint32_t cap_ver:4;
+ uint32_t nxt_cap_offset:12;
+#endif
+ } bits;
+} pcie_enh_cap_hdr_t;
+
+
+/*
+ * Register: UncErrStat
+ * Uncorrectable Error Status
+ * Description: Uncorrectable Error Status
+ * Fields:
+ * Unsupported Request Error
+ * ECRC Error
+ * Malformed TLP
+ * Receiver Overflow
+ * Unexpected Completion
+ * Completer Abort
+ * Completion Timeout
+ * Flow Control Protocol Error
+ * Poisoned TLP
+ * Surprise Down Error. Introduced in PCIe 1.1 specification.
+ * Data Link Protocol Error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:11;
+ uint32_t unsup_req_err:1;
+ uint32_t ecrc_err:1;
+ uint32_t bad_tlp:1;
+ uint32_t rcv_ovfl:1;
+ uint32_t unexp_cpl:1;
+ uint32_t cpl_abrt:1;
+ uint32_t cpl_tmout:1;
+ uint32_t fc_err:1;
+ uint32_t psn_tlp:1;
+ uint32_t rsrvd1:6;
+ uint32_t surprise_down_err:1;
+ uint32_t dlp_err:1;
+ uint32_t rsrvd2:4;
+#else
+ uint32_t rsrvd2:4;
+ uint32_t dlp_err:1;
+ uint32_t surprise_down_err:1;
+ uint32_t rsrvd1:6;
+ uint32_t psn_tlp:1;
+ uint32_t fc_err:1;
+ uint32_t cpl_tmout:1;
+ uint32_t cpl_abrt:1;
+ uint32_t unexp_cpl:1;
+ uint32_t rcv_ovfl:1;
+ uint32_t bad_tlp:1;
+ uint32_t ecrc_err:1;
+ uint32_t unsup_req_err:1;
+ uint32_t rsrvd:11;
+#endif
+ } bits;
+} unc_err_stat_t;
+
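+/*
+ * Editorial usage sketch: AER status registers are write-1-to-clear per
+ * the PCIe specification, so reading the register and writing the same
+ * value back acknowledges every error that was latched. The accessors
+ * and "off" are hypothetical stand-ins for the driver's PIO routines
+ * and this register's offset.
+ */
+extern uint32_t hxge_peu_rd32(uint32_t off);		/* hypothetical */
+extern void hxge_peu_wr32(uint32_t off, uint32_t val);	/* hypothetical */
+
+static uint32_t
+unc_err_ack(uint32_t off)
+{
+	unc_err_stat_t stat;
+
+	stat.value = hxge_peu_rd32(off);
+	if (stat.value != 0)
+		hxge_peu_wr32(off, stat.value);	/* RW1C: clear what we saw */
+	return (stat.value);
+}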
+
+/*
+ * Register: UncErrMask
+ * Uncorrectable Error Mask
+ * Description: Uncorrectable Error Mask
+ * Fields:
+ * Unsupported Request Error
+ * ECRC Error
+ * Malformed TLP
+ * Receiver Overflow
+ * Unexpected Completion
+ * Completer Abort
+ * Completion Timeout
+ * Flow Control Protocol Error
+ * Poisoned TLP
+ * Surprise Down Error. Introduced in PCIe 1.1.
+ * Data Link Protocol Error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:11;
+ uint32_t unsup_req_err:1;
+ uint32_t ecrc_err:1;
+ uint32_t bad_tlp:1;
+ uint32_t rcv_ovfl:1;
+ uint32_t unexp_cpl:1;
+ uint32_t cpl_abrt:1;
+ uint32_t cpl_tmout:1;
+ uint32_t fc_err:1;
+ uint32_t psn_tlp:1;
+ uint32_t rsrvd1:6;
+ uint32_t surprise_down_err:1;
+ uint32_t dlp_err:1;
+ uint32_t rsrvd2:4;
+#else
+ uint32_t rsrvd2:4;
+ uint32_t dlp_err:1;
+ uint32_t surprise_down_err:1;
+ uint32_t rsrvd1:6;
+ uint32_t psn_tlp:1;
+ uint32_t fc_err:1;
+ uint32_t cpl_tmout:1;
+ uint32_t cpl_abrt:1;
+ uint32_t unexp_cpl:1;
+ uint32_t rcv_ovfl:1;
+ uint32_t bad_tlp:1;
+ uint32_t ecrc_err:1;
+ uint32_t unsup_req_err:1;
+ uint32_t rsrvd:11;
+#endif
+ } bits;
+} unc_err_mask_t;
+
+
+/*
+ * Register: UncErrSvrty
+ * Uncorrectable Error Severity
+ * Description: Uncorrectable Error Severity
+ * Fields:
+ * Unsupported Request Error
+ * ECRC Error
+ * Malformed TLP
+ * Receiver Overflow
+ * Unexpected Completion
+ * Completer Abort
+ * Completion Timeout
+ * Flow Control Protocol Error
+ * Poisoned TLP
+ * Surprise Down Error. Introduced in PCIe 1.1 specification. Not
+ * supported; use PCIe default.
+ * Data Link Protocol Error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:11;
+ uint32_t unsup_req_err:1;
+ uint32_t ecrc_err:1;
+ uint32_t bad_tlp:1;
+ uint32_t rcv_ovfl:1;
+ uint32_t unexp_cpl:1;
+ uint32_t cpl_abrt:1;
+ uint32_t cpl_tmout:1;
+ uint32_t fc_err:1;
+ uint32_t psn_tlp:1;
+ uint32_t rsrvd1:6;
+ uint32_t surprise_down_err:1;
+ uint32_t dlp_err:1;
+ uint32_t rsrvd2:4;
+#else
+ uint32_t rsrvd2:4;
+ uint32_t dlp_err:1;
+ uint32_t surprise_down_err:1;
+ uint32_t rsrvd1:6;
+ uint32_t psn_tlp:1;
+ uint32_t fc_err:1;
+ uint32_t cpl_tmout:1;
+ uint32_t cpl_abrt:1;
+ uint32_t unexp_cpl:1;
+ uint32_t rcv_ovfl:1;
+ uint32_t bad_tlp:1;
+ uint32_t ecrc_err:1;
+ uint32_t unsup_req_err:1;
+ uint32_t rsrvd:11;
+#endif
+ } bits;
+} unc_err_svrty_t;
+
+
+/*
+ * Register: CorrErrStat
+ * Correctable Error Status
+ * Description: Correctable Error Status
+ * Fields:
+ * Advisory Non-Fatal Error. Introduced in PCIe 1.1 specification.
+ * Replay Timer Timeout
+ * Replay Number Rollover
+ * Bad DLLP
+ * Bad TLP
+ * Receive Error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:18;
+ uint32_t adv_nf_err:1;
+ uint32_t rply_tmr_tmout:1;
+ uint32_t rsrvd1:3;
+ uint32_t rply_rlovr:1;
+ uint32_t bad_dllp:1;
+ uint32_t bad_tlp:1;
+ uint32_t rsrvd2:5;
+ uint32_t rcv_err:1;
+#else
+ uint32_t rcv_err:1;
+ uint32_t rsrvd2:5;
+ uint32_t bad_tlp:1;
+ uint32_t bad_dllp:1;
+ uint32_t rply_rlovr:1;
+ uint32_t rsrvd1:3;
+ uint32_t rply_tmr_tmout:1;
+ uint32_t adv_nf_err:1;
+ uint32_t rsrvd:18;
+#endif
+ } bits;
+} corr_err_stat_t;
+
+
+/*
+ * Register: CorrErrMask
+ * Correctable Error Mask
+ * Description: Correctable Error Mask
+ * Fields:
+ * Advisory Non-Fatal Error Mask
+ * Replay Timer Timeout
+ * Replay Number Rollover
+ * Bad DLLP
+ * Bad TLP
+ * Receive Error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:18;
+ uint32_t adv_nf_err_mask:1;
+ uint32_t rply_tmr_tmout:1;
+ uint32_t rsrvd1:3;
+ uint32_t rply_rlovr:1;
+ uint32_t bad_dllp:1;
+ uint32_t bad_tlp:1;
+ uint32_t rsrvd2:5;
+ uint32_t rcv_err:1;
+#else
+ uint32_t rcv_err:1;
+ uint32_t rsrvd2:5;
+ uint32_t bad_tlp:1;
+ uint32_t bad_dllp:1;
+ uint32_t rply_rlovr:1;
+ uint32_t rsrvd1:3;
+ uint32_t rply_tmr_tmout:1;
+ uint32_t adv_nf_err_mask:1;
+ uint32_t rsrvd:18;
+#endif
+ } bits;
+} corr_err_mask_t;
+
+
+/*
+ * Register: AdvCapCtrl
+ * Advanced Capability and Control
+ * Description: Advanced Capability and Control
+ * Fields:
+ * ECRC Check Enable
+ * ECRC Check Capable
+ * ECRC Generation Enable
+ * ECRC Generation Capability
+ * First Error Pointer
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:23;
+ uint32_t ecrc_chk_en:1;
+ uint32_t ecrc_chk_cap:1;
+ uint32_t ecrc_gen_en:1;
+ uint32_t ecrc_gen_cap:1;
+ uint32_t st_err_ptr:5;
+#else
+ uint32_t st_err_ptr:5;
+ uint32_t ecrc_gen_cap:1;
+ uint32_t ecrc_gen_en:1;
+ uint32_t ecrc_chk_cap:1;
+ uint32_t ecrc_chk_en:1;
+ uint32_t rsrvd:23;
+#endif
+ } bits;
+} adv_cap_ctrl_t;
+
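+/*
+ * Editorial usage sketch: a read-modify-write that enables ECRC
+ * generation and checking only when the corresponding capability bits
+ * advertise support, per the field list above. The accessors and "off"
+ * are hypothetical.
+ */
+extern uint32_t hxge_peu_rd32(uint32_t off);		/* hypothetical */
+extern void hxge_peu_wr32(uint32_t off, uint32_t val);	/* hypothetical */
+
+static void
+adv_cap_enable_ecrc(uint32_t off)
+{
+	adv_cap_ctrl_t ctrl;
+
+	ctrl.value = hxge_peu_rd32(off);
+	if (ctrl.bits.ecrc_gen_cap)
+		ctrl.bits.ecrc_gen_en = 1;
+	if (ctrl.bits.ecrc_chk_cap)
+		ctrl.bits.ecrc_chk_en = 1;
+	hxge_peu_wr32(off, ctrl.value);
+}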
+
+/*
+ * Register: HdrLog0
+ * Header Log0
+ * Description: Header Log0
+ * Fields:
+ * First DW of TLP header with error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} hdr_log0_t;
+
+
+/*
+ * Register: HdrLog1
+ * Header Log1
+ * Description: Header Log1
+ * Fields:
+ * Second DW of TLP header with error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} hdr_log1_t;
+
+
+/*
+ * Register: HdrLog2
+ * Header Log2
+ * Description: Header Log2
+ * Fields:
+ * Third DW of TLP header with error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} hdr_log2_t;
+
+
+/*
+ * Register: HdrLog3
+ * Header Log3
+ * Description: Header Log3
+ * Fields:
+ * Fourth DW of TLP header with error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} hdr_log3_t;
+
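+/*
+ * Editorial usage sketch: the faulting TLP header is spread across the
+ * four HdrLog registers, one DW each, so error handling would normally
+ * gather them into an array for logging. The accessor is hypothetical,
+ * and the assumption that the four registers sit at consecutive 32-bit
+ * offsets from "base" is the editor's, not the spec's.
+ */
+extern uint32_t hxge_peu_rd32(uint32_t off);	/* hypothetical */
+
+static void
+hdr_log_collect(uint32_t base, uint32_t tlp_hdr[4])
+{
+	int i;
+
+	for (i = 0; i < 4; i++)
+		tlp_hdr[i] = hxge_peu_rd32(base + (i * 4));
+}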
+
+/*
+ * Register: PipeRxTxControl
+ * Pipe Rx/Tx Control
+ * Fields:
+ *      loopbackModeSel:
+ *        00 : ewrap : Enable wrapback test mode
+ *        01 : padLoopback : Enable Pad Serial Loopback test mode
+ *        10 : revLoopback : Enable Reverse Loopback test mode
+ *        11 : efifoLoopback : Enable PCI Express Slave loop back
+ *      enBist:
+ *        100 : Clock generator test
+ *        x10 : Vil/Vih test
+ *        x01 : Vih/Vil test
+ *        x11 : No-error test. A full test of the transceiver
+ *        111 : Forced-error test. A full test of the transceiver with
+ *              forced errors
+ *      tdws20: 1 : selects 20-bit mode 0 : selects 10-bit mode
+ *      tdenfifo: 1 : selects Tx 20-bit fifo mode
+ *      rxpreswin: 00 : 52 us (470 cycles) 01 : 53 us (720 cycles)
+ *                 10 : 54 us (970 cycles) 11 : 55 us (1220 cycles)
+ *      rdws20: 1 : selects 20-bit mode 0 : selects 10-bit mode
+ *      enstretch: 1 : Enable receiver reference clocks
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:1;
+ uint32_t loopback:1;
+ uint32_t loopback_mode_sel:2;
+ uint32_t rsrvd1:1;
+ uint32_t en_bist:3;
+ uint32_t tdws20:1;
+ uint32_t tdenfifo:1;
+ uint32_t rxpreswin:2;
+ uint32_t rdws20:1;
+ uint32_t enstretch:1;
+ uint32_t rsrvd2:18;
+#else
+ uint32_t rsrvd2:18;
+ uint32_t enstretch:1;
+ uint32_t rdws20:1;
+ uint32_t rxpreswin:2;
+ uint32_t tdenfifo:1;
+ uint32_t tdws20:1;
+ uint32_t en_bist:3;
+ uint32_t rsrvd1:1;
+ uint32_t loopback_mode_sel:2;
+ uint32_t loopback:1;
+ uint32_t rsrvd:1;
+#endif
+ } bits;
+} pipe_rx_tx_control_t;
+
+
+/*
+ * Register: PipeRxTxStatus
+ * Pipe Rx/Tx Status
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:32;
+#else
+ uint32_t rsrvd:32;
+#endif
+ } bits;
+} pipe_rx_tx_status_t;
+
+
+/*
+ * Register: PipeRxTxPwrCntl
+ * Pipe Rx/Tx Power Control
+ * 1 : power down termination trimming circuit 0 : normal
+ * operation
+ * Power down PECL Clock buffer 1 : when a bit is 1, power down
+ * associated clock buffer cell 0 : normal operation
+ * Power down Transmit PLL 1 : when a bit is 1, power down
+ * associated Tx PLL circuit 0 : normal operation
+ * Power down Differential O/P Clock buffer 1 : when a bit is 1,
+ * power down associated differential clock buffer that drives
+ * gbtClkoutN/p 0 : normal operation
+ * Power down Transmitter Analog section 1 : when a bit is 1,
+ * power down analog section of the associated Transmitter and
+ * the Tx buffer 0 : normal operation
+ * Power down RxLOS 1 : when a bit is 1, it powers down the Rx
+ * LOS circuitry for the associated serdes lanes 0 : normal
+ * operation
+ * Power down Receiver Analog section 1 : when a bit is 1, power
+ * down analog section of the associated Receiver and the Tx
+ * buffer 0 : normal operation
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:1;
+ uint32_t pdrtrim:1;
+ uint32_t pdownpecl:2;
+ uint32_t pdownpll:2;
+ uint32_t pdclkout:2;
+ uint32_t pdownt:8;
+ uint32_t pdrxlos:8;
+ uint32_t pdownr:8;
+#else
+ uint32_t pdownr:8;
+ uint32_t pdrxlos:8;
+ uint32_t pdownt:8;
+ uint32_t pdclkout:2;
+ uint32_t pdownpll:2;
+ uint32_t pdownpecl:2;
+ uint32_t pdrtrim:1;
+ uint32_t rsrvd:1;
+#endif
+ } bits;
+} pipe_rx_tx_pwr_cntl_t;
+
+
+/*
+ * Register: PipeRxTxParam
+ * Pipe Rx/Tx Parameter
+ * Tx Driver Emphasis
+ * Serial output Slew Rate Control
+ * Tx Voltage Mux control
+ * Tx Voltage Pulse control
+ * Output Swing setting
+ * Transmitter Clock generator pole adjust
+ * Transmitter Clock generator zero adjust
+ * Receiver Clock generator pole adjust
+ * Receiver Clock generator zero adjust
+ * Bias Control for factory testing and debugging
+ * Receiver LOS Threshold adjustment. This value is determined by
+ * LSI.
+ * Receiver Input Equalizer control
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:1;
+ uint32_t emph:3;
+ uint32_t rsrvd1:1;
+ uint32_t risefall:3;
+ uint32_t vmuxlo:2;
+ uint32_t vpulselo:2;
+ uint32_t vtxlo:4;
+ uint32_t tp:2;
+ uint32_t tz:2;
+ uint32_t rp:2;
+ uint32_t rz:2;
+ uint32_t biascntl:1;
+ uint32_t losadj:3;
+ uint32_t rxeq:4;
+#else
+ uint32_t rxeq:4;
+ uint32_t losadj:3;
+ uint32_t biascntl:1;
+ uint32_t rz:2;
+ uint32_t rp:2;
+ uint32_t tz:2;
+ uint32_t tp:2;
+ uint32_t vtxlo:4;
+ uint32_t vpulselo:2;
+ uint32_t vmuxlo:2;
+ uint32_t risefall:3;
+ uint32_t rsrvd1:1;
+ uint32_t emph:3;
+ uint32_t rsrvd:1;
+#endif
+ } bits;
+} pipe_rx_tx_param_t;
+
+
+/*
+ * Register: PipeRxTxClock
+ * Pipe Rx/Tx Clock
+ * Reverse Loopback clock select 00 : gbtRbcAO 01 : gbtRbcBO 10 :
+ * gbtRbcCO 11 : gbtRbcDO
+ * Select Master Clock 100 : All lanes 000 : Lane A 001 : Lane B
+ * 010 : Lane C 011 : Lane D
+ * Transmit PLL Divider control
+ * Transmit Data rate control
+ * Receiver PLL Frequency control
+ * Bit rate control to enable bit doubling feature
+ * Reset Transmitter lane
+ * Reset Receiver lane
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:2;
+ uint32_t revlbrefsel:2;
+ uint32_t rsrvd1:1;
+ uint32_t tdmaster:3;
+ uint32_t fbdivt:3;
+ uint32_t half_ratet:1;
+ uint32_t fbdivr:3;
+ uint32_t half_rater:1;
+ uint32_t txreset:8;
+ uint32_t rxreset:8;
+#else
+ uint32_t rxreset:8;
+ uint32_t txreset:8;
+ uint32_t half_rater:1;
+ uint32_t fbdivr:3;
+ uint32_t half_ratet:1;
+ uint32_t fbdivt:3;
+ uint32_t tdmaster:3;
+ uint32_t rsrvd1:1;
+ uint32_t revlbrefsel:2;
+ uint32_t rsrvd:2;
+#endif
+ } bits;
+} pipe_rx_tx_clock_t;
+
+
+/*
+ * Register: PipeGlueCntl0
+ * Pipe Glue Control 0
+ * Lock to Bitstream Initialization Time
+ * RXLOS Test bit
+ * Electrical Idle Ordered set enable
+ * Enable RxLOS
+ * Enable Fast resync
+ * RxLOS Sample Interval
+ * RxLOS threshold
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t bitlocktime:16;
+ uint32_t rxlos_test:1;
+ uint32_t eiosenable:1;
+ uint32_t rxlosenable:1;
+ uint32_t fastresync:1;
+ uint32_t samplerate:4;
+ uint32_t thresholdcount:8;
+#else
+ uint32_t thresholdcount:8;
+ uint32_t samplerate:4;
+ uint32_t fastresync:1;
+ uint32_t rxlosenable:1;
+ uint32_t eiosenable:1;
+ uint32_t rxlos_test:1;
+ uint32_t bitlocktime:16;
+#endif
+ } bits;
+} pipe_glue_cntl0_t;
+
+
+/*
+ * Register: PipeGlueCntl1
+ * Pipe Glue Control 1
+ * Receiver Trim Resistance Configuration
+ * Transmitter Trim Resistance Configuration
+ * Auto Trim Enable
+ * 50 Ohm Termination Enable
+ * Customer select for reference clock frequency
+ * EFIFO Same clock select
+ * EFIFO start depth
+ * Lock to refclk initialization time
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t termrcfg:2;
+ uint32_t termtcfg:2;
+ uint32_t rtrimen:1;
+ uint32_t ref50:1;
+ uint32_t freq_sel:1;
+ uint32_t same_sel:1;
+ uint32_t rsrvd:1;
+ uint32_t start_efifo:3;
+ uint32_t rsrvd1:2;
+ uint32_t inittime:18;
+#else
+ uint32_t inittime:18;
+ uint32_t rsrvd1:2;
+ uint32_t start_efifo:3;
+ uint32_t rsrvd:1;
+ uint32_t same_sel:1;
+ uint32_t freq_sel:1;
+ uint32_t ref50:1;
+ uint32_t rtrimen:1;
+ uint32_t termtcfg:2;
+ uint32_t termrcfg:2;
+#endif
+ } bits;
+} pipe_glue_cntl1_t;
+
+
+/*
+ * Register: HcrReg
+ * HCR Registers
+ * Description: Hydra Specific Configuration Registers for use by
+ * software. These registers are loaded with the SPROM contents at
+ * power on. A maximum of 128 DWords has been assigned for s/w to
+ * use. This space generally stores the following information: MAC
+ * Address, Number of MAC addresses, and MAC Phy Type. Other data
+ * fields are up to the software to use.
+ *
+ * Fields:
+ * Hydra specific configuration controlled by software
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t hcr_val:32;
+#else
+ uint32_t hcr_val:32;
+#endif
+ } bits;
+} hcr_reg_t;
+
+
+/*
+ * Register: BlockReset
+ * Block Reset
+ * Description: Soft resets to modules. Blade domain modules are
+ * reset by setting the corresponding block reset to 1. Shared domain
+ * resets are sent to SPI for processing and corresponding action by
+ * SPI. Shared domains are reset only if all the blades have
+ * requested a reset for that block. Below is an example scenario :
+ * s/w initiates the reset by writing '1' to the dpmRst bit dpmRst
+ * bit remains '1' until dpmRstStat is detected to be 1. Once
+ * dpmRstStat is detected to be 1, even if s/w writes 1 to this bit
+ * again no new reset will be initiated to the shared domain, ie,
+ * DPM. dpmRstStat is driven by external i/f (shared domain status
+ * provided by SPI) dpmRstStat bit will show '1' as long as the input
+ * stays at 1 or until s/w reads the status and is cleared only after
+ * s/w reads it and if dpmRstStat is 0 by then.
+ * If Host wants to reset entire Hydra it should do so through the
+ * mailbox. In this case, the message interpretation is up to the
+ * software. Writing a '1' to any of these bits generates a single
+ * pulse to the SP module which then controls the reset of the
+ * respective block.
+ *
+ * Fields:
+ * 1 : indicates that an active reset has been applied to the SP
+ * based on the request from all of the blades. Clears on Read
+ * provided the reset to SP has been deasserted by then by SPI.
+ * Setting to 1 allows this blade to request Service Processor
+ * (Shared) reset. However, SP reset can only occur if all blades
+ * agree. The success of reset request is indicated by spRstStat
+ * = 1 which is wired-AND of request from all the blades. Current
+ * request can be removed by writing a '0' to this bit. This bit
+ * clears automatically on detecting spRstStat = 1.
+ * Enable blade to service processor (Shared) reset voter
+ * registration = 1, disabled = 0
+ * Issue power reset to the EP Core Clears to 0, writing 0 has no
+ * effect.
+ * Issue core reset to the EP Core Clears to 0, writing 0 has no
+ * effect.
+ * Issue system reset (sysPor) to the PIPE Core This issues reset
+ * to the EP core, PCIe domains of Tdc, Rdc, and CIP. This shuts
+ * down the PCIe clock until Pipe core comes out of reset. The
+ * status of the Pipe core can be read by reading out the
+ * cipLinkStat register's pipe core status and pcie reset status
+ * bits. Clears to 0, writing 0 has no effect.
+ * 1 : indicates that an active reset has been applied to the
+ * NMAC based on the request from all of the blades. Clears on
+ * Read provided the reset to NMAC has been deasserted by then by
+ * SPI.
+ * 1 : indicates that an active reset has been applied to the TDP
+ * based on the request from all of the blades. Clears on Read
+ * provided the reset to TDP has been deasserted by then by SPI.
+ * 1 : indicates that an active reset has been applied to the DPM
+ * based on the request from all of the blades. Clears on Read
+ * provided the reset to DPM has been deasserted by then by SPI.
+ * This bit is effective only if sharedVoterEn (bit 24 of this
+ * reg) has been enabled. Writing '1' sends a request to SP to
+ * reset NMAC if sharedVoterEn=1. Intended for backdoor access.
+ * The success of reset request is indicated by nmacRstStat = 1
+ * which is wired-AND of request from all the blades. This also
+ * means that the reset request is successful only if all the
+ * blades requested for reset of this block. Current request can
+ * be removed by writing a '0' to this bit. This bit clears
+ * automatically on detecting nmacRstStat = 1.
+ * This bit is effective only if sharedVoterEn (bit 24 of this
+ * reg) has been enabled. Writing '1' sends a request to SP to
+ * reset TDP if sharedVoterEn=1. Intended for backdoor access.
+ * Intended for backdoor access. The success of reset request is
+ * indicated by tdpRstStat = 1 which is wired-AND of request from
+ * all the blades. This also means that the reset request is
+ * successful only if all the blades requested for reset of this
+ * block. Current request can be removed by writing a '0' to this
+ * bit. This bit clears automatically on detecting tdpRstStat =
+ * 1.
+ * This bit is effective only if sharedVoterEn (bit 24 of this
+ * reg) has been enabled. Writing '1' sends a request to SP to
+ * reset DPM if sharedVoterEn=1. Intended for backdoor access.
+ * Intended for backdoor access. The success of reset request is
+ * indicated by dpmRstStat = 1 which is wired-AND of request from
+ * all the blades. This also means that the reset request is
+ * successful only if all the blades requested for reset of this
+ * block. Current request can be removed by writing a '0' to this
+ * bit. This bit clears automatically on detecting dpmRstStat =
+ * 1.
+ * Setting to 1 generates tdcCoreReset and tdcPcieReset to the
+ * TDC block. The reset will stay asserted for at least 4 clock
+ * cycles. Clears to 0, writing 0 has no effect.
+ * Setting to 1 generates rdcCoreReset and rdcPcieReset to the
+ * RDC block. The reset will stay asserted for at least 4 clock
+ * cycles. Clears to 0, writing 0 has no effect.
+ * Setting to 1 generates reset to the PFC block. The reset will
+ * stay asserted for at least 4 clock cycles. Clears to 0, writing
+ * 0 has no effect.
+ * Setting to 1 generates reset to the VMAC block. The reset will
+ * stay asserted for at least 4 clock cycles. Clears to 0, writing
+ * 0 has no effect.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:13;
+ uint32_t sp_rst_stat:1;
+ uint32_t sp_rst:1;
+ uint32_t shared_voter_en:1;
+ uint32_t epcore_pwr_rst:1;
+ uint32_t epcore_core_rst:1;
+ uint32_t pipe_sys_rst:1;
+ uint32_t nmac_rst_stat:1;
+ uint32_t tdp_rst_stat:1;
+ uint32_t dpm_rst_stat:1;
+ uint32_t rsrvd1:1;
+ uint32_t nmac_rst:1;
+ uint32_t tdp_rst:1;
+ uint32_t dpm_rst:1;
+ uint32_t rsrvd2:1;
+ uint32_t tdc_rst:1;
+ uint32_t rdc_rst:1;
+ uint32_t pfc_rst:1;
+ uint32_t vmac_rst:1;
+ uint32_t rsrvd3:1;
+#else
+ uint32_t rsrvd3:1;
+ uint32_t vmac_rst:1;
+ uint32_t pfc_rst:1;
+ uint32_t rdc_rst:1;
+ uint32_t tdc_rst:1;
+ uint32_t rsrvd2:1;
+ uint32_t dpm_rst:1;
+ uint32_t tdp_rst:1;
+ uint32_t nmac_rst:1;
+ uint32_t rsrvd1:1;
+ uint32_t dpm_rst_stat:1;
+ uint32_t tdp_rst_stat:1;
+ uint32_t nmac_rst_stat:1;
+ uint32_t pipe_sys_rst:1;
+ uint32_t epcore_core_rst:1;
+ uint32_t epcore_pwr_rst:1;
+ uint32_t shared_voter_en:1;
+ uint32_t sp_rst:1;
+ uint32_t sp_rst_stat:1;
+ uint32_t rsrvd:13;
+#endif
+ } bits;
+} block_reset_t;
+
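+/*
+ * Editorial usage sketch: resetting a blade-domain block (here VMAC)
+ * per the description above. The write generates a single reset pulse
+ * and the bit self-clears, so no completion polling is required for
+ * blade-domain blocks. The accessor and "off" are hypothetical.
+ */
+extern void hxge_peu_wr32(uint32_t off, uint32_t val);	/* hypothetical */
+
+static void
+block_reset_vmac(uint32_t off)
+{
+	block_reset_t rst;
+
+	rst.value = 0;		/* writing 0 to the other bits has no effect */
+	rst.bits.vmac_rst = 1;
+	hxge_peu_wr32(off, rst.value);
+}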
+
+/*
+ * Register: TimeoutCfg
+ * PIO Timeout Configuration
+ * Description: PIO Timeout Configuration register to control wait
+ * time for a PIO access to complete. The timer resolution is in 250
+ * MHz clock.
+ * Fields:
+ * Programmable timeout counter value for PIO clients who did not
+ * ack a transaction in time. Minimum value should be 64.
+ * Timeout enable for PIO access to clients. 1 = enable.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:21;
+ uint32_t tmout_cnt:10;
+ uint32_t tmout_en:1;
+#else
+ uint32_t tmout_en:1;
+ uint32_t tmout_cnt:10;
+ uint32_t rsrvd:21;
+#endif
+ } bits;
+} timeout_cfg_t;
+
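+/*
+ * Editorial usage sketch: arming the PIO timeout. The counter value is
+ * clamped to the documented minimum of 64 and to the 10-bit field
+ * width. The accessor and "off" are hypothetical.
+ */
+extern void hxge_peu_wr32(uint32_t off, uint32_t val);	/* hypothetical */
+
+static void
+timeout_cfg_arm(uint32_t off, uint32_t ticks)
+{
+	timeout_cfg_t cfg;
+
+	if (ticks < 64)
+		ticks = 64;		/* documented minimum */
+	if (ticks > 0x3ff)
+		ticks = 0x3ff;		/* tmoutCnt is 10 bits wide */
+	cfg.value = 0;
+	cfg.bits.tmout_cnt = ticks;
+	cfg.bits.tmout_en = 1;
+	hxge_peu_wr32(off, cfg.value);
+}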
+
+/*
+ * Register: HeartCfg
+ * PIO Heartbeat Config
+ * Description: PIO Blade presence indication : Heartbeat
+ * configuration The timer resolution is in 250 MHz clock.
+ * Fields:
+ * Heartbeat countdown 250Mhz clock divider which serves as
+ * resolution for the heartTimer.
+ * Heartbeat countdown enable
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t divider:28;
+ uint32_t rsrvd:3;
+ uint32_t en:1;
+#else
+ uint32_t en:1;
+ uint32_t rsrvd:3;
+ uint32_t divider:28;
+#endif
+ } bits;
+} heart_cfg_t;
+
+
+/*
+ * Register: HeartTimer
+ * PIO Heartbeat Timer
+ * Description: PIO Blade presence indication : Heartbeat timer The
+ * timer resolution is in 250 MHz clock.
+ * Fields:
+ * Number of heartCfg.divider ticks of the 250Mhz clock before
+ * blade presence expires. This register decrements for every
+ * heartCfg.divider number of 250MHz clock cycles. It expires to
+ * 0 and so must be written periodically to reset the timer back
+ * to the required value. This counter does not have any effect
+ * on CIP functionality.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t timer:32;
+#else
+ uint32_t timer:32;
+#endif
+ } bits;
+} heart_timer_t;
+
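+/*
+ * Editorial usage sketch: blade presence is kept alive by enabling the
+ * divider in HeartCfg once and then periodically rewriting HeartTimer
+ * before it decrements to zero. The accessor and both offsets are
+ * hypothetical.
+ */
+extern void hxge_peu_wr32(uint32_t off, uint32_t val);	/* hypothetical */
+
+static void
+heartbeat_start(uint32_t cfg_off, uint32_t timer_off, uint32_t divider,
+    uint32_t ticks)
+{
+	heart_cfg_t cfg;
+
+	cfg.value = 0;
+	cfg.bits.divider = divider;	/* resolution in 250 MHz clocks */
+	cfg.bits.en = 1;
+	hxge_peu_wr32(cfg_off, cfg.value);
+	/* Must be rewritten periodically; presence expires when it hits 0 */
+	hxge_peu_wr32(timer_off, ticks);
+}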
+
+/*
+ * Register: CipGpCtrl
+ * CIP General Purpose Control Register
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:30;
+ uint32_t dma_override_relaxord:1;
+ uint32_t dma_override_nosnoop:1;
+#else
+ uint32_t dma_override_nosnoop:1;
+ uint32_t dma_override_relaxord:1;
+ uint32_t rsrvd:30;
+#endif
+ } bits;
+} cip_gp_ctrl_t;
+
+
+/*
+ * Register: CipStatus
+ * CIP Status
+ * Description: This register returns CIP block's current logic
+ * status
+ * Fields:
+ * Current state of the cipEpc state machine 00 : epIdle ( wait
+ * for EEPROM request from SP or Host ) 01 : waitAck0 ( wait for
+ * ack from EEPROM for the first 16 bit read of the DW access )
+ * 11 : waitAck1 ( wait for ack from EEPROM for the second 16 bit
+ * read of the DW access ) 10 : UNDEFINED ( Undefined/Unused
+ * state; EPC is never expected to be in this state )
+ * Current state of the cipSpc state machine 000 : spReset ( wait
+ * for Power-On SPROM download to start) 001 : getAddr ( Get
+ * CfgReg Address ) 010 : getData ( Get CfgReg Data ) 011 :
+ * ignoreData ( Address phase had an error, so ignore the Data
+ * coming in ) 100 : idleCyc ( Idle cycle following an AHB
+ * Address phase ) 101 : waitAck0 ( Wait for ack from EP Core
+ * during SPROM Download ) 110 : waitAck1 ( Wait for ack from EP
+ * Core during register read/write ) 111 : NORMAL ( SPROM
+ * Download/Register read/write access completed and wait for
+ * SP/Host initiated PCI/AHB/HCR read/write )
+ * PCI Bus Number as reported by EP core
+ * PCI Bus Device Number as reported by EP core
+ * 1: current csr access in progress is Local CIP csr access
+ * 1: current csr access in progress is Blade Domain csr access
+ * 1: a 64 bit blade domain access is in progress as two 32 bit
+ * accesses
+ * 1: indicates config values were downloaded from SPROM
+ * 1: indicates non-zero number of HCR config values downloaded
+ * from SPROM
+ * 1: indicates non-zero number of PCI config values downloaded
+ * from SPROM
+ * 1: indicates non-zero number of Pipe config values downloaded
+ * from SPROM
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:7;
+ uint32_t cip_epc_sm:2;
+ uint32_t cip_spc_sm:3;
+ uint32_t pbus_num:8;
+ uint32_t pbus_dev_num:5;
+ uint32_t loc_csr_access:1;
+ uint32_t bd_csr_access:1;
+ uint32_t d64_in_progress:1;
+ uint32_t spc_dnld_done:1;
+ uint32_t hcr_nz_cfg:1;
+ uint32_t pci_nz_cfg:1;
+ uint32_t pipe_nz_cfg:1;
+#else
+ uint32_t pipe_nz_cfg:1;
+ uint32_t pci_nz_cfg:1;
+ uint32_t hcr_nz_cfg:1;
+ uint32_t spc_dnld_done:1;
+ uint32_t d64_in_progress:1;
+ uint32_t bd_csr_access:1;
+ uint32_t loc_csr_access:1;
+ uint32_t pbus_dev_num:5;
+ uint32_t pbus_num:8;
+ uint32_t cip_spc_sm:3;
+ uint32_t cip_epc_sm:2;
+ uint32_t rsrvd:7;
+#endif
+ } bits;
+} cip_status_t;
+
+
+/*
+ * Register: CipLinkStat
+ * Link Status Register
+ * Description: This register returns the Link status
+ * Fields:
+ * NMAC XPCS-2 Link Status
+ * NMAC XPCS-1 Link Status
+ * NMAC XPCS-0 Link Status
+ * '1' indicates that pipe core went down suddenly when its reset
+ * sources are at deactivated level. When this happens, the PCIe
+ * domain logics are reset including the EP core, TDC/RDC PCIe
+ * domains. All these logics, EP Core, and the pipe core are held
+ * at reset until s/w writes 1 to this bit to clear the status, which
+ * will also bring the PCIe domain out of reset.
+ * pipe core clock & reset status 1: core is up & running, ie,
+ * PIPE core is out of reset and clock is ON
+ * PCIe domain reset status 1: PCIe domain logics including EP
+ * core are out of reset; This also implies that PCIe clock is up
+ * and running
+ * EP Core XDM Link State
+ * EP Core RDM Link State
+ * EP Core LTSSM State
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:13;
+ uint32_t xpcs2_link_up:1;
+ uint32_t xpcs1_link_up:1;
+ uint32_t xpcs0_link_up:1;
+ uint32_t rsrvd1:6;
+ uint32_t surprise_pipedn:1;
+ uint32_t pipe_core_stable:1;
+ uint32_t pcie_domain_stable:1;
+ uint32_t xmlh_link_up:1;
+ uint32_t rdlh_link_up:1;
+ uint32_t xmlh_ltssm_state:5;
+#else
+ uint32_t xmlh_ltssm_state:5;
+ uint32_t rdlh_link_up:1;
+ uint32_t xmlh_link_up:1;
+ uint32_t pcie_domain_stable:1;
+ uint32_t pipe_core_stable:1;
+ uint32_t surprise_pipedn:1;
+ uint32_t rsrvd1:6;
+ uint32_t xpcs0_link_up:1;
+ uint32_t xpcs1_link_up:1;
+ uint32_t xpcs2_link_up:1;
+ uint32_t rsrvd:13;
+#endif
+ } bits;
+} cip_link_stat_t;
+
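+/*
+ * Editorial usage sketch: a link-health check built on CipLinkStat. The
+ * surprisePipedn handling follows the description above: writing 1 to
+ * that bit clears the status and releases the PCIe domain from reset.
+ * The accessors and "off" are hypothetical.
+ */
+extern uint32_t hxge_peu_rd32(uint32_t off);		/* hypothetical */
+extern void hxge_peu_wr32(uint32_t off, uint32_t val);	/* hypothetical */
+
+static int
+cip_link_healthy(uint32_t off)
+{
+	cip_link_stat_t stat, clr;
+
+	stat.value = hxge_peu_rd32(off);
+	if (stat.bits.surprise_pipedn) {
+		clr.value = 0;
+		clr.bits.surprise_pipedn = 1;	/* W1C: re-enable the domain */
+		hxge_peu_wr32(off, clr.value);
+		return (0);
+	}
+	return (stat.bits.pipe_core_stable && stat.bits.pcie_domain_stable &&
+	    stat.bits.xmlh_link_up && stat.bits.rdlh_link_up);
+}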
+
+/*
+ * Register: EpcStat
+ * EEPROM PIO Status
+ * Description: EEPROM PIO Status The Host may initiate access to the
+ * EEPROM either through this register or directly by TRGT1 interfaces
+ * using ROM BAR access. Note that since the EEPROM can be accessed
+ * by either Host or SP, access must be granted to the PEU using the
+ * SPI PROM Control Register eepromPeuEn bit for proper operation.
+ * All EEPROM accesses initiated from either the Host or SP are
+ * always acknowledged. If a Host access is not acknowledged, then
+ * check the SPI PROM Control Register eepromPeuEn bit to make sure
+ * the PEU to EEPROM access has been enabled. Meanwhile, Host read
+ * and write accesses through the TRGT1 interface may be held up
+ * waiting for the acknowledgement. Thus, in order to recover from
+ * any faulty/stuck condition due to the blocked EEPROM accesses, the
+ * SP should configure the epcGotoNormal bit in the epcStat register.
+ * When Host accesses are stuck, only the SP can write into this bit
+ * to recover from this condition.
+ * The EEPROM is 1M x 16 bits or 2M bytes. The read address in bits
+ * [22:2] is byte address. The EEPROM access can only be DW access.
+ * While accessing through these registers, the lower 2 bits of the
+ * specified address is ignored resulting in a DW access to the
+ * EEPROM controller. While accessing through the ROM BAR range, only
+ * DW accesses are accepted and all other accesses will result in
+ * error status returned to the host.
+ * The read will initiate two reads to the EPC and the accumulated
+ * 32 bit data is returned to the Host either via the Client2 bus or
+ * in the epcData register depending on the cause of the transaction.
+ * This means, a read addr=0,1,2,3 will return data from EPC
+ * locations 0 & 1 which are 16 bits each, and a read to addr=4,5,6,7
+ * will return data from EPC locations 2,3 which are 16 bits each.
+ * Some examples for the address translation : 1) when Host gives
+ * address 0x0000, it means to get bytes 0,1,2, and 3 from the
+ * EEPROM. These bytes are stored at locations 0x0000 (bytes 0,1) and
+ * 0x0001 (bytes 2,3) in EEPROM. Hence PEU will present address
+ * 0x0000 followed by 0x0001 to the EEPROM.
+ * 2) when Host gives address 0x0004, it means to get bytes 4,5,6,
+ * and 7 from the EEPROM. These bytes are stored at locations 0x0002
+ * (bytes 4,5) and 0x0003 (bytes 6,7) in EEPROM. Hence PEU will
+ * present address 0x0002 followed by 0x0003 to the EEPROM.
+ * etc ..
+ *
+ * Fields:
+ * Force the EPC state machine to go to epIdle state. This bit is
+ * used to force the EPC to skip the reading of the EEPROM and
+ * goto the epIdle state which is normal state for EPC. The bit
+ * is auto-cleared after switching to the epIdle state. Both SP
+ * and HOST can write into this bit. However care must be taken
+ * writing '1' into this bit since setting this bit will flush
+ * out any pending EEPROM access request from Host. Hence, this
+ * bit should be used only if the EPC State machine (cipEpcSm
+ * bits in cipStatus register) is stuck at a non-zero state.
+ * EEPROM Byte Address for read operation This field can be
+ * updated only if there is no pending EEPROM read access.
+ * Software should poll bit 0 of this register (epcRdInit) to
+ * make sure that it is '0' before writing into this. If polled
+ * epcRdInit value is '1', then write to epcAddr field is
+ * ignored. This is to safe-guard the epcAddr value which is
+ * being read out the EEPROM.
+ * Read access completion status; set to '0' for successful
+ * completion by EPC set to '1' to indicate read access error
+ * from EPC
+ * Note: Currently, the EEPROM controller in Hydra does not
+ * return any error condition, ie, epcPeuErr = 1'b0 always. And
+ * so, for the PIO read access by the Host, the epcStat register
+ * in PEU will always show that the access was successful. For
+ * EEPROM read initiated through the ROM BAR by the Host, CIP
+ * will always return Successful Completion status to the Host.
+ * Any error situation is reported only in the Status Register
+ * within the EEPROM device. For access information about this
+ * register, please refer to the EEPROM/SPI PRMs.
+ *
+ * Read Initiate. SW writes 1 to this bit to initiate a EEPROM
+ * read. Clears to 0 on updating the epcData reg. Writing 0 has
+ * no effect.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t epc_goto_normal:1;
+ uint32_t rsrvd:8;
+ uint32_t epc_addr:21;
+ uint32_t epc_cpl_stat:1;
+ uint32_t epc_rd_init:1;
+#else
+ uint32_t epc_rd_init:1;
+ uint32_t epc_cpl_stat:1;
+ uint32_t epc_addr:21;
+ uint32_t rsrvd:8;
+ uint32_t epc_goto_normal:1;
+#endif
+ } bits;
+} epc_stat_t;
+
+
+/*
+ * Register: EpcData
+ * EEPROM PIO Data
+ * Description: EEPROM PIO Data The data returned from EEPROM
+ * controller for the EEPROM access initiated by the EEPROM PIO
+ * Status register is returned in this register.
+ * Fields:
+ * EEPROM Read Data; valid when rdInit transitioned from 1 to 0.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t eeprom_data:32;
+#else
+ uint32_t eeprom_data:32;
+#endif
+ } bits;
+} epc_data_t;
+
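+/*
+ * Editorial usage sketch: the PIO read flow described under EpcStat --
+ * wait for any pending read, program the byte address together with
+ * rdInit, poll rdInit back to 0, then collect the DW from EpcData. The
+ * accessors and both register offsets are hypothetical, and a real
+ * driver would bound both polling loops with a timeout.
+ */
+extern uint32_t hxge_peu_rd32(uint32_t off);		/* hypothetical */
+extern void hxge_peu_wr32(uint32_t off, uint32_t val);	/* hypothetical */
+
+static uint32_t
+eeprom_read_dw(uint32_t stat_off, uint32_t data_off, uint32_t byte_addr)
+{
+	epc_stat_t stat;
+
+	do {	/* epcAddr may only be written while epcRdInit is 0 */
+		stat.value = hxge_peu_rd32(stat_off);
+	} while (stat.bits.epc_rd_init);
+
+	stat.value = 0;
+	stat.bits.epc_addr = byte_addr;	/* DW access: low 2 bits ignored */
+	stat.bits.epc_rd_init = 1;
+	hxge_peu_wr32(stat_off, stat.value);
+
+	do {	/* rdInit clears to 0 once epcData has been updated */
+		stat.value = hxge_peu_rd32(stat_off);
+	} while (stat.bits.epc_rd_init);
+
+	return (hxge_peu_rd32(data_off));
+}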
+
+/*
+ * Register: SpcStat
+ * SPROM PIO Status
+ * Description: SPROM PIO Status
+ * Fields:
+ * Force the SPC state machine to go to NORMAL state. This bit is
+ * used to force the SPC to skip the downloading of the SPROM
+ * contents into the EP/Pipe/Hcr registers. Setting this bit will
+ * make CIP to drop any pending requests to the DBI/AHB buses.
+ * The bit is auto-cleared after switching to the Normal state.
+ * This bit can not be used to terminate a pio access to
+ * PCI/PIPE/HCR registers. If a pio access to these registers is
+ * not responded to, by the respective block, then the pio access
+ * will automatically timeout. The timeout value is specified by
+ * the timeoutCfg:tmoutCnt value
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:29;
+ uint32_t spc_goto_normal:1;
+ uint32_t rsrvd1:2;
+#else
+ uint32_t rsrvd1:2;
+ uint32_t spc_goto_normal:1;
+ uint32_t rsrvd:29;
+#endif
+ } bits;
+} spc_stat_t;
+
+
+/*
+ * Register: Host2spiIndaccAddr
+ * HOST -> SPI Shared Domain Read Address
+ * Description: Read address set by Host for indirect access to
+ * shared domain address space The decoding of the address is as
+ * follows: [23:20] - block select [19:0] - register offset from base
+ * address of block
+ * Fields:
+ * Address in Shared domain
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:8;
+ uint32_t addr:24;
+#else
+ uint32_t addr:24;
+ uint32_t rsrvd:8;
+#endif
+ } bits;
+} host2spi_indacc_addr_t;
+
+
+/*
+ * Register: Host2spiIndaccCtrl
+ * HOST -> SPI Shared Domain Read Control
+ * Description: Control word set by Host for indirect access to the
+ * shared domain address space Writing to this register initiates the
+ * indirect access to the shared domain.
+ * The Host may read or write to a shared domain region data as
+ * below : Host updates the host2spiIndaccAddr register with address
+ * of the shared domain reg. For writes, Host updates the
+ * host2spiIndaccData register with write data Host then writes to
+ * bit 0 of host2spiIndaccCtrl register to '1' or '0' to initiate the
+ * read or write access; 1 : write command, 0 : read command Host
+ * should then poll bit 1 of host2spiIndaccCtrl register for the
+ * access status. 1 : access is done, 0 : access is in progress
+ * (busy) Host should then check bit 2 of host2spiIndaccCtrl register
+ * to know if the command was successful; 1 : access error, 0 :
+ * access successful For reads, Host then reads the
+ * host2spiIndaccData register for the read data.
+ * This register can be written into only when there is no pending
+ * access, ie, indaccCtrl.cplStat=1. Writes when indaccCtrl.cplStat=0
+ * are ignored.
+ *
+ * Fields:
+ * command completion status; 0 : successful completion of
+ * command by SPI 1 : access error from SPI
+ * command progress status; 0 : access is in progress (busy) 1 :
+ * access is done
+ * 1 : Initiate a write access 0 : Initiate a read access
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:29;
+ uint32_t err_stat:1;
+ uint32_t cpl_stat:1;
+ uint32_t rd_wr_cmd:1;
+#else
+ uint32_t rd_wr_cmd:1;
+ uint32_t cpl_stat:1;
+ uint32_t err_stat:1;
+ uint32_t rsrvd:29;
+#endif
+ } bits;
+} host2spi_indacc_ctrl_t;
+
+
+/*
+ * Register: Host2spiIndaccData
+ * HOST -> SPI Shared Domain Read/Write Data
+ * Description: For indirect read access by the Host, this register
+ * returns the data returned from the Shared Domain For indirect
+ * write access by the Host, the host should update this register
+ * with the writeData for the Shared Domain, before writing to the
+ * host2spiIndaccCtrl register to initiate the access.
+ * This register can be written into only when there is no pending
+ * access, ie, indaccCtrl.cplStat=1. Writes when indaccCtrl.cplStat=0
+ * are ignored.
+ *
+ * Fields:
+ * Shared domain read/write data
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} host2spi_indacc_data_t;
+
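+/*
+ * Editorial usage sketch: the indirect shared-domain read handshake
+ * spelled out under Host2spiIndaccCtrl -- program the address, issue a
+ * read command, poll cplStat until done, then check errStat before
+ * trusting the data. The accessors and the three register offsets are
+ * hypothetical, and the polling is unbounded only for brevity.
+ */
+extern uint32_t hxge_peu_rd32(uint32_t off);		/* hypothetical */
+extern void hxge_peu_wr32(uint32_t off, uint32_t val);	/* hypothetical */
+
+static int
+shared_domain_read(uint32_t addr_off, uint32_t ctrl_off, uint32_t data_off,
+    uint32_t sd_addr, uint32_t *datap)
+{
+	host2spi_indacc_ctrl_t ctrl;
+
+	do {	/* writes are ignored while an access is pending */
+		ctrl.value = hxge_peu_rd32(ctrl_off);
+	} while (!ctrl.bits.cpl_stat);
+
+	hxge_peu_wr32(addr_off, sd_addr); /* [23:20] block, [19:0] offset */
+	ctrl.value = 0;			/* rdWrCmd = 0 initiates a read */
+	hxge_peu_wr32(ctrl_off, ctrl.value);
+
+	do {	/* cplStat = 1 indicates the access is done */
+		ctrl.value = hxge_peu_rd32(ctrl_off);
+	} while (!ctrl.bits.cpl_stat);
+
+	if (ctrl.bits.err_stat)
+		return (-1);
+	*datap = hxge_peu_rd32(data_off);
+	return (0);
+}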
+
+/*
+ * Register: BtCtrl0
+ * Mailbox Control & Access status 0
+ * Description: Host (blade) <-> SP Block Transfer mailbox control
+ * and access status register 0.
+ * Host is allowed 8 bits read/write access to this register ; To do
+ * the same, it should provide the btCtrl0 address, data on
+ * hostDataBus[7:0], and assert hostBen[0], SPI is allowed 8 bits
+ * read/write access to this register ; To do the same, it should
+ * provide the btCtrl0 address, data on spiDataBus[7:0], and no need
+ * of spiBen
+ *
+ * Fields:
+ * The SP sets/clears this bit to indicate if it is busy and can
+ * not accept any other request; write 1 to toggle the bit; Read
+ * Only by Host.
+ * The Host sets/clears this bit to indicate if it is busy and
+ * can not accept any other request; Read Only by SP.
+ * Reserved for definition by platform. Typical usage could be
+ * "heartbeat" mechanism from/to the host. The host sets OEM0 to
+ * interrupt the SP and then polls it to be cleared by SP
+ * The SP sets this bit when it has detected and queued an SMS
+ * message in the SP2HOST buffer that must be reported to the
+ * HOST. The Host clears this bit by writing a 1 to it. This bit
+ * may generate an interrupt to Host depending on the sp2hostIntEn
+ * bit. Writing 0 has no effect
+ * The SP writes 1 to this bit after it has finished writing a
+ * message into the SP2HOST buffer. The Host clears this bit by
+ * writing 1 to it after it has set the hostBusy bit This bit may
+ * generate an interrupt to Host depending on the sp2hostIntEn bit.
+ * Writing 0 has no effect
+ * The Host writes 1 to this bit to generate an interrupt to SP
+ * after it has finished writing a message into the HOST2SP
+ * buffer. The SP clears this bit by writing 1 to it after it has
+ * set the spBusy bit. Writing 0 has no effect
+ * The host writes 1 to clear the read pointer to the BT SP2HOST
+ * buffer; the SP writes 1 to clear the read pointer to the BT
+ * HOST2SP buffer. This bit is always read back as 0; writing 0
+ * has no effect.
+ * The host writes 1 to clear the write pointer to the BT HOST2SP
+ * buffer; the SP writes 1 to clear the write pointer to the BT
+ * SP2HOST buffer. This bit is always read back as 0; writing 0
+ * has no effect.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t sp_busy:1;
+ uint32_t host_busy:1;
+ uint32_t oem0:1;
+ uint32_t sms_atn:1;
+ uint32_t sp2host_atn:1;
+ uint32_t host2sp_atn:1;
+ uint32_t clr_rd_ptr:1;
+ uint32_t clr_wr_ptr:1;
+#else
+ uint32_t clr_wr_ptr:1;
+ uint32_t clr_rd_ptr:1;
+ uint32_t host2sp_atn:1;
+ uint32_t sp2host_atn:1;
+ uint32_t sms_atn:1;
+ uint32_t oem0:1;
+ uint32_t host_busy:1;
+ uint32_t sp_busy:1;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} bt_ctrl0_t;
+
+
+/*
+ * Register: BtData0
+ * Mailbox Data 0
+ * Description: Host (blade) <-> SP mailbox data register 0.
+ * The Host is allowed 32-bit read/write access to this register;
+ * to do so, it should provide the btData0 address, data on
+ * hostDataBus[31:0], and assert hostBen[1]. The SPI is allowed
+ * only 8-bit read/write access to this register; to do so, it
+ * should provide the btData0 address and data on spiDataBus[7:0];
+ * spiBen is not needed.
+ * All references to the mailbox control bits in this register
+ * refer to btCtrl0. When spBusy=0 && host2spAtn=0, data is written
+ * by the host and read by the SP. When hostBusy=0 && sp2hostAtn=0,
+ * data is written by the SP and read by the Host.
+ *
+ * Fields:
+ * Bits 7:0 of message data to send to SP/HOST
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t data:8;
+#else
+ uint32_t data:8;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} bt_data0_t;
+
+
+/*
+ * Register: BtIntmask0
+ * Mailbox Interrupt Mask & Status 0
+ * Description: Host (blade) <-> SP Block Transfer Interrupt Mask and
+ * Status register 0
+ * The Host is allowed 8-bit read/write access to this register;
+ * to do so, it should provide the btIntmask0 address, data on
+ * hostDataBus[23:16], and assert hostBen[2]. The SPI is allowed
+ * 8-bit read-only access to this register; to do so, it should
+ * provide the btIntmask0 address; spiBen is not needed.
+ * All references to the mailbox control bits in this register
+ * refer to btCtrl0.
+ * Fields:
+ * The host writes 1 to reset all mailbox 0 access state for
+ * error recovery; this resets both the SP and HOST write and
+ * read pointers. Writing 0 has no effect. This bit is non-sticky
+ * and is always read back as 0.
+ * Reserved for definition by the platform manufacturer for
+ * BIOS/SMI handler use. Generic IPMI software must write this
+ * bit as 0 and ignore the value on read.
+ * Reserved for definition by the platform manufacturer for
+ * BIOS/SMI handler use. Generic IPMI software must write this
+ * bit as 0 and ignore the value on read.
+ * Reserved for definition by the platform manufacturer for
+ * BIOS/SMI handler use. Generic IPMI software must write this
+ * bit as 0 and ignore the value on read.
+ * SP to HOST interrupt status. This bit reflects the state of
+ * the interrupt line to the Host. The OS driver should write 1
+ * to clear.
+ * SP to HOST interrupt enable. The interrupt is generated if
+ * sp2hIrqEn is 1 and either sp2hostAtn or smsAtn is 1.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t mb_master_reset:1;
+ uint32_t rsrvd1:2;
+ uint32_t oem3:1;
+ uint32_t oem2:1;
+ uint32_t oem1:1;
+ uint32_t sp2h_irq:1;
+ uint32_t sp2h_irq_en:1;
+#else
+ uint32_t sp2h_irq_en:1;
+ uint32_t sp2h_irq:1;
+ uint32_t oem1:1;
+ uint32_t oem2:1;
+ uint32_t oem3:1;
+ uint32_t rsrvd1:2;
+ uint32_t mb_master_reset:1;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} bt_intmask0_t;
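+
+/*
+ * Usage sketch (not part of the hardware definition): one plausible
+ * host-side sequence for posting a message to the SP through mailbox
+ * 0, pieced together from the btCtrl0 bit descriptions above. The
+ * pio_read32()/pio_write32() helpers, the register offsets, and the
+ * exact ordering are illustrative assumptions only.
+ */
+extern uint32_t pio_read32(uint32_t off);		/* assumed helper */
+extern void pio_write32(uint32_t off, uint32_t val);	/* assumed helper */
+
+static int
+bt_mbox0_host_send(uint32_t ctrl_off, uint32_t data_off,
+    const uint8_t *msg, int len)
+{
+	bt_ctrl0_t	ctrl;
+	bt_data0_t	data;
+	int		i;
+
+	/* The SP must not be busy with a previous request. */
+	ctrl.value = pio_read32(ctrl_off);
+	if (ctrl.bits.sp_busy == 1)
+		return (-1);	/* caller should retry later */
+
+	/* Reset the HOST2SP write pointer, then stream the message. */
+	ctrl.value = 0;
+	ctrl.bits.clr_wr_ptr = 1;	/* always reads back as 0 */
+	pio_write32(ctrl_off, ctrl.value);
+
+	for (i = 0; i < len; i++) {
+		data.value = 0;
+		data.bits.data = msg[i];
+		pio_write32(data_off, data.value);
+	}
+
+	/* Interrupt the SP: a message is waiting in the HOST2SP buffer. */
+	ctrl.value = 0;
+	ctrl.bits.host2sp_atn = 1;	/* writing 0 has no effect */
+	pio_write32(ctrl_off, ctrl.value);
+	return (0);
+}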
+
+
+/*
+ * Register: BtCtrl1
+ * Mailbox Control & Access status 1
+ * Description: Host (blade) <-> SP Block Transfer mailbox control
+ * and access status register 1.
+ * The Host is allowed 8-bit read/write access to this register;
+ * to do so, it should provide the btCtrl1 address, data on
+ * hostDataBus[7:0], and assert hostBen[0]. The SPI is allowed
+ * 8-bit read/write access to this register; to do so, it should
+ * provide the btCtrl1 address and data on spiDataBus[7:0];
+ * spiBen is not needed.
+ *
+ * Fields:
+ * The SP sets/clears this bit to indicate that it is busy and
+ * cannot accept any other request; write 1 to toggle the bit;
+ * read-only by the Host.
+ * The Host sets/clears this bit to indicate that it is busy and
+ * cannot accept any other request; read-only by the SP.
+ * Reserved for definition by the platform. Typical usage could
+ * be a "heartbeat" mechanism from/to the host. The host sets
+ * OEM0 to interrupt the SP and then polls it until the SP clears
+ * it.
+ * The SP sets this bit when it has detected and queued an SMS
+ * message in the SP2HOST buffer that must be reported to the
+ * HOST. The Host clears this bit by writing a 1 to it. This bit
+ * may generate an interrupt to the Host depending on the
+ * sp2hostIntEn bit. Writing 0 has no effect.
+ * The SP writes 1 to this bit after it has finished writing a
+ * message into the SP2HOST buffer. The Host clears this bit by
+ * writing 1 to it after it has set the hostBusy bit. This bit
+ * may generate an interrupt to the Host depending on the
+ * sp2hostIntEn bit. Writing 0 has no effect.
+ * The Host writes 1 to this bit to generate an interrupt to the
+ * SP after it has finished writing a message into the HOST2SP
+ * buffer. The SP clears this bit by writing 1 to it after it has
+ * set the spBusy bit. Writing 0 has no effect.
+ * The host writes 1 to clear the read pointer to the BT SP2HOST
+ * buffer; the SP writes 1 to clear the read pointer to the BT
+ * HOST2SP buffer. This bit is always read back as 0; writing 0
+ * has no effect.
+ * The host writes 1 to clear the write pointer to the BT HOST2SP
+ * buffer; the SP writes 1 to clear the write pointer to the BT
+ * SP2HOST buffer. This bit is always read back as 0; writing 0
+ * has no effect.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t sp_busy:1;
+ uint32_t host_busy:1;
+ uint32_t oem0:1;
+ uint32_t sms_atn:1;
+ uint32_t sp2host_atn:1;
+ uint32_t host2sp_atn:1;
+ uint32_t clr_rd_ptr:1;
+ uint32_t clr_wr_ptr:1;
+#else
+ uint32_t clr_wr_ptr:1;
+ uint32_t clr_rd_ptr:1;
+ uint32_t host2sp_atn:1;
+ uint32_t sp2host_atn:1;
+ uint32_t sms_atn:1;
+ uint32_t oem0:1;
+ uint32_t host_busy:1;
+ uint32_t sp_busy:1;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} bt_ctrl1_t;
+
+
+/*
+ * Register: BtData1
+ * Mailbox Data 1
+ * Description: Host (blade) <-> SP mailbox data register 1.
+ * The Host is allowed 32-bit read/write access to this register;
+ * to do so, it should provide the btData1 address, data on
+ * hostDataBus[31:0], and assert hostBen[1]. The SPI is allowed
+ * only 8-bit read/write access to this register; to do so, it
+ * should provide the btData1 address and data on spiDataBus[7:0];
+ * spiBen is not needed.
+ * All references to the mailbox control bits in this register
+ * refer to btCtrl1. When spBusy=0 && host2spAtn=0, data is written
+ * by the host and read by the SP. When hostBusy=0 && sp2hostAtn=0,
+ * data is written by the SP and read by the Host.
+ * Fields:
+ * Bits 31:0 of message data to send to SP/HOST
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t data:8;
+#else
+ uint32_t data:8;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} bt_data1_t;
+
+
+/*
+ * Register: BtIntmask1
+ * Mailbox Interrupt Mask & Status 1
+ * Description: Host (blade) <-> SP Block Transfer Interrupt Mask and
+ * Status register 1
+ * The Host is allowed 8-bit read/write access to this register;
+ * to do so, it should provide the btIntmask1 address, data on
+ * hostDataBus[23:16], and assert hostBen[2]. The SPI is allowed
+ * 8-bit read-only access to this register; to do so, it should
+ * provide the btIntmask1 address; spiBen is not needed.
+ * All references to the mailbox control bits in this register
+ * refer to btCtrl1.
+ * Fields:
+ * The host writes 1 to reset all mailbox 1 access state for
+ * error recovery; this resets both the SP and HOST write and
+ * read pointers. Writing 0 has no effect. This bit is non-sticky
+ * and is always read back as 0.
+ * Reserved for definition by the platform manufacturer for
+ * BIOS/SMI handler use. Generic IPMI software must write this
+ * bit as 0 and ignore the value on read.
+ * Reserved for definition by the platform manufacturer for
+ * BIOS/SMI handler use. Generic IPMI software must write this
+ * bit as 0 and ignore the value on read.
+ * Reserved for definition by the platform manufacturer for
+ * BIOS/SMI handler use. Generic IPMI software must write this
+ * bit as 0 and ignore the value on read.
+ * SP to HOST interrupt status. This bit reflects the state of
+ * the interrupt line to the Host. The OS driver should write 1
+ * to clear.
+ * SP to HOST interrupt enable. The interrupt is generated if
+ * sp2hIrqEn is 1 and either sp2hostAtn or smsAtn is 1.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t mb_master_reset:1;
+ uint32_t rsrvd1:2;
+ uint32_t oem3:1;
+ uint32_t oem2:1;
+ uint32_t oem1:1;
+ uint32_t sp2h_irq:1;
+ uint32_t sp2h_irq_en:1;
+#else
+ uint32_t sp2h_irq_en:1;
+ uint32_t sp2h_irq:1;
+ uint32_t oem1:1;
+ uint32_t oem2:1;
+ uint32_t oem3:1;
+ uint32_t rsrvd1:2;
+ uint32_t mb_master_reset:1;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} bt_intmask1_t;
+
+
+/*
+ * Register: BtCtrl2
+ * Mailbox Control & Access status 2
+ * Description: Host (blade) <-> SP Block Transfer mailbox control
+ * and access status register 2.
+ * The Host is allowed 8-bit read/write access to this register;
+ * to do so, it should provide the btCtrl2 address, data on
+ * hostDataBus[7:0], and assert hostBen[0]. The SPI is allowed
+ * 8-bit read/write access to this register; to do so, it should
+ * provide the btCtrl2 address and data on spiDataBus[7:0];
+ * spiBen is not needed.
+ *
+ * Fields:
+ * The SP sets/clears this bit to indicate that it is busy and
+ * cannot accept any other request; write 1 to toggle the bit;
+ * read-only by the Host.
+ * The Host sets/clears this bit to indicate that it is busy and
+ * cannot accept any other request; read-only by the SP.
+ * Reserved for definition by the platform. Typical usage could
+ * be a "heartbeat" mechanism from/to the host. The host sets
+ * OEM0 to interrupt the SP and then polls it until the SP clears
+ * it.
+ * The SP sets this bit when it has detected and queued an SMS
+ * message in the SP2HOST buffer that must be reported to the
+ * HOST. The Host clears this bit by writing a 1 to it. This bit
+ * may generate an interrupt to the Host depending on the
+ * sp2hostIntEn bit. Writing 0 has no effect.
+ * The SP writes 1 to this bit after it has finished writing a
+ * message into the SP2HOST buffer. The Host clears this bit by
+ * writing 1 to it after it has set the hostBusy bit. This bit
+ * may generate an interrupt to the Host depending on the
+ * sp2hostIntEn bit. Writing 0 has no effect.
+ * The Host writes 1 to this bit to generate an interrupt to the
+ * SP after it has finished writing a message into the HOST2SP
+ * buffer. The SP clears this bit by writing 1 to it after it has
+ * set the spBusy bit. Writing 0 has no effect.
+ * The host writes 1 to clear the read pointer to the BT SP2HOST
+ * buffer; the SP writes 1 to clear the read pointer to the BT
+ * HOST2SP buffer. This bit is always read back as 0; writing 0
+ * has no effect.
+ * The host writes 1 to clear the write pointer to the BT HOST2SP
+ * buffer; the SP writes 1 to clear the write pointer to the BT
+ * SP2HOST buffer. This bit is always read back as 0; writing 0
+ * has no effect.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t sp_busy:1;
+ uint32_t host_busy:1;
+ uint32_t oem0:1;
+ uint32_t sms_atn:1;
+ uint32_t sp2host_atn:1;
+ uint32_t host2sp_atn:1;
+ uint32_t clr_rd_ptr:1;
+ uint32_t clr_wr_ptr:1;
+#else
+ uint32_t clr_wr_ptr:1;
+ uint32_t clr_rd_ptr:1;
+ uint32_t host2sp_atn:1;
+ uint32_t sp2host_atn:1;
+ uint32_t sms_atn:1;
+ uint32_t oem0:1;
+ uint32_t host_busy:1;
+ uint32_t sp_busy:1;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} bt_ctrl2_t;
+
+
+/*
+ * Register: BtData2
+ * Mailbox Data 2
+ * Description: Host (blade) <-> SP mailbox data register 2. All
+ * references to the mailbox control bits in this register refer to
+ * btCtrl2.
+ * The Host is allowed 32-bit read/write access to this register;
+ * to do so, it should provide the btData2 address, data on
+ * hostDataBus[31:0], and assert hostBen[1]. The SPI is allowed
+ * only 8-bit read/write access to this register; to do so, it
+ * should provide the btData2 address and data on spiDataBus[7:0];
+ * spiBen is not needed.
+ * When spBusy=0 && host2spAtn=0, data is written by the host and
+ * read by the SP. When hostBusy=0 && sp2hostAtn=0, data is written
+ * by the SP and read by the Host.
+ * Fields:
+ * Bits 31:0 of message data to send to SP/HOST
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t data:8;
+#else
+ uint32_t data:8;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} bt_data2_t;
+
+
+/*
+ * Register: BtIntmask2
+ * Mailbox Interrupt Mask & Status 2
+ * Description: Host (blade) <-> SP Block Transfer Interrupt Mask and
+ * Status register 2
+ * The Host is allowed 8-bit read/write access to this register;
+ * to do so, it should provide the btIntmask2 address, data on
+ * hostDataBus[23:16], and assert hostBen[2]. The SPI is allowed
+ * 8-bit read-only access to this register; to do so, it should
+ * provide the btIntmask2 address; spiBen is not needed.
+ * All references to the mailbox control bits in this register
+ * refer to btCtrl2.
+ * Fields:
+ * The host writes 1 to reset all mailbox 2 access state for
+ * error recovery; this resets both the SP and HOST write and
+ * read pointers. Writing 0 has no effect. This bit is non-sticky
+ * and is always read back as 0.
+ * Reserved for definition by the platform manufacturer for
+ * BIOS/SMI handler use. Generic IPMI software must write this
+ * bit as 0 and ignore the value on read.
+ * Reserved for definition by the platform manufacturer for
+ * BIOS/SMI handler use. Generic IPMI software must write this
+ * bit as 0 and ignore the value on read.
+ * Reserved for definition by the platform manufacturer for
+ * BIOS/SMI handler use. Generic IPMI software must write this
+ * bit as 0 and ignore the value on read.
+ * SP to HOST interrupt status. This bit reflects the state of
+ * the interrupt line to the Host. The OS driver should write 1
+ * to clear.
+ * SP to HOST interrupt enable. The interrupt is generated if
+ * sp2hIrqEn is 1 and either sp2hostAtn or smsAtn is 1.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t mb_master_reset:1;
+ uint32_t rsrvd1:2;
+ uint32_t oem3:1;
+ uint32_t oem2:1;
+ uint32_t oem1:1;
+ uint32_t sp2h_irq:1;
+ uint32_t sp2h_irq_en:1;
+#else
+ uint32_t sp2h_irq_en:1;
+ uint32_t sp2h_irq:1;
+ uint32_t oem1:1;
+ uint32_t oem2:1;
+ uint32_t oem3:1;
+ uint32_t rsrvd1:2;
+ uint32_t mb_master_reset:1;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} bt_intmask2_t;
+
+
+/*
+ * Register: BtCtrl3
+ * Mailbox Control & Access status 3
+ * Description: Host (blade) <-> SP Block Transfer mailbox control
+ * and access status register 3.
+ * The Host is allowed 8-bit read/write access to this register;
+ * to do so, it should provide the btCtrl3 address, data on
+ * hostDataBus[7:0], and assert hostBen[0]. The SPI is allowed
+ * 8-bit read/write access to this register; to do so, it should
+ * provide the btCtrl3 address and data on spiDataBus[7:0];
+ * spiBen is not needed.
+ *
+ * Fields:
+ * The SP sets/clears this bit to indicate that it is busy and
+ * cannot accept any other request; write 1 to toggle the bit;
+ * read-only by the Host.
+ * The Host sets/clears this bit to indicate that it is busy and
+ * cannot accept any other request; read-only by the SP.
+ * Reserved for definition by the platform. Typical usage could
+ * be a "heartbeat" mechanism from/to the host. The host sets
+ * OEM0 to interrupt the SP and then polls it until the SP clears
+ * it.
+ * The SP sets this bit when it has detected and queued an SMS
+ * message in the SP2HOST buffer that must be reported to the
+ * HOST. The Host clears this bit by writing a 1 to it. This bit
+ * may generate an interrupt to the Host depending on the
+ * sp2hostIntEn bit. Writing 0 has no effect.
+ * The SP writes 1 to this bit after it has finished writing a
+ * message into the SP2HOST buffer. The Host clears this bit by
+ * writing 1 to it after it has set the hostBusy bit. This bit
+ * may generate an interrupt to the Host depending on the
+ * sp2hostIntEn bit. Writing 0 has no effect.
+ * The Host writes 1 to this bit to generate an interrupt to the
+ * SP after it has finished writing a message into the HOST2SP
+ * buffer. The SP clears this bit by writing 1 to it after it has
+ * set the spBusy bit. Writing 0 has no effect.
+ * The host writes 1 to clear the read pointer to the BT SP2HOST
+ * buffer; the SP writes 1 to clear the read pointer to the BT
+ * HOST2SP buffer. This bit is always read back as 0; writing 0
+ * has no effect.
+ * The host writes 1 to clear the write pointer to the BT HOST2SP
+ * buffer; the SP writes 1 to clear the write pointer to the BT
+ * SP2HOST buffer. This bit is always read back as 0; writing 0
+ * has no effect.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t sp_busy:1;
+ uint32_t host_busy:1;
+ uint32_t oem0:1;
+ uint32_t sms_atn:1;
+ uint32_t sp2host_atn:1;
+ uint32_t host2sp_atn:1;
+ uint32_t clr_rd_ptr:1;
+ uint32_t clr_wr_ptr:1;
+#else
+ uint32_t clr_wr_ptr:1;
+ uint32_t clr_rd_ptr:1;
+ uint32_t host2sp_atn:1;
+ uint32_t sp2host_atn:1;
+ uint32_t sms_atn:1;
+ uint32_t oem0:1;
+ uint32_t host_busy:1;
+ uint32_t sp_busy:1;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} bt_ctrl3_t;
+
+
+/*
+ * Register: BtData3
+ * Mailbox Data 3
+ * Description: Host (blade) <-> SP mailbox data register 3.
+ * The Host is allowed 32-bit read/write access to this register;
+ * to do so, it should provide the btData3 address, data on
+ * hostDataBus[31:0], and assert hostBen[1]. The SPI is allowed
+ * only 8-bit read/write access to this register; to do so, it
+ * should provide the btData3 address and data on spiDataBus[7:0];
+ * spiBen is not needed.
+ * All references to the mailbox control bits in this register
+ * refer to btCtrl3. When spBusy=0 && host2spAtn=0, data is written
+ * by the host and read by the SP. When hostBusy=0 && sp2hostAtn=0,
+ * data is written by the SP and read by the Host.
+ * Fields:
+ * Bits 31:0 of message data to send to SP/HOST
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t data:8;
+#else
+ uint32_t data:8;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} bt_data3_t;
+
+
+/*
+ * Register: BtIntmask3
+ * Mailbox Interrupt Mask & Status 3
+ * Description: Host (blade) <-> SP Block Transfer Interrupt Mask and
+ * Status register 3
+ * The Host is allowed 8-bit read/write access to this register;
+ * to do so, it should provide the btIntmask3 address, data on
+ * hostDataBus[23:16], and assert hostBen[2]. The SPI is allowed
+ * 8-bit read-only access to this register; to do so, it should
+ * provide the btIntmask3 address; spiBen is not needed.
+ * All references to the mailbox control bits in this register
+ * refer to btCtrl3.
+ * Fields:
+ * The host writes 1 to reset all mailbox 3 access state for
+ * error recovery; this resets both the SP and HOST write and
+ * read pointers. Writing 0 has no effect. This bit is non-sticky
+ * and is always read back as 0.
+ * Reserved for definition by the platform manufacturer for
+ * BIOS/SMI handler use. Generic IPMI software must write this
+ * bit as 0 and ignore the value on read.
+ * Reserved for definition by the platform manufacturer for
+ * BIOS/SMI handler use. Generic IPMI software must write this
+ * bit as 0 and ignore the value on read.
+ * Reserved for definition by the platform manufacturer for
+ * BIOS/SMI handler use. Generic IPMI software must write this
+ * bit as 0 and ignore the value on read.
+ * SP to HOST interrupt status. This bit reflects the state of
+ * the interrupt line to the Host. The OS driver should write 1
+ * to clear.
+ * SP to HOST interrupt enable. The interrupt is generated if
+ * sp2hIrqEn is 1 and either sp2hostAtn or smsAtn is 1.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:24;
+ uint32_t mb_master_reset:1;
+ uint32_t rsrvd1:2;
+ uint32_t oem3:1;
+ uint32_t oem2:1;
+ uint32_t oem1:1;
+ uint32_t sp2h_irq:1;
+ uint32_t sp2h_irq_en:1;
+#else
+ uint32_t sp2h_irq_en:1;
+ uint32_t sp2h_irq:1;
+ uint32_t oem1:1;
+ uint32_t oem2:1;
+ uint32_t oem3:1;
+ uint32_t rsrvd1:2;
+ uint32_t mb_master_reset:1;
+ uint32_t rsrvd:24;
+#endif
+ } bits;
+} bt_intmask3_t;
+
+
+/*
+ * Register: DebugSel
+ * CIP Debug Data Select
+ * Description: Selects the debug data signals from the CIP blocks
+ * Fields:
+ * Selects up to 16 groups of gbtDebug/pipeDebug on
+ * peuPhyVdbgDebugPort[31:0]
+ * Selects the high DW of the debug data - default is PCIe link
+ * status
+ * Selects the low DW of the debug data
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:12;
+ uint32_t phy_dbug_sel:4;
+ uint32_t rsrvd1:3;
+ uint32_t cip_hdbug_sel:5;
+ uint32_t rsrvd2:3;
+ uint32_t cip_ldbug_sel:5;
+#else
+ uint32_t cip_ldbug_sel:5;
+ uint32_t rsrvd2:3;
+ uint32_t cip_hdbug_sel:5;
+ uint32_t rsrvd1:3;
+ uint32_t phy_dbug_sel:4;
+ uint32_t rsrvd:12;
+#endif
+ } bits;
+} debug_sel_t;
+
+
+/*
+ * Register: IndaccMem0Ctrl
+ * CIP Mem0 Debug ctrl
+ * Description: Debug data signals from the CIP blocks
+ * Fields:
+ * 1: rd/wr access is done 0: rd/wr access is in progress
+ * 1: pkt injection is done 0: pkt injection is in progress
+ * Ingress pkt injection enable: write to 1 for single pkt
+ * injection. Must be 0 when enabling diagnostic rd/wr access to
+ * memories.
+ * 1: Diagnostic rd/wr access to memories enabled 0: Diagnostic
+ * rd/wr access to memories disabled. Must be 0 when enabling pkt
+ * injection.
+ * 1: read, 0: write
+ * This bit is read/writable only if mem0Diagen=1, or if the
+ * mem0Diagen bit is written with '1' in the same access that
+ * enables this bit; otherwise the write has no effect. 1: Apply
+ * the parity mask provided in the Prty register 0: Do not apply
+ * the parity mask provided in the Prty register
+ * 0 : select npdataq memory 1 : select nphdrq memory 2 : select
+ * pdataq memory 3 : select phdrq memory 4 : select cpldataq
+ * memory 5 : select cplhdrq memory
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mem0_access_status:1;
+ uint32_t rsrvd:5;
+ uint32_t mem0_pktinj_stat:1;
+ uint32_t mem0_pktinj_en:1;
+ uint32_t rsrvd1:1;
+ uint32_t mem0_diagen:1;
+ uint32_t mem0_command:1;
+ uint32_t mem0_prty_wen:1;
+ uint32_t rsrvd2:1;
+ uint32_t mem0_sel:3;
+ uint32_t mem0_addr:16;
+#else
+ uint32_t mem0_addr:16;
+ uint32_t mem0_sel:3;
+ uint32_t rsrvd2:1;
+ uint32_t mem0_prty_wen:1;
+ uint32_t mem0_command:1;
+ uint32_t mem0_diagen:1;
+ uint32_t rsrvd1:1;
+ uint32_t mem0_pktinj_en:1;
+ uint32_t mem0_pktinj_stat:1;
+ uint32_t rsrvd:5;
+ uint32_t mem0_access_status:1;
+#endif
+ } bits;
+} indacc_mem0_ctrl_t;
+
+
+/*
+ * Register: IndaccMem0Data0
+ * CIP Mem0 Debug Data0
+ * Description: Debug data signals from the CIP blocks
+ * Fields:
+ * When pktInjectionEnable is 0: Data[31:0] from/for the memory
+ * selected by the mem0Sel bits of mem0Ctrl. This data is written
+ * to the memory when the indaccMem0Ctrl register is written with
+ * the write command. When the indaccMem0Ctrl register is written
+ * with the read command, this register will hold the Data[31:0]
+ * returned from the memory. When pktInjectionEnable is 1:
+ * debugData0Reg[31:0] is used in the following ways: [17:16] =
+ * radmTrgt1Fmt[1:0]: 2'b00 3DW MRd 2'b01 4DW MRd 2'b10 3DW MWr
+ * 2'b11 4DW MWr [13:12] = radmTrgt1DwLen[1:0]: 2'b01 1DW 2'b10
+ * 2DW [11:8] = radmTrgt1LastBe[3:0]: 4'b0000 1DW 4'b1111 2DW [7]
+ * = radmTrgt1RomInRange 1'b0 PIO Access 1'b1 EEPROM Access [6:4]
+ * = radmTrgt1InMembarRange[2:0] 3'b000 PIO Access 3'b010 MSIX
+ * Ram/PBA Table Access [1:0] = radmTrgt1Dwen[1:0] 2'b01
+ * 1DW->last DW is at radmTrgt1Data[31:0] 2'b11 2DW->last DW is
+ * at radmTrgt1Data[63:32]
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mem0_data0:32;
+#else
+ uint32_t mem0_data0:32;
+#endif
+ } bits;
+} indacc_mem0_data0_t;
+
+
+/*
+ * Register: IndaccMem0Data1
+ * CIP Mem0 Debug Data1
+ * Description: Debug data signals from the CIP blocks
+ * Fields:
+ * When pktInjectionEnable is 0: Data[63:32] from/for the memory
+ * selected by the mem0Sel bits of mem0Ctrl. This data is written
+ * to the memory when the indaccMem0Ctrl register is written with
+ * the write command. When the indaccMem0Ctrl register is written
+ * with the read command, this register will hold the Data[63:32]
+ * returned from the memory. When pktInjectionEnable is 1:
+ * debugData1Reg[31:0] is used as radmTrgt1Addr[31:0].
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mem0_data1:32;
+#else
+ uint32_t mem0_data1:32;
+#endif
+ } bits;
+} indacc_mem0_data1_t;
+
+
+/*
+ * Register: IndaccMem0Data2
+ * CIP Mem0 Debug Data2
+ * Description: Debug data signals from the CIP blocks
+ * Fields:
+ * When pktInjectionEnable is 0: Data[95:64] from/for the memory
+ * selected by the mem0Sel bits of mem0Ctrl. This data is written
+ * to the memory when the indaccMem0Ctrl register is written with
+ * the write command. When the indaccMem0Ctrl register is written
+ * with the read command, this register will hold the Data[95:64]
+ * returned from the memory. When pktInjectionEnable is 1:
+ * debugData2Reg[31:0] is used as radmTrgt1Data[63:32]. Allows up
+ * to QW=2DW access.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mem0_data2:32;
+#else
+ uint32_t mem0_data2:32;
+#endif
+ } bits;
+} indacc_mem0_data2_t;
+
+
+/*
+ * Register: IndaccMem0Data3
+ * CIP Mem0 Debug Data3
+ * Description: Debug data signals from the CIP blocks
+ * Fields:
+ * When pktInjectionEnable is 0: Data[127:96] from/for the memory
+ * selected by the mem0Sel bits of mem0Ctrl. This data is written
+ * to the memory when the indaccMem0Ctrl register is written with
+ * the write command. When the indaccMem0Ctrl register is written
+ * with the read command, this register will hold the Data[127:96]
+ * returned from the memory. When pktInjectionEnable is 1:
+ * debugData3Reg[31:0] is used as radmTrgt1Data[31:0].
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mem0_data3:32;
+#else
+ uint32_t mem0_data3:32;
+#endif
+ } bits;
+} indacc_mem0_data3_t;
+
+
+/*
+ * Register: IndaccMem0Prty
+ * CIP Mem0 Debug Parity
+ * Description: Debug data signals from the CIP blocks
+ * Fields:
+ * Parity mask bits, used to inject a parity error into the
+ * memory selected by the mem0Sel bits of mem0Ctrl. These bits
+ * serve two purposes regarding memory parity: - During indirect
+ * write access to the memories, the value in this register is
+ * applied as a mask to the actual parity if the prtyWen bit of
+ * the indaccCtrl register has been enabled. The masked parity
+ * and data are written into the specified memory location. -
+ * During indirect read access to the memories, the value in this
+ * register is overwritten with the parity value read from the
+ * memory location. If the parity mask had been set and enabled
+ * to be written into this location, it will generate a parity
+ * error for that memory location.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:18;
+ uint32_t mem0_parity:14;
+#else
+ uint32_t mem0_parity:14;
+ uint32_t rsrvd:18;
+#endif
+ } bits;
+} indacc_mem0_prty_t;
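+
+/*
+ * Usage sketch (not part of the hardware definition): a hypothetical
+ * diagnostic read of one 128-bit word from a CIP mem0 memory through
+ * indaccMem0Ctrl and indaccMem0Data0..3. The pio helpers, the
+ * register offsets, and the assumption that the four data registers
+ * sit at consecutive 4-byte offsets are all illustrative; packet
+ * injection must be disabled while diagnostic access is enabled.
+ */
+extern uint32_t pio_read32(uint32_t off);		/* assumed helper */
+extern void pio_write32(uint32_t off, uint32_t val);	/* assumed helper */
+
+static int
+cip_mem0_diag_read(uint32_t ctrl_off, uint32_t data0_off,
+    uint32_t mem_sel, uint32_t addr, uint32_t word[4])
+{
+	indacc_mem0_ctrl_t	ctrl;
+	int			i, tries;
+
+	ctrl.value = 0;
+	ctrl.bits.mem0_diagen = 1;	/* enable diagnostic rd/wr access */
+	ctrl.bits.mem0_command = 1;	/* 1: read, 0: write */
+	ctrl.bits.mem0_sel = mem_sel;	/* 0..5 selects the target memory */
+	ctrl.bits.mem0_addr = addr;
+	pio_write32(ctrl_off, ctrl.value);
+
+	/* Poll the access status; 1 means the rd/wr access is done. */
+	for (tries = 0; tries < 1000; tries++) {
+		ctrl.value = pio_read32(ctrl_off);
+		if (ctrl.bits.mem0_access_status == 1)
+			break;
+	}
+	if (ctrl.bits.mem0_access_status != 1)
+		return (-1);
+
+	/* Data0..Data3 hold Data[31:0] .. Data[127:96] after a read. */
+	for (i = 0; i < 4; i++)
+		word[i] = pio_read32(data0_off + i * 4);
+
+	pio_write32(ctrl_off, 0);	/* disable diagnostic access */
+	return (0);
+}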
+
+
+/*
+ * Register: IndaccMem1Ctrl
+ * CIP Mem1 Debug ctrl
+ * Description: Debug data signals from the CIP blocks
+ * Fields:
+ * 1: rd/wr access is done 0: rd/wr access is in progress
+ * 1: client pkt injection is done 0: client pkt injection is in
+ * progress
+ * 1: client1 pkt injection 0: client0 pkt injection
+ * Mutually exclusive: Either client0 or client1 egress pkt
+ * injection enable: write to 1 for single pkt injection. Must be
+ * 0 when enabling diagnostic rd/wr access to memories.
+ * 1: Diagnostic rd/wr access enabled 0: Diagnostic rd/wr access
+ * disabled. Must be 0 when enabling pkt injection.
+ * 1: read, 0: write
+ * This bit is read/writable only if mem1Diagen=1, or if the
+ * mem1Diagen bit is written with '1' in the same access that
+ * enables this bit; otherwise the write has no effect. 1: Apply
+ * the parity mask provided in the Prty register 0: Do not apply
+ * the parity mask provided in the Prty register
+ * 0 : select retry sot memory 1 : select retry buffer memory 2 :
+ * select msix memory 3 : select hcr cfg memory
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mem1_access_status:1;
+ uint32_t rsrvd:4;
+ uint32_t mem1_pktinj_stat:1;
+ uint32_t mem1_pktinj_client:1;
+ uint32_t mem1_pktinj_en:1;
+ uint32_t rsrvd1:1;
+ uint32_t mem1_diagen:1;
+ uint32_t mem1_command:1;
+ uint32_t mem1_prty_wen:1;
+ uint32_t rsrvd2:2;
+ uint32_t mem1_sel:2;
+ uint32_t mem1_addr:16;
+#else
+ uint32_t mem1_addr:16;
+ uint32_t mem1_sel:2;
+ uint32_t rsrvd2:2;
+ uint32_t mem1_prty_wen:1;
+ uint32_t mem1_command:1;
+ uint32_t mem1_diagen:1;
+ uint32_t rsrvd1:1;
+ uint32_t mem1_pktinj_en:1;
+ uint32_t mem1_pktinj_client:1;
+ uint32_t mem1_pktinj_stat:1;
+ uint32_t rsrvd:4;
+ uint32_t mem1_access_status:1;
+#endif
+ } bits;
+} indacc_mem1_ctrl_t;
+
+
+/*
+ * Register: IndaccMem1Data0
+ * CIP Mem1 Debug Data0
+ * Description: Debug data signals from the CIP blocks
+ * Fields:
+ * When pktInjectionEnable is 0: Data[31:0] from/for the memory
+ * selected by the mem1Sel bits of mem1Ctrl. This data is written
+ * to the memory when the indaccMem1Ctrl register is written with
+ * the write command. When the indaccMem1Ctrl register is written
+ * with the read command, this register will hold the Data[31:0]
+ * returned from the memory.
+ * When pktInjectionEnable is 1: debugData0Reg[31:0] is used in
+ * the following ways: [27:26] = tdcPeuTlp0[or
+ * rdcPeuTlp1]_fmt[1:0]: 2'b00 3DW MRd 2'b01 4DW MRd 2'b10 3DW
+ * MWr 2'b11 4DW MWr [25:13] = tdcPeuTlp0[or
+ * rdcPeuTlp1]_byteLen[12:0]: Note MWr must be limited to 4B =
+ * 13'b0000000000001. [12:8] = tdcPeuTlp0[or
+ * rdcPeuTlp1]_tid[4:0]: 5 lsb of tid (TAG ID) [7:0] =
+ * tdcPeuTlp0[or rdcPeuTlp1]_byteEn[7:0]: [7:4] = last DW byte
+ * enables [3:0] = first DW byte enables
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mem1_data0:32;
+#else
+ uint32_t mem1_data0:32;
+#endif
+ } bits;
+} indacc_mem1_data0_t;
+
+
+/*
+ * Register: IndaccMem1Data1
+ * CIP Mem1 Debug Data1
+ * Description: Debug data signals from the CIP blocks
+ * Fields:
+ * When pktInjectionEnable is 0: Data[63:32] from/for the memory
+ * selected by the mem1Sel bits of mem1Ctrl. This data is written
+ * to the memory when the indaccMem1Ctrl register is written with
+ * the write command. When the indaccMem1Ctrl register is written
+ * with the read command, this register will hold the Data[63:32]
+ * returned from the memory.
+ * When pktInjectionEnable is 1: debugData1Reg[31:0] is used as
+ * tdcPeuTlp0[or rdcPeuTlp1]_addr[63:32] high address bits.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mem1_data1:32;
+#else
+ uint32_t mem1_data1:32;
+#endif
+ } bits;
+} indacc_mem1_data1_t;
+
+
+/*
+ * Register: IndaccMem1Data2
+ * CIP Mem1 Debug Data2
+ * Description: Debug data signals from the CIP blocks
+ * Fields:
+ * When pktInjectionEnable is 0: Data[95:64] from/for the memory
+ * selected by the mem1Sel bits of mem1Ctrl. This data is written
+ * to the memory when the indaccMem1Ctrl register is written with
+ * the write command. When the indaccMem1Ctrl register is written
+ * with the read command, this register will hold the Data[95:64]
+ * returned from the memory.
+ * When pktInjectionEnable is 1: debugData2Reg[31:0] is used as
+ * tdcPeuTlp0[or rdcPeuTlp1]_addr[31:0] low address bits.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mem1_data2:32;
+#else
+ uint32_t mem1_data2:32;
+#endif
+ } bits;
+} indacc_mem1_data2_t;
+
+
+/*
+ * Register: IndaccMem1Data3
+ * CIP Mem1 Debug Data3
+ * Description: Debug data signals from the CIP blocks
+ * Fields:
+ * When pktInjectionEnable is 0: Data[127:96] from/for the memory
+ * selected by the mem1Sel bits of mem1Ctrl. This data is written
+ * to the memory when the indaccMem1Ctrl register is written with
+ * the write command. When the indaccMem1Ctrl register is written
+ * with the read command, this register will hold the Data[127:96]
+ * returned from the memory.
+ * When pktInjectionEnable is 1: debugData3Reg[31:0] is used as
+ * tdcPeuTlp0[or rdcPeuTlp1]_data[31:0] Limited for MWr to 1 DW.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t mem1_data3:32;
+#else
+ uint32_t mem1_data3:32;
+#endif
+ } bits;
+} indacc_mem1_data3_t;
+
+
+/*
+ * Register: IndaccMem1Prty
+ * CIP Mem1 Debug Parity
+ * Description: Debug data signals from the CIP blocks
+ * Fields:
+ * Parity mask bits, used to inject a parity error into the
+ * memory selected by the mem1Sel bits of mem1Ctrl. These bits
+ * serve two purposes regarding memory parity: - During indirect
+ * write access to the memories, the value in this register is
+ * applied as a mask to the actual parity if the prtyWen bit of
+ * the indaccCtrl register has been enabled. The masked parity
+ * and data are written into the specified memory location. -
+ * During indirect read access to the memories, the value in this
+ * register is overwritten with the parity value read from the
+ * memory location. If the parity mask had been set and enabled
+ * to be written into this location, it will generate a parity
+ * error for that memory location.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:20;
+ uint32_t mem1_parity:12;
+#else
+ uint32_t mem1_parity:12;
+ uint32_t rsrvd:20;
+#endif
+ } bits;
+} indacc_mem1_prty_t;
+
+
+/*
+ * Register: PhyDebugTrainingVec
+ * peuPhy Debug Training Vector
+ * Description: peuPhy Debug Training Vector register.
+ * Fields:
+ * Hard-coded value for peuPhy wrt global debug training block
+ * signatures.
+ * Blade number; the value read depends on the blade this block
+ * resides on.
+ * Debug training vector; the sub-group select value of 0 selects
+ * this vector.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t dbg_msb:1;
+ uint32_t bld_num:3;
+ uint32_t phydbg_training_vec:28;
+#else
+ uint32_t phydbg_training_vec:28;
+ uint32_t bld_num:3;
+ uint32_t dbg_msb:1;
+#endif
+ } bits;
+} phy_debug_training_vec_t;
+
+
+/*
+ * Register: PeuDebugTrainingVec
+ * PEU Debug Training Vector
+ * Description: PEU Debug Training Vector register.
+ * Fields:
+ * Hard-coded value for PEU (VNMy - core clk domain) wrt global
+ * debug training block signatures.
+ * Blade number; the value read depends on the blade this block
+ * resides on.
+ * Debug training vector; the sub-group select value of 0 selects
+ * this vector.
+ * Hard-coded value for PEU (VNMy - core clk domain) wrt global
+ * debug training block signatures.
+ * Blade number; the value read depends on the blade this block
+ * resides on.
+ * Debug training vector; the sub-group select value of 0 selects
+ * this vector.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t dbgmsb_upper:1;
+ uint32_t bld_num_upper:3;
+ uint32_t peudbg_upper_training_vec:12;
+ uint32_t dbgmsb_lower:1;
+ uint32_t bld_num_lower:3;
+ uint32_t peudbg_lower_training_vec:12;
+#else
+ uint32_t peudbg_lower_training_vec:12;
+ uint32_t bld_num_lower:3;
+ uint32_t dbgmsb_lower:1;
+ uint32_t peudbg_upper_training_vec:12;
+ uint32_t bld_num_upper:3;
+ uint32_t dbgmsb_upper:1;
+#endif
+ } bits;
+} peu_debug_training_vec_t;
+
+
+/*
+ * Register: PipeCfg0
+ * PIPE Configuration
+ * Description: These are controls signals for the pipe core and are
+ * used to define the PIPE core configuration with PipeCfg1 reg value
+ * (0x08124)
+ * Fields:
+ * If this bit is 1 when the pipe reset is released, then the
+ * value on the pipe core's input port 'pipeParameter' is loaded
+ * into the Pipe Core's internal Rx/Tx Parameter register, which
+ * is pipeRxTxParam at addr 0x01010. Note that it is software's
+ * responsibility to program the pipeParameter (Pipe Cfg1)
+ * register correctly: e.g. LOSADJ must be 0x1.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:21;
+ uint32_t pipe_serdes_x1:1;
+ uint32_t pipe_force_ewrap:1;
+ uint32_t pipe_force_loopback:1;
+ uint32_t pipe_force_parm:1;
+ uint32_t pipe_freq_sel:1;
+ uint32_t pipe_p1_pdown:1;
+ uint32_t pipe_p1_pdtx:1;
+ uint32_t pipe_same_sel:1;
+ uint32_t pipe_system_clk:1;
+ uint32_t gbt_term_i:2;
+#else
+ uint32_t gbt_term_i:2;
+ uint32_t pipe_system_clk:1;
+ uint32_t pipe_same_sel:1;
+ uint32_t pipe_p1_pdtx:1;
+ uint32_t pipe_p1_pdown:1;
+ uint32_t pipe_freq_sel:1;
+ uint32_t pipe_force_parm:1;
+ uint32_t pipe_force_loopback:1;
+ uint32_t pipe_force_ewrap:1;
+ uint32_t pipe_serdes_x1:1;
+ uint32_t rsrvd:21;
+#endif
+ } bits;
+} pipe_cfg0_t;
+
+
+/*
+ * Register: PipeCfg1
+ * PIPE Configuration
+ * Description: These values define the PIPE core configuration and
+ * are presented on the Pipe core's input port 'pipeParameter'.
+ * The value on the pipe core's input 'pipeParameter' is loaded into
+ * the pipe core's internal Rx/Tx Parameter register, which is
+ * pipeRxTxParam at addr 0x01010, by setting the pipeForceParm bit of
+ * the Pipe Cfg0 Register at address 0x08120.
+ *
+ * Fields:
+ * Tx Driver Emphasis
+ * Serial output Slew Rate Control
+ * Tx Voltage Mux control
+ * Tx Voltage Pulse control
+ * Output Swing setting
+ * Transmitter Clock generator pole adjust
+ * Transmitter Clock generator zero adjust
+ * Receiver Clock generator pole adjust
+ * Receiver Clock generator zero adjust
+ * Bias Control for factory testing and debugging
+ * Receiver LOS Threshold adjustment. LSI suggests this POR
+ * default value must be 0x1 (which is the POR default value of
+ * the Pipe Rx/Tx Parameter Register).
+ * Receiver Input Equalizer control
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:1;
+ uint32_t emph:3;
+ uint32_t rsrvd1:1;
+ uint32_t risefall:3;
+ uint32_t vmuxlo:2;
+ uint32_t vpulselo:2;
+ uint32_t vtxlo:4;
+ uint32_t tp:2;
+ uint32_t tz:2;
+ uint32_t rp:2;
+ uint32_t rz:2;
+ uint32_t biascntl:1;
+ uint32_t losadj:3;
+ uint32_t rxeq:4;
+#else
+ uint32_t rxeq:4;
+ uint32_t losadj:3;
+ uint32_t biascntl:1;
+ uint32_t rz:2;
+ uint32_t rp:2;
+ uint32_t tz:2;
+ uint32_t tp:2;
+ uint32_t vtxlo:4;
+ uint32_t vpulselo:2;
+ uint32_t vmuxlo:2;
+ uint32_t risefall:3;
+ uint32_t rsrvd1:1;
+ uint32_t emph:3;
+ uint32_t rsrvd:1;
+#endif
+ } bits;
+} pipe_cfg1_t;
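+
+/*
+ * Usage sketch (not part of the hardware definition): how the two
+ * PIPE configuration registers interact per the descriptions above.
+ * A hypothetical routine programs the pipeParameter value and sets
+ * pipeForceParm so that the value is loaded into pipeRxTxParam
+ * (0x01010) when the pipe reset is next released. Offsets and the
+ * pio helpers are assumptions; LOSADJ is kept at the suggested 0x1.
+ */
+extern uint32_t pio_read32(uint32_t off);		/* assumed helper */
+extern void pio_write32(uint32_t off, uint32_t val);	/* assumed helper */
+
+static void
+pipe_load_parameters(uint32_t cfg0_off, uint32_t cfg1_off)
+{
+	pipe_cfg0_t	cfg0;
+	pipe_cfg1_t	cfg1;
+
+	/* Program the value presented on the 'pipeParameter' input. */
+	cfg1.value = pio_read32(cfg1_off);
+	cfg1.bits.losadj = 0x1;		/* LSI-suggested LOS threshold */
+	pio_write32(cfg1_off, cfg1.value);
+
+	/*
+	 * With pipeForceParm=1 when the pipe reset is released, the
+	 * pipeParameter value is loaded into the pipe core's internal
+	 * Rx/Tx Parameter register.
+	 */
+	cfg0.value = pio_read32(cfg0_off);
+	cfg0.bits.pipe_force_parm = 1;
+	pio_write32(cfg0_off, cfg0.value);
+}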
+
+
+/*
+ * Register: CipBarMaskCfg
+ * BAR Mask Config
+ * Description: Used to write to the BAR MASK registers in the EP
+ * Core PCI Config registers. This register should be initialized
+ * before writing the value into the cipBarMask register. The lower
+ * 3 bits define the BAR register number whose mask value is to be
+ * overwritten with the value that will be written into the
+ * cipBarMask register. [2:0] = 0 thru 5 selects the bar0Mask thru
+ * bar5Mask registers; = 6 or 7 selects the Expansion romBarMask
+ * register. Hydra's configuration for the BARs is as below: BAR1,
+ * BAR0: forms the 64-bit PIO BAR; BAR1 handles the upper address
+ * bits, BAR0 the lower address bits. BAR3, BAR2: forms the 64-bit
+ * MSIX BAR; BAR3 handles the upper address bits, BAR2 the lower
+ * address bits. BAR5, BAR4: not used and so disabled; hence, user
+ * writes will not have any effect. romBar: Expansion romBar.
+ *
+ * Fields:
+ * 0 : bar0Mask 1 : bar1Mask 2 : bar2Mask 3 : bar3Mask 4 :
+ * bar4Mask 5 : bar5Mask 6, 7 : romBarMask
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:29;
+ uint32_t data:3;
+#else
+ uint32_t data:3;
+ uint32_t rsrvd:29;
+#endif
+ } bits;
+} cip_bar_mask_cfg_t;
+
+
+/*
+ * Register: CipBarMask
+ * BAR Mask
+ * Description: Value to write to the BAR MASK registers in the EP
+ * Core PCI Config registers. The lower 3 bits of the cipBarMaskCfg
+ * register define the BAR register number. A write to this register
+ * initiates the DBI access to the EP Core. The cipBarMaskCfg
+ * register should be set up before writing to this register. [31:1]
+ * = Mask value [0] = 1: BAR is enabled; 0: BAR is disabled. Note
+ * that the BAR must be enabled ([0] == 1) before the Mask value
+ * will be written into the actual bar mask register. If the BAR is
+ * disabled ([0]==0), two writes to this register are required
+ * before the Mask value is written into the actual bar mask
+ * register. Refer to the EP core data book for more details.
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} cip_bar_mask_t;
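+
+/*
+ * Usage sketch (not part of the hardware definition): the two-step
+ * BAR mask update implied by the descriptions above; select the BAR
+ * in cipBarMaskCfg, then write the mask (with the enable bit set)
+ * through cipBarMask. The pio_write32() helper and the register
+ * offsets are illustrative assumptions.
+ */
+extern void pio_write32(uint32_t off, uint32_t val);	/* assumed helper */
+
+static void
+cip_set_bar_mask(uint32_t cfg_off, uint32_t mask_off,
+    uint32_t bar_num, uint32_t mask)
+{
+	cip_bar_mask_cfg_t	cfg;
+	cip_bar_mask_t		barm;
+
+	/* Select which barXMask register the next write targets. */
+	cfg.value = 0;
+	cfg.bits.data = bar_num;	/* 0..5, or 6/7 for romBarMask */
+	pio_write32(cfg_off, cfg.value);
+
+	/*
+	 * [31:1] = mask value, [0] = 1 enables the BAR; this write
+	 * initiates the DBI access to the EP core.
+	 */
+	barm.value = (mask & ~1U) | 1U;
+	pio_write32(mask_off, barm.value);
+}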
+
+
+/*
+ * Register: CipLdsv0Stat
+ * LDSV0 Status (for debug purpose)
+ * Description: Returns the status of LDSV0 Flags regardless of their
+ * group
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} cip_ldsv0_stat_t;
+
+
+/*
+ * Register: CipLdsv1Stat
+ * LDSV1 Status (for debug purpose)
+ * Description: Returns the status of LDSV1 Flags regardless of their
+ * group
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t data:32;
+#else
+ uint32_t data:32;
+#endif
+ } bits;
+} cip_ldsv1_stat_t;
+
+
+/*
+ * Register: PeuIntrStat
+ * PEU Interrupt Status
+ * Description: Returns the parity error status of all of the PEU
+ * RAMs, and external (to peu) block pio access errors. External
+ * block pio access errors could be due to either host or SPI
+ * initiated accesses. These fields are RO and can be cleared only
+ * through a cip reset. All these errors feed devErrStat.peuErr1,
+ * which in turn feeds LDSV1.devErr1.
+ * Parity Error bits: These bits log the very first parity error
+ * detected in a particular memory. The corresponding memory location
+ * is logged in the respective perrLoc register. External Block PIO
+ * Access Error bits: These bits log the very first error that
+ * resulted in an access error. The corresponding address is logged
+ * in the respective accErrLog register.
+ * These bits can be set by writing a '1' to the corresponding
+ * mirror bit in the peuIntrStatMirror register.
+ * Note: PEU RAM Parity Errors and their corresponding interrupt:
+ * When these bits are set and the device error status interrupt is
+ * not masked, the PEU attempts to send the corresponding interrupt
+ * back to the RC. Depending on which ram is impacted and the
+ * corresponding logic impacted in the EP core, a coherent interrupt
+ * message may not be sent in all cases. For the times when the EP
+ * core is unable to send an interrupt, the SPI interface is to be
+ * used for error diagnosis as the PEU interrupt status is logged
+ * regardless of whether the interrupt is sent to the RC. The
+ * following data was collected via simulation: -Parity error
+ * impacted rams that likely will be able to send an interrupt:
+ * npDataq, pDataq, cplDataq, hcr. -Parity error impacted rams that
+ * may not be able to send an interrupt: npHdrq, pHdrq, cplHdrq, MSIx
+ * table, retryram, retrysot.
+ *
+ * Fields:
+ * Error indication from the SPROM Controller for SPROM download
+ * access. This error indicates that a parity error was detected
+ * in SRAM. For more details, please refer to the SPROM
+ * Controller PRM.
+ * Error indication from TDC for PIO access. The error location
+ * and type are logged in tdcPioaccErrLog.
+ * Error indication from RDC for PIO access. The error location
+ * and type are logged in rdcPioaccErrLog.
+ * Error indication from PFC for PIO access. The error location
+ * and type are logged in pfcPioaccErrLog.
+ * Error indication from VMAC for PIO access. The error location
+ * and type are logged in vmacPioaccErrLog.
+ * Memory in the PCIe data path; value unknown until packet flow
+ * starts.
+ * Memory in the PCIe data path; value unknown until packet flow
+ * starts.
+ * Memory in the PCIe data path; value unknown until packet flow
+ * starts.
+ * Memory in the PCIe data path; value unknown until packet flow
+ * starts.
+ * Memory in the PCIe data path; value unknown until packet flow
+ * starts.
+ * Memory in the PCIe data path; value unknown until packet flow
+ * starts.
+ * Memory in the PCIe data path; value unknown until packet flow
+ * starts.
+ * Memory in the PCIe data path; value unknown until packet flow
+ * starts.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:11;
+ uint32_t spc_acc_err:1;
+ uint32_t tdc_pioacc_err:1;
+ uint32_t rdc_pioacc_err:1;
+ uint32_t pfc_pioacc_err:1;
+ uint32_t vmac_pioacc_err:1;
+ uint32_t rsrvd1:6;
+ uint32_t cpl_hdrq_parerr:1;
+ uint32_t cpl_dataq_parerr:1;
+ uint32_t retryram_xdlh_parerr:1;
+ uint32_t retrysotram_xdlh_parerr:1;
+ uint32_t p_hdrq_parerr:1;
+ uint32_t p_dataq_parerr:1;
+ uint32_t np_hdrq_parerr:1;
+ uint32_t np_dataq_parerr:1;
+ uint32_t eic_msix_parerr:1;
+ uint32_t hcr_parerr:1;
+#else
+ uint32_t hcr_parerr:1;
+ uint32_t eic_msix_parerr:1;
+ uint32_t np_dataq_parerr:1;
+ uint32_t np_hdrq_parerr:1;
+ uint32_t p_dataq_parerr:1;
+ uint32_t p_hdrq_parerr:1;
+ uint32_t retrysotram_xdlh_parerr:1;
+ uint32_t retryram_xdlh_parerr:1;
+ uint32_t cpl_dataq_parerr:1;
+ uint32_t cpl_hdrq_parerr:1;
+ uint32_t rsrvd1:6;
+ uint32_t vmac_pioacc_err:1;
+ uint32_t pfc_pioacc_err:1;
+ uint32_t rdc_pioacc_err:1;
+ uint32_t tdc_pioacc_err:1;
+ uint32_t spc_acc_err:1;
+ uint32_t rsrvd:11;
+#endif
+ } bits;
+} peu_intr_stat_t;
+
+
+/*
+ * Register: PeuIntrMask
+ * Parity Error Status Mask
+ * Description: Masks for interrupt generation for block and parity
+ * errors in the PEU RAMs. For the VNM errors (spc, tdc, rdc, pfc, &
+ * vmac), note that the interrupt message to the host will be delayed
+ * from the actual moment that the error is detected until the host
+ * does a PIO access and this mask is cleared.
+ *
+ * Fields:
+ * 1: Mask interrupt generation for access error from SPROM
+ * Controller
+ * 1: Mask interrupt generation for PIO access error from TDC
+ * 1: Mask interrupt generation for PIO access error from RDC
+ * 1: Mask interrupt generation for PIO access error from PFC
+ * 1: Mask interrupt generation for PIO access error from VMAC
+ * 1: Mask interrupt generation for parity error from Completion
+ * Header Q memory
+ * 1: Mask interrupt generation for parity error from Completion
+ * Data Q memory
+ * 1: Mask interrupt generation for parity error from Retry
+ * memory
+ * 1: Mask interrupt generation for parity error from Retry SOT
+ * memory
+ * 1: Mask interrupt generation for parity error from Posted
+ * Header Q memory
+ * 1: Mask interrupt generation for parity error from Posted Data
+ * Q memory
+ * 1: Mask interrupt generation for parity error from Non-Posted
+ * Header Q memory
+ * 1: Mask interrupt generation for parity error from Non-Posted
+ * Data Q memory
+ * 1: Mask interrupt generation for parity error from MSIX memory
+ * 1: Mask interrupt generation for parity error from HCR memory
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:11;
+ uint32_t spc_acc_err_mask:1;
+ uint32_t tdc_pioacc_err_mask:1;
+ uint32_t rdc_pioacc_err_mask:1;
+ uint32_t pfc_pioacc_err_mask:1;
+ uint32_t vmac_pioacc_err_mask:1;
+ uint32_t rsrvd1:6;
+ uint32_t cpl_hdrq_parerr_mask:1;
+ uint32_t cpl_dataq_parerr_mask:1;
+ uint32_t retryram_xdlh_parerr_mask:1;
+ uint32_t retrysotram_xdlh_parerr_mask:1;
+ uint32_t p_hdrq_parerr_mask:1;
+ uint32_t p_dataq_parerr_mask:1;
+ uint32_t np_hdrq_parerr_mask:1;
+ uint32_t np_dataq_parerr_mask:1;
+ uint32_t eic_msix_parerr_mask:1;
+ uint32_t hcr_parerr_mask:1;
+#else
+ uint32_t hcr_parerr_mask:1;
+ uint32_t eic_msix_parerr_mask:1;
+ uint32_t np_dataq_parerr_mask:1;
+ uint32_t np_hdrq_parerr_mask:1;
+ uint32_t p_dataq_parerr_mask:1;
+ uint32_t p_hdrq_parerr_mask:1;
+ uint32_t retrysotram_xdlh_parerr_mask:1;
+ uint32_t retryram_xdlh_parerr_mask:1;
+ uint32_t cpl_dataq_parerr_mask:1;
+ uint32_t cpl_hdrq_parerr_mask:1;
+ uint32_t rsrvd1:6;
+ uint32_t vmac_pioacc_err_mask:1;
+ uint32_t pfc_pioacc_err_mask:1;
+ uint32_t rdc_pioacc_err_mask:1;
+ uint32_t tdc_pioacc_err_mask:1;
+ uint32_t spc_acc_err_mask:1;
+ uint32_t rsrvd:11;
+#endif
+ } bits;
+} peu_intr_mask_t;
+
+
+/*
+ * Register: PeuIntrStatMirror
+ * Parity Error Status Mirror
+ * Description: Mirror bits for parity error generation in the PEU
+ * RAMs. When set, the corresponding parity error is generated; this
+ * will cause an interrupt to occur if the respective mask bit is not
+ * set. As the mirror of the Parity Error Status Register, clearing
+ * of the status bits is controlled by how the Parity Error Status
+ * Register is cleared. These bits cannot be cleared by writing 0 to
+ * this register.
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:11;
+ uint32_t spc_acc_err_mirror:1;
+ uint32_t tdc_pioacc_err_mirror:1;
+ uint32_t rdc_pioacc_err_mirror:1;
+ uint32_t pfc_pioacc_err_mirror:1;
+ uint32_t vmac_pioacc_err_mirror:1;
+ uint32_t rsrvd1:6;
+ uint32_t cpl_hdrq_parerr_mirror:1;
+ uint32_t cpl_dataq_parerr_mirror:1;
+ uint32_t retryram_xdlh_parerr_mirror:1;
+ uint32_t retrysotram_xdlh_parerr_mirror:1;
+ uint32_t p_hdrq_parerr_mirror:1;
+ uint32_t p_dataq_parerr_mirror:1;
+ uint32_t np_hdrq_parerr_mirror:1;
+ uint32_t np_dataq_parerr_mirror:1;
+ uint32_t eic_msix_parerr_mirror:1;
+ uint32_t hcr_parerr_mirror:1;
+#else
+ uint32_t hcr_parerr_mirror:1;
+ uint32_t eic_msix_parerr_mirror:1;
+ uint32_t np_dataq_parerr_mirror:1;
+ uint32_t np_hdrq_parerr_mirror:1;
+ uint32_t p_dataq_parerr_mirror:1;
+ uint32_t p_hdrq_parerr_mirror:1;
+ uint32_t retrysotram_xdlh_parerr_mirror:1;
+ uint32_t retryram_xdlh_parerr_mirror:1;
+ uint32_t cpl_dataq_parerr_mirror:1;
+ uint32_t cpl_hdrq_parerr_mirror:1;
+ uint32_t rsrvd1:6;
+ uint32_t vmac_pioacc_err_mirror:1;
+ uint32_t pfc_pioacc_err_mirror:1;
+ uint32_t rdc_pioacc_err_mirror:1;
+ uint32_t tdc_pioacc_err_mirror:1;
+ uint32_t spc_acc_err_mirror:1;
+ uint32_t rsrvd:11;
+#endif
+ } bits;
+} peu_intr_stat_mirror_t;
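+
+/*
+ * Usage sketch (not part of the hardware definition): a hypothetical
+ * way to exercise the error path by setting a mirror bit, which per
+ * the description above asserts the corresponding peuIntrStat bit
+ * and, if unmasked, raises the device-error interrupt. Offsets and
+ * helpers are assumptions; remember the status can only be cleared
+ * by a cip reset.
+ */
+extern uint32_t pio_read32(uint32_t off);		/* assumed helper */
+extern void pio_write32(uint32_t off, uint32_t val);	/* assumed helper */
+
+static void
+peu_inject_hcr_parerr(uint32_t mask_off, uint32_t mirror_off)
+{
+	peu_intr_mask_t		mask;
+	peu_intr_stat_mirror_t	mirror;
+
+	/* Ensure the HCR parity error is not masked. */
+	mask.value = pio_read32(mask_off);
+	mask.bits.hcr_parerr_mask = 0;
+	pio_write32(mask_off, mask.value);
+
+	/* Setting the mirror bit asserts hcrParerr in peuIntrStat. */
+	mirror.value = 0;
+	mirror.bits.hcr_parerr_mirror = 1;
+	pio_write32(mirror_off, mirror.value);
+}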
+
+
+/*
+ * Register: CplHdrqPerrLoc
+ * Completion Header Queue Parity Error Location
+ * Description: Returns the location of the first parity error
+ * detected in Completion Header Q
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:16;
+ uint32_t cpl_hdrq_parerr_loc:16;
+#else
+ uint32_t cpl_hdrq_parerr_loc:16;
+ uint32_t rsrvd:16;
+#endif
+ } bits;
+} cpl_hdrq_perr_loc_t;
+
+
+/*
+ * Register: CplDataqPerrLoc
+ * Completion Data Queue Parity Error Location
+ * Description: Returns the location of the first parity error
+ * detected in Completion Data Q
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:16;
+ uint32_t cpl_dataq_parerr_loc:16;
+#else
+ uint32_t cpl_dataq_parerr_loc:16;
+ uint32_t rsrvd:16;
+#endif
+ } bits;
+} cpl_dataq_perr_loc_t;
+
+
+/*
+ * Register: RetrPerrLoc
+ * Retry RAM Parity Error Location
+ * Description: Returns the location of the first parity error
+ * detected in Retry RAM
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:16;
+ uint32_t retr_parerr_loc:16;
+#else
+ uint32_t retr_parerr_loc:16;
+ uint32_t rsrvd:16;
+#endif
+ } bits;
+} retr_perr_loc_t;
+
+
+/*
+ * Register: RetrSotPerrLoc
+ * Retry SOT RAM Parity Error Location
+ * Description: Returns the location of the first parity error
+ * detected in Retry RAM SOT
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:16;
+ uint32_t retr_sot_parerr_loc:16;
+#else
+ uint32_t retr_sot_parerr_loc:16;
+ uint32_t rsrvd:16;
+#endif
+ } bits;
+} retr_sot_perr_loc_t;
+
+
+/*
+ * Register: PHdrqPerrLoc
+ * Posted Header Queue Parity Error Location
+ * Description: Returns the location of the first parity error
+ * detected in Posted Header Q
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:16;
+ uint32_t p_hdrq_parerr_loc:16;
+#else
+ uint32_t p_hdrq_parerr_loc:16;
+ uint32_t rsrvd:16;
+#endif
+ } bits;
+} p_hdrq_perr_loc_t;
+
+
+/*
+ * Register: PDataqPerrLoc
+ * Posted Data Queue Parity Error Location
+ * Description: Returns the location of the first parity error
+ * detected in Posted Data Q
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:16;
+ uint32_t p_dataq_parerr_loc:16;
+#else
+ uint32_t p_dataq_parerr_loc:16;
+ uint32_t rsrvd:16;
+#endif
+ } bits;
+} p_dataq_perr_loc_t;
+
+
+/*
+ * Register: NpHdrqPerrLoc
+ * Non-Posted Header Queue Parity Error Location
+ * Description: Returns the location of the first parity error
+ * detected in Non-Posted Header Q
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:16;
+ uint32_t np_hdrq_parerr_loc:16;
+#else
+ uint32_t np_hdrq_parerr_loc:16;
+ uint32_t rsrvd:16;
+#endif
+ } bits;
+} np_hdrq_perr_loc_t;
+
+
+/*
+ * Register: NpDataqPerrLoc
+ * Non-Posted Data Queue Parity Error Location
+ * Description: Returns the location of the first parity error
+ * detected in Non-Posted Data Q
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:16;
+ uint32_t np_dataq_parerr_loc:16;
+#else
+ uint32_t np_dataq_parerr_loc:16;
+ uint32_t rsrvd:16;
+#endif
+ } bits;
+} np_dataq_perr_loc_t;
+
+
+/*
+ * Register: MsixPerrLoc
+ * MSIX Parity Error Location
+ * Description: Returns the location of the first parity error
+ * detected in MSIX memory
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:16;
+ uint32_t eic_msix_parerr_loc:16;
+#else
+ uint32_t eic_msix_parerr_loc:16;
+ uint32_t rsrvd:16;
+#endif
+ } bits;
+} msix_perr_loc_t;
+
+
+/*
+ * Register: HcrPerrLoc
+ * HCR Memory Parity Error Location
+ * Description: Returns the location of the first parity error
+ * detected in HCR Memory
+ *
+ * Fields:
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:16;
+ uint32_t hcr_parerr_loc:16;
+#else
+ uint32_t hcr_parerr_loc:16;
+ uint32_t rsrvd:16;
+#endif
+ } bits;
+} hcr_perr_loc_t;
+
+
+/*
+ * Register: TdcPioaccErrLog
+ * TDC PIO Access Error Location
+ * Description: Returns the location of the first transaction that
+ * resulted in an error
+ *
+ * Fields:
+ * Type of access error; 0 : block returned an error condition,
+ * 1 : transaction was timed out by CIP
+ * Transaction location that resulted in the error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:11;
+ uint32_t tdc_pioacc_err_type:1;
+ uint32_t tdc_pioacc_err_loc:20;
+#else
+ uint32_t tdc_pioacc_err_loc:20;
+ uint32_t tdc_pioacc_err_type:1;
+ uint32_t rsrvd:11;
+#endif
+ } bits;
+} tdc_pioacc_err_log_t;
+
+
+/*
+ * Register: RdcPioaccErrLog
+ * RDC PIO Access Error Location
+ * Description: Returns the location of the first transaction that
+ * resulted in an error
+ *
+ * Fields:
+ * Type of access error; 0 : block returned an error condition,
+ * 1 : transaction was timed out by CIP
+ * Transaction location that resulted in the error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:11;
+ uint32_t rdc_pioacc_err_type:1;
+ uint32_t rdc_pioacc_err_loc:20;
+#else
+ uint32_t rdc_pioacc_err_loc:20;
+ uint32_t rdc_pioacc_err_type:1;
+ uint32_t rsrvd:11;
+#endif
+ } bits;
+} rdc_pioacc_err_log_t;
+
+
+/*
+ * Register: PfcPioaccErrLog
+ * PFC PIO Access Error Location
+ * Description: Returns the location of the first transaction that
+ * resulted in an error
+ *
+ * Fields:
+ * Type of access error; 0 : block returned an error condition,
+ * 1 : transaction was timed out by CIP
+ * Transaction location that resulted in the error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:11;
+ uint32_t pfc_pioacc_err_type:1;
+ uint32_t pfc_pioacc_err_loc:20;
+#else
+ uint32_t pfc_pioacc_err_loc:20;
+ uint32_t pfc_pioacc_err_type:1;
+ uint32_t rsrvd:11;
+#endif
+ } bits;
+} pfc_pioacc_err_log_t;
+
+
+/*
+ * Register: VmacPioaccErrLog
+ * VMAC PIO Access Error Location
+ * Description: Returns the location of the first transaction that
+ * resulted in an error
+ *
+ * Fields:
+ * Type of access error: 0 = block returned an error condition, 1 =
+ * transaction timed out by CIP
+ * Transaction location that resulted in the error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:11;
+ uint32_t vmac_pioacc_err_type:1;
+ uint32_t vmac_pioacc_err_loc:20;
+#else
+ uint32_t vmac_pioacc_err_loc:20;
+ uint32_t vmac_pioacc_err_type:1;
+ uint32_t rsrvd:11;
+#endif
+ } bits;
+} vmac_pioacc_err_log_t;
+
+
+/*
+ * Register: LdGrpCtrl
+ * Logical Device Group Control
+ * Description: LD Group assignment
+ * Fields:
+ * Logical device group number of this logical device
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:27;
+ uint32_t num:5;
+#else
+ uint32_t num:5;
+ uint32_t rsrvd:27;
+#endif
+ } bits;
+} ld_grp_ctrl_t;
+
+
+/*
+ * Register: DevErrStat
+ * Device Error Status
+ * Description: Device Error Status logs errors that cannot be
+ * attributed to a given dma channel. It does not duplicate errors
+ * already observable via specific block logical device groups.
+ * Device Error Status bits [31:16] feed LDSV0.devErr0; Device Error
+ * Status bits [15:0] feed LDSV1.devErr1.
+ * Fields:
+ * Set to 1 if Reorder Buffer/Reorder Table has a single bit
+ * ecc/parity error. This error condition is asserted by TDC to
+ * PEU.
+ * Set to 1 if RX Ctrl or Data FIFO has a single bit ecc error.
+ * This error condition is asserted by RDC to PEU.
+ * Set to 1 if any of the external block accesses have resulted
+ * in error or if a parity error was detected in the SPROM
+ * internal ram. Refer to peuIntrStat for the errors that
+ * contribute to this bit.
+ * Set to 1 if Reorder Buffer/Reorder Table has a double bit
+ * ecc/parity error. This error condition is asserted by TDC to
+ * PEU.
+ * Set to 1 if RX Ctrl or Data FIFO has a double bit ecc error.
+ * This error condition is asserted by RDC to PEU.
+ * Set to 1 if any PEU ram (MSI-X, retrybuf/sot, p/np/cpl queues)
+ * has a parity error. Refer to peuIntrStat for the errors that
+ * contribute to this bit.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:13;
+ uint32_t tdc_err0:1;
+ uint32_t rdc_err0:1;
+ uint32_t rsrvd1:1;
+ uint32_t rsrvd2:12;
+ uint32_t vnm_pio_err1:1;
+ uint32_t tdc_err1:1;
+ uint32_t rdc_err1:1;
+ uint32_t peu_err1:1;
+#else
+ uint32_t peu_err1:1;
+ uint32_t rdc_err1:1;
+ uint32_t tdc_err1:1;
+ uint32_t vnm_pio_err1:1;
+ uint32_t rsrvd2:12;
+ uint32_t rsrvd1:1;
+ uint32_t rdc_err0:1;
+ uint32_t tdc_err0:1;
+ uint32_t rsrvd:13;
+#endif
+ } bits;
+} dev_err_stat_t;
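+
+/*
+ * Decoding sketch (illustrative only; field names are taken from the
+ * definition above). The err0 flags live in the upper half and feed
+ * LDSV0.devErr0; the err1 flags live in the lower half and feed
+ * LDSV1.devErr1:
+ *
+ *	dev_err_stat_t es;
+ *	es.value = <value read from DevErrStat>;
+ *	if (es.bits.tdc_err0 || es.bits.tdc_err1)
+ *		handle TDC reorder buffer/table errors;
+ */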
+
+
+/*
+ * Register: DevErrMask
+ * Device Error Mask
+ * Description: Device Error Mask (gates devErrStat)
+ * Fields:
+ * Mask for TDC error0
+ * Mask for RDC error0
+ * Mask for VNM PIO Access error
+ * Mask for TDC error1
+ * Mask for RDC error1
+ * Mask for PEU memories parity error
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:13;
+ uint32_t tdc_mask0:1;
+ uint32_t rdc_mask0:1;
+ uint32_t rsrvd1:1;
+ uint32_t rsrvd2:12;
+ uint32_t vnm_pio_mask1:1;
+ uint32_t tdc_mask1:1;
+ uint32_t rdc_mask1:1;
+ uint32_t peu_mask1:1;
+#else
+ uint32_t peu_mask1:1;
+ uint32_t rdc_mask1:1;
+ uint32_t tdc_mask1:1;
+ uint32_t vnm_pio_mask1:1;
+ uint32_t rsrvd2:12;
+ uint32_t rsrvd1:1;
+ uint32_t rdc_mask0:1;
+ uint32_t tdc_mask0:1;
+ uint32_t rsrvd:13;
+#endif
+ } bits;
+} dev_err_mask_t;
+
+
+/*
+ * Register: LdIntrTimRes
+ * Logical Device Interrupt Timer Resolution
+ * Description: Logical Device Interrupt Timer Resolution
+ * Fields:
+ * Timer resolution in 250 MHz cycles
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:12;
+ uint32_t res:20;
+#else
+ uint32_t res:20;
+ uint32_t rsrvd:12;
+#endif
+ } bits;
+} ld_intr_tim_res_t;
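+
+/*
+ * Worked example (illustrative): with a 250 MHz clock one cycle is
+ * 4 ns, so a res value of 25000 yields a timer resolution of
+ * 25000 * 4 ns = 100 us.
+ */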
+
+
+/*
+ * Register: LDSV0
+ * Logical Device State Vector 0
+ * Description: Logical Device State Vector 0
+ * Fields:
+ * Device Error 0 summary flag (Device Error Status bits [31:16])
+ * Interrupt from mail box3 to HOST
+ * Interrupt from mail box2 to HOST
+ * Interrupt from mail box1 to HOST
+ * Interrupt from mail box0 to HOST
+ * Flag0 bits for Network MAC
+ * Flag0 bit for PFC
+ * Flag0 bits for Virtual MAC
+ * Flag0 bits for Tx DMA channels 3-0
+ * Flag0 bits for Rx DMA channels 3-0
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t dev_err0:1;
+ uint32_t rsrvd:7;
+ uint32_t mbox3_irq:1;
+ uint32_t mbox2_irq:1;
+ uint32_t mbox1_irq:1;
+ uint32_t mbox0_irq:1;
+ uint32_t rsrvd1:1;
+ uint32_t nmac_f0:1;
+ uint32_t pfc_f0:1;
+ uint32_t vmac_f0:1;
+ uint32_t rsrvd2:4;
+ uint32_t tdc_f0:4;
+ uint32_t rsrvd3:4;
+ uint32_t rdc_f0:4;
+#else
+ uint32_t rdc_f0:4;
+ uint32_t rsrvd3:4;
+ uint32_t tdc_f0:4;
+ uint32_t rsrvd2:4;
+ uint32_t vmac_f0:1;
+ uint32_t pfc_f0:1;
+ uint32_t nmac_f0:1;
+ uint32_t rsrvd1:1;
+ uint32_t mbox0_irq:1;
+ uint32_t mbox1_irq:1;
+ uint32_t mbox2_irq:1;
+ uint32_t mbox3_irq:1;
+ uint32_t rsrvd:7;
+ uint32_t dev_err0:1;
+#endif
+ } bits;
+} ldsv0_t;
+
+
+/*
+ * Register: LDSV1
+ * Logical Device State Vector 1
+ * Description: Logical Device State Vector 1
+ * Fields:
+ * Device Error 1 summary flag (Device Error Status bits [15:0])
+ * Flag1 bits for Network MAC
+ * Flag1 bits for Tx DMA channels 3-0
+ * Flag1 bits for Rx DMA channels 3-0
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t dev_err1:1;
+ uint32_t rsrvd:7;
+ uint32_t rsrvd1:5;
+ uint32_t nmac_f1:1;
+ uint32_t rsrvd2:1;
+ uint32_t rsrvd3:1;
+ uint32_t rsrvd4:4;
+ uint32_t tdc_f1:4;
+ uint32_t rsrvd5:4;
+ uint32_t rdc_f1:4;
+#else
+ uint32_t rdc_f1:4;
+ uint32_t rsrvd5:4;
+ uint32_t tdc_f1:4;
+ uint32_t rsrvd4:4;
+ uint32_t rsrvd3:1;
+ uint32_t rsrvd2:1;
+ uint32_t nmac_f1:1;
+ uint32_t rsrvd1:5;
+ uint32_t rsrvd:7;
+ uint32_t dev_err1:1;
+#endif
+ } bits;
+} ldsv1_t;
+
+
+/*
+ * Register: LdIntrMask
+ * Logical Device Interrupt Mask
+ * Description: Logical Device Interrupt Mask
+ * Fields:
+ * Flag1 mask for logical device N (0-31)
+ * Flag0 mask for logical device N (0-31)
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:30;
+ uint32_t ldf1_mask:1;
+ uint32_t ldf0_mask:1;
+#else
+ uint32_t ldf0_mask:1;
+ uint32_t ldf1_mask:1;
+ uint32_t rsrvd:30;
+#endif
+ } bits;
+} ld_intr_mask_t;
+
+
+/*
+ * Register: LdIntrMgmt
+ * Logical Device Interrupt Management
+ * Description: Logical Device Interrupt Management
+ * Fields:
+ * SW arms the logical device for interrupt. Cleared by HW after
+ * interrupt issued. (1 = arm)
+ * Timer set by SW. Hardware counts down.
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t arm:1;
+ uint32_t rsrvd:25;
+ uint32_t timer:6;
+#else
+ uint32_t timer:6;
+ uint32_t rsrvd:25;
+ uint32_t arm:1;
+#endif
+ } bits;
+} ld_intr_mgmt_t;
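+
+/*
+ * Usage sketch (illustrative only; field names are taken from the
+ * definition above). Software re-arms a logical device and loads the
+ * countdown timer with a single register write:
+ *
+ *	ld_intr_mgmt_t mgmt;
+ *	mgmt.value = 0;
+ *	mgmt.bits.arm = 1;		cleared by HW after interrupt
+ *	mgmt.bits.timer = timer_val;	HW counts down from this value
+ */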
+
+
+/*
+ * Register: SID
+ * System Interrupt Data
+ * Description: System Interrupt Data (MSI Vectors)
+ * Fields:
+ * Data sent along with the interrupt
+ */
+typedef union {
+ uint32_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint32_t rsrvd:27;
+ uint32_t data:5;
+#else
+ uint32_t data:5;
+ uint32_t rsrvd:27;
+#endif
+ } bits;
+} sid_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HXGE_PEU_HW_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_pfc.c b/usr/src/uts/common/io/hxge/hxge_pfc.c
new file mode 100644
index 0000000000..5544f4341b
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_pfc.c
@@ -0,0 +1,1306 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hxge_impl.h>
+#include <hxge_classify.h>
+#include <hxge_pfc.h>
+#include <hpi_pfc.h>
+#include <sys/ethernet.h>
+
+/*
+ * Ethernet broadcast address definition.
+ */
+static ether_addr_st etherbroadcastaddr = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+
+static hxge_status_t hxge_pfc_set_mac_address(p_hxge_t, uint32_t,
+ struct ether_addr *);
+static uint32_t crc32_mchash(p_ether_addr_t addr);
+static hxge_status_t hxge_pfc_load_hash_table(p_hxge_t hxgep);
+static uint32_t hxge_get_blade_id(p_hxge_t hxgep);
+static hxge_status_t hxge_tcam_default_add_entry(p_hxge_t hxgep,
+ tcam_class_t class);
+static hxge_status_t hxge_tcam_default_config(p_hxge_t hxgep);
+
+hxge_status_t
+hxge_classify_init(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init"));
+
+ status = hxge_classify_init_sw(hxgep);
+ if (status != HXGE_OK)
+ return (status);
+
+ status = hxge_classify_init_hw(hxgep);
+ if (status != HXGE_OK) {
+ (void) hxge_classify_exit_sw(hxgep);
+ return (status);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init"));
+
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_classify_uninit(p_hxge_t hxgep)
+{
+ return (hxge_classify_exit_sw(hxgep));
+}
+
+static hxge_status_t
+hxge_tcam_dump_entry(p_hxge_t hxgep, uint32_t location)
+{
+ hxge_tcam_entry_t tcam_rdptr;
+ uint64_t asc_ram = 0;
+ hpi_handle_t handle;
+ hpi_status_t status;
+
+ handle = hxgep->hpi_reg_handle;
+
+ /* Retrieve the saved entry */
+ bcopy((void *)&hxgep->classifier.tcam_entries[location].tce,
+ (void *)&tcam_rdptr, sizeof (hxge_tcam_entry_t));
+
+	/* Read the entry back from hardware */
+ status = hpi_pfc_tcam_entry_read(handle, location, &tcam_rdptr);
+ if (status == HPI_FAILURE) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_tcam_dump_entry: tcam read failed at location %d ",
+ location));
+ return (HXGE_ERROR);
+ }
+
+ status = hpi_pfc_tcam_asc_ram_entry_read(handle, location, &asc_ram);
+
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "location %x\n"
+ " key: %llx %llx\n mask: %llx %llx\n ASC RAM %llx \n", location,
+ tcam_rdptr.key0, tcam_rdptr.key1,
+ tcam_rdptr.mask0, tcam_rdptr.mask1, asc_ram));
+ return (HXGE_OK);
+}
+
+void
+hxge_get_tcam(p_hxge_t hxgep, p_mblk_t mp)
+{
+ uint32_t tcam_loc;
+ uint32_t *lptr;
+ int location;
+ int start_location = 0;
+ int stop_location = hxgep->classifier.tcam_size;
+
+ lptr = (uint32_t *)mp->b_rptr;
+ location = *lptr;
+
+ if ((location >= hxgep->classifier.tcam_size) || (location < -1)) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_tcam_dump: Invalid location %d \n", location));
+ return;
+ }
+ if (location == -1) {
+ start_location = 0;
+ stop_location = hxgep->classifier.tcam_size;
+ } else {
+ start_location = location;
+ stop_location = location + 1;
+ }
+ for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++)
+ (void) hxge_tcam_dump_entry(hxgep, tcam_loc);
+}
+
+/*ARGSUSED*/
+static hxge_status_t
+hxge_add_tcam_entry(p_hxge_t hxgep, flow_resource_t *flow_res)
+{
+ return (HXGE_OK);
+}
+
+void
+hxge_put_tcam(p_hxge_t hxgep, p_mblk_t mp)
+{
+ flow_resource_t *fs;
+ fs = (flow_resource_t *)mp->b_rptr;
+
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_put_tcam addr fs $%p type %x offset %x",
+ fs, fs->flow_spec.flow_type, fs->channel_cookie));
+
+ (void) hxge_add_tcam_entry(hxgep, fs);
+}
+
+static uint32_t
+hxge_get_blade_id(p_hxge_t hxgep)
+{
+ phy_debug_training_vec_t blade_id;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_get_blade_id"));
+ HXGE_REG_RD32(hxgep->hpi_reg_handle, PHY_DEBUG_TRAINING_VEC,
+ &blade_id.value);
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_get_blade_id: id = %d",
+ blade_id.bits.bld_num));
+
+ return (blade_id.bits.bld_num);
+}
+
+static hxge_status_t
+hxge_tcam_default_add_entry(p_hxge_t hxgep, tcam_class_t class)
+{
+ hpi_status_t rs = HPI_SUCCESS;
+ uint32_t location;
+ hxge_tcam_entry_t entry;
+ hxge_tcam_spread_t *key = NULL;
+ hxge_tcam_spread_t *mask = NULL;
+ hpi_handle_t handle;
+ p_hxge_hw_list_t hw_p;
+
+ if ((hw_p = hxgep->hxge_hw_p) == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_tcam_default_add_entry: common hardware not set"));
+ return (HXGE_ERROR);
+ }
+
+ bzero(&entry, sizeof (hxge_tcam_entry_t));
+
+ /*
+	 * The class id and blade id are common for all classes.
+	 * Only use the blade id for matching; the rest are wild cards.
+ * This will allow one TCAM entry to match all traffic in order
+ * to spread the traffic using source hash.
+ */
+ key = &entry.key.spread;
+ mask = &entry.mask.spread;
+
+ key->blade_id = hxge_get_blade_id(hxgep);
+
+ mask->class_code = 0x1f;
+ mask->blade_id = 0;
+ mask->wild1 = 0x7ffffff;
+ mask->wild = ~0x0;
+
+ location = class;
+
+ handle = hxgep->hpi_reg_handle;
+
+ MUTEX_ENTER(&hw_p->hxge_tcam_lock);
+ rs = hpi_pfc_tcam_entry_write(handle, location, &entry);
+ if (rs & HPI_PFC_ERROR) {
+ MUTEX_EXIT(&hw_p->hxge_tcam_lock);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_tcam_default_add_entry tcam entry write"
+ " failed for location %d", location));
+ return (HXGE_ERROR);
+ }
+
+ /* Add the associative portion */
+ entry.match_action.value = 0;
+
+ /* Use source hash to spread traffic */
+ entry.match_action.bits.channel_d = 0;
+ entry.match_action.bits.channel_c = 1;
+ entry.match_action.bits.channel_b = 2;
+ entry.match_action.bits.channel_a = 3;
+ entry.match_action.bits.source_hash = 1;
+ entry.match_action.bits.discard = 0;
+
+ rs = hpi_pfc_tcam_asc_ram_entry_write(handle,
+ location, entry.match_action.value);
+ if (rs & HPI_PFC_ERROR) {
+ MUTEX_EXIT(&hw_p->hxge_tcam_lock);
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL,
+ " hxge_tcam_default_add_entry tcam entry write"
+ " failed for ASC RAM location %d", location));
+ return (HXGE_ERROR);
+ }
+
+ bcopy((void *) &entry,
+ (void *) &hxgep->classifier.tcam_entries[location].tce,
+ sizeof (hxge_tcam_entry_t));
+
+ MUTEX_EXIT(&hw_p->hxge_tcam_lock);
+
+ return (HXGE_OK);
+}
+
+/*
+ * Configure one TCAM entry for each class and make it match
+ * everything within the class in order to spread the traffic
+ * among the DMA channels based on the source hash.
+ *
+ * This is the default for now. This may change when Crossbow is
+ * available for configuring TCAM.
+ */
+static hxge_status_t
+hxge_tcam_default_config(p_hxge_t hxgep)
+{
+ uint8_t class;
+ uint32_t class_config;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_tcam_default_config"));
+
+ /*
+	 * Add TCAM and its associative ram entries.
+	 * A wild card will be used for the class code in order to match
+	 * any class.
+ */
+ class = 0;
+ status = hxge_tcam_default_add_entry(hxgep, class);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_tcam_default_config "
+ "hxge_tcam_default_add_entry failed class %d ",
+ class));
+ return (HXGE_ERROR);
+ }
+
+ /* Enable the classes */
+ for (class = TCAM_CLASS_TCP_IPV4;
+ class <= TCAM_CLASS_SCTP_IPV6; class++) {
+ /*
+ * By default, it is set to HXGE_CLASS_TCAM_LOOKUP in
+ * hxge_ndd.c. It may be overwritten in hxge.conf.
+ */
+ class_config = hxgep->class_config.class_cfg[class];
+
+ status = hxge_pfc_ip_class_config(hxgep, class, class_config);
+ if (status & HPI_PFC_ERROR) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_tcam_default_config "
+ "hxge_pfc_ip_class_config failed "
+ " class %d config %x ", class, class_config));
+ return (HXGE_ERROR);
+ }
+ }
+
+ status = hxge_pfc_config_tcam_enable(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_tcam_default_config"));
+
+ return (status);
+}
+
+hxge_status_t
+hxge_pfc_set_default_mac_addr(p_hxge_t hxgep)
+{
+ hxge_status_t status;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_set_default_mac_addr"));
+
+ MUTEX_ENTER(&hxgep->ouraddr_lock);
+
+ /*
+ * Set new interface local address and re-init device.
+ * This is destructive to any other streams attached
+ * to this device.
+ */
+ RW_ENTER_WRITER(&hxgep->filter_lock);
+ status = hxge_pfc_set_mac_address(hxgep,
+ HXGE_MAC_DEFAULT_ADDR_SLOT, &hxgep->ouraddr);
+ RW_EXIT(&hxgep->filter_lock);
+
+ MUTEX_EXIT(&hxgep->ouraddr_lock);
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_set_default_mac_addr"));
+ return (status);
+}
+
+hxge_status_t
+hxge_set_mac_addr(p_hxge_t hxgep, struct ether_addr *addrp)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_set_mac_addr"));
+
+ MUTEX_ENTER(&hxgep->ouraddr_lock);
+
+ /*
+	 * Exit if the address is the same as ouraddr, or is a multicast
+	 * or broadcast address.
+ */
+ if (((addrp->ether_addr_octet[0] & 01) == 1) ||
+ (ether_cmp(addrp, &etherbroadcastaddr) == 0) ||
+ (ether_cmp(addrp, &hxgep->ouraddr) == 0)) {
+ goto hxge_set_mac_addr_exit;
+ }
+ hxgep->ouraddr = *addrp;
+
+ /*
+ * Set new interface local address and re-init device.
+ * This is destructive to any other streams attached
+ * to this device.
+ */
+ RW_ENTER_WRITER(&hxgep->filter_lock);
+ status = hxge_pfc_set_mac_address(hxgep,
+ HXGE_MAC_DEFAULT_ADDR_SLOT, addrp);
+ RW_EXIT(&hxgep->filter_lock);
+
+ MUTEX_EXIT(&hxgep->ouraddr_lock);
+ goto hxge_set_mac_addr_end;
+
+hxge_set_mac_addr_exit:
+ MUTEX_EXIT(&hxgep->ouraddr_lock);
+
+hxge_set_mac_addr_end:
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_set_mac_addr"));
+ return (status);
+}
+
+/*
+ * Add a multicast address entry into the HW hash table
+ */
+hxge_status_t
+hxge_add_mcast_addr(p_hxge_t hxgep, struct ether_addr *addrp)
+{
+ uint32_t mchash;
+ p_hash_filter_t hash_filter;
+ uint16_t hash_bit;
+ boolean_t rx_init = B_FALSE;
+ uint_t j;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_add_mcast_addr"));
+
+ RW_ENTER_WRITER(&hxgep->filter_lock);
+ mchash = crc32_mchash(addrp);
+
+ if (hxgep->hash_filter == NULL) {
+ HXGE_DEBUG_MSG((NULL, STR_CTL,
+ "Allocating hash filter storage."));
+ hxgep->hash_filter = KMEM_ZALLOC(sizeof (hash_filter_t),
+ KM_SLEEP);
+ }
+
+ hash_filter = hxgep->hash_filter;
+ /*
+ * Note that mchash is an 8 bit value and thus 0 <= mchash <= 255.
+ * Consequently, 0 <= j <= 15 and 0 <= mchash % HASH_REG_WIDTH <= 15.
+ */
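+	/*
+	 * Worked example (illustrative): for mchash = 92 (0x5c),
+	 * j = 92 / 16 = 5 and hash_bit = 1 << (92 % 16) = 1 << 12,
+	 * i.e. bit 12 of hash filter register 5 gets set.
+	 */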
+ j = mchash / HASH_REG_WIDTH;
+ hash_bit = (1 << (mchash % HASH_REG_WIDTH));
+ hash_filter->hash_filter_regs[j] |= hash_bit;
+
+ hash_filter->hash_bit_ref_cnt[mchash]++;
+ if (hash_filter->hash_bit_ref_cnt[mchash] == 1) {
+ hash_filter->hash_ref_cnt++;
+ rx_init = B_TRUE;
+ }
+
+ if (rx_init) {
+ (void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_FALSE);
+ (void) hxge_pfc_load_hash_table(hxgep);
+ (void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_TRUE);
+ }
+
+ RW_EXIT(&hxgep->filter_lock);
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_add_mcast_addr"));
+
+ return (HXGE_OK);
+}
+
+/*
+ * Remove a multicast address entry from the HW hash table
+ */
+hxge_status_t
+hxge_del_mcast_addr(p_hxge_t hxgep, struct ether_addr *addrp)
+{
+ uint32_t mchash;
+ p_hash_filter_t hash_filter;
+ uint16_t hash_bit;
+ boolean_t rx_init = B_FALSE;
+ uint_t j;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_del_mcast_addr"));
+ RW_ENTER_WRITER(&hxgep->filter_lock);
+ mchash = crc32_mchash(addrp);
+ if (hxgep->hash_filter == NULL) {
+ HXGE_DEBUG_MSG((NULL, STR_CTL,
+ "Hash filter already de_allocated."));
+ RW_EXIT(&hxgep->filter_lock);
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_del_mcast_addr"));
+ return (HXGE_OK);
+ }
+
+ hash_filter = hxgep->hash_filter;
+ hash_filter->hash_bit_ref_cnt[mchash]--;
+ if (hash_filter->hash_bit_ref_cnt[mchash] == 0) {
+ j = mchash / HASH_REG_WIDTH;
+ hash_bit = (1 << (mchash % HASH_REG_WIDTH));
+ hash_filter->hash_filter_regs[j] &= ~hash_bit;
+ hash_filter->hash_ref_cnt--;
+ rx_init = B_TRUE;
+ }
+
+ if (hash_filter->hash_ref_cnt == 0) {
+ HXGE_DEBUG_MSG((NULL, STR_CTL,
+ "De-allocating hash filter storage."));
+ KMEM_FREE(hash_filter, sizeof (hash_filter_t));
+ hxgep->hash_filter = NULL;
+ }
+
+ if (rx_init) {
+ (void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle, B_FALSE);
+ (void) hxge_pfc_load_hash_table(hxgep);
+
+ /* Enable hash only if there are any hash entries */
+ if (hxgep->hash_filter != NULL)
+ (void) hpi_pfc_set_l2_hash(hxgep->hpi_reg_handle,
+ B_TRUE);
+ }
+
+ RW_EXIT(&hxgep->filter_lock);
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_del_mcast_addr"));
+
+ return (HXGE_OK);
+}
+
+
+static hxge_status_t
+hxge_pfc_set_mac_address(p_hxge_t hxgep, uint32_t slot,
+ struct ether_addr *addrp)
+{
+ hpi_handle_t handle;
+ uint64_t addr;
+ hpi_status_t hpi_status;
+ uint8_t *address = addrp->ether_addr_octet;
+ uint64_t tmp;
+ int i;
+
+ if (hxgep->hxge_hw_p == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_pfc_set_mac_address: common hardware not set"));
+ return (HXGE_ERROR);
+ }
+
+ /*
+ * Convert a byte array to a 48 bit value.
+	 * Need to check endianness if in doubt.
+ */
+ addr = 0;
+ for (i = 0; i < ETHERADDRL; i++) {
+ tmp = address[i];
+ addr <<= 8;
+ addr |= tmp;
+ }
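+	/*
+	 * Worked example (illustrative): for aa-bb-cc-dd-ee-ff the loop
+	 * above yields addr = 0xaabbccddeeff, matching the PfcMacAddr
+	 * register format where addr[47:0] is "aabbccddeeff".
+	 */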
+
+ handle = hxgep->hpi_reg_handle;
+ hpi_status = hpi_pfc_set_mac_address(handle, slot, addr);
+
+ if (hpi_status != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_pfc_set_mac_address: failed to set address"));
+ return (HXGE_ERROR);
+ }
+
+ return (HXGE_OK);
+}
+
+/*ARGSUSED*/
+hxge_status_t
+hxge_pfc_num_macs_get(p_hxge_t hxgep, uint32_t *nmacs)
+{
+ *nmacs = PFC_N_MAC_ADDRESSES;
+ return (HXGE_OK);
+}
+
+
+hxge_status_t
+hxge_pfc_set_hash(p_hxge_t hxgep, uint32_t seed)
+{
+ hpi_status_t rs = HPI_SUCCESS;
+ hpi_handle_t handle;
+ p_hxge_class_pt_cfg_t p_class_cfgp;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_set_hash"));
+
+ p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
+ p_class_cfgp->init_hash = seed;
+ handle = hxgep->hpi_reg_handle;
+
+ rs = hpi_pfc_set_hash_seed_value(handle, seed);
+ if (rs & HPI_PFC_ERROR) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_pfc_set_hash %x failed ", seed));
+ return (HXGE_ERROR | rs);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, " <== hxge_pfc_set_hash"));
+
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_pfc_config_tcam_enable(p_hxge_t hxgep)
+{
+ hpi_handle_t handle;
+ boolean_t enable = B_TRUE;
+ hpi_status_t hpi_status;
+
+ handle = hxgep->hpi_reg_handle;
+ if (hxgep->hxge_hw_p == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_pfc_config_tcam_enable: common hardware not set"));
+ return (HXGE_ERROR);
+ }
+
+ hpi_status = hpi_pfc_set_tcam_enable(handle, enable);
+ if (hpi_status != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hpi_pfc_set_tcam_enable: enable tcam failed"));
+ return (HXGE_ERROR);
+ }
+
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_pfc_config_tcam_disable(p_hxge_t hxgep)
+{
+ hpi_handle_t handle;
+ boolean_t enable = B_FALSE;
+ hpi_status_t hpi_status;
+
+ handle = hxgep->hpi_reg_handle;
+ if (hxgep->hxge_hw_p == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_pfc_config_tcam_disable: common hardware not set"));
+ return (HXGE_ERROR);
+ }
+
+ hpi_status = hpi_pfc_set_tcam_enable(handle, enable);
+ if (hpi_status != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hpi_pfc_set_tcam_enable: disable tcam failed"));
+ return (HXGE_ERROR);
+ }
+
+ return (HXGE_OK);
+}
+
+static hxge_status_t
+hxge_cfg_tcam_ip_class_get(p_hxge_t hxgep, tcam_class_t class,
+ uint32_t *class_config)
+{
+ hpi_status_t rs = HPI_SUCCESS;
+ tcam_key_cfg_t cfg;
+ hpi_handle_t handle;
+ uint32_t ccfg = 0;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_cfg_tcam_ip_class_get"));
+
+ bzero(&cfg, sizeof (tcam_key_cfg_t));
+ handle = hxgep->hpi_reg_handle;
+
+ rs = hpi_pfc_get_l3_class_config(handle, class, &cfg);
+ if (rs & HPI_PFC_ERROR) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_cfg_tcam_ip_class opt %x for class %d failed ",
+ class_config, class));
+ return (HXGE_ERROR | rs);
+ }
+ if (cfg.discard)
+ ccfg |= HXGE_CLASS_DISCARD;
+
+ if (cfg.lookup_enable)
+ ccfg |= HXGE_CLASS_TCAM_LOOKUP;
+
+ *class_config = ccfg;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_cfg_tcam_ip_class_get %x",
+ ccfg));
+
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_pfc_ip_class_config_get(p_hxge_t hxgep, tcam_class_t class,
+ uint32_t *config)
+{
+ uint32_t t_class_config;
+ int t_status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_ip_class_config_get"));
+ t_class_config = 0;
+ t_status = hxge_cfg_tcam_ip_class_get(hxgep, class, &t_class_config);
+
+ if (t_status & HPI_PFC_ERROR) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_pfc_ip_class_config_get for class %d tcam failed",
+ class));
+ return (t_status);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, " hxge_pfc_ip_class_config tcam %x",
+ t_class_config));
+
+ *config = t_class_config;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config_get"));
+ return (HXGE_OK);
+}
+
+static hxge_status_t
+hxge_pfc_config_init(p_hxge_t hxgep)
+{
+ hpi_handle_t handle;
+
+ handle = hxgep->hpi_reg_handle;
+ if (hxgep->hxge_hw_p == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_pfc_config_init: common hardware not set"));
+ return (HXGE_ERROR);
+ }
+
+ (void) hpi_pfc_set_tcam_enable(handle, B_FALSE);
+ (void) hpi_pfc_set_l2_hash(handle, B_FALSE);
+ (void) hpi_pfc_set_tcp_cksum(handle, B_FALSE);
+ (void) hpi_pfc_set_default_dma(handle, 0);
+ (void) hpi_pfc_mac_addr_enable(handle, 0);
+ (void) hpi_pfc_set_force_csum(handle, B_FALSE);
+
+ /* Set the drop log mask to ignore the logs */
+ (void) hpi_pfc_set_drop_log_mask(handle, 1, 1, 1, 1, 1);
+
+ /* Clear the interrupt masks to receive interrupts */
+ (void) hpi_pfc_set_interrupt_mask(handle, 0, 0, 0);
+
+ /* Clear the interrupt status */
+ (void) hpi_pfc_clear_interrupt_status(handle);
+
+ return (HXGE_OK);
+}
+
+static hxge_status_t
+hxge_pfc_tcam_invalidate_all(p_hxge_t hxgep)
+{
+ hpi_status_t rs = HPI_SUCCESS;
+ hpi_handle_t handle;
+ p_hxge_hw_list_t hw_p;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL,
+ "==> hxge_pfc_tcam_invalidate_all"));
+ handle = hxgep->hpi_reg_handle;
+ if ((hw_p = hxgep->hxge_hw_p) == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_pfc_tcam_invalidate_all: common hardware not set"));
+ return (HXGE_ERROR);
+ }
+
+ MUTEX_ENTER(&hw_p->hxge_tcam_lock);
+ rs = hpi_pfc_tcam_invalidate_all(handle);
+ MUTEX_EXIT(&hw_p->hxge_tcam_lock);
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_tcam_invalidate_all"));
+ if (rs != HPI_SUCCESS)
+ return (HXGE_ERROR);
+
+ return (HXGE_OK);
+}
+
+static hxge_status_t
+hxge_pfc_tcam_init(p_hxge_t hxgep)
+{
+ hpi_status_t rs = HPI_SUCCESS;
+ hpi_handle_t handle;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_tcam_init"));
+ handle = hxgep->hpi_reg_handle;
+
+ if (hxgep->hxge_hw_p == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_pfc_tcam_init: common hardware not set"));
+ return (HXGE_ERROR);
+ }
+
+ /*
+ * Disable the TCAM.
+ */
+ rs = hpi_pfc_set_tcam_enable(handle, B_FALSE);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM Disable\n"));
+ return (HXGE_ERROR | rs);
+ }
+
+ /*
+ * Invalidate all the TCAM entries for this blade.
+ */
+ rs = hxge_pfc_tcam_invalidate_all(hxgep);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM Disable\n"));
+ return (HXGE_ERROR | rs);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_tcam_init"));
+ return (HXGE_OK);
+}
+
+static hxge_status_t
+hxge_pfc_vlan_tbl_clear_all(p_hxge_t hxgep)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+ p_hxge_hw_list_t hw_p;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_vlan_tbl_clear_all "));
+
+ handle = hxgep->hpi_reg_handle;
+ if ((hw_p = hxgep->hxge_hw_p) == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_pfc_vlan_tbl_clear_all: common hardware not set"));
+ return (HXGE_ERROR);
+ }
+
+ mutex_enter(&hw_p->hxge_vlan_lock);
+ rs = hpi_pfc_cfg_vlan_table_clear(handle);
+ mutex_exit(&hw_p->hxge_vlan_lock);
+
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "failed vlan table clear\n"));
+ return (HXGE_ERROR | rs);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_vlan_tbl_clear_all "));
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_pfc_ip_class_config(p_hxge_t hxgep, tcam_class_t class, uint32_t config)
+{
+ uint32_t class_config;
+ p_hxge_class_pt_cfg_t p_class_cfgp;
+ tcam_key_cfg_t cfg;
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_ip_class_config"));
+ p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
+ class_config = p_class_cfgp->class_cfg[class];
+
+ if (class_config != config) {
+ p_class_cfgp->class_cfg[class] = config;
+ class_config = config;
+ }
+
+ handle = hxgep->hpi_reg_handle;
+
+ if (class == TCAM_CLASS_ETYPE_1 || class == TCAM_CLASS_ETYPE_2) {
+ rs = hpi_pfc_set_l2_class_slot(handle,
+ class_config & HXGE_CLASS_ETHER_TYPE_MASK,
+ class_config & HXGE_CLASS_VALID,
+ class - TCAM_CLASS_ETYPE_1);
+ } else {
+ if (class_config & HXGE_CLASS_DISCARD)
+ cfg.discard = 1;
+ else
+ cfg.discard = 0;
+ if (class_config & HXGE_CLASS_TCAM_LOOKUP)
+ cfg.lookup_enable = 1;
+ else
+ cfg.lookup_enable = 0;
+
+ rs = hpi_pfc_set_l3_class_config(handle, class, cfg);
+ }
+
+ if (rs & HPI_PFC_ERROR) {
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL,
+ " hxge_pfc_ip_class_config %x for class %d tcam failed",
+ config, class));
+ return (HXGE_ERROR);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config"));
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_pfc_ip_class_config_all(p_hxge_t hxgep)
+{
+ uint32_t class_config;
+ tcam_class_t cl;
+ int status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_ip_class_config_all"));
+
+ for (cl = TCAM_CLASS_ETYPE_1; cl <= TCAM_CLASS_SCTP_IPV6; cl++) {
+ if (cl == TCAM_CLASS_RESERVED_4 ||
+ cl == TCAM_CLASS_RESERVED_5 ||
+ cl == TCAM_CLASS_RESERVED_6 ||
+ cl == TCAM_CLASS_RESERVED_7)
+ continue;
+
+ class_config = hxgep->class_config.class_cfg[cl];
+ status = hxge_pfc_ip_class_config(hxgep, cl, class_config);
+ if (status & HPI_PFC_ERROR) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_pfc_ip_class_config failed "
+ " class %d config %x ", cl, class_config));
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_ip_class_config_all"));
+ return (HXGE_OK);
+}
+
+static hxge_status_t
+hxge_pfc_update_hw(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+ hpi_handle_t handle;
+ p_hxge_param_t pa;
+ uint64_t cfgd_vlans;
+ uint64_t *val_ptr;
+ int i;
+ hxge_param_map_t *p_map;
+ boolean_t parity = 0;
+ boolean_t implicit_valid = 0;
+ vlan_id_t implicit_vlan_id;
+
+ p_hxge_mv_cfg_t vlan_table;
+ p_hxge_class_pt_cfg_t p_class_cfgp;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_update_hw"));
+ p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
+ handle = hxgep->hpi_reg_handle;
+
+ status = hxge_pfc_set_hash(hxgep, p_class_cfgp->init_hash);
+ if (status != HXGE_OK) {
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "hxge_pfc_set_hash Failed"));
+ return (HXGE_ERROR);
+ }
+
+ vlan_table = p_class_cfgp->vlan_tbl;
+
+ /* configure vlan tables */
+ pa = (p_hxge_param_t)&hxgep->param_arr[param_vlan_ids];
+#if defined(__i386)
+ val_ptr = (uint64_t *)(uint32_t)pa->value;
+#else
+ val_ptr = (uint64_t *)pa->value;
+#endif
+ cfgd_vlans = ((pa->type & HXGE_PARAM_ARRAY_CNT_MASK) >>
+ HXGE_PARAM_ARRAY_CNT_SHIFT);
+
+ for (i = 0; i < cfgd_vlans; i++) {
+ p_map = (hxge_param_map_t *)&val_ptr[i];
+ if (vlan_table[p_map->param_id].flag) {
+ status = hpi_pfc_cfg_vlan_table_entry_set(handle,
+ p_map->param_id);
+ if (status != HPI_SUCCESS) {
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL,
+ "hpi_pfc_cfg_vlan_table_entry_set Failed"));
+ return (HXGE_ERROR);
+ }
+ }
+ }
+
+ /* Configure the vlan_ctrl register */
+ /* Let hw generate the parity bits in pfc_vlan_table */
+ parity = 0;
+
+ pa = (p_hxge_param_t)&hxgep->param_arr[param_implicit_vlan_id];
+ implicit_vlan_id = (vlan_id_t)pa->value;
+
+ /*
+	 * Enable it only if there is a valid implicit vlan id either in
+	 * the NDD table or the .conf file.
+ */
+ if (implicit_vlan_id >= VLAN_ID_MIN && implicit_vlan_id <= VLAN_ID_MAX)
+ implicit_valid = 1;
+
+ status = hpi_pfc_cfg_vlan_control_set(handle, parity, implicit_valid,
+ implicit_vlan_id);
+ if (status != HPI_SUCCESS) {
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL,
+ "hxge_pfc_update_hw: hpi_pfc_cfg_vlan_control_set failed"));
+ return (HXGE_ERROR);
+ }
+
+ /* config MAC addresses */
+ /* Need to think about this */
+
+ /* Configure hash value and classes */
+ status = hxge_pfc_ip_class_config_all(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_pfc_ip_class_config_all Failed"));
+ return (HXGE_ERROR);
+ }
+
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_pfc_hw_reset(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, " ==> hxge_pfc_hw_reset"));
+
+ status = hxge_pfc_config_init(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "failed PFC config init."));
+ return (status);
+ }
+
+ status = hxge_pfc_tcam_init(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "failed TCAM init."));
+ return (status);
+ }
+
+ /*
+ * invalidate VLAN RDC tables
+ */
+ status = hxge_pfc_vlan_tbl_clear_all(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "failed VLAN Table Invalidate. "));
+ return (status);
+ }
+ hxgep->classifier.state |= HXGE_PFC_HW_RESET;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_hw_reset"));
+
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_classify_init_hw(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init_hw"));
+
+ if (hxgep->classifier.state & HXGE_PFC_HW_INIT) {
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL,
+ "hxge_classify_init_hw already init"));
+ return (HXGE_OK);
+ }
+
+ /* Now do a real configuration */
+ status = hxge_pfc_update_hw(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_pfc_update_hw failed"));
+ return (HXGE_ERROR);
+ }
+
+ status = hxge_tcam_default_config(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_tcam_default_config failed"));
+ return (status);
+ }
+
+ hxgep->classifier.state |= HXGE_PFC_HW_INIT;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init_hw"));
+
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_classify_init_sw(p_hxge_t hxgep)
+{
+ int alloc_size;
+ hxge_classify_t *classify_ptr;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_init_sw"));
+ classify_ptr = &hxgep->classifier;
+
+ if (classify_ptr->state & HXGE_PFC_SW_INIT) {
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL,
+ "hxge_classify_init_sw already init"));
+ return (HXGE_OK);
+ }
+
+ /* Init SW structures */
+ classify_ptr->tcam_size = TCAM_HXGE_TCAM_MAX_ENTRY;
+
+ alloc_size = sizeof (tcam_flow_spec_t) * classify_ptr->tcam_size;
+	classify_ptr->tcam_entries = KMEM_ZALLOC(alloc_size, KM_SLEEP);
+ bzero(classify_ptr->class_usage, sizeof (classify_ptr->class_usage));
+
+ /* Start from the beginning of TCAM */
+ hxgep->classifier.tcam_location = 0;
+ classify_ptr->state |= HXGE_PFC_SW_INIT;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_init_sw"));
+
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_classify_exit_sw(p_hxge_t hxgep)
+{
+ int alloc_size;
+ hxge_classify_t *classify_ptr;
+ int fsize;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_classify_exit_sw"));
+ classify_ptr = &hxgep->classifier;
+
+ fsize = sizeof (tcam_flow_spec_t);
+ if (classify_ptr->tcam_entries) {
+ alloc_size = fsize * classify_ptr->tcam_size;
+ KMEM_FREE((void *) classify_ptr->tcam_entries, alloc_size);
+ }
+	hxgep->classifier.state = 0;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_classify_exit_sw"));
+
+ return (HXGE_OK);
+}
+
+/*ARGSUSED*/
+hxge_status_t
+hxge_pfc_handle_sys_errors(p_hxge_t hxgep)
+{
+ return (HXGE_OK);
+}
+
+uint_t
+hxge_pfc_intr(caddr_t arg1, caddr_t arg2)
+{
+ p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
+ p_hxge_t hxgep = (p_hxge_t)arg2;
+ hpi_handle_t handle;
+ p_hxge_pfc_stats_t statsp;
+ pfc_int_status_t int_status;
+ pfc_bad_cs_counter_t bad_cs_count;
+ pfc_drop_counter_t drop_count;
+ pfc_drop_log_t drop_log;
+ pfc_vlan_par_err_log_t vlan_par_err_log;
+ pfc_tcam_par_err_log_t tcam_par_err_log;
+
+ if (ldvp == NULL) {
+ HXGE_DEBUG_MSG((NULL, INT_CTL,
+ "<== hxge_pfc_intr: hxgep $%p ldvp $%p", hxgep, ldvp));
+ return (DDI_INTR_UNCLAIMED);
+ }
+
+ if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
+ hxgep = ldvp->hxgep;
+ }
+
+ handle = hxgep->hpi_reg_handle;
+ statsp = (p_hxge_pfc_stats_t)&hxgep->statsp->pfc_stats;
+
+ /*
+	 * Need to read the pfc interrupt status register to figure out
+	 * what is happening.
+ */
+ (void) hpi_pfc_get_interrupt_status(handle, &int_status);
+
+ if (int_status.bits.pkt_drop) {
+ statsp->pkt_drop++;
+ if (statsp->pkt_drop == 1)
+ HXGE_ERROR_MSG((hxgep, INT_CTL, "PFC pkt_drop"));
+
+ /* Collect each individual drops */
+ (void) hpi_pfc_get_drop_log(handle, &drop_log);
+
+ if (drop_log.bits.tcp_ctrl_drop)
+ statsp->errlog.tcp_ctrl_drop++;
+ if (drop_log.bits.l2_addr_drop)
+ statsp->errlog.l2_addr_drop++;
+ if (drop_log.bits.class_code_drop)
+ statsp->errlog.class_code_drop++;
+ if (drop_log.bits.tcam_drop)
+ statsp->errlog.tcam_drop++;
+ if (drop_log.bits.vlan_drop)
+ statsp->errlog.vlan_drop++;
+
+ /* Collect the total drops for all kinds */
+ (void) hpi_pfc_get_drop_counter(handle, &drop_count.value);
+ statsp->drop_count += drop_count.bits.drop_count;
+ }
+
+ if (int_status.bits.tcam_parity_err) {
+ statsp->tcam_parity_err++;
+
+ (void) hpi_pfc_get_tcam_parity_log(handle, &tcam_par_err_log);
+ statsp->errlog.tcam_par_err_log = tcam_par_err_log.bits.addr;
+
+ if (statsp->tcam_parity_err == 1)
+ HXGE_ERROR_MSG((hxgep,
+ INT_CTL, " TCAM parity error addr: 0x%x",
+ tcam_par_err_log.bits.addr));
+ }
+
+ if (int_status.bits.vlan_parity_err) {
+ statsp->vlan_parity_err++;
+
+ (void) hpi_pfc_get_vlan_parity_log(handle, &vlan_par_err_log);
+ statsp->errlog.vlan_par_err_log = vlan_par_err_log.bits.addr;
+
+ if (statsp->vlan_parity_err == 1)
+ HXGE_ERROR_MSG((hxgep, INT_CTL,
+ " vlan table parity error addr: 0x%x",
+ vlan_par_err_log.bits.addr));
+ }
+
+ (void) hpi_pfc_get_bad_csum_counter(handle, &bad_cs_count.value);
+ statsp->bad_cs_count += bad_cs_count.bits.bad_cs_count;
+
+ (void) hpi_pfc_clear_interrupt_status(handle);
+ return (DDI_INTR_CLAIMED);
+}
+
+static void
+hxge_pfc_get_next_mac_addr(uint8_t *st_mac, struct ether_addr *final_mac)
+{
+ uint64_t mac[ETHERADDRL];
+ uint64_t mac_addr = 0;
+ int i, j;
+
+ for (i = ETHERADDRL - 1, j = 0; j < ETHERADDRL; i--, j++) {
+ mac[j] = st_mac[i];
+ mac_addr |= (mac[j] << (j*8));
+ }
+
+ final_mac->ether_addr_octet[0] = (mac_addr & 0xff0000000000) >> 40;
+ final_mac->ether_addr_octet[1] = (mac_addr & 0xff00000000) >> 32;
+ final_mac->ether_addr_octet[2] = (mac_addr & 0xff000000) >> 24;
+ final_mac->ether_addr_octet[3] = (mac_addr & 0xff0000) >> 16;
+ final_mac->ether_addr_octet[4] = (mac_addr & 0xff00) >> 8;
+ final_mac->ether_addr_octet[5] = (mac_addr & 0xff);
+}
+
+hxge_status_t
+hxge_pfc_mac_addrs_get(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+ hpi_status_t hpi_status = HPI_SUCCESS;
+ hpi_handle_t handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ uint8_t mac_addr[ETHERADDRL];
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_mac_addr_get"));
+
+ hpi_status = hpi_pfc_mac_addr_get_i(handle, mac_addr, 0);
+ if (hpi_status != HPI_SUCCESS) {
+ status = (HXGE_ERROR | hpi_status);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_pfc_mac_addr_get: pfc_mac_addr_get_i failed"));
+ goto exit;
+ }
+
+ hxge_pfc_get_next_mac_addr(mac_addr, &hxgep->factaddr);
+ HXGE_ERROR_MSG((hxgep, PFC_CTL, "MAC Addr(0): %x:%x:%x:%x:%x:%x\n",
+ mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5]));
+
+exit:
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, "<== hxge_pfc_mac_addr_get, "
+ "status [0x%x]", status));
+ return (status);
+}
+
+/*
+ * Calculate the bit in the multicast address filter
+ * that selects the given address.
+ * Note: For Hydra, the last 8 bits are used.
+ */
+static uint32_t
+crc32_mchash(p_ether_addr_t addr)
+{
+ uint8_t *cp;
+ uint32_t crc;
+ uint32_t c;
+ int byte;
+ int bit;
+
+ cp = (uint8_t *)addr;
+ crc = (uint32_t)0xffffffff;
+ for (byte = 0; byte < ETHERADDRL; byte++) {
+		/* Hydra calculates the hash over the bytes in reverse order */
+ c = (uint32_t)cp[ETHERADDRL - 1 - byte];
+ for (bit = 0; bit < 8; bit++) {
+ if ((c & 0x1) ^ (crc & 0x1))
+ crc = (crc >> 1)^0xedb88320;
+ else
+ crc = (crc >> 1);
+ c >>= 1;
+ }
+ }
+ return ((~crc) >> (32 - HASH_BITS));
+}
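+
+/*
+ * Note (illustrative): 0xedb88320 is the bit-reflected form of the
+ * standard Ethernet CRC-32 polynomial 0x04c11db7, so the loop above
+ * computes a right-shifting (reflected) CRC-32 over the address
+ * bytes, taken in reverse order to match the hardware.
+ */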
+
+static hxge_status_t
+hxge_pfc_load_hash_table(p_hxge_t hxgep)
+{
+ uint32_t i;
+ uint16_t hashtab_e;
+ p_hash_filter_t hash_filter;
+ hpi_handle_t handle;
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "==> hxge_pfc_load_hash_table\n"));
+ handle = hxgep->hpi_reg_handle;
+
+ /*
+ * Load the multicast hash filter bits.
+ */
+ hash_filter = hxgep->hash_filter;
+ for (i = 0; i < MAC_MAX_HASH_ENTRY; i++) {
+ if (hash_filter != NULL) {
+ hashtab_e = (uint16_t)hash_filter->hash_filter_regs[i];
+ } else {
+ hashtab_e = 0;
+ }
+
+ if (hpi_pfc_set_multicast_hash_table(handle, i,
+ hashtab_e) != HPI_SUCCESS)
+ return (HXGE_ERROR);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, PFC_CTL, "<== hxge_pfc_load_hash_table\n"));
+
+ return (HXGE_OK);
+}
diff --git a/usr/src/uts/common/io/hxge/hxge_pfc.h b/usr/src/uts/common/io/hxge/hxge_pfc.h
new file mode 100644
index 0000000000..6a10fad67f
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_pfc.h
@@ -0,0 +1,332 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HXGE_PFC_H
+#define _HXGE_PFC_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* 0 and 4095 are reserved */
+#define VLAN_ID_MIN 1
+#define VLAN_ID_MAX 4094
+#define VLAN_ID_IMPLICIT 0
+
+#define HXGE_MAC_DEFAULT_ADDR_SLOT 0
+
+#define HASH_BITS 8
+#define NMCFILTER_BITS (1 << HASH_BITS)
+#define HASH_REG_WIDTH 16
+#define NMCFILTER_REGS (NMCFILTER_BITS / HASH_REG_WIDTH)
+ /* Number of multicast filter regs */
+#define MAC_MAX_HASH_ENTRY NMCFILTER_REGS
+
+#define REG_PIO_WRITE64(handle, offset, value) \
+ HXGE_REG_WR64((handle), (offset), (value))
+#define REG_PIO_READ64(handle, offset, val_p) \
+ HXGE_REG_RD64((handle), (offset), (val_p))
+
+#define TCAM_CTL_RWC_TCAM_WR 0x0
+#define TCAM_CTL_RWC_TCAM_CMP 0x2
+#define TCAM_CTL_RWC_RAM_WR 0x4
+#define TCAM_CTL_RWC_RAM_RD 0x5
+#define TCAM_CTL_RWC_RWC_STAT 0x1
+#define TCAM_CTL_RWC_RWC_MATCH 0x1
+
+#define WRITE_TCAM_REG_CTL(handle, ctl) \
+ REG_PIO_WRITE64(handle, PFC_TCAM_CTRL, ctl)
+
+#define READ_TCAM_REG_CTL(handle, val_p) \
+ REG_PIO_READ64(handle, PFC_TCAM_CTRL, val_p)
+
+#define WRITE_TCAM_REG_KEY0(handle, key) \
+ REG_PIO_WRITE64(handle, PFC_TCAM_KEY0, key)
+#define WRITE_TCAM_REG_KEY1(handle, key) \
+ REG_PIO_WRITE64(handle, PFC_TCAM_KEY1, key)
+#define WRITE_TCAM_REG_MASK0(handle, mask) \
+ REG_PIO_WRITE64(handle, PFC_TCAM_MASK0, mask)
+#define WRITE_TCAM_REG_MASK1(handle, mask) \
+ REG_PIO_WRITE64(handle, PFC_TCAM_MASK1, mask)
+
+#define READ_TCAM_REG_KEY0(handle, val_p) \
+ REG_PIO_READ64(handle, PFC_TCAM_KEY0, val_p)
+#define READ_TCAM_REG_KEY1(handle, val_p) \
+ REG_PIO_READ64(handle, PFC_TCAM_KEY1, val_p)
+#define READ_TCAM_REG_MASK0(handle, val_p) \
+ REG_PIO_READ64(handle, PFC_TCAM_MASK0, val_p)
+#define READ_TCAM_REG_MASK1(handle, val_p) \
+ REG_PIO_READ64(handle, PFC_TCAM_MASK1, val_p)
+
+typedef union _hxge_tcam_res_t {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t padding:34;
+ uint64_t reserved:15;
+ uint64_t parity:1;
+ uint64_t hit_count:4;
+ uint64_t channel_d:2;
+ uint64_t channel_c:2;
+ uint64_t channel_b:2;
+ uint64_t channel_a:2;
+ uint64_t source_hash:1;
+ uint64_t discard:1;
+#else
+ uint64_t discard:1;
+ uint64_t source_hash:1;
+ uint64_t channel_a:2;
+ uint64_t channel_b:2;
+ uint64_t channel_c:2;
+ uint64_t channel_d:2;
+ uint64_t hit_count:4;
+ uint64_t parity:1;
+ uint64_t reserved:15;
+ uint64_t padding:34;
+#endif
+ } bits;
+} hxge_tcam_res_t, *p_hxge_tcam_res_t;
+
+typedef struct tcam_reg {
+#if defined(_BIG_ENDIAN)
+ uint64_t reg1; /* 99:64 */
+ uint64_t reg0; /* 63:0 */
+#else
+ uint64_t reg0; /* 63:0 */
+ uint64_t reg1; /* 99:64 */
+#endif
+} hxge_tcam_reg_t;
+
+typedef struct hxge_tcam_ipv4_S {
+#if defined(_BIG_ENDIAN)
+ uint32_t class_code:5; /* 99:95 */
+ uint32_t blade_id:4; /* 94:91 */
+ uint32_t rsrvd2:2; /* 90:89 */
+ uint32_t noport:1; /* 88 */
+ uint32_t protocol:8; /* 87:80 */
+ uint32_t l4_hdr; /* 79:48 */
+ uint32_t rsrvd:16; /* 47:32 */
+ uint32_t ip_daddr; /* 31:0 */
+#else
+ uint32_t ip_daddr; /* 31:0 */
+ uint32_t rsrvd:16; /* 47:32 */
+ uint32_t l4_hdr; /* 79:48 */
+ uint32_t protocol:8; /* 87:80 */
+ uint32_t noport:1; /* 88 */
+ uint32_t rsrvd2:2; /* 90:89 */
+ uint32_t blade_id:4; /* 94:91 */
+ uint32_t class_code:5; /* 99:95 */
+#endif
+} hxge_tcam_ipv4_t;
+
+typedef struct hxge_tcam_ipv6_S {
+#if defined(_BIG_ENDIAN)
+ uint32_t class_code:5; /* 99:95 */
+ uint32_t blade_id:4; /* 94:91 */
+ uint64_t rsrvd2:3; /* 90:88 */
+ uint64_t protocol:8; /* 87:80 */
+ uint64_t l4_hdr:32; /* 79:48 */
+ uint64_t rsrvd:48; /* 47:0 */
+#else
+ uint64_t rsrvd:48; /* 47:0 */
+ uint64_t l4_hdr:32; /* 79:48 */
+ uint64_t protocol:8; /* 87:80 */
+ uint64_t rsrvd2:3; /* 90:88 */
+ uint32_t blade_id:4; /* 94:91 */
+ uint32_t class_code:5; /* 99:95 */
+#endif
+} hxge_tcam_ipv6_t;
+
+typedef struct hxge_tcam_enet_S {
+#if defined(_BIG_ENDIAN)
+ uint8_t class_code:5; /* 99:95 */
+ uint8_t blade_id:4; /* 94:91 */
+ uint8_t rsrvd:3; /* 90:88 */
+ uint8_t eframe[11]; /* 87:0 */
+#else
+ uint8_t eframe[11]; /* 87:0 */
+ uint8_t rsrvd:3; /* 90:88 */
+ uint8_t blade_id:4; /* 94:91 */
+ uint8_t class_code:5; /* 99:95 */
+#endif
+} hxge_tcam_ether_t;
+
+typedef struct hxge_tcam_spread_S {
+#if defined(_BIG_ENDIAN)
+ uint64_t unused:28; /* 127:100 */
+ uint64_t class_code:5; /* 99:95 */
+ uint64_t blade_id:4; /* 94:91 */
+ uint64_t wild1:27; /* 90:64 */
+ uint64_t wild; /* 63:0 */
+#else
+ uint64_t wild; /* 63:0 */
+ uint64_t wild1:27; /* 90:64 */
+ uint64_t blade_id:4; /* 94:91 */
+ uint64_t class_code:5; /* 99:95 */
+ uint64_t unused:28; /* 127:100 */
+#endif
+} hxge_tcam_spread_t;
+
+typedef struct hxge_tcam_entry_S {
+ union _hxge_tcam_entry {
+ hxge_tcam_ipv4_t ipv4;
+ hxge_tcam_ipv6_t ipv6;
+ hxge_tcam_ether_t enet;
+ hxge_tcam_reg_t regs;
+ hxge_tcam_spread_t spread;
+ } key, mask;
+ hxge_tcam_res_t match_action;
+ uint16_t ether_type;
+} hxge_tcam_entry_t;
+
+#define key_reg0 key.regs.reg0
+#define key_reg1 key.regs.reg1
+#define mask_reg0 mask.regs.reg0
+#define mask_reg1 mask.regs.reg1
+
+#define key0 key.regs.reg0
+#define key1 key.regs.reg1
+#define mask0 mask.regs.reg0
+#define mask1 mask.regs.reg1
+
+#define ip4_class_key key.ipv4.class_code
+#define ip4_blade_id_key key.ipv4.blade_id
+#define ip4_noport_key key.ipv4.noport
+#define ip4_proto_key key.ipv4.protocol
+#define ip4_l4_hdr_key key.ipv4.l4_hdr
+#define ip4_dest_key key.ipv4.ip_daddr
+
+#define ip4_class_mask mask.ipv4.class_code
+#define ip4_blade_id_mask mask.ipv4.blade_id
+#define ip4_noport_mask mask.ipv4.noport
+#define ip4_proto_mask mask.ipv4.protocol
+#define ip4_l4_hdr_mask mask.ipv4.l4_hdr
+#define ip4_dest_mask mask.ipv4.ip_daddr
+
+#define ip6_class_key key.ipv6.class_code
+#define ip6_blade_id_key key.ipv6.blade_id
+#define ip6_proto_key key.ipv6.protocol
+#define ip6_l4_hdr_key key.ipv6.l4_hdr
+
+#define ip6_class_mask mask.ipv6.class_code
+#define ip6_blade_id_mask mask.ipv6.blade_id
+#define ip6_proto_mask mask.ipv6.protocol
+#define ip6_l4_hdr_mask mask.ipv6.l4_hdr
+
+#define ether_class_key key.enet.class_code
+#define ether_blade_id_key key.enet.blade_id
+#define ether_ethframe_key key.enet.eframe
+
+#define ether_class_mask mask.enet.class_code
+#define ether_blade_id_mask mask.enet.blade_id
+#define ether_ethframe_mask mask.enet.eframe
+
+typedef struct _pfc_errlog {
+ uint32_t tcp_ctrl_drop; /* pfc_drop_log */
+ uint32_t l2_addr_drop;
+ uint32_t class_code_drop;
+ uint32_t tcam_drop;
+ uint32_t vlan_drop;
+
+ uint32_t vlan_par_err_log; /* pfc_vlan_par_err_log */
+ uint32_t tcam_par_err_log; /* pfc_tcam_par_err_log */
+} pfc_errlog_t, *p_pfc_errlog_t;
+
+typedef struct _pfc_stats {
+ uint32_t pkt_drop; /* pfc_int_status */
+ uint32_t tcam_parity_err;
+ uint32_t vlan_parity_err;
+
+ uint32_t bad_cs_count; /* pfc_bad_cs_counter */
+ uint32_t drop_count; /* pfc_drop_counter */
+ pfc_errlog_t errlog;
+} hxge_pfc_stats_t, *p_hxge_pfc_stats_t;
+
+typedef enum pfc_tcam_class {
+ TCAM_CLASS_INVALID = 0,
+ TCAM_CLASS_DUMMY = 1,
+ TCAM_CLASS_ETYPE_1 = 2,
+ TCAM_CLASS_ETYPE_2,
+ TCAM_CLASS_RESERVED_4,
+ TCAM_CLASS_RESERVED_5,
+ TCAM_CLASS_RESERVED_6,
+ TCAM_CLASS_RESERVED_7,
+ TCAM_CLASS_TCP_IPV4,
+ TCAM_CLASS_UDP_IPV4,
+ TCAM_CLASS_AH_ESP_IPV4,
+ TCAM_CLASS_SCTP_IPV4,
+ TCAM_CLASS_TCP_IPV6,
+ TCAM_CLASS_UDP_IPV6,
+ TCAM_CLASS_AH_ESP_IPV6,
+ TCAM_CLASS_SCTP_IPV6,
+ TCAM_CLASS_ARP,
+ TCAM_CLASS_RARP,
+ TCAM_CLASS_DUMMY_12,
+ TCAM_CLASS_DUMMY_13,
+ TCAM_CLASS_DUMMY_14,
+ TCAM_CLASS_DUMMY_15,
+ TCAM_CLASS_MAX
+} tcam_class_t;
+
+typedef struct _tcam_key_cfg_t {
+ boolean_t lookup_enable;
+ boolean_t discard;
+} tcam_key_cfg_t;
+
+typedef struct _hash_filter_t {
+ uint_t hash_ref_cnt;
+ uint16_t hash_filter_regs[NMCFILTER_REGS];
+ uint32_t hash_bit_ref_cnt[NMCFILTER_BITS];
+} hash_filter_t, *p_hash_filter_t;
+
+#define HXGE_ETHER_FLOWS (FLOW_ETHER_DHOST | FLOW_ETHER_SHOST | \
+ FLOW_ETHER_TYPE)
+#define HXGE_VLAN_FLOWS (FLOW_ETHER_TPID | FLOW_ETHER_TCI)
+#define HXGE_ETHERNET_FLOWS (HXGE_ETHER_FLOWS | HXGE_VLAN_FLOWS)
+#define HXGE_PORT_FLOWS (FLOW_ULP_PORT_REMOTE | FLOW_ULP_PORT_LOCAL)
+#define HXGE_ADDR_FLOWS (FLOW_IP_REMOTE | FLOW_IP_LOCAL)
+#define HXGE_IP_FLOWS (FLOW_IP_VERSION | FLOW_IP_PROTOCOL | \
+ HXGE_PORT_FLOWS | HXGE_ADDR_FLOWS)
+#define HXGE_SUPPORTED_FLOWS (HXGE_ETHERNET_FLOWS | HXGE_IP_FLOWS)
+
+#define CLS_CODE_MASK 0x1f
+#define BLADE_ID_MASK 0xf
+#define PID_MASK 0xff
+#define IP_PORT_MASK 0xffff
+
+#define IP_ADDR_SA_MASK 0xFFFFFFFF
+#define IP_ADDR_DA_MASK IP_ADDR_SA_MASK
+#define L4PT_SPI_MASK IP_ADDR_SA_MASK
+
+#define BLADE_ID_OFFSET 127 /* Last entry in HCR_REG */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_HXGE_PFC_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_pfc_hw.h b/usr/src/uts/common/io/hxge/hxge_pfc_hw.h
new file mode 100644
index 0000000000..20c173afd6
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_pfc_hw.h
@@ -0,0 +1,773 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HXGE_PFC_HW_H
+#define _HXGE_PFC_HW_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PFC_BASE_ADDR 0x0200000
+
+#define PFC_VLAN_TABLE (PFC_BASE_ADDR + 0x0)
+#define PFC_VLAN_CTRL (PFC_BASE_ADDR + 0x9000)
+#define PFC_MAC_ADDR (PFC_BASE_ADDR + 0x10000)
+#define PFC_MAC_ADDR_MASK (PFC_BASE_ADDR + 0x10080)
+#define PFC_HASH_TABLE (PFC_BASE_ADDR + 0x10100)
+#define PFC_L2_CLASS_CONFIG (PFC_BASE_ADDR + 0x20000)
+#define PFC_L3_CLASS_CONFIG (PFC_BASE_ADDR + 0x20030)
+#define PFC_TCAM_KEY0 (PFC_BASE_ADDR + 0x20090)
+#define PFC_TCAM_KEY1 (PFC_BASE_ADDR + 0x20098)
+#define PFC_TCAM_MASK0 (PFC_BASE_ADDR + 0x200B0)
+#define PFC_TCAM_MASK1 (PFC_BASE_ADDR + 0x200B8)
+#define PFC_TCAM_CTRL (PFC_BASE_ADDR + 0x200D0)
+#define PFC_CONFIG (PFC_BASE_ADDR + 0x20100)
+#define TCP_CTRL_MASK (PFC_BASE_ADDR + 0x20108)
+#define SRC_HASH_VAL (PFC_BASE_ADDR + 0x20110)
+#define PFC_INT_STATUS (PFC_BASE_ADDR + 0x30000)
+#define PFC_DBG_INT_STATUS (PFC_BASE_ADDR + 0x30008)
+#define PFC_INT_MASK (PFC_BASE_ADDR + 0x30100)
+#define PFC_DROP_LOG (PFC_BASE_ADDR + 0x30200)
+#define PFC_DROP_LOG_MASK (PFC_BASE_ADDR + 0x30208)
+#define PFC_VLAN_PAR_ERR_LOG (PFC_BASE_ADDR + 0x30210)
+#define PFC_TCAM_PAR_ERR_LOG (PFC_BASE_ADDR + 0x30218)
+#define PFC_BAD_CS_COUNTER (PFC_BASE_ADDR + 0x30220)
+#define PFC_DROP_COUNTER (PFC_BASE_ADDR + 0x30228)
+#define PFC_AUTO_INIT (PFC_BASE_ADDR + 0x30300)
+
+
+/*
+ * Register: PfcVlanTable
+ * VLAN Table Registers
+ * Description: VLAN membership table. The CPU programs in the VLANs
+ * that it wants to belong to. A blade may be a member of multiple
+ * VLANs. Bits [31:0] of the first entry correspond to vlan members
+ * [31:0], bits [31:0] of the second entry correspond to vlan members
+ * [63:32], and so on.
+ * Fields:
+ * Odd parities of member[31:24], member[23:16], member[15:8],
+ * member[7:0]. These parity bits are ignored when parEn in the
+ * VLAN Control register is set to '0'.
+ * Set to 1 to indicate that the blade is a member of VLAN IDs
+ * (entry number * 32) through (entry number * 32 + 31)
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:28;
+ uint64_t parity:4;
+ uint64_t member:32;
+#else
+ uint64_t member:32;
+ uint64_t parity:4;
+ uint64_t rsrvd:28;
+#endif
+ } bits;
+} pfc_vlan_table_t;
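+
+/*
+ * Indexing sketch (illustrative only), per the description above: the
+ * membership bit for VLAN id v is found at
+ *
+ *	entry = v / 32;		VLAN table register index
+ *	bit = v % 32;		bit within the member field
+ */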
+
+
+/*
+ * Register: PfcVlanCtrl
+ * VLAN Control Register
+ * Description: VLAN control register. Controls VLAN table properties
+ * and implicit VLAN properties for non-VLAN tagged packets.
+ * Fields:
+ * VLAN table parity debug write enable. When set to 1, software
+ * writes the parity bits together with the data during a VLAN
+ * table write. Otherwise, hardware automatically generates the
+ * parity bits from the data.
+ * Set to 1 to indicate the implicit VLAN ID is valid for use in
+ * non-VLAN tagged packets filtering
+ * Implicit VLAN ID for non-VLAN tagged packets
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:50;
+ uint64_t par_en:1;
+ uint64_t valid:1;
+ uint64_t id:12;
+#else
+ uint64_t id:12;
+ uint64_t valid:1;
+ uint64_t par_en:1;
+ uint64_t rsrvd:50;
+#endif
+ } bits;
+} pfc_vlan_ctrl_t;
+
+
+/*
+ * Register: PfcMacAddr
+ * MAC Address
+ * Description: MAC Address - Contains a station's 48 bit MAC
+ * address. The first register corresponds to MAC address 0, the
+ * second register corresponds to MAC address 1 and so on. For a MAC
+ * address of format aa-bb-cc-dd-ee-ff, addr[47:0] corresponds to
+ * "aabbccddeeff". When used in conjunction with the MAC address
+ * filter mask registers, these registers can be used to construct
+ * either a unicast or multicast address. An address is considered
+ * matched if (DA & ~mask) == (MAC address & ~mask)
+ * Fields:
+ * 48 bits of the station's MAC address
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:16;
+ uint64_t addr:48;
+#else
+ uint64_t addr:48;
+ uint64_t rsrvd:16;
+#endif
+ } bits;
+} pfc_mac_addr_t;
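+
+/*
+ * Example (editorial sketch, not part of the original submission):
+ * the match rule quoted above, (DA & ~mask) == (MAC address & ~mask),
+ * expressed as a host-side predicate over the 48-bit fields.
+ */
+static int
+pfc_mac_addr_matches(uint64_t da, uint64_t addr, uint64_t mask)
+{
+	uint64_t care = ~mask & 0xffffffffffffULL;	/* 48-bit field */
+
+	return ((da & care) == (addr & care));
+}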
+
+
+/*
+ * Register: PfcMacAddrMask
+ * MAC Address Filter
+ * Description: MAC Address Filter Mask - Contains the station's 48
+ * bit MAC address filter mask. The first register corresponds to MAC
+ * address 0 filter mask, the second register corresponds to MAC
+ * address 1 filter mask and so on. These filter masks cover MAC
+ * address bits 47:0 in the same order as the address registers
+ * Fields:
+ * 48 bits of the station's MAC address filter mask
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:16;
+ uint64_t mask:48;
+#else
+ uint64_t mask:48;
+ uint64_t rsrvd:16;
+#endif
+ } bits;
+} pfc_mac_addr_mask_t;
+
+
+/*
+ * Register: PfcHashTable
+ * MAC Multicast Hash Filter
+ * Description: MAC multicast hash table filter. The multicast
+ * destination address is used to perform Ethernet CRC-32 hashing
+ * with seed value 0xffffffff. Bits 47:40 of the hash result are used
+ * to index one bit of this multicast hash table. If the bit is '1',
+ * the multicast hash matches.
+ * Fields:
+ * 16 bits of 256 bit hash table. First entry contains bits
+ * [15:0], last entry contains bits [255:240]
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:48;
+ uint64_t hash_val:16;
+#else
+ uint64_t hash_val:16;
+ uint64_t rsrvd:48;
+#endif
+ } bits;
+} pfc_hash_table_t;
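+
+/*
+ * Example (editorial sketch, not part of the original submission):
+ * setting one bit of the 256-bit multicast hash table, given the
+ * 8-bit index that hardware derives from the hash result (bits
+ * 47:40). The CRC computation itself is performed by hardware; the
+ * 8-byte register stride and the accessors hxge_reg_read64()/
+ * hxge_reg_write64() are assumptions.
+ */
+extern uint64_t hxge_reg_read64(uint64_t offset);
+extern void hxge_reg_write64(uint64_t offset, uint64_t value);
+
+static void
+pfc_hash_set_bit(uint8_t index)
+{
+	uint64_t offset = PFC_HASH_TABLE + (index / 16) * 8;
+	pfc_hash_table_t entry;
+
+	entry.value = hxge_reg_read64(offset);
+	entry.bits.hash_val |= 1U << (index % 16);
+	hxge_reg_write64(offset, entry.value);
+}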
+
+
+/*
+ * Register: PfcL2ClassConfig
+ * L2 Class Configuration
+ * Description: Programmable EtherType for class codes 2 and 3. The
+ * first register is class 2, and the second class 3
+ * Fields:
+ * Set to 1 to indicate that the entry is valid for use in
+ * classification
+ * EtherType value
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:47;
+ uint64_t valid:1;
+ uint64_t etype:16;
+#else
+ uint64_t etype:16;
+ uint64_t valid:1;
+ uint64_t rsrvd:47;
+#endif
+ } bits;
+} pfc_l2_class_config_t;
+
+
+/*
+ * Register: PfcL3ClassConfig
+ * L3 Class Configuration
+ * Description: Configuration for class codes 0x8-0xF. PFC can be set
+ * to discard certain classes of traffic, or to not initiate a TCAM
+ * match for that class
+ * Fields:
+ * Set to 1 to discard all packets of this class code
+ * Set to 1 to indicate that packets of this class should be sent
+ * to the TCAM for perfect match
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:60;
+ uint64_t discard:1;
+ uint64_t tsel:1;
+ uint64_t rsrvd1:2;
+#else
+ uint64_t rsrvd1:2;
+ uint64_t tsel:1;
+ uint64_t discard:1;
+ uint64_t rsrvd:60;
+#endif
+ } bits;
+} pfc_l3_class_config_t;
+
+
+/*
+ * Register: PfcTcamKey0
+ * TCAM Key 0
+ * Description: TCAM key value. Holds bits 63:0 of the TCAM key
+ * Fields:
+ * bits 63:0 of tcam key
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t key:64;
+#else
+ uint64_t key:64;
+#endif
+ } bits;
+} pfc_tcam_key0_t;
+
+
+/*
+ * Register: PfcTcamKey1
+ * TCAM Key 1
+ * Description: TCAM key value. Holds bits 99:64 of the TCAM key
+ * Fields:
+ * bits 99:64 of tcam key
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:28;
+ uint64_t key:36;
+#else
+ uint64_t key:36;
+ uint64_t rsrvd:28;
+#endif
+ } bits;
+} pfc_tcam_key1_t;
+
+
+/*
+ * Register: PfcTcamMask0
+ * TCAM Mask 0
+ * Description: TCAM mask value. Holds bits 63:0 of the TCAM mask
+ * Fields:
+ * bits 63:0 of tcam mask
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t mask:64;
+#else
+ uint64_t mask:64;
+#endif
+ } bits;
+} pfc_tcam_mask0_t;
+
+
+/*
+ * Register: PfcTcamMask1
+ * TCAM Mask 1
+ * Description: TCAM mask value. Holds bits 99:64 of the TCAM mask
+ * Fields:
+ * bits 99:64 of tcam mask
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:28;
+ uint64_t mask:36;
+#else
+ uint64_t mask:36;
+ uint64_t rsrvd:28;
+#endif
+ } bits;
+} pfc_tcam_mask1_t;
+
+
+/*
+ * Register: PfcTcamCtrl
+ * TCAM Control
+ * Description: TCAM and TCAM lookup memory access control register.
+ * Controls how TCAM and result lookup table are accessed by blade
+ * CPU. For a TCAM write, the data in the TCAM key and mask registers
+ * will be written to the TCAM. A compare will initiate a TCAM match
+ * with the data stored in the TCAM key register. The match bit is
+ * toggled, and the matching address is reported in the addr field.
+ * For an access to the TCAM result lookup memory, the TCAM 0 key
+ * register is used for the read/write data.
+ * Fields:
+ * TCAM lookup table debug parity bit write enable. When a '1' is
+ * written, software writes the parity bit together with the data
+ * during a TCAM result lookup write. Otherwise, hardware
+ * automatically generates the parity bit from the data.
+ * Command: 3'b000 = TCAM write, 3'b001 = reserved, 3'b010 = TCAM
+ * compare, 3'b011 = reserved, 3'b100 = TCAM result lookup write,
+ * 3'b101 = TCAM result lookup read, 3'b110 = reserved, 3'b111 =
+ * reserved
+ * Status of read/write/compare operation. When a zero is
+ * written, hardware initiates access. Hardware writes a '1' to
+ * the bit when it completes
+ * Set to 1 if there is a TCAM match for compare command. Zero
+ * otherwise
+ * Address location for access of TCAM or RAM (valid values
+ * 0-42). For a compare, the location of the match is written
+ * here by hardware.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:45;
+ uint64_t par_en:1;
+ uint64_t cmd:3;
+ uint64_t status:1;
+ uint64_t match:1;
+ uint64_t rsrvd1:5;
+ uint64_t addr:8;
+#else
+ uint64_t addr:8;
+ uint64_t rsrvd1:5;
+ uint64_t match:1;
+ uint64_t status:1;
+ uint64_t cmd:3;
+ uint64_t par_en:1;
+ uint64_t rsrvd:45;
+#endif
+ } bits;
+} pfc_tcam_ctrl_t;
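+
+/*
+ * Example (editorial sketch, not part of the original submission):
+ * one plausible TCAM entry write following the protocol above: load
+ * the key and mask registers, issue cmd = 3'b000 (TCAM write) with
+ * the target address, and poll status until hardware reports done.
+ * hxge_reg_read64()/hxge_reg_write64() are hypothetical accessors;
+ * no timeout handling is shown.
+ */
+extern uint64_t hxge_reg_read64(uint64_t offset);
+extern void hxge_reg_write64(uint64_t offset, uint64_t value);
+
+static void
+pfc_tcam_entry_write(uint8_t location, const uint64_t key[2],
+    const uint64_t mask[2])
+{
+	pfc_tcam_ctrl_t ctl;
+
+	hxge_reg_write64(PFC_TCAM_KEY0, key[0]);
+	hxge_reg_write64(PFC_TCAM_KEY1, key[1]);	/* bits 99:64 */
+	hxge_reg_write64(PFC_TCAM_MASK0, mask[0]);
+	hxge_reg_write64(PFC_TCAM_MASK1, mask[1]);
+
+	ctl.value = 0;
+	ctl.bits.cmd = 0;		/* 3'b000 = TCAM write */
+	ctl.bits.status = 0;		/* writing 0 initiates the access */
+	ctl.bits.addr = location;	/* valid locations are 0-42 */
+	hxge_reg_write64(PFC_TCAM_CTRL, ctl.value);
+
+	do {
+		ctl.value = hxge_reg_read64(PFC_TCAM_CTRL);
+	} while (ctl.bits.status == 0);	/* hardware sets 1 when done */
+}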
+
+
+/*
+ * Register: PfcConfig
+ * PFC General Configuration
+ * Description: PFC configuration options that are under the control
+ * of a blade CPU
+ * Fields:
+ * MAC address enable mask. Each bit corresponds to one MAC
+ * address (lsb = addr0). With 16 MAC addresses, only the lower 16
+ * bits are valid.
+ * default DMA channel number
+ * force TCP/UDP checksum result to always match
+ * Enable for TCP/UDP checksum. If not enabled, the result will
+ * never match.
+ * Enable TCAM matching. If TCAM matching is not enabled, traffic
+ * will be sent to the default DMA channel.
+ * Enable L2 Multicast hash
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:24;
+ uint64_t mac_addr_en:32;
+ uint64_t default_dma:4;
+ uint64_t force_cs_en:1;
+ uint64_t tcp_cs_en:1;
+ uint64_t tcam_en:1;
+ uint64_t l2_hash_en:1;
+#else
+ uint64_t l2_hash_en:1;
+ uint64_t tcam_en:1;
+ uint64_t tcp_cs_en:1;
+ uint64_t force_cs_en:1;
+ uint64_t default_dma:4;
+ uint64_t mac_addr_en:32;
+ uint64_t rsrvd:24;
+#endif
+ } bits;
+} pfc_config_t;
+
+
+/*
+ * Register: TcpCtrlMask
+ * TCP control bits mask
+ * Description: Mask of TCP control bits to forward onto downstream
+ * blocks. The TCP packet's control bits are masked, and then bitwise
+ * OR'd to produce a signal to the Rx DMA. Normally, all bits are
+ * masked off except the TCP SYN bit. The Rx DMA uses this bitwise OR
+ * for statistics. When discard = 1, the packet will be dropped if
+ * the bitwise OR = 1.
+ * Fields:
+ * Drop the packet if bitwise OR of the TCP control bits masked
+ * on = 1
+ * TCP end of data flag
+ * TCP SYN flag
+ * TCP reset flag
+ * TCP push flag
+ * TCP ack flag
+ * TCP urgent flag
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:57;
+ uint64_t discard:1;
+ uint64_t fin:1;
+ uint64_t syn:1;
+ uint64_t rst:1;
+ uint64_t psh:1;
+ uint64_t ack:1;
+ uint64_t urg:1;
+#else
+ uint64_t urg:1;
+ uint64_t ack:1;
+ uint64_t psh:1;
+ uint64_t rst:1;
+ uint64_t syn:1;
+ uint64_t fin:1;
+ uint64_t discard:1;
+ uint64_t rsrvd:57;
+#endif
+ } bits;
+} tcp_ctrl_mask_t;
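+
+/*
+ * Example (editorial sketch, not part of the original submission):
+ * the drop rule described above, evaluated on the host for clarity.
+ * flags carries the packet's TCP control bits in the same positions
+ * as the register (urg..fin in bits 0-5); the packet is dropped
+ * when discard is set and the bitwise OR of the masked-on bits is 1.
+ */
+static int
+tcp_ctrl_would_drop(tcp_ctrl_mask_t mask, uint64_t flags)
+{
+	uint64_t masked = flags & mask.value & 0x3fULL;	/* urg..fin */
+
+	return (mask.bits.discard && masked != 0);
+}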
+
+
+/*
+ * Register: SrcHashVal
+ * Source hash Seed Value
+ * Hash CRC seed value
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t seed:32;
+#else
+ uint64_t seed:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} src_hash_val_t;
+
+
+/*
+ * Register: PfcIntStatus
+ * PFC Interrupt Status
+ * Description: PFC interrupt status register
+ * Fields:
+ * triggered when packet drop log captured a drop. Part of LDF 0.
+ * Write 1 to clear.
+ * TCAM result lookup table parity error. Part of LDF 0. Write 1
+ * to clear.
+ * VLAN table parity error. Part of LDF 0. Write 1 to clear.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:61;
+ uint64_t pkt_drop:1;
+ uint64_t tcam_parity_err:1;
+ uint64_t vlan_parity_err:1;
+#else
+ uint64_t vlan_parity_err:1;
+ uint64_t tcam_parity_err:1;
+ uint64_t pkt_drop:1;
+ uint64_t rsrvd:61;
+#endif
+ } bits;
+} pfc_int_status_t;
+
+
+/*
+ * Register: PfcDbgIntStatus
+ * PFC Debug Interrupt Status
+ * Description: PFC debug interrupt status mirror register. This
+ * debug register triggers the same interrupts as those in the PFC
+ * Interrupt Status register. Interrupts in this mirror register are
+ * subject to the filtering of the PFC Interrupt Mask register.
+ * Fields:
+ * Packet drop. Part of LDF 0.
+ * TCAM result lookup table parity error. Part of LDF 0.
+ * VLAN table parity error. Part of LDF 0.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:61;
+ uint64_t pkt_drop:1;
+ uint64_t tcam_parity_err:1;
+ uint64_t vlan_parity_err:1;
+#else
+ uint64_t vlan_parity_err:1;
+ uint64_t tcam_parity_err:1;
+ uint64_t pkt_drop:1;
+ uint64_t rsrvd:61;
+#endif
+ } bits;
+} pfc_dbg_int_status_t;
+
+
+/*
+ * Register: PfcIntMask
+ * PFC Interrupt Mask
+ * Description: PFC interrupt status mask register
+ * Fields:
+ * mask for pktDrop capture;
+ * TCAM result lookup table parity error mask;
+ * VLAN table parity error mask;
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:61;
+ uint64_t pkt_drop_mask:1;
+ uint64_t tcam_parity_err_mask:1;
+ uint64_t vlan_parity_err_mask:1;
+#else
+ uint64_t vlan_parity_err_mask:1;
+ uint64_t tcam_parity_err_mask:1;
+ uint64_t pkt_drop_mask:1;
+ uint64_t rsrvd:61;
+#endif
+ } bits;
+} pfc_int_mask_t;
+
+
+/*
+ * Register: PfcDropLog
+ * Packet Drop Log
+ * Description: Packet drop log. Log for capturing packet drops. Log
+ * is re-armed when associated interrupt bit is cleared.
+ * Fields:
+ * drop because bitwise OR of the tcp control bits masked on = 1
+ * drop because L2 address did not match
+ * drop because class code indicated drop
+ * drop because TCAM result indicated drop
+ * drop because blade was not a member of VLAN
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:59;
+ uint64_t tcp_ctrl_drop:1;
+ uint64_t l2_addr_drop:1;
+ uint64_t class_code_drop:1;
+ uint64_t tcam_drop:1;
+ uint64_t vlan_drop:1;
+#else
+ uint64_t vlan_drop:1;
+ uint64_t tcam_drop:1;
+ uint64_t class_code_drop:1;
+ uint64_t l2_addr_drop:1;
+ uint64_t tcp_ctrl_drop:1;
+ uint64_t rsrvd:59;
+#endif
+ } bits;
+} pfc_drop_log_t;
+
+
+/*
+ * Register: PfcDropLogMask
+ * Packet Drop Log Mask
+ * Description: Mask for logging packet drop. If the drop type is
+ * masked off, it will not trigger the drop log to capture the packet
+ * drop
+ * Fields:
+ * mask drop because bitwise OR of the tcp control bits masked on
+ * = 1
+ * mask drop because L2 address did not match
+ * mask drop because class code indicated drop
+ * mask drop because TCAM result indicated drop
+ * mask drop because blade was not a member of VLAN
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:59;
+ uint64_t tcp_ctrl_drop_mask:1;
+ uint64_t l2_addr_drop_mask:1;
+ uint64_t class_code_drop_mask:1;
+ uint64_t tcam_drop_mask:1;
+ uint64_t vlan_drop_mask:1;
+#else
+ uint64_t vlan_drop_mask:1;
+ uint64_t tcam_drop_mask:1;
+ uint64_t class_code_drop_mask:1;
+ uint64_t l2_addr_drop_mask:1;
+ uint64_t tcp_ctrl_drop_mask:1;
+ uint64_t rsrvd:59;
+#endif
+ } bits;
+} pfc_drop_log_mask_t;
+
+
+/*
+ * Register: PfcVlanParErrLog
+ * VLAN Parity Error Log
+ * Description: Log of parity errors in VLAN table.
+ * Fields:
+ * address of parity error. Log is cleared when corresponding
+ * interrupt bit is cleared by writing '1'.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:52;
+ uint64_t addr:12;
+#else
+ uint64_t addr:12;
+ uint64_t rsrvd:52;
+#endif
+ } bits;
+} pfc_vlan_par_err_log_t;
+
+
+/*
+ * Register: PfcTcamParErrLog
+ * TCAM Parity Error Log
+ * Description: Log of parity errors in TCAM result lookup table.
+ * Fields:
+ * address of parity error. Log is cleared when corresponding
+ * interrupt bit is cleared by writing '1'.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:56;
+ uint64_t addr:8;
+#else
+ uint64_t addr:8;
+ uint64_t rsrvd:56;
+#endif
+ } bits;
+} pfc_tcam_par_err_log_t;
+
+
+/*
+ * Register: PfcBadCsCounter
+ * PFC Bad Checksum Counter
+ * Description: Counts the number of bad TCP/UDP checksums. Only
+ * counted if the L2 address matched
+ * Fields:
+ * count of number of bad TCP/UDP checksums received. Clear on
+ * read
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t bad_cs_count:32;
+#else
+ uint64_t bad_cs_count:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} pfc_bad_cs_counter_t;
+
+
+/*
+ * Register: PfcDropCounter
+ * PFC Drop Counter
+ * Description: Counts the number of packets dropped due to VLAN
+ * membership, class code, TCP control bits, or TCAM results. Only
+ * counted if the L2 address matched.
+ * Fields:
+ * Count of number of packets dropped due to VLAN, TCAM results.
+ * Clear on read
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t drop_count:32;
+#else
+ uint64_t drop_count:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} pfc_drop_counter_t;
+
+
+/*
+ * Register: PfcAutoInit
+ * PFC Auto Init
+ * Description: PFC Auto Initialization. Writing to this register
+ * triggers the auto initialization of the blade's TCAM entries with
+ * 100 bits of '0' for both key and mask. TCAM lookup is disabled
+ * during auto initialization.
+ * Fields:
+ * TCAM auto initialization status. 0=busy, 1=done.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:63;
+ uint64_t auto_init_status:1;
+#else
+ uint64_t auto_init_status:1;
+ uint64_t rsrvd:63;
+#endif
+ } bits;
+} pfc_auto_init_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HXGE_PFC_HW_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_rdc_hw.h b/usr/src/uts/common/io/hxge/hxge_rdc_hw.h
new file mode 100644
index 0000000000..540c582cf3
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_rdc_hw.h
@@ -0,0 +1,1611 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HXGE_RDC_HW_H
+#define _HXGE_RDC_HW_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RDC_BASE_ADDR 0x00300000
+
+#define RDC_PAGE_HANDLE (RDC_BASE_ADDR + 0x8)
+#define RDC_RX_CFG1 (RDC_BASE_ADDR + 0x20)
+#define RDC_RX_CFG2 (RDC_BASE_ADDR + 0x28)
+#define RDC_RBR_CFG_A (RDC_BASE_ADDR + 0x40)
+#define RDC_RBR_CFG_B (RDC_BASE_ADDR + 0x48)
+#define RDC_RBR_KICK (RDC_BASE_ADDR + 0x50)
+#define RDC_RBR_QLEN (RDC_BASE_ADDR + 0x58)
+#define RDC_RBR_HEAD (RDC_BASE_ADDR + 0x68)
+#define RDC_RCR_CFG_A (RDC_BASE_ADDR + 0x80)
+#define RDC_RCR_CFG_B (RDC_BASE_ADDR + 0x88)
+#define RDC_RCR_QLEN (RDC_BASE_ADDR + 0x90)
+#define RDC_RCR_TAIL (RDC_BASE_ADDR + 0xA0)
+#define RDC_RCR_FLUSH (RDC_BASE_ADDR + 0xA8)
+#define RDC_CLOCK_DIV (RDC_BASE_ADDR + 0xB0)
+#define RDC_INT_MASK (RDC_BASE_ADDR + 0xB8)
+#define RDC_STAT (RDC_BASE_ADDR + 0xC0)
+#define RDC_PKT_COUNT (RDC_BASE_ADDR + 0xD0)
+#define RDC_DROP_COUNT (RDC_BASE_ADDR + 0xD8)
+#define RDC_BYTE_COUNT (RDC_BASE_ADDR + 0xE0)
+#define RDC_PREF_CMD (RDC_BASE_ADDR + 0x100)
+#define RDC_PREF_DATA (RDC_BASE_ADDR + 0x108)
+#define RDC_SHADOW_CMD (RDC_BASE_ADDR + 0x110)
+#define RDC_SHADOW_DATA (RDC_BASE_ADDR + 0x118)
+#define RDC_SHADOW_PAR_DATA (RDC_BASE_ADDR + 0x120)
+#define RDC_CTRL_FIFO_CMD (RDC_BASE_ADDR + 0x128)
+#define RDC_CTRL_FIFO_DATA_LO (RDC_BASE_ADDR + 0x130)
+#define RDC_CTRL_FIFO_DATA_HI (RDC_BASE_ADDR + 0x138)
+#define RDC_CTRL_FIFO_DATA_ECC (RDC_BASE_ADDR + 0x140)
+#define RDC_DATA_FIFO_CMD (RDC_BASE_ADDR + 0x148)
+#define RDC_DATA_FIFO_DATA_LO (RDC_BASE_ADDR + 0x150)
+#define RDC_DATA_FIFO_DATA_HI (RDC_BASE_ADDR + 0x158)
+#define RDC_DATA_FIFO_DATA_ECC (RDC_BASE_ADDR + 0x160)
+#define RDC_STAT_INT_DBG (RDC_BASE_ADDR + 0x200)
+#define RDC_PREF_PAR_LOG (RDC_BASE_ADDR + 0x210)
+#define RDC_SHADOW_PAR_LOG (RDC_BASE_ADDR + 0x218)
+#define RDC_CTRL_FIFO_ECC_LOG (RDC_BASE_ADDR + 0x220)
+#define RDC_DATA_FIFO_ECC_LOG (RDC_BASE_ADDR + 0x228)
+#define RDC_FIFO_ERR_INT_MASK (RDC_BASE_ADDR + 0x230)
+#define RDC_FIFO_ERR_STAT (RDC_BASE_ADDR + 0x238)
+#define RDC_FIFO_ERR_INT_DBG (RDC_BASE_ADDR + 0x240)
+#define RDC_PEU_TXN_LOG (RDC_BASE_ADDR + 0x250)
+#define RDC_DBG_TRAINING_VEC (RDC_BASE_ADDR + 0x300)
+#define RDC_DBG_GRP_SEL (RDC_BASE_ADDR + 0x308)
+
+
+/*
+ * Register: RdcPageHandle
+ * Logical Page Handle
+ * Description: Logical page handle specifying upper bits of 64-bit
+ * PCIE addresses. Fields in this register are part of the dma
+ * configuration and cannot be changed once the dma is enabled.
+ * Fields:
+ * Bits [63:44] of a 64-bit address, used to concatenate to a
+ * 44-bit address when generating 64-bit addresses on the PCIE
+ * bus.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:44;
+ uint64_t handle:20;
+#else
+ uint64_t handle:20;
+ uint64_t rsrvd:44;
+#endif
+ } bits;
+} rdc_page_handle_t;
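+
+/*
+ * Example (editorial sketch, not part of the original submission):
+ * forming a 64-bit PCIE address by concatenating the 20-bit page
+ * handle (bits 63:44) with a 44-bit DMA address, per the
+ * description above.
+ */
+static uint64_t
+rdc_pcie_address(rdc_page_handle_t handle, uint64_t addr44)
+{
+	return (((uint64_t)handle.bits.handle << 44) |
+	    (addr44 & ((1ULL << 44) - 1)));
+}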
+
+
+/*
+ * Register: RdcRxCfg1
+ * DMA Configuration 1
+ * Description: Configuration parameters for receive DMA block.
+ * Fields in this register are part of the dma configuration and
+ * cannot be changed once the dma is enabled.
+ * The usage of enable, reset, and qst is as follows. Software
+ * should use the following sequence to reset a DMA channel. First,
+ * set DMA.enable to 0, wait for DMA.qst=1 and then, set DMA.reset to
+ * 1. After DMA.reset is cleared by hardware and the DMA.qst is set
+ * to 1, software may then start configuring the DMA channel. The
+ * DMA.enable can be set or cleared while the DMA is in operation.
+ * The state machines of the DMA may not have returned to its initial
+ * states yet after the DMA.enable bit is cleared. This condition is
+ * indicated by the value of the DMA.qst. An example of DMA.enable
+ * being cleared during operation is when a fatal error occurs.
+ * Fields:
+ * Set to 1 to enable the Receive DMA. If set to 0, packets
+ * selecting this DMA will be discarded. On fatal errors, this
+ * bit will be cleared by hardware. This bit cannot be set if sw
+ * has not resolved any pending fatal error condition: i.e. any
+ * RdcStat ldf1 error bits remain set.
+ * Set to 1 to reset the DMA. Hardware will clear this bit after
+ * reset is completed. A reset will bring the specific DMA back
+ * to the power on state (including the DMA.en in this register).
+ * When set to 1, it indicates all state associated with the DMA
+ * are in its initial state following either dma reset or
+ * disable. Thus, once this is set to 1, sw could start to
+ * configure the DMA if needed.
+ * Bits [43:32] of the Mailbox address.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t enable:1;
+ uint64_t reset:1;
+ uint64_t qst:1;
+ uint64_t rsrvd1:17;
+ uint64_t mbaddr_h:12;
+#else
+ uint64_t mbaddr_h:12;
+ uint64_t rsrvd1:17;
+ uint64_t qst:1;
+ uint64_t reset:1;
+ uint64_t enable:1;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_rx_cfg1_t;
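+
+/*
+ * Example (editorial sketch, not part of the original submission):
+ * the reset sequence spelled out above: clear enable, wait for qst,
+ * assert reset, then wait for hardware to clear reset and raise qst
+ * before reconfiguring the channel. hxge_reg_read64()/
+ * hxge_reg_write64() are hypothetical accessors; no timeout
+ * handling is shown.
+ */
+extern uint64_t hxge_reg_read64(uint64_t offset);
+extern void hxge_reg_write64(uint64_t offset, uint64_t value);
+
+static void
+rdc_channel_reset(uint64_t cfg1_offset)
+{
+	rdc_rx_cfg1_t cfg;
+
+	cfg.value = hxge_reg_read64(cfg1_offset);
+	cfg.bits.enable = 0;
+	hxge_reg_write64(cfg1_offset, cfg.value);
+
+	do {	/* wait for the state machines to quiesce */
+		cfg.value = hxge_reg_read64(cfg1_offset);
+	} while (cfg.bits.qst == 0);
+
+	cfg.bits.reset = 1;
+	hxge_reg_write64(cfg1_offset, cfg.value);
+
+	do {	/* hardware clears reset and sets qst when done */
+		cfg.value = hxge_reg_read64(cfg1_offset);
+	} while (cfg.bits.reset == 1 || cfg.bits.qst == 0);
+}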
+
+
+/*
+ * Register: RdcRxCfg2
+ * DMA Configuration 2
+ * Description: Configuration parameters for receive DMA block.
+ * Fields in this register are part of the dma configuration and
+ * cannot be changed once the dma is enabled.
+ * Fields:
+ * Bits [31:6] of the Mailbox address. Bits [5:0] are assumed to
+ * be zero, or 64B aligned.
+ * Multiple of 64Bs, 0 means no offset, b01 means 64B, b10 means
+ * 128B. b11 is invalid, hardware behavior not specified.
+ * Set to 1 to select the entire header of 6B.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t mbaddr_l:26;
+ uint64_t rsrvd1:3;
+ uint64_t offset:2;
+ uint64_t full_hdr:1;
+#else
+ uint64_t full_hdr:1;
+ uint64_t offset:2;
+ uint64_t rsrvd1:3;
+ uint64_t mbaddr_l:26;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_rx_cfg2_t;
+
+
+/*
+ * Register: RdcRbrCfgA
+ * RBR Configuration A
+ * Description: The following registers are used to configure and
+ * manage the RBR. Note that the entire RBR must stay within the
+ * 'page' defined by staddrBase. The behavior of the hardware is
+ * undefined if the last entry is outside of the page (if bits 43:18
+ * of the address of the last entry are different from bits 43:18 of
+ * the base address). Hardware will support wrapping around at the
+ * end of the ring buffer defined by LEN. LEN must be a multiple of
+ * 64. Fields in this register are part of the dma configuration and
+ * cannot be changed once the dma is enabled.
+ * HW does not check for all configuration errors across different
+ * fields.
+ *
+ * Fields:
+ * Bits 15:6 of the maximum number of RBBs in the buffer ring.
+ * Bits 5:0 are hardcoded to zero. The maximum is (2^16 - 64) and
+ * is limited by the staddr value. (len + staddr) should not
+ * exceed (2^16 - 64).
+ * Bits [43:18] of the address for the RBR. This value remains
+ * fixed, and is used as the base address of the ring. All
+ * entries in the ring have this as their upper address bits.
+ * Bits [17:6] of the address of the RBR. staddrBase concatenated
+ * with staddr is the starting address of the RBR. (len + staddr)
+ * should not exceed (2^16 - 64).
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t len:10;
+ uint64_t len_lo:6;
+ uint64_t rsrvd:4;
+ uint64_t staddr_base:26;
+ uint64_t staddr:12;
+ uint64_t rsrvd1:6;
+#else
+ uint64_t rsrvd1:6;
+ uint64_t staddr:12;
+ uint64_t staddr_base:26;
+ uint64_t rsrvd:4;
+ uint64_t len_lo:6;
+ uint64_t len:10;
+#endif
+ } bits;
+} rdc_rbr_cfg_a_t;
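+
+/*
+ * Example (editorial sketch, not part of the original submission):
+ * composing RBR Configuration A from a 44-bit, 64B-aligned ring
+ * base address and an entry count that must be a multiple of 64,
+ * per the constraints above. The caller must keep the whole ring
+ * inside the 'page' implied by staddrBase (bits 43:18).
+ */
+static uint64_t
+rdc_rbr_cfg_a_value(uint64_t ring_addr44, uint16_t nentries)
+{
+	rdc_rbr_cfg_a_t cfg;
+
+	cfg.value = 0;
+	cfg.bits.staddr_base = (ring_addr44 >> 18) & ((1U << 26) - 1);
+	cfg.bits.staddr = (ring_addr44 >> 6) & 0xfff;	/* bits 17:6 */
+	cfg.bits.len = nentries >> 6;	/* bits 5:0 are hardcoded to 0 */
+	return (cfg.value);
+}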
+
+
+/*
+ * Register: RdcRbrCfgB
+ * RBR Configuration B
+ * Description: This register configures the block size, and the
+ * individual packet buffer sizes. The VLD bits of the three block
+ * sizes have to be set to 1 in normal operations. These bits may be
+ * turned off for debug purpose only. Fields in this register are
+ * part of the dma configuration and cannot be changed once the dma
+ * is enabled.
+ * Fields:
+ * Buffer Block Size. b0 - 4K; b1 - 8K.
+ * Set to 1 to indicate SIZE2 is valid, and enable hardware to
+ * allocate buffers of size 2. Always set to 1 in normal
+ * operation.
+ * Size 2 of packet buffer. b0 - 2K; b1 - 4K.
+ * Set to 1 to indicate SIZE1 is valid, and enable hardware to
+ * allocate buffers of size 1. Always set to 1 in normal
+ * operation.
+ * Size 1 of packet buffer. b0 - 1K; b1 - 2K.
+ * Set to 1 to indicate SIZE0 is valid, and enable hardware to
+ * allocate buffers of size 0. Always set to 1 in normal
+ * operation.
+ * Size 0 of packet buffer. b00 - 256; b01 - 512; b10 - 1K; b11 -
+ * reserved.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:39;
+ uint64_t bksize:1;
+ uint64_t vld2:1;
+ uint64_t rsrvd1:6;
+ uint64_t bufsz2:1;
+ uint64_t vld1:1;
+ uint64_t rsrvd2:6;
+ uint64_t bufsz1:1;
+ uint64_t vld0:1;
+ uint64_t rsrvd3:5;
+ uint64_t bufsz0:2;
+#else
+ uint64_t bufsz0:2;
+ uint64_t rsrvd3:5;
+ uint64_t vld0:1;
+ uint64_t bufsz1:1;
+ uint64_t rsrvd2:6;
+ uint64_t vld1:1;
+ uint64_t bufsz2:1;
+ uint64_t rsrvd1:6;
+ uint64_t vld2:1;
+ uint64_t bksize:1;
+ uint64_t rsrvd:39;
+#endif
+ } bits;
+} rdc_rbr_cfg_b_t;
+
+
+/*
+ * Register: RdcRbrKick
+ * RBR Kick
+ * Description: Block buffer addresses are added to the ring buffer
+ * by software. When software writes to the Kick register, indicating
+ * the number of descriptors added, hardware will update the internal
+ * state of the corresponding buffer pool.
+ * HW does not check for all configuration errors across different
+ * fields.
+ *
+ * Fields:
+ * Number of Block Buffers added by software. Hardware effect
+ * will be triggered when the register is written to.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:48;
+ uint64_t bkadd:16;
+#else
+ uint64_t bkadd:16;
+ uint64_t rsrvd:48;
+#endif
+ } bits;
+} rdc_rbr_kick_t;
+
+
+/*
+ * Register: RdcRbrQlen
+ * RBR Queue Length
+ * Description: The current number of entries in the RBR.
+ * Fields:
+ * Number of block addresses in the ring buffer.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:48;
+ uint64_t qlen:16;
+#else
+ uint64_t qlen:16;
+ uint64_t rsrvd:48;
+#endif
+ } bits;
+} rdc_rbr_qlen_t;
+
+
+/*
+ * Register: RdcRbrHead
+ * RBR Head
+ * Description: Lower bits of the RBR head pointer. Software programs
+ * the upper bits, specified in rdcRbrConfigA.staddrBase.
+ * Fields:
+ * Bits [17:2] of the software posted address, 4B aligned. This
+ * pointer is updated by hardware after each block buffer is
+ * consumed.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:46;
+ uint64_t head:16;
+ uint64_t rsrvd1:2;
+#else
+ uint64_t rsrvd1:2;
+ uint64_t head:16;
+ uint64_t rsrvd:46;
+#endif
+ } bits;
+} rdc_rbr_head_t;
+
+
+/*
+ * Register: RdcRcrCfgA
+ * RCR Configuration A
+ * Description: The RCR should be within the 'page' defined by the
+ * staddrBase, i.e. staddrBase concatenated with STADDR plus 8 x LEN
+ * should be within the last address of the 'page' defined by
+ * staddrBase. The length must be a multiple of 32. Fields in this
+ * register are part of the dma configuration and cannot be changed
+ * once the dma is enabled.
+ * HW does not check for all configuration errors across different
+ * fields.
+ *
+ * Fields:
+ * Bits 15:5 of the maximum number of 8B entries in RCR. Bits 4:0
+ * are hard-coded to zero. The maximum size is (2^16 - 32) and is
+ * limited by staddr value. (len + staddr) should not exceed
+ * (2^16 - 32).
+ * Bits [43:19] of the Start address for the RCR.
+ * Bits [18:6] of start address for the RCR. (len + staddr)
+ * should not exceed (2^16 - 32).
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t len:11;
+ uint64_t len_lo:5;
+ uint64_t rsrvd:4;
+ uint64_t staddr_base:25;
+ uint64_t staddr:13;
+ uint64_t rsrvd1:6;
+#else
+ uint64_t rsrvd1:6;
+ uint64_t staddr:13;
+ uint64_t staddr_base:25;
+ uint64_t rsrvd:4;
+ uint64_t len_lo:5;
+ uint64_t len:11;
+#endif
+ } bits;
+} rdc_rcr_cfg_a_t;
+
+
+/*
+ * Register: RdcRcrCfgB
+ * RCR Configuration B
+ * Description: RCR configuration settings.
+ * Fields:
+ * Packet Threshold; when the number of packets enqueued in RCR
+ * is strictly larger than PTHRES, the DMA MAY issue an interrupt
+ * if enabled.
+ * Enable timeout. If set to one, enable the timeout. A timeout
+ * will initiate an update of the software visible states. If
+ * interrupt is armed, in addition to the update, an interrupt to
+ * CPU will be generated, and the interrupt disarmed.
+ * Time out value. The system clock is divided down by the value
+ * programmed in the Receive DMA Clock Divider register.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t pthres:16;
+ uint64_t entout:1;
+ uint64_t rsrvd1:9;
+ uint64_t timeout:6;
+#else
+ uint64_t timeout:6;
+ uint64_t rsrvd1:9;
+ uint64_t entout:1;
+ uint64_t pthres:16;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_rcr_cfg_b_t;
+
+
+/*
+ * Register: RdcRcrQlen
+ * RCR Queue Length
+ * Description: The number of entries in the RCR.
+ * Fields:
+ * Number of packets queued. Initialized to zero after the RCR
+ * Configuration A register is written to.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:48;
+ uint64_t qlen:16;
+#else
+ uint64_t qlen:16;
+ uint64_t rsrvd:48;
+#endif
+ } bits;
+} rdc_rcr_qlen_t;
+
+
+/*
+ * Register: RdcRcrTail
+ * RCR Tail
+ * Description: Lower bits of the RCR tail pointer. Software programs
+ * the upper bits, specified in rdcRcrConfigA.staddrBase.
+ * Fields:
+ * Address of the RCR Tail Pointer [18:3] (points to the next
+ * available location.) Initialized after the RCR Configuration A
+ * register is written to.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:45;
+ uint64_t tail:16;
+ uint64_t rsrvd1:3;
+#else
+ uint64_t rsrvd1:3;
+ uint64_t tail:16;
+ uint64_t rsrvd:45;
+#endif
+ } bits;
+} rdc_rcr_tail_t;
+
+
+/*
+ * Register: RdcRcrFlush
+ * RCR Flush
+ * Description: This register will force an update to the RCR in
+ * system memory.
+ * Fields:
+ * Set to 1 to force the hardware to store the shadow tail block
+ * to DRAM if the hardware state (queue length and pointers) is
+ * different from the software visible state. Reset to 0 by
+ * hardware when done.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:63;
+ uint64_t flush:1;
+#else
+ uint64_t flush:1;
+ uint64_t rsrvd:63;
+#endif
+ } bits;
+} rdc_rcr_flush_t;
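+
+/*
+ * Example (editorial sketch, not part of the original submission):
+ * forcing an RCR update and waiting for hardware to acknowledge by
+ * clearing the flush bit, per the description above.
+ * hxge_reg_read64()/hxge_reg_write64() are hypothetical accessors.
+ */
+extern uint64_t hxge_reg_read64(uint64_t offset);
+extern void hxge_reg_write64(uint64_t offset, uint64_t value);
+
+static void
+rdc_rcr_force_update(void)
+{
+	rdc_rcr_flush_t flush;
+
+	flush.value = 0;
+	flush.bits.flush = 1;
+	hxge_reg_write64(RDC_RCR_FLUSH, flush.value);
+
+	do {
+		flush.value = hxge_reg_read64(RDC_RCR_FLUSH);
+	} while (flush.bits.flush == 1);	/* hardware resets to 0 */
+}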
+
+
+/*
+ * Register: RdcClockDiv
+ * Receive DMA Clock Divider
+ * Description: The granularity of the DMA timers is determined by
+ * the following counter. This is used to drive the DMA timeout
+ * counters. For a 250MHz system clock, a value of 25000 (decimal)
+ * will yield a granularity of 100 usec.
+ * Fields:
+ * System clock divider, determines the granularity of the DMA
+ * timeout count-down. The hardware count down is count+1.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:48;
+ uint64_t count:16;
+#else
+ uint64_t count:16;
+ uint64_t rsrvd:48;
+#endif
+ } bits;
+} rdc_clock_div_t;
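+
+/*
+ * Example (editorial sketch, not part of the original submission):
+ * computing the divider for a desired timer granularity. With the
+ * count+1 count-down noted above, granularity = (count + 1) /
+ * sysclk; the 250MHz / 25000 = 100 usec example in the description
+ * corresponds to a computed count of 24999 once the +1 is applied.
+ * Assumes sysclk_hz is a multiple of 1 MHz.
+ */
+static uint16_t
+rdc_clock_div_count(uint64_t sysclk_hz, uint64_t granularity_us)
+{
+	return ((uint16_t)((sysclk_hz / 1000000) * granularity_us - 1));
+}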
+
+
+/*
+ * Register: RdcIntMask
+ * RDC Interrupt Mask
+ * Description: RDC interrupt mask register. RCRTHRES and RCRTO
+ * bits are used to keep track of normal DMA operations, while the
+ * remaining bits are primarily used to detect error conditions.
+ * Fields:
+ * Set to 0 to enable flagging when rdc receives a response
+ * completion timeout from peu. Part of LDF 1.
+ * Set to 1 to enable flagging when rdc receives a poisoned
+ * completion or non-zero (unsuccessful) completion status
+ * received from PEU. Part of LDF 1.
+ * Set to 0 to enable flagging when RCR threshold crossed. Part
+ * of LDF 0.
+ * Set to 0 to enable flagging when RCR timeout. Part of LDF 0.
+ * Set to 0 to enable flagging when a read from the rcr shadow ram
+ * generates a parity error. Part of LDF 1.
+ * Set to 0 to enable flagging when a read from the rbr prefetch ram
+ * generates a parity error. Part of LDF 1.
+ * Set to 0 to enable flagging when Receive Block Ring prefetch
+ * is empty (not enough buffer blocks available depending on
+ * incoming pkt size) when hardware tries to queue a packet.
+ * Incoming packets will be discarded. Non-fatal error. Part of
+ * LDF 1.
+ * Set to 0 to enable flagging when a packet is discarded because
+ * the RCR shadow is full.
+ * Set to 0 to enable flagging when Receive Completion Ring full
+ * when hardware tries to enqueue the completion status of a
+ * packet. Part of LDF 1.
+ * Set to 0 to enable flagging when RBR empty when hardware
+ * attempts to prefetch. Part of LDF 1.
+ * Set to 0 to enable flagging when Receive Block Ring full when
+ * software tries to post more blocks. Part of LDF 1.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:10;
+ uint64_t rbr_cpl_to:1;
+ uint64_t peu_resp_err:1;
+ uint64_t rsrvd1:5;
+ uint64_t rcr_thres:1;
+ uint64_t rcr_to:1;
+ uint64_t rcr_shadow_par_err:1;
+ uint64_t rbr_prefetch_par_err:1;
+ uint64_t rsrvd2:2;
+ uint64_t rbr_pre_empty:1;
+ uint64_t rcr_shadow_full:1;
+ uint64_t rsrvd3:2;
+ uint64_t rcr_full:1;
+ uint64_t rbr_empty:1;
+ uint64_t rbr_full:1;
+ uint64_t rsrvd4:2;
+ uint64_t rsrvd5:32;
+#else
+ uint64_t rsrvd5:32;
+ uint64_t rsrvd4:2;
+ uint64_t rbr_full:1;
+ uint64_t rbr_empty:1;
+ uint64_t rcr_full:1;
+ uint64_t rsrvd3:2;
+ uint64_t rcr_shadow_full:1;
+ uint64_t rbr_pre_empty:1;
+ uint64_t rsrvd2:2;
+ uint64_t rbr_prefetch_par_err:1;
+ uint64_t rcr_shadow_par_err:1;
+ uint64_t rcr_to:1;
+ uint64_t rcr_thres:1;
+ uint64_t rsrvd1:5;
+ uint64_t peu_resp_err:1;
+ uint64_t rbr_cpl_to:1;
+ uint64_t rsrvd:10;
+#endif
+ } bits;
+} rdc_int_mask_t;
+
+
+/*
+ * Register: RdcStat
+ * RDC Control And Status
+ * Description: The DMA channels are controlled using this register.
+ * Fields:
+ * Set to 1 to indicate rdc received a response completion
+ * timeout from peu. Fatal error. Part of LDF 1.
+ * Set to 1 to indicate poisoned completion or non-zero
+ * (unsuccessful) completion status received from PEU. Part of
+ * LDF 1.
+ * Set to 1 to enable mailbox update. Hardware will reset to 0
+ * after one update. Software needs to set to 1 for each update.
+ * Write 0 has no effect. Note that once set by software, only
+ * hardware can reset the value. This bit is also used to keep
+ * track of the exclusivity between threshold triggered or
+ * timeout triggered interrupt. If this bit is not set, there
+ * will be no timer based interrupt, and threshold based
+ * interrupt will not issue a mailbox update. It is recommended
+ * that software should set this bit to one when arming the
+ * device for interrupt.
+ * Set to 1 to indicate RCR threshold crossed. This is a level
+ * event. Part of LDF 0.
+ * Set to 1 to indicate the RCR timed out, if the MEX bit is set and the
+ * queue length is non-zero when timeout occurs. When software
+ * writes 1 to this bit, RCRTO will be reset to 0. Part of LDF 0.
+ * Set to 1 to indicate a read from the rcr shadow ram generated a
+ * parity error. Writing a 1 to this bit also clears the
+ * rdcShadowParLog register. Fatal error. Part of LDF 1.
+ * Set to 1 to indicate a read from the rbr prefetch ram generated a
+ * parity error. Writing a 1 to this bit also clears the
+ * rdcPrefParLog register. Fatal error. Part of LDF 1.
+ * Set to 1 to indicate Receive Block Ring prefetch is empty (not
+ * enough buffer blocks available depending on incoming pkt size)
+ * when hardware tries to queue a packet. Incoming packets will
+ * be discarded. Non-fatal error. Part of LDF 1.
+ * Set to 1 to indicate packet discard because of RCR shadow
+ * full. RCR Shadow full cannot be set to 1 in a normal
+ * operation. When set to 1, it indicates a fatal error. Part of
+ * LDF 1.
+ * Set to 1 to indicate Receive Completion Ring full when
+ * hardware tries to enqueue the completion status of a packet.
+ * Incoming packets will be discarded. No buffer consumed. Fatal
+ * error. Part of LDF 1.
+ * Set to 1 to indicate RBR empty when hardware attempts to
+ * prefetch. Part of LDF 1.
+ * Set to 1 to indicate Receive Buffer Ring full when software
+ * writes the kick register with a value greater than the length
+ * of the RBR. Incoming packets will be discarded. Fatal
+ * error. Part of LDF 1.
+ * Number of buffer pointers read. Used to advance the RCR head
+ * pointer.
+ * Number of packets read; when written to, decrement the QLEN
+ * counter by PKTREAD. QLEN is lower bounded to zero.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:10;
+ uint64_t rbr_cpl_to:1;
+ uint64_t peu_resp_err:1;
+ uint64_t rsrvd1:4;
+ uint64_t mex:1;
+ uint64_t rcr_thres:1;
+ uint64_t rcr_to:1;
+ uint64_t rcr_shadow_par_err:1;
+ uint64_t rbr_prefetch_par_err:1;
+ uint64_t rsrvd2:2;
+ uint64_t rbr_pre_empty:1;
+ uint64_t rcr_shadow_full:1;
+ uint64_t rsrvd3:2;
+ uint64_t rcr_full:1;
+ uint64_t rbr_empty:1;
+ uint64_t rbr_full:1;
+ uint64_t rsrvd4:2;
+ uint64_t ptrread:16;
+ uint64_t pktread:16;
+#else
+ uint64_t pktread:16;
+ uint64_t ptrread:16;
+ uint64_t rsrvd4:2;
+ uint64_t rbr_full:1;
+ uint64_t rbr_empty:1;
+ uint64_t rcr_full:1;
+ uint64_t rsrvd3:2;
+ uint64_t rcr_shadow_full:1;
+ uint64_t rbr_pre_empty:1;
+ uint64_t rsrvd2:2;
+ uint64_t rbr_prefetch_par_err:1;
+ uint64_t rcr_shadow_par_err:1;
+ uint64_t rcr_to:1;
+ uint64_t rcr_thres:1;
+ uint64_t mex:1;
+ uint64_t rsrvd1:4;
+ uint64_t peu_resp_err:1;
+ uint64_t rbr_cpl_to:1;
+ uint64_t rsrvd:10;
+#endif
+ } bits;
+} rdc_stat_t;
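+
+/*
+ * Example (editorial sketch, not part of the original submission):
+ * acknowledging serviced completions and re-arming the device, per
+ * the MEX/PKTREAD/PTRREAD description above. Writing pktread
+ * decrements QLEN (lower bounded to zero), ptrread advances the
+ * RCR head pointer, and mex arms the one-shot mailbox update.
+ * hxge_reg_write64() is a hypothetical accessor.
+ */
+extern void hxge_reg_write64(uint64_t offset, uint64_t value);
+
+static void
+rdc_stat_ack(uint64_t stat_offset, uint16_t pkts, uint16_t ptrs)
+{
+	rdc_stat_t stat;
+
+	stat.value = 0;		/* writing 0 leaves the W1C error bits alone */
+	stat.bits.pktread = pkts;
+	stat.bits.ptrread = ptrs;
+	stat.bits.mex = 1;	/* arm for the next interrupt */
+	hxge_reg_write64(stat_offset, stat.value);
+}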
+
+
+/*
+ * Register: RdcPktCount
+ * Rx DMA Packet Counter
+ * Description: Counts the number of packets received from the Rx
+ * Virtual MAC for this DMA channel.
+ * Fields:
+ * Count of SYN packets received from RVM. This counter
+ * saturates.
+ * Count of packets received from RVM. This counter saturates.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t syn_pkt_count:32;
+ uint64_t pkt_count:32;
+#else
+ uint64_t pkt_count:32;
+ uint64_t syn_pkt_count:32;
+#endif
+ } bits;
+} rdc_pkt_count_t;
+
+
+/*
+ * Register: RdcDropCount
+ * Rx DMA Dropped Packet Counters
+ * Description: Counts the number of packets dropped due to different
+ * types of errors.
+ * Fields:
+ * Count of packets dropped because they were longer than the
+ * maximum length. This counter saturates.
+ * Count of packets dropped because there was no block available
+ * in the RBR Prefetch Buffer. This counter saturates.
+ * Count of packets dropped because the RVM marked the packet as
+ * errored. This counter saturates.
+ * Count of packets dropped because there was a framing error
+ * from the RVM. This counter saturates.
+ * Count of packets dropped because the packet did not fit in the
+ * rx ram. This counter saturates.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:16;
+ uint64_t too_long:8;
+ uint64_t no_rbr_avail:8;
+ uint64_t rvm_error:8;
+ uint64_t frame_error:8;
+ uint64_t rxram_error:8;
+ uint64_t rsrvd1:8;
+#else
+ uint64_t rsrvd1:8;
+ uint64_t rxram_error:8;
+ uint64_t frame_error:8;
+ uint64_t rvm_error:8;
+ uint64_t no_rbr_avail:8;
+ uint64_t too_long:8;
+ uint64_t rsrvd:16;
+#endif
+ } bits;
+} rdc_drop_count_t;
+
+
+/*
+ * Register: RdcByteCount
+ * Rx DMA Byte Counter
+ * Description: Counts the number of bytes transferred by dma for all
+ * channels.
+ * Fields:
+ * Count of bytes transferred by dma. This counter saturates.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t count:32;
+#else
+ uint64_t count:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_byte_count_t;
+
+
+/*
+ * Register: RdcPrefCmd
+ * Rx DMA Prefetch Buffer Command
+ * Description: Allows debug access to the entire prefetch buffer,
+ * along with the rdcPrefData register. Writing the rdcPrefCmd
+ * triggers the access. For writes, software writes the 32 bits of
+ * data to the rdcPrefData register before writing the write command
+ * to this register. For reads, software first writes the read
+ * command to this register, then reads the 32-bit value from the
+ * rdcPrefData register. The status field should be polled by
+ * software until it is set, indicating the read or write has
+ * completed.
+ * Fields:
+ * status of indirect access 0=busy 1=done
+ * Command type. 1 indicates a read command, 0 a write command.
+ * enable writing of parity bits 1=enabled, 0=disabled
+ * DMA channel of entry to read or write
+ * Entry in the prefetch buffer to read or write
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t status:1;
+ uint64_t cmd:1;
+ uint64_t par_en:1;
+ uint64_t rsrvd1:22;
+ uint64_t dmc:2;
+ uint64_t entry:5;
+#else
+ uint64_t entry:5;
+ uint64_t dmc:2;
+ uint64_t rsrvd1:22;
+ uint64_t par_en:1;
+ uint64_t cmd:1;
+ uint64_t status:1;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_pref_cmd_t;
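+
+/*
+ * Example (editorial sketch, not part of the original submission):
+ * an indirect debug read of one prefetch buffer entry, following
+ * the command/poll/data protocol above. hxge_reg_read64()/
+ * hxge_reg_write64() are hypothetical accessors; no timeout
+ * handling is shown.
+ */
+extern uint64_t hxge_reg_read64(uint64_t offset);
+extern void hxge_reg_write64(uint64_t offset, uint64_t value);
+
+static uint32_t
+rdc_pref_entry_read(uint8_t channel, uint8_t entry)
+{
+	rdc_pref_cmd_t cmd;
+
+	cmd.value = 0;
+	cmd.bits.cmd = 1;		/* 1 = read, 0 = write */
+	cmd.bits.dmc = channel & 0x3;
+	cmd.bits.entry = entry & 0x1f;
+	hxge_reg_write64(RDC_PREF_CMD, cmd.value);
+
+	do {	/* status: 0 = busy, 1 = done */
+		cmd.value = hxge_reg_read64(RDC_PREF_CMD);
+	} while (cmd.bits.status == 0);
+
+	return ((uint32_t)hxge_reg_read64(RDC_PREF_DATA));
+}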
+
+
+/*
+ * Register: RdcPrefData
+ * Rx DMA Prefetch Buffer Data
+ * Description: See rdcPrefCmd register.
+ * Fields:
+ * For writes, the parity bits written into the prefetch buffer.
+ * For reads, the parity bits read from the prefetch buffer.
+ * For writes, data which is written into prefetch buffer. For
+ * reads, data read from the prefetch buffer.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:28;
+ uint64_t par:4;
+ uint64_t data:32;
+#else
+ uint64_t data:32;
+ uint64_t par:4;
+ uint64_t rsrvd:28;
+#endif
+ } bits;
+} rdc_pref_data_t;
+
+
+/*
+ * Register: RdcShadowCmd
+ * Rx DMA Shadow Tail Command
+ * Description: Allows debug access to the entire shadow tail, along
+ * with the rdcShadowData register. Writing the rdcShadowCmd triggers
+ * the access. For writes, software writes the 64 bits of data to the
+ * rdcShadowData register before writing the write command to this
+ * register. For reads, software first writes the read command to
+ * this register, then reads the 64-bit value from the rdcShadowData
+ * register. The status field should be polled by software until it
+ * is set, indicating the read or write has completed.
+ * Fields:
+ * status of indirect access 0=busy 1=done
+ * Command type. 1 indicates a read command, 0 a write command.
+ * enable writing of parity bits 1=enabled, 0=disabled
+ * DMA channel of entry to read or write
+ * Entry in the shadow tail to read or write
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t status:1;
+ uint64_t cmd:1;
+ uint64_t par_en:1;
+ uint64_t rsrvd1:23;
+ uint64_t dmc:2;
+ uint64_t entry:4;
+#else
+ uint64_t entry:4;
+ uint64_t dmc:2;
+ uint64_t rsrvd1:23;
+ uint64_t par_en:1;
+ uint64_t cmd:1;
+ uint64_t status:1;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_shadow_cmd_t;
+
+
+/*
+ * Register: RdcShadowData
+ * Rx DMA Shadow Tail Data
+ * Description: See rdcShadowCmd register.
+ * Fields:
+ * For writes, data which is written into shadow tail. For reads,
+ * data read from the shadow tail.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } bits;
+} rdc_shadow_data_t;
+
+
+/*
+ * Register: RdcShadowParData
+ * Rx DMA Shadow Tail Parity Data
+ * Description: See rdcShadowCmd register.
+ * Fields:
+ * For writes, parity data is written into shadow tail. For
+ * reads, parity data read from the shadow tail.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rsrvd1:24;
+ uint64_t parity_data:8;
+#else
+ uint64_t parity_data:8;
+ uint64_t rsrvd1:24;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_shadow_par_data_t;
+
+
+/*
+ * Register: RdcCtrlFifoCmd
+ * Rx DMA Control Fifo Command
+ * Description: Allows debug access to the entire Rx Ctl FIFO, along
+ * with the rdcCtrlFifoData register. Writing the rdcCtrlFifoCmd
+ * triggers the access. For writes, software writes the 128 bits of
+ * data to the rdcCtrlFifoData registers before writing the write
+ * command to this register. For reads, software first writes the
+ * read command to this register, then reads the 128-bit value from
+ * the rdcCtrlFifoData registers. The status field should be polled
+ * by software until it is set, indicating the read or write has
+ * completed.
+ * Fields:
+ * status of indirect access 0=busy 1=done
+ * Command type. 1 indicates a read command, 0 a write command.
+ * enable writing of ECC bits 1=enabled, 0=disabled
+ * Entry in the rx control ram to read or write
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t status:1;
+ uint64_t cmd:1;
+ uint64_t ecc_en:1;
+ uint64_t rsrvd1:20;
+ uint64_t entry:9;
+#else
+ uint64_t entry:9;
+ uint64_t rsrvd1:20;
+ uint64_t ecc_en:1;
+ uint64_t cmd:1;
+ uint64_t status:1;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_ctrl_fifo_cmd_t;
+
+
+/*
+ * Register: RdcCtrlFifoDataLo
+ * Rx DMA Control Fifo Data Lo
+ * Description: Lower 64 bits read or written to the Rx Ctl FIFO. See
+ * rdcCtrlFifoCmd register.
+ * Fields:
+ * For writes, data which is written into rx control ram. For
+ * reads, data read from the rx control ram.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } bits;
+} rdc_ctrl_fifo_data_lo_t;
+
+
+/*
+ * Register: RdcCtrlFifoDataHi
+ * Rx DMA Control Fifo Data Hi
+ * Description: Upper 64 bits read or written to the Rx Ctl FIFO. See
+ * rdcCtrlFifoCmd register.
+ * Fields:
+ * For writes, data which is written into rx control ram. For
+ * reads, data read from the rx control ram.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } bits;
+} rdc_ctrl_fifo_data_hi_t;
+
+
+/*
+ * Register: RdcCtrlFifoDataEcc
+ * Rx DMA Control Fifo Data ECC
+ * Description: 16 bits of ECC data read or written to the Rx Ctl
+ * FIFO. See rdcCtrlFifoCmd register.
+ * Fields:
+ * For writes, ECC bits for the upper 64 bits written into rx
+ * control ram. For reads, ECC bits read from the rx control ram.
+ * For writes, ECC bits for the lower 64 bits written into rx
+ * control ram. For reads, ECC bits read from the rx control ram.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rsrvd1:16;
+ uint64_t ecc_data_hi:8;
+ uint64_t ecc_data_lo:8;
+#else
+ uint64_t ecc_data_lo:8;
+ uint64_t ecc_data_hi:8;
+ uint64_t rsrvd1:16;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_ctrl_fifo_data_ecc_t;
+
+
+/*
+ * Register: RdcDataFifoCmd
+ * Rx DMA Data Fifo Command
+ * Description: Allows debug access to the entire Rx Data FIFO, along
+ * with the rdcDataFifoData register. Writing the rdcDataFifoCmd
+ * triggers the access. For writes, software writes the 128 bits of
+ * data to the rdcDataFifoData registers before writing the write
+ * command to this register. For reads, software first writes the
+ * read command to this register, then reads the 128-bit value from
+ * the rdcDataFifoData registers. The status field should be polled
+ * by software until it is set, indicating the read or write has
+ * completed.
+ * Fields:
+ * status of indirect access 0=busy 1=done
+ * Command type. 1 indicates a read command, 0 a write command.
+ * enable writing of ECC bits 1=enabled, 0=disabled
+ * Entry in the rx data ram to read or write
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t status:1;
+ uint64_t cmd:1;
+ uint64_t ecc_en:1;
+ uint64_t rsrvd1:18;
+ uint64_t entry:11;
+#else
+ uint64_t entry:11;
+ uint64_t rsrvd1:18;
+ uint64_t ecc_en:1;
+ uint64_t cmd:1;
+ uint64_t status:1;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_data_fifo_cmd_t;
+
+
+/*
+ * Register: RdcDataFifoDataLo
+ * Rx DMA Data Fifo Data Lo
+ * Description: Lower 64 bits read or written to the Rx Data FIFO.
+ * See rdcDataFifoCmd register.
+ * Fields:
+ * For writes, data which is written into rx data ram. For reads,
+ * data read from the rx data ram.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } bits;
+} rdc_data_fifo_data_lo_t;
+
+
+/*
+ * Register: RdcDataFifoDataHi
+ * Rx DMA Data Fifo Data Hi
+ * Description: Upper 64 bits read or written to the Rx Data FIFO.
+ * See rdcDataFifoCmd register.
+ * Fields:
+ * For writes, data which is written into rx data ram. For reads,
+ * data read from the rx data ram.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } bits;
+} rdc_data_fifo_data_hi_t;
+
+
+/*
+ * Register: RdcDataFifoDataEcc
+ * Rx DMA Data Fifo ECC Data
+ * Description: 16 bits of ECC data read or written to the Rx Data
+ * FIFO. See rdcDataFifoCmd register.
+ * Fields:
+ * For writes, ECC bits for the upper 64 bits written into rx data
+ * ram. For reads, ECC bits read from the rx data ram.
+ * For writes, ECC bits for the lower 64 bits written into rx data
+ * ram. For reads, ECC bits read from the rx data ram.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rsrvd1:16;
+ uint64_t ecc_data_hi:8;
+ uint64_t ecc_data_lo:8;
+#else
+ uint64_t ecc_data_lo:8;
+ uint64_t ecc_data_hi:8;
+ uint64_t rsrvd1:16;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_data_fifo_data_ecc_t;
+
+
+/*
+ * Register: RdcStatIntDbg
+ * RDC Debug Control and Status Interrupt
+ * Description: RDC debug control and status interrupt register.
+ * Debug mirror of the RDC control and status bits, used to check
+ * whether an interrupt is asserted and to detect error conditions.
+ * Fields:
+ * Set to 1 to enable interrupt. Part of LDF 1.
+ * Set to 1 to enable interrupt. Part of LDF 1.
+ * Set to 1 to enable interrupt. Part of LDF 0.
+ * Set to 1 to enable interrupt. Part of LDF 0.
+ * Set to 1 to enable interrupt. Part of LDF 1.
+ * Set to 1 to enable interrupt. Part of LDF 1.
+ * Set to 1 to enable interrupt. Part of LDF 1.
+ * Set to 1 to enable interrupt.
+ * Set to 1 to enable interrupt. Part of LDF 1.
+ * Set to 1 to enable interrupt. Part of LDF 1.
+ * Set to 1 to enable interrupt. Part of LDF 1.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:10;
+ uint64_t rbr_cpl_to:1;
+ uint64_t peu_resp_err:1;
+ uint64_t rsrvd1:5;
+ uint64_t rcr_thres:1;
+ uint64_t rcr_to:1;
+ uint64_t rcr_shadow_par_err:1;
+ uint64_t rbr_prefetch_par_err:1;
+ uint64_t rsrvd2:2;
+ uint64_t rbr_pre_empty:1;
+ uint64_t rcr_shadow_full:1;
+ uint64_t rsrvd3:2;
+ uint64_t rcr_full:1;
+ uint64_t rbr_empty:1;
+ uint64_t rbr_full:1;
+ uint64_t rsrvd4:2;
+ uint64_t rsrvd5:32;
+#else
+ uint64_t rsrvd5:32;
+ uint64_t rsrvd4:2;
+ uint64_t rbr_full:1;
+ uint64_t rbr_empty:1;
+ uint64_t rcr_full:1;
+ uint64_t rsrvd3:2;
+ uint64_t rcr_shadow_full:1;
+ uint64_t rbr_pre_empty:1;
+ uint64_t rsrvd2:2;
+ uint64_t rbr_prefetch_par_err:1;
+ uint64_t rcr_shadow_par_err:1;
+ uint64_t rcr_to:1;
+ uint64_t rcr_thres:1;
+ uint64_t rsrvd1:5;
+ uint64_t peu_resp_err:1;
+ uint64_t rbr_cpl_to:1;
+ uint64_t rsrvd:10;
+#endif
+ } bits;
+} rdc_stat_int_dbg_t;
+
+
+/*
+ * Register: RdcPrefParLog
+ * Rx DMA Prefetch Buffer Parity Log
+ * Description: RDC DMA Prefetch Buffer parity log register. This
+ * register logs the first parity error that is encountered. Writing
+ * a 1 to RdcStat::rbrPrefetchParErr clears this register.
+ * Fields:
+ * Address of parity error
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:57;
+ uint64_t address:7;
+#else
+ uint64_t address:7;
+ uint64_t rsrvd:57;
+#endif
+ } bits;
+} rdc_pref_par_log_t;
+
+
+/*
+ * Register: RdcShadowParLog
+ * Rx DMA Shadow Tail Parity Log
+ * Description: RDC DMA Shadow Tail parity log register. This
+ * register logs the first parity error that is encountered. Writing
+ * a 1 to RdcStat::rcrShadowParErr clears this register.
+ * Fields:
+ * Address of parity error
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rsrvd1:26;
+ uint64_t address:6;
+#else
+ uint64_t address:6;
+ uint64_t rsrvd1:26;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_shadow_par_log_t;
+
+
+/*
+ * Register: RdcCtrlFifoEccLog
+ * Rx DMA Control Fifo ECC Log
+ * Description: RDC DMA Control FIFO ECC log register. This register
+ * logs the first ECC error that is encountered. A double-bit ECC
+ * error overwrites any single-bit ECC error previously logged.
+ * Fields:
+ * Address of ECC error for the upper 64 bits. Writing a 1 to
+ * RdcFifoErrStat::rxCtrlFifoDed[1] or
+ * RdcFifoErrStat::rxCtrlFifoSec[1] clears this register.
+ * Address of ECC error for the lower 64 bits. Writing a 1 to
+ * RdcFifoErrStat::rxCtrlFifoDed[0] or
+ * RdcFifoErrStat::rxCtrlFifoSec[0] clears this register.
+ * ECC syndrome for the upper 64 bits. Writing a 1 to
+ * RdcFifoErrStat::rxCtrlFifoDed[1] or
+ * RdcFifoErrStat::rxCtrlFifoSec[1] clears this register.
+ * ECC syndrome for the lower 64 bits. Writing a 1 to
+ * RdcFifoErrStat::rxCtrlFifoDed[0] or
+ * RdcFifoErrStat::rxCtrlFifoSec[0] clears this register.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:7;
+ uint64_t address_hi:9;
+ uint64_t rsrvd1:7;
+ uint64_t address_lo:9;
+ uint64_t rsrvd2:8;
+ uint64_t syndrome_hi:8;
+ uint64_t rsrvd3:8;
+ uint64_t syndrome_lo:8;
+#else
+ uint64_t syndrome_lo:8;
+ uint64_t rsrvd3:8;
+ uint64_t syndrome_hi:8;
+ uint64_t rsrvd2:8;
+ uint64_t address_lo:9;
+ uint64_t rsrvd1:7;
+ uint64_t address_hi:9;
+ uint64_t rsrvd:7;
+#endif
+ } bits;
+} rdc_ctrl_fifo_ecc_log_t;
+
+
+/*
+ * Register: RdcDataFifoEccLog
+ * Rx DMA Data Fifo ECC Log
+ * Description: RDC DMA data FIFO ECC log register. This register
+ * logs the first ECC error that is encountered. A double-bit ECC
+ * error overwrites any single-bit ECC error previously logged.
+ * Fields:
+ * Address of ECC error for the upper 64 bits. Writing a 1 to
+ * RdcFifoErrStat::rxDataFifoDed[1] or
+ * RdcFifoErrStat::rxDataFifoSec[1] clears this register.
+ * Address of ECC error for the lower 64 bits. Writing a 1 to
+ * RdcFifoErrStat::rxDataFifoDed[0] or
+ * RdcFifoErrStat::rxDataFifoSec[0] clears this register.
+ * ECC syndrome for the upper 64 bits. Writing a 1 to
+ * RdcFifoErrStat::rxDataFifoDed[1] or
+ * RdcFifoErrStat::rxDataFifoSec[1] clears this register.
+ * ECC syndrome for the lower 64 bits. Writing a 1 to
+ * RdcFifoErrStat::rxDataFifoDed[0] or
+ * RdcFifoErrStat::rxDataFifoSec[0] clears this register.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:5;
+ uint64_t address_hi:11;
+ uint64_t rsrvd1:5;
+ uint64_t address_lo:11;
+ uint64_t rsrvd2:8;
+ uint64_t syndrome_hi:8;
+ uint64_t rsrvd3:8;
+ uint64_t syndrome_lo:8;
+#else
+ uint64_t syndrome_lo:8;
+ uint64_t rsrvd3:8;
+ uint64_t syndrome_hi:8;
+ uint64_t rsrvd2:8;
+ uint64_t address_lo:11;
+ uint64_t rsrvd1:5;
+ uint64_t address_hi:11;
+ uint64_t rsrvd:5;
+#endif
+ } bits;
+} rdc_data_fifo_ecc_log_t;
+
+
+/*
+ * Register: RdcFifoErrIntMask
+ * FIFO Error Interrupt Mask
+ * Description: FIFO Error interrupt mask register. Controls the
+ * interrupt assertion of FIFO errors. See the FIFO Error Status
+ * register for more description.
+ * Fields:
+ * Set to 0 to enable flagging when the rx ctrl ram logs an ECC
+ * single bit error. Part of Device Error 0.
+ * Set to 0 to enable flagging when the rx ctrl ram logs an ECC
+ * double bit error. Part of Device Error 1.
+ * Set to 0 to enable flagging when the rx data ram logs an ECC
+ * single bit error. Part of Device Error 0.
+ * Set to 0 to enable flagging when the rx data ram logs an ECC
+ * double bit error. Part of Device Error 1.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rsrvd1:24;
+ uint64_t rx_ctrl_fifo_sec:2;
+ uint64_t rx_ctrl_fifo_ded:2;
+ uint64_t rx_data_fifo_sec:2;
+ uint64_t rx_data_fifo_ded:2;
+#else
+ uint64_t rx_data_fifo_ded:2;
+ uint64_t rx_data_fifo_sec:2;
+ uint64_t rx_ctrl_fifo_ded:2;
+ uint64_t rx_ctrl_fifo_sec:2;
+ uint64_t rsrvd1:24;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_fifo_err_int_mask_t;
+
+
+/*
+ * Register: RdcFifoErrStat
+ * FIFO Error Status
+ * Description: FIFO Error Status register. Logs the status of FIFO
+ * errors. The Rx data buffer is physically two separate memories;
+ * each of the two error bits points to one of them. Each entry in
+ * the rx ctrl ram points to 2 buffer locations and they are read
+ * separately. The two error bits point to each half of the entry.
+ * Fields:
+ * Set to 1 by HW to indicate rx control ram received a ecc
+ * single bit error Writing a 1 to either bit clears the
+ * RdcCtrlFifoEccLog register Non-Fatal error. Part of Device
+ * Error 0
+ * Set to 1 by HW to indicate rx control ram received a ecc
+ * double bit error Writing a 1 to either bit clears the
+ * RdcCtrlFifoEccLog register Fatal error. Part of Device Error 1
+ * Set to 1 by HW to indicate rx data ram received a ecc single
+ * bit error Writing a 1 to either bit clears the
+ * RdcDataFifoEccLog register Non-Fatal error. Part of Device
+ * Error 0
+ * Set to 1 by HW to indicate rx data ram received a ecc double
+ * bit error Writing a 1 to either bit clears the
+ * RdcDataFifoEccLog register Fatal error. Part of Device Error 1
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:56;
+ uint64_t rx_ctrl_fifo_sec:2;
+ uint64_t rx_ctrl_fifo_ded:2;
+ uint64_t rx_data_fifo_sec:2;
+ uint64_t rx_data_fifo_ded:2;
+#else
+ uint64_t rx_data_fifo_ded:2;
+ uint64_t rx_data_fifo_sec:2;
+ uint64_t rx_ctrl_fifo_ded:2;
+ uint64_t rx_ctrl_fifo_sec:2;
+ uint64_t rsrvd:56;
+#endif
+ } bits;
+} rdc_fifo_err_stat_t;
+
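+/*
+ * Sketch of how these value/bits unions are typically consumed
+ * (illustrative only; it assumes the RDC_FIFO_ERR_STAT offset define
+ * from this header and the HXGE_REG_RD64() accessor family used
+ * elsewhere in the driver, cf. HXGE_REG_WR32 in hxge_rxdma.c):
+ *
+ * rdc_fifo_err_stat_t stat;
+ *
+ * HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value);
+ * if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded)
+ * (handle the fatal double-bit ECC error);
+ *
+ * Writing the value back with a status bit set to 1 clears that bit
+ * and the corresponding ECC log register, per the descriptions above.
+ */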
+
+/*
+ * Register: RdcFifoErrIntDbg
+ * FIFO Error Interrupt Debug
+ * Description: FIFO Error interrupt debug register. Allows the
+ * interrupt assertion of FIFO errors to be forced for debug.
+ * Fields:
+ * Set to 1 to enable the interrupt. Part of Device Error 0.
+ * Set to 1 to enable the interrupt. Part of Device Error 1.
+ * Set to 1 to enable the interrupt. Part of Device Error 0.
+ * Set to 1 to enable the interrupt. Part of Device Error 1.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rsrvd1:24;
+ uint64_t rx_ctrl_fifo_sec:2;
+ uint64_t rx_ctrl_fifo_ded:2;
+ uint64_t rx_data_fifo_sec:2;
+ uint64_t rx_data_fifo_ded:2;
+#else
+ uint64_t rx_data_fifo_ded:2;
+ uint64_t rx_data_fifo_sec:2;
+ uint64_t rx_ctrl_fifo_ded:2;
+ uint64_t rx_ctrl_fifo_sec:2;
+ uint64_t rsrvd1:24;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_fifo_err_int_dbg_t;
+
+
+/*
+ * Register: RdcPeuTxnLog
+ * PEU Transaction Log
+ * Description: PEU Transaction Log register. Counts the memory read
+ * and write requests sent to the peu block. For debug only.
+ * Fields:
+ * Counts the memory write transactions sent to the peu block. This
+ * counter saturates, and increments when vnmDbg is on.
+ * Counts the memory read transactions sent to the peu block. This
+ * counter saturates, and increments when vnmDbg is on.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rsrvd1:16;
+ uint64_t peu_mem_wr_count:8;
+ uint64_t peu_mem_rd_count:8;
+#else
+ uint64_t peu_mem_rd_count:8;
+ uint64_t peu_mem_wr_count:8;
+ uint64_t rsrvd1:16;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_peu_txn_log_t;
+
+
+/*
+ * Register: RdcDbgTrainingVec
+ * Debug Training Vector
+ * Description: Debug Training Vector register, providing the debug
+ * training vector for the coreClk domain. For the pcieClk domain, the
+ * dbgxMsb and dbgyMsb values are flipped on the debug bus.
+ * Fields:
+ * Blade number; the value read depends on the blade on which this
+ * block resides.
+ * Debug training vector; a sub-group select value of 0 selects
+ * this vector.
+ * Blade number; the value read depends on the blade on which this
+ * block resides.
+ * Debug training vector; a sub-group select value of 0 selects
+ * this vector.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t dbgx_msb:1;
+ uint64_t dbgx_bld_num:3;
+ uint64_t dbgx_training_vec:12;
+ uint64_t dbgy_msb:1;
+ uint64_t dbgy_bld_num:3;
+ uint64_t dbgy_training_vec:12;
+#else
+ uint64_t dbgy_training_vec:12;
+ uint64_t dbgy_bld_num:3;
+ uint64_t dbgy_msb:1;
+ uint64_t dbgx_training_vec:12;
+ uint64_t dbgx_bld_num:3;
+ uint64_t dbgx_msb:1;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} rdc_dbg_training_vec_t;
+
+
+/*
+ * Register: RdcDbgGrpSel
+ * Debug Group Select
+ * Description: Debug Group Select register. Selects the group of
+ * signals brought out on the debug port.
+ * Fields:
+ * high 32b sub-group select
+ * low 32b sub-group select
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:48;
+ uint64_t dbg_h32_sub_sel:8;
+ uint64_t dbg_l32_sub_sel:8;
+#else
+ uint64_t dbg_l32_sub_sel:8;
+ uint64_t dbg_h32_sub_sel:8;
+ uint64_t rsrvd:48;
+#endif
+ } bits;
+} rdc_dbg_grp_sel_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HXGE_RDC_HW_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_rxdma.c b/usr/src/uts/common/io/hxge/hxge_rxdma.c
new file mode 100644
index 0000000000..049bc88cef
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_rxdma.c
@@ -0,0 +1,3491 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hxge_impl.h>
+#include <hxge_rxdma.h>
+
+/*
+ * Globals: tunable parameters (/etc/system or adb)
+ *
+ */
+extern uint32_t hxge_rbr_size;
+extern uint32_t hxge_rcr_size;
+extern uint32_t hxge_rbr_spare_size;
+
+extern uint32_t hxge_mblks_pending;
+
+/*
+ * Tunable to reduce the amount of time spent in the
+ * ISR doing Rx Processing.
+ */
+extern uint32_t hxge_max_rx_pkts;
+boolean_t hxge_jumbo_enable;
+
+/*
+ * Tunables to manage the receive buffer blocks.
+ *
+ * hxge_rx_threshold_hi: copy all buffers.
+ * hxge_rx_buf_size_type: receive buffer block size type.
+ * hxge_rx_threshold_lo: copy only up to tunable block size type.
+ */
+extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi;
+extern hxge_rxbuf_type_t hxge_rx_buf_size_type;
+extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo;
+
+static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep);
+static void hxge_unmap_rxdma(p_hxge_t hxgep);
+static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep);
+static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep);
+static void hxge_rxdma_hw_stop(p_hxge_t hxgep);
+static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
+ uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
+ p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
+static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
+static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep,
+ uint16_t dma_channel, p_hxge_dma_common_t *dma_cntl_p,
+ p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
+static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
+ p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
+static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep,
+ uint16_t channel, p_hxge_dma_common_t *dma_buf_p,
+ p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks);
+static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
+ p_rx_rbr_ring_t rbr_p);
+static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
+ p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p);
+static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel);
+static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
+ p_rx_rcr_ring_t *rcr_p, rdc_stat_t cs);
+static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p,
+ p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p,
+ mblk_t ** mp, mblk_t ** mp_cont);
+static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep,
+ uint16_t channel);
+static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t);
+static void hxge_freeb(p_rx_msg_t);
+static void hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex,
+ p_hxge_ldv_t ldvp, rdc_stat_t cs);
+static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index,
+ p_hxge_ldv_t ldvp, rdc_stat_t cs);
+static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep,
+ p_rx_rbr_ring_t rx_dmap);
+static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep,
+ uint16_t channel);
+static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep);
+
+hxge_status_t
+hxge_init_rxdma_channels(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels"));
+
+ status = hxge_map_rxdma(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_init_rxdma: status 0x%x", status));
+ return (status);
+ }
+
+ status = hxge_rxdma_hw_start_common(hxgep);
+ if (status != HXGE_OK) {
+ hxge_unmap_rxdma(hxgep);
+ return (status);
+ }
+
+ status = hxge_rxdma_hw_start(hxgep);
+ if (status != HXGE_OK) {
+ hxge_unmap_rxdma(hxgep);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "<== hxge_init_rxdma_channels: status 0x%x", status));
+ return (status);
+}
+
+void
+hxge_uninit_rxdma_channels(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels"));
+
+ hxge_rxdma_hw_stop(hxgep);
+ hxge_unmap_rxdma(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uninit_rxdma_channels"));
+}
+
+hxge_status_t
+hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel,
+ rdc_stat_t *cs_p)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "<== hxge_init_rxdma_channel_cntl_stat"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p);
+
+ if (rs != HPI_SUCCESS) {
+ status = HXGE_ERROR | rs;
+ }
+ return (status);
+}
+
+
+hxge_status_t
+hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
+{
+ hpi_handle_t handle;
+ rdc_desc_cfg_t rdc_desc;
+ rdc_rcr_cfg_b_t *cfgb_p;
+ hpi_status_t rs = HPI_SUCCESS;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel"));
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ /*
+ * Use configuration data composed at init time. Write to hardware the
+ * receive ring configurations.
+ */
+ rdc_desc.mbox_enable = 1;
+ rdc_desc.mbox_addr = mbox_p->mbox_addr;
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_enable_rxdma_channel: mboxp $%p($%p)",
+ mbox_p->mbox_addr, rdc_desc.mbox_addr));
+
+ rdc_desc.rbr_len = rbr_p->rbb_max;
+ rdc_desc.rbr_addr = rbr_p->rbr_addr;
+
+ switch (hxgep->rx_bksize_code) {
+ case RBR_BKSIZE_4K:
+ rdc_desc.page_size = SIZE_4KB;
+ break;
+ case RBR_BKSIZE_8K:
+ rdc_desc.page_size = SIZE_8KB;
+ break;
+ }
+
+ rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0;
+ rdc_desc.valid0 = 1;
+
+ rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1;
+ rdc_desc.valid1 = 1;
+
+ rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2;
+ rdc_desc.valid2 = 1;
+
+ rdc_desc.full_hdr = rcr_p->full_hdr_flag;
+ rdc_desc.offset = rcr_p->sw_priv_hdr_len;
+
+ rdc_desc.rcr_len = rcr_p->comp_size;
+ rdc_desc.rcr_addr = rcr_p->rcr_addr;
+
+ cfgb_p = &(rcr_p->rcr_cfgb);
+ rdc_desc.rcr_threshold = cfgb_p->bits.pthres;
+ rdc_desc.rcr_timeout = cfgb_p->bits.timeout;
+ rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
+ "rbr_len qlen %d pagesize code %d rcr_len %d",
+ rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
+ "size 0 %d size 1 %d size 2 %d",
+ rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1,
+ rbr_p->hpi_pkt_buf_size2));
+
+ rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
+ if (rs != HPI_SUCCESS) {
+ return (HXGE_ERROR | rs);
+ }
+
+ /*
+ * Enable the timeout and threshold.
+ */
+ rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
+ rdc_desc.rcr_threshold);
+ if (rs != HPI_SUCCESS) {
+ return (HXGE_ERROR | rs);
+ }
+
+ rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
+ rdc_desc.rcr_timeout);
+ if (rs != HPI_SUCCESS) {
+ return (HXGE_ERROR | rs);
+ }
+
+ /* Enable the DMA */
+ rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
+ if (rs != HPI_SUCCESS) {
+ return (HXGE_ERROR | rs);
+ }
+
+ /* Kick the DMA engine. */
+ hpi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
+ /* Clear the rbr empty bit */
+ (void) hpi_rxdma_channel_rbr_empty_clear(handle, channel);
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel"));
+
+ return (HXGE_OK);
+}
+
+static hxge_status_t
+hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ /* disable the DMA */
+ rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
+ if (rs != HPI_SUCCESS) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_disable_rxdma_channel:failed (0x%x)", rs));
+ return (HXGE_ERROR | rs);
+ }
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel"));
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel)
+{
+ hpi_handle_t handle;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "==> hxge_rxdma_channel_rcrflush"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ hpi_rxdma_rdc_rcr_flush(handle, channel);
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "<== hxge_rxdma_channel_rcrflush"));
+ return (status);
+
+}
+
+#define MID_INDEX(l, r) (((r) + (l) + 1) >> 1)
+
+#define TO_LEFT -1
+#define TO_RIGHT 1
+#define BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
+#define BOTH_LEFT (TO_LEFT + TO_LEFT)
+#define IN_MIDDLE (TO_RIGHT + TO_LEFT)
+#define NO_HINT 0xffffffff
+
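+/*
+ * The binary search in hxge_rxbuf_pp_to_vp() classifies the target
+ * address against the chunk at anchor_index by summing two direction
+ * indicators: base_side is TO_RIGHT when the address is at or above
+ * the chunk base, and end_side is TO_LEFT when it is below the chunk
+ * end. The sum therefore lands on exactly one of three cases:
+ *
+ * IN_MIDDLE (0): the address falls inside the chunk (found)
+ * BOTH_RIGHT (2): the address is above the chunk (search right)
+ * BOTH_LEFT (-2): the address is below the chunk (search left)
+ */
+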
+/*ARGSUSED*/
+hxge_status_t
+hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p,
+ uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
+ uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
+{
+ int bufsize;
+ uint64_t pktbuf_pp;
+ uint64_t dvma_addr;
+ rxring_info_t *ring_info;
+ int base_side, end_side;
+ int r_index, l_index, anchor_index;
+ int found, search_done;
+ uint32_t offset, chunk_size, block_size, page_size_mask;
+ uint32_t chunk_index, block_index, total_index;
+ int max_iterations, iteration;
+ rxbuf_index_info_t *bufinfo;
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp"));
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
+ pkt_buf_addr_pp, pktbufsz_type));
+
+ pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
+
+ switch (pktbufsz_type) {
+ case 0:
+ bufsize = rbr_p->pkt_buf_size0;
+ break;
+ case 1:
+ bufsize = rbr_p->pkt_buf_size1;
+ break;
+ case 2:
+ bufsize = rbr_p->pkt_buf_size2;
+ break;
+ case RCR_SINGLE_BLOCK:
+ bufsize = 0;
+ anchor_index = 0;
+ break;
+ default:
+ return (HXGE_ERROR);
+ }
+
+ if (rbr_p->num_blocks == 1) {
+ anchor_index = 0;
+ ring_info = rbr_p->ring_info;
+ bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: (found, 1 block) "
+ "buf_pp $%p btype %d anchor_index %d bufinfo $%p",
+ pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo));
+
+ goto found_index;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d",
+ pkt_buf_addr_pp, pktbufsz_type, anchor_index));
+
+ ring_info = rbr_p->ring_info;
+ found = B_FALSE;
+ bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
+ iteration = 0;
+ max_iterations = ring_info->max_iterations;
+
+ /*
+ * First check if this block has been seen recently. This is indicated
+ * by a hint which is initialized when the first buffer of the block is
+ * seen. The hint is reset when the last buffer of the block has been
+ * processed. As three block sizes are supported, three hints are kept.
+ * The idea behind the hints is that once the hardware uses a block
+ * for a buffer of that size, it will use it exclusively for that size
+ * and will use it until it is exhausted. It is assumed that a
+ * single block will be in use for a given buffer size at any
+ * given time.
+ */
+ if (ring_info->hint[pktbufsz_type] != NO_HINT) {
+ anchor_index = ring_info->hint[pktbufsz_type];
+ dvma_addr = bufinfo[anchor_index].dvma_addr;
+ chunk_size = bufinfo[anchor_index].buf_size;
+ if ((pktbuf_pp >= dvma_addr) &&
+ (pktbuf_pp < (dvma_addr + chunk_size))) {
+ found = B_TRUE;
+ /*
+ * Check if this is the last buffer in the block. If so,
+ * reset the hint for this size.
+ */
+
+ if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
+ ring_info->hint[pktbufsz_type] = NO_HINT;
+ }
+ }
+
+ if (found == B_FALSE) {
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: (!found)"
+ "buf_pp $%p btype %d anchor_index %d",
+ pkt_buf_addr_pp, pktbufsz_type, anchor_index));
+
+ /*
+ * This is the first buffer of the block of this size. Need to
+ * search the whole information array. The search
+ * uses a binary search algorithm. It assumes that the
+ * information is already sorted in increasing order: info[0]
+ * < info[1] < info[2] .... < info[n-1], where n is the size of
+ * the information array.
+ */
+ r_index = rbr_p->num_blocks - 1;
+ l_index = 0;
+ search_done = B_FALSE;
+ anchor_index = MID_INDEX(r_index, l_index);
+ while (search_done == B_FALSE) {
+ if ((r_index == l_index) ||
+ (iteration >= max_iterations))
+ search_done = B_TRUE;
+
+ end_side = TO_RIGHT; /* to the right */
+ base_side = TO_LEFT; /* to the left */
+ /* read the DVMA address information and sort it */
+ dvma_addr = bufinfo[anchor_index].dvma_addr;
+ chunk_size = bufinfo[anchor_index].buf_size;
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: (searching)"
+ "buf_pp $%p btype %d "
+ "anchor_index %d chunk_size %d dvmaaddr $%p",
+ pkt_buf_addr_pp, pktbufsz_type, anchor_index,
+ chunk_size, dvma_addr));
+
+ if (pktbuf_pp >= dvma_addr)
+ base_side = TO_RIGHT; /* to the right */
+ if (pktbuf_pp < (dvma_addr + chunk_size))
+ end_side = TO_LEFT; /* to the left */
+
+ switch (base_side + end_side) {
+ case IN_MIDDLE:
+ /* found */
+ found = B_TRUE;
+ search_done = B_TRUE;
+ if ((pktbuf_pp + bufsize) <
+ (dvma_addr + chunk_size))
+ ring_info->hint[pktbufsz_type] =
+ bufinfo[anchor_index].buf_index;
+ break;
+ case BOTH_RIGHT:
+ /* not found: go to the right */
+ l_index = anchor_index + 1;
+ anchor_index = MID_INDEX(r_index, l_index);
+ break;
+
+ case BOTH_LEFT:
+ /* not found: go to the left */
+ r_index = anchor_index - 1;
+ anchor_index = MID_INDEX(r_index, l_index);
+ break;
+ default: /* should not come here */
+ return (HXGE_ERROR);
+ }
+ iteration++;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: (search done)"
+ "buf_pp $%p btype %d anchor_index %d",
+ pkt_buf_addr_pp, pktbufsz_type, anchor_index));
+ }
+
+ if (found == B_FALSE) {
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: (search failed)"
+ "buf_pp $%p btype %d anchor_index %d",
+ pkt_buf_addr_pp, pktbufsz_type, anchor_index));
+ return (HXGE_ERROR);
+ }
+
+found_index:
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: (FOUND1)"
+ "buf_pp $%p btype %d bufsize %d anchor_index %d",
+ pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index));
+
+ /* index of the first block in this chunk */
+ chunk_index = bufinfo[anchor_index].start_index;
+ dvma_addr = bufinfo[anchor_index].dvma_addr;
+ page_size_mask = ring_info->block_size_mask;
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
+ "buf_pp $%p btype %d bufsize %d "
+ "anchor_index %d chunk_index %d dvma $%p",
+ pkt_buf_addr_pp, pktbufsz_type, bufsize,
+ anchor_index, chunk_index, dvma_addr));
+
+ offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
+ block_size = rbr_p->block_size; /* System block(page) size */
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
+ "buf_pp $%p btype %d bufsize %d "
+ "anchor_index %d chunk_index %d dvma $%p "
+ "offset %d block_size %d",
+ pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index,
+ chunk_index, dvma_addr, offset, block_size));
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index"));
+
+ block_index = (offset / block_size); /* index within chunk */
+ total_index = chunk_index + block_index;
+
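+ /*
+ * Worked example (illustrative numbers only): with an 8 KB system
+ * block size, a packet buffer 0x6100 bytes into its chunk falls in
+ * block_index 3 (0x6100 / 0x2000), so total_index is the chunk's
+ * starting block index plus 3.
+ */
+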
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: "
+ "total_index %d dvma_addr $%p "
+ "offset %d block_size %d "
+ "block_index %d ",
+ total_index, dvma_addr, offset, block_size, block_index));
+
+ *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
+ offset);
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: "
+ "total_index %d dvma_addr $%p "
+ "offset %d block_size %d "
+ "block_index %d "
+ "*pkt_buf_addr_p $%p",
+ total_index, dvma_addr, offset, block_size,
+ block_index, *pkt_buf_addr_p));
+
+ *msg_index = total_index;
+ *bufoffset = (offset & page_size_mask);
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_rxbuf_pp_to_vp: get msg index: "
+ "msg_index %d bufoffset_index %d",
+ *msg_index, *bufoffset));
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp"));
+
+ return (HXGE_OK);
+}
+
+
+/*
+ * Comparison function used by the sort routine (hxge_ksort)
+ * to order rx buffer chunks by DVMA address.
+ */
+static int
+hxge_sort_compare(const void *p1, const void *p2)
+{
+
+ rxbuf_index_info_t *a, *b;
+
+ a = (rxbuf_index_info_t *)p1;
+ b = (rxbuf_index_info_t *)p2;
+
+ if (a->dvma_addr > b->dvma_addr)
+ return (1);
+ if (a->dvma_addr < b->dvma_addr)
+ return (-1);
+ return (0);
+}
+
+/*
+ * Grabbed this sort implementation from common/syscall/avl.c
+ *
+ * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
+ * v = Ptr to array/vector of objs
+ * n = # objs in the array
+ * s = size of each obj (must be a multiple of the word size)
+ * f = ptr to function to compare two objs
+ * (returns -1 = less than, 0 = equal, 1 = greater than)
+ */
+void
+hxge_ksort(caddr_t v, int n, int s, int (*f) ())
+{
+ int g, i, j, ii;
+ unsigned int *p1, *p2;
+ unsigned int tmp;
+
+ /* No work to do */
+ if (v == NULL || n <= 1)
+ return;
+ /* Sanity check on arguments */
+ ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
+ ASSERT(s > 0);
+
+ for (g = n / 2; g > 0; g /= 2) {
+ for (i = g; i < n; i++) {
+ for (j = i - g; j >= 0 &&
+ (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) {
+ p1 = (unsigned *)(v + j * s);
+ p2 = (unsigned *)(v + (j + g) * s);
+ for (ii = 0; ii < s / 4; ii++) {
+ tmp = *p1;
+ *p1++ = *p2;
+ *p2++ = tmp;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Initialize data structures required for rxdma
+ * buffer dvma->vmem address lookup
+ */
+/*ARGSUSED*/
+static hxge_status_t
+hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp)
+{
+ int index;
+ rxring_info_t *ring_info;
+ int max_iteration = 0, max_index = 0;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init"));
+
+ ring_info = rbrp->ring_info;
+ ring_info->hint[0] = NO_HINT;
+ ring_info->hint[1] = NO_HINT;
+ ring_info->hint[2] = NO_HINT;
+ max_index = rbrp->num_blocks;
+
+ /* read the DVMA address information and sort it */
+ /* do init of the information array */
+
+ HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
+ " hxge_rxbuf_index_info_init Sort ptrs"));
+
+ /* sort the array */
+ hxge_ksort((void *) ring_info->buffer, max_index,
+ sizeof (rxbuf_index_info_t), hxge_sort_compare);
+
+ for (index = 0; index < max_index; index++) {
+ HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
+ " hxge_rxbuf_index_info_init: sorted chunk %d "
+ " ioaddr $%p kaddr $%p size %x",
+ index, ring_info->buffer[index].dvma_addr,
+ ring_info->buffer[index].kaddr,
+ ring_info->buffer[index].buf_size));
+ }
+
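+ /*
+ * Derive the probe bound for the binary search in
+ * hxge_rxbuf_pp_to_vp(): the loop below finds the smallest
+ * max_iteration such that (1 << max_iteration) > num_blocks, i.e.
+ * floor(log2(num_blocks)) + 1, and one more probe is allowed for
+ * safety. For example, 32 blocks yields max_iterations of 7.
+ */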
+ max_iteration = 0;
+ while (max_index >= (1ULL << max_iteration))
+ max_iteration++;
+ ring_info->max_iterations = max_iteration + 1;
+
+ HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
+ " hxge_rxbuf_index_info_init Find max iter %d",
+ ring_info->max_iterations));
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init"));
+
+ return (HXGE_OK);
+}
+
+/*ARGSUSED*/
+void
+hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p)
+{
+#ifdef HXGE_DEBUG
+
+ uint32_t bptr;
+ uint64_t pp;
+
+ bptr = entry_p->bits.pkt_buf_addr;
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "\trcr entry $%p "
+ "\trcr entry 0x%0llx "
+ "\trcr entry 0x%08x "
+ "\trcr entry 0x%08x "
+ "\tvalue 0x%0llx\n"
+ "\tmulti = %d\n"
+ "\tpkt_type = 0x%x\n"
+ "\terror = 0x%04x\n"
+ "\tl2_len = %d\n"
+ "\tpktbufsize = %d\n"
+ "\tpkt_buf_addr = $%p\n"
+ "\tpkt_buf_addr (<< 6) = $%p\n",
+ entry_p,
+ *(int64_t *)entry_p,
+ *(int32_t *)entry_p,
+ *(int32_t *)((char *)entry_p + 32),
+ entry_p->value,
+ entry_p->bits.multi,
+ entry_p->bits.pkt_type,
+ entry_p->bits.error,
+ entry_p->bits.l2_len,
+ entry_p->bits.pktbufsz,
+ bptr,
+ entry_p->bits.pkt_buf_addr));
+
+ pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
+ RCR_PKT_BUF_ADDR_SHIFT;
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
+ pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
+#endif
+}
+
+/*ARGSUSED*/
+void
+hxge_rxdma_stop(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop"));
+
+ (void) hxge_rx_vmac_disable(hxgep);
+ (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop"));
+}
+
+void
+hxge_rxdma_stop_reinit(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit"));
+
+ (void) hxge_rxdma_stop(hxgep);
+ (void) hxge_uninit_rxdma_channels(hxgep);
+ (void) hxge_init_rxdma_channels(hxgep);
+
+ (void) hxge_rx_vmac_enable(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit"));
+}
+
+hxge_status_t
+hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
+{
+ int i, ndmas;
+ uint16_t channel;
+ p_rx_rbr_rings_t rx_rbr_rings;
+ p_rx_rbr_ring_t *rbr_rings;
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_hw_mode: mode %d", enable));
+
+ if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_rxdma_mode: not initialized"));
+ return (HXGE_ERROR);
+ }
+
+ rx_rbr_rings = hxgep->rx_rbr_rings;
+ if (rx_rbr_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_rxdma_mode: NULL ring pointer"));
+ return (HXGE_ERROR);
+ }
+
+ if (rx_rbr_rings->rbr_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_rxdma_mode: NULL rbr rings pointer"));
+ return (HXGE_ERROR);
+ }
+
+ ndmas = rx_rbr_rings->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_rxdma_mode: no channel"));
+ return (HXGE_ERROR);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_mode (ndmas %d)", ndmas));
+
+ rbr_rings = rx_rbr_rings->rbr_rings;
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ for (i = 0; i < ndmas; i++) {
+ if (rbr_rings == NULL || rbr_rings[i] == NULL) {
+ continue;
+ }
+ channel = rbr_rings[i]->rdc;
+ if (enable) {
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_hw_mode: channel %d (enable)",
+ channel));
+ rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
+ } else {
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_hw_mode: channel %d (disable)",
+ channel));
+ rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
+ }
+ }
+
+ status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "<== hxge_rxdma_hw_mode: status 0x%x", status));
+
+ return (status);
+}
+
+int
+hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel)
+{
+ int i, ndmas;
+ uint16_t rdc;
+ p_rx_rbr_rings_t rx_rbr_rings;
+ p_rx_rbr_ring_t *rbr_rings;
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_rxdma_get_ring_index: channel %d", channel));
+
+ rx_rbr_rings = hxgep->rx_rbr_rings;
+ if (rx_rbr_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_rxdma_get_ring_index: NULL ring pointer"));
+ return (-1);
+ }
+
+ ndmas = rx_rbr_rings->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_rxdma_get_ring_index: no channel"));
+ return (-1);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_rxdma_get_ring_index (ndmas %d)", ndmas));
+
+ rbr_rings = rx_rbr_rings->rbr_rings;
+ for (i = 0; i < ndmas; i++) {
+ rdc = rbr_rings[i]->rdc;
+ if (channel == rdc) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_rxdma_get_rbr_ring: "
+ "channel %d (index %d) "
+ "ring %d", channel, i, rbr_rings[i]));
+
+ return (i);
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_rxdma_get_rbr_ring_index: not found"));
+
+ return (-1);
+}
+
+/*
+ * Static functions start here.
+ */
+static p_rx_msg_t
+hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p)
+{
+ p_rx_msg_t hxge_mp = NULL;
+ p_hxge_dma_common_t dmamsg_p;
+ uchar_t *buffer;
+
+ hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
+ if (hxge_mp == NULL) {
+ HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
+ "Allocation of a rx msg failed."));
+ goto hxge_allocb_exit;
+ }
+
+ hxge_mp->use_buf_pool = B_FALSE;
+ if (dmabuf_p) {
+ hxge_mp->use_buf_pool = B_TRUE;
+
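+ /*
+ * Carve this message's buffer off the front of the caller's DMA
+ * chunk: clone the chunk descriptor for this message, then advance
+ * the chunk's kernel and IO addresses, DMA cookie and bookkeeping
+ * past the carved region so that the next call hands out the
+ * following 'size' bytes.
+ */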
+ dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma;
+ *dmamsg_p = *dmabuf_p;
+ dmamsg_p->nblocks = 1;
+ dmamsg_p->block_size = size;
+ dmamsg_p->alength = size;
+ buffer = (uchar_t *)dmabuf_p->kaddrp;
+
+ dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size);
+ dmabuf_p->ioaddr_pp = (void *)
+ ((char *)dmabuf_p->ioaddr_pp + size);
+
+ dmabuf_p->alength -= size;
+ dmabuf_p->offset += size;
+ dmabuf_p->dma_cookie.dmac_laddress += size;
+ dmabuf_p->dma_cookie.dmac_size -= size;
+ } else {
+ buffer = KMEM_ALLOC(size, KM_NOSLEEP);
+ if (buffer == NULL) {
+ HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
+ "Allocation of a receive page failed."));
+ goto hxge_allocb_fail1;
+ }
+ }
+
+ hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb);
+ if (hxge_mp->rx_mblk_p == NULL) {
+ HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed."));
+ goto hxge_allocb_fail2;
+ }
+ hxge_mp->buffer = buffer;
+ hxge_mp->block_size = size;
+ hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb;
+ hxge_mp->freeb.free_arg = (caddr_t)hxge_mp;
+ hxge_mp->ref_cnt = 1;
+ hxge_mp->free = B_TRUE;
+ hxge_mp->rx_use_bcopy = B_FALSE;
+
+ atomic_add_32(&hxge_mblks_pending, 1);
+
+ goto hxge_allocb_exit;
+
+hxge_allocb_fail2:
+ if (!hxge_mp->use_buf_pool) {
+ KMEM_FREE(buffer, size);
+ }
+hxge_allocb_fail1:
+ KMEM_FREE(hxge_mp, sizeof (rx_msg_t));
+ hxge_mp = NULL;
+
+hxge_allocb_exit:
+ return (hxge_mp);
+}
+
+p_mblk_t
+hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
+{
+ p_mblk_t mp;
+
+ HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb"));
+ HXGE_DEBUG_MSG((NULL, MEM_CTL, "hxge_mp = $%p "
+ "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size));
+
+ mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb);
+ if (mp == NULL) {
+ HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
+ goto hxge_dupb_exit;
+ }
+
+ atomic_inc_32(&hxge_mp->ref_cnt);
+ atomic_inc_32(&hxge_mblks_pending);
+
+hxge_dupb_exit:
+ HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
+ return (mp);
+}
+
+p_mblk_t
+hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
+{
+ p_mblk_t mp;
+ uchar_t *dp;
+
+ mp = allocb(size + HXGE_RXBUF_EXTRA, 0);
+ if (mp == NULL) {
+ HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
+ goto hxge_dupb_bcopy_exit;
+ }
+ dp = mp->b_rptr = mp->b_rptr + HXGE_RXBUF_EXTRA;
+ bcopy((void *) &hxge_mp->buffer[offset], dp, size);
+ mp->b_wptr = dp + size;
+
+hxge_dupb_bcopy_exit:
+
+ HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
+
+ return (mp);
+}
+
+void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p,
+ p_rx_msg_t rx_msg_p);
+
+void
+hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
+{
+ hpi_handle_t handle;
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page"));
+
+ /* Reuse this buffer */
+ rx_msg_p->free = B_FALSE;
+ rx_msg_p->cur_usage_cnt = 0;
+ rx_msg_p->max_usage_cnt = 0;
+ rx_msg_p->pkt_buf_size = 0;
+
+ if (rx_rbr_p->rbr_use_bcopy) {
+ rx_msg_p->rx_use_bcopy = B_FALSE;
+ atomic_dec_32(&rx_rbr_p->rbr_consumed);
+ }
+
+ /*
+ * Advance the RBR write index and store the buffer's shifted
+ * address in the descriptor ring (under the post lock).
+ */
+ MUTEX_ENTER(&rx_rbr_p->post_lock);
+
+ rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
+ rx_rbr_p->rbr_wrap_mask);
+ rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
+
+ /*
+ * Don't post when the index is close to 0 or near the max, to reduce
+ * the number of rbr_empty errors.
+ */
+ rx_rbr_p->pages_to_post++;
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ if (rx_rbr_p->rbr_wr_index > (rx_rbr_p->pages_to_skip / 2) &&
+ rx_rbr_p->rbr_wr_index < rx_rbr_p->pages_to_post_threshold) {
+ hpi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc,
+ rx_rbr_p->pages_to_post);
+ rx_rbr_p->pages_to_post = 0;
+ }
+
+ MUTEX_EXIT(&rx_rbr_p->post_lock);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_post_page (channel %d post_next_index %d)",
+ rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page"));
+}
+
+void
+hxge_freeb(p_rx_msg_t rx_msg_p)
+{
+ size_t size;
+ uchar_t *buffer = NULL;
+ int ref_cnt;
+ boolean_t free_state = B_FALSE;
+ rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;
+
+ HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb"));
+ HXGE_DEBUG_MSG((NULL, MEM2_CTL,
+ "hxge_freeb:rx_msg_p = $%p (block pending %d)",
+ rx_msg_p, hxge_mblks_pending));
+
+ atomic_dec_32(&hxge_mblks_pending);
+
+ /*
+ * First we need to get the free state, then
+ * atomically decrement the reference count to prevent
+ * a race condition with the interrupt thread that
+ * is processing a loaned-up buffer block.
+ */
+ free_state = rx_msg_p->free;
+
+ ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
+ if (!ref_cnt) {
+ buffer = rx_msg_p->buffer;
+ size = rx_msg_p->block_size;
+
+ HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: "
+ "will free: rx_msg_p = $%p (block pending %d)",
+ rx_msg_p, hxge_mblks_pending));
+
+ if (!rx_msg_p->use_buf_pool) {
+ KMEM_FREE(buffer, size);
+ }
+
+ KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
+ /* Decrement the receive buffer ring's reference count, too. */
+ atomic_dec_32(&ring->rbr_ref_cnt);
+
+ /*
+ * Free the receive buffer ring, iff
+ * 1. all the receive buffers have been freed
+ * 2. and we are in the proper state (that is,
+ * we are not UNMAPPING).
+ */
+ if (ring->rbr_ref_cnt == 0 && ring->rbr_state == RBR_UNMAPPED) {
+ KMEM_FREE(ring, sizeof (*ring));
+ }
+ return;
+ }
+
+ /*
+ * Repost buffer.
+ */
+ if (free_state && (ref_cnt == 1)) {
+ HXGE_DEBUG_MSG((NULL, RX_CTL,
+ "hxge_freeb: post page $%p:", rx_msg_p));
+ if (ring->rbr_state == RBR_POSTING)
+ hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p);
+ }
+
+ HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb"));
+}
+
+uint_t
+hxge_rx_intr(caddr_t arg1, caddr_t arg2)
+{
+ p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
+ p_hxge_t hxgep = (p_hxge_t)arg2;
+ p_hxge_ldg_t ldgp;
+ uint8_t channel;
+ hpi_handle_t handle;
+ rdc_stat_t cs;
+ uint_t serviced = DDI_INTR_UNCLAIMED;
+
+ if (ldvp == NULL) {
+ HXGE_DEBUG_MSG((NULL, RX_INT_CTL,
+ "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
+ return (DDI_INTR_CLAIMED);
+ }
+
+ if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
+ hxgep = ldvp->hxgep;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
+ "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
+
+ /*
+ * This interrupt handler is for a specific receive dma channel.
+ */
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ /*
+ * Get the control and status for this channel.
+ */
+ channel = ldvp->channel;
+ ldgp = ldvp->ldgp;
+ RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
+
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_intr:channel %d "
+ "cs 0x%016llx rcrto 0x%x rcrthres %x",
+ channel, cs.value, cs.bits.rcr_to, cs.bits.rcr_thres));
+
+ hxge_rx_pkts_vring(hxgep, ldvp->vdma_index, ldvp, cs);
+ serviced = DDI_INTR_CLAIMED;
+
+ /* error events. */
+ if (cs.value & RDC_STAT_ERROR) {
+ (void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);
+ }
+
+ /*
+ * Enable the mailbox update interrupt if we want to use the mailbox.
+ * We probably don't need the mailbox as it only saves us one pio read.
+ * Also write 1 to rcrthres and rcrto to clear these two edge-triggered
+ * bits.
+ */
+ cs.value &= RDC_STAT_WR1C;
+ cs.bits.mex = 1;
+ RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
+
+ /*
+ * Rearm this logical group if this is a single device group.
+ */
+ if (ldgp->nldvs == 1) {
+ ld_intr_mgmt_t mgm;
+
+ mgm.value = 0;
+ mgm.bits.arm = 1;
+ mgm.bits.timer = ldgp->ldg_timer;
+ HXGE_REG_WR32(handle,
+ LD_INTR_MGMT + LDSV_OFFSET(ldgp->ldg), mgm.value);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
+ "<== hxge_rx_intr: serviced %d", serviced));
+
+ return (serviced);
+}
+
+static void
+hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
+ rdc_stat_t cs)
+{
+ p_mblk_t mp;
+ p_rx_rcr_ring_t rcrp;
+
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts_vring"));
+ if ((mp = hxge_rx_pkts(hxgep, vindex, ldvp, &rcrp, cs)) == NULL) {
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
+ "<== hxge_rx_pkts_vring: no mp"));
+ return;
+ }
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts_vring: $%p", mp));
+
+#ifdef HXGE_DEBUG
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_rx_pkts_vring:calling mac_rx (NEMO) "
+ "LEN %d mp $%p mp->b_next $%p rcrp $%p "
+ "mac_handle $%p",
+ (mp->b_wptr - mp->b_rptr), mp, mp->b_next,
+ rcrp, rcrp->rcr_mac_handle));
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_rx_pkts_vring: dump packets "
+ "(mp $%p b_rptr $%p b_wptr $%p):\n %s",
+ mp, mp->b_rptr, mp->b_wptr,
+ hxge_dump_packet((char *)mp->b_rptr, 64)));
+
+ if (mp->b_cont) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_rx_pkts_vring: dump b_cont packets "
+ "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
+ mp->b_cont, mp->b_cont->b_rptr, mp->b_cont->b_wptr,
+ hxge_dump_packet((char *)mp->b_cont->b_rptr,
+ mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
+ }
+ if (mp->b_next) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_rx_pkts_vring: dump next packets "
+ "(b_rptr $%p): %s",
+ mp->b_next->b_rptr,
+ hxge_dump_packet((char *)mp->b_next->b_rptr, 64)));
+ }
+#endif
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_rx_pkts_vring: send packet to stack"));
+ mac_rx(hxgep->mach, rcrp->rcr_mac_handle, mp);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_pkts_vring"));
+}
+
+/*ARGSUSED*/
+mblk_t *
+hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
+ p_rx_rcr_ring_t *rcrp, rdc_stat_t cs)
+{
+ hpi_handle_t handle;
+ uint8_t channel;
+ p_rx_rcr_rings_t rx_rcr_rings;
+ p_rx_rcr_ring_t rcr_p;
+ uint32_t comp_rd_index;
+ p_rcr_entry_t rcr_desc_rd_head_p;
+ p_rcr_entry_t rcr_desc_rd_head_pp;
+ p_mblk_t nmp, mp_cont, head_mp, *tail_mp;
+ uint16_t qlen, nrcr_read, npkt_read;
+ uint32_t qlen_hw;
+ boolean_t multi;
+ rdc_rcr_cfg_b_t rcr_cfg_b;
+#if defined(_BIG_ENDIAN)
+ hpi_status_t rs = HPI_SUCCESS;
+#endif
+
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d "
+ "channel %d", vindex, ldvp->channel));
+
+ if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
+ return (NULL);
+ }
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ rx_rcr_rings = hxgep->rx_rcr_rings;
+ rcr_p = rx_rcr_rings->rcr_rings[vindex];
+ channel = rcr_p->rdc;
+ if (channel != ldvp->channel) {
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d "
+ "channel %d, and rcr channel %d not matched.",
+ vindex, ldvp->channel, channel));
+ return (NULL);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
+ "==> hxge_rx_pkts: START: rcr channel %d "
+ "head_p $%p head_pp $%p index %d ",
+ channel, rcr_p->rcr_desc_rd_head_p,
+ rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index));
+
+#if !defined(_BIG_ENDIAN)
+ qlen = RXDMA_REG_READ32(handle, RDC_RCR_QLEN, channel) & 0xffff;
+#else
+ rs = hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
+ if (rs != HPI_SUCCESS) {
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d "
+ "channel %d, get qlen failed 0x%08x",
+ vindex, ldvp->channel, rs));
+ return (NULL);
+ }
+#endif
+ if (!qlen) {
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
+ "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)",
+ channel, qlen));
+ return (NULL);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d "
+ "qlen %d", channel, qlen));
+
+ comp_rd_index = rcr_p->comp_rd_index;
+
+ rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
+ rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
+ nrcr_read = npkt_read = 0;
+
+ /*
+ * Number of packets queued (a jumbo or multi packet will be counted
+ * as only one packet and may take up more than one completion
+ * entry).
+ */
+ qlen_hw = (qlen < hxge_max_rx_pkts) ? qlen : hxge_max_rx_pkts;
+ head_mp = NULL;
+ tail_mp = &head_mp;
+ nmp = mp_cont = NULL;
+ multi = B_FALSE;
+
+ while (qlen_hw) {
+#ifdef HXGE_DEBUG
+ hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p);
+#endif
+ /*
+ * Process one completion ring entry.
+ */
+ hxge_receive_packet(hxgep,
+ rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
+
+ /*
+ * Message chaining modes (nemo msg chaining): completed packets
+ * are linked through b_next, while the segments of a multi-buffer
+ * packet are linked through b_cont.
+ */
+ if (nmp) {
+ nmp->b_next = NULL;
+ if (!multi && !mp_cont) { /* frame fits a partition */
+ *tail_mp = nmp;
+ tail_mp = &nmp->b_next;
+ nmp = NULL;
+ } else if (multi && !mp_cont) { /* first segment */
+ *tail_mp = nmp;
+ tail_mp = &nmp->b_cont;
+ } else if (multi && mp_cont) { /* mid of multi segs */
+ *tail_mp = mp_cont;
+ tail_mp = &mp_cont->b_cont;
+ } else if (!multi && mp_cont) { /* last segment */
+ *tail_mp = mp_cont;
+ tail_mp = &nmp->b_next;
+ nmp = NULL;
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
+ "==> hxge_rx_pkts: loop: rcr channel %d "
+ "before updating: multi %d "
+ "nrcr_read %d "
+ "npk read %d "
+ "head_pp $%p index %d ",
+ channel, multi,
+ nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index));
+
+ if (!multi) {
+ qlen_hw--;
+ npkt_read++;
+ }
+
+ /*
+ * Update the next read entry.
+ */
+ comp_rd_index = NEXT_ENTRY(comp_rd_index,
+ rcr_p->comp_wrap_mask);
+
+ rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
+ rcr_p->rcr_desc_first_p, rcr_p->rcr_desc_last_p);
+
+ nrcr_read++;
+
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
+ "<== hxge_rx_pkts: (SAM, process one packet) "
+ "nrcr_read %d", nrcr_read));
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
+ "==> hxge_rx_pkts: loop: rcr channel %d "
+ "multi %d nrcr_read %d npk read %d head_pp $%p index %d ",
+ channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp,
+ comp_rd_index));
+ }
+
+ rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
+ rcr_p->comp_rd_index = comp_rd_index;
+ rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
+
+ if ((hxgep->intr_timeout != rcr_p->intr_timeout) ||
+ (hxgep->intr_threshold != rcr_p->intr_threshold)) {
+ rcr_p->intr_timeout = hxgep->intr_timeout;
+ rcr_p->intr_threshold = hxgep->intr_threshold;
+ rcr_cfg_b.value = 0x0ULL;
+ if (rcr_p->intr_timeout)
+ rcr_cfg_b.bits.entout = 1;
+ rcr_cfg_b.bits.timeout = rcr_p->intr_timeout;
+ rcr_cfg_b.bits.pthres = rcr_p->intr_threshold;
+ RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B,
+ channel, rcr_cfg_b.value);
+ }
+
+ cs.bits.pktread = npkt_read;
+ cs.bits.ptrread = nrcr_read;
+ RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
+
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
+ "==> hxge_rx_pkts: EXIT: rcr channel %d "
+ "head_pp $%p index %016llx ",
+ channel, rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index));
+
+ /*
+ * Return the updated receive completion ring to the caller.
+ */
+
+ *rcrp = rcr_p;
+
+ HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts"));
+
+ return (head_mp);
+}
+
+/*ARGSUSED*/
+void
+hxge_receive_packet(p_hxge_t hxgep,
+ p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
+ boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
+{
+ p_mblk_t nmp = NULL;
+ uint64_t multi;
+ uint8_t channel;
+
+ boolean_t first_entry = B_TRUE;
+ boolean_t is_tcp_udp = B_FALSE;
+ boolean_t buffer_free = B_FALSE;
+ boolean_t error_send_up = B_FALSE;
+ uint8_t error_type;
+ uint16_t l2_len;
+ uint16_t skip_len;
+ uint8_t pktbufsz_type;
+ uint64_t rcr_entry;
+ uint64_t *pkt_buf_addr_pp;
+ uint64_t *pkt_buf_addr_p;
+ uint32_t buf_offset;
+ uint32_t bsize;
+ uint32_t msg_index;
+ p_rx_rbr_ring_t rx_rbr_p;
+ p_rx_msg_t *rx_msg_ring_p;
+ p_rx_msg_t rx_msg_p;
+
+ uint16_t sw_offset_bytes = 0, hdr_size = 0;
+ hxge_status_t status = HXGE_OK;
+ boolean_t is_valid = B_FALSE;
+ p_hxge_rx_ring_stats_t rdc_stats;
+ uint32_t bytes_read;
+
+ uint64_t pkt_type;
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet"));
+
+ first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
+ rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
+
+ multi = (rcr_entry & RCR_MULTI_MASK);
+ pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
+
+ error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
+ l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
+
+ /*
+ * Hardware does not strip the CRC, due to bug ID 11451 where
+ * the hardware mishandles minimum-size packets.
+ */
+ l2_len -= ETHERFCSL;
+
+ pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
+ RCR_PKTBUFSZ_SHIFT);
+ pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
+ RCR_PKT_BUF_ADDR_SHIFT);
+
+ channel = rcr_p->rdc;
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
+ "pkt_buf_addr_pp $%p l2_len %d multi %d "
+ "error_type 0x%x pkt_type 0x%x "
+ "pktbufsz_type %d ",
+ rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len,
+ multi, error_type, pkt_type, pktbufsz_type));
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
+ "pkt_buf_addr_pp $%p l2_len %d multi %d "
+ "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
+ rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type, pkt_type));
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> (rbr) hxge_receive_packet: entry 0x%0llx "
+ "full pkt_buf_addr_pp $%p l2_len %d",
+ rcr_entry, pkt_buf_addr_pp, l2_len));
+
+ /* get the stats ptr */
+ rdc_stats = rcr_p->rdc_stats;
+
+ if (!l2_len) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_receive_packet: failed: l2 length is 0."));
+ return;
+ }
+
+ /* shift 6 bits to get the full io address */
+ pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
+ RCR_PKT_BUF_ADDR_SHIFT_FULL);
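+ /*
+ * (The completion entry stores the buffer address with its low 6
+ * bits dropped, so packet buffers are implicitly 64-byte aligned;
+ * the shift above reconstructs the full IO address.)
+ */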
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> (rbr) hxge_receive_packet: entry 0x%0llx "
+ "full pkt_buf_addr_pp $%p l2_len %d",
+ rcr_entry, pkt_buf_addr_pp, l2_len));
+
+ rx_rbr_p = rcr_p->rx_rbr_p;
+ rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
+
+ if (first_entry) {
+ hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
+ RXDMA_HDR_SIZE_DEFAULT);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_receive_packet: first entry 0x%016llx "
+ "pkt_buf_addr_pp $%p l2_len %d hdr %d",
+ rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size));
+ }
+
+ MUTEX_ENTER(&rcr_p->lock);
+ MUTEX_ENTER(&rx_rbr_p->lock);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> (rbr 1) hxge_receive_packet: entry 0x%0llx "
+ "full pkt_buf_addr_pp $%p l2_len %d",
+ rcr_entry, pkt_buf_addr_pp, l2_len));
+
+ /*
+ * Packet buffer address in the completion entry points to the starting
+ * buffer address (offset 0). Use the starting buffer address to locate
+ * the corresponding kernel address.
+ */
+ status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p,
+ pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
+ &buf_offset, &msg_index);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> (rbr 2) hxge_receive_packet: entry 0x%0llx "
+ "full pkt_buf_addr_pp $%p l2_len %d",
+ rcr_entry, pkt_buf_addr_pp, l2_len));
+
+ if (status != HXGE_OK) {
+ MUTEX_EXIT(&rx_rbr_p->lock);
+ MUTEX_EXIT(&rcr_p->lock);
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_receive_packet: found vaddr failed %d", status));
+ return;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> (rbr 3) hxge_receive_packet: entry 0x%0llx "
+ "full pkt_buf_addr_pp $%p l2_len %d",
+ rcr_entry, pkt_buf_addr_pp, l2_len));
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
+ "full pkt_buf_addr_pp $%p l2_len %d",
+ msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
+
+ if (msg_index >= rx_rbr_p->tnblocks) {
+ MUTEX_EXIT(&rx_rbr_p->lock);
+ MUTEX_EXIT(&rcr_p->lock);
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_receive_packet: FATAL msg_index (%d) "
+ "should be smaller than tnblocks (%d)\n",
+ msg_index, rx_rbr_p->tnblocks));
+ return;
+ }
+
+ rx_msg_p = rx_msg_ring_p[msg_index];
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
+ "full pkt_buf_addr_pp $%p l2_len %d",
+ msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
+
+ switch (pktbufsz_type) {
+ case RCR_PKTBUFSZ_0:
+ bsize = rx_rbr_p->pkt_buf_size0_bytes;
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_receive_packet: 0 buf %d", bsize));
+ break;
+ case RCR_PKTBUFSZ_1:
+ bsize = rx_rbr_p->pkt_buf_size1_bytes;
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_receive_packet: 1 buf %d", bsize));
+ break;
+ case RCR_PKTBUFSZ_2:
+ bsize = rx_rbr_p->pkt_buf_size2_bytes;
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_receive_packet: 2 buf %d", bsize));
+ break;
+ case RCR_SINGLE_BLOCK:
+ bsize = rx_msg_p->block_size;
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_receive_packet: single %d", bsize));
+
+ break;
+ default:
+ MUTEX_EXIT(&rx_rbr_p->lock);
+ MUTEX_EXIT(&rcr_p->lock);
+ return;
+ }
+
+ DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
+ (buf_offset + sw_offset_bytes), (hdr_size + l2_len),
+ DDI_DMA_SYNC_FORCPU);
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_receive_packet: after first dump:usage count"));
+
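+ /*
+ * On first use of this block, decide whether its packets will be
+ * bcopy'd instead of loaned up the stack: below the low-water mark
+ * buffers are loaned; between the two thresholds only packets whose
+ * buffer size type does not exceed the configured type are copied
+ * (or all of them, if no low-water mark is set); at or above the
+ * high-water mark everything is copied.
+ */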
+ if (rx_msg_p->cur_usage_cnt == 0) {
+ if (rx_rbr_p->rbr_use_bcopy) {
+ atomic_inc_32(&rx_rbr_p->rbr_consumed);
+ if (rx_rbr_p->rbr_consumed <
+ rx_rbr_p->rbr_threshold_hi) {
+ if (rx_rbr_p->rbr_threshold_lo == 0 ||
+ ((rx_rbr_p->rbr_consumed >=
+ rx_rbr_p->rbr_threshold_lo) &&
+ (rx_rbr_p->rbr_bufsize_type >=
+ pktbufsz_type))) {
+ rx_msg_p->rx_use_bcopy = B_TRUE;
+ }
+ } else {
+ rx_msg_p->rx_use_bcopy = B_TRUE;
+ }
+ }
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_receive_packet: buf %d (new block) ", bsize));
+
+ rx_msg_p->pkt_buf_size_code = pktbufsz_type;
+ rx_msg_p->pkt_buf_size = bsize;
+ rx_msg_p->cur_usage_cnt = 1;
+ if (pktbufsz_type == RCR_SINGLE_BLOCK) {
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_receive_packet: buf %d (single block) ",
+ bsize));
+ /*
+ * Buffer can be reused once the free function is
+ * called.
+ */
+ rx_msg_p->max_usage_cnt = 1;
+ buffer_free = B_TRUE;
+ } else {
+ rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize;
+ if (rx_msg_p->max_usage_cnt == 1) {
+ buffer_free = B_TRUE;
+ }
+ }
+ } else {
+ rx_msg_p->cur_usage_cnt++;
+ if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
+ buffer_free = B_TRUE;
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
+ msg_index, l2_len,
+ rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
+
+ if (error_type) {
+ rdc_stats->ierrors++;
+ /* Update error stats */
+ rdc_stats->errlog.compl_err_type = error_type;
+ HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_RDMC_RCR_ERR);
+
+ if (error_type & RCR_CTRL_FIFO_DED) {
+ rdc_stats->ctrl_fifo_ecc_err++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_receive_packet: "
+ " channel %d RCR ctrl_fifo_ded error", channel));
+ } else if (error_type & RCR_DATA_FIFO_DED) {
+ rdc_stats->data_fifo_ecc_err++;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_receive_packet: channel %d"
+ " RCR data_fifo_ded error", channel));
+ }
+
+ /*
+ * Update and repost buffer block if max usage count is
+ * reached.
+ */
+ if (error_send_up == B_FALSE) {
+ atomic_inc_32(&rx_msg_p->ref_cnt);
+ atomic_inc_32(&hxge_mblks_pending);
+ if (buffer_free == B_TRUE) {
+ rx_msg_p->free = B_TRUE;
+ }
+
+ MUTEX_EXIT(&rx_rbr_p->lock);
+ MUTEX_EXIT(&rcr_p->lock);
+ hxge_freeb(rx_msg_p);
+ return;
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_receive_packet: DMA sync second "));
+
+ bytes_read = rcr_p->rcvd_pkt_bytes;
+ skip_len = sw_offset_bytes + hdr_size;
+ if (!rx_msg_p->rx_use_bcopy) {
+ /*
+ * For loaned-up buffers, the driver reference count
+ * is incremented first; the free state is checked later,
+ * in hxge_freeb(), before the buffer is reposted.
+ */
+ if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
+ if (first_entry) {
+ nmp->b_rptr = &nmp->b_rptr[skip_len];
+ if (l2_len < bsize - skip_len) {
+ nmp->b_wptr = &nmp->b_rptr[l2_len];
+ } else {
+ nmp->b_wptr = &nmp->b_rptr[bsize
+ - skip_len];
+ }
+ } else {
+ if (l2_len - bytes_read < bsize) {
+ nmp->b_wptr =
+ &nmp->b_rptr[l2_len - bytes_read];
+ } else {
+ nmp->b_wptr = &nmp->b_rptr[bsize];
+ }
+ }
+ }
+ } else {
+ if (first_entry) {
+ nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
+ l2_len < bsize - skip_len ?
+ l2_len : bsize - skip_len);
+ } else {
+ nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset,
+ l2_len - bytes_read < bsize ?
+ l2_len - bytes_read : bsize);
+ }
+ }
+
+ if (nmp != NULL) {
+ if (first_entry)
+ bytes_read = nmp->b_wptr - nmp->b_rptr;
+ else
+ bytes_read += nmp->b_wptr - nmp->b_rptr;
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_receive_packet after dupb: "
+ "rbr consumed %d "
+ "pktbufsz_type %d "
+ "nmp $%p rptr $%p wptr $%p "
+ "buf_offset %d bzise %d l2_len %d skip_len %d",
+ rx_rbr_p->rbr_consumed,
+ pktbufsz_type,
+ nmp, nmp->b_rptr, nmp->b_wptr,
+ buf_offset, bsize, l2_len, skip_len));
+ } else {
+ cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)");
+
+ atomic_inc_32(&rx_msg_p->ref_cnt);
+ atomic_inc_32(&hxge_mblks_pending);
+ if (buffer_free == B_TRUE) {
+ rx_msg_p->free = B_TRUE;
+ }
+
+ MUTEX_EXIT(&rx_rbr_p->lock);
+ MUTEX_EXIT(&rcr_p->lock);
+ hxge_freeb(rx_msg_p);
+ return;
+ }
+
+ if (buffer_free == B_TRUE) {
+ rx_msg_p->free = B_TRUE;
+ }
+
+ /*
+ * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a
+ * packet is not fragmented and no error bit is set, then L4 checksum
+ * is OK.
+ */
+ is_valid = (nmp != NULL);
+ if (first_entry) {
+ rdc_stats->ipackets++; /* count only 1st seg for jumbo */
+ rdc_stats->ibytes += (skip_len + l2_len) < bsize ?
+ l2_len : bsize;
+ } else {
+ rdc_stats->ibytes += l2_len - bytes_read < bsize ?
+ l2_len - bytes_read : bsize;
+ }
+
+ rcr_p->rcvd_pkt_bytes = bytes_read;
+
+ MUTEX_EXIT(&rx_rbr_p->lock);
+ MUTEX_EXIT(&rcr_p->lock);
+
+ if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
+ atomic_inc_32(&rx_msg_p->ref_cnt);
+ atomic_inc_32(&hxge_mblks_pending);
+ hxge_freeb(rx_msg_p);
+ }
+
+ if (is_valid) {
+ nmp->b_cont = NULL;
+ if (first_entry) {
+ *mp = nmp;
+ *mp_cont = NULL;
+ } else {
+ *mp_cont = nmp;
+ }
+ }
+
+ /*
+ * Update stats and hardware checksumming.
+ */
+ if (is_valid && !multi) {
+
+ is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
+ pkt_type == RCR_PKT_IS_UDP) ? B_TRUE : B_FALSE);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_receive_packet: "
+ "is_valid 0x%x multi %d pkt %d d error %d",
+ is_valid, multi, is_tcp_udp, error_type));
+
+ if (is_tcp_udp && !error_type) {
+ (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
+ HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_receive_packet: Full tcp/udp cksum "
+ "is_valid 0x%x multi %d pkt %d "
+ "error %d",
+ is_valid, multi, is_tcp_udp, error_type));
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL,
+ "==> hxge_receive_packet: *mp 0x%016llx", *mp));
+
+ *multi_p = (multi == RCR_MULTI_MASK);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: "
+ "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
+ *multi_p, nmp, *mp, *mp_cont));
+}
+
+/*ARGSUSED*/
+static hxge_status_t
+hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
+ rdc_stat_t cs)
+{
+ p_hxge_rx_ring_stats_t rdc_stats;
+ hpi_handle_t handle;
+ boolean_t rxchan_fatal = B_FALSE;
+ uint8_t channel;
+ hxge_status_t status = HXGE_OK;
+ uint64_t cs_val;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ channel = ldvp->channel;
+
+ /* Clear the interrupts */
+ cs_val = cs.value & RDC_STAT_WR1C;
+ RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_val);
+
+ rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index];
+
+ if (cs.bits.rbr_cpl_to) {
+ rdc_stats->rbr_tmout++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_RDMC_RBR_CPL_TO);
+ rxchan_fatal = B_TRUE;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rx_err_evnts(channel %d): "
+ "fatal error: rx_rbr_timeout", channel));
+ }
+
+ if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) {
+ (void) hpi_rxdma_ring_perr_stat_get(handle,
+ &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par);
+ }
+
+ if (cs.bits.rcr_shadow_par_err) {
+ rdc_stats->rcr_sha_par++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
+ rxchan_fatal = B_TRUE;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rx_err_evnts(channel %d): "
+ "fatal error: rcr_shadow_par_err", channel));
+ }
+
+ if (cs.bits.rbr_prefetch_par_err) {
+ rdc_stats->rbr_pre_par++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
+ rxchan_fatal = B_TRUE;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rx_err_evnts(channel %d): "
+ "fatal error: rbr_prefetch_par_err", channel));
+ }
+
+ if (cs.bits.rbr_pre_empty) {
+ rdc_stats->rbr_pre_empty++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY);
+ rxchan_fatal = B_TRUE;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rx_err_evnts(channel %d): "
+ "fatal error: rbr_pre_empty", channel));
+ }
+
+ if (cs.bits.peu_resp_err) {
+ rdc_stats->peu_resp_err++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR);
+ rxchan_fatal = B_TRUE;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rx_err_evnts(channel %d): "
+ "fatal error: peu_resp_err", channel));
+ }
+
+ if (cs.bits.rcr_thres) {
+ rdc_stats->rcr_thres++;
+ if (rdc_stats->rcr_thres == 1)
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rx_err_evnts(channel %d): rcr_thres",
+ channel));
+ }
+
+ if (cs.bits.rcr_to) {
+ rdc_stats->rcr_to++;
+ if (rdc_stats->rcr_to == 1)
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rx_err_evnts(channel %d): rcr_to",
+ channel));
+ }
+
+ if (cs.bits.rcr_shadow_full) {
+ rdc_stats->rcr_shadow_full++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL);
+ rxchan_fatal = B_TRUE;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rx_err_evnts(channel %d): "
+ "fatal error: rcr_shadow_full", channel));
+ }
+
+ if (cs.bits.rcr_full) {
+ rdc_stats->rcrfull++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_RDMC_RCRFULL);
+ rxchan_fatal = B_TRUE;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rx_err_evnts(channel %d): "
+ "fatal error: rcrfull error", channel));
+ }
+
+ if (cs.bits.rbr_empty) {
+ rdc_stats->rbr_empty++;
+ if (rdc_stats->rbr_empty == 1)
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rx_err_evnts(channel %d): "
+ "rbr empty error", channel));
+ /*
+		 * The DMA channel is disabled when the rbr_empty bit is
+		 * set, even though the condition is not fatal. Re-enable
+		 * the DMA channel here to work around the hardware bug.
+ */
+ (void) hpi_rxdma_cfg_rdc_enable(handle, channel);
+ }
+
+ if (cs.bits.rbr_full) {
+ rdc_stats->rbrfull++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_RDMC_RBRFULL);
+ rxchan_fatal = B_TRUE;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rx_err_evnts(channel %d): "
+ "fatal error: rbr_full error", channel));
+ }
+
+ if (rxchan_fatal) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_rx_err_evnts: fatal error on Channel #%d\n",
+ channel));
+ status = hxge_rxdma_fatal_err_recover(hxgep, channel);
+ if (status == HXGE_OK) {
+ FM_SERVICE_RESTORED(hxgep);
+ }
+ }
+ HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rx_err_evnts"));
+
+ return (status);
+}
+
+static hxge_status_t
+hxge_map_rxdma(p_hxge_t hxgep)
+{
+ int i, ndmas;
+ uint16_t channel;
+ p_rx_rbr_rings_t rx_rbr_rings;
+ p_rx_rbr_ring_t *rbr_rings;
+ p_rx_rcr_rings_t rx_rcr_rings;
+ p_rx_rcr_ring_t *rcr_rings;
+ p_rx_mbox_areas_t rx_mbox_areas_p;
+ p_rx_mbox_t *rx_mbox_p;
+ p_hxge_dma_pool_t dma_buf_poolp;
+ p_hxge_dma_pool_t dma_cntl_poolp;
+ p_hxge_dma_common_t *dma_buf_p;
+ p_hxge_dma_common_t *dma_cntl_p;
+ uint32_t *num_chunks;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma"));
+
+ dma_buf_poolp = hxgep->rx_buf_pool_p;
+ dma_cntl_poolp = hxgep->rx_cntl_pool_p;
+
+ if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_map_rxdma: buf not allocated"));
+ return (HXGE_ERROR);
+ }
+
+ ndmas = dma_buf_poolp->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_map_rxdma: no dma allocated"));
+ return (HXGE_ERROR);
+ }
+
+ num_chunks = dma_buf_poolp->num_chunks;
+ dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
+ dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
+ rx_rbr_rings = (p_rx_rbr_rings_t)
+ KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
+ rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC(
+ sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP);
+
+ rx_rcr_rings = (p_rx_rcr_rings_t)
+ KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
+ rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC(
+ sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP);
+
+ rx_mbox_areas_p = (p_rx_mbox_areas_t)
+ KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
+ rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC(
+ sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP);
+
+ /*
+ * Timeout should be set based on the system clock divider.
+ * The following timeout value of 1 assumes that the
+ * granularity (1000) is 3 microseconds running at 300MHz.
+ */
+
+ hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
+ hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
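+
+	/*
+	 * Illustrative arithmetic (a sketch, not verified against the
+	 * hardware manual): with RXDMA_CK_DIV_DEFAULT (7500) on a 300 MHz
+	 * clock, one timeout tick is 7500 / 300 MHz = 25 usec, so
+	 * RXDMA_RCR_TO_DEFAULT (0x8) corresponds to roughly 200 usec.
+	 */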
+
+ /*
+	 * Map descriptors from the buffer pools for each DMA channel.
+ */
+ for (i = 0; i < ndmas; i++) {
+ /*
+ * Set up and prepare buffer blocks, descriptors and mailbox.
+ */
+ channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
+ status = hxge_map_rxdma_channel(hxgep, channel,
+ (p_hxge_dma_common_t *)&dma_buf_p[i],
+ (p_rx_rbr_ring_t *)&rbr_rings[i],
+ num_chunks[i], (p_hxge_dma_common_t *)&dma_cntl_p[i],
+ (p_rx_rcr_ring_t *)&rcr_rings[i],
+ (p_rx_mbox_t *)&rx_mbox_p[i]);
+ if (status != HXGE_OK) {
+ goto hxge_map_rxdma_fail1;
+ }
+ rbr_rings[i]->index = (uint16_t)i;
+ rcr_rings[i]->index = (uint16_t)i;
+ rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i];
+ }
+
+ rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas;
+ rx_rbr_rings->rbr_rings = rbr_rings;
+ hxgep->rx_rbr_rings = rx_rbr_rings;
+ rx_rcr_rings->rcr_rings = rcr_rings;
+ hxgep->rx_rcr_rings = rx_rcr_rings;
+
+ rx_mbox_areas_p->rxmbox_areas = rx_mbox_p;
+ hxgep->rx_mbox_areas_p = rx_mbox_areas_p;
+
+ goto hxge_map_rxdma_exit;
+
+hxge_map_rxdma_fail1:
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)",
+ status, channel, i));
+ i--;
+ for (; i >= 0; i--) {
+ channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
+ hxge_unmap_rxdma_channel(hxgep, channel,
+ rbr_rings[i], rcr_rings[i], rx_mbox_p[i]);
+ }
+
+ KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
+ KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
+ KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
+ KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
+ KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
+ KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
+
+hxge_map_rxdma_exit:
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel));
+
+ return (status);
+}
+
+static void
+hxge_unmap_rxdma(p_hxge_t hxgep)
+{
+ int i, ndmas;
+ uint16_t channel;
+ p_rx_rbr_rings_t rx_rbr_rings;
+ p_rx_rbr_ring_t *rbr_rings;
+ p_rx_rcr_rings_t rx_rcr_rings;
+ p_rx_rcr_ring_t *rcr_rings;
+ p_rx_mbox_areas_t rx_mbox_areas_p;
+ p_rx_mbox_t *rx_mbox_p;
+ p_hxge_dma_pool_t dma_buf_poolp;
+ p_hxge_dma_pool_t dma_cntl_poolp;
+ p_hxge_dma_common_t *dma_buf_p;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma"));
+
+ dma_buf_poolp = hxgep->rx_buf_pool_p;
+ dma_cntl_poolp = hxgep->rx_cntl_pool_p;
+
+ if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_unmap_rxdma: NULL buf pointers"));
+ return;
+ }
+
+ rx_rbr_rings = hxgep->rx_rbr_rings;
+ rx_rcr_rings = hxgep->rx_rcr_rings;
+ if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_unmap_rxdma: NULL ring pointers"));
+ return;
+ }
+
+ ndmas = rx_rbr_rings->ndmas;
+ if (!ndmas) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_unmap_rxdma: no channel"));
+ return;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_unmap_rxdma (ndmas %d)", ndmas));
+
+ rbr_rings = rx_rbr_rings->rbr_rings;
+ rcr_rings = rx_rcr_rings->rcr_rings;
+ rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
+ rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
+ dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
+
+ for (i = 0; i < ndmas; i++) {
+ channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_unmap_rxdma (ndmas %d) channel %d",
+ ndmas, channel));
+ (void) hxge_unmap_rxdma_channel(hxgep, channel,
+ (p_rx_rbr_ring_t)rbr_rings[i],
+ (p_rx_rcr_ring_t)rcr_rings[i],
+ (p_rx_mbox_t)rx_mbox_p[i]);
+ }
+
+ KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
+ KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
+ KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
+ KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
+ KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
+ KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma"));
+}
+
+hxge_status_t
+hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
+ uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
+ p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
+{
+ int status = HXGE_OK;
+
+ /*
+ * Set up and prepare buffer blocks, descriptors and mailbox.
+ */
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_map_rxdma_channel (channel %d)", channel));
+
+ /*
+ * Receive buffer blocks
+ */
+ status = hxge_map_rxdma_channel_buf_ring(hxgep, channel,
+ dma_buf_p, rbr_p, num_chunks);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_map_rxdma_channel (channel %d): "
+ "map buffer failed 0x%x", channel, status));
+ goto hxge_map_rxdma_channel_exit;
+ }
+
+ /*
+ * Receive block ring, completion ring and mailbox.
+ */
+ status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel,
+ dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_map_rxdma_channel (channel %d): "
+ "map config failed 0x%x", channel, status));
+ goto hxge_map_rxdma_channel_fail2;
+ }
+ goto hxge_map_rxdma_channel_exit;
+
+hxge_map_rxdma_channel_fail3:
+ /* Free rbr, rcr */
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)",
+ status, channel));
+ hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p);
+
+hxge_map_rxdma_channel_fail2:
+ /* Free buffer blocks */
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_map_rxdma_channel: free rx buffers"
+ "(hxgep 0x%x status 0x%x channel %d)",
+ hxgep, status, channel));
+ hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p);
+
+ status = HXGE_ERROR;
+
+hxge_map_rxdma_channel_exit:
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)",
+ hxgep, status, channel));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static void
+hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
+{
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_unmap_rxdma_channel (channel %d)", channel));
+
+ /*
+ * unmap receive block ring, completion ring and mailbox.
+ */
+ (void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p);
+
+ /* unmap buffer blocks */
+ (void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p);
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel"));
+}
+
+/*ARGSUSED*/
+static hxge_status_t
+hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
+ p_hxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
+ p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
+{
+ p_rx_rbr_ring_t rbrp;
+ p_rx_rcr_ring_t rcrp;
+ p_rx_mbox_t mboxp;
+ p_hxge_dma_common_t cntl_dmap;
+ p_hxge_dma_common_t dmap;
+ p_rx_msg_t *rx_msg_ring;
+ p_rx_msg_t rx_msg_p;
+ rdc_rbr_cfg_a_t *rcfga_p;
+ rdc_rbr_cfg_b_t *rcfgb_p;
+ rdc_rcr_cfg_a_t *cfga_p;
+ rdc_rcr_cfg_b_t *cfgb_p;
+ rdc_rx_cfg1_t *cfig1_p;
+ rdc_rx_cfg2_t *cfig2_p;
+ rdc_rbr_kick_t *kick_p;
+ uint32_t dmaaddrp;
+ uint32_t *rbr_vaddrp;
+ uint32_t bkaddr;
+ hxge_status_t status = HXGE_OK;
+ int i;
+ uint32_t hxge_port_rcr_size;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_map_rxdma_channel_cfg_ring"));
+
+ cntl_dmap = *dma_cntl_p;
+
+ /* Map in the receive block ring */
+ rbrp = *rbr_p;
+ dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc;
+ hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
+
+ /*
+ * Zero out buffer block ring descriptors.
+ */
+ bzero((caddr_t)dmap->kaddrp, dmap->alength);
+
+ rcfga_p = &(rbrp->rbr_cfga);
+ rcfgb_p = &(rbrp->rbr_cfgb);
+ kick_p = &(rbrp->rbr_kick);
+ rcfga_p->value = 0;
+ rcfgb_p->value = 0;
+ kick_p->value = 0;
+ rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
+ rcfga_p->value = (rbrp->rbr_addr &
+ (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK));
+ rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
+
+ /* XXXX: how to choose packet buffer sizes */
+ rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0;
+ rcfgb_p->bits.vld0 = 1;
+ rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1;
+ rcfgb_p->bits.vld1 = 1;
+ rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2;
+ rcfgb_p->bits.vld2 = 1;
+ rcfgb_p->bits.bksize = hxgep->rx_bksize_code;
+
+ /*
+ * For each buffer block, enter receive block address to the ring.
+ */
+ rbr_vaddrp = (uint32_t *)dmap->kaddrp;
+ rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
+ "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
+
+ rx_msg_ring = rbrp->rx_msg_ring;
+ for (i = 0; i < rbrp->tnblocks; i++) {
+ rx_msg_p = rx_msg_ring[i];
+ rx_msg_p->hxgep = hxgep;
+ rx_msg_p->rx_rbr_p = rbrp;
+ bkaddr = (uint32_t)
+ ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
+ RBR_BKADDR_SHIFT));
+ rx_msg_p->free = B_FALSE;
+ rx_msg_p->max_usage_cnt = 0xbaddcafe;
+
+ *rbr_vaddrp++ = bkaddr;
+ }
+
+ kick_p->bits.bkadd = rbrp->rbb_max;
+ rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
+
+ rbrp->rbr_rd_index = 0;
+
+ rbrp->rbr_consumed = 0;
+ rbrp->rbr_use_bcopy = B_TRUE;
+ rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
+
+ /*
+ * Do bcopy on packets greater than bcopy size once the lo threshold is
+ * reached. This lo threshold should be less than the hi threshold.
+ *
+ * Do bcopy on every packet once the hi threshold is reached.
+ */
+ if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) {
+ /* default it to use hi */
+ hxge_rx_threshold_lo = hxge_rx_threshold_hi;
+ }
+ if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) {
+ hxge_rx_buf_size_type = HXGE_RBR_TYPE2;
+ }
+ rbrp->rbr_bufsize_type = hxge_rx_buf_size_type;
+
+ switch (hxge_rx_threshold_hi) {
+ default:
+ case HXGE_RX_COPY_NONE:
+ /* Do not do bcopy at all */
+ rbrp->rbr_use_bcopy = B_FALSE;
+ rbrp->rbr_threshold_hi = rbrp->rbb_max;
+ break;
+
+ case HXGE_RX_COPY_1:
+ case HXGE_RX_COPY_2:
+ case HXGE_RX_COPY_3:
+ case HXGE_RX_COPY_4:
+ case HXGE_RX_COPY_5:
+ case HXGE_RX_COPY_6:
+ case HXGE_RX_COPY_7:
+ rbrp->rbr_threshold_hi =
+ rbrp->rbb_max * (hxge_rx_threshold_hi) /
+ HXGE_RX_BCOPY_SCALE;
+ break;
+
+ case HXGE_RX_COPY_ALL:
+ rbrp->rbr_threshold_hi = 0;
+ break;
+ }
+
+ switch (hxge_rx_threshold_lo) {
+ default:
+ case HXGE_RX_COPY_NONE:
+ /* Do not do bcopy at all */
+ if (rbrp->rbr_use_bcopy) {
+ rbrp->rbr_use_bcopy = B_FALSE;
+ }
+ rbrp->rbr_threshold_lo = rbrp->rbb_max;
+ break;
+
+ case HXGE_RX_COPY_1:
+ case HXGE_RX_COPY_2:
+ case HXGE_RX_COPY_3:
+ case HXGE_RX_COPY_4:
+ case HXGE_RX_COPY_5:
+ case HXGE_RX_COPY_6:
+ case HXGE_RX_COPY_7:
+ rbrp->rbr_threshold_lo =
+ rbrp->rbb_max * (hxge_rx_threshold_lo) /
+ HXGE_RX_BCOPY_SCALE;
+ break;
+
+ case HXGE_RX_COPY_ALL:
+ rbrp->rbr_threshold_lo = 0;
+ break;
+ }
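+
+	/*
+	 * Worked example with illustrative values: for rbb_max = 2048 and
+	 * hxge_rx_threshold_hi = HXGE_RX_COPY_6, rbr_threshold_hi is
+	 * 2048 * 6 / 8 = 1536. Once that many of the posted buffers are
+	 * consumed, the receive path falls back to bcopy instead of
+	 * loaning buffers upstream.
+	 */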
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d "
+ "rbrp->rbr_bufsize_type %d rbb_threshold_hi %d "
+ "rbb_threshold_lo %d",
+ dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type,
+ rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo));
+
+ /* Map in the receive completion ring */
+ rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
+ rcrp->rdc = dma_channel;
+
+ hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
+ rcrp->comp_size = hxge_port_rcr_size;
+ rcrp->comp_wrap_mask = hxge_port_rcr_size - 1;
+
+ rcrp->max_receive_pkts = hxge_max_rx_pkts;
+
+ dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
+ hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
+ sizeof (rcr_entry_t));
+ rcrp->comp_rd_index = 0;
+ rcrp->comp_wt_index = 0;
+ rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
+ (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
+ rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
+ (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
+ rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
+ (hxge_port_rcr_size - 1);
+ rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
+ (hxge_port_rcr_size - 1);
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
+ "rbr_vaddrp $%p rcr_desc_rd_head_p $%p "
+ "rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p "
+ "rcr_desc_rd_last_pp $%p ",
+ dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p,
+ rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p,
+ rcrp->rcr_desc_last_pp));
+
+ /*
+ * Zero out buffer block ring descriptors.
+ */
+ bzero((caddr_t)dmap->kaddrp, dmap->alength);
+ rcrp->intr_timeout = hxgep->intr_timeout;
+ rcrp->intr_threshold = hxgep->intr_threshold;
+ rcrp->full_hdr_flag = B_FALSE;
+ rcrp->sw_priv_hdr_len = 0;
+
+ cfga_p = &(rcrp->rcr_cfga);
+ cfgb_p = &(rcrp->rcr_cfgb);
+ cfga_p->value = 0;
+ cfgb_p->value = 0;
+ rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
+
+ cfga_p->value = (rcrp->rcr_addr &
+ (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK));
+
+ cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF);
+
+ /*
+ * Timeout should be set based on the system clock divider. The
+ * following timeout value of 1 assumes that the granularity (1000) is
+ * 3 microseconds running at 300MHz.
+ */
+ cfgb_p->bits.pthres = rcrp->intr_threshold;
+ cfgb_p->bits.timeout = rcrp->intr_timeout;
+ cfgb_p->bits.entout = 1;
+
+ /* Map in the mailbox */
+ mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
+ dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox;
+ hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
+ cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1;
+ cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2;
+ cfig1_p->value = cfig2_p->value = 0;
+
+ mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_map_rxdma_channel_cfg_ring: "
+ "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
+ dma_channel, cfig1_p->value, cfig2_p->value,
+ mboxp->mbox_addr));
+
+ dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff);
+ cfig1_p->bits.mbaddr_h = dmaaddrp;
+
+ dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
+ dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
+ RXDMA_CFIG2_MBADDR_L_MASK);
+
+ cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
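+
+	/*
+	 * Sketch of the address split (assuming a 64-bit DMA cookie):
+	 * mbaddr_h carries address bits 43:32 (the 0xfff mask keeps 12
+	 * bits) and mbaddr_l carries the low 32 bits less their bottom
+	 * RXDMA_CFIG2_MBADDR_L_SHIFT bits, which the mailbox alignment
+	 * guarantees are zero.
+	 */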
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_map_rxdma_channel_cfg_ring: channel %d damaddrp $%p "
+ "cfg1 0x%016llx cfig2 0x%016llx",
+ dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value));
+
+ cfig2_p->bits.full_hdr = rcrp->full_hdr_flag;
+ cfig2_p->bits.offset = rcrp->sw_priv_hdr_len;
+
+ rbrp->rx_rcr_p = rcrp;
+ rcrp->rx_rbr_p = rbrp;
+ *rcr_p = rcrp;
+ *rx_mbox_p = mboxp;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
+ return (status);
+}
+
+/*ARGSUSED*/
+static void
+hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
+ p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
+{
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc));
+
+ KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
+ KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "<== hxge_unmap_rxdma_channel_cfg_ring"));
+}
+
+static hxge_status_t
+hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
+ p_hxge_dma_common_t *dma_buf_p,
+ p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
+{
+ p_rx_rbr_ring_t rbrp;
+ p_hxge_dma_common_t dma_bufp, tmp_bufp;
+ p_rx_msg_t *rx_msg_ring;
+ p_rx_msg_t rx_msg_p;
+ p_mblk_t mblk_p;
+
+ rxring_info_t *ring_info;
+ hxge_status_t status = HXGE_OK;
+ int i, j, index;
+ uint32_t size, bsize, nblocks, nmsgs;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel));
+
+ dma_bufp = tmp_bufp = *dma_buf_p;
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ " hxge_map_rxdma_channel_buf_ring: channel %d to map %d "
+ "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp));
+
+ nmsgs = 0;
+ for (i = 0; i < num_chunks; i++, tmp_bufp++) {
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_map_rxdma_channel_buf_ring: channel %d "
+ "bufp 0x%016llx nblocks %d nmsgs %d",
+ channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
+ nmsgs += tmp_bufp->nblocks;
+ }
+ if (!nmsgs) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_map_rxdma_channel_buf_ring: channel %d "
+ "no msg blocks", channel));
+ status = HXGE_ERROR;
+ goto hxge_map_rxdma_channel_buf_ring_exit;
+ }
+ rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP);
+
+ size = nmsgs * sizeof (p_rx_msg_t);
+ rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
+ ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
+ KM_SLEEP);
+
+ MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
+ (void *) hxgep->interrupt_cookie);
+ MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
+ (void *) hxgep->interrupt_cookie);
+ rbrp->rdc = channel;
+ rbrp->num_blocks = num_chunks;
+ rbrp->tnblocks = nmsgs;
+ rbrp->rbb_max = nmsgs;
+ rbrp->rbr_max_size = nmsgs;
+ rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
+
+ rbrp->pages_to_post = 0;
+ rbrp->pages_to_skip = 20;
+ rbrp->pages_to_post_threshold = rbrp->rbb_max - rbrp->pages_to_skip / 2;
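+
+	/*
+	 * Note: freed pages accumulate in pages_to_post and are presumably
+	 * kicked back to hardware in batches by hxge_post_page() (compare
+	 * HXGE_RXDMA_POST_BATCH in hxge_rxdma.h); the skip and threshold
+	 * values above tune that batching.
+	 */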
+
+ /*
+	 * Buffer sizes suggested by the NIU architect: 256B, 1K and 2K
+	 * (4K when jumbo frames are enabled), matching the settings below.
+ */
+
+ rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
+ rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
+ rbrp->hpi_pkt_buf_size0 = SIZE_256B;
+
+ rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
+ rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
+ rbrp->hpi_pkt_buf_size1 = SIZE_1KB;
+
+ rbrp->block_size = hxgep->rx_default_block_size;
+
+ if (!hxge_jumbo_enable && !hxgep->param_arr[param_accept_jumbo].value) {
+ rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
+ rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
+ rbrp->hpi_pkt_buf_size2 = SIZE_2KB;
+ } else {
+ if (rbrp->block_size >= 0x2000) {
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "<== hxge_map_rxdma_channel_buf_ring: channel %d "
+ "no msg blocks", channel));
+ status = HXGE_ERROR;
+ goto hxge_map_rxdma_channel_buf_ring_fail1;
+ } else {
+ rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
+ rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
+ rbrp->hpi_pkt_buf_size2 = SIZE_4KB;
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_map_rxdma_channel_buf_ring: channel %d "
+ "actual rbr max %d rbb_max %d nmsgs %d "
+ "rbrp->block_size %d default_block_size %d "
+ "(config hxge_rbr_size %d hxge_rbr_spare_size %d)",
+ channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
+ rbrp->block_size, hxgep->rx_default_block_size,
+ hxge_rbr_size, hxge_rbr_spare_size));
+
+ /*
+ * Map in buffers from the buffer pool.
+	 * Note that num_blocks is num_chunks. On SPARC there is likely
+	 * only one chunk; on x86 there will be many. Loop over the chunks.
+ */
+ index = 0;
+ for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
+ bsize = dma_bufp->block_size;
+ nblocks = dma_bufp->nblocks;
+ ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
+ ring_info->buffer[i].buf_index = i;
+ ring_info->buffer[i].buf_size = dma_bufp->alength;
+ ring_info->buffer[i].start_index = index;
+ ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ " hxge_map_rxdma_channel_buf_ring: map channel %d "
+ "chunk %d nblocks %d chunk_size %x block_size 0x%x "
+ "dma_bufp $%p dvma_addr $%p", channel, i,
+ dma_bufp->nblocks,
+ ring_info->buffer[i].buf_size, bsize, dma_bufp,
+ ring_info->buffer[i].dvma_addr));
+
+ /* loop over blocks within a chunk */
+ for (j = 0; j < nblocks; j++) {
+ if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO,
+ dma_bufp)) == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "allocb failed (index %d i %d j %d)",
+ index, i, j));
+ goto hxge_map_rxdma_channel_buf_ring_fail1;
+ }
+ rx_msg_ring[index] = rx_msg_p;
+ rx_msg_p->block_index = index;
+ rx_msg_p->shifted_addr = (uint32_t)
+ ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
+ RBR_BKADDR_SHIFT));
+ /*
+ * Too much output
+ * HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ * "index %d j %d rx_msg_p $%p mblk %p",
+ * index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
+ */
+ mblk_p = rx_msg_p->rx_mblk_p;
+ mblk_p->b_wptr = mblk_p->b_rptr + bsize;
+
+ rbrp->rbr_ref_cnt++;
+ index++;
+ rx_msg_p->buf_dma.dma_channel = channel;
+ }
+ }
+ if (i < rbrp->num_blocks) {
+ goto hxge_map_rxdma_channel_buf_ring_fail1;
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "hxge_map_rxdma_channel_buf_ring: done buf init "
+ "channel %d msg block entries %d", channel, index));
+ ring_info->block_size_mask = bsize - 1;
+ rbrp->rx_msg_ring = rx_msg_ring;
+ rbrp->dma_bufp = dma_buf_p;
+ rbrp->ring_info = ring_info;
+
+ status = hxge_rxbuf_index_info_init(hxgep, rbrp);
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: "
+ "channel %d done buf info init", channel));
+
+ /*
+ * Finally, permit hxge_freeb() to call hxge_post_page().
+ */
+ rbrp->rbr_state = RBR_POSTING;
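+
+	/*
+	 * Lifecycle note: <rbr_state> moves RBR_POSTING -> RBR_UNMAPPING
+	 * -> RBR_UNMAPPED (see hxge_unmap_rxdma_channel_buf_ring below);
+	 * hxge_post_page() may repost buffers only while in RBR_POSTING.
+	 */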
+
+ *rbr_p = rbrp;
+
+ goto hxge_map_rxdma_channel_buf_ring_exit;
+
+hxge_map_rxdma_channel_buf_ring_fail1:
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ " hxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
+ channel, status));
+
+ index--;
+ for (; index >= 0; index--) {
+ rx_msg_p = rx_msg_ring[index];
+ if (rx_msg_p != NULL) {
+ hxge_freeb(rx_msg_p);
+ rx_msg_ring[index] = NULL;
+ }
+ }
+
+hxge_map_rxdma_channel_buf_ring_fail:
+ MUTEX_DESTROY(&rbrp->post_lock);
+ MUTEX_DESTROY(&rbrp->lock);
+ KMEM_FREE(ring_info, sizeof (rxring_info_t));
+ KMEM_FREE(rx_msg_ring, size);
+ KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
+
+ status = HXGE_ERROR;
+
+hxge_map_rxdma_channel_buf_ring_exit:
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static void
+hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
+ p_rx_rbr_ring_t rbr_p)
+{
+ p_rx_msg_t *rx_msg_ring;
+ p_rx_msg_t rx_msg_p;
+ rxring_info_t *ring_info;
+ int i;
+ uint32_t size;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_unmap_rxdma_channel_buf_ring"));
+ if (rbr_p == NULL) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
+ return;
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc));
+
+ rx_msg_ring = rbr_p->rx_msg_ring;
+ ring_info = rbr_p->ring_info;
+
+ if (rx_msg_ring == NULL || ring_info == NULL) {
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "<== hxge_unmap_rxdma_channel_buf_ring: "
+ "rx_msg_ring $%p ring_info $%p", rx_msg_p, ring_info));
+ return;
+ }
+
+ size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
+ "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks,
+ rbr_p->tnblocks, rbr_p->rbr_max_size, size));
+
+ for (i = 0; i < rbr_p->tnblocks; i++) {
+ rx_msg_p = rx_msg_ring[i];
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ " hxge_unmap_rxdma_channel_buf_ring: "
+ "rx_msg_p $%p", rx_msg_p));
+ if (rx_msg_p != NULL) {
+ hxge_freeb(rx_msg_p);
+ rx_msg_ring[i] = NULL;
+ }
+ }
+
+ /*
+	 * We may no longer use the <post_lock> mutex. By setting
+ * <rbr_state> to anything but POSTING, we prevent
+ * hxge_post_page() from accessing a dead mutex.
+ */
+ rbr_p->rbr_state = RBR_UNMAPPING;
+ MUTEX_DESTROY(&rbr_p->post_lock);
+
+ MUTEX_DESTROY(&rbr_p->lock);
+ KMEM_FREE(ring_info, sizeof (rxring_info_t));
+ KMEM_FREE(rx_msg_ring, size);
+
+ if (rbr_p->rbr_ref_cnt == 0) {
+ /* This is the normal state of affairs. */
+ KMEM_FREE(rbr_p, sizeof (*rbr_p));
+ } else {
+ /*
+ * Some of our buffers are still being used.
+ * Therefore, tell hxge_freeb() this ring is
+ * unmapped, so it may free <rbr_p> for us.
+ */
+ rbr_p->rbr_state = RBR_UNMAPPED;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "unmap_rxdma_buf_ring: %d %s outstanding.",
+ rbr_p->rbr_ref_cnt,
+ rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "<== hxge_unmap_rxdma_channel_buf_ring"));
+}
+
+static hxge_status_t
+hxge_rxdma_hw_start_common(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common"));
+
+ /*
+ * Load the sharable parameters by writing to the function zero control
+ * registers. These FZC registers should be initialized only once for
+ * the entire chip.
+ */
+ (void) hxge_init_fzc_rx_common(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common"));
+
+ return (status);
+}
+
+static hxge_status_t
+hxge_rxdma_hw_start(p_hxge_t hxgep)
+{
+ int i, ndmas;
+ uint16_t channel;
+ p_rx_rbr_rings_t rx_rbr_rings;
+ p_rx_rbr_ring_t *rbr_rings;
+ p_rx_rcr_rings_t rx_rcr_rings;
+ p_rx_rcr_ring_t *rcr_rings;
+ p_rx_mbox_areas_t rx_mbox_areas_p;
+ p_rx_mbox_t *rx_mbox_p;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start"));
+
+ rx_rbr_rings = hxgep->rx_rbr_rings;
+ rx_rcr_rings = hxgep->rx_rcr_rings;
+ if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_rxdma_hw_start: NULL ring pointers"));
+ return (HXGE_ERROR);
+ }
+
+ ndmas = rx_rbr_rings->ndmas;
+ if (ndmas == 0) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_rxdma_hw_start: no dma channel allocated"));
+ return (HXGE_ERROR);
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_hw_start (ndmas %d)", ndmas));
+
+ /*
+ * Scrub the RDC Rx DMA Prefetch Buffer Command.
+ */
+ for (i = 0; i < 128; i++) {
+ HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i);
+ }
+
+ /*
+ * Scrub Rx DMA Shadow Tail Command.
+ */
+ for (i = 0; i < 64; i++) {
+ HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i);
+ }
+
+ /*
+ * Scrub Rx DMA Control Fifo Command.
+ */
+ for (i = 0; i < 512; i++) {
+ HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i);
+ }
+
+ /*
+ * Scrub Rx DMA Data Fifo Command.
+ */
+ for (i = 0; i < 1536; i++) {
+ HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i);
+ }
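+
+	/*
+	 * The loop bounds above (128, 64, 512 and 1536 entries) are
+	 * assumed to match the depths of the respective internal RAMs
+	 * being scrubbed.
+	 */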
+
+ /*
+ * Reset the FIFO Error Stat.
+ */
+ HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF);
+
+ /* Set the error mask to receive interrupts */
+ HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
+
+ rbr_rings = rx_rbr_rings->rbr_rings;
+ rcr_rings = rx_rcr_rings->rcr_rings;
+ rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
+ if (rx_mbox_areas_p) {
+ rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
+ }
+
+ for (i = 0; i < ndmas; i++) {
+ channel = rbr_rings[i]->rdc;
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_hw_start (ndmas %d) channel %d",
+ ndmas, channel));
+ status = hxge_rxdma_start_channel(hxgep, channel,
+ (p_rx_rbr_ring_t)rbr_rings[i],
+ (p_rx_rcr_ring_t)rcr_rings[i],
+ (p_rx_mbox_t)rx_mbox_p[i]);
+ if (status != HXGE_OK) {
+ goto hxge_rxdma_hw_start_fail1;
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: "
+ "rx_rbr_rings 0x%016llx rings 0x%016llx",
+ rx_rbr_rings, rx_rcr_rings));
+ goto hxge_rxdma_hw_start_exit;
+
+hxge_rxdma_hw_start_fail1:
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rxdma_hw_start: disable "
+ "(status 0x%x channel %d i %d)", status, channel, i));
+ for (; i >= 0; i--) {
+ channel = rbr_rings[i]->rdc;
+ (void) hxge_rxdma_stop_channel(hxgep, channel);
+ }
+
+hxge_rxdma_hw_start_exit:
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_hw_start: (status 0x%x)", status));
+ return (status);
+}
+
+static void
+hxge_rxdma_hw_stop(p_hxge_t hxgep)
+{
+ int i, ndmas;
+ uint16_t channel;
+ p_rx_rbr_rings_t rx_rbr_rings;
+ p_rx_rbr_ring_t *rbr_rings;
+ p_rx_rcr_rings_t rx_rcr_rings;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop"));
+
+ rx_rbr_rings = hxgep->rx_rbr_rings;
+ rx_rcr_rings = hxgep->rx_rcr_rings;
+
+ if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_rxdma_hw_stop: NULL ring pointers"));
+ return;
+ }
+
+ ndmas = rx_rbr_rings->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "<== hxge_rxdma_hw_stop: no dma channel allocated"));
+ return;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas));
+
+ rbr_rings = rx_rbr_rings->rbr_rings;
+ for (i = 0; i < ndmas; i++) {
+ channel = rbr_rings[i]->rdc;
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_hw_stop (ndmas %d) channel %d",
+ ndmas, channel));
+ (void) hxge_rxdma_stop_channel(hxgep, channel);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: "
+ "rx_rbr_rings 0x%016llx rings 0x%016llx",
+ rx_rbr_rings, rx_rcr_rings));
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop"));
+}
+
+static hxge_status_t
+hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
+ p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+ rdc_stat_t cs;
+ rdc_int_mask_t ent_mask;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: "
+ "hpi handle addr $%p acc $%p",
+ hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
+
+ /* Reset RXDMA channel */
+ rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rxdma_start_channel: "
+ "reset rxdma failed (0x%08x channel %d)",
+		    rs, channel));
+ return (HXGE_ERROR | rs);
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_start_channel: reset done: channel %d", channel));
+
+ /*
+ * Initialize the RXDMA channel specific FZC control configurations.
+ * These FZC registers are pertaining to each RX channel (logical
+ * pages).
+ */
+ status = hxge_init_fzc_rxdma_channel(hxgep,
+ channel, rbr_p, rcr_p, mbox_p);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rxdma_start_channel: "
+ "init fzc rxdma failed (0x%08x channel %d)",
+ status, channel));
+ return (status);
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_start_channel: fzc done"));
+
+ /*
+ * Zero out the shadow and prefetch ram.
+ */
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_start_channel: ram done"));
+
+ /* Set up the interrupt event masks. */
+ ent_mask.value = 0;
+ rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rxdma_start_channel: "
+ "init rxdma event masks failed (0x%08x channel %d)",
+		    rs, channel));
+ return (HXGE_ERROR | rs);
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
+ "event done: channel %d (mask 0x%016llx)",
+ channel, ent_mask.value));
+
+ /*
+	 * Load the RXDMA descriptors, buffers and mailbox, then initialize
+	 * and enable the receive DMA channel.
+ */
+ status = hxge_enable_rxdma_channel(hxgep,
+ channel, rbr_p, rcr_p, mbox_p);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_rxdma_start_channel: "
+ " init enable rxdma failed (0x%08x channel %d)",
+ status, channel));
+ return (status);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
+ "control done - channel %d cs 0x%016llx", channel, cs.value));
+
+ /*
+ * Initialize the receive DMA control and status register
+ * Note that rdc_stat HAS to be set after RBR and RCR rings are set
+ */
+ cs.value = 0;
+ cs.bits.mex = 1;
+ cs.bits.rcr_thres = 1;
+ cs.bits.rcr_to = 1;
+ cs.bits.rbr_empty = 1;
+ status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
+ "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rxdma_start_channel: "
+ "init rxdma control register failed (0x%08x channel %d",
+ status, channel));
+ return (status);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
+ "control done - channel %d cs 0x%016llx", channel, cs.value));
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
+ "==> hxge_rxdma_start_channel: enable done"));
+ HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel"));
+
+ return (HXGE_OK);
+}
+
+static hxge_status_t
+hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+ rdc_stat_t cs;
+ rdc_int_mask_t ent_mask;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: "
+ "hpi handle addr $%p acc $%p",
+ hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
+
+ /* Reset RXDMA channel */
+ rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_rxdma_stop_channel: "
+ " reset rxdma failed (0x%08x channel %d)",
+ rs, channel));
+ return (HXGE_ERROR | rs);
+ }
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_rxdma_stop_channel: reset done"));
+
+ /* Set up the interrupt event masks. */
+ ent_mask.value = RDC_INT_MASK_ALL;
+ rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rxdma_stop_channel: "
+ "set rxdma event masks failed (0x%08x channel %d)",
+ rs, channel));
+ return (HXGE_ERROR | rs);
+ }
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_rxdma_stop_channel: event done"));
+
+ /* Initialize the receive DMA control and status register */
+ cs.value = 0;
+ status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control "
+ " to default (all 0s) 0x%08x", cs.value));
+
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_rxdma_stop_channel: init rxdma"
+ " control register failed (0x%08x channel %d",
+ status, channel));
+ return (status);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_rxdma_stop_channel: control done"));
+
+ /* disable dma channel */
+ status = hxge_disable_rxdma_channel(hxgep, channel);
+
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_rxdma_stop_channel: "
+ " init enable rxdma failed (0x%08x channel %d)",
+ status, channel));
+ return (status);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL,
+ "==> hxge_rxdma_stop_channel: disable done"));
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel"));
+
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_rxdma_handle_sys_errors(p_hxge_t hxgep)
+{
+ hpi_handle_t handle;
+ p_hxge_rdc_sys_stats_t statsp;
+ rdc_fifo_err_stat_t stat;
+ hxge_status_t status = HXGE_OK;
+
+ handle = hxgep->hpi_handle;
+ statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats;
+
+ /* Clear the int_dbg register in case it is an injected err */
+ HXGE_REG_WR64(handle, RDC_FIFO_ERR_INT_DBG, 0x0);
+
+ /* Get the error status and clear the register */
+ HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value);
+ HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value);
+
+ if (stat.bits.rx_ctrl_fifo_sec) {
+ statsp->ctrl_fifo_sec++;
+ if (statsp->ctrl_fifo_sec == 1)
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rxdma_handle_sys_errors: "
+ "rx_ctrl_fifo_sec"));
+ }
+
+ if (stat.bits.rx_ctrl_fifo_ded) {
+ /* Global fatal error encountered */
+ statsp->ctrl_fifo_ded++;
+ HXGE_FM_REPORT_ERROR(hxgep, NULL,
+ HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rxdma_handle_sys_errors: "
+ "fatal error: rx_ctrl_fifo_ded error"));
+ }
+
+ if (stat.bits.rx_data_fifo_sec) {
+ statsp->data_fifo_sec++;
+ if (statsp->data_fifo_sec == 1)
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rxdma_handle_sys_errors: "
+ "rx_data_fifo_sec"));
+ }
+
+ if (stat.bits.rx_data_fifo_ded) {
+ /* Global fatal error encountered */
+ statsp->data_fifo_ded++;
+ HXGE_FM_REPORT_ERROR(hxgep, NULL,
+ HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_rxdma_handle_sys_errors: "
+ "fatal error: rx_data_fifo_ded error"));
+ }
+
+ if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_rxdma_handle_sys_errors: fatal error\n"));
+ status = hxge_rx_port_fatal_err_recover(hxgep);
+ if (status == HXGE_OK) {
+ FM_SERVICE_RESTORED(hxgep);
+ }
+ }
+
+ return (HXGE_OK);
+}
+
+static hxge_status_t
+hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+ p_rx_rbr_ring_t rbrp;
+ p_rx_rcr_ring_t rcrp;
+ p_rx_mbox_t mboxp;
+ rdc_int_mask_t ent_mask;
+ p_hxge_dma_common_t dmap;
+ int ring_idx;
+ uint32_t ref_cnt;
+ p_rx_msg_t rx_msg_p;
+ int i;
+ uint32_t hxge_port_rcr_size;
+ uint64_t tmp;
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover"));
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Recovering from RxDMAChannel#%d error...", channel));
+
+ /*
+	 * Stop the DMA channel and wait for the stop-done indication. If
+	 * the stop-done bit is not set, flag an error.
+ */
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop..."));
+
+ ring_idx = hxge_rxdma_get_ring_index(hxgep, channel);
+ rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[ring_idx];
+ rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[ring_idx];
+
+ MUTEX_ENTER(&rcrp->lock);
+ MUTEX_ENTER(&rbrp->lock);
+ MUTEX_ENTER(&rbrp->post_lock);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel..."));
+
+ rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_disable_rxdma_channel:failed"));
+ goto fail;
+ }
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt..."));
+
+ /* Disable interrupt */
+ ent_mask.value = RDC_INT_MASK_ALL;
+ rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Set rxdma event masks failed (channel %d)", channel));
+ }
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset..."));
+
+ /* Reset RXDMA channel */
+ rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Reset rxdma failed (channel %d)", channel));
+ goto fail;
+ }
+ hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
+ mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];
+
+ rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
+ rbrp->rbr_rd_index = 0;
+ rbrp->pages_to_post = 0;
+
+ rcrp->comp_rd_index = 0;
+ rcrp->comp_wt_index = 0;
+ rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
+ (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
+ rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
+ (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
+
+ rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
+ (hxge_port_rcr_size - 1);
+ rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
+ (hxge_port_rcr_size - 1);
+
+ dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
+ bzero((caddr_t)dmap->kaddrp, dmap->alength);
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n",
+ rbrp->rbr_max_size));
+
+ for (i = 0; i < rbrp->rbr_max_size; i++) {
+ /* Reset all the buffers */
+ rx_msg_p = rbrp->rx_msg_ring[i];
+ ref_cnt = rx_msg_p->ref_cnt;
+
+ rx_msg_p->ref_cnt = 1;
+ rx_msg_p->free = B_TRUE;
+ rx_msg_p->cur_usage_cnt = 0;
+ rx_msg_p->max_usage_cnt = 0;
+ rx_msg_p->pkt_buf_size = 0;
+
+ if (ref_cnt > 1)
+ atomic_add_32(&hxge_mblks_pending, 1 - ref_cnt);
+ }
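+
+	/*
+	 * After the reset each buffer holds exactly one reference, so the
+	 * atomic_add_32(..., 1 - ref_cnt) above retires the extra
+	 * hxge_mblks_pending counts taken when buffers were loaned up.
+	 */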
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start..."));
+
+ status = hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp);
+ if (status != HXGE_OK) {
+ goto fail;
+ }
+
+ /*
+ * The DMA channel may disable itself automatically.
+ * The following is a work-around.
+ */
+ HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp);
+ rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hpi_rxdma_cfg_rdc_enable (channel %d)", channel));
+ }
+
+ MUTEX_EXIT(&rbrp->post_lock);
+ MUTEX_EXIT(&rbrp->lock);
+ MUTEX_EXIT(&rcrp->lock);
+
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Recovery Successful, RxDMAChannel#%d Restored", channel));
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover"));
+
+ return (HXGE_OK);
+
+fail:
+ MUTEX_EXIT(&rbrp->post_lock);
+ MUTEX_EXIT(&rbrp->lock);
+ MUTEX_EXIT(&rcrp->lock);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
+
+ return (HXGE_ERROR | rs);
+}
+
+static hxge_status_t
+hxge_rx_port_fatal_err_recover(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+ p_hxge_dma_common_t *dma_buf_p;
+ uint16_t channel;
+ int ndmas;
+ int i;
+ block_reset_t reset_reg;
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover"));
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ..."));
+
+ /* Reset RDC block from PEU for this fatal error */
+ reset_reg.value = 0;
+ reset_reg.bits.rdc_rst = 1;
+ HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
+
+ /* Disable RxMAC */
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n"));
+ if (hxge_rx_vmac_disable(hxgep) != HXGE_OK)
+ goto fail;
+
+ HXGE_DELAY(1000);
+
+ /* Restore any common settings after PEU reset */
+ if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK)
+ goto fail;
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "Stop all RxDMA channels..."));
+
+ ndmas = hxgep->rx_buf_pool_p->ndmas;
+ dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p;
+
+ for (i = 0; i < ndmas; i++) {
+ channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
+ if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Could not recover channel %d", channel));
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC..."));
+
+ /* Reset RxMAC */
+ if (hxge_rx_vmac_reset(hxgep) != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
+ goto fail;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC..."));
+
+ /* Re-Initialize RxMAC */
+ if ((status = hxge_rx_vmac_init(hxgep)) != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
+ goto fail;
+ }
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC..."));
+
+ /* Re-enable RxMAC */
+ if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC"));
+ goto fail;
+ }
+
+ /* Reset the error mask since PEU reset cleared it */
+ HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
+
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Recovery Successful, RxPort Restored"));
+ HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover"));
+
+ return (HXGE_OK);
+fail:
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
+ return (status);
+}
diff --git a/usr/src/uts/common/io/hxge/hxge_rxdma.h b/usr/src/uts/common/io/hxge/hxge_rxdma.h
new file mode 100644
index 0000000000..34f7b6920b
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_rxdma.h
@@ -0,0 +1,484 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_RXDMA_H
+#define _SYS_HXGE_HXGE_RXDMA_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hxge_rdc_hw.h>
+#include <hpi_rxdma.h>
+
+#define RXDMA_CK_DIV_DEFAULT 7500 /* 25 usec */
+#define RXDMA_RCR_PTHRES_DEFAULT 0x20
+#define RXDMA_RCR_TO_DEFAULT 0x8
+#define RXDMA_HDR_SIZE_DEFAULT 2
+#define RXDMA_HDR_SIZE_FULL 6 /* entire header of 6B */
+
+/*
+ * Receive Completion Ring (RCR)
+ */
+#define RCR_PKT_BUF_ADDR_SHIFT 0 /* bit 37:0 */
+#define	RCR_PKT_BUF_ADDR_SHIFT_FULL	6	/* full buffer address */
+#define RCR_PKT_BUF_ADDR_MASK 0x0000003FFFFFFFFFULL
+#define RCR_PKTBUFSZ_SHIFT 38 /* bit 39:38 */
+#define RCR_PKTBUFSZ_MASK 0x000000C000000000ULL
+#define RCR_L2_LEN_SHIFT 40 /* bit 53:40 */
+#define RCR_L2_LEN_MASK 0x003fff0000000000ULL
+#define RCR_ERROR_SHIFT 54 /* bit 57:54 */
+#define RCR_ERROR_MASK 0x03C0000000000000ULL
+#define RCR_PKT_TYPE_SHIFT 61 /* bit 62:61 */
+#define RCR_PKT_TYPE_MASK 0x6000000000000000ULL
+#define RCR_MULTI_SHIFT 63 /* bit 63 */
+#define RCR_MULTI_MASK 0x8000000000000000ULL
+
+#define RCR_PKTBUFSZ_0 0x00
+#define RCR_PKTBUFSZ_1 0x01
+#define RCR_PKTBUFSZ_2 0x02
+#define RCR_SINGLE_BLOCK 0x03
+
+#define RCR_NO_ERROR 0x0
+#define RCR_CTRL_FIFO_DED 0x1
+#define RCR_DATA_FIFO_DED 0x2
+#define RCR_ERROR_RESERVE 0x4
+
+#define RCR_PKT_IS_TCP 0x2000000000000000ULL
+#define RCR_PKT_IS_UDP 0x4000000000000000ULL
+#define RCR_PKT_IS_SCTP 0x6000000000000000ULL
+
+#define RDC_INT_MASK_RBRFULL_SHIFT 34
+#define RDC_INT_MASK_RBRFULL_MASK 0x0000000400000000ULL
+#define RDC_INT_MASK_RBREMPTY_SHIFT 35
+#define RDC_INT_MASK_RBREMPTY_MASK 0x0000000800000000ULL
+#define RDC_INT_MASK_RCRFULL_SHIFT 36
+#define RDC_INT_MASK_RCRFULL_MASK 0x0000001000000000ULL
+#define RDC_INT_MASK_RCRSH_FULL_SHIFT 39
+#define RDC_INT_MASK_RCRSH_FULL_MASK 0x0000008000000000ULL
+#define RDC_INT_MASK_RBR_PRE_EMPTY_SHIFT 40
+#define RDC_INT_MASK_RBR_PRE_EMPTY_MASK 0x0000010000000000ULL
+#define RDC_INT_MASK_RBR_PRE_PAR_SHIFT 43
+#define RDC_INT_MASK_RBR_PRE_PAR_MASK 0x0000080000000000ULL
+#define RDC_INT_MASK_RCR_SHA_PAR_SHIFT 44
+#define RDC_INT_MASK_RCR_SHA_PAR_MASK 0x0000100000000000ULL
+#define RDC_INT_MASK_RCRTO_SHIFT 45
+#define RDC_INT_MASK_RCRTO_MASK 0x0000200000000000ULL
+#define RDC_INT_MASK_THRES_SHIFT 46
+#define RDC_INT_MASK_THRES_MASK 0x0000400000000000ULL
+#define RDC_INT_MASK_PEU_ERR_SHIFT 52
+#define RDC_INT_MASK_PEU_ERR_MASK 0x0010000000000000ULL
+#define RDC_INT_MASK_RBR_CPL_SHIFT 53
+#define RDC_INT_MASK_RBR_CPL_MASK 0x0020000000000000ULL
+#define RDC_INT_MASK_ALL (RDC_INT_MASK_RBRFULL_MASK | \
+ RDC_INT_MASK_RBREMPTY_MASK | \
+ RDC_INT_MASK_RCRFULL_MASK | \
+ RDC_INT_MASK_RCRSH_FULL_MASK | \
+ RDC_INT_MASK_RBR_PRE_EMPTY_MASK | \
+ RDC_INT_MASK_RBR_PRE_PAR_MASK | \
+ RDC_INT_MASK_RCR_SHA_PAR_MASK | \
+ RDC_INT_MASK_RCRTO_MASK | \
+ RDC_INT_MASK_THRES_MASK | \
+ RDC_INT_MASK_PEU_ERR_MASK | \
+ RDC_INT_MASK_RBR_CPL_MASK)
+
+#define RDC_STAT_PKTREAD_SHIFT 0 /* WO, bit 15:0 */
+#define RDC_STAT_PKTREAD_MASK 0x000000000000ffffULL
+#define RDC_STAT_PTRREAD_SHIFT 16 /* WO, bit 31:16 */
+#define RDC_STAT_PTRREAD_MASK 0x00000000FFFF0000ULL
+
+#define RDC_STAT_RBRFULL_SHIFT 34 /* RO, bit 34 */
+#define RDC_STAT_RBRFULL 0x0000000400000000ULL
+#define RDC_STAT_RBRFULL_MASK 0x0000000400000000ULL
+#define RDC_STAT_RBREMPTY_SHIFT 35 /* RW1C, bit 35 */
+#define RDC_STAT_RBREMPTY 0x0000000800000000ULL
+#define RDC_STAT_RBREMPTY_MASK 0x0000000800000000ULL
+#define RDC_STAT_RCR_FULL_SHIFT 36 /* RW1C, bit 36 */
+#define RDC_STAT_RCR_FULL 0x0000001000000000ULL
+#define RDC_STAT_RCR_FULL_MASK 0x0000001000000000ULL
+
+#define RDC_STAT_RCR_SHDW_FULL_SHIFT 39 /* RW1C, bit 39 */
+#define RDC_STAT_RCR_SHDW_FULL 0x0000008000000000ULL
+#define RDC_STAT_RCR_SHDW_FULL_MASK 0x0000008000000000ULL
+#define RDC_STAT_RBR_PRE_EMPTY_SHIFT 40 /* RO, bit 40 */
+#define RDC_STAT_RBR_PRE_EMPTY 0x0000010000000000ULL
+#define RDC_STAT_RBR_PRE_EMPTY_MASK 0x0000010000000000ULL
+
+#define RDC_STAT_RBR_PRE_PAR_SHIFT 43 /* RO, bit 43 */
+#define RDC_STAT_RBR_PRE_PAR 0x0000080000000000ULL
+#define RDC_STAT_RBR_PRE_PAR_MASK 0x0000080000000000ULL
+#define RDC_STAT_RCR_SHA_PAR_SHIFT 44 /* RO, bit 44 */
+#define RDC_STAT_RCR_SHA_PAR 0x0000100000000000ULL
+#define RDC_STAT_RCR_SHA_PAR_MASK 0x0000100000000000ULL
+
+#define RDC_STAT_RCR_TO_SHIFT 45 /* RW1C, bit 45 */
+#define RDC_STAT_RCR_TO 0x0000200000000000ULL
+#define RDC_STAT_RCR_TO_MASK 0x0000200000000000ULL
+#define RDC_STAT_RCR_THRES_SHIFT 46 /* RO, bit 46 */
+#define RDC_STAT_RCR_THRES 0x0000400000000000ULL
+#define RDC_STAT_RCR_THRES_MASK 0x0000400000000000ULL
+#define RDC_STAT_RCR_MEX_SHIFT 47 /* RW, bit 47 */
+#define RDC_STAT_RCR_MEX 0x0000800000000000ULL
+#define RDC_STAT_RCR_MEX_MASK 0x0000800000000000ULL
+
+#define RDC_STAT_PEU_ERR_SHIFT 52 /* RO, bit 52 */
+#define RDC_STAT_PEU_ERR 0x0010000000000000ULL
+#define RDC_STAT_PEU_ERR_MASK 0x0010000000000000ULL
+
+#define RDC_STAT_RBR_CPL_SHIFT 53 /* RO, bit 53 */
+#define RDC_STAT_RBR_CPL 0x0020000000000000ULL
+#define RDC_STAT_RBR_CPL_MASK 0x0020000000000000ULL
+
+#define RDC_STAT_ERROR RDC_INT_MASK_ALL
+
+/* the following are write 1 to clear bits */
+#define RDC_STAT_WR1C (RDC_STAT_RBREMPTY | \
+ RDC_STAT_RCR_SHDW_FULL | \
+ RDC_STAT_RBR_PRE_EMPTY | \
+ RDC_STAT_RBR_PRE_PAR | \
+ RDC_STAT_RCR_SHA_PAR | \
+ RDC_STAT_RCR_TO | \
+ RDC_STAT_RCR_THRES | \
+ RDC_STAT_RBR_CPL | \
+ RDC_STAT_PEU_ERR)
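+
+/*
+ * Typical use of RDC_STAT_WR1C, mirroring hxge_rx_err_evnts() in
+ * hxge_rxdma.c: write back only the W1C bits of the latched status so
+ * the remaining fields are left untouched.
+ *
+ *	cs_val = cs.value & RDC_STAT_WR1C;
+ *	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_val);
+ */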
+
+typedef union _rcr_entry_t {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t multi:1;
+ uint64_t pkt_type:2;
+ uint64_t reserved:3;
+ uint64_t error:4;
+ uint64_t l2_len:14;
+ uint64_t pktbufsz:2;
+ uint64_t pkt_buf_addr:38;
+#else
+ uint64_t pkt_buf_addr:38;
+ uint64_t pktbufsz:2;
+ uint64_t l2_len:14;
+ uint64_t error:4;
+ uint64_t reserved:3;
+ uint64_t pkt_type:2;
+ uint64_t multi:1;
+#endif
+ } bits;
+} rcr_entry_t, *p_rcr_entry_t;
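+
+/*
+ * Decoding sketch (illustration only, not driver code): the same fields
+ * can be recovered from a raw 64-bit completion entry with the masks
+ * and shifts defined above.
+ *
+ *	uint64_t v = entry.value;
+ *	uint16_t l2_len = (v & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT;
+ *	uint8_t bufsz = (v & RCR_PKTBUFSZ_MASK) >> RCR_PKTBUFSZ_SHIFT;
+ *	boolean_t multi = (v & RCR_MULTI_MASK) != 0;
+ *	uint64_t addr = (v & RCR_PKT_BUF_ADDR_MASK) <<
+ *	    RCR_PKT_BUF_ADDR_SHIFT_FULL;
+ *
+ * where the final shift rebuilds the full 64-byte-aligned buffer
+ * address from the packed 38-bit field.
+ */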
+
+#define RX_DMA_MAILBOX_BYTE_LENGTH 64
+#define RX_DMA_MBOX_UNUSED_1 8
+#define RX_DMA_MBOX_UNUSED_2 16
+
+typedef struct _rxdma_mailbox_t {
+ rdc_stat_t rxdma_ctl_stat; /* 8 bytes */
+ rdc_rbr_qlen_t rbr_stat; /* 8 bytes */
+ rdc_rbr_head_t rbr_hdh; /* 8 bytes */
+ uint8_t resv_1[RX_DMA_MBOX_UNUSED_1];
+ rdc_rcr_tail_t rcrstat_c; /* 8 bytes */
+ uint8_t resv_2[RX_DMA_MBOX_UNUSED_1];
+ rdc_rcr_qlen_t rcrstat_a; /* 8 bytes */
+ uint8_t resv_3[RX_DMA_MBOX_UNUSED_1];
+} rxdma_mailbox_t, *p_rxdma_mailbox_t;
+
+/*
+ * hardware workarounds: kick 16 (was 8 before)
+ */
+#define HXGE_RXDMA_POST_BATCH 16
+
+#define	RXBUF_START_ADDR(a, index, bsize)	((a) & ((index) * (bsize)))
+#define RXBUF_OFFSET_FROM_START(a, start) (start - a)
+#define RXBUF_64B_ALIGNED 64
+
+#define HXGE_RXBUF_EXTRA 34
+
+/*
+ * Receive buffer thresholds and buffer types
+ */
+#define HXGE_RX_BCOPY_SCALE 8 /* use 1/8 as lowest granularity */
+
+typedef enum {
+ HXGE_RX_COPY_ALL = 0, /* do bcopy on every packet */
+ HXGE_RX_COPY_1, /* bcopy on 1/8 of buffer posted */
+ HXGE_RX_COPY_2, /* bcopy on 2/8 of buffer posted */
+ HXGE_RX_COPY_3, /* bcopy on 3/8 of buffer posted */
+ HXGE_RX_COPY_4, /* bcopy on 4/8 of buffer posted */
+ HXGE_RX_COPY_5, /* bcopy on 5/8 of buffer posted */
+ HXGE_RX_COPY_6, /* bcopy on 6/8 of buffer posted */
+ HXGE_RX_COPY_7, /* bcopy on 7/8 of buffer posted */
+ HXGE_RX_COPY_NONE /* don't do bcopy at all */
+} hxge_rxbuf_threshold_t;
+
+typedef enum {
+ HXGE_RBR_TYPE0 = RCR_PKTBUFSZ_0, /* bcopy buffer size 0 (small) */
+ HXGE_RBR_TYPE1 = RCR_PKTBUFSZ_1, /* bcopy buffer size 1 (medium) */
+ HXGE_RBR_TYPE2 = RCR_PKTBUFSZ_2 /* bcopy buffer size 2 (large) */
+} hxge_rxbuf_type_t;
+
+typedef struct _rdc_errlog {
+ rdc_pref_par_log_t pre_par;
+ rdc_pref_par_log_t sha_par;
+ uint8_t compl_err_type;
+} rdc_errlog_t;
+
+/*
+ * Receive Statistics.
+ */
+typedef struct _hxge_rx_ring_stats_t {
+ uint64_t ipackets;
+ uint64_t ibytes;
+ uint32_t ierrors;
+ uint32_t jumbo_pkts;
+
+ /*
+ * Error event stats.
+ */
+ uint32_t rcr_unknown_err;
+ uint32_t ctrl_fifo_ecc_err;
+ uint32_t data_fifo_ecc_err;
+ uint32_t rbr_tmout; /* rbr_cpl_to */
+ uint32_t peu_resp_err; /* peu_resp_err */
+ uint32_t rcr_sha_par; /* rcr_shadow_par_err */
+ uint32_t rbr_pre_par; /* rbr_prefetch_par_err */
+ uint32_t rbr_pre_empty; /* rbr_pre_empty */
+ uint32_t rcr_shadow_full; /* rcr_shadow_full */
+ uint32_t rcrfull; /* rcr_full */
+ uint32_t rbr_empty; /* rbr_empty */
+ uint32_t rbrfull; /* rbr_full */
+ uint32_t rcr_to; /* rcr_to */
+ uint32_t rcr_thres; /* rcr_thres */
+ rdc_errlog_t errlog;
+} hxge_rx_ring_stats_t, *p_hxge_rx_ring_stats_t;
+
+typedef struct _hxge_rdc_sys_stats {
+ uint32_t ctrl_fifo_sec;
+ uint32_t ctrl_fifo_ded;
+ uint32_t data_fifo_sec;
+ uint32_t data_fifo_ded;
+} hxge_rdc_sys_stats_t, *p_hxge_rdc_sys_stats_t;
+
+typedef struct _rx_msg_t {
+ hxge_os_dma_common_t buf_dma;
+ hxge_os_mutex_t lock;
+ struct _hxge_t *hxgep;
+ struct _rx_rbr_ring_t *rx_rbr_p;
+ boolean_t free;
+ uint32_t ref_cnt;
+ hxge_os_frtn_t freeb;
+ size_t block_size;
+ uint32_t block_index;
+ uint32_t pkt_buf_size;
+ uint32_t pkt_buf_size_code;
+ uint32_t cur_usage_cnt;
+ uint32_t max_usage_cnt;
+ uchar_t *buffer;
+ uint32_t pri;
+ uint32_t shifted_addr;
+ boolean_t use_buf_pool;
+ p_mblk_t rx_mblk_p;
+ boolean_t rx_use_bcopy;
+} rx_msg_t, *p_rx_msg_t;
+
+/* Receive Completion Ring */
+typedef struct _rx_rcr_ring_t {
+ hxge_os_dma_common_t rcr_desc;
+ struct _hxge_t *hxgep;
+
+ p_hxge_rx_ring_stats_t rdc_stats; /* pointer to real kstats */
+
+ rdc_rcr_cfg_a_t rcr_cfga;
+ rdc_rcr_cfg_b_t rcr_cfgb;
+
+ hxge_os_mutex_t lock;
+ uint16_t index;
+ uint16_t rdc;
+ boolean_t full_hdr_flag; /* 1: 18 bytes header */
+ uint16_t sw_priv_hdr_len; /* 0 - 192 bytes (SW) */
+ uint32_t comp_size; /* # of RCR entries */
+ uint64_t rcr_addr;
+ uint_t comp_wrap_mask;
+ uint_t comp_rd_index;
+ uint_t comp_wt_index;
+
+ p_rcr_entry_t rcr_desc_first_p;
+ p_rcr_entry_t rcr_desc_first_pp;
+ p_rcr_entry_t rcr_desc_last_p;
+ p_rcr_entry_t rcr_desc_last_pp;
+
+ p_rcr_entry_t rcr_desc_rd_head_p; /* software next read */
+ p_rcr_entry_t rcr_desc_rd_head_pp;
+
+ struct _rx_rbr_ring_t *rx_rbr_p;
+ uint32_t intr_timeout;
+ uint32_t intr_threshold;
+ uint64_t max_receive_pkts;
+ mac_resource_handle_t rcr_mac_handle;
+ uint32_t rcvd_pkt_bytes; /* Received bytes of a packet */
+} rx_rcr_ring_t, *p_rx_rcr_ring_t;
+
+
+/* Buffer index information */
+typedef struct _rxbuf_index_info_t {
+ uint32_t buf_index;
+ uint32_t start_index;
+ uint32_t buf_size;
+ uint64_t dvma_addr;
+ uint64_t kaddr;
+} rxbuf_index_info_t, *p_rxbuf_index_info_t;
+
+/* Receive ring information */
+
+typedef struct _rxring_info_t {
+ uint32_t hint[3];
+ uint32_t block_size_mask;
+ uint16_t max_iterations;
+ rxbuf_index_info_t buffer[HXGE_DMA_BLOCK];
+} rxring_info_t, *p_rxring_info_t;
+
+
+typedef enum {
+ RBR_POSTING = 1, /* We may post rx buffers. */
+ RBR_UNMAPPING, /* We are in the process of unmapping. */
+ RBR_UNMAPPED /* The ring is unmapped. */
+} rbr_state_t;
+
+
+/* Receive Buffer Block Ring */
+typedef struct _rx_rbr_ring_t {
+ hxge_os_dma_common_t rbr_desc;
+ p_rx_msg_t *rx_msg_ring;
+ p_hxge_dma_common_t *dma_bufp;
+ rdc_rbr_cfg_a_t rbr_cfga;
+ rdc_rbr_cfg_b_t rbr_cfgb;
+ rdc_rbr_kick_t rbr_kick;
+ rdc_page_handle_t page_hdl;
+
+ hxge_os_mutex_t lock;
+ hxge_os_mutex_t post_lock;
+ uint16_t index;
+ struct _hxge_t *hxgep;
+ uint16_t rdc;
+ uint_t rbr_max_size;
+ uint64_t rbr_addr;
+ uint_t rbr_wrap_mask;
+ uint_t rbb_max;
+ uint_t block_size;
+ uint_t num_blocks;
+ uint_t tnblocks;
+ uint_t pkt_buf_size0;
+ uint_t pkt_buf_size0_bytes;
+ uint_t hpi_pkt_buf_size0;
+ uint_t pkt_buf_size1;
+ uint_t pkt_buf_size1_bytes;
+ uint_t hpi_pkt_buf_size1;
+ uint_t pkt_buf_size2;
+ uint_t pkt_buf_size2_bytes;
+ uint_t hpi_pkt_buf_size2;
+
+ uint64_t rbr_head_pp;
+ uint64_t rbr_tail_pp;
+ uint32_t *rbr_desc_vp;
+
+ p_rx_rcr_ring_t rx_rcr_p;
+
+ rdc_rbr_head_t rbr_head;
+ uint_t rbr_wr_index;
+ uint_t rbr_rd_index;
+ uint_t rbr_hw_head_index;
+ uint64_t rbr_hw_head_ptr;
+
+ rxring_info_t *ring_info;
+ uint_t rbr_consumed;
+ uint_t rbr_threshold_hi;
+ uint_t rbr_threshold_lo;
+ hxge_rxbuf_type_t rbr_bufsize_type;
+ boolean_t rbr_use_bcopy;
+
+ /*
+ * <rbr_ref_cnt> is a count of those receive buffers which
+ * have been loaned to the kernel. We will not free this
+ * ring until the reference count reaches zero (0).
+ */
+ uint32_t rbr_ref_cnt;
+ rbr_state_t rbr_state; /* POSTING, etc */
+
+ int pages_to_post;
+ int pages_to_post_threshold;
+ int pages_to_skip;
+} rx_rbr_ring_t, *p_rx_rbr_ring_t;
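+
+/*
+ * Illustrative sketch (not part of the original source): how
+ * <rbr_ref_cnt> is typically balanced.  Each buffer loaned upstream
+ * carries a freeb callback; when the last loaned buffer returns and
+ * the ring has been unmapped, the ring itself may be freed.  The
+ * function names are hypothetical.
+ *
+ *	void
+ *	hxge_rx_buf_freeb(p_rx_msg_t rx_msg_p)
+ *	{
+ *		p_rx_rbr_ring_t ring = rx_msg_p->rx_rbr_p;
+ *
+ *		if (atomic_add_32_nv(&ring->rbr_ref_cnt, -1) == 0 &&
+ *		    ring->rbr_state == RBR_UNMAPPED)
+ *			hxge_rbr_ring_free(ring);
+ *	}
+ */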
+
+/* Receive Mailbox */
+typedef struct _rx_mbox_t {
+ hxge_os_dma_common_t rx_mbox;
+ rdc_rx_cfg1_t rx_cfg1;
+ rdc_rx_cfg2_t rx_cfg2;
+ uint64_t mbox_addr;
+ boolean_t cfg_set;
+
+ hxge_os_mutex_t lock;
+ uint16_t index;
+ struct _hxge_t *hxgep;
+ uint16_t rdc;
+} rx_mbox_t, *p_rx_mbox_t;
+
+typedef struct _rx_rbr_rings_t {
+ p_rx_rbr_ring_t *rbr_rings;
+ uint32_t ndmas;
+ boolean_t rxbuf_allocated;
+} rx_rbr_rings_t, *p_rx_rbr_rings_t;
+
+typedef struct _rx_rcr_rings_t {
+ p_rx_rcr_ring_t *rcr_rings;
+ uint32_t ndmas;
+ boolean_t cntl_buf_allocated;
+} rx_rcr_rings_t, *p_rx_rcr_rings_t;
+
+typedef struct _rx_mbox_areas_t {
+ p_rx_mbox_t *rxmbox_areas;
+ uint32_t ndmas;
+ boolean_t mbox_allocated;
+} rx_mbox_areas_t, *p_rx_mbox_areas_t;
+
+/*
+ * Receive DMA Prototypes.
+ */
+hxge_status_t hxge_init_rxdma_channels(p_hxge_t hxgep);
+void hxge_uninit_rxdma_channels(p_hxge_t hxgep);
+hxge_status_t hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep,
+ uint16_t channel, rdc_stat_t *cs_p);
+hxge_status_t hxge_enable_rxdma_channel(p_hxge_t hxgep,
+ uint16_t channel, p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p,
+ p_rx_mbox_t mbox_p);
+hxge_status_t hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable);
+int hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel);
+hxge_status_t hxge_rxdma_handle_sys_errors(p_hxge_t hxgep);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_RXDMA_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_send.c b/usr/src/uts/common/io/hxge/hxge_send.c
new file mode 100644
index 0000000000..1b624b84f7
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_send.c
@@ -0,0 +1,1014 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hxge_impl.h>
+
+extern uint32_t hxge_reclaim_pending;
+extern uint32_t hxge_bcopy_thresh;
+extern uint32_t hxge_dvma_thresh;
+extern uint32_t hxge_dma_stream_thresh;
+extern uint32_t hxge_tx_minfree;
+extern uint32_t hxge_tx_intr_thres;
+extern uint32_t hxge_tx_max_gathers;
+extern uint32_t hxge_tx_tiny_pack;
+extern uint32_t hxge_tx_use_bcopy;
+extern uint32_t hxge_tx_lb_policy;
+extern uint32_t hxge_no_tx_lb;
+
+typedef struct _mac_tx_hint {
+ uint16_t sap;
+ uint16_t vid;
+ void *hash;
+} mac_tx_hint_t, *p_mac_tx_hint_t;
+
+int hxge_tx_lb_ring(p_mblk_t, uint32_t, p_mac_tx_hint_t);
+
+int
+hxge_start(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, p_mblk_t mp)
+{
+ int status = 0;
+ p_tx_desc_t tx_desc_ring_vp;
+ hpi_handle_t hpi_desc_handle;
+ hxge_os_dma_handle_t tx_desc_dma_handle;
+ p_tx_desc_t tx_desc_p;
+ p_tx_msg_t tx_msg_ring;
+ p_tx_msg_t tx_msg_p;
+ tx_desc_t tx_desc, *tmp_desc_p;
+ tx_desc_t sop_tx_desc, *sop_tx_desc_p;
+ p_tx_pkt_header_t hdrp;
+ p_tx_pkt_hdr_all_t pkthdrp;
+ uint8_t npads = 0;
+ uint64_t dma_ioaddr;
+ uint32_t dma_flags;
+ int last_bidx;
+ uint8_t *b_rptr;
+ caddr_t kaddr;
+ uint32_t nmblks;
+ uint32_t ngathers;
+ uint32_t clen;
+ int len;
+ uint32_t pkt_len, pack_len, min_len;
+ uint32_t bcopy_thresh;
+ int i, cur_index, sop_index;
+ uint16_t tail_index;
+ boolean_t tail_wrap = B_FALSE;
+ hxge_dma_common_t desc_area;
+ hxge_os_dma_handle_t dma_handle;
+ ddi_dma_cookie_t dma_cookie;
+ hpi_handle_t hpi_handle;
+ p_mblk_t nmp;
+ p_mblk_t t_mp;
+ uint32_t ncookies;
+ boolean_t good_packet;
+ boolean_t mark_mode = B_FALSE;
+ p_hxge_stats_t statsp;
+ p_hxge_tx_ring_stats_t tdc_stats;
+ t_uscalar_t start_offset = 0;
+ t_uscalar_t stuff_offset = 0;
+ t_uscalar_t end_offset = 0;
+ t_uscalar_t value = 0;
+ t_uscalar_t cksum_flags = 0;
+ boolean_t cksum_on = B_FALSE;
+ uint32_t boff = 0;
+ uint64_t tot_xfer_len = 0, tmp_len = 0;
+ boolean_t header_set = B_FALSE;
+ tdc_tdr_kick_t kick;
+#ifdef HXGE_DEBUG
+ p_tx_desc_t tx_desc_ring_pp;
+ p_tx_desc_t tx_desc_pp;
+ tx_desc_t *save_desc_p;
+ int dump_len;
+ int sad_len;
+ uint64_t sad;
+ int xfer_len;
+ uint32_t msgsize;
+#endif
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: tx dma channel %d", tx_ring_p->tdc));
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: Starting tdc %d desc pending %d",
+ tx_ring_p->tdc, tx_ring_p->descs_pending));
+
+ statsp = hxgep->statsp;
+
+ if (hxgep->statsp->port_stats.lb_mode == hxge_lb_normal) {
+ if (!statsp->mac_stats.link_up) {
+ freemsg(mp);
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: "
+			    "link is down"));
+ goto hxge_start_fail1;
+ }
+ }
+
+ hcksum_retrieve(mp, NULL, NULL, &start_offset,
+ &stuff_offset, &end_offset, &value, &cksum_flags);
+ if (!HXGE_IS_VLAN_PACKET(mp->b_rptr)) {
+ start_offset += sizeof (ether_header_t);
+ stuff_offset += sizeof (ether_header_t);
+ } else {
+ start_offset += sizeof (struct ether_vlan_header);
+ stuff_offset += sizeof (struct ether_vlan_header);
+ }
+
+ if (cksum_flags & HCK_PARTIALCKSUM) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: mp $%p len %d "
+ "cksum_flags 0x%x (partial checksum) ",
+ mp, MBLKL(mp), cksum_flags));
+ cksum_on = B_TRUE;
+ }
+
+ MUTEX_ENTER(&tx_ring_p->lock);
+start_again:
+ ngathers = 0;
+ sop_index = tx_ring_p->wr_index;
+#ifdef HXGE_DEBUG
+ if (tx_ring_p->descs_pending) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: desc pending %d ",
+ tx_ring_p->descs_pending));
+ }
+
+ dump_len = (int)(MBLKL(mp));
+ dump_len = (dump_len > 128) ? 128: dump_len;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: tdc %d: dumping ...: b_rptr $%p "
+ "(Before header reserve: ORIGINAL LEN %d)",
+ tx_ring_p->tdc, mp->b_rptr, dump_len));
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: dump packets (IP ORIGINAL b_rptr $%p): %s",
+ mp->b_rptr, hxge_dump_packet((char *)mp->b_rptr, dump_len)));
+#endif
+
+ tdc_stats = tx_ring_p->tdc_stats;
+ mark_mode = (tx_ring_p->descs_pending &&
+ ((tx_ring_p->tx_ring_size - tx_ring_p->descs_pending) <
+ hxge_tx_minfree));
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "TX Descriptor ring is channel %d mark mode %d",
+ tx_ring_p->tdc, mark_mode));
+
+ if (!hxge_txdma_reclaim(hxgep, tx_ring_p, hxge_tx_minfree)) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "TX Descriptor ring is full: channel %d", tx_ring_p->tdc));
+ cas32((uint32_t *)&tx_ring_p->queueing, 0, 1);
+ tdc_stats->tx_no_desc++;
+ MUTEX_EXIT(&tx_ring_p->lock);
+ if (hxgep->resched_needed && !hxgep->resched_running) {
+ hxgep->resched_running = B_TRUE;
+ ddi_trigger_softintr(hxgep->resched_id);
+ }
+ status = 1;
+ goto hxge_start_fail1;
+ }
+
+ nmp = mp;
+ i = sop_index = tx_ring_p->wr_index;
+ nmblks = 0;
+ ngathers = 0;
+ pkt_len = 0;
+ pack_len = 0;
+ clen = 0;
+ last_bidx = -1;
+ good_packet = B_TRUE;
+
+ desc_area = tx_ring_p->tdc_desc;
+ hpi_handle = desc_area.hpi_handle;
+ hpi_desc_handle.regh = (hxge_os_acc_handle_t)
+ DMA_COMMON_ACC_HANDLE(desc_area);
+ tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
+#ifdef HXGE_DEBUG
+ tx_desc_ring_pp = (p_tx_desc_t)DMA_COMMON_IOADDR(desc_area);
+#endif
+ tx_desc_dma_handle = (hxge_os_dma_handle_t)DMA_COMMON_HANDLE(desc_area);
+ tx_msg_ring = tx_ring_p->tx_msg_ring;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: wr_index %d i %d",
+ sop_index, i));
+
+#ifdef HXGE_DEBUG
+ msgsize = msgdsize(nmp);
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(1): wr_index %d i %d msgdsize %d",
+ sop_index, i, msgsize));
+#endif
+ /*
+ * The first 16 bytes of the premapped buffer are reserved
+ * for header. No padding will be used.
+ */
+ pkt_len = pack_len = boff = TX_PKT_HEADER_SIZE;
+ if (hxge_tx_use_bcopy) {
+ bcopy_thresh = (hxge_bcopy_thresh - TX_PKT_HEADER_SIZE);
+ } else {
+ bcopy_thresh = (TX_BCOPY_SIZE - TX_PKT_HEADER_SIZE);
+ }
+ while (nmp) {
+ good_packet = B_TRUE;
+ b_rptr = nmp->b_rptr;
+ len = MBLKL(nmp);
+ if (len <= 0) {
+ nmp = nmp->b_cont;
+ continue;
+ }
+ nmblks++;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(1): nmblks %d "
+ "len %d pkt_len %d pack_len %d",
+ nmblks, len, pkt_len, pack_len));
+ /*
+ * Hardware limits the transfer length to 4K.
+		 * If len exceeds 4K, break nmp into two chunks:
+		 * make the first chunk smaller than 4K; the
+		 * second chunk will be broken down further
+		 * (if needed) during the next pass.
+ */
+ if (len > (TX_MAX_TRANSFER_LENGTH - TX_PKT_HEADER_SIZE)) {
+ if ((t_mp = dupb(nmp)) != NULL) {
+ nmp->b_wptr = nmp->b_rptr +
+ (TX_MAX_TRANSFER_LENGTH -
+ TX_PKT_HEADER_SIZE);
+ t_mp->b_rptr = nmp->b_wptr;
+ t_mp->b_cont = nmp->b_cont;
+ nmp->b_cont = t_mp;
+ len = MBLKL(nmp);
+ } else {
+ good_packet = B_FALSE;
+ goto hxge_start_fail2;
+ }
+ }
+ tx_desc.value = 0;
+ tx_desc_p = &tx_desc_ring_vp[i];
+#ifdef HXGE_DEBUG
+ tx_desc_pp = &tx_desc_ring_pp[i];
+#endif
+ tx_msg_p = &tx_msg_ring[i];
+#if defined(__i386)
+ hpi_desc_handle.regp = (uint32_t)tx_desc_p;
+#else
+ hpi_desc_handle.regp = (uint64_t)tx_desc_p;
+#endif
+ if (!header_set &&
+ ((!hxge_tx_use_bcopy && (len > TX_BCOPY_SIZE)) ||
+ (len >= bcopy_thresh))) {
+ header_set = B_TRUE;
+ bcopy_thresh += TX_PKT_HEADER_SIZE;
+ boff = 0;
+ pack_len = 0;
+ kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
+ hdrp = (p_tx_pkt_header_t)kaddr;
+ clen = pkt_len;
+ dma_handle = tx_msg_p->buf_dma_handle;
+ dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
+ (void) ddi_dma_sync(dma_handle,
+ i * hxge_bcopy_thresh, hxge_bcopy_thresh,
+ DDI_DMA_SYNC_FORDEV);
+
+ tx_msg_p->flags.dma_type = USE_BCOPY;
+ goto hxge_start_control_header_only;
+ }
+
+ pkt_len += len;
+ pack_len += len;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(3): desc entry %d DESC IOADDR $%p "
+ "desc_vp $%p tx_desc_p $%p desc_pp $%p tx_desc_pp $%p "
+ "len %d pkt_len %d pack_len %d",
+ i,
+ DMA_COMMON_IOADDR(desc_area),
+ tx_desc_ring_vp, tx_desc_p,
+ tx_desc_ring_pp, tx_desc_pp,
+ len, pkt_len, pack_len));
+
+ if (len < bcopy_thresh) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(4): USE BCOPY: "));
+ if (hxge_tx_tiny_pack) {
+ uint32_t blst = TXDMA_DESC_NEXT_INDEX(i, -1,
+ tx_ring_p->tx_wrap_mask);
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(5): pack"));
+ if ((pack_len <= bcopy_thresh) &&
+ (last_bidx == blst)) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: pack(6) "
+ "(pkt_len %d pack_len %d)",
+ pkt_len, pack_len));
+ i = blst;
+ tx_desc_p = &tx_desc_ring_vp[i];
+#ifdef HXGE_DEBUG
+ tx_desc_pp = &tx_desc_ring_pp[i];
+#endif
+ tx_msg_p = &tx_msg_ring[i];
+ boff = pack_len - len;
+ ngathers--;
+ } else if (pack_len > bcopy_thresh &&
+ header_set) {
+ pack_len = len;
+ boff = 0;
+ bcopy_thresh = hxge_bcopy_thresh;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(7): > max NEW "
+ "bcopy thresh %d "
+ "pkt_len %d pack_len %d(next)",
+ bcopy_thresh, pkt_len, pack_len));
+ }
+ last_bidx = i;
+ }
+ kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
+ if ((boff == TX_PKT_HEADER_SIZE) && (nmblks == 1)) {
+ hdrp = (p_tx_pkt_header_t)kaddr;
+ header_set = B_TRUE;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(7_x2): "
+ "pkt_len %d pack_len %d (new hdrp $%p)",
+ pkt_len, pack_len, hdrp));
+ }
+ tx_msg_p->flags.dma_type = USE_BCOPY;
+ kaddr += boff;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(8): USE BCOPY: before bcopy "
+ "DESC IOADDR $%p entry %d bcopy packets %d "
+ "bcopy kaddr $%p bcopy ioaddr (SAD) $%p "
+ "bcopy clen %d bcopy boff %d",
+ DMA_COMMON_IOADDR(desc_area), i,
+ tdc_stats->tx_hdr_pkts, kaddr, dma_ioaddr,
+ clen, boff));
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: 1USE BCOPY: "));
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: 2USE BCOPY: "));
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: "
+ "last USE BCOPY: copy from b_rptr $%p "
+ "to KADDR $%p (len %d offset %d",
+ b_rptr, kaddr, len, boff));
+ bcopy(b_rptr, kaddr, len);
+#ifdef HXGE_DEBUG
+ dump_len = (len > 128) ? 128: len;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: dump packets "
+ "(After BCOPY len %d)"
+ "(b_rptr $%p): %s", len, nmp->b_rptr,
+ hxge_dump_packet((char *)nmp->b_rptr,
+ dump_len)));
+#endif
+ dma_handle = tx_msg_p->buf_dma_handle;
+ dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
+ (void) ddi_dma_sync(dma_handle,
+ i * hxge_bcopy_thresh, hxge_bcopy_thresh,
+ DDI_DMA_SYNC_FORDEV);
+ clen = len + boff;
+ tdc_stats->tx_hdr_pkts++;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(9): "
+ "USE BCOPY: DESC IOADDR $%p entry %d "
+ "bcopy packets %d bcopy kaddr $%p "
+ "bcopy ioaddr (SAD) $%p bcopy clen %d "
+ "bcopy boff %d",
+ DMA_COMMON_IOADDR(desc_area), i,
+ tdc_stats->tx_hdr_pkts, kaddr, dma_ioaddr,
+ clen, boff));
+ } else {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(12): USE DVMA: len %d", len));
+ tx_msg_p->flags.dma_type = USE_DMA;
+ dma_flags = DDI_DMA_WRITE;
+ if (len < hxge_dma_stream_thresh) {
+ dma_flags |= DDI_DMA_CONSISTENT;
+ } else {
+ dma_flags |= DDI_DMA_STREAMING;
+ }
+
+ dma_handle = tx_msg_p->dma_handle;
+ status = ddi_dma_addr_bind_handle(dma_handle, NULL,
+ (caddr_t)b_rptr, len, dma_flags,
+ DDI_DMA_DONTWAIT, NULL,
+ &dma_cookie, &ncookies);
+ if (status == DDI_DMA_MAPPED) {
+ dma_ioaddr = dma_cookie.dmac_laddress;
+ len = (int)dma_cookie.dmac_size;
+ clen = (uint32_t)dma_cookie.dmac_size;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(12_1): "
+ "USE DVMA: len %d clen %d ngathers %d",
+ len, clen, ngathers));
+#if defined(__i386)
+ hpi_desc_handle.regp = (uint32_t)tx_desc_p;
+#else
+ hpi_desc_handle.regp = (uint64_t)tx_desc_p;
+#endif
+ while (ncookies > 1) {
+ ngathers++;
+ /*
+				 * Multiple cookies: each cookie
+				 * maps to its own descriptor
+				 * entry, so the SOP bit and
+				 * related fields are not set on
+				 * these gather descriptors.
+ */
+
+ (void) hpi_txdma_desc_gather_set(
+ hpi_desc_handle, &tx_desc,
+ (ngathers -1), mark_mode,
+ ngathers, dma_ioaddr, clen);
+ tx_msg_p->tx_msg_size = clen;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: DMA "
+ "ncookie %d ngathers %d "
+ "dma_ioaddr $%p len %d"
+ "desc $%p descp $%p (%d)",
+ ncookies, ngathers,
+ dma_ioaddr, clen,
+ *tx_desc_p, tx_desc_p, i));
+
+ ddi_dma_nextcookie(dma_handle,
+ &dma_cookie);
+ dma_ioaddr = dma_cookie.dmac_laddress;
+
+ len = (int)dma_cookie.dmac_size;
+ clen = (uint32_t)dma_cookie.dmac_size;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(12_2): "
+ "USE DVMA: len %d clen %d ",
+ len, clen));
+
+ i = TXDMA_DESC_NEXT_INDEX(i, 1,
+ tx_ring_p->tx_wrap_mask);
+ tx_desc_p = &tx_desc_ring_vp[i];
+
+ hpi_desc_handle.regp =
+#if defined(__i386)
+ (uint32_t)tx_desc_p;
+#else
+ (uint64_t)tx_desc_p;
+#endif
+ tx_msg_p = &tx_msg_ring[i];
+ tx_msg_p->flags.dma_type = USE_NONE;
+ tx_desc.value = 0;
+ ncookies--;
+ }
+ tdc_stats->tx_ddi_pkts++;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: DMA: ddi packets %d",
+ tdc_stats->tx_ddi_pkts));
+ } else {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "dma mapping failed for %d "
+ "bytes addr $%p flags %x (%d)",
+ len, b_rptr, status, status));
+ good_packet = B_FALSE;
+ tdc_stats->tx_dma_bind_fail++;
+ tx_msg_p->flags.dma_type = USE_NONE;
+ goto hxge_start_fail2;
+ }
+ } /* ddi dvma */
+
+ nmp = nmp->b_cont;
+hxge_start_control_header_only:
+#if defined(__i386)
+ hpi_desc_handle.regp = (uint32_t)tx_desc_p;
+#else
+ hpi_desc_handle.regp = (uint64_t)tx_desc_p;
+#endif
+ ngathers++;
+
+ if (ngathers == 1) {
+#ifdef HXGE_DEBUG
+ save_desc_p = &sop_tx_desc;
+#endif
+ sop_tx_desc_p = &sop_tx_desc;
+ sop_tx_desc_p->value = 0;
+ sop_tx_desc_p->bits.tr_len = clen;
+ sop_tx_desc_p->bits.sad = dma_ioaddr;
+ } else {
+#ifdef HXGE_DEBUG
+ save_desc_p = &tx_desc;
+#endif
+ tmp_desc_p = &tx_desc;
+ tmp_desc_p->value = 0;
+ tmp_desc_p->bits.tr_len = clen;
+ tmp_desc_p->bits.sad = dma_ioaddr;
+
+ tx_desc_p->value = tmp_desc_p->value;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(13): Desc_entry %d ngathers %d "
+ "desc_vp $%p tx_desc_p $%p "
+ "len %d clen %d pkt_len %d pack_len %d nmblks %d "
+ "dma_ioaddr (SAD) $%p mark %d",
+ i, ngathers, tx_desc_ring_vp, tx_desc_p,
+ len, clen, pkt_len, pack_len, nmblks,
+ dma_ioaddr, mark_mode));
+
+#ifdef HXGE_DEBUG
+ hpi_desc_handle.hxgep = hxgep;
+ hpi_desc_handle.function.function = 0;
+ hpi_desc_handle.function.instance = hxgep->instance;
+ sad = save_desc_p->bits.sad;
+ xfer_len = save_desc_p->bits.tr_len;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "\n\t: value 0x%llx\n"
+ "\t\tsad $%p\ttr_len %d len %d\tnptrs %d\t"
+ "mark %d sop %d\n",
+ save_desc_p->value, sad, save_desc_p->bits.tr_len,
+ xfer_len, save_desc_p->bits.num_ptr,
+ save_desc_p->bits.mark, save_desc_p->bits.sop));
+
+ hpi_txdma_dump_desc_one(hpi_desc_handle, NULL, i);
+#endif
+
+ tx_msg_p->tx_msg_size = clen;
+ i = TXDMA_DESC_NEXT_INDEX(i, 1, tx_ring_p->tx_wrap_mask);
+ if (ngathers > hxge_tx_max_gathers) {
+ good_packet = B_FALSE;
+ hcksum_retrieve(mp, NULL, NULL, &start_offset,
+ &stuff_offset, &end_offset, &value, &cksum_flags);
+
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_start(14): pull msg - "
+ "len %d pkt_len %d ngathers %d",
+ len, pkt_len, ngathers));
+			/*
+			 * Too many gathers: drop this packet.  Free
+			 * the pulled-up copy, if any; the original
+			 * mp is cleaned up in the failure path.
+			 */
+			if ((t_mp = msgpullup(mp, -1)) != NULL)
+				freemsg(t_mp);
+			goto hxge_start_fail2;
+ }
+ } /* while (nmp) */
+
+ tx_msg_p->tx_message = mp;
+ tx_desc_p = &tx_desc_ring_vp[sop_index];
+#if defined(__i386)
+ hpi_desc_handle.regp = (uint32_t)tx_desc_p;
+#else
+ hpi_desc_handle.regp = (uint64_t)tx_desc_p;
+#endif
+
+ pkthdrp = (p_tx_pkt_hdr_all_t)hdrp;
+ pkthdrp->reserved = 0;
+ hdrp->value = 0;
+ (void) hxge_fill_tx_hdr(mp, B_FALSE, cksum_on,
+ (pkt_len - TX_PKT_HEADER_SIZE), npads, pkthdrp);
+ if (pkt_len > STD_FRAME_SIZE) {
+ tdc_stats->tx_jumbo_pkts++;
+ }
+
+ min_len = (hxgep->msg_min + TX_PKT_HEADER_SIZE + (npads * 2));
+ if (pkt_len < min_len) {
+ /* Assume we use bcopy to premapped buffers */
+ kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_start(14-1): < (msg_min + 16)"
+ "len %d pkt_len %d min_len %d bzero %d ngathers %d",
+ len, pkt_len, min_len, (min_len - pkt_len), ngathers));
+ bzero((kaddr + pkt_len), (min_len - pkt_len));
+ pkt_len = tx_msg_p->tx_msg_size = min_len;
+
+ sop_tx_desc_p->bits.tr_len = min_len;
+
+ HXGE_MEM_PIO_WRITE64(hpi_desc_handle, sop_tx_desc_p->value);
+ tx_desc_p->value = sop_tx_desc_p->value;
+
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_start(14-2): < msg_min - "
+ "len %d pkt_len %d min_len %d ngathers %d",
+ len, pkt_len, min_len, ngathers));
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: cksum_flags 0x%x ",
+ cksum_flags));
+ if (cksum_flags & HCK_PARTIALCKSUM) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: cksum_flags 0x%x (partial checksum) ",
+ cksum_flags));
+ cksum_on = B_TRUE;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: from IP cksum_flags 0x%x "
+ "(partial checksum) "
+ "start_offset %d stuff_offset %d",
+ cksum_flags, start_offset, stuff_offset));
+ tmp_len = (uint64_t)(start_offset >> 1);
+ hdrp->value |= (tmp_len << TX_PKT_HEADER_L4START_SHIFT);
+ tmp_len = (uint64_t)(stuff_offset >> 1);
+ hdrp->value |= (tmp_len << TX_PKT_HEADER_L4STUFF_SHIFT);
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: from IP cksum_flags 0x%x "
+ "(partial checksum) "
+ "after SHIFT start_offset %d stuff_offset %d",
+ cksum_flags, start_offset, stuff_offset));
+ }
+
+ /*
+	 * pkt_len already includes the 16-byte header and any padding;
+	 * update the control header length accordingly.
+ */
+
+ /*
+ * Note that Hydra is different from Neptune where
+ * tot_xfer_len = (pkt_len - TX_PKT_HEADER_SIZE);
+ */
+ tot_xfer_len = pkt_len;
+ tmp_len = hdrp->value |
+ (tot_xfer_len << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(15_x1): setting SOP "
+ "tot_xfer_len 0x%llx (%d) pkt_len %d tmp_len "
+ "0x%llx hdrp->value 0x%llx",
+ tot_xfer_len, tot_xfer_len, pkt_len, tmp_len, hdrp->value));
+#if defined(_BIG_ENDIAN)
+ hdrp->value = ddi_swap64(tmp_len);
+#else
+ hdrp->value = tmp_len;
+#endif
+ HXGE_DEBUG_MSG((hxgep,
+ TX_CTL, "==> hxge_start(15_x2): setting SOP "
+ "after SWAP: tot_xfer_len 0x%llx pkt_len %d "
+ "tmp_len 0x%llx hdrp->value 0x%llx",
+ tot_xfer_len, pkt_len, tmp_len, hdrp->value));
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(15): setting SOP "
+ "wr_index %d tot_xfer_len (%d) pkt_len %d npads %d",
+ sop_index, tot_xfer_len, pkt_len, npads));
+
+ sop_tx_desc_p->bits.sop = 1;
+ sop_tx_desc_p->bits.mark = mark_mode;
+ sop_tx_desc_p->bits.num_ptr = ngathers;
+
+ if (mark_mode)
+ tdc_stats->tx_marks++;
+
+ HXGE_MEM_PIO_WRITE64(hpi_desc_handle, sop_tx_desc_p->value);
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(16): set SOP done"));
+
+#ifdef HXGE_DEBUG
+ hpi_desc_handle.hxgep = hxgep;
+ hpi_desc_handle.function.function = 0;
+ hpi_desc_handle.function.instance = hxgep->instance;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "\n\t: value 0x%llx\n"
+ "\t\tsad $%p\ttr_len %d len %d\tnptrs %d\tmark %d sop %d\n",
+ save_desc_p->value, sad, save_desc_p->bits.tr_len,
+ xfer_len, save_desc_p->bits.num_ptr, save_desc_p->bits.mark,
+ save_desc_p->bits.sop));
+ (void) hpi_txdma_dump_desc_one(hpi_desc_handle, NULL, sop_index);
+
+ dump_len = (pkt_len > 128) ? 128: pkt_len;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start: dump packets(17) (after sop set, len "
+ " (len/dump_len/pkt_len/tot_xfer_len) %d/%d/%d/%d):\n"
+ "ptr $%p: %s", len, dump_len, pkt_len, tot_xfer_len,
+ (char *)hdrp, hxge_dump_packet((char *)hdrp, dump_len)));
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_start(18): TX desc sync: sop_index %d", sop_index));
+#endif
+
+ if ((ngathers == 1) || tx_ring_p->wr_index < i) {
+ (void) ddi_dma_sync(tx_desc_dma_handle,
+ sop_index * sizeof (tx_desc_t),
+ ngathers * sizeof (tx_desc_t), DDI_DMA_SYNC_FORDEV);
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "hxge_start(19): sync 1 "
+ "cs_off = 0x%02X cs_s_off = 0x%02X "
+ "pkt_len %d ngathers %d sop_index %d\n",
+ stuff_offset, start_offset,
+ pkt_len, ngathers, sop_index));
+ } else { /* more than one descriptor and wrap around */
+ uint32_t nsdescs = tx_ring_p->tx_ring_size - sop_index;
+ (void) ddi_dma_sync(tx_desc_dma_handle,
+ sop_index * sizeof (tx_desc_t),
+ nsdescs * sizeof (tx_desc_t), DDI_DMA_SYNC_FORDEV);
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "hxge_start(20): sync 1 "
+ "cs_off = 0x%02X cs_s_off = 0x%02X "
+ "pkt_len %d ngathers %d sop_index %d\n",
+ stuff_offset, start_offset, pkt_len, ngathers, sop_index));
+
+ (void) ddi_dma_sync(tx_desc_dma_handle, 0,
+ (ngathers - nsdescs) * sizeof (tx_desc_t),
+ DDI_DMA_SYNC_FORDEV);
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "hxge_start(21): sync 2 "
+ "cs_off = 0x%02X cs_s_off = 0x%02X "
+ "pkt_len %d ngathers %d sop_index %d\n",
+ stuff_offset, start_offset,
+ pkt_len, ngathers, sop_index));
+ }
+
+ tail_index = tx_ring_p->wr_index;
+ tail_wrap = tx_ring_p->wr_index_wrap;
+
+ tx_ring_p->wr_index = i;
+ if (tx_ring_p->wr_index <= tail_index) {
+ tx_ring_p->wr_index_wrap = ((tail_wrap == B_TRUE) ?
+ B_FALSE : B_TRUE);
+ }
+
+ tx_ring_p->descs_pending += ngathers;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: TX kick: "
+ "channel %d wr_index %d wrap %d ngathers %d desc_pend %d",
+ tx_ring_p->tdc, tx_ring_p->wr_index, tx_ring_p->wr_index_wrap,
+ ngathers, tx_ring_p->descs_pending));
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: TX KICKING: "));
+
+ kick.value = 0;
+ kick.bits.wrap = tx_ring_p->wr_index_wrap;
+ kick.bits.tail = (uint16_t)tx_ring_p->wr_index;
+
+	/* Kick the transmit ring by updating the tail pointer */
+ TXDMA_REG_WRITE64(HXGE_DEV_HPI_HANDLE(hxgep),
+ TDC_TDR_KICK, (uint8_t)tx_ring_p->tdc, kick.value);
+ tdc_stats->tx_starts++;
+ MUTEX_EXIT(&tx_ring_p->lock);
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_start"));
+ return (status);
+
+hxge_start_fail2:
+ if (good_packet == B_FALSE) {
+ cur_index = sop_index;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: clean up"));
+ for (i = 0; i < ngathers; i++) {
+ tx_desc_p = &tx_desc_ring_vp[cur_index];
+#if defined(__i386)
+ hpi_handle.regp = (uint32_t)tx_desc_p;
+#else
+ hpi_handle.regp = (uint64_t)tx_desc_p;
+#endif
+ tx_msg_p = &tx_msg_ring[cur_index];
+ (void) hpi_txdma_desc_set_zero(hpi_handle, 1);
+ if (tx_msg_p->flags.dma_type == USE_DVMA) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "tx_desc_p = %X index = %d",
+ tx_desc_p, tx_ring_p->rd_index));
+ (void) dvma_unload(tx_msg_p->dvma_handle,
+ 0, -1);
+ tx_msg_p->dvma_handle = NULL;
+ if (tx_ring_p->dvma_wr_index ==
+ tx_ring_p->dvma_wrap_mask)
+ tx_ring_p->dvma_wr_index = 0;
+ else
+ tx_ring_p->dvma_wr_index++;
+ tx_ring_p->dvma_pending--;
+ } else if (tx_msg_p->flags.dma_type == USE_DMA) {
+ if (ddi_dma_unbind_handle(
+ tx_msg_p->dma_handle)) {
+ cmn_err(CE_WARN, "hxge_start: "
+ "ddi_dma_unbind_handle failed");
+ }
+ }
+ tx_msg_p->flags.dma_type = USE_NONE;
+ cur_index = TXDMA_DESC_NEXT_INDEX(cur_index, 1,
+ tx_ring_p->tx_wrap_mask);
+
+ }
+
+ hxgep->resched_needed = B_TRUE;
+ }
+
+ MUTEX_EXIT(&tx_ring_p->lock);
+
+hxge_start_fail1:
+ /* Add FMA to check the access handle hxge_hregh */
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_start"));
+ return (status);
+}
+
+boolean_t
+hxge_send(p_hxge_t hxgep, mblk_t *mp, p_mac_tx_hint_t hp)
+{
+ p_tx_ring_t *tx_rings;
+ uint8_t ring_index;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_send"));
+
+ ASSERT(mp->b_next == NULL);
+
+ ring_index = hxge_tx_lb_ring(mp, hxgep->max_tdcs, hp);
+ tx_rings = hxgep->tx_rings->rings;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_tx_msg: tx_rings $%p",
+ tx_rings));
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_tx_msg: max_tdcs %d "
+ "ring_index %d", hxgep->max_tdcs, ring_index));
+
+ if (hxge_start(hxgep, tx_rings[ring_index], mp)) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_send: failed "
+ "ring index %d", ring_index));
+ return (B_FALSE);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_send: ring index %d",
+ ring_index));
+ return (B_TRUE);
+}
+
+/*
+ * hxge_m_tx() - send a chain of packets
+ */
+mblk_t *
+hxge_m_tx(void *arg, mblk_t *mp)
+{
+ p_hxge_t hxgep = (p_hxge_t)arg;
+ mblk_t *next;
+ mac_tx_hint_t hint;
+
+ if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "==> hxge_m_tx: hardware not initialized"));
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_m_tx"));
+ return (mp);
+ }
+
+ hint.hash = NULL;
+ hint.vid = 0;
+ hint.sap = 0;
+
+ while (mp != NULL) {
+ next = mp->b_next;
+ mp->b_next = NULL;
+
+ /*
+		 * Until the Nemo tx resource framework is available,
+		 * the mac driver does the load balancing based on
+		 * TCP port or CPU.  For debugging, a configurable
+		 * system parameter selects the policy.
+ */
+ if (!hxge_send(hxgep, mp, &hint)) {
+ mp->b_next = next;
+ break;
+ }
+
+ mp = next;
+
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_m_tx: (go back to loop) mp $%p next $%p",
+ mp, next));
+ }
+ return (mp);
+}
+
+int
+hxge_tx_lb_ring(p_mblk_t mp, uint32_t maxtdcs, p_mac_tx_hint_t hp)
+{
+ uint8_t ring_index = 0;
+ uint8_t *tcp_port;
+ p_mblk_t nmp;
+ size_t mblk_len;
+ size_t iph_len;
+ size_t hdrs_size;
+	/*
+	 * Buffer big enough to cover the maximum IP header length
+	 * plus the first 4 bytes of the TCP/UDP header.
+	 */
+	uint8_t hdrs_buf[sizeof (struct ether_header) +
+	    IP_MAX_HDR_LENGTH + sizeof (uint32_t)];
+	boolean_t qos = B_FALSE;
+
+ HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_lb_ring"));
+
+ if (hp->vid) {
+ qos = B_TRUE;
+ }
+ switch (hxge_tx_lb_policy) {
+ case HXGE_TX_LB_TCPUDP: /* default IPv4 TCP/UDP */
+ default:
+ tcp_port = mp->b_rptr;
+ if (!hxge_no_tx_lb && !qos &&
+ (ntohs(((p_ether_header_t)tcp_port)->ether_type) ==
+ ETHERTYPE_IP)) {
+ nmp = mp;
+ mblk_len = MBLKL(nmp);
+ tcp_port = NULL;
+ if (mblk_len > sizeof (struct ether_header) +
+ sizeof (uint8_t)) {
+ tcp_port = nmp->b_rptr +
+ sizeof (struct ether_header);
+ mblk_len -= sizeof (struct ether_header);
+ iph_len = ((*tcp_port) & 0x0f) << 2;
+ if (mblk_len > (iph_len + sizeof (uint32_t))) {
+ tcp_port = nmp->b_rptr;
+ } else {
+ tcp_port = NULL;
+ }
+ }
+ if (tcp_port == NULL) {
+ hdrs_size = 0;
+ ((p_ether_header_t)hdrs_buf)->ether_type = 0;
+ while ((nmp) && (hdrs_size <
+ sizeof (hdrs_buf))) {
+ mblk_len = MBLKL(nmp);
+ if (mblk_len >=
+ (sizeof (hdrs_buf) - hdrs_size))
+ mblk_len = sizeof (hdrs_buf) -
+ hdrs_size;
+ bcopy(nmp->b_rptr,
+ &hdrs_buf[hdrs_size], mblk_len);
+ hdrs_size += mblk_len;
+ nmp = nmp->b_cont;
+ }
+ tcp_port = hdrs_buf;
+ }
+ tcp_port += sizeof (ether_header_t);
+ if (!(tcp_port[6] & 0x3f) && !(tcp_port[7] & 0xff)) {
+ switch (tcp_port[9]) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_ESP:
+ tcp_port += ((*tcp_port) & 0x0f) << 2;
+ ring_index = ((tcp_port[0] ^
+ tcp_port[1] ^
+ tcp_port[2] ^
+ tcp_port[3]) % maxtdcs);
+ break;
+
+ case IPPROTO_AH:
+ /* SPI starts at the 4th byte */
+ tcp_port += ((*tcp_port) & 0x0f) << 2;
+ ring_index = ((tcp_port[4] ^
+ tcp_port[5] ^
+ tcp_port[6] ^
+ tcp_port[7]) % maxtdcs);
+ break;
+
+ default:
+ ring_index = tcp_port[19] % maxtdcs;
+ break;
+ }
+ } else { /* fragmented packet */
+ ring_index = tcp_port[19] % maxtdcs;
+ }
+ } else {
+ ring_index = mp->b_band % maxtdcs;
+ }
+ break;
+
+ case HXGE_TX_LB_HASH:
+ if (hp->hash) {
+#if defined(__i386)
+ ring_index = ((uint32_t)(hp->hash) % maxtdcs);
+#else
+ ring_index = ((uint64_t)(hp->hash) % maxtdcs);
+#endif
+ } else {
+ ring_index = mp->b_band % maxtdcs;
+ }
+ break;
+
+ case HXGE_TX_LB_DEST_MAC:
+ /* Use destination MAC address */
+ tcp_port = mp->b_rptr;
+ ring_index = tcp_port[5] % maxtdcs;
+ break;
+ }
+ HXGE_DEBUG_MSG((NULL, TX_CTL, "<== hxge_tx_lb_ring"));
+ return (ring_index);
+}
+
+uint_t
+hxge_reschedule(caddr_t arg)
+{
+ p_hxge_t hxgep;
+
+ hxgep = (p_hxge_t)arg;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reschedule"));
+
+ if (hxgep->hxge_mac_state == HXGE_MAC_STARTED &&
+ hxgep->resched_needed) {
+ mac_tx_update(hxgep->mach);
+ hxgep->resched_needed = B_FALSE;
+ hxgep->resched_running = B_FALSE;
+ }
+
+ HXGE_DEBUG_MSG((NULL, TX_CTL, "<== hxge_reschedule"));
+ return (DDI_INTR_CLAIMED);
+}
diff --git a/usr/src/uts/common/io/hxge/hxge_tdc_hw.h b/usr/src/uts/common/io/hxge/hxge_tdc_hw.h
new file mode 100644
index 0000000000..28b0c2023c
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_tdc_hw.h
@@ -0,0 +1,1394 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HXGE_TDC_HW_H
+#define _HXGE_TDC_HW_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define TDC_BASE_ADDR 0x00400000
+
+#define TDC_PAGE_HANDLE (TDC_BASE_ADDR + 0x8)
+#define TDC_TDR_CFG (TDC_BASE_ADDR + 0x20)
+#define TDC_TDR_HEAD (TDC_BASE_ADDR + 0x28)
+#define TDC_TDR_PRE_HEAD (TDC_BASE_ADDR + 0x30)
+#define TDC_TDR_KICK (TDC_BASE_ADDR + 0x38)
+#define TDC_INT_MASK (TDC_BASE_ADDR + 0x40)
+#define TDC_STAT (TDC_BASE_ADDR + 0x48)
+#define TDC_MBH (TDC_BASE_ADDR + 0x50)
+#define TDC_MBL (TDC_BASE_ADDR + 0x58)
+#define TDC_BYTE_CNT (TDC_BASE_ADDR + 0x80)
+#define TDC_TDR_QLEN (TDC_BASE_ADDR + 0x88)
+#define TDC_RTAB_PTR (TDC_BASE_ADDR + 0x90)
+#define TDC_DROP_CNT (TDC_BASE_ADDR + 0x98)
+#define TDC_LAST_PKT_RBUF_PTRS (TDC_BASE_ADDR + 0xA8)
+#define TDC_PREF_CMD (TDC_BASE_ADDR + 0x100)
+#define TDC_PREF_DATA (TDC_BASE_ADDR + 0x108)
+#define TDC_PREF_PAR_DATA (TDC_BASE_ADDR + 0x110)
+#define TDC_REORD_BUF_CMD (TDC_BASE_ADDR + 0x120)
+#define TDC_REORD_BUF_DATA (TDC_BASE_ADDR + 0x128)
+#define TDC_REORD_BUF_ECC_DATA (TDC_BASE_ADDR + 0x130)
+#define TDC_REORD_TBL_CMD (TDC_BASE_ADDR + 0x140)
+#define TDC_REORD_TBL_DATA_LO (TDC_BASE_ADDR + 0x148)
+#define TDC_REORD_TBL_DATA_HI (TDC_BASE_ADDR + 0x150)
+#define TDC_PREF_PAR_LOG (TDC_BASE_ADDR + 0x200)
+#define TDC_REORD_BUF_ECC_LOG (TDC_BASE_ADDR + 0x208)
+#define TDC_REORD_TBL_PAR_LOG (TDC_BASE_ADDR + 0x210)
+#define TDC_FIFO_ERR_MASK (TDC_BASE_ADDR + 0x220)
+#define TDC_FIFO_ERR_STAT (TDC_BASE_ADDR + 0x228)
+#define TDC_FIFO_ERR_INT_DBG (TDC_BASE_ADDR + 0x230)
+#define TDC_STAT_INT_DBG (TDC_BASE_ADDR + 0x240)
+#define TDC_PKT_REQ_TID_TAG (TDC_BASE_ADDR + 0x250)
+#define TDC_SOP_PREF_DESC_LOG (TDC_BASE_ADDR + 0x260)
+#define TDC_PREF_DESC_LOG (TDC_BASE_ADDR + 0x268)
+#define TDC_PEU_TXN_LOG (TDC_BASE_ADDR + 0x270)
+#define TDC_DBG_TRAINING_VEC (TDC_BASE_ADDR + 0x300)
+#define TDC_DBG_GRP_SEL (TDC_BASE_ADDR + 0x308)
+
+
+/*
+ * Register: TdcPageHandle
+ * Logical Page Handle
+ * Description: Upper 20 bits [63:44] to use for all accesses over
+ * the PCI-E bus. Fields in this register are part of the dma
+ * configuration and cannot be changed once the dma is enabled.
+ * Fields:
+ * Page handle, bits [63:44] of all PCI-E transactions for this
+ * channel.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:44;
+ uint64_t page_handle:20;
+#else
+ uint64_t page_handle:20;
+ uint64_t rsrvd:44;
+#endif
+ } bits;
+} tdc_page_handle_t;
+
+
+/*
+ * Register: TdcTdrCfg
+ * Transmit Ring Configuration
+ * Description: Configuration parameters for transmit DMA block.
+ * Software configures the location of the transmit ring in host
+ * memory, as well as its maximum size. Fields in this register are
+ * part of the dma configuration and cannot be changed once the dma
+ * is enabled.
+ * HW does not check for all configuration errors across different
+ * fields.
+ * The usage of enable, reset, and qst is as follows. Software
+ * should use the following sequence to reset a DMA channel. First,
+ * set DMA.enable to 0, wait for DMA.qst=1 and then, set DMA.reset to
+ * 1. After DMA.reset is cleared by hardware and the DMA.qst is set
+ * to 1, software may then start configuring the DMA channel. The
+ * DMA.enable can be set or cleared while the DMA is in operation.
+ * The state machines of the DMA may not have returned to their initial
+ * states yet after the DMA.enable bit is cleared. This condition is
+ * indicated by the value of the DMA.qst. An example of DMA.enable
+ * being cleared during operation is when a fatal error occurs.
+ * Fields:
+ * Bits [15:5] of the maximum number of entries in the Transmit
+ * Queue ring buffer. Bits [4:0] are always 0. Maximum number of
+ * entries is (2^16 - 32) and is limited by the staddr value.
+ * (len + staddr) should not exceed (2^16 - 32).
+ * Set to 1 to enable the Transmit DMA. On fatal errors, this bit
+ * will be cleared by hardware. This bit cannot be set if sw has
+ * not resolved any pending fatal error condition: i.e. any
+ * TdcStat ldf1 error bits remain set.
+ * Set to 1 to reset the DMA. Hardware will clear this bit after
+ * reset is completed. A reset will bring the specific DMA back
+ * to the power on state (including the DMA.en in this register).
+ * When set to 1, it indicates all state associated with the DMA
+ * are in its initial state following either dma reset or
+ * disable. Thus, once this is set to 1, sw could start to
+ * configure the DMA if needed. In an extreme case such as if a
+ * parity error on an EOP descriptor prevents recognition of the
+ * EOP, it is possible that the qst bit will not be set even
+ * though the dma engine has been disabled.
+ * Address bits [43:19] of the start address for the transmit
+ * ring buffer. The value in this field is dependent on len
+ * field. (len + staddr) should not exceed (2^16 - 32).
+ * Bits [18:6] of the start address for the transmit ring buffer.
+ * Bits [5:0] are assumed to be zero, or 64B aligned.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t len:11;
+ uint64_t rsrvd:5;
+ uint64_t enable:1;
+ uint64_t reset:1;
+ uint64_t qst:1;
+ uint64_t rsrvd1:1;
+ uint64_t staddr_base:25;
+ uint64_t staddr:13;
+ uint64_t rsrvd2:6;
+#else
+ uint64_t rsrvd2:6;
+ uint64_t staddr:13;
+ uint64_t staddr_base:25;
+ uint64_t rsrvd1:1;
+ uint64_t qst:1;
+ uint64_t reset:1;
+ uint64_t enable:1;
+ uint64_t rsrvd:5;
+ uint64_t len:11;
+#endif
+ } bits;
+} tdc_tdr_cfg_t;
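+
+/*
+ * Illustrative sketch (not part of the original source): the reset
+ * sequence described above, expressed as pseudo-code.  The register
+ * read/write helpers are hypothetical stand-ins.
+ *
+ *	tdc_tdr_cfg_t cfg;
+ *
+ *	cfg.value = tdc_reg_read(chan, TDC_TDR_CFG);
+ *	cfg.bits.enable = 0;			// 1. disable the DMA
+ *	tdc_reg_write(chan, TDC_TDR_CFG, cfg.value);
+ *	do {					// 2. wait for quiescence
+ *		cfg.value = tdc_reg_read(chan, TDC_TDR_CFG);
+ *	} while (cfg.bits.qst == 0);
+ *	cfg.bits.reset = 1;			// 3. reset the channel
+ *	tdc_reg_write(chan, TDC_TDR_CFG, cfg.value);
+ *	do {					// 4. hw clears reset...
+ *		cfg.value = tdc_reg_read(chan, TDC_TDR_CFG);
+ *	} while (cfg.bits.reset == 1 || cfg.bits.qst == 0);
+ *	// 5. ...after which the channel may be reconfigured.
+ */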
+
+
+/*
+ * Register: TdcTdrHead
+ * Transmit Ring Head
+ * Description: Read-only register that software can poll to determine
+ * the current head of the transmit ring, from the tdcTxPkt block.
+ * Software uses this to know which Tdr entries have had their
+ * descriptors transmitted. These entries and their descriptors may
+ * then be reused by software.
+ * Fields:
+ * Hardware will toggle this bit every time the head is wrapped
+ * around the configured ring buffer.
+ * Entry in transmit ring which will be the next descriptor
+ * transmitted. Software should consider the Tdr full if head ==
+ * TdcTdrKick::tail and wrap != TdcTdrKick::wrap. The ring is
+ * empty of head == TdcTdrKick::tail and wrap ==
+ * TdcTdrKick::wrap.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:47;
+ uint64_t wrap:1;
+ uint64_t head:16;
+#else
+ uint64_t head:16;
+ uint64_t wrap:1;
+ uint64_t rsrvd:47;
+#endif
+ } bits;
+} tdc_tdr_head_t;
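+
+/*
+ * Illustrative sketch (not part of the original source): the
+ * full/empty tests described above, comparing the head register
+ * against the kick (tail) register, tdc_tdr_kick_t (defined below).
+ * The read helper is hypothetical.
+ *
+ *	tdc_tdr_head_t head;
+ *	tdc_tdr_kick_t kick;
+ *
+ *	head.value = tdc_reg_read(chan, TDC_TDR_HEAD);
+ *	kick.value = tdc_reg_read(chan, TDC_TDR_KICK);
+ *	boolean_t full  = (head.bits.head == kick.bits.tail) &&
+ *	    (head.bits.wrap != kick.bits.wrap);
+ *	boolean_t empty = (head.bits.head == kick.bits.tail) &&
+ *	    (head.bits.wrap == kick.bits.wrap);
+ */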
+
+
+/*
+ * Register: TdcTdrPreHead
+ * Transmit Ring Prefetch Head
+ * Description: Read-only register that software can poll to determine
+ * the current prefetch head of the transmit ring, from the tdcPktReq
+ * block. Transmit descriptors are prefetched into chip memory.
+ * Indicates next descriptor to be read from host memory. For debug
+ * use only.
+ * Fields:
+ * Hardware will toggle this bit every time the prefetch head is
+ * wrapped around the configured ring buffer.
+ * Entry in transmit ring which will be fetched next from host
+ * memory.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:47;
+ uint64_t wrap:1;
+ uint64_t head:16;
+#else
+ uint64_t head:16;
+ uint64_t wrap:1;
+ uint64_t rsrvd:47;
+#endif
+ } bits;
+} tdc_tdr_pre_head_t;
+
+
+/*
+ * Register: TdcTdrKick
+ * Transmit Ring Kick
+ * Description: After posting transmit descriptors to the Transmit
+ * Ring, software updates the tail pointer to inform Hydra of the new
+ * descriptors. Software can only post descriptors through this
+ * register when the entire packet is in the ring. Otherwise,
+ * hardware deadlock can occur. If an overflow kick occurs when the
+ * channel is disabled, tdcStat.txRngOflow (Transmit Ring Overflow)
+ * status is not set.
+ * Fields:
+ * Software needs to toggle this bit every time the tail is
+ * wrapped around the configured ring buffer.
+ * Entry where the next valid descriptor will be added (one entry
+ * past the last valid descriptor.)
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:47;
+ uint64_t wrap:1;
+ uint64_t tail:16;
+#else
+ uint64_t tail:16;
+ uint64_t wrap:1;
+ uint64_t rsrvd:47;
+#endif
+ } bits;
+} tdc_tdr_kick_t;
+
+
+/*
+ * Register: TdcIntMask
+ * Transmit Event Mask
+ * Description: The Tx DMA can generate a number of LDF events. The
+ * events can be enabled by software by setting the corresponding bit
+ * to 0. The default value of 1 means the event is masked and no LDF
+ * event is generated.
+ * Fields:
+ * Set to 0 to select the event to raise the LDF for packets
+ * marked. An LDF 0 event.
+ * Set to 0 to select the event to raise the LDF when poisoned
+ * completion or non-zero (unsuccessful) completion status
+ * received from PEU. An LDF 1 event.
+ * Set to 0 to select the event to raise the LDF when total bytes
+ * transmitted compared against pkt internal header bytes
+ * transmitted mismatch. An LDF 1 event.
+ * Set to 0 to select the event to raise the LDF when a runt
+ * packet is dropped (when VMAC does not allow runt packets to be
+ * padded). An LDF 1 event.
+ * Set to 0 to select the event to raise the LDF when the packet
+ * size exceeds hardware limit. An LDF 1 event.
+ * Set to 0 to select the event to raise the LDF to indicate
+ * Transmit Ring Overflow An LDF 1 event.
+ * Set to 0 to select the event to raise the LDF to indicate
+ * parity error on the tdr prefetch buffer occurred. An LDF 1
+ * event.
+ * Set to 0 to select the event to raise the LDF to indicate tdc
+ * received a response completion timeout from peu for tdr
+ * descriptor prefetch An LDF 1 event.
+ * Set to 0 to select the event to raise the LDF to indicate tdc
+ * received a response completion timeout from peu for packet
+ * data request An LDF 1 event.
+ * Set to 0 to select the event to raise the LDF to indicate tdc
+ * did not receive an SOP in the 1st descriptor as was expected
+ * or the numPtr in the 1st descriptor was set to 0. An LDF 1
+ * event.
+ * Set to 0 to select the event to raise the LDF to indicate tdc
+ * received an unexpected SOP descriptor error. An LDF 1 event.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:48;
+ uint64_t marked:1;
+ uint64_t rsrvd1:5;
+ uint64_t peu_resp_err:1;
+ uint64_t pkt_size_hdr_err:1;
+ uint64_t runt_pkt_drop_err:1;
+ uint64_t pkt_size_err:1;
+ uint64_t tx_rng_oflow:1;
+ uint64_t pref_par_err:1;
+ uint64_t tdr_pref_cpl_to:1;
+ uint64_t pkt_cpl_to:1;
+ uint64_t invalid_sop:1;
+ uint64_t unexpected_sop:1;
+#else
+ uint64_t unexpected_sop:1;
+ uint64_t invalid_sop:1;
+ uint64_t pkt_cpl_to:1;
+ uint64_t tdr_pref_cpl_to:1;
+ uint64_t pref_par_err:1;
+ uint64_t tx_rng_oflow:1;
+ uint64_t pkt_size_err:1;
+ uint64_t runt_pkt_drop_err:1;
+ uint64_t pkt_size_hdr_err:1;
+ uint64_t peu_resp_err:1;
+ uint64_t rsrvd1:5;
+ uint64_t marked:1;
+ uint64_t rsrvd:48;
+#endif
+ } bits;
+} tdc_int_mask_t;
+
+
+/*
+ * Register: TdcStat
+ * Transmit Control and Status
+ * Description: Combined control and status register. When writing to
+ * this register, any bit that software wishes not to change should
+ * be written to 0. The TdcStat register may be read or written only
+ * when no mailbox updates are pending. Accordingly, the expected
+ * algorithm for software to use in tracking marked packets and
+ * mailbox updates is one of the following only: 1) enable
+ * interrupts, enable mb, send a single marked packet, wait for Ldf0,
+ * clear marked, repeat or 2) disable interrupts, never enable mb,
+ * send one or more marked packets, poll TdcStat for marked/mMarked
+ * state, clear marked/mMarked bits, repeat. If interrupts are
+ * enabled, upon receiving an Ldf1 interrupt for a given channel
+ * software must wait until a channel's Qst bit has asserted before
+ * reading TdcStat for corresponding error information and before
+ * writing to TdcStat to clear error state.
+ * Fields:
+ * A wrap-around counter to keep track of packets transmitted.
+ * Reset to zero when the DMA is reset
+ * The pktCnt corresponds to the last packet with the MARK bit
+ * set. Reset to zero when the DMA is reset.
+ * Set to 1 to cause HW to update the mailbox when the next
+ * packet with the marked bit set is transmitted. HW clears this
+ * bit to zero after the mailbox update has completed. Note that,
+ * correspondingly, the TdcStat data for the Tx mailbox write
+ * will reflect the state of mb prior to the mb bit's update for
+ * the marked packet being sent. Software should send only one
+ * marked packet per assertion of the mb bit. Multiple marked
+ * packets after setting the mb bit and before receiving the
+ * corresponding mailbox update is not supported. Precautionary
+ * note: HW is responsible for clearing this bit. If
+ * software clears this bit, the behavior is undefined.
+ * Set to 1 when a packet with the mark bit set is transmitted.
+ * If mb is set at the time of the marked packet transmission,
+ * marked will not be set until the corresponding mailbox write
+ * has completed. Note that, correspondingly, the TdcStat data
+ * for the Tx mailbox write will reflect the state of marked
+ * prior to the marked bit's update for the marked packet being
+ * sent. Software may read the register to clear the bit.
+ * Alternatively, software may write a 1 to clear the MARKED bit
+ * (Write 0 has no effect). In the case of write 1, if mMarked
+ * bit is set, MARKED bit will NOT be cleared. This bit is used
+ * to generate LDF 0 consistent with settings in TdcIntMask.
+ * Overflow bit for MARKED register bit. Indicates that multiple
+ * marked packets have been transmitted since the last clear of
+ * the marked bit. If hardware is waiting to update MARKED until
+ * a mailbox write has completed, when another marked packet is
+ * transmitted, mMarked will also not be set until the mailbox
+ * write completes. Note that, correspondingly, the TdcStat data
+ * for the Tx mailbox write will reflect the state of mMarked
+ * prior to the mMarked bit's update for the marked packet being
+ * sent. Software reads to clear. A write 1 to MARKED bit will
+ * also clear the mMarked bit. A write 0 has no effect.
+ * Set to 1 to indicate poisoned completion or non-zero
+ * (unsuccessful) completion status received from PEU. Part of
+ * LDF 1.
+ * Set to 1 to indicate tdc descriptor error: total bytes
+ * transmitted compared against pkt internal header bytes
+ * transmitted mismatch. Fatal error. Part of LDF 1.
+ * Set to 1 when a runt packet is dropped (when VMAC does not
+ * allow runt packets to be padded). Fatal error. Part of LDF 1.
+ * Set to 1 when the packet size exceeds hardware limit: the sum
+ * of gathers exceeds the maximum transmit length (specified in
+ * the Tx VMAC Configuration register txMaxFrameLength) or any
+ * descriptor attempts to transmit more than 4K. Writing a 1
+ * clears the value to 0. Writing a 0 has no effect. Part of LDF
+ * 1. Note that packet size for the purpose of this error is
+ * determined by the actual transfer size from the Tdc to the Tdp
+ * and not from the totXferSize field of the internal header.
+ * Set to 1 to indicate Transmit Ring Overflow: Tail > Ringlength
+ * or if the relative position of the shadow tail to the ring
+ * tail is not correct with respect to the wrap bit. Transmit
+ * Ring Overflow status is not set, if the dma is disabled. Fatal
+ * error. Part of LDF1.
+ * Set to 1 by HW to indicate parity error on the tdr prefetch
+ * buffer occurred. Writing a 1 clears the parity error log
+ * register Part of LDF 1.
+ * Set to 1 to indicate tdc received a response completion
+ * timeout from peu for tdr descriptor prefetch Fatal error. Part
+ * of LDF 1.
+ * Set to 1 to indicate tdc received a response completion
+ * timeout from peu for packet data request Fatal error. Part of
+ * LDF 1.
+ * Set to 1 to indicate tdc did not receive an SOP in the 1st
+ * descriptor as was expected or the numPtr in the 1st descriptor
+ * was set to 0. Fatal error. Part of LDF 1.
+ * Set to 1 to indicate tdc received an unexpected SOP descriptor
+ * error. Fatal error. Part of LDF 1.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:4;
+ uint64_t pkt_cnt:12;
+ uint64_t rsrvd1:4;
+ uint64_t lastmark:12;
+ uint64_t rsrvd2:2;
+ uint64_t mb:1;
+ uint64_t rsrvd3:13;
+ uint64_t marked:1;
+ uint64_t m_marked:1;
+ uint64_t rsrvd4:4;
+ uint64_t peu_resp_err:1;
+ uint64_t pkt_size_hdr_err:1;
+ uint64_t runt_pkt_drop_err:1;
+ uint64_t pkt_size_err:1;
+ uint64_t tx_rng_oflow:1;
+ uint64_t pref_par_err:1;
+ uint64_t tdr_pref_cpl_to:1;
+ uint64_t pkt_cpl_to:1;
+ uint64_t invalid_sop:1;
+ uint64_t unexpected_sop:1;
+#else
+ uint64_t unexpected_sop:1;
+ uint64_t invalid_sop:1;
+ uint64_t pkt_cpl_to:1;
+ uint64_t tdr_pref_cpl_to:1;
+ uint64_t pref_par_err:1;
+ uint64_t tx_rng_oflow:1;
+ uint64_t pkt_size_err:1;
+ uint64_t runt_pkt_drop_err:1;
+ uint64_t pkt_size_hdr_err:1;
+ uint64_t peu_resp_err:1;
+ uint64_t rsrvd4:4;
+ uint64_t m_marked:1;
+ uint64_t marked:1;
+ uint64_t rsrvd3:13;
+ uint64_t mb:1;
+ uint64_t rsrvd2:2;
+ uint64_t lastmark:12;
+ uint64_t rsrvd1:4;
+ uint64_t pkt_cnt:12;
+ uint64_t rsrvd:4;
+#endif
+ } bits;
+} tdc_stat_t;
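+
+/*
+ * Illustrative sketch (not part of the original source): the polled
+ * (interrupts disabled, mailbox never enabled) marked-packet
+ * algorithm described above.  The register helpers are hypothetical.
+ *
+ *	tdc_stat_t cs, clr;
+ *
+ *	cs.value = tdc_reg_read(chan, TDC_STAT);
+ *	if (cs.bits.marked || cs.bits.m_marked) {
+ *		// ...account for the marked packet(s)...
+ *		clr.value = 0;		// unrelated bits written as 0
+ *		clr.bits.marked = 1;	// W1C; also clears m_marked
+ *		tdc_reg_write(chan, TDC_STAT, clr.value);
+ *	}
+ */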
+
+
+/*
+ * Register: TdcMbh
+ * Tx DMA Mailbox High
+ * Description: Upper bits of Tx DMA mailbox address in host memory.
+ * Fields in this register are part of the dma configuration and
+ * cannot be changed once the dma is enabled.
+ * Fields:
+ * Bits [43:32] of the Mailbox address.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:52;
+ uint64_t mbaddr:12;
+#else
+ uint64_t mbaddr:12;
+ uint64_t rsrvd:52;
+#endif
+ } bits;
+} tdc_mbh_t;
+
+
+/*
+ * Register: TdcMbl
+ * Tx DMA Mailbox Low
+ * Description: Lower bits of Tx DMA mailbox address in host memory.
+ * Fields in this register are part of the dma configuration and
+ * cannot be changed once the dma is enabled.
+ * Fields:
+ * Bits [31:6] of the Mailbox address. Bits [5:0] are assumed to
+ * be zero, or 64B aligned.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t mbaddr:26;
+ uint64_t rsrvd1:6;
+#else
+ uint64_t rsrvd1:6;
+ uint64_t mbaddr:26;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} tdc_mbl_t;
+
+
+/*
+ * Register: TdcByteCnt
+ * Tx DMA Byte Count
+ * Description: Counts the number of bytes transmitted to the tx
+ * datapath block. This count may increment in advance of
+ * corresponding updates to TdcStat for the bytes transmitted.
+ * Fields:
+ * Number of bytes transmitted from transmit ring. This counter
+ * will saturate. This register is cleared on read.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t byte_count:32;
+#else
+ uint64_t byte_count:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} tdc_byte_cnt_t;
+
+
+/*
+ * Register: TdcTdrQlen
+ * Tdr Queue Length
+ * Description: Number of descriptors in Tdr For debug only. Note:
+ * Not analogous to either rdc.rbrQlen or tdc.tdcKick -
+ * tdc.tdcTdrHead. Indicates depth of the two intermediate descriptor
+ * usage points rather than end-to-end descriptor availability.
+ * Fields:
+ * Current number of descriptors in Tdr, unprefetched
+ * Current number of descriptors in Tdr in prefetch buffer, i.e.
+ * those which have been prefetched but have not yet been
+ * allocated to the RTab.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t tdr_qlen:16;
+ uint64_t tdr_pref_qlen:16;
+#else
+ uint64_t tdr_pref_qlen:16;
+ uint64_t tdr_qlen:16;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} tdc_tdr_qlen_t;
+
+
+/*
+ * Register: TdcRtabPtr
+ * RTAB pointers
+ * Description: Status of the reorder table pointers. Writing to this
+ * register is for debug purposes only and is enabled when vnmDbgOn
+ * is set to 1.
+ * Fields:
+ * Current rtab head pointer, used in the txPkt block This
+ * register is used to dequeue entries in the reorder table when
+ * packets are sent out
+ * Current rtab head pointer, used in the pktResp block This
+ * register is used to scan entries in the reorder table when
+ * packet data response completions arrive
+ * Current rtab tail pointer, used in the pktReq block This
+ * register is used to allocate entries in the reorder table when
+ * packet data requests are made
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:24;
+ uint64_t pkt_rtab_head:8;
+ uint64_t rsrvd1:7;
+ uint64_t rtab_head:9;
+ uint64_t rsrvd2:7;
+ uint64_t rtab_tail:9;
+#else
+ uint64_t rtab_tail:9;
+ uint64_t rsrvd2:7;
+ uint64_t rtab_head:9;
+ uint64_t rsrvd1:7;
+ uint64_t pkt_rtab_head:8;
+ uint64_t rsrvd:24;
+#endif
+ } bits;
+} tdc_rtab_ptr_t;
+
+
+/*
+ * Register: TdcDropCnt
+ * Packet Drop Counter
+ * Description: Counts the number of runt, aborted and size
+ * mismatched packets dropped by the tx datapath block.
+ * Fields:
+ * Number of dropped due to pktSizeHdrErr. This counter will
+ * saturate. This counter is cleared on read.
+ * Number of packets dropped due to the packet abort bit being
+ * set. Many
+ * different error events could be the source of packet abort
+ * drop. Descriptor-related error events include those errors
+ * encountered while in the middle of processing a packet
+ * request: 1. unexpectedSop; 2. non-SOP descriptor parity error
+ * (prefParErr); 3. ran out of non-SOP descriptors due to peu
+ * response errors (tdrPrefCplTo or peuRespErr) or the channel
+ * being disabled before the TDR request can be made. Packet
+ * response errors encountered while in the middle of processing
+ * a packet request also can trigger the packet abort: 4. packet
+ * response did not return due to peu response errors ( pktCplTo
+ * or peuRespErr); 5. Rtab parity error (reordTblParErr). This
+ * counter will saturate. This counter is cleared on read. Note
+ * that packet aborts are not counted until the packet is cleared
+ * from the RTab, which may be an arbitrary amount of time after
+ * the corresponding error is logged in TdcStat. In most cases,
+ * this will occur before the channel is quiesced following
+ * channel disable. In an extreme case such as if a parity error
+ * on an EOP descriptor prevents recognition of the EOP, it is
+ * possible that the quiescent bit itself will not be set
+ * although the packet drop counter will be incremented.
+ * Number of packets dropped due to runt packet size error. This
+ * counter will saturate. This counter is cleared on read.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:40;
+ uint64_t hdr_size_error_count:8;
+ uint64_t abort_count:8;
+ uint64_t runt_count:8;
+#else
+ uint64_t runt_count:8;
+ uint64_t abort_count:8;
+ uint64_t hdr_size_error_count:8;
+ uint64_t rsrvd:40;
+#endif
+ } bits;
+} tdc_drop_cnt_t;
+
+
+/*
+ * Register: TdcLastPktRbufPtrs
+ * Last Packet RBUF Pointers
+ * Description: Logs the RBUF head and tail pointer of the last
+ * packet sent by the tx datapath block.
+ * Fields:
+ * Logs the RBUF tail pointer of the last packet sent
+ * Logs the RBUF head pointer of the last packet sent
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:36;
+ uint64_t rbuf_tail_ptr:12;
+ uint64_t rsrvd1:4;
+ uint64_t rbuf_head_ptr:12;
+#else
+ uint64_t rbuf_head_ptr:12;
+ uint64_t rsrvd1:4;
+ uint64_t rbuf_tail_ptr:12;
+ uint64_t rsrvd:36;
+#endif
+ } bits;
+} tdc_last_pkt_rbuf_ptrs_t;
+
+
+/*
+ * Register: TdcPrefCmd
+ * Tx DMA Prefetch Buffer Command
+ * Description: Allows debug access to the entire prefetch buffer.
+ * For writes, software writes the tdcPrefData and tdcPrefParData
+ * registers, before writing the tdcPrefCmd register. For reads,
+ * software writes the tdcPrefCmd register, then reads the
+ * tdcPrefData and tdcPrefParData registers. The valid field should
+ * be polled by software until it goes low, indicating the read or
+ * write has completed. Writing the tdcPrefCmd triggers the access.
+ * Fields:
+ * status of indirect access 0=busy 1=done
+ * Command type. 1 indicates a read command, 0 a write command.
+ * enable writing of parity bits 1=enabled, 0=disabled
+ * DMA channel of entry to read or write
+ * Entry in the prefetch buffer to read or write
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t status:1;
+ uint64_t cmd:1;
+ uint64_t par_en:1;
+ uint64_t rsrvd1:23;
+ uint64_t dmc:2;
+ uint64_t entry:4;
+#else
+ uint64_t entry:4;
+ uint64_t dmc:2;
+ uint64_t rsrvd1:23;
+ uint64_t par_en:1;
+ uint64_t cmd:1;
+ uint64_t status:1;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} tdc_pref_cmd_t;
+
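+/*
+ * Illustrative sketch (not part of the original submission): the prefetch
+ * buffer, reorder buffer, and reorder table debug ports below all follow
+ * the same indirect-access protocol. Assuming the HXGE_REG_WR64()/
+ * HXGE_REG_RD64() accessors and the TDC_PREF_CMD/TDC_PREF_DATA offsets
+ * defined elsewhere in the driver, and that the status field reads 1
+ * once the access is done, an indirect read might look like:
+ *
+ *	tdc_pref_cmd_t cmd;
+ *	tdc_pref_data_t data;
+ *
+ *	cmd.value = 0;
+ *	cmd.bits.cmd = 1;		(1 = read, 0 = write)
+ *	cmd.bits.dmc = chan;		(DMA channel of the entry)
+ *	cmd.bits.entry = entry;		(prefetch buffer entry)
+ *	HXGE_REG_WR64(handle, TDC_PREF_CMD, cmd.value);
+ *	do {
+ *		HXGE_REG_RD64(handle, TDC_PREF_CMD, &cmd.value);
+ *	} while (cmd.bits.status == 0);	(0 = busy, 1 = done)
+ *	HXGE_REG_RD64(handle, TDC_PREF_DATA, &data.value);
+ */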
+
+/*
+ * Register: TdcPrefData
+ * Tx DMA Prefetch Buffer Data
+ * Description: See tdcPrefCmd register.
+ * Fields:
+ * For writes, data which is written into prefetch buffer. For
+ * reads, data read from the prefetch buffer.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } bits;
+} tdc_pref_data_t;
+
+
+/*
+ * Register: TdcPrefParData
+ * Tx DMA Prefetch Buffer Parity Data
+ * Description: See tdcPrefCmd register.
+ * Fields:
+ * For writes, parity data which is written into prefetch buffer.
+ * For reads, parity data read from the prefetch buffer.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:56;
+ uint64_t par_data:8;
+#else
+ uint64_t par_data:8;
+ uint64_t rsrvd:56;
+#endif
+ } bits;
+} tdc_pref_par_data_t;
+
+
+/*
+ * Register: TdcReordBufCmd
+ * Tx DMA Reorder Buffer Command
+ * Description: Allows debug access to the entire Reorder buffer. For
+ * writes, software writes the tdcReordBufData and tdcReordBufEccData
+ * before writing the tdcReordBufCmd register. For reads, software
+ * writes the tdcReordBufCmd register, then reads the tdcReordBufData
+ * and tdcReordBufEccData registers. The status field should be
+ * polled by software until it indicates that the read or write has
+ * completed. Writing the tdcReordBufCmd triggers the access.
+ * Fields:
+ * status of indirect access 0=busy 1=done
+ * Command type. 1 indicates a read command, 0 a write command.
+ * enable writing of ecc bits 1=enabled, 0=disabled
+ * Entry in the reorder buffer to read or write
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t status:1;
+ uint64_t cmd:1;
+ uint64_t ecc_en:1;
+ uint64_t rsrvd1:17;
+ uint64_t entry:12;
+#else
+ uint64_t entry:12;
+ uint64_t rsrvd1:17;
+ uint64_t ecc_en:1;
+ uint64_t cmd:1;
+ uint64_t status:1;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} tdc_reord_buf_cmd_t;
+
+
+/*
+ * Register: TdcReordBufData
+ * Tx DMA Reorder Buffer Data
+ * Description: See tdcReordBufCmd register.
+ * Fields:
+ * For writes, data which is written into reorder buffer. For
+ * reads, data read from the reorder buffer.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } bits;
+} tdc_reord_buf_data_t;
+
+
+/*
+ * Register: TdcReordBufEccData
+ * Tx DMA Reorder Buffer ECC Data
+ * Description: See tdcReordBufCmd register.
+ * Fields:
+ * For writes, ecc data which is written into reorder buffer. For
+ * reads, ecc data read from the reorder buffer.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:56;
+ uint64_t ecc_data:8;
+#else
+ uint64_t ecc_data:8;
+ uint64_t rsrvd:56;
+#endif
+ } bits;
+} tdc_reord_buf_ecc_data_t;
+
+
+/*
+ * Register: TdcReordTblCmd
+ * Tx DMA Reorder Table Command
+ * Description: Allows debug access to the entire Reorder Table. For
+ * writes, software writes the tdcReordTblData and tdcReordTblParData
+ * before writing the tdcReordTblCmd register. For reads, software
+ * writes the tdcReordTblCmd register, then reads the tdcReordTblData
+ * and tdcReordTblParData registers. The status field should be
+ * polled by software until it indicates that the read or write has
+ * completed. Writing the tdcReordTblCmd triggers the access.
+ * Fields:
+ * status of indirect access 0=busy 1=done
+ * Command type. 1 indicates a read command, 0 a write command.
+ * enable writing of par bits 1=enabled, 0=disabled
+ * Address in the reorder table to read from or write to
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t status:1;
+ uint64_t cmd:1;
+ uint64_t par_en:1;
+ uint64_t rsrvd1:21;
+ uint64_t entry:8;
+#else
+ uint64_t entry:8;
+ uint64_t rsrvd1:21;
+ uint64_t par_en:1;
+ uint64_t cmd:1;
+ uint64_t status:1;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} tdc_reord_tbl_cmd_t;
+
+
+/*
+ * Register: TdcReordTblDataLo
+ * Tx DMA Reorder Table Data Lo
+ * Description: See tdcReordTblCmd register.
+ * Fields:
+ * For writes, data which is written into reorder table. For
+ * reads, data read from the reorder table.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t data:64;
+#else
+ uint64_t data:64;
+#endif
+ } bits;
+} tdc_reord_tbl_data_lo_t;
+
+
+/*
+ * Register: TdcReordTblDataHi
+ * Tx DMA Reorder Table Data Hi
+ * Description: See tdcReordTblCmd register.
+ * Fields:
+ * For writes, parity data which is written into reorder table.
+ * For reads, parity data read from the reorder table.
+ * For writes, data which is written into reorder table. For
+ * reads, data read from the reorder table.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:47;
+ uint64_t par_data:9;
+ uint64_t hi_data:8;
+#else
+ uint64_t hi_data:8;
+ uint64_t par_data:9;
+ uint64_t rsrvd:47;
+#endif
+ } bits;
+} tdc_reord_tbl_data_hi_t;
+
+
+/*
+ * Register: TdcPrefParLog
+ * Tx DMA Prefetch Buffer Parity Log
+ * Description: TDC DMA Prefetch Buffer parity log register. This
+ * register logs the first parity error encountered. Writing a 1 to
+ * TdcStat::prefParErr clears this register and re-arms it for
+ * logging the next error.
+ * Fields:
+ * Address of parity error read data
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rsrvd1:26;
+ uint64_t address:6;
+#else
+ uint64_t address:6;
+ uint64_t rsrvd1:26;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} tdc_pref_par_log_t;
+
+
+/*
+ * Register: TdcReordBufEccLog
+ * Tx Reorder Buffer ECC Log
+ * Description: TDC Reorder Buffer ECC log register. This register
+ * logs the first ECC error encountered. Writing a 1 to
+ * tdcFifoErrStat::reordBufDedErr or tdcFifoErrStat::reordBufSecErr
+ * clears this register and re-arms it for logging.
+ * Fields:
+ * Address of ECC error
+ * Syndrome of ECC error
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rsrvd1:4;
+ uint64_t address:12;
+ uint64_t rsrvd2:8;
+ uint64_t syndrome:8;
+#else
+ uint64_t syndrome:8;
+ uint64_t rsrvd2:8;
+ uint64_t address:12;
+ uint64_t rsrvd1:4;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} tdc_reord_buf_ecc_log_t;
+
+
+/*
+ * Register: TdcReordTblParLog
+ * Tx Reorder Table Parity Log
+ * Description: TDC Reorder Table parity log register. This register
+ * logs the first parity error encountered. Writing a 1 to
+ * tdcFifoErrStat::reordTblParErr clears this register and re-arms
+ * it for logging.
+ * Fields:
+ * Address of parity error
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rsrvd1:24;
+ uint64_t address:8;
+#else
+ uint64_t address:8;
+ uint64_t rsrvd1:24;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} tdc_reord_tbl_par_log_t;
+
+
+/*
+ * Register: TdcFifoErrMask
+ * FIFO Error Mask
+ * Description: FIFO Error Mask register. Mask status of Reorder
+ * Buffer and Reorder Table Buffer Errors.
+ * Fields:
+ * Set to 0 to select the event to raise the LDF to indicate
+ * reorder table ram received a parity error. A Device Error 1
+ * event.
+ * Set to 0 to select the event to raise the LDF to indicate
+ * reorder buffer ram received an ecc double bit error. A Device
+ * Error 1 event.
+ * Set to 0 to select the event to raise the LDF to indicate
+ * reorder buffer ram received an ecc single bit error. A Device
+ * Error 0 event.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:61;
+ uint64_t reord_tbl_par_err:1;
+ uint64_t reord_buf_ded_err:1;
+ uint64_t reord_buf_sec_err:1;
+#else
+ uint64_t reord_buf_sec_err:1;
+ uint64_t reord_buf_ded_err:1;
+ uint64_t reord_tbl_par_err:1;
+ uint64_t rsrvd:61;
+#endif
+ } bits;
+} tdc_fifo_err_mask_t;
+
+
+/*
+ * Register: TdcFifoErrStat
+ * FIFO Error Status
+ * Description: FIFO Error Status register. Log status of Reorder
+ * Buffer and Reorder Table Buffer Errors.
+ * Fields:
+ * Set to 1 by HW to indicate reorder table ram received a parity
+ * error. Writing a 1 clears this bit and also clears the
+ * TdcReordTblParLog register. Fatal error. Part of Device Error
+ * 1.
+ * Set to 1 by HW to indicate reorder buffer ram received a
+ * double bit ecc error. Writing a 1 clears this bit and also
+ * clears the tdcReordBufEccLog register. Fatal error. Part of
+ * Device Error 1.
+ * Set to 1 by HW to indicate reorder buffer ram received a
+ * single bit ecc error. Writing a 1 clears this bit and also
+ * clears the tdcReordBufEccLog register. Non-fatal error. Part
+ * of Device Error 0.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:61;
+ uint64_t reord_tbl_par_err:1;
+ uint64_t reord_buf_ded_err:1;
+ uint64_t reord_buf_sec_err:1;
+#else
+ uint64_t reord_buf_sec_err:1;
+ uint64_t reord_buf_ded_err:1;
+ uint64_t reord_tbl_par_err:1;
+ uint64_t rsrvd:61;
+#endif
+ } bits;
+} tdc_fifo_err_stat_t;
+
+
+/*
+ * Register: TdcFifoErrIntDbg
+ * FIFO Error Interrupt Debug
+ * Description: FIFO Error Interrupt Debug register. Write this
+ * register to set bits in TdcFifoErrStat, allowing debug creation of
+ * interrupts without needing to create the actual events. This
+ * register holds no state. Reading this register gives the Tdc Fifo
+ * Err Status data. Clear interrupt state by clearing TdcFifoErrStat.
+ * For Debug only
+ * Fields:
+ * Set to 1 to select the event to raise the LDF to indicate
+ * reorder table ram received a parity error. A Device Error 1
+ * event.
+ * Set to 1 to select the event to raise the LDF to indicate
+ * reorder buffer ram received an ecc double bit error. A Device
+ * Error 1 event.
+ * Set to 1 to select the event to raise the LDF to indicate
+ * reorder buffer ram received an ecc single bit error. A Device
+ * Error 0 event.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:61;
+ uint64_t reord_tbl_par_err:1;
+ uint64_t reord_buf_ded_err:1;
+ uint64_t reord_buf_sec_err:1;
+#else
+ uint64_t reord_buf_sec_err:1;
+ uint64_t reord_buf_ded_err:1;
+ uint64_t reord_tbl_par_err:1;
+ uint64_t rsrvd:61;
+#endif
+ } bits;
+} tdc_fifo_err_int_dbg_t;
+
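+/*
+ * Usage note (not part of the original submission): this register (and
+ * TdcStatIntDbg below) lets software exercise an error-interrupt path
+ * without a real fault. Assuming the TDC_FIFO_ERR_INT_DBG and
+ * TDC_FIFO_ERR_STAT offsets defined elsewhere in the driver, injecting
+ * and then clearing a single-bit ECC event might look like:
+ *
+ *	tdc_fifo_err_int_dbg_t dbg;
+ *	tdc_fifo_err_stat_t stat;
+ *
+ *	dbg.value = 0;
+ *	dbg.bits.reord_buf_sec_err = 1;		(inject the event)
+ *	HXGE_REG_WR64(handle, TDC_FIFO_ERR_INT_DBG, dbg.value);
+ *	...the interrupt fires; the handler then clears the state:
+ *	stat.value = 0;
+ *	stat.bits.reord_buf_sec_err = 1;	(write 1 to clear)
+ *	HXGE_REG_WR64(handle, TDC_FIFO_ERR_STAT, stat.value);
+ */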
+
+/*
+ * Register: TdcStatIntDbg
+ * Transmit Status Interrupt Debug
+ * Description: Write this register to set bits in TdcStat, allowing
+ * debug creation of interrupts without needing to create the actual
+ * events. This register holds no state. Reading this register gives
+ * the Transmit Control and Status data. Clear interrupt state by
+ * clearing TdcStat. For Debug only
+ * Fields:
+ * Set to 1 to select the event to raise the LDF for packets
+ * marked. An LDF 0 event.
+ * Set to 1 to select the event to raise the LDF when poisoned
+ * completion or non-zero (unsuccessful) completion status
+ * received from PEU. An LDF 1 event.
+ * Set to 1 to select the event to raise the LDF when total bytes
+ * transmitted compared against pkt internal header bytes
+ * transmitted mismatch. An LDF 1 event.
+ * Set to 1 to select the event to raise the LDF when a runt
+ * packet is dropped (when VMAC does not allow runt packets to be
+ * padded). An LDF 1 event.
+ * Set to 1 to select the event to raise the LDF when the packet
+ * size exceeds hardware limit. An LDF 1 event.
+ * Set to 1 to select the event to raise the LDF to indicate
+ * Transmit Ring Overflow An LDF 1 event.
+ * Set to 1 to select the event to raise the LDF to indicate
+ * parity error on the tdr prefetch buffer occurred. An LDF 1
+ * event.
+ * Set to 1 to select the event to raise the LDF to indicate tdc
+ * received a response completion timeout from peu for tdr
+ * descriptor prefetch An LDF 1 event.
+ * Set to 1 to select the event to raise the LDF to indicate tdc
+ * received a response completion timeout from peu for packet
+ * data request An LDF 1 event.
+ * Set to 1 to select the event to raise the LDF to indicate tdc
+ * did not receive an SOP in the 1st descriptor as was expected
+ * or the numPtr in the 1st descriptor was set to 0. An LDF 1
+ * event.
+ * Set to 1 to select the event to raise the LDF to indicate tdc
+ * received an unexpected SOP descriptor error. An LDF 1 event.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:48;
+ uint64_t marked:1;
+ uint64_t rsrvd1:5;
+ uint64_t peu_resp_err:1;
+ uint64_t pkt_size_hdr_err:1;
+ uint64_t runt_pkt_drop_err:1;
+ uint64_t pkt_size_err:1;
+ uint64_t tx_rng_oflow:1;
+ uint64_t pref_par_err:1;
+ uint64_t tdr_pref_cpl_to:1;
+ uint64_t pkt_cpl_to:1;
+ uint64_t invalid_sop:1;
+ uint64_t unexpected_sop:1;
+#else
+ uint64_t unexpected_sop:1;
+ uint64_t invalid_sop:1;
+ uint64_t pkt_cpl_to:1;
+ uint64_t tdr_pref_cpl_to:1;
+ uint64_t pref_par_err:1;
+ uint64_t tx_rng_oflow:1;
+ uint64_t pkt_size_err:1;
+ uint64_t runt_pkt_drop_err:1;
+ uint64_t pkt_size_hdr_err:1;
+ uint64_t peu_resp_err:1;
+ uint64_t rsrvd1:5;
+ uint64_t marked:1;
+ uint64_t rsrvd:48;
+#endif
+ } bits;
+} tdc_stat_int_dbg_t;
+
+
+/*
+ * Register: TdcPktReqTidTag
+ * Packet Request TID Tag
+ * Description: Packet Request TID Tag register. Tracks the packet
+ * request TIDs currently in use.
+ * Fields:
+ * When set to 1, it indicates the TID is currently being used
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t pkt_req_tid_tag:24;
+ uint64_t rsrvd1:8;
+#else
+ uint64_t rsrvd1:8;
+ uint64_t pkt_req_tid_tag:24;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} tdc_pkt_req_tid_tag_t;
+
+
+/*
+ * Register: TdcSopPrefDescLog
+ * SOP Prefetch Descriptor Log
+ * Description: SOP Prefetch Descriptor Log register. Logs the last SOP
+ * prefetch descriptor processed by the packet request block. This
+ * log could represent the current SOP prefetch descriptor if the
+ * packet request block did not complete issuing the data requests
+ * from this descriptor. Descriptors are logged to this register when
+ * the packet request block is expecting an SOP descriptor, and it
+ * receives it.
+ * Fields:
+ * Represents the last or current SOP descriptor being processed
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t sop_pref_desc_log:64;
+#else
+ uint64_t sop_pref_desc_log:64;
+#endif
+ } bits;
+} tdc_sop_pref_desc_log_t;
+
+
+/*
+ * Register: TdcPrefDescLog
+ * Prefetch Descriptor Log
+ * Description: Prefetch Descriptor Log register. Logs the last prefetch
+ * descriptor processed by the packet request block. This log could
+ * represent the current prefetch descriptor if the packet request
+ * block did not complete issuing the data requests from this
+ * descriptor. The contents in this register could differ from the
+ * SOP Prefetch Descriptor Log register if a particular packet
+ * requires usage of more than 1 descriptor. Descriptors are logged
+ * to this register when the packet request block is expecting a
+ * descriptor after the SOP descriptor.
+ * Fields:
+ * Represents the last or current descriptor being processed
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t pref_desc_log:64;
+#else
+ uint64_t pref_desc_log:64;
+#endif
+ } bits;
+} tdc_pref_desc_log_t;
+
+
+/*
+ * Register: TdcPeuTxnLog
+ * PEU Transaction Log
+ * Description: PEU Transaction Log register. Counts the memory read
+ * and write requests sent to peu block. For debug only.
+ * Fields:
+ * Counts the memory write transactions sent to the peu block.
+ * This counter saturates. This counter increments when vnmDbg is
+ * on.
+ * Counts the memory read transactions sent to the peu block.
+ * This counter saturates. This counter increments when vnmDbg is
+ * on.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rsrvd1:16;
+ uint64_t peu_mem_wr_count:8;
+ uint64_t peu_mem_rd_count:8;
+#else
+ uint64_t peu_mem_rd_count:8;
+ uint64_t peu_mem_wr_count:8;
+ uint64_t rsrvd1:16;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} tdc_peu_txn_log_t;
+
+
+/*
+ * Register: TdcDbgTrainingVec
+ * Debug Training Vector
+ * Description: Debug Training Vector register. Debug Training Vector
+ * for the coreClk domain. For the pcieClk domain, the dbgxMsb and
+ * dbgyMsb values are flipped on the debug bus.
+ * Fields:
+ * Blade Number; the value read depends on the blade on which
+ * this block resides
+ * debug training vector; the sub-group select value of 0 selects
+ * this vector
+ * Blade Number; the value read depends on the blade on which
+ * this block resides
+ * debug training vector; the sub-group select value of 0 selects
+ * this vector
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t dbgx_msb:1;
+ uint64_t dbgx_bld_num:3;
+ uint64_t dbgx_training_vec:12;
+ uint64_t dbgy_msb:1;
+ uint64_t dbgy_bld_num:3;
+ uint64_t dbgy_training_vec:12;
+#else
+ uint64_t dbgy_training_vec:12;
+ uint64_t dbgy_bld_num:3;
+ uint64_t dbgy_msb:1;
+ uint64_t dbgx_training_vec:12;
+ uint64_t dbgx_bld_num:3;
+ uint64_t dbgx_msb:1;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} tdc_dbg_training_vec_t;
+
+
+/*
+ * Register: TdcDbgGrpSel
+ * Debug Group Select
+ * Description: Debug Group Select register. Selects the group of
+ * signals brought out on the debug port.
+ * Fields:
+ * high 32b sub-group select
+ * low 32b sub-group select
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:48;
+ uint64_t rsrvd1:1;
+ uint64_t dbg_h32_sub_sel:7;
+ uint64_t rsrvd2:1;
+ uint64_t dbg_l32_sub_sel:7;
+#else
+ uint64_t dbg_l32_sub_sel:7;
+ uint64_t rsrvd2:1;
+ uint64_t dbg_h32_sub_sel:7;
+ uint64_t rsrvd1:1;
+ uint64_t rsrvd:48;
+#endif
+ } bits;
+} tdc_dbg_grp_sel_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HXGE_TDC_HW_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_txdma.c b/usr/src/uts/common/io/hxge/hxge_txdma.c
new file mode 100644
index 0000000000..91e923d6f7
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_txdma.c
@@ -0,0 +1,2900 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hxge_impl.h>
+#include <hxge_txdma.h>
+#include <sys/llc1.h>
+
+uint32_t hxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
+uint32_t hxge_tx_minfree = 32;
+uint32_t hxge_tx_intr_thres = 0;
+uint32_t hxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
+uint32_t hxge_tx_tiny_pack = 1;
+uint32_t hxge_tx_use_bcopy = 1;
+
+extern uint32_t hxge_tx_ring_size;
+extern uint32_t hxge_bcopy_thresh;
+extern uint32_t hxge_dvma_thresh;
+extern uint32_t hxge_dma_stream_thresh;
+extern dma_method_t hxge_force_dma;
+
+/* Device register access attributes for PIO. */
+extern ddi_device_acc_attr_t hxge_dev_reg_acc_attr;
+
+/* Device descriptor access attributes for DMA. */
+extern ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr;
+
+/* Device buffer access attributes for DMA. */
+extern ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr;
+extern ddi_dma_attr_t hxge_desc_dma_attr;
+extern ddi_dma_attr_t hxge_tx_dma_attr;
+
+static hxge_status_t hxge_map_txdma(p_hxge_t hxgep);
+static void hxge_unmap_txdma(p_hxge_t hxgep);
+static hxge_status_t hxge_txdma_hw_start(p_hxge_t hxgep);
+static void hxge_txdma_hw_stop(p_hxge_t hxgep);
+
+static hxge_status_t hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p,
+ uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
+ p_tx_mbox_t *tx_mbox_p);
+static void hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
+static hxge_status_t hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t,
+ p_hxge_dma_common_t *, p_tx_ring_t *, uint32_t);
+static void hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep,
+ p_tx_ring_t tx_ring_p);
+static void hxge_map_txdma_channel_cfg_ring(p_hxge_t, uint16_t,
+ p_hxge_dma_common_t *, p_tx_ring_t, p_tx_mbox_t *);
+static void hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep,
+ p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
+static hxge_status_t hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
+ p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
+static hxge_status_t hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
+ p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
+static p_tx_ring_t hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel);
+static hxge_status_t hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index,
+ p_hxge_ldv_t ldvp, tdc_stat_t cs);
+static p_tx_mbox_t hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel);
+static hxge_status_t hxge_txdma_fatal_err_recover(p_hxge_t hxgep,
+ uint16_t channel, p_tx_ring_t tx_ring_p);
+static hxge_status_t hxge_tx_port_fatal_err_recover(p_hxge_t hxgep);
+
+hxge_status_t
+hxge_init_txdma_channels(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+ block_reset_t reset_reg;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_init_txdma_channels"));
+
+ /*
+	 * Reset the TDC block from the PEU to clean up any unknown
+	 * configuration that may be left over from a previous reboot.
+ */
+ reset_reg.value = 0;
+ reset_reg.bits.tdc_rst = 1;
+ HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
+
+ HXGE_DELAY(1000);
+
+ status = hxge_map_txdma(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_init_txdma_channels: status 0x%x", status));
+ return (status);
+ }
+
+ status = hxge_txdma_hw_start(hxgep);
+ if (status != HXGE_OK) {
+ hxge_unmap_txdma(hxgep);
+ return (status);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_init_txdma_channels: status 0x%x", status));
+
+ return (HXGE_OK);
+}
+
+void
+hxge_uninit_txdma_channels(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_uninit_txdma_channels"));
+
+ hxge_txdma_hw_stop(hxgep);
+ hxge_unmap_txdma(hxgep);
+
+	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_uninit_txdma_channels"));
+}
+
+void
+hxge_setup_dma_common(p_hxge_dma_common_t dest_p, p_hxge_dma_common_t src_p,
+ uint32_t entries, uint32_t size)
+{
+ size_t tsize;
+ *dest_p = *src_p;
+ tsize = size * entries;
+ dest_p->alength = tsize;
+ dest_p->nblocks = entries;
+ dest_p->block_size = size;
+ dest_p->offset += tsize;
+
+ src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
+ src_p->alength -= tsize;
+ src_p->dma_cookie.dmac_laddress += tsize;
+ src_p->dma_cookie.dmac_size -= tsize;
+}
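+
+/*
+ * Usage sketch (not part of the original submission): the carve-out above
+ * slices one pre-allocated control area into consecutive regions; each
+ * call hands out entries * size bytes and advances the source area past
+ * them. In hxge_map_txdma_channel_cfg_ring() below, the descriptor ring
+ * and its mailbox are carved from the same area roughly like this
+ * (cntl_dmap and txdma_mailbox_t are names assumed from the rest of the
+ * driver):
+ *
+ *	hxge_setup_dma_common(&tx_ring_p->tdc_desc, cntl_dmap,
+ *	    tx_ring_p->tx_ring_size, sizeof (tx_desc_t));
+ *	hxge_setup_dma_common(&tx_mbox_p->tx_mbox, cntl_dmap,
+ *	    1, sizeof (txdma_mailbox_t));
+ *
+ * so the mailbox lands immediately after the descriptor ring.
+ */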
+
+hxge_status_t
+hxge_reset_txdma_channel(p_hxge_t hxgep, uint16_t channel, uint64_t reg_data)
+{
+ hpi_status_t rs = HPI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+ hpi_handle_t handle;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, " ==> hxge_reset_txdma_channel"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ if ((reg_data & TDC_TDR_RST_MASK) == TDC_TDR_RST_MASK) {
+ rs = hpi_txdma_channel_reset(handle, channel);
+ } else {
+ rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel);
+ }
+
+ if (rs != HPI_SUCCESS) {
+ status = HXGE_ERROR | rs;
+ }
+
+	/*
+	 * Reset the tail (kick) register to 0. Hardware will not reset it,
+	 * and a Tx ring overflow fatal error results if the tail is not
+	 * set to 0 after the reset.
+	 */
+ TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, " <== hxge_reset_txdma_channel"));
+
+ return (status);
+}
+
+hxge_status_t
+hxge_init_txdma_channel_event_mask(p_hxge_t hxgep, uint16_t channel,
+ tdc_int_mask_t *mask_p)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+	    "==> hxge_init_txdma_channel_event_mask"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+	/*
+	 * Mask off tx_rng_oflow since it is a false alarm. The driver
+	 * ensures that it does not overflow the hardware and checks the
+	 * hardware status.
+	 */
+ mask_p->bits.tx_rng_oflow = 1;
+ rs = hpi_txdma_event_mask(handle, OP_SET, channel, mask_p);
+ if (rs != HPI_SUCCESS) {
+ status = HXGE_ERROR | rs;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+	    "<== hxge_init_txdma_channel_event_mask"));
+ return (status);
+}
+
+hxge_status_t
+hxge_enable_txdma_channel(p_hxge_t hxgep,
+ uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_enable_txdma_channel"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ /*
+ * Use configuration data composed at init time. Write to hardware the
+ * transmit ring configurations.
+ */
+ rs = hpi_txdma_ring_config(handle, OP_SET, channel,
+ (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
+
+ if (rs != HPI_SUCCESS) {
+ return (HXGE_ERROR | rs);
+ }
+
+ /* Write to hardware the mailbox */
+ rs = hpi_txdma_mbox_config(handle, OP_SET, channel,
+ (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
+
+ if (rs != HPI_SUCCESS) {
+ return (HXGE_ERROR | rs);
+ }
+
+ /* Start the DMA engine. */
+ rs = hpi_txdma_channel_init_enable(handle, channel);
+ if (rs != HPI_SUCCESS) {
+ return (HXGE_ERROR | rs);
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_enable_txdma_channel"));
+ return (status);
+}
+
+void
+hxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, boolean_t l4_cksum,
+ int pkt_len, uint8_t npads, p_tx_pkt_hdr_all_t pkthdrp)
+{
+ p_tx_pkt_header_t hdrp;
+ p_mblk_t nmp;
+ uint64_t tmp;
+ size_t mblk_len;
+ size_t iph_len;
+ size_t hdrs_size;
+ uint8_t *ip_buf;
+ uint16_t eth_type;
+ uint8_t ipproto;
+ boolean_t is_vlan = B_FALSE;
+ size_t eth_hdr_size;
+ uint8_t hdrs_buf[sizeof (struct ether_header) + 64 + sizeof (uint32_t)];
+
+ HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: mp $%p", mp));
+
+ /*
+ * Caller should zero out the headers first.
+ */
+ hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;
+
+ if (fill_len) {
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_fill_tx_hdr: pkt_len %d npads %d",
+ pkt_len, npads));
+ tmp = (uint64_t)pkt_len;
+ hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
+
+ goto fill_tx_header_done;
+ }
+ tmp = (uint64_t)npads;
+ hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);
+
+ /*
+	 * mp is the original data packet (does not include the internal
+	 * transmit header).
+ */
+ nmp = mp;
+ mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_fill_tx_hdr: mp $%p b_rptr $%p len %d",
+ mp, nmp->b_rptr, mblk_len));
+ ip_buf = NULL;
+ bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header));
+ eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+	    "==> hxge_fill_tx_hdr: (value 0x%llx) ether type 0x%x",
+	    hdrp->value, eth_type));
+
+ if (eth_type < ETHERMTU) {
+ tmp = 1ull;
+ hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_hdr_init: LLC value 0x%llx", hdrp->value));
+ if (*(hdrs_buf + sizeof (struct ether_header)) ==
+ LLC_SNAP_SAP) {
+ eth_type = ntohs(*((uint16_t *)(hdrs_buf +
+ sizeof (struct ether_header) + 6)));
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_hdr_init: LLC ether type 0x%x",
+ eth_type));
+ } else {
+ goto fill_tx_header_done;
+ }
+ } else if (eth_type == VLAN_ETHERTYPE) {
+ tmp = 1ull;
+ hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);
+
+ eth_type = ntohs(((struct ether_vlan_header *)
+ hdrs_buf)->ether_type);
+ is_vlan = B_TRUE;
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_hdr_init: VLAN value 0x%llx",
+ hdrp->value));
+ }
+ if (!is_vlan) {
+ eth_hdr_size = sizeof (struct ether_header);
+ } else {
+ eth_hdr_size = sizeof (struct ether_vlan_header);
+ }
+
+ switch (eth_type) {
+ case ETHERTYPE_IP:
+ if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
+ ip_buf = nmp->b_rptr + eth_hdr_size;
+ mblk_len -= eth_hdr_size;
+ iph_len = ((*ip_buf) & 0x0f);
+ if (mblk_len > (iph_len + sizeof (uint32_t))) {
+ ip_buf = nmp->b_rptr;
+ ip_buf += eth_hdr_size;
+ } else {
+ ip_buf = NULL;
+ }
+ }
+ if (ip_buf == NULL) {
+ hdrs_size = 0;
+ ((p_ether_header_t)hdrs_buf)->ether_type = 0;
+ while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) {
+ mblk_len = (size_t)nmp->b_wptr -
+ (size_t)nmp->b_rptr;
+ if (mblk_len >=
+ (sizeof (hdrs_buf) - hdrs_size))
+ mblk_len = sizeof (hdrs_buf) -
+ hdrs_size;
+ bcopy(nmp->b_rptr,
+ &hdrs_buf[hdrs_size], mblk_len);
+ hdrs_size += mblk_len;
+ nmp = nmp->b_cont;
+ }
+ ip_buf = hdrs_buf;
+ ip_buf += eth_hdr_size;
+ iph_len = ((*ip_buf) & 0x0f);
+ }
+ ipproto = ip_buf[9];
+
+ tmp = (uint64_t)iph_len;
+ hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
+ tmp = (uint64_t)(eth_hdr_size >> 1);
+ hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
+
+ HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv4 "
+ " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
+ "tmp 0x%x", iph_len, hdrp->bits.l3start, eth_hdr_size,
+ ipproto, tmp));
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_hdr_init: IP value 0x%llx", hdrp->value));
+ break;
+
+ case ETHERTYPE_IPV6:
+ hdrs_size = 0;
+ ((p_ether_header_t)hdrs_buf)->ether_type = 0;
+ while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) {
+ mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
+ if (mblk_len >= (sizeof (hdrs_buf) - hdrs_size))
+ mblk_len = sizeof (hdrs_buf) - hdrs_size;
+ bcopy(nmp->b_rptr, &hdrs_buf[hdrs_size], mblk_len);
+ hdrs_size += mblk_len;
+ nmp = nmp->b_cont;
+ }
+ ip_buf = hdrs_buf;
+ ip_buf += eth_hdr_size;
+
+ tmp = 1ull;
+ hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);
+
+ tmp = (eth_hdr_size >> 1);
+ hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
+
+ /* byte 6 is the next header protocol */
+ ipproto = ip_buf[6];
+
+ HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv6 "
+ " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
+ iph_len, hdrp->bits.l3start, eth_hdr_size, ipproto));
+ HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_hdr_init: IPv6 "
+ "value 0x%llx", hdrp->value));
+ break;
+
+ default:
+ HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: non-IP"));
+ goto fill_tx_header_done;
+ }
+
+ switch (ipproto) {
+ case IPPROTO_TCP:
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
+ if (l4_cksum) {
+ tmp = 1ull;
+ hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_hdr_init: TCP CKSUM"
+ "value 0x%llx", hdrp->value));
+ }
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_hdr_init: TCP value 0x%llx", hdrp->value));
+ break;
+
+ case IPPROTO_UDP:
+ HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: UDP"));
+ if (l4_cksum) {
+ tmp = 0x2ull;
+ hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
+ }
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_hdr_init: UDP value 0x%llx",
+ hdrp->value));
+ break;
+
+ default:
+ goto fill_tx_header_done;
+ }
+
+fill_tx_header_done:
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_fill_tx_hdr: pkt_len %d npads %d value 0x%llx",
+ pkt_len, npads, hdrp->value));
+ HXGE_DEBUG_MSG((NULL, TX_CTL, "<== hxge_fill_tx_hdr"));
+}
+
+/*ARGSUSED*/
+p_mblk_t
+hxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
+{
+ p_mblk_t newmp = NULL;
+
+ if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "<== hxge_tx_pkt_header_reserve: allocb failed"));
+ return (NULL);
+ }
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_header_reserve: get new mp"));
+ DB_TYPE(newmp) = M_DATA;
+ newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
+ linkb(newmp, mp);
+ newmp->b_rptr -= TX_PKT_HEADER_SIZE;
+
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==>hxge_tx_pkt_header_reserve: b_rptr $%p b_wptr $%p",
+ newmp->b_rptr, newmp->b_wptr));
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "<== hxge_tx_pkt_header_reserve: use new mp"));
+ return (newmp);
+}
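+
+/*
+ * Usage sketch (not part of the original submission): on the transmit
+ * path the reserve/fill pair is used roughly as below (l4_cksum stands
+ * for the caller's checksum-offload state). The caller zeroes the
+ * reserved header bytes first, as hxge_fill_tx_hdr() requires:
+ *
+ *	p_mblk_t hmp;
+ *	uint8_t npads = 0;
+ *
+ *	if ((hmp = hxge_tx_pkt_header_reserve(mp, &npads)) == NULL)
+ *		return (NULL);		(allocb failed)
+ *	bzero(hmp->b_rptr, TX_PKT_HEADER_SIZE);
+ *	hxge_fill_tx_hdr(mp, B_FALSE, l4_cksum, 0, npads,
+ *	    (p_tx_pkt_hdr_all_t)hmp->b_rptr);
+ */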
+
+int
+hxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
+{
+ uint_t nmblks;
+ ssize_t len;
+ uint_t pkt_len;
+ p_mblk_t nmp, bmp, tmp;
+ uint8_t *b_wptr;
+
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p len %d",
+ mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
+
+ nmp = mp;
+ bmp = mp;
+ nmblks = 0;
+ pkt_len = 0;
+ *tot_xfer_len_p = 0;
+
+ while (nmp) {
+ len = MBLKL(nmp);
+ HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: "
+ "len %d pkt_len %d nmblks %d tot_xfer_len %d",
+ len, pkt_len, nmblks, *tot_xfer_len_p));
+
+ if (len <= 0) {
+ bmp = nmp;
+ nmp = nmp->b_cont;
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_nmblocks:"
+ " len (0) pkt_len %d nmblks %d", pkt_len, nmblks));
+ continue;
+ }
+ *tot_xfer_len_p += len;
+ HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: "
+ "len %d pkt_len %d nmblks %d tot_xfer_len %d",
+ len, pkt_len, nmblks, *tot_xfer_len_p));
+
+ if (len < hxge_bcopy_thresh) {
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_nmblocks: "
+ "len %d (< thresh) pkt_len %d nmblks %d",
+ len, pkt_len, nmblks));
+ if (pkt_len == 0)
+ nmblks++;
+ pkt_len += len;
+ if (pkt_len >= hxge_bcopy_thresh) {
+ pkt_len = 0;
+ len = 0;
+ nmp = bmp;
+ }
+ } else {
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_nmblocks: "
+ "len %d (> thresh) pkt_len %d nmblks %d",
+ len, pkt_len, nmblks));
+ pkt_len = 0;
+ nmblks++;
+ /*
+ * Hardware limits the transfer length to 4K. If len is
+			 * more than 4K, we need to break it up into at most 2
+ * more blocks.
+ */
+ if (len > TX_MAX_TRANSFER_LENGTH) {
+ uint32_t nsegs;
+
+				nsegs = 1;
+				HXGE_DEBUG_MSG((NULL, TX_CTL,
+				    "==> hxge_tx_pkt_nmblocks: "
+				    "len %d pkt_len %d nmblks %d nsegs %d",
+				    len, pkt_len, nmblks, nsegs));
+ if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
+ ++nsegs;
+ }
+ do {
+ b_wptr = nmp->b_rptr +
+ TX_MAX_TRANSFER_LENGTH;
+ nmp->b_wptr = b_wptr;
+ if ((tmp = dupb(nmp)) == NULL) {
+ return (0);
+ }
+ tmp->b_rptr = b_wptr;
+ tmp->b_wptr = nmp->b_wptr;
+ tmp->b_cont = nmp->b_cont;
+ nmp->b_cont = tmp;
+ nmblks++;
+ if (--nsegs) {
+ nmp = tmp;
+ }
+ } while (nsegs);
+ nmp = tmp;
+ }
+ }
+
+ /*
+ * Hardware limits the transmit gather pointers to 15.
+ */
+ if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
+ TX_MAX_GATHER_POINTERS) {
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "==> hxge_tx_pkt_nmblocks: pull msg - "
+ "len %d pkt_len %d nmblks %d",
+ len, pkt_len, nmblks));
+ /* Pull all message blocks from b_cont */
+ if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
+ return (0);
+ }
+ freemsg(nmp->b_cont);
+ nmp->b_cont = tmp;
+ pkt_len = 0;
+ }
+ bmp = nmp;
+ nmp = nmp->b_cont;
+ }
+
+ HXGE_DEBUG_MSG((NULL, TX_CTL,
+ "<== hxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
+ "nmblks %d len %d tot_xfer_len %d",
+ mp->b_rptr, mp->b_wptr, nmblks, MBLKL(mp), *tot_xfer_len_p));
+ return (nmblks);
+}
+
+boolean_t
+hxge_txdma_reclaim(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, int nmblks)
+{
+ boolean_t status = B_TRUE;
+ p_hxge_dma_common_t tx_desc_dma_p;
+ hxge_dma_common_t desc_area;
+ p_tx_desc_t tx_desc_ring_vp;
+ p_tx_desc_t tx_desc_p;
+ p_tx_desc_t tx_desc_pp;
+ tx_desc_t r_tx_desc;
+ p_tx_msg_t tx_msg_ring;
+ p_tx_msg_t tx_msg_p;
+ hpi_handle_t handle;
+ tdc_tdr_head_t tx_head;
+ uint32_t pkt_len;
+ uint_t tx_rd_index;
+ uint16_t head_index, tail_index;
+ uint8_t tdc;
+ boolean_t head_wrap, tail_wrap;
+ p_hxge_tx_ring_stats_t tdc_stats;
+ tdc_byte_cnt_t byte_cnt;
+ tdc_tdr_qlen_t qlen;
+ int rc;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_reclaim"));
+
+ status = ((tx_ring_p->descs_pending < hxge_reclaim_pending) &&
+ (nmblks != 0));
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
+ tx_ring_p->descs_pending, hxge_reclaim_pending, nmblks));
+
+ if (!status) {
+ tx_desc_dma_p = &tx_ring_p->tdc_desc;
+ desc_area = tx_ring_p->tdc_desc;
+ tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
+ tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
+ tx_rd_index = tx_ring_p->rd_index;
+ tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
+ tx_msg_ring = tx_ring_p->tx_msg_ring;
+ tx_msg_p = &tx_msg_ring[tx_rd_index];
+ tdc = tx_ring_p->tdc;
+ tdc_stats = tx_ring_p->tdc_stats;
+ if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
+ tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
+ }
+ tail_index = tx_ring_p->wr_index;
+ tail_wrap = tx_ring_p->wr_index_wrap;
+
+ /*
+ * tdc_byte_cnt reg can be used to get bytes transmitted. It
+ * includes padding too in case of runt packets.
+ */
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ TXDMA_REG_READ64(handle, TDC_BYTE_CNT, tdc, &byte_cnt.value);
+ tdc_stats->obytes_with_pad += byte_cnt.bits.byte_count;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: tdc %d tx_rd_index %d "
+ "tail_index %d tail_wrap %d tx_desc_p $%p ($%p) ",
+ tdc, tx_rd_index, tail_index, tail_wrap,
+ tx_desc_p, (*(uint64_t *)tx_desc_p)));
+
+ /*
+ * Read the hardware maintained transmit head and wrap around
+ * bit.
+ */
+ TXDMA_REG_READ64(handle, TDC_TDR_HEAD, tdc, &tx_head.value);
+ head_index = tx_head.bits.head;
+ head_wrap = tx_head.bits.wrap;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: "
+ "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
+ tx_rd_index, tail_index, tail_wrap, head_index, head_wrap));
+
+ /*
+ * For debug only. This can be used to verify the qlen and make
+ * sure the hardware is wrapping the Tdr correctly.
+ */
+ TXDMA_REG_READ64(handle, TDC_TDR_QLEN, tdc, &qlen.value);
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: tdr_qlen %d tdr_pref_qlen %d",
+ qlen.bits.tdr_qlen, qlen.bits.tdr_pref_qlen));
+
+ if (head_index == tail_index) {
+ if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index,
+ tail_wrap) && (head_index == tx_rd_index)) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: EMPTY"));
+ return (B_TRUE);
+ }
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: Checking if ring full"));
+ if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
+ tail_wrap)) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: full"));
+ return (B_FALSE);
+ }
+ }
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: tx_rd_index and head_index"));
+
+ /* XXXX: limit the # of reclaims */
+ tx_desc_pp = &r_tx_desc;
+ while ((tx_rd_index != head_index) &&
+ (tx_ring_p->descs_pending != 0)) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: Checking if pending"));
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: descs_pending %d ",
+ tx_ring_p->descs_pending));
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: "
+ "(tx_rd_index %d head_index %d (tx_desc_p $%p)",
+ tx_rd_index, head_index, tx_desc_p));
+
+ tx_desc_pp->value = tx_desc_p->value;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: "
+ "(tx_rd_index %d head_index %d "
+ "tx_desc_p $%p (desc value 0x%llx) ",
+ tx_rd_index, head_index,
+ tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: dump desc:"));
+
+ /*
+ * tdc_byte_cnt reg can be used to get bytes
+ * transmitted
+ */
+ pkt_len = tx_desc_pp->bits.tr_len;
+ tdc_stats->obytes += pkt_len;
+ tdc_stats->opackets += tx_desc_pp->bits.sop;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: pkt_len %d "
+ "tdc channel %d opackets %d",
+ pkt_len, tdc, tdc_stats->opackets));
+
+ if (tx_msg_p->flags.dma_type == USE_DVMA) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "tx_desc_p = $%p tx_desc_pp = $%p "
+ "index = %d",
+ tx_desc_p, tx_desc_pp,
+ tx_ring_p->rd_index));
+ (void) dvma_unload(tx_msg_p->dvma_handle,
+ 0, -1);
+ tx_msg_p->dvma_handle = NULL;
+ if (tx_ring_p->dvma_wr_index ==
+ tx_ring_p->dvma_wrap_mask) {
+ tx_ring_p->dvma_wr_index = 0;
+ } else {
+ tx_ring_p->dvma_wr_index++;
+ }
+ tx_ring_p->dvma_pending--;
+ } else if (tx_msg_p->flags.dma_type == USE_DMA) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: USE DMA"));
+ if (rc = ddi_dma_unbind_handle
+ (tx_msg_p->dma_handle)) {
+ cmn_err(CE_WARN, "hxge_reclaim: "
+ "ddi_dma_unbind_handle "
+ "failed. status %d", rc);
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_reclaim: count packets"));
+
+ /*
+ * count a chained packet only once.
+ */
+ if (tx_msg_p->tx_message != NULL) {
+ freemsg(tx_msg_p->tx_message);
+ tx_msg_p->tx_message = NULL;
+ }
+ tx_msg_p->flags.dma_type = USE_NONE;
+ tx_rd_index = tx_ring_p->rd_index;
+ tx_rd_index = (tx_rd_index + 1) &
+ tx_ring_p->tx_wrap_mask;
+ tx_ring_p->rd_index = tx_rd_index;
+ tx_ring_p->descs_pending--;
+ tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
+ tx_msg_p = &tx_msg_ring[tx_rd_index];
+ }
+
+ status = (nmblks <= (tx_ring_p->tx_ring_size -
+ tx_ring_p->descs_pending - TX_FULL_MARK));
+ if (status) {
+ cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
+ }
+ } else {
+ status = (nmblks <= (tx_ring_p->tx_ring_size -
+ tx_ring_p->descs_pending - TX_FULL_MARK));
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_reclaim status = 0x%08x", status));
+ return (status);
+}
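+
+/*
+ * Note (not part of the original submission): the TXDMA_RING_EMPTY() and
+ * TXDMA_RING_FULL() tests above disambiguate the head == tail case using
+ * the wrap bits that hardware and software toggle each time their
+ * respective indices pass the end of the ring. In pseudo-C:
+ *
+ *	empty = (head == tail) && (head_wrap == tail_wrap);
+ *	full  = (head == tail) && (head_wrap != tail_wrap);
+ *
+ * Equal indices with equal wrap bits mean no descriptors are
+ * outstanding; equal indices with opposite wrap bits mean every
+ * descriptor is outstanding.
+ */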
+
+uint_t
+hxge_tx_intr(caddr_t arg1, caddr_t arg2)
+{
+ p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
+ p_hxge_t hxgep = (p_hxge_t)arg2;
+ p_hxge_ldg_t ldgp;
+ uint8_t channel;
+ uint32_t vindex;
+ hpi_handle_t handle;
+ tdc_stat_t cs;
+ p_tx_ring_t *tx_rings;
+ p_tx_ring_t tx_ring_p;
+ hpi_status_t rs = HPI_SUCCESS;
+ uint_t serviced = DDI_INTR_UNCLAIMED;
+ hxge_status_t status = HXGE_OK;
+
+ if (ldvp == NULL) {
+ HXGE_DEBUG_MSG((NULL, INT_CTL,
+ "<== hxge_tx_intr: hxgep $%p ldvp $%p", hxgep, ldvp));
+ return (DDI_INTR_UNCLAIMED);
+ }
+
+ if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
+ hxgep = ldvp->hxgep;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_tx_intr: hxgep(arg2) $%p ldvp(arg1) $%p", hxgep, ldvp));
+
+ /*
+ * This interrupt handler is for a specific transmit dma channel.
+ */
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ /* Get the control and status for this channel. */
+ channel = ldvp->channel;
+ ldgp = ldvp->ldgp;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_tx_intr: hxgep $%p ldvp (ldvp) $%p channel %d",
+ hxgep, ldvp, channel));
+
+ rs = hpi_txdma_control_status(handle, OP_GET, channel, &cs);
+ vindex = ldvp->vdma_index;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_tx_intr:channel %d ring index %d status 0x%08x",
+ channel, vindex, rs));
+
+ if (!rs && cs.bits.marked) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_tx_intr:channel %d ring index %d "
+ "status 0x%08x (marked bit set)", channel, vindex, rs));
+ tx_rings = hxgep->tx_rings->rings;
+ tx_ring_p = tx_rings[vindex];
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_tx_intr:channel %d ring index %d "
+ "status 0x%08x (marked bit set, calling reclaim)",
+ channel, vindex, rs));
+
+ MUTEX_ENTER(&tx_ring_p->lock);
+ (void) hxge_txdma_reclaim(hxgep, tx_rings[vindex], 0);
+ MUTEX_EXIT(&tx_ring_p->lock);
+ mac_tx_update(hxgep->mach);
+ }
+
+ /*
+ * Process other transmit control and status. Check the ldv state.
+ */
+ status = hxge_tx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);
+
+ /* Clear the error bits */
+	TXDMA_REG_WRITE64(handle, TDC_STAT, channel, cs.value);
+
+ /*
+ * Rearm this logical group if this is a single device group.
+ */
+ if (ldgp->nldvs == 1) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_tx_intr: rearm"));
+ if (status == HXGE_OK) {
+ (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
+ B_TRUE, ldgp->ldg_timer);
+ }
+ }
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_tx_intr"));
+ serviced = DDI_INTR_CLAIMED;
+ return (serviced);
+}
+
+void
+hxge_txdma_stop(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop"));
+
+ (void) hxge_tx_vmac_disable(hxgep);
+ (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop"));
+}
+
+hxge_status_t
+hxge_txdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
+{
+ int i, ndmas;
+ uint16_t channel;
+ p_tx_rings_t tx_rings;
+ p_tx_ring_t *tx_desc_rings;
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_txdma_hw_mode: enable mode %d", enable));
+
+ if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+		    "<== hxge_txdma_hw_mode: not initialized"));
+ return (HXGE_ERROR);
+ }
+ tx_rings = hxgep->tx_rings;
+ if (tx_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hw_mode: NULL global ring pointer"));
+ return (HXGE_ERROR);
+ }
+ tx_desc_rings = tx_rings->rings;
+ if (tx_desc_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hw_mode: NULL rings pointer"));
+ return (HXGE_ERROR);
+ }
+ ndmas = tx_rings->ndmas;
+ if (!ndmas) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_txdma_hw_mode: no dma channel allocated"));
+ return (HXGE_ERROR);
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_mode: "
+ "tx_rings $%p tx_desc_rings $%p ndmas %d",
+ tx_rings, tx_desc_rings, ndmas));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ for (i = 0; i < ndmas; i++) {
+ if (tx_desc_rings[i] == NULL) {
+ continue;
+ }
+ channel = tx_desc_rings[i]->tdc;
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_txdma_hw_mode: channel %d", channel));
+ if (enable) {
+ rs = hpi_txdma_channel_enable(handle, channel);
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_txdma_hw_mode: channel %d (enable) "
+ "rs 0x%x", channel, rs));
+ } else {
+ /*
+			 * Stop the dma channel and wait for the stop done
+			 * bit. If the stop done bit is not set, force an
+			 * error so the channel will stop. All channels
+			 * bound to this port need to be stopped and reset
+			 * after injecting an interrupt error.
+ */
+ rs = hpi_txdma_channel_disable(handle, channel);
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_txdma_hw_mode: channel %d (disable) "
+ "rs 0x%x", channel, rs));
+ }
+ }
+
+ status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_txdma_hw_mode: status 0x%x", status));
+
+ return (status);
+}
+
+void
+hxge_txdma_enable_channel(p_hxge_t hxgep, uint16_t channel)
+{
+ hpi_handle_t handle;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "==> hxge_txdma_enable_channel: channel %d", channel));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ /* enable the transmit dma channels */
+ (void) hpi_txdma_channel_enable(handle, channel);
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_txdma_enable_channel"));
+}
+
+void
+hxge_txdma_disable_channel(p_hxge_t hxgep, uint16_t channel)
+{
+ hpi_handle_t handle;
+
+ HXGE_DEBUG_MSG((hxgep, DMA_CTL,
+ "==> hxge_txdma_disable_channel: channel %d", channel));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ /* stop the transmit dma channels */
+ (void) hpi_txdma_channel_disable(handle, channel);
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_disable_channel"));
+}
+
+int
+hxge_txdma_stop_inj_err(p_hxge_t hxgep, int channel)
+{
+ hpi_handle_t handle;
+ int status;
+ hpi_status_t rs = HPI_SUCCESS;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop_inj_err"));
+
+ /*
+	 * Stop the dma channel and wait for the stop done bit. If the stop
+	 * done bit is not set, then inject an error.
+ */
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ rs = hpi_txdma_channel_disable(handle, channel);
+ status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
+ if (status == HXGE_OK) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_stop_inj_err (channel %d): "
+ "stopped OK", channel));
+ return (status);
+ }
+
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+	    "==> hxge_txdma_stop_inj_err (channel %d): stop failed (0x%x)"
+	    " (injected error but still not stopped)", channel, rs));
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop_inj_err"));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+void
+hxge_fixup_txdma_rings(p_hxge_t hxgep)
+{
+ int index, ndmas;
+ uint16_t channel;
+ p_tx_rings_t tx_rings;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_txdma_rings"));
+
+ /*
+ * For each transmit channel, reclaim each descriptor and free buffers.
+ */
+ tx_rings = hxgep->tx_rings;
+ if (tx_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_fixup_txdma_rings: NULL ring pointer"));
+ return;
+ }
+
+ ndmas = tx_rings->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_fixup_txdma_rings: no channel allocated"));
+ return;
+ }
+
+ if (tx_rings->rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_fixup_txdma_rings: NULL rings pointer"));
+ return;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_fixup_txdma_rings: "
+ "tx_rings $%p tx_desc_rings $%p ndmas %d",
+ tx_rings, tx_rings->rings, ndmas));
+
+ for (index = 0; index < ndmas; index++) {
+ channel = tx_rings->rings[index]->tdc;
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_fixup_txdma_rings: channel %d", channel));
+ hxge_txdma_fixup_channel(hxgep, tx_rings->rings[index],
+ channel);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_txdma_rings"));
+}
+
+/*ARGSUSED*/
+void
+hxge_txdma_fix_channel(p_hxge_t hxgep, uint16_t channel)
+{
+ p_tx_ring_t ring_p;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_channel"));
+
+ ring_p = hxge_txdma_get_ring(hxgep, channel);
+ if (ring_p == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel"));
+ return;
+ }
+
+ if (ring_p->tdc != channel) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_fix_channel: channel not matched "
+		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
+ return;
+ }
+
+ hxge_txdma_fixup_channel(hxgep, ring_p, channel);
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel"));
+}
+
+/*ARGSUSED*/
+void
+hxge_txdma_fixup_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel)
+{
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_channel"));
+
+ if (ring_p == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_fixup_channel: NULL ring pointer"));
+ return;
+ }
+ if (ring_p->tdc != channel) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_fixup_channel: channel not matched "
+		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
+ return;
+ }
+ MUTEX_ENTER(&ring_p->lock);
+ (void) hxge_txdma_reclaim(hxgep, ring_p, 0);
+
+ ring_p->rd_index = 0;
+ ring_p->wr_index = 0;
+ ring_p->ring_head.value = 0;
+ ring_p->ring_kick_tail.value = 0;
+ ring_p->descs_pending = 0;
+ MUTEX_EXIT(&ring_p->lock);
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_channel"));
+}
+
+/*ARGSUSED*/
+void
+hxge_txdma_hw_kick(p_hxge_t hxgep)
+{
+ int index, ndmas;
+ uint16_t channel;
+ p_tx_rings_t tx_rings;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick"));
+
+ tx_rings = hxgep->tx_rings;
+ if (tx_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hw_kick: NULL ring pointer"));
+ return;
+ }
+ ndmas = tx_rings->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hw_kick: no channel allocated"));
+ return;
+ }
+ if (tx_rings->rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hw_kick: NULL rings pointer"));
+ return;
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_kick: "
+ "tx_rings $%p tx_desc_rings $%p ndmas %d",
+ tx_rings, tx_rings->rings, ndmas));
+
+ for (index = 0; index < ndmas; index++) {
+ channel = tx_rings->rings[index]->tdc;
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_txdma_hw_kick: channel %d", channel));
+ hxge_txdma_hw_kick_channel(hxgep, tx_rings->rings[index],
+ channel);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick"));
+}
+
+/*ARGSUSED*/
+void
+hxge_txdma_kick_channel(p_hxge_t hxgep, uint16_t channel)
+{
+ p_tx_ring_t ring_p;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_kick_channel"));
+
+ ring_p = hxge_txdma_get_ring(hxgep, channel);
+ if (ring_p == NULL) {
+		HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_kick_channel"));
+ return;
+ }
+
+ if (ring_p->tdc != channel) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_kick_channel: channel not matched "
+		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
+ return;
+ }
+
+ hxge_txdma_hw_kick_channel(hxgep, ring_p, channel);
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_kick_channel"));
+}
+
+/*ARGSUSED*/
+void
+hxge_txdma_hw_kick_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel)
+{
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick_channel"));
+
+ if (ring_p == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hw_kick_channel: NULL ring pointer"));
+ return;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick_channel"));
+}
+
+/*ARGSUSED*/
+void
+hxge_check_tx_hang(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_check_tx_hang"));
+
+ /*
+	 * Needs inputs from hardware registers: whether the head index has
+	 * moved since the last timeout, and the packets-not-transmitted or
+	 * stuffed registers.
+ */
+ if (hxge_txdma_hung(hxgep)) {
+ hxge_fixup_hung_txdma_rings(hxgep);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_check_tx_hang"));
+}
+
+int
+hxge_txdma_hung(p_hxge_t hxgep)
+{
+ int index, ndmas;
+ uint16_t channel;
+ p_tx_rings_t tx_rings;
+ p_tx_ring_t tx_ring_p;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hung"));
+
+ tx_rings = hxgep->tx_rings;
+ if (tx_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hung: NULL ring pointer"));
+ return (B_FALSE);
+ }
+
+ ndmas = tx_rings->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hung: no channel allocated"));
+ return (B_FALSE);
+ }
+
+ if (tx_rings->rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hung: NULL rings pointer"));
+ return (B_FALSE);
+ }
+
+ for (index = 0; index < ndmas; index++) {
+ channel = tx_rings->rings[index]->tdc;
+ tx_ring_p = tx_rings->rings[index];
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_hung: channel %d", channel));
+ if (hxge_txdma_channel_hung(hxgep, tx_ring_p, channel)) {
+ return (B_TRUE);
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hung"));
+
+ return (B_FALSE);
+}
+
+int
+hxge_txdma_channel_hung(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
+{
+ uint16_t head_index, tail_index;
+ boolean_t head_wrap, tail_wrap;
+ hpi_handle_t handle;
+ tdc_tdr_head_t tx_head;
+ uint_t tx_rd_index;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_channel_hung: channel %d", channel));
+ MUTEX_ENTER(&tx_ring_p->lock);
+ (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
+
+ tail_index = tx_ring_p->wr_index;
+ tail_wrap = tx_ring_p->wr_index_wrap;
+ tx_rd_index = tx_ring_p->rd_index;
+ MUTEX_EXIT(&tx_ring_p->lock);
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_channel_hung: tdc %d tx_rd_index %d "
+ "tail_index %d tail_wrap %d ",
+ channel, tx_rd_index, tail_index, tail_wrap));
+ /*
+ * Read the hardware maintained transmit head and wrap around bit.
+ */
+ (void) hpi_txdma_ring_head_get(handle, channel, &tx_head);
+ head_index = tx_head.bits.head;
+ head_wrap = tx_head.bits.wrap;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung: "
+ "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
+ tx_rd_index, tail_index, tail_wrap, head_index, head_wrap));
+
+ if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index, tail_wrap) &&
+ (head_index == tx_rd_index)) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_channel_hung: EMPTY"));
+ return (B_FALSE);
+ }
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_channel_hung: Checking if ring full"));
+ if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, tail_wrap)) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_txdma_channel_hung: full"));
+ return (B_TRUE);
+ }
+
+ /* If not full, check with hardware to see if it is hung */
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_channel_hung"));
+
+ return (B_FALSE);
+}
+
+/*ARGSUSED*/
+void
+hxge_fixup_hung_txdma_rings(p_hxge_t hxgep)
+{
+ int index, ndmas;
+ uint16_t channel;
+ p_tx_rings_t tx_rings;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings"));
+ tx_rings = hxgep->tx_rings;
+ if (tx_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_fixup_hung_txdma_rings: NULL ring pointer"));
+ return;
+ }
+ ndmas = tx_rings->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_fixup_hung_txdma_rings: no channel allocated"));
+ return;
+ }
+ if (tx_rings->rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_fixup_hung_txdma_rings: NULL rings pointer"));
+ return;
+ }
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings: "
+ "tx_rings $%p tx_desc_rings $%p ndmas %d",
+ tx_rings, tx_rings->rings, ndmas));
+
+ for (index = 0; index < ndmas; index++) {
+ channel = tx_rings->rings[index]->tdc;
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_fixup_hung_txdma_rings: channel %d", channel));
+ hxge_txdma_fixup_hung_channel(hxgep, tx_rings->rings[index],
+ channel);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_hung_txdma_rings"));
+}
+
+/*ARGSUSED*/
+void
+hxge_txdma_fix_hung_channel(p_hxge_t hxgep, uint16_t channel)
+{
+ p_tx_ring_t ring_p;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_hung_channel"));
+ ring_p = hxge_txdma_get_ring(hxgep, channel);
+ if (ring_p == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_fix_hung_channel"));
+ return;
+ }
+ if (ring_p->tdc != channel) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_fix_hung_channel: channel not matched "
+		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
+ return;
+ }
+ hxge_txdma_fixup_channel(hxgep, ring_p, channel);
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_hung_channel"));
+}
+
+/*ARGSUSED*/
+void
+hxge_txdma_fixup_hung_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
+ uint16_t channel)
+{
+ hpi_handle_t handle;
+ int status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_hung_channel"));
+
+ if (ring_p == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_fixup_hung_channel: NULL ring pointer"));
+ return;
+ }
+ if (ring_p->tdc != channel) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_fixup_hung_channel: channel "
+		    "not matched ring tdc %d passed channel %d",
+ ring_p->tdc, channel));
+ return;
+ }
+ /* Reclaim descriptors */
+ MUTEX_ENTER(&ring_p->lock);
+ (void) hxge_txdma_reclaim(hxgep, ring_p, 0);
+ MUTEX_EXIT(&ring_p->lock);
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ /*
+	 * Stop the DMA channel and wait for the stop-done bit. If the
+	 * stop-done bit is not set, then force an error.
+ */
+ status = hpi_txdma_channel_disable(handle, channel);
+ if (!(status & HPI_TXDMA_STOP_FAILED)) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_fixup_hung_channel: stopped OK "
+ "ring tdc %d passed channel %d", ring_p->tdc, channel));
+ return;
+ }
+ /* Stop done bit will be set as a result of error injection */
+ status = hpi_txdma_channel_disable(handle, channel);
+ if (!(status & HPI_TXDMA_STOP_FAILED)) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+		    "<== hxge_txdma_fixup_hung_channel: stopped again "
+		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
+ return;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_fixup_hung_channel: stop done still not set!! "
+	    "ring tdc %d passed channel %d", ring_p->tdc, channel));
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_hung_channel"));
+}
+
+/*ARGSUSED*/
+void
+hxge_reclaim_rings(p_hxge_t hxgep)
+{
+ int index, ndmas;
+ uint16_t channel;
+ p_tx_rings_t tx_rings;
+ p_tx_ring_t tx_ring_p;
+
+	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_rings"));
+ tx_rings = hxgep->tx_rings;
+ if (tx_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+		    "<== hxge_reclaim_rings: NULL ring pointer"));
+ return;
+ }
+ ndmas = tx_rings->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+		    "<== hxge_reclaim_rings: no channel allocated"));
+ return;
+ }
+ if (tx_rings->rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+		    "<== hxge_reclaim_rings: NULL rings pointer"));
+ return;
+ }
+	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_rings: "
+ "tx_rings $%p tx_desc_rings $%p ndmas %d",
+ tx_rings, tx_rings->rings, ndmas));
+
+ for (index = 0; index < ndmas; index++) {
+ channel = tx_rings->rings[index]->tdc;
+		HXGE_DEBUG_MSG((hxgep, TX_CTL,
+		    "==> hxge_reclaim_rings: channel %d", channel));
+ tx_ring_p = tx_rings->rings[index];
+ MUTEX_ENTER(&tx_ring_p->lock);
+		(void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
+ MUTEX_EXIT(&tx_ring_p->lock);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_reclaim_rings"));
+}
+
+/*
+ * Static functions start here.
+ */
+static hxge_status_t
+hxge_map_txdma(p_hxge_t hxgep)
+{
+ int i, ndmas;
+ uint16_t channel;
+ p_tx_rings_t tx_rings;
+ p_tx_ring_t *tx_desc_rings;
+ p_tx_mbox_areas_t tx_mbox_areas_p;
+ p_tx_mbox_t *tx_mbox_p;
+ p_hxge_dma_pool_t dma_buf_poolp;
+ p_hxge_dma_pool_t dma_cntl_poolp;
+ p_hxge_dma_common_t *dma_buf_p;
+ p_hxge_dma_common_t *dma_cntl_p;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma"));
+
+ dma_buf_poolp = hxgep->tx_buf_pool_p;
+ dma_cntl_poolp = hxgep->tx_cntl_pool_p;
+
+ if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_map_txdma: buf not allocated"));
+ return (HXGE_ERROR);
+ }
+ ndmas = dma_buf_poolp->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_map_txdma: no dma allocated"));
+ return (HXGE_ERROR);
+ }
+ dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
+ dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
+
+ tx_rings = (p_tx_rings_t)KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
+ tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC(
+ sizeof (p_tx_ring_t) * ndmas, KM_SLEEP);
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
+ "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
+
+ tx_mbox_areas_p = (p_tx_mbox_areas_t)
+ KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
+ tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC(
+ sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP);
+
+ /*
+ * Map descriptors from the buffer pools for each dma channel.
+ */
+ for (i = 0; i < ndmas; i++) {
+ /*
+ * Set up and prepare buffer blocks, descriptors and mailbox.
+ */
+ channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
+ status = hxge_map_txdma_channel(hxgep, channel,
+ (p_hxge_dma_common_t *)&dma_buf_p[i],
+ (p_tx_ring_t *)&tx_desc_rings[i],
+ dma_buf_poolp->num_chunks[i],
+ (p_hxge_dma_common_t *)&dma_cntl_p[i],
+ (p_tx_mbox_t *)&tx_mbox_p[i]);
+ if (status != HXGE_OK) {
+ goto hxge_map_txdma_fail1;
+ }
+ tx_desc_rings[i]->index = (uint16_t)i;
+ tx_desc_rings[i]->tdc_stats = &hxgep->statsp->tdc_stats[i];
+ }
+
+ tx_rings->ndmas = ndmas;
+ tx_rings->rings = tx_desc_rings;
+ hxgep->tx_rings = tx_rings;
+ tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p;
+ hxgep->tx_mbox_areas_p = tx_mbox_areas_p;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
+ "tx_rings $%p rings $%p", hxgep->tx_rings, hxgep->tx_rings->rings));
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
+ "tx_rings $%p tx_desc_rings $%p",
+ hxgep->tx_rings, tx_desc_rings));
+
+ goto hxge_map_txdma_exit;
+
+hxge_map_txdma_fail1:
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma: uninit tx desc "
+	    "(hxgep $%p status 0x%x channel %d i %d)",
+	    hxgep, status, channel, i));
+ i--;
+ for (; i >= 0; i--) {
+ channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
+ hxge_unmap_txdma_channel(hxgep, channel, tx_desc_rings[i],
+ tx_mbox_p[i]);
+ }
+
+ KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
+ KMEM_FREE(tx_rings, sizeof (tx_rings_t));
+ KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
+ KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
+
+hxge_map_txdma_exit:
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma: (status 0x%x channel %d)", status, channel));
+
+ return (status);
+}
+
+static void
+hxge_unmap_txdma(p_hxge_t hxgep)
+{
+ int i, ndmas;
+ uint8_t channel;
+ p_tx_rings_t tx_rings;
+ p_tx_ring_t *tx_desc_rings;
+ p_tx_mbox_areas_t tx_mbox_areas_p;
+ p_tx_mbox_t *tx_mbox_p;
+ p_hxge_dma_pool_t dma_buf_poolp;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma"));
+
+ dma_buf_poolp = hxgep->tx_buf_pool_p;
+ if (!dma_buf_poolp->buf_allocated) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "==> hxge_unmap_txdma: buf not allocated"));
+ return;
+ }
+ ndmas = dma_buf_poolp->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_unmap_txdma: no dma allocated"));
+ return;
+ }
+ tx_rings = hxgep->tx_rings;
+ if (tx_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_unmap_txdma: NULL ring pointer"));
+ return;
+ }
+ tx_desc_rings = tx_rings->rings;
+ if (tx_desc_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_unmap_txdma: NULL ring pointers"));
+ return;
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma: "
+ "tx_rings $%p tx_desc_rings $%p ndmas %d",
+ tx_rings, tx_desc_rings, ndmas));
+
+ tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
+ tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
+
+ for (i = 0; i < ndmas; i++) {
+ channel = tx_desc_rings[i]->tdc;
+ (void) hxge_unmap_txdma_channel(hxgep, channel,
+ (p_tx_ring_t)tx_desc_rings[i],
+ (p_tx_mbox_t)tx_mbox_p[i]);
+ }
+
+ KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
+ KMEM_FREE(tx_rings, sizeof (tx_rings_t));
+ KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
+ KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma"));
+}
+
+static hxge_status_t
+hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p,
+ uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
+ p_tx_mbox_t *tx_mbox_p)
+{
+	hxge_status_t status = HXGE_OK;
+
+ /*
+ * Set up and prepare buffer blocks, descriptors and mailbox.
+ */
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma_channel (channel %d)", channel));
+
+ /*
+ * Transmit buffer blocks
+ */
+ status = hxge_map_txdma_channel_buf_ring(hxgep, channel,
+ dma_buf_p, tx_desc_p, num_chunks);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_map_txdma_channel (channel %d): "
+ "map buffer failed 0x%x", channel, status));
+ goto hxge_map_txdma_channel_exit;
+ }
+ /*
+ * Transmit block ring, and mailbox.
+ */
+ hxge_map_txdma_channel_cfg_ring(hxgep, channel, dma_cntl_p, *tx_desc_p,
+ tx_mbox_p);
+
+ goto hxge_map_txdma_channel_exit;
+
+hxge_map_txdma_channel_fail1:
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma_channel: unmap buf"
+ "(status 0x%x channel %d)", status, channel));
+ hxge_unmap_txdma_channel_buf_ring(hxgep, *tx_desc_p);
+
+hxge_map_txdma_channel_exit:
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_map_txdma_channel: (status 0x%x channel %d)",
+ status, channel));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static void
+hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
+{
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_unmap_txdma_channel (channel %d)", channel));
+
+ /* unmap tx block ring, and mailbox. */
+ (void) hxge_unmap_txdma_channel_cfg_ring(hxgep, tx_ring_p, tx_mbox_p);
+
+ /* unmap buffer blocks */
+ (void) hxge_unmap_txdma_channel_buf_ring(hxgep, tx_ring_p);
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma_channel"));
+}
+
+/*ARGSUSED*/
+static void
+hxge_map_txdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
+ p_hxge_dma_common_t *dma_cntl_p, p_tx_ring_t tx_ring_p,
+ p_tx_mbox_t *tx_mbox_p)
+{
+ p_tx_mbox_t mboxp;
+ p_hxge_dma_common_t cntl_dmap;
+ p_hxge_dma_common_t dmap;
+ tdc_tdr_cfg_t *tx_ring_cfig_p;
+ tdc_tdr_kick_t *tx_ring_kick_p;
+ tdc_tdr_cfg_t *tx_cs_p;
+ tdc_int_mask_t *tx_evmask_p;
+ tdc_mbh_t *mboxh_p;
+ tdc_mbl_t *mboxl_p;
+ uint64_t tx_desc_len;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma_channel_cfg_ring"));
+
+ cntl_dmap = *dma_cntl_p;
+
+ dmap = (p_hxge_dma_common_t)&tx_ring_p->tdc_desc;
+ hxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
+ sizeof (tx_desc_t));
+
+ /*
+ * Zero out transmit ring descriptors.
+ */
+ bzero((caddr_t)dmap->kaddrp, dmap->alength);
+ tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
+ tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
+ tx_cs_p = &(tx_ring_p->tx_cs);
+ tx_evmask_p = &(tx_ring_p->tx_evmask);
+ tx_ring_cfig_p->value = 0;
+ tx_ring_kick_p->value = 0;
+ tx_cs_p->value = 0;
+ tx_evmask_p->value = 0;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma_channel_cfg_ring: channel %d des $%p",
+ dma_channel, dmap->dma_cookie.dmac_laddress));
+
+ tx_ring_cfig_p->value = 0;
+
+ /* Hydra len is 11 bits and the lower 5 bits are 0s */
+ tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 5);
+ tx_ring_cfig_p->value =
+ (dmap->dma_cookie.dmac_laddress & TDC_TDR_CFG_ADDR_MASK) |
+ (tx_desc_len << TDC_TDR_CFG_LEN_SHIFT);
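+
+	/*
+	 * Worked example (for illustration only): a 1024-entry ring gives
+	 * tx_desc_len = 1024 >> 5 = 0x20, which lands in bits 63:53 of the
+	 * configuration value, while the ring base address contributes
+	 * bits 43:6 (the low 6 bits are implied zero by the 64-byte
+	 * alignment of the descriptor ring).
+	 */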
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
+ dma_channel, tx_ring_cfig_p->value));
+
+ tx_cs_p->bits.reset = 1;
+
+ /* Map in mailbox */
+ mboxp = (p_tx_mbox_t)KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
+ dmap = (p_hxge_dma_common_t)&mboxp->tx_mbox;
+ hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
+ mboxh_p = (tdc_mbh_t *)&tx_ring_p->tx_mbox_mbh;
+ mboxl_p = (tdc_mbl_t *)&tx_ring_p->tx_mbox_mbl;
+ mboxh_p->value = mboxl_p->value = 0;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
+ dmap->dma_cookie.dmac_laddress));
+
+ mboxh_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
+ TDC_MBH_ADDR_SHIFT) & TDC_MBH_MASK);
+ mboxl_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress &
+ TDC_MBL_MASK) >> TDC_MBL_SHIFT);
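+
+	/*
+	 * Sketch of the address split (for illustration): the 64-byte
+	 * aligned mailbox DMA address is carried in two registers. MBH
+	 * takes bits 43:32 of the address and MBL takes bits 31:6; the
+	 * low 6 bits are implied zero by the mailbox alignment.
+	 */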
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
+ dmap->dma_cookie.dmac_laddress));
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma_channel_cfg_ring: hmbox $%p mbox $%p",
+ mboxh_p->bits.mbaddr, mboxl_p->bits.mbaddr));
+
+ /*
+ * Set page valid and no mask
+ */
+ tx_ring_p->page_hdl.value = 0;
+
+ *tx_mbox_p = mboxp;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_map_txdma_channel_cfg_ring"));
+}
+
+/*ARGSUSED*/
+static void
+hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep,
+ p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
+{
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_unmap_txdma_channel_cfg_ring: channel %d",
+ tx_ring_p->tdc));
+
+ KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_unmap_txdma_channel_cfg_ring"));
+}
+
+static hxge_status_t
+hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
+ p_hxge_dma_common_t *dma_buf_p,
+ p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
+{
+ p_hxge_dma_common_t dma_bufp, tmp_bufp;
+ p_hxge_dma_common_t dmap;
+ hxge_os_dma_handle_t tx_buf_dma_handle;
+ p_tx_ring_t tx_ring_p;
+ p_tx_msg_t tx_msg_ring;
+ hxge_status_t status = HXGE_OK;
+ int ddi_status = DDI_SUCCESS;
+ int i, j, index;
+ uint32_t size, bsize;
+ uint32_t nblocks, nmsgs;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma_channel_buf_ring"));
+
+ dma_bufp = tmp_bufp = *dma_buf_p;
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ " hxge_map_txdma_channel_buf_ring: channel %d to map %d "
+ "chunks bufp $%p", channel, num_chunks, dma_bufp));
+
+ nmsgs = 0;
+ for (i = 0; i < num_chunks; i++, tmp_bufp++) {
+ nmsgs += tmp_bufp->nblocks;
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma_channel_buf_ring: channel %d "
+ "bufp $%p nblocks %d nmsgs %d",
+ channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
+ }
+ if (!nmsgs) {
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_map_txdma_channel_buf_ring: channel %d "
+ "no msg blocks", channel));
+ status = HXGE_ERROR;
+
+ goto hxge_map_txdma_channel_buf_ring_exit;
+ }
+ tx_ring_p = (p_tx_ring_t)KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
+ MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
+ (void *) hxgep->interrupt_cookie);
+ /*
+	 * Allocate transmit message rings and DMA handles for packets that
+	 * are not copied to premapped buffers.
+ */
+ size = nmsgs * sizeof (tx_msg_t);
+ tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
+ for (i = 0; i < nmsgs; i++) {
+ ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
+ DDI_DMA_DONTWAIT, 0, &tx_msg_ring[i].dma_handle);
+ if (ddi_status != DDI_SUCCESS) {
+ status |= HXGE_DDI_FAILED;
+ break;
+ }
+ }
+
+ if (i < nmsgs) {
+ HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL,
+ "Allocate handles failed."));
+
+ goto hxge_map_txdma_channel_buf_ring_fail1;
+ }
+ tx_ring_p->tdc = channel;
+ tx_ring_p->tx_msg_ring = tx_msg_ring;
+ tx_ring_p->tx_ring_size = nmsgs;
+ tx_ring_p->num_chunks = num_chunks;
+ if (!hxge_tx_intr_thres) {
+ hxge_tx_intr_thres = tx_ring_p->tx_ring_size / 4;
+ }
+ tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
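+	/*
+	 * Note: the wrap mask computed above assumes the ring size is a
+	 * power of two; a size of 1024, for example, yields a mask of
+	 * 0x3FF so TXDMA_DESC_NEXT_INDEX() can wrap with a single AND.
+	 */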
+ tx_ring_p->rd_index = 0;
+ tx_ring_p->wr_index = 0;
+ tx_ring_p->ring_head.value = 0;
+ tx_ring_p->ring_kick_tail.value = 0;
+ tx_ring_p->descs_pending = 0;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma_channel_buf_ring: channel %d "
+ "actual tx desc max %d nmsgs %d (config hxge_tx_ring_size %d)",
+ channel, tx_ring_p->tx_ring_size, nmsgs, hxge_tx_ring_size));
+
+ /*
+ * Map in buffers from the buffer pool.
+ */
+ index = 0;
+ bsize = dma_bufp->block_size;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma_channel_buf_ring: "
+ "dma_bufp $%p tx_rng_p $%p tx_msg_rng_p $%p bsize %d",
+ dma_bufp, tx_ring_p, tx_msg_ring, bsize));
+
+ tx_buf_dma_handle = dma_bufp->dma_handle;
+ for (i = 0; i < num_chunks; i++, dma_bufp++) {
+ bsize = dma_bufp->block_size;
+ nblocks = dma_bufp->nblocks;
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_map_txdma_channel_buf_ring: dma chunk %d "
+ "size %d dma_bufp $%p",
+ i, sizeof (hxge_dma_common_t), dma_bufp));
+
+ for (j = 0; j < nblocks; j++) {
+ tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
+ dmap = &tx_msg_ring[index++].buf_dma;
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+			    "==> hxge_map_txdma_channel_buf_ring: j %d "
+			    "dmap $%p", j, dmap));
+ hxge_setup_dma_common(dmap, dma_bufp, 1, bsize);
+ }
+ }
+
+ if (i < num_chunks) {
+ status = HXGE_ERROR;
+
+ goto hxge_map_txdma_channel_buf_ring_fail1;
+ }
+
+ *tx_desc_p = tx_ring_p;
+
+ goto hxge_map_txdma_channel_buf_ring_exit;
+
+hxge_map_txdma_channel_buf_ring_fail1:
+ index--;
+ for (; index >= 0; index--) {
+ if (tx_msg_ring[index].dma_handle != NULL) {
+ ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
+ }
+ }
+ MUTEX_DESTROY(&tx_ring_p->lock);
+ KMEM_FREE(tx_msg_ring, size);
+ KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
+
+ status = HXGE_ERROR;
+
+hxge_map_txdma_channel_buf_ring_exit:
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_map_txdma_channel_buf_ring status 0x%x", status));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static void
+hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep, p_tx_ring_t tx_ring_p)
+{
+ p_tx_msg_t tx_msg_ring;
+ p_tx_msg_t tx_msg_p;
+ int i;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_unmap_txdma_channel_buf_ring"));
+ if (tx_ring_p == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_unmap_txdma_channel_buf_ring: NULL ringp"));
+ return;
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_unmap_txdma_channel_buf_ring: channel %d",
+ tx_ring_p->tdc));
+
+ tx_msg_ring = tx_ring_p->tx_msg_ring;
+ for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
+ tx_msg_p = &tx_msg_ring[i];
+ if (tx_msg_p->flags.dma_type == USE_DVMA) {
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "entry = %d", i));
+ (void) dvma_unload(tx_msg_p->dvma_handle, 0, -1);
+ tx_msg_p->dvma_handle = NULL;
+ if (tx_ring_p->dvma_wr_index ==
+ tx_ring_p->dvma_wrap_mask) {
+ tx_ring_p->dvma_wr_index = 0;
+ } else {
+ tx_ring_p->dvma_wr_index++;
+ }
+ tx_ring_p->dvma_pending--;
+ } else if (tx_msg_p->flags.dma_type == USE_DMA) {
+ if (ddi_dma_unbind_handle(tx_msg_p->dma_handle)) {
+				cmn_err(CE_WARN, "hxge_unmap_tx_buf_ring: "
+ "ddi_dma_unbind_handle failed.");
+ }
+ }
+ if (tx_msg_p->tx_message != NULL) {
+ freemsg(tx_msg_p->tx_message);
+ tx_msg_p->tx_message = NULL;
+ }
+ }
+
+ for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
+ if (tx_msg_ring[i].dma_handle != NULL) {
+ ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
+ }
+ }
+
+ MUTEX_DESTROY(&tx_ring_p->lock);
+ KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
+ KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_unmap_txdma_channel_buf_ring"));
+}
+
+static hxge_status_t
+hxge_txdma_hw_start(p_hxge_t hxgep)
+{
+ int i, ndmas;
+ uint16_t channel;
+ p_tx_rings_t tx_rings;
+ p_tx_ring_t *tx_desc_rings;
+ p_tx_mbox_areas_t tx_mbox_areas_p;
+ p_tx_mbox_t *tx_mbox_p;
+ hxge_status_t status = HXGE_OK;
+ uint64_t tmp;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start"));
+
+ /*
+	 * Initialize REORD Table:
+	 *	1. Disable VMAC.
+	 *	2. Reset the FIFO Err Stat.
+	 *	3. Scrub memory and check for errors.
+ */
+ (void) hxge_tx_vmac_disable(hxgep);
+
+ /*
+ * Clear the error status
+ */
+ HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7);
+
+ /*
+ * Scrub the rtab memory for the TDC and reset the TDC.
+ */
+ HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, 0x0ULL);
+ HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, 0x0ULL);
+
+ for (i = 0; i < 256; i++) {
+ HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
+ (uint64_t)i);
+
+ /*
+ * Write the command register with an indirect read instruction
+ */
+ tmp = (0x1ULL << 30) | i;
+ HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp);
+
+ /*
+ * Wait for status done
+ */
+ tmp = 0;
+ do {
+ HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
+ &tmp);
+ } while (((tmp >> 31) & 0x1ULL) == 0x0);
+ }
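+
+	/*
+	 * Note on the command encoding (as inferred from this code, not
+	 * from the PRM): the low bits of TDC_REORD_TBL_CMD carry the table
+	 * entry index (0-255), bit 30 selects an indirect read, and bit 31
+	 * is the status-done flag polled above.
+	 */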
+
+ for (i = 0; i < 256; i++) {
+ /*
+ * Write the command register with an indirect read instruction
+ */
+ tmp = (0x1ULL << 30) | i;
+ HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp);
+
+ /*
+ * Wait for status done
+ */
+ tmp = 0;
+ do {
+ HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
+ &tmp);
+ } while (((tmp >> 31) & 0x1ULL) == 0x0);
+
+ HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, &tmp);
+ if (0x1ff00ULL != (0x1ffffULL & tmp)) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
+ "unexpected data (hi), entry: %x, value: 0x%0llx\n",
+ i, (unsigned long long)tmp));
+ }
+
+ HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, &tmp);
+ if (tmp != 0) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
+ "unexpected data (lo), entry: %x\n", i));
+ }
+
+ HXGE_REG_RD64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, &tmp);
+ if (tmp != 0) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
+ "parity error, entry: %x, val 0x%llx\n",
+ i, (unsigned long long)tmp));
+ }
+
+ HXGE_REG_RD64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, &tmp);
+ if (tmp != 0) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
+ "parity error, entry: %x\n", i));
+ }
+ }
+
+ /*
+ * Reset FIFO Error Status for the TDC and enable FIFO error events.
+ */
+ HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7);
+ HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_MASK, 0x0);
+
+ /*
+ * Initialize the Transmit DMAs.
+ */
+ tx_rings = hxgep->tx_rings;
+ if (tx_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hw_start: NULL ring pointer"));
+ return (HXGE_ERROR);
+ }
+ tx_desc_rings = tx_rings->rings;
+ if (tx_desc_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hw_start: NULL ring pointers"));
+ return (HXGE_ERROR);
+ }
+ ndmas = tx_rings->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hw_start: no dma channel allocated"));
+ return (HXGE_ERROR);
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start: "
+ "tx_rings $%p tx_desc_rings $%p ndmas %d",
+ tx_rings, tx_desc_rings, ndmas));
+
+ tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
+ tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
+
+ /*
+ * Init the DMAs.
+ */
+ for (i = 0; i < ndmas; i++) {
+ channel = tx_desc_rings[i]->tdc;
+ status = hxge_txdma_start_channel(hxgep, channel,
+ (p_tx_ring_t)tx_desc_rings[i],
+ (p_tx_mbox_t)tx_mbox_p[i]);
+ if (status != HXGE_OK) {
+ goto hxge_txdma_hw_start_fail1;
+ }
+ }
+
+ (void) hxge_tx_vmac_enable(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_txdma_hw_start: tx_rings $%p rings $%p",
+ hxgep->tx_rings, hxgep->tx_rings->rings));
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_txdma_hw_start: tx_rings $%p tx_desc_rings $%p",
+ hxgep->tx_rings, tx_desc_rings));
+
+ goto hxge_txdma_hw_start_exit;
+
+hxge_txdma_hw_start_fail1:
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_txdma_hw_start: disable (status 0x%x channel %d i %d)",
+ status, channel, i));
+
+ for (; i >= 0; i--) {
+		channel = tx_desc_rings[i]->tdc;
+ (void) hxge_txdma_stop_channel(hxgep, channel,
+ (p_tx_ring_t)tx_desc_rings[i],
+ (p_tx_mbox_t)tx_mbox_p[i]);
+ }
+
+hxge_txdma_hw_start_exit:
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_txdma_hw_start: (status 0x%x)", status));
+
+ return (status);
+}
+
+static void
+hxge_txdma_hw_stop(p_hxge_t hxgep)
+{
+ int i, ndmas;
+ uint16_t channel;
+ p_tx_rings_t tx_rings;
+ p_tx_ring_t *tx_desc_rings;
+ p_tx_mbox_areas_t tx_mbox_areas_p;
+ p_tx_mbox_t *tx_mbox_p;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop"));
+
+ tx_rings = hxgep->tx_rings;
+ if (tx_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hw_stop: NULL ring pointer"));
+ return;
+ }
+
+ tx_desc_rings = tx_rings->rings;
+ if (tx_desc_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hw_stop: NULL ring pointers"));
+ return;
+ }
+
+ ndmas = tx_rings->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_hw_stop: no dma channel allocated"));
+ return;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: "
+ "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
+
+ tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
+ tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
+
+ for (i = 0; i < ndmas; i++) {
+ channel = tx_desc_rings[i]->tdc;
+ (void) hxge_txdma_stop_channel(hxgep, channel,
+ (p_tx_ring_t)tx_desc_rings[i],
+ (p_tx_mbox_t)tx_mbox_p[i]);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: "
+ "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_hw_stop"));
+}
+
+static hxge_status_t
+hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
+ p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_txdma_start_channel (channel %d)", channel));
+ /*
+ * TXDMA/TXC must be in stopped state.
+ */
+ (void) hxge_txdma_stop_inj_err(hxgep, channel);
+
+ /*
+ * Reset TXDMA channel
+ */
+ tx_ring_p->tx_cs.value = 0;
+ tx_ring_p->tx_cs.bits.reset = 1;
+ status = hxge_reset_txdma_channel(hxgep, channel,
+ tx_ring_p->tx_cs.value);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_txdma_start_channel (channel %d)"
+ " reset channel failed 0x%x", channel, status));
+
+ goto hxge_txdma_start_channel_exit;
+ }
+
+ /*
+ * Initialize the TXDMA channel specific FZC control configurations.
+ * These FZC registers are pertaining to each TX channel (i.e. logical
+ * pages).
+ */
+ status = hxge_init_fzc_txdma_channel(hxgep, channel,
+ tx_ring_p, tx_mbox_p);
+ if (status != HXGE_OK) {
+ goto hxge_txdma_start_channel_exit;
+ }
+
+ /*
+ * Initialize the event masks.
+ */
+ tx_ring_p->tx_evmask.value = 0;
+ status = hxge_init_txdma_channel_event_mask(hxgep,
+ channel, &tx_ring_p->tx_evmask);
+ if (status != HXGE_OK) {
+ goto hxge_txdma_start_channel_exit;
+ }
+
+ /*
+ * Load TXDMA descriptors, buffers, mailbox, initialise the DMA
+ * channels and enable each DMA channel.
+ */
+ status = hxge_enable_txdma_channel(hxgep, channel,
+ tx_ring_p, tx_mbox_p);
+ if (status != HXGE_OK) {
+ goto hxge_txdma_start_channel_exit;
+ }
+
+hxge_txdma_start_channel_exit:
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_start_channel"));
+
+ return (status);
+}
+
+/*ARGSUSED*/
+static hxge_status_t
+hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
+ p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
+{
+	hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_txdma_stop_channel: channel %d", channel));
+
+ /*
+	 * Stop (disable) TXDMA and TXC. (If the stop bit is set and the
+	 * STOP_N_GO bit is not set, the TXDMA reset state will not be set
+	 * when TXDMA is reset.)
+ */
+ (void) hxge_txdma_stop_inj_err(hxgep, channel);
+
+ /*
+ * Reset TXDMA channel
+ */
+ tx_ring_p->tx_cs.value = 0;
+ tx_ring_p->tx_cs.bits.reset = 1;
+ status = hxge_reset_txdma_channel(hxgep, channel,
+ tx_ring_p->tx_cs.value);
+ if (status != HXGE_OK) {
+ goto hxge_txdma_stop_channel_exit;
+ }
+
+hxge_txdma_stop_channel_exit:
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_stop_channel"));
+
+ return (status);
+}
+
+static p_tx_ring_t
+hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel)
+{
+ int index, ndmas;
+ uint16_t tdc;
+ p_tx_rings_t tx_rings;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_ring"));
+
+ tx_rings = hxgep->tx_rings;
+ if (tx_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_get_ring: NULL ring pointer"));
+ return (NULL);
+ }
+ ndmas = tx_rings->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_get_ring: no channel allocated"));
+ return (NULL);
+ }
+ if (tx_rings->rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_get_ring: NULL rings pointer"));
+ return (NULL);
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_ring: "
+ "tx_rings $%p tx_desc_rings $%p ndmas %d",
+	    tx_rings, tx_rings->rings, ndmas));
+
+ for (index = 0; index < ndmas; index++) {
+ tdc = tx_rings->rings[index]->tdc;
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+		    "==> hxge_txdma_get_ring: channel %d", tdc));
+ if (channel == tdc) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_get_ring: tdc %d ring $%p",
+ tdc, tx_rings->rings[index]));
+ return (p_tx_ring_t)(tx_rings->rings[index]);
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_ring"));
+
+ return (NULL);
+}
+
+static p_tx_mbox_t
+hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel)
+{
+ int index, tdc, ndmas;
+ p_tx_rings_t tx_rings;
+ p_tx_mbox_areas_t tx_mbox_areas_p;
+ p_tx_mbox_t *tx_mbox_p;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_mbox"));
+
+ tx_rings = hxgep->tx_rings;
+ if (tx_rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_txdma_get_mbox: NULL ring pointer"));
+ return (NULL);
+ }
+ tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
+ if (tx_mbox_areas_p == NULL) {
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_txdma_get_mbox: NULL mbox pointer"));
+ return (NULL);
+ }
+ tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
+
+ ndmas = tx_rings->ndmas;
+ if (!ndmas) {
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_txdma_get_mbox: no channel allocated"));
+ return (NULL);
+ }
+ if (tx_rings->rings == NULL) {
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "<== hxge_txdma_get_mbox: NULL rings pointer"));
+ return (NULL);
+ }
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_mbox: "
+ "tx_rings $%p tx_desc_rings $%p ndmas %d",
+	    tx_rings, tx_rings->rings, ndmas));
+
+ for (index = 0; index < ndmas; index++) {
+ tdc = tx_rings->rings[index]->tdc;
+ HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
+ "==> hxge_txdma_get_mbox: channel %d", tdc));
+ if (channel == tdc) {
+ HXGE_DEBUG_MSG((hxgep, TX_CTL,
+ "<== hxge_txdma_get_mbox: tdc %d ring $%p",
+ tdc, tx_rings->rings[index]));
+ return (p_tx_mbox_t)(tx_mbox_p[index]);
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_mbox"));
+
+ return (NULL);
+}
+
+/*ARGSUSED*/
+static hxge_status_t
+hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
+ tdc_stat_t cs)
+{
+ hpi_handle_t handle;
+ uint8_t channel;
+ p_tx_ring_t *tx_rings;
+ p_tx_ring_t tx_ring_p;
+ p_hxge_tx_ring_stats_t tdc_stats;
+ boolean_t txchan_fatal = B_FALSE;
+ hxge_status_t status = HXGE_OK;
+ tdc_drop_cnt_t drop_cnt;
+
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_tx_err_evnts"));
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ channel = ldvp->channel;
+
+ tx_rings = hxgep->tx_rings->rings;
+ tx_ring_p = tx_rings[index];
+ tdc_stats = tx_ring_p->tdc_stats;
+
+ /* Get the error counts if any */
+ TXDMA_REG_READ64(handle, TDC_DROP_CNT, channel, &drop_cnt.value);
+ tdc_stats->count_hdr_size_err += drop_cnt.bits.hdr_size_error_count;
+ tdc_stats->count_runt += drop_cnt.bits.runt_count;
+ tdc_stats->count_abort += drop_cnt.bits.abort_count;
+
+ if (cs.bits.peu_resp_err) {
+ tdc_stats->peu_resp_err++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_TDMC_PEU_RESP_ERR);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_tx_err_evnts(channel %d): "
+ "fatal error: peu_resp_err", channel));
+ txchan_fatal = B_TRUE;
+ }
+
+ if (cs.bits.pkt_size_hdr_err) {
+ tdc_stats->pkt_size_hdr_err++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_TDMC_PKT_SIZE_HDR_ERR);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_tx_err_evnts(channel %d): "
+ "fatal error: pkt_size_hdr_err", channel));
+ txchan_fatal = B_TRUE;
+ }
+
+ if (cs.bits.runt_pkt_drop_err) {
+ tdc_stats->runt_pkt_drop_err++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_TDMC_RUNT_PKT_DROP_ERR);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_tx_err_evnts(channel %d): "
+ "fatal error: runt_pkt_drop_err", channel));
+ txchan_fatal = B_TRUE;
+ }
+
+ if (cs.bits.pkt_size_err) {
+ tdc_stats->pkt_size_err++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_tx_err_evnts(channel %d): "
+ "fatal error: pkt_size_err", channel));
+ txchan_fatal = B_TRUE;
+ }
+
+ if (cs.bits.tx_rng_oflow) {
+ tdc_stats->tx_rng_oflow++;
+		if (tdc_stats->tx_rng_oflow == 1)
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_tx_err_evnts(channel %d): "
+ "fatal error: tx_rng_oflow", channel));
+ }
+
+ if (cs.bits.pref_par_err) {
+ tdc_stats->pref_par_err++;
+
+ /* Get the address of parity error read data */
+ TXDMA_REG_READ64(hxgep->hpi_handle, TDC_PREF_PAR_LOG,
+ channel, &tdc_stats->errlog.value);
+
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_TDMC_PREF_PAR_ERR);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_tx_err_evnts(channel %d): "
+ "fatal error: pref_par_err", channel));
+ txchan_fatal = B_TRUE;
+ }
+
+ if (cs.bits.tdr_pref_cpl_to) {
+ tdc_stats->tdr_pref_cpl_to++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_TDMC_TDR_PREF_CPL_TO);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_tx_err_evnts(channel %d): "
+		    "fatal error: tdr_pref_cpl_to", channel));
+ txchan_fatal = B_TRUE;
+ }
+
+ if (cs.bits.pkt_cpl_to) {
+ tdc_stats->pkt_cpl_to++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_TDMC_PKT_CPL_TO);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_tx_err_evnts(channel %d): "
+ "fatal error: pkt_cpl_to", channel));
+ txchan_fatal = B_TRUE;
+ }
+
+ if (cs.bits.invalid_sop) {
+ tdc_stats->invalid_sop++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_TDMC_INVALID_SOP);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_tx_err_evnts(channel %d): "
+ "fatal error: invalid_sop", channel));
+ txchan_fatal = B_TRUE;
+ }
+
+ if (cs.bits.unexpected_sop) {
+ tdc_stats->unexpected_sop++;
+ HXGE_FM_REPORT_ERROR(hxgep, channel,
+ HXGE_FM_EREPORT_TDMC_UNEXPECTED_SOP);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_tx_err_evnts(channel %d): "
+ "fatal error: unexpected_sop", channel));
+ txchan_fatal = B_TRUE;
+ }
+
+ /* Clear error injection source in case this is an injected error */
+ TXDMA_REG_WRITE64(hxgep->hpi_handle, TDC_STAT_INT_DBG, channel, 0);
+
+ if (txchan_fatal) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_tx_err_evnts: "
+ " fatal error on channel %d cs 0x%llx\n",
+ channel, cs.value));
+ status = hxge_txdma_fatal_err_recover(hxgep, channel,
+ tx_ring_p);
+ if (status == HXGE_OK) {
+ FM_SERVICE_RESTORED(hxgep);
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_tx_err_evnts"));
+
+ return (status);
+}
+
+hxge_status_t
+hxge_txdma_handle_sys_errors(p_hxge_t hxgep)
+{
+ hpi_handle_t handle;
+ hxge_status_t status = HXGE_OK;
+ tdc_fifo_err_stat_t fifo_stat;
+ hxge_tdc_sys_stats_t *tdc_sys_stats;
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_handle_sys_errors"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ /*
+ * The FIFO is shared by all channels.
+ * Get the status of Reorder Buffer and Reorder Table Buffer Errors
+ */
+ HXGE_REG_RD64(handle, TDC_FIFO_ERR_STAT, &fifo_stat.value);
+
+ /*
+ * Clear the error bits. Note that writing a 1 clears the bit. Writing
+ * a 0 does nothing.
+ */
+ HXGE_REG_WR64(handle, TDC_FIFO_ERR_STAT, fifo_stat.value);
+
+ tdc_sys_stats = &hxgep->statsp->tdc_sys_stats;
+ if (fifo_stat.bits.reord_tbl_par_err) {
+ tdc_sys_stats->reord_tbl_par_err++;
+ HXGE_FM_REPORT_ERROR(hxgep, NULL,
+ HXGE_FM_EREPORT_TDMC_REORD_TBL_PAR);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_txdma_handle_sys_errors: fatal error: "
+ "reord_tbl_par_err"));
+ }
+
+ if (fifo_stat.bits.reord_buf_ded_err) {
+ tdc_sys_stats->reord_buf_ded_err++;
+ HXGE_FM_REPORT_ERROR(hxgep, NULL,
+ HXGE_FM_EREPORT_TDMC_REORD_BUF_DED);
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_txdma_handle_sys_errors: "
+ "fatal error: reord_buf_ded_err"));
+ }
+
+ if (fifo_stat.bits.reord_buf_sec_err) {
+ tdc_sys_stats->reord_buf_sec_err++;
+ if (tdc_sys_stats->reord_buf_sec_err == 1)
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_txdma_handle_sys_errors: "
+ "reord_buf_sec_err"));
+ }
+
+ if (fifo_stat.bits.reord_tbl_par_err ||
+ fifo_stat.bits.reord_buf_ded_err) {
+ status = hxge_tx_port_fatal_err_recover(hxgep);
+ if (status == HXGE_OK) {
+ FM_SERVICE_RESTORED(hxgep);
+ }
+ }
+
+ HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_handle_sys_errors"));
+
+ return (status);
+}
+
+static hxge_status_t
+hxge_txdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel,
+ p_tx_ring_t tx_ring_p)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+ p_tx_mbox_t tx_mbox_p;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_txdma_fatal_err_recover"));
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Recovering from TxDMAChannel#%d error...", channel));
+
+ /*
+	 * Stop the DMA channel and wait for the stop-done bit. If the
+	 * stop-done bit is not set, then create an error.
+ */
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping txdma channel(%d)",
+ channel));
+ MUTEX_ENTER(&tx_ring_p->lock);
+ rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_txdma_fatal_err_recover (channel %d): "
+ "stop failed ", channel));
+
+ goto fail;
+ }
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming txdma channel(%d)",
+ channel));
+ (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
+
+ /*
+ * Reset TXDMA channel
+ */
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "resetting txdma channel(%d)",
+ channel));
+ if ((rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
+ HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_txdma_fatal_err_recover (channel %d)"
+ " reset channel failed 0x%x", channel, rs));
+
+ goto fail;
+ }
+ /*
+	 * Reset the tail (kick) register to 0. (Hardware will not reset it;
+	 * a Tx overflow fatal error occurs if the tail is not 0 after reset.)
+ */
+ TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);
+
+ /*
+ * Restart TXDMA channel
+ *
+ * Initialize the TXDMA channel specific FZC control configurations.
+ * These FZC registers are pertaining to each TX channel (i.e. logical
+ * pages).
+ */
+ tx_mbox_p = hxge_txdma_get_mbox(hxgep, channel);
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "restarting txdma channel(%d)",
+ channel));
+ status = hxge_init_fzc_txdma_channel(hxgep, channel,
+ tx_ring_p, tx_mbox_p);
+ if (status != HXGE_OK)
+ goto fail;
+
+ /*
+ * Initialize the event masks.
+ */
+ tx_ring_p->tx_evmask.value = 0;
+ status = hxge_init_txdma_channel_event_mask(hxgep, channel,
+ &tx_ring_p->tx_evmask);
+ if (status != HXGE_OK)
+ goto fail;
+
+ tx_ring_p->wr_index_wrap = B_FALSE;
+ tx_ring_p->wr_index = 0;
+ tx_ring_p->rd_index = 0;
+
+ /*
+ * Load TXDMA descriptors, buffers, mailbox, initialise the DMA
+ * channels and enable each DMA channel.
+ */
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "enabling txdma channel(%d)",
+ channel));
+ status = hxge_enable_txdma_channel(hxgep, channel,
+ tx_ring_p, tx_mbox_p);
+ MUTEX_EXIT(&tx_ring_p->lock);
+ if (status != HXGE_OK)
+ goto fail;
+
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Recovery Successful, TxDMAChannel#%d Restored", channel));
+	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_txdma_fatal_err_recover"));
+
+ return (HXGE_OK);
+
+fail:
+ MUTEX_EXIT(&tx_ring_p->lock);
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
+ "hxge_txdma_fatal_err_recover (channel %d): "
+ "failed to recover this txdma channel", channel));
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
+
+ return (status);
+}
+
+static hxge_status_t
+hxge_tx_port_fatal_err_recover(p_hxge_t hxgep)
+{
+ hpi_handle_t handle;
+ hpi_status_t rs = HPI_SUCCESS;
+ hxge_status_t status = HXGE_OK;
+ p_tx_ring_t *tx_desc_rings;
+ p_tx_rings_t tx_rings;
+ p_tx_ring_t tx_ring_p;
+ int i, ndmas;
+ uint16_t channel;
+ block_reset_t reset_reg;
+
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
+ "==> hxge_tx_port_fatal_err_recover"));
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Recovering from TxPort error..."));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ /* Reset TDC block from PEU for this fatal error */
+ reset_reg.value = 0;
+ reset_reg.bits.tdc_rst = 1;
+ HXGE_REG_WR32(handle, BLOCK_RESET, reset_reg.value);
+
+ HXGE_DELAY(1000);
+
+ /*
+	 * Stop the DMA channel and wait for the stop-done bit. If the
+	 * stop-done bit is not set, then create an error.
+ */
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping all DMA channels..."));
+
+ tx_rings = hxgep->tx_rings;
+ tx_desc_rings = tx_rings->rings;
+ ndmas = tx_rings->ndmas;
+
+ for (i = 0; i < ndmas; i++) {
+ if (tx_desc_rings[i] == NULL) {
+ continue;
+ }
+ tx_ring_p = tx_rings->rings[i];
+ MUTEX_ENTER(&tx_ring_p->lock);
+ }
+
+ for (i = 0; i < ndmas; i++) {
+ if (tx_desc_rings[i] == NULL) {
+ continue;
+ }
+ channel = tx_desc_rings[i]->tdc;
+ tx_ring_p = tx_rings->rings[i];
+ rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+			    "==> hxge_tx_port_fatal_err_recover (channel %d): "
+ "stop failed ", channel));
+
+ goto fail;
+ }
+ }
+
+ /*
+	 * Do reclaim on all of the DMAs.
+ */
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming all DMA channels..."));
+ for (i = 0; i < ndmas; i++) {
+ if (tx_desc_rings[i] == NULL) {
+ continue;
+ }
+ tx_ring_p = tx_rings->rings[i];
+ (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
+ }
+
+ /* Restart the TDC */
+ if ((status = hxge_txdma_hw_start(hxgep)) != HXGE_OK)
+ goto fail;
+
+ for (i = 0; i < ndmas; i++) {
+ if (tx_desc_rings[i] == NULL) {
+ continue;
+ }
+ tx_ring_p = tx_rings->rings[i];
+ MUTEX_EXIT(&tx_ring_p->lock);
+ }
+
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "Recovery Successful, TxPort Restored"));
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
+ "<== hxge_tx_port_fatal_err_recover"));
+ return (HXGE_OK);
+
+fail:
+ for (i = 0; i < ndmas; i++) {
+ if (tx_desc_rings[i] == NULL) {
+ continue;
+ }
+ tx_ring_p = tx_rings->rings[i];
+ MUTEX_EXIT(&tx_ring_p->lock);
+ }
+
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
+ HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
+	    "hxge_tx_port_fatal_err_recover: "
+	    "failed to recover the TxPort"));
+
+ return (status);
+}
diff --git a/usr/src/uts/common/io/hxge/hxge_txdma.h b/usr/src/uts/common/io/hxge/hxge_txdma.h
new file mode 100644
index 0000000000..0b57f2fea1
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_txdma.h
@@ -0,0 +1,248 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_TXDMA_H
+#define _SYS_HXGE_HXGE_TXDMA_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hxge_txdma_hw.h>
+#include <hpi_txdma.h>
+
+#define TXDMA_RECLAIM_PENDING_DEFAULT 64
+#define TX_FULL_MARK 3
+
+/*
+ * Transmit load balancing definitions.
+ */
+#define HXGE_TX_LB_TCPUDP 0 /* default policy */
+#define HXGE_TX_LB_HASH 1 /* from the hint data */
+#define HXGE_TX_LB_DEST_MAC 2 /* Dest. MAC */
+
+/*
+ * Descriptor ring empty:
+ * (1) head index is equal to tail index.
+ * (2) wrapped around bits are the same.
+ * Descriptor ring full:
+ * (1) head index is equal to tail index.
+ * (2) wrapped around bits are different.
+ *
+ */
+#define TXDMA_RING_EMPTY(head, head_wrap, tail, tail_wrap) \
+ ((head == tail && head_wrap == tail_wrap) ? B_TRUE : B_FALSE)
+
+#define TXDMA_RING_FULL(head, head_wrap, tail, tail_wrap) \
+ ((head == tail && head_wrap != tail_wrap) ? B_TRUE : B_FALSE)
+
+#define TXDMA_DESC_NEXT_INDEX(index, entries, wrap_mask) \
+ ((index + entries) & wrap_mask)
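+
+/*
+ * Worked example (for illustration) with a 4-entry ring (wrap_mask 0x3):
+ *
+ *	head == 2, tail == 2, same wrap bits	-> TXDMA_RING_EMPTY
+ *	head == 2, tail == 2, different wrap	-> TXDMA_RING_FULL
+ *	TXDMA_DESC_NEXT_INDEX(3, 1, 0x3)	-> (3 + 1) & 0x3 == 0
+ */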
+
+typedef struct _tx_msg_t {
+ hxge_os_block_mv_t flags; /* DMA, BCOPY, DVMA (?) */
+ hxge_os_dma_common_t buf_dma; /* premapped buffer blocks */
+ hxge_os_dma_handle_t buf_dma_handle; /* premapped buffer handle */
+ hxge_os_dma_handle_t dma_handle; /* DMA handle for normal send */
+ hxge_os_dma_handle_t dvma_handle; /* Fast DVMA handle */
+
+ p_mblk_t tx_message;
+ uint32_t tx_msg_size;
+ size_t bytes_used;
+ int head;
+ int tail;
+} tx_msg_t, *p_tx_msg_t;
+
+/*
+ * TX Statistics.
+ */
+typedef struct _hxge_tx_ring_stats_t {
+ uint64_t opackets;
+ uint64_t obytes;
+ uint64_t obytes_with_pad;
+ uint64_t oerrors;
+
+ uint32_t tx_inits;
+ uint32_t tx_no_buf;
+
+ uint32_t peu_resp_err;
+ uint32_t pkt_size_hdr_err;
+ uint32_t runt_pkt_drop_err;
+ uint32_t pkt_size_err;
+ uint32_t tx_rng_oflow;
+ uint32_t pref_par_err;
+ uint32_t tdr_pref_cpl_to;
+ uint32_t pkt_cpl_to;
+ uint32_t invalid_sop;
+ uint32_t unexpected_sop;
+
+ uint64_t count_hdr_size_err;
+ uint64_t count_runt;
+ uint64_t count_abort;
+
+ uint32_t tx_starts;
+ uint32_t tx_no_desc;
+ uint32_t tx_dma_bind_fail;
+ uint32_t tx_hdr_pkts;
+ uint32_t tx_ddi_pkts;
+ uint32_t tx_jumbo_pkts;
+ uint32_t tx_max_pend;
+ uint32_t tx_marks;
+ tdc_pref_par_log_t errlog;
+} hxge_tx_ring_stats_t, *p_hxge_tx_ring_stats_t;
+
+typedef struct _hxge_tdc_sys_stats {
+ uint32_t reord_tbl_par_err;
+ uint32_t reord_buf_ded_err;
+ uint32_t reord_buf_sec_err;
+} hxge_tdc_sys_stats_t, *p_hxge_tdc_sys_stats_t;
+
+typedef struct _tx_ring_t {
+ hxge_os_dma_common_t tdc_desc;
+ struct _hxge_t *hxgep;
+ p_tx_msg_t tx_msg_ring;
+ uint32_t tnblocks;
+ tdc_tdr_cfg_t tx_ring_cfig;
+ tdc_tdr_kick_t tx_ring_kick;
+ tdc_tdr_cfg_t tx_cs;
+ tdc_int_mask_t tx_evmask;
+ tdc_mbh_t tx_mbox_mbh;
+ tdc_mbl_t tx_mbox_mbl;
+
+ tdc_page_handle_t page_hdl;
+
+ hxge_os_mutex_t lock;
+ uint16_t index;
+ uint16_t tdc;
+ struct hxge_tdc_cfg *tdc_p;
+ uint_t tx_ring_size;
+ uint32_t num_chunks;
+
+ uint_t tx_wrap_mask;
+ uint_t rd_index;
+ uint_t wr_index;
+ boolean_t wr_index_wrap;
+ uint_t head_index;
+ boolean_t head_wrap;
+ tdc_tdr_head_t ring_head;
+ tdc_tdr_kick_t ring_kick_tail;
+ txdma_mailbox_t tx_mbox;
+
+ uint_t descs_pending;
+ boolean_t queueing;
+
+ p_mblk_t head;
+ p_mblk_t tail;
+
+ p_hxge_tx_ring_stats_t tdc_stats;
+
+ uint_t dvma_wr_index;
+ uint_t dvma_rd_index;
+ uint_t dvma_pending;
+ uint_t dvma_available;
+ uint_t dvma_wrap_mask;
+
+ hxge_os_dma_handle_t *dvma_ring;
+
+ mac_resource_handle_t tx_mac_resource_handle;
+} tx_ring_t, *p_tx_ring_t;
+
+
+/* Transmit Mailbox */
+typedef struct _tx_mbox_t {
+ hxge_os_mutex_t lock;
+ uint16_t index;
+ struct _hxge_t *hxgep;
+ uint16_t tdc;
+ hxge_os_dma_common_t tx_mbox;
+ tdc_mbl_t tx_mbox_l;
+ tdc_mbh_t tx_mbox_h;
+} tx_mbox_t, *p_tx_mbox_t;
+
+typedef struct _tx_rings_t {
+ p_tx_ring_t *rings;
+ boolean_t txdesc_allocated;
+ uint32_t ndmas;
+ hxge_os_dma_common_t tdc_dma;
+ hxge_os_dma_common_t tdc_mbox;
+} tx_rings_t, *p_tx_rings_t;
+
+typedef struct _tx_mbox_areas_t {
+ p_tx_mbox_t *txmbox_areas_p;
+ boolean_t txmbox_allocated;
+} tx_mbox_areas_t, *p_tx_mbox_areas_t;
+
+/*
+ * Transmit prototypes.
+ */
+hxge_status_t hxge_init_txdma_channels(p_hxge_t hxgep);
+void hxge_uninit_txdma_channels(p_hxge_t hxgep);
+void hxge_setup_dma_common(p_hxge_dma_common_t, p_hxge_dma_common_t,
+ uint32_t, uint32_t);
+hxge_status_t hxge_reset_txdma_channel(p_hxge_t hxgep, uint16_t channel,
+ uint64_t reg_data);
+hxge_status_t hxge_init_txdma_channel_event_mask(p_hxge_t hxgep,
+ uint16_t channel, tdc_int_mask_t *mask_p);
+hxge_status_t hxge_enable_txdma_channel(p_hxge_t hxgep, uint16_t channel,
+ p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p);
+
+p_mblk_t hxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads);
+int hxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p);
+boolean_t hxge_txdma_reclaim(p_hxge_t hxgep,
+ p_tx_ring_t tx_ring_p, int nmblks);
+
+void hxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, boolean_t l4_cksum,
+ int pkt_len, uint8_t npads, p_tx_pkt_hdr_all_t pkthdrp);
+
+hxge_status_t hxge_txdma_hw_mode(p_hxge_t hxgep, boolean_t enable);
+void hxge_txdma_stop(p_hxge_t hxgep);
+void hxge_fixup_txdma_rings(p_hxge_t hxgep);
+void hxge_txdma_hw_kick(p_hxge_t hxgep);
+void hxge_txdma_fix_channel(p_hxge_t hxgep, uint16_t channel);
+void hxge_txdma_fixup_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
+ uint16_t channel);
+void hxge_txdma_hw_kick_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
+ uint16_t channel);
+
+void hxge_check_tx_hang(p_hxge_t hxgep);
+void hxge_fixup_hung_txdma_rings(p_hxge_t hxgep);
+void hxge_txdma_fix_hung_channel(p_hxge_t hxgep, uint16_t channel);
+void hxge_txdma_fixup_hung_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
+ uint16_t channel);
+
+void hxge_reclaim_rings(p_hxge_t hxgep);
+int hxge_txdma_channel_hung(p_hxge_t hxgep,
+ p_tx_ring_t tx_ring_p, uint16_t channel);
+int hxge_txdma_hung(p_hxge_t hxgep);
+int hxge_txdma_stop_inj_err(p_hxge_t hxgep, int channel);
+hxge_status_t hxge_txdma_handle_sys_errors(p_hxge_t hxgep);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_TXDMA_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_txdma_hw.h b/usr/src/uts/common/io/hxge/hxge_txdma_hw.h
new file mode 100644
index 0000000000..506a589be1
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_txdma_hw.h
@@ -0,0 +1,207 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_TXDMA_HW_H
+#define _SYS_HXGE_HXGE_TXDMA_HW_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hxge_defs.h>
+#include <hxge_tdc_hw.h>
+
+/*
+ * Transmit Packet Descriptor Structure
+ * See Hydra PRM (Chapter 8, Section 8.1.1)
+ */
+typedef union _tx_desc_t {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t sop:1;
+ uint64_t mark:1;
+ uint64_t num_ptr:4;
+ uint64_t rsvd:1;
+ uint64_t tr_len:13;
+ uint64_t sad:44;
+#else
+ uint64_t sad:44;
+ uint64_t tr_len:13;
+ uint64_t rsvd:1;
+ uint64_t num_ptr:4;
+ uint64_t mark:1;
+ uint64_t sop:1;
+#endif
+ } bits;
+} tx_desc_t, *p_tx_desc_t;
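+
+/*
+ * Illustrative encoding (a sketch, not driver code): the first (SOP)
+ * descriptor of a two-gather packet moving 1500 bytes from DMA address
+ * `sad' could be built as
+ *
+ *	tx_desc_t d;
+ *	d.value = 0;
+ *	d.bits.sop = 1;
+ *	d.bits.num_ptr = 2;
+ *	d.bits.tr_len = 1500;
+ *	d.bits.sad = sad;
+ */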
+
+/*
+ * TDC Ring Configuration
+ */
+#define TDC_TDR_CFG_STADDR_SHIFT 6 /* bits 18:6 */
+#define TDC_TDR_CFG_STADDR_MASK 0x000000000007FFC0ULL
+#define TDC_TDR_CFG_ADDR_MASK 0x00000FFFFFFFFFC0ULL
+#define TDC_TDR_CFG_STADDR_BASE_SHIFT 19 /* bits 43:19 */
+#define TDC_TDR_CFG_STADDR_BASE_MASK 0x00000FFFFFF80000ULL
+#define TDC_TDR_CFG_LEN_SHIFT 53 /* bits 63:53 */
+#define TDC_TDR_CFG_LEN_MASK 0xFFE0000000000000ULL
+#define TDC_TDR_RST_SHIFT 46
+#define TDC_TDR_RST_MASK 0x0000400000000000ULL
+
+/*
+ * Transmit Event Mask
+ */
+#define TDC_INT_MASK_MK_MASK 0x0000000000008000ULL
+
+/*
+ * Transmit Mailbox High
+ */
+#define TDC_MBH_SHIFT 0 /* bit 11:0 */
+#define TDC_MBH_ADDR_SHIFT 32 /* bit 43:32 */
+#define TDC_MBH_MASK 0x0000000000000FFFULL
+
+/*
+ * Transmit Mailbox Low
+ */
+#define TDC_MBL_SHIFT 6 /* bit 31:6 */
+#define TDC_MBL_MASK 0x00000000FFFFFFC0ULL
+
+#define TXDMA_MAILBOX_BYTE_LENGTH 64
+#define TXDMA_MAILBOX_UNUSED 24
+
+typedef struct _txdma_mailbox_t {
+ tdc_stat_t tx_cs; /* 8 bytes */
+ tdc_tdr_pre_head_t tx_dma_pre_st; /* 8 bytes */
+ tdc_tdr_head_t tx_ring_hdl; /* 8 bytes */
+ tdc_tdr_kick_t tx_ring_kick; /* 8 bytes */
+ uint32_t tx_rng_err_logh; /* 4 bytes */
+ uint32_t tx_rng_err_logl; /* 4 bytes */
+ uint8_t resv[TXDMA_MAILBOX_UNUSED];
+} txdma_mailbox_t, *p_txdma_mailbox_t;
+
+/*
+ * Internal Transmit Packet Format (16 bytes)
+ */
+#define TX_PKT_HEADER_SIZE 16
+#define TX_MAX_GATHER_POINTERS 15
+#define TX_GATHER_POINTERS_THRESHOLD 8
+/*
+ * Due to hardware bugs, the maximum transfer length is reduced
+ * from 4096 to 4076, and the jumbo MTU from 9500 to 9216.
+ */
+#define TX_MAX_TRANSFER_LENGTH 4076
+#define TX_JUMBO_MTU 9216
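+/*
+ * Because of the 4076-byte limit above, a buffer longer than
+ * TX_MAX_TRANSFER_LENGTH must be split across several gather pointers;
+ * a full jumbo frame of TX_JUMBO_MTU bytes needs at least
+ * ceil(9216 / 4076) = 3 of the 15 available pointers. A sketch
+ * (hypothetical helper, not in the driver):
+ */
+#ifdef HXGE_TXDMA_HW_EXAMPLE
+static inline int
+hxge_example_nptrs(size_t len)
+{
+	return ((int)((len + TX_MAX_TRANSFER_LENGTH - 1) /
+	    TX_MAX_TRANSFER_LENGTH));
+}
+#endif	/* HXGE_TXDMA_HW_EXAMPLE */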
+
+#define TX_PKT_HEADER_PAD_SHIFT 0 /* bit 2:0 */
+#define TX_PKT_HEADER_PAD_MASK 0x0000000000000007ULL
+#define TX_PKT_HEADER_TOT_XFER_LEN_SHIFT 16 /* bit 29:16 */
+#define TX_PKT_HEADER_TOT_XFER_LEN_MASK 0x000000000000FFF8ULL
+#define TX_PKT_HEADER_L4STUFF_SHIFT 32 /* bit 37:32 */
+#define TX_PKT_HEADER_L4STUFF_MASK 0x0000003F00000000ULL
+#define TX_PKT_HEADER_L4START_SHIFT 40 /* bit 45:40 */
+#define TX_PKT_HEADER_L4START_MASK 0x00003F0000000000ULL
+#define TX_PKT_HEADER_L3START_SHIFT 48 /* bit 51:48 */
+#define TX_PKT_HEADER_IHL_SHIFT 52 /* bit 55:52 */
+#define TX_PKT_HEADER_VLAN__SHIFT 56 /* bit 56 */
+#define TX_PKT_HEADER_TCP_UDP_CRC32C_SHIFT 57 /* bit 57 */
+#define TX_PKT_HEADER_LLC_SHIFT 57 /* bit 57 */
+#define TX_PKT_HEADER_TCP_UDP_CRC32C_SET 0x0200000000000000ULL
+#define TX_PKT_HEADER_TCP_UDP_CRC32C_MASK 0x0200000000000000ULL
+#define TX_PKT_HEADER_L4_PROTO_OP_SHIFT 2 /* bit 59:58 */
+#define TX_PKT_HEADER_L4_PROTO_OP_MASK 0x0C00000000000000ULL
+#define TX_PKT_HEADER_V4_HDR_CS_SHIFT 60 /* bit 60 */
+#define TX_PKT_HEADER_V4_HDR_CS_SET 0x1000000000000000ULL
+#define TX_PKT_HEADER_V4_HDR_CS_MASK 0x1000000000000000ULL
+#define TX_PKT_HEADER_IP_VER_SHIFT 61 /* bit 61 */
+#define TX_PKT_HEADER_IP_VER_MASK 0x2000000000000000ULL
+#define TX_PKT_HEADER_PKT_TYPE_SHIFT 62 /* bit 62 */
+#define TX_PKT_HEADER_PKT_TYPE_MASK 0x4000000000000000ULL
+
+/* L4 Protocol Operations */
+#define TX_PKT_L4_PROTO_OP_NOP 0x00
+#define TX_PKT_L4_PROTO_OP_FULL_L4_CSUM 0x01
+#define TX_PKT_L4_PROTO_OP_L4_PAYLOAD_CSUM 0x02
+#define TX_PKT_L4_PROTO_OP_SCTP_CRC32 0x04
+
+/* Transmit Packet Types */
+#define TX_PKT_PKT_TYPE_NOP 0x00
+#define TX_PKT_PKT_TYPE_TCP 0x01
+#define TX_PKT_PKT_TYPE_UDP 0x02
+#define TX_PKT_PKT_TYPE_SCTP 0x03
+
+typedef union _tx_pkt_header_t {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t cksum_en_pkt_type:2;
+ uint64_t ip_ver:1;
+ uint64_t rsrvd:4;
+ uint64_t vlan:1;
+ uint64_t ihl:4;
+ uint64_t l3start:4;
+ uint64_t rsvrvd1:2;
+ uint64_t l4start:6;
+ uint64_t rsvrvd2:2;
+ uint64_t l4stuff:6;
+ uint64_t rsvrvd3:2;
+ uint64_t tot_xfer_len:14;
+ uint64_t rsrrvd4:13;
+ uint64_t pad:3;
+#else
+ uint64_t pad:3;
+ uint64_t rsrrvd4:13;
+ uint64_t tot_xfer_len:14;
+ uint64_t rsvrvd3:2;
+ uint64_t l4stuff:6;
+ uint64_t rsvrvd2:2;
+ uint64_t l4start:6;
+ uint64_t rsvrvd1:2;
+ uint64_t l3start:4;
+ uint64_t ihl:4;
+ uint64_t vlan:1;
+ uint64_t rsrvd:4;
+ uint64_t ip_ver:1;
+ uint64_t cksum_en_pkt_type:2;
+#endif
+ } bits;
+} tx_pkt_header_t, *p_tx_pkt_header_t;
+
+typedef struct _tx_pkt_hdr_all_t {
+ tx_pkt_header_t pkthdr;
+ uint64_t reserved;
+} tx_pkt_hdr_all_t, *p_tx_pkt_hdr_all_t;
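+/*
+ * Sketch (illustrative, not part of the driver): filling the internal
+ * packet header for an IPv4 TCP frame with full L4 checksum offload
+ * through the bitfield view above. The offset values and their units
+ * are hypothetical placeholders; consult the PRM for the real encoding.
+ */
+#ifdef HXGE_TXDMA_HW_EXAMPLE
+static inline uint64_t
+hxge_example_pkt_header(uint16_t xfer_len)
+{
+	tx_pkt_header_t h;
+
+	h.value = 0;
+	h.bits.cksum_en_pkt_type = TX_PKT_PKT_TYPE_TCP;
+	h.bits.l3start = 7;		/* IP header offset (placeholder) */
+	h.bits.ihl = 5;			/* 20-byte IP header */
+	h.bits.l4start = 12;		/* TCP header offset (placeholder) */
+	h.bits.l4stuff = 20;		/* checksum field offset (placeholder) */
+	h.bits.tot_xfer_len = xfer_len;	/* total transfer length */
+	return (h.value);
+}
+#endif	/* HXGE_TXDMA_HW_EXAMPLE */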
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_TXDMA_HW_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_virtual.c b/usr/src/uts/common/io/hxge/hxge_virtual.c
new file mode 100644
index 0000000000..2914cebb98
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_virtual.c
@@ -0,0 +1,1109 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hxge_impl.h>
+#include <hxge_vmac.h>
+#include <hxge_pfc.h>
+#include <hpi_pfc.h>
+
+static hxge_status_t hxge_get_mac_addr_properties(p_hxge_t);
+static void hxge_use_cfg_hydra_properties(p_hxge_t);
+static void hxge_use_cfg_dma_config(p_hxge_t);
+static void hxge_use_cfg_class_config(p_hxge_t);
+static void hxge_set_hw_dma_config(p_hxge_t);
+static void hxge_set_hw_class_config(p_hxge_t);
+static void hxge_ldgv_setup(p_hxge_ldg_t *ldgp, p_hxge_ldv_t *ldvp, uint8_t ldv,
+ uint8_t endldg, int *ngrps);
+static hxge_status_t hxge_mmac_init(p_hxge_t);
+
+extern uint16_t hxge_rcr_timeout;
+extern uint16_t hxge_rcr_threshold;
+
+extern uint32_t hxge_rbr_size;
+extern uint32_t hxge_rcr_size;
+
+extern uint_t hxge_rx_intr();
+extern uint_t hxge_tx_intr();
+extern uint_t hxge_vmac_intr();
+extern uint_t hxge_syserr_intr();
+extern uint_t hxge_pfc_intr();
+
+uint_t hxge_nmac_intr(caddr_t arg1, caddr_t arg2);
+
+/*
+ * Entry point to populate configuration parameters into the master hxge
+ * data structure and to update the NDD parameter list.
+ */
+hxge_status_t
+hxge_get_config_properties(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, VPD_CTL, " ==> hxge_get_config_properties"));
+
+ if (hxgep->hxge_hw_p == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_get_config_properties: common hardware not set"));
+ return (HXGE_ERROR);
+ }
+
+ hxgep->classifier.tcam_size = TCAM_HXGE_TCAM_MAX_ENTRY;
+
+ status = hxge_get_mac_addr_properties(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ " hxge_get_config_properties: mac addr properties failed"));
+ return (status);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, VPD_CTL,
+ " ==> hxge_get_config_properties: Hydra"));
+
+ hxge_use_cfg_hydra_properties(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, VPD_CTL, " <== hxge_get_config_properties"));
+ return (HXGE_OK);
+}
+
+
+static void
+hxge_set_hw_vlan_class_config(p_hxge_t hxgep)
+{
+ int i;
+ p_hxge_param_t param_arr;
+ uint_t vlan_cnt;
+ int *vlan_cfg_val;
+ hxge_param_map_t *vmap;
+ char *prop;
+ p_hxge_class_pt_cfg_t p_class_cfgp;
+ uint32_t good_cfg[32];
+ int good_count = 0;
+ hxge_mv_cfg_t *vlan_tbl;
+
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_set_hw_vlan_config"));
+ p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
+
+ param_arr = hxgep->param_arr;
+ prop = param_arr[param_vlan_ids].fcode_name;
+
+ /*
+ * uint32_t array, each array entry specifying a VLAN id
+ */
+ for (i = 0; i <= VLAN_ID_MAX; i++) {
+ p_class_cfgp->vlan_tbl[i].flag = 0;
+ }
+
+ vlan_tbl = (hxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
+ &vlan_cfg_val, &vlan_cnt) != DDI_PROP_SUCCESS) {
+ return;
+ }
+
+ for (i = 0; i < vlan_cnt; i++) {
+ vmap = (hxge_param_map_t *)&vlan_cfg_val[i];
+ if ((vmap->param_id) && (vmap->param_id <= VLAN_ID_MAX)) {
+ HXGE_DEBUG_MSG((hxgep, CFG2_CTL,
+ " hxge_vlan_config vlan id %d", vmap->param_id));
+
+ good_cfg[good_count] = vlan_cfg_val[i];
+ if (vlan_tbl[vmap->param_id].flag == 0)
+ good_count++;
+
+ vlan_tbl[vmap->param_id].flag = 1;
+ }
+ }
+
+ ddi_prop_free(vlan_cfg_val);
+ if (good_count != vlan_cnt) {
+ (void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
+ hxgep->dip, prop, (int *)good_cfg, good_count);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_vlan_config"));
+}
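+
+/*
+ * For illustration (the exact property name comes from the param
+ * array's fcode_name and may differ): the VLAN id list parsed above is
+ * supplied from hxge.conf as an integer array, e.g.
+ *
+ *	vlan_ids = 10, 20, 30;
+ *
+ * Entries with an id of 0 or above VLAN_ID_MAX are rejected, duplicates
+ * are counted once, and if anything was dropped the property is
+ * rewritten with the validated list.
+ */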
+
+
+/*
+ * Read param_vlan_ids and param_implicit_vlan_id properties from either
+ * hxge.conf or OBP. Update the soft properties. Populate these
+ * properties into the hxge data structure.
+ */
+static void
+hxge_use_cfg_vlan_class_config(p_hxge_t hxgep)
+{
+ uint_t vlan_cnt;
+ int *vlan_cfg_val;
+ int status;
+ p_hxge_param_t param_arr;
+ char *prop;
+ uint32_t implicit_vlan_id = 0;
+ int *int_prop_val;
+ uint_t prop_len;
+ p_hxge_param_t pa;
+
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_vlan_config"));
+ param_arr = hxgep->param_arr;
+ prop = param_arr[param_vlan_ids].fcode_name;
+
+ status = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
+ &vlan_cfg_val, &vlan_cnt);
+ if (status == DDI_PROP_SUCCESS) {
+ status = ddi_prop_update_int_array(DDI_DEV_T_NONE,
+ hxgep->dip, prop, vlan_cfg_val, vlan_cnt);
+ ddi_prop_free(vlan_cfg_val);
+ }
+
+ pa = &param_arr[param_implicit_vlan_id];
+ prop = pa->fcode_name;
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
+ &int_prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+ implicit_vlan_id = (uint32_t)*int_prop_val;
+		if ((implicit_vlan_id >= pa->minimum) &&
+		    (implicit_vlan_id <= pa->maximum)) {
+ status = ddi_prop_update_int(DDI_DEV_T_NONE, hxgep->dip,
+ prop, (int)implicit_vlan_id);
+ }
+ ddi_prop_free(int_prop_val);
+ }
+
+ hxge_set_hw_vlan_class_config(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_use_cfg_vlan_config"));
+}
+
+/*
+ * Read in the configuration parameters from either hxge.conf or OBP and
+ * populate the master data structure hxge.
+ * Use these parameters to update the soft properties and the ndd array.
+ */
+static void
+hxge_use_cfg_hydra_properties(p_hxge_t hxgep)
+{
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_hydra_properties"));
+
+ (void) hxge_use_cfg_dma_config(hxgep);
+ (void) hxge_use_cfg_vlan_class_config(hxgep);
+ (void) hxge_use_cfg_class_config(hxgep);
+
+ /*
+ * Read in the hardware (fcode) properties and use these properties
+ * to update the ndd array.
+ */
+ (void) hxge_get_param_soft_properties(hxgep);
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_use_cfg_hydra_properties"));
+}
+
+
+/*
+ * Read param_accept_jumbo, param_rxdma_intr_time, and param_rxdma_intr_pkts
+ * from either hxge.conf or OBP.
+ * Update the soft properties.
+ * Populate these properties into the hxge data structure for later use.
+ */
+static void
+hxge_use_cfg_dma_config(p_hxge_t hxgep)
+{
+ int tx_ndmas, rx_ndmas;
+ p_hxge_dma_pt_cfg_t p_dma_cfgp;
+ p_hxge_hw_pt_cfg_t p_cfgp;
+ dev_info_t *dip;
+ p_hxge_param_t param_arr;
+ char *prop;
+ int *prop_val;
+ uint_t prop_len;
+
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_dma_config"));
+ param_arr = hxgep->param_arr;
+
+ p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
+ p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+ dip = hxgep->dip;
+
+ tx_ndmas = 4;
+ p_cfgp->start_tdc = 0;
+ p_cfgp->max_tdcs = hxgep->max_tdcs = tx_ndmas;
+ hxgep->tdc_mask = (tx_ndmas - 1);
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_cfg_dma_config: "
+ "p_cfgp 0x%llx max_tdcs %d hxgep->max_tdcs %d",
+ p_cfgp, p_cfgp->max_tdcs, hxgep->max_tdcs));
+
+ rx_ndmas = 4;
+ p_cfgp->start_rdc = 0;
+ p_cfgp->max_rdcs = hxgep->max_rdcs = rx_ndmas;
+
+ p_cfgp->start_ldg = 0;
+ p_cfgp->max_ldgs = HXGE_INT_MAX_LDG;
+
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_default_dma_config: "
+ "p_cfgp 0x%llx max_rdcs %d hxgep->max_rdcs %d",
+ p_cfgp, p_cfgp->max_rdcs, hxgep->max_rdcs));
+
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_cfg_dma_config: "
+ "p_cfgp 0x%016llx start_ldg %d hxgep->max_ldgs %d ",
+ p_cfgp, p_cfgp->start_ldg, p_cfgp->max_ldgs));
+
+ /*
+ * add code for individual rdc properties
+ */
+ prop = param_arr[param_accept_jumbo].fcode_name;
+
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
+ &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+ if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
+ (void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
+ hxgep->dip, prop, prop_val, prop_len);
+ }
+ ddi_prop_free(prop_val);
+ }
+
+ prop = param_arr[param_rxdma_intr_time].fcode_name;
+
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
+ &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+ if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
+ (void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
+ hxgep->dip, prop, prop_val, prop_len);
+ }
+ ddi_prop_free(prop_val);
+ }
+
+ prop = param_arr[param_rxdma_intr_pkts].fcode_name;
+
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
+ &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+ if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
+ (void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
+ hxgep->dip, prop, prop_val, prop_len);
+ }
+ ddi_prop_free(prop_val);
+ }
+
+ hxge_set_hw_dma_config(hxgep);
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, "<== hxge_use_cfg_dma_config"));
+}
+
+static void
+hxge_use_cfg_class_config(p_hxge_t hxgep)
+{
+ hxge_set_hw_class_config(hxgep);
+}
+
+static void
+hxge_set_hw_dma_config(p_hxge_t hxgep)
+{
+ p_hxge_dma_pt_cfg_t p_dma_cfgp;
+ p_hxge_hw_pt_cfg_t p_cfgp;
+
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_set_hw_dma_config"));
+
+ p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
+ p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+ /* Transmit DMA Channels */
+ hxgep->ntdc = p_cfgp->max_tdcs;
+
+ /* Receive DMA Channels */
+ hxgep->nrdc = p_cfgp->max_rdcs;
+
+ p_dma_cfgp->rbr_size = hxge_rbr_size;
+ p_dma_cfgp->rcr_size = hxge_rcr_size;
+
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_dma_config"));
+}
+
+
+boolean_t
+hxge_check_rxdma_port_member(p_hxge_t hxgep, uint8_t rdc)
+{
+ p_hxge_dma_pt_cfg_t p_dma_cfgp;
+ p_hxge_hw_pt_cfg_t p_cfgp;
+	int status = B_FALSE;
+
+ HXGE_DEBUG_MSG((hxgep, CFG2_CTL, "==> hxge_check_rxdma_port_member"));
+
+ p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
+ p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+ /* Receive DMA Channels */
+ if (rdc < p_cfgp->max_rdcs)
+ status = B_TRUE;
+ HXGE_DEBUG_MSG((hxgep, CFG2_CTL, " <== hxge_check_rxdma_port_member"));
+
+ return (status);
+}
+
+boolean_t
+hxge_check_txdma_port_member(p_hxge_t hxgep, uint8_t tdc)
+{
+ p_hxge_dma_pt_cfg_t p_dma_cfgp;
+ p_hxge_hw_pt_cfg_t p_cfgp;
+ int status = B_FALSE;
+
+ HXGE_DEBUG_MSG((hxgep, CFG2_CTL, "==> hxge_check_txdma_port_member"));
+
+ p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
+ p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	/* Transmit DMA Channels */
+ if (tdc < p_cfgp->max_tdcs)
+ status = B_TRUE;
+ HXGE_DEBUG_MSG((hxgep, CFG2_CTL, " <== hxge_check_txdma_port_member"));
+
+ return (status);
+}
+
+
+/*
+ * Read the L2 classes, L3 classes, and initial hash from either hxge.conf
+ * or OBP. Populate these properties into the hxge data structure for later
+ * use. Note that we are not updating these soft properties.
+ */
+static void
+hxge_set_hw_class_config(p_hxge_t hxgep)
+{
+ int i, j;
+ p_hxge_param_t param_arr;
+ int *int_prop_val;
+ uint32_t cfg_value;
+ char *prop;
+ p_hxge_class_pt_cfg_t p_class_cfgp;
+ int start_prop, end_prop;
+ uint_t prop_cnt;
+
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_set_hw_class_config"));
+
+ p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
+
+ param_arr = hxgep->param_arr;
+
+ /*
+ * L2 class configuration. User configurable ether types
+ */
+ start_prop = param_class_cfg_ether_usr1;
+ end_prop = param_class_cfg_ether_usr2;
+
+ for (i = start_prop; i <= end_prop; i++) {
+ prop = param_arr[i].fcode_name;
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip,
+ 0, prop, &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
+ cfg_value = (uint32_t)*int_prop_val;
+ ddi_prop_free(int_prop_val);
+ } else {
+ cfg_value = (uint32_t)param_arr[i].value;
+ }
+
+ j = (i - start_prop) + TCAM_CLASS_ETYPE_1;
+ p_class_cfgp->class_cfg[j] = cfg_value;
+ }
+
+ /*
+ * Use properties from either .conf or the NDD param array. Only bits
+ * 2 and 3 are significant
+ */
+ start_prop = param_class_opt_ipv4_tcp;
+ end_prop = param_class_opt_ipv6_sctp;
+
+ for (i = start_prop; i <= end_prop; i++) {
+ prop = param_arr[i].fcode_name;
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip,
+ 0, prop, &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
+ cfg_value = (uint32_t)*int_prop_val;
+ ddi_prop_free(int_prop_val);
+ } else {
+ cfg_value = (uint32_t)param_arr[i].value;
+ }
+
+ j = (i - start_prop) + TCAM_CLASS_TCP_IPV4;
+ p_class_cfgp->class_cfg[j] = cfg_value;
+ }
+
+ prop = param_arr[param_hash_init_value].fcode_name;
+
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
+ &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
+ cfg_value = (uint32_t)*int_prop_val;
+ ddi_prop_free(int_prop_val);
+ } else {
+ cfg_value = (uint32_t)param_arr[param_hash_init_value].value;
+ }
+
+ p_class_cfgp->init_hash = (uint32_t)cfg_value;
+
+ HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_class_config"));
+}
+
+
+/*
+ * Interrupts related interface functions.
+ */
+hxge_status_t
+hxge_ldgv_init(p_hxge_t hxgep, int *navail_p, int *nrequired_p)
+{
+ uint8_t ldv, i, maxldvs, maxldgs, start, end, nldvs;
+ int ldg, endldg, ngrps;
+ uint8_t channel;
+ p_hxge_dma_pt_cfg_t p_dma_cfgp;
+ p_hxge_hw_pt_cfg_t p_cfgp;
+ p_hxge_ldgv_t ldgvp;
+ p_hxge_ldg_t ldgp, ptr;
+ p_hxge_ldv_t ldvp;
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_ldgv_init"));
+ if (!*navail_p) {
+ *nrequired_p = 0;
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_ldgv_init:no avail"));
+ return (HXGE_ERROR);
+ }
+ p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
+ p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
+
+	/* one logical device per DMA channel */
+ nldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;
+
+ /* vmac */
+ nldvs++;
+
+ /* pfc */
+ nldvs++;
+
+ /* nmac for the link status register only */
+ nldvs++;
+
+ /* system error interrupts. */
+ nldvs++;
+
+ maxldvs = nldvs;
+ maxldgs = p_cfgp->max_ldgs;
+
+ if (!maxldvs || !maxldgs) {
+ /* No devices configured. */
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_ldgv_init: "
+ "no logical devices or groups configured."));
+ return (HXGE_ERROR);
+ }
+ ldgvp = hxgep->ldgvp;
+ if (ldgvp == NULL) {
+ ldgvp = KMEM_ZALLOC(sizeof (hxge_ldgv_t), KM_SLEEP);
+ hxgep->ldgvp = ldgvp;
+ ldgvp->maxldgs = maxldgs;
+ ldgvp->maxldvs = maxldvs;
+ ldgp = ldgvp->ldgp =
+ KMEM_ZALLOC(sizeof (hxge_ldg_t) * maxldgs, KM_SLEEP);
+ ldvp = ldgvp->ldvp =
+ KMEM_ZALLOC(sizeof (hxge_ldv_t) * maxldvs, KM_SLEEP);
+	} else {
+		ldgp = ldgvp->ldgp;
+		ldvp = ldgvp->ldvp;
+	}
+
+ ldgvp->ndma_ldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;
+ ldgvp->tmres = HXGE_TIMER_RESO;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_ldgv_init: maxldvs %d maxldgs %d nldvs %d",
+ maxldvs, maxldgs, nldvs));
+
+ ldg = p_cfgp->start_ldg;
+ ptr = ldgp;
+ for (i = 0; i < maxldgs; i++) {
+ ptr->arm = B_TRUE;
+ ptr->vldg_index = i;
+ ptr->ldg_timer = HXGE_TIMER_LDG;
+ ptr->ldg = ldg++;
+ ptr->sys_intr_handler = hxge_intr;
+ ptr->nldvs = 0;
+ ptr->hxgep = hxgep;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_ldgv_init: maxldvs %d maxldgs %d ldg %d",
+ maxldvs, maxldgs, ptr->ldg));
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_ldv_init: timer %d", ptr->ldg_timer));
+ ptr++;
+ }
+
+ ldg = p_cfgp->start_ldg;
+ if (maxldgs > *navail_p) {
+ ngrps = *navail_p;
+ } else {
+ ngrps = maxldgs;
+ }
+ endldg = ldg + ngrps;
+
+ /*
+ * Receive DMA channels.
+ */
+ channel = p_cfgp->start_rdc;
+ start = p_cfgp->start_rdc + HXGE_RDMA_LD_START;
+ end = start + p_cfgp->max_rdcs;
+ nldvs = 0;
+ ldgvp->nldvs = 0;
+ ldgp->ldvp = NULL;
+ *nrequired_p = 0;
+ ptr = ldgp;
+
+ /*
+ * Start with RDC to configure logical devices for each group.
+ */
+ for (i = 0, ldv = start; ldv < end; i++, ldv++) {
+ ldvp->is_rxdma = B_TRUE;
+ ldvp->ldv = ldv;
+
+		/*
+		 * If channels are not sequential, the following code
+		 * needs to change.
+		 */
+ ldvp->channel = channel++;
+ ldvp->vdma_index = i;
+ ldvp->ldv_intr_handler = hxge_rx_intr;
+ ldvp->ldv_ldf_masks = 0;
+ ldvp->use_timer = B_FALSE;
+ ldvp->hxgep = hxgep;
+ hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
+ nldvs++;
+ }
+
+ /*
+ * Transmit DMA channels.
+ */
+ channel = p_cfgp->start_tdc;
+ start = p_cfgp->start_tdc + HXGE_TDMA_LD_START;
+ end = start + p_cfgp->max_tdcs;
+ for (i = 0, ldv = start; ldv < end; i++, ldv++) {
+ ldvp->is_txdma = B_TRUE;
+ ldvp->ldv = ldv;
+ ldvp->channel = channel++;
+ ldvp->vdma_index = i;
+ ldvp->ldv_intr_handler = hxge_tx_intr;
+ ldvp->ldv_ldf_masks = 0;
+ ldvp->use_timer = B_FALSE;
+ ldvp->hxgep = hxgep;
+ hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
+ nldvs++;
+ }
+
+ /*
+ * VMAC
+ */
+ ldvp->is_vmac = B_TRUE;
+ ldvp->ldv_intr_handler = hxge_vmac_intr;
+ ldvp->ldv_ldf_masks = 0;
+ ldv = HXGE_VMAC_LD;
+ ldvp->ldv = ldv;
+ ldvp->use_timer = B_FALSE;
+ ldvp->hxgep = hxgep;
+ hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
+ nldvs++;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
+ nldvs, *navail_p, *nrequired_p));
+
+ /*
+ * PFC
+ */
+ ldvp->is_pfc = B_TRUE;
+ ldvp->ldv_intr_handler = hxge_pfc_intr;
+ ldvp->ldv_ldf_masks = 0;
+ ldv = HXGE_PFC_LD;
+ ldvp->ldv = ldv;
+ ldvp->use_timer = B_FALSE;
+ ldvp->hxgep = hxgep;
+ hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
+ nldvs++;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
+ nldvs, *navail_p, *nrequired_p));
+
+ /*
+ * NMAC
+ */
+ ldvp->ldv_intr_handler = hxge_nmac_intr;
+ ldvp->ldv_ldf_masks = 0;
+ ldv = HXGE_NMAC_LD;
+ ldvp->ldv = ldv;
+ ldvp->use_timer = B_FALSE;
+ ldvp->hxgep = hxgep;
+ hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
+ nldvs++;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
+ nldvs, *navail_p, *nrequired_p));
+
+ /*
+ * System error interrupts.
+ */
+ ldv = HXGE_SYS_ERROR_LD;
+ ldvp->ldv = ldv;
+ ldvp->is_syserr = B_TRUE;
+ ldvp->ldv_intr_handler = hxge_syserr_intr;
+ ldvp->ldv_ldf_masks = 0;
+ ldvp->hxgep = hxgep;
+ ldvp->use_timer = B_FALSE;
+ ldgvp->ldvp_syserr = ldvp;
+
+ /* Reset PEU error mask to allow PEU error interrupts */
+ HXGE_REG_WR64(hxgep->hpi_handle, PEU_INTR_MASK, 0x0);
+
+ /*
+ * Unmask the system interrupt states.
+ */
+ (void) hxge_fzc_sys_err_mask_set(hxgep, B_FALSE);
+ (void) hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
+ nldvs++;
+
+ ldgvp->ldg_intrs = *nrequired_p;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
+ nldvs, *navail_p, *nrequired_p));
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_ldgv_init"));
+ return (status);
+}
+
+hxge_status_t
+hxge_ldgv_uninit(p_hxge_t hxgep)
+{
+ p_hxge_ldgv_t ldgvp;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_ldgv_uninit"));
+ ldgvp = hxgep->ldgvp;
+ if (ldgvp == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_ldgv_uninit: no logical group configured."));
+ return (HXGE_OK);
+ }
+
+ if (ldgvp->ldgp) {
+ KMEM_FREE(ldgvp->ldgp, sizeof (hxge_ldg_t) * ldgvp->maxldgs);
+ }
+ if (ldgvp->ldvp) {
+ KMEM_FREE(ldgvp->ldvp, sizeof (hxge_ldv_t) * ldgvp->maxldvs);
+ }
+
+ KMEM_FREE(ldgvp, sizeof (hxge_ldgv_t));
+ hxgep->ldgvp = NULL;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_ldgv_uninit"));
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_intr_ldgv_init(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_ldgv_init"));
+ /*
+ * Configure the logical device group numbers, state vectors
+ * and interrupt masks for each logical device.
+ */
+ status = hxge_fzc_intr_init(hxgep);
+
+ /*
+ * Configure logical device masks and timers.
+ */
+ status = hxge_intr_mask_mgmt(hxgep);
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_ldgv_init"));
+ return (status);
+}
+
+hxge_status_t
+hxge_intr_mask_mgmt(p_hxge_t hxgep)
+{
+ p_hxge_ldgv_t ldgvp;
+ p_hxge_ldg_t ldgp;
+ p_hxge_ldv_t ldvp;
+ hpi_handle_t handle;
+ int i, j;
+ hpi_status_t rs = HPI_SUCCESS;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_mask_mgmt"));
+
+ if ((ldgvp = hxgep->ldgvp) == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_intr_mask_mgmt: Null ldgvp"));
+ return (HXGE_ERROR);
+ }
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ ldgp = ldgvp->ldgp;
+ ldvp = ldgvp->ldvp;
+ if (ldgp == NULL || ldvp == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_intr_mask_mgmt: Null ldgp or ldvp"));
+ return (HXGE_ERROR);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt: # of intrs %d ", ldgvp->ldg_intrs));
+ /* Initialize masks. */
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt(Hydra): # intrs %d ", ldgvp->ldg_intrs));
+ for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt(Hydra): # ldv %d in group %d",
+ ldgp->nldvs, ldgp->ldg));
+ for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt: set ldv # %d "
+ "for ldg %d", ldvp->ldv, ldgp->ldg));
+ rs = hpi_intr_mask_set(handle, ldvp->ldv,
+ ldvp->ldv_ldf_masks);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_intr_mask_mgmt: set mask failed "
+ " rs 0x%x ldv %d mask 0x%x",
+ rs, ldvp->ldv, ldvp->ldv_ldf_masks));
+ return (HXGE_ERROR | rs);
+ }
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt: set mask OK "
+ " rs 0x%x ldv %d mask 0x%x",
+ rs, ldvp->ldv, ldvp->ldv_ldf_masks));
+ }
+ }
+
+ ldgp = ldgvp->ldgp;
+ /* Configure timer and arm bit */
+ for (i = 0; i < hxgep->ldgvp->ldg_intrs; i++, ldgp++) {
+ rs = hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
+ ldgp->arm, ldgp->ldg_timer);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_intr_mask_mgmt: set timer failed "
+ " rs 0x%x dg %d timer 0x%x",
+ rs, ldgp->ldg, ldgp->ldg_timer));
+ return (HXGE_ERROR | rs);
+ }
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt: set timer OK "
+ " rs 0x%x ldg %d timer 0x%x",
+ rs, ldgp->ldg, ldgp->ldg_timer));
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_fzc_intr_mask_mgmt"));
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_intr_mask_mgmt_set(p_hxge_t hxgep, boolean_t on)
+{
+ p_hxge_ldgv_t ldgvp;
+ p_hxge_ldg_t ldgp;
+ p_hxge_ldv_t ldvp;
+ hpi_handle_t handle;
+ int i, j;
+ hpi_status_t rs = HPI_SUCCESS;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt_set (%d)", on));
+
+ if ((ldgvp = hxgep->ldgvp) == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_intr_mask_mgmt_set: Null ldgvp"));
+ return (HXGE_ERROR);
+ }
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ ldgp = ldgvp->ldgp;
+ ldvp = ldgvp->ldvp;
+ if (ldgp == NULL || ldvp == NULL) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_intr_mask_mgmt_set: Null ldgp or ldvp"));
+ return (HXGE_ERROR);
+ }
+
+ /* set masks. */
+ for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt_set: flag %d ldg %d"
+ "set mask nldvs %d", on, ldgp->ldg, ldgp->nldvs));
+ for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt_set: "
+ "for %d %d flag %d", i, j, on));
+ if (on) {
+ ldvp->ldv_ldf_masks = 0;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt_set: "
+ "ON mask off"));
+ } else {
+ ldvp->ldv_ldf_masks = (uint8_t)LD_IM_MASK;
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt_set:mask on"));
+ }
+
+			/*
+			 * Bringup workaround: the NMAC interrupts constantly
+			 * because hydrad is not available yet. Once hydrad is
+			 * available and handles these interrupts, the
+			 * following two lines will be removed.
+			 */
+ if (ldvp->ldv_intr_handler == hxge_nmac_intr)
+ ldvp->ldv_ldf_masks = (uint8_t)LD_IM_MASK;
+
+ rs = hpi_intr_mask_set(handle, ldvp->ldv,
+ ldvp->ldv_ldf_masks);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "==> hxge_intr_mask_mgmt_set: "
+ "set mask failed rs 0x%x ldv %d mask 0x%x",
+ rs, ldvp->ldv, ldvp->ldv_ldf_masks));
+ return (HXGE_ERROR | rs);
+ }
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt_set: flag %d"
+ "set mask OK ldv %d mask 0x%x",
+ on, ldvp->ldv, ldvp->ldv_ldf_masks));
+ }
+ }
+
+ ldgp = ldgvp->ldgp;
+ /* set the arm bit */
+ for (i = 0; i < hxgep->ldgvp->ldg_intrs; i++, ldgp++) {
+ if (on && !ldgp->arm) {
+ ldgp->arm = B_TRUE;
+ } else if (!on && ldgp->arm) {
+ ldgp->arm = B_FALSE;
+ }
+ rs = hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
+ ldgp->arm, ldgp->ldg_timer);
+ if (rs != HPI_SUCCESS) {
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
+ "<== hxge_intr_mask_mgmt_set: "
+ "set timer failed rs 0x%x ldg %d timer 0x%x",
+ rs, ldgp->ldg, ldgp->ldg_timer));
+ return (HXGE_ERROR | rs);
+ }
+ HXGE_DEBUG_MSG((hxgep, INT_CTL,
+ "==> hxge_intr_mask_mgmt_set: OK (flag %d) "
+ "set timer ldg %d timer 0x%x",
+ on, ldgp->ldg, ldgp->ldg_timer));
+ }
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_mask_mgmt_set"));
+ return (HXGE_OK);
+}
+
+/*
+ * For Big Endian systems, the mac address will be from OBP. For Little
+ * Endian (x64) systems, it will be retrieved from the card since it cannot
+ * be programmed into PXE.
+ * This function also populates the MMAC parameters.
+ */
+static hxge_status_t
+hxge_get_mac_addr_properties(p_hxge_t hxgep)
+{
+#if defined(_BIG_ENDIAN)
+ uchar_t *prop_val;
+ uint_t prop_len;
+#endif
+ uint32_t num_macs;
+ hxge_status_t status;
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_get_mac_addr_properties "));
+#if defined(_BIG_ENDIAN)
+ /*
+ * Get the ethernet address.
+ */
+ (void) localetheraddr((struct ether_addr *)NULL, &hxgep->ouraddr);
+
+ /*
+ * Check if it is an adapter with its own local mac address
+ * If it is present, override the system mac address.
+ */
+ if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, hxgep->dip, 0,
+ "local-mac-address", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+ if (prop_len == ETHERADDRL) {
+ hxgep->factaddr = *(p_ether_addr_t)prop_val;
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Local mac address = "
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ prop_val[0], prop_val[1], prop_val[2],
+ prop_val[3], prop_val[4], prop_val[5]));
+ }
+ ddi_prop_free(prop_val);
+ }
+ if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, hxgep->dip, 0,
+ "local-mac-address?", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
+ if (strncmp("true", (caddr_t)prop_val, (size_t)prop_len) == 0) {
+ hxgep->ouraddr = hxgep->factaddr;
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL,
+ "Using local MAC address"));
+ }
+ ddi_prop_free(prop_val);
+ } else {
+ hxgep->ouraddr = hxgep->factaddr;
+ }
+#else
+ (void) hxge_pfc_mac_addrs_get(hxgep);
+ hxgep->ouraddr = hxgep->factaddr;
+#endif
+
+ /*
+ * Get the number of MAC addresses the Hydra supports per blade.
+ */
+ if (hxge_pfc_num_macs_get(hxgep, &num_macs) == HXGE_OK) {
+ hxgep->hxge_mmac_info.num_mmac = (uint8_t)num_macs;
+ } else {
+ HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
+ "hxge_get_mac_addr_properties: get macs failed"));
+ return (HXGE_ERROR);
+ }
+
+ /*
+ * Initialize alt. mac addr. in the mac pool
+ */
+ status = hxge_mmac_init(hxgep);
+ if (status != HXGE_OK) {
+ HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
+ "hxge_get_mac_addr_properties: init mmac failed"));
+ return (HXGE_ERROR);
+ }
+
+ HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_get_mac_addr_properties "));
+ return (HXGE_OK);
+}
+
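+/*
+ * Note on the assignment policy below: logical devices are handed out
+ * one per group until the last available group (endldg - 1) is
+ * reached; every remaining device is then placed in that last group.
+ */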
+static void
+hxge_ldgv_setup(p_hxge_ldg_t *ldgp, p_hxge_ldv_t *ldvp, uint8_t ldv,
+ uint8_t endldg, int *ngrps)
+{
+ HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup"));
+ /* Assign the group number for each device. */
+ (*ldvp)->ldg_assigned = (*ldgp)->ldg;
+ (*ldvp)->ldgp = *ldgp;
+ (*ldvp)->ldv = ldv;
+
+ HXGE_DEBUG_MSG((NULL, INT_CTL,
+ "==> hxge_ldgv_setup: ldv %d endldg %d ldg %d, ldvp $%p",
+ ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));
+
+ (*ldgp)->nldvs++;
+ if ((*ldgp)->ldg == (endldg - 1)) {
+ if ((*ldgp)->ldvp == NULL) {
+ (*ldgp)->ldvp = *ldvp;
+ *ngrps += 1;
+ HXGE_DEBUG_MSG((NULL, INT_CTL,
+ "==> hxge_ldgv_setup: ngrps %d", *ngrps));
+ }
+ HXGE_DEBUG_MSG((NULL, INT_CTL,
+ "==> hxge_ldgv_setup: ldvp $%p ngrps %d",
+ *ldvp, *ngrps));
+ ++*ldvp;
+ } else {
+ (*ldgp)->ldvp = *ldvp;
+ *ngrps += 1;
+ HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup(done): "
+ "ldv %d endldg %d ldg %d, ldvp $%p",
+ ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));
+		++*ldvp;
+		++*ldgp;
+ HXGE_DEBUG_MSG((NULL, INT_CTL,
+ "==> hxge_ldgv_setup: new ngrps %d", *ngrps));
+ }
+
+ HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup: "
+ "ldg %d nldvs %d ldv %d ldvp $%p endldg %d ngrps %d",
+ (*ldgp)->ldg, (*ldgp)->nldvs, ldv, ldvp, endldg, *ngrps));
+
+ HXGE_DEBUG_MSG((NULL, INT_CTL, "<== hxge_ldgv_setup"));
+}
+
+/*
+ * Note: This function assumes the following distribution of mac
+ * addresses for a hydra blade:
+ *
+ * -------------
+ * 0| |0 - local-mac-address for blade
+ * -------------
+ * | |1 - Start of alt. mac addr. for blade
+ * | |
+ * | |
+ * | |15
+ * --------------
+ */
+
+static hxge_status_t
+hxge_mmac_init(p_hxge_t hxgep)
+{
+ int slot;
+ hxge_mmac_t *mmac_info;
+
+ mmac_info = (hxge_mmac_t *)&hxgep->hxge_mmac_info;
+
+ /* Set flags for unique MAC */
+ mmac_info->mac_pool[0].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
+ mmac_info->num_factory_mmac = 1;
+
+ /*
+ * Skip the factory/default address which is in slot 0.
+	 * Initialize all other mac addresses to the "AVAILABLE" state.
+ * Clear flags of all alternate MAC slots.
+ */
+ for (slot = 1; slot < mmac_info->num_mmac; slot++) {
+ (void) hpi_pfc_clear_mac_address(hxgep->hpi_handle, slot);
+ mmac_info->mac_pool[slot].flags = 0;
+ }
+
+ /* Exclude the factory mac address */
+ mmac_info->naddrfree = mmac_info->num_mmac - 1;
+
+ /* Initialize the first two parameters for mmac kstat */
+ hxgep->statsp->mmac_stats.mmac_max_cnt = mmac_info->num_mmac;
+ hxgep->statsp->mmac_stats.mmac_avail_cnt = mmac_info->naddrfree;
+
+ return (HXGE_OK);
+}
+
+/*ARGSUSED*/
+uint_t
+hxge_nmac_intr(caddr_t arg1, caddr_t arg2)
+{
+ p_hxge_t hxgep = (p_hxge_t)arg2;
+ hpi_handle_t handle;
+ p_hxge_stats_t statsp;
+ cip_link_stat_t link_stat;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_INT_CTL, "==> hxge_nmac_intr"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+ statsp = (p_hxge_stats_t)hxgep->statsp;
+
+ HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
+ HXGE_DEBUG_MSG((hxgep, MAC_INT_CTL, "hxge_nmac_intr: status is 0x%x",
+ link_stat.value));
+
+ if (link_stat.bits.xpcs0_link_up) {
+ mac_link_update(hxgep->mach, LINK_STATE_UP);
+ statsp->mac_stats.link_up = 1;
+ } else {
+ mac_link_update(hxgep->mach, LINK_STATE_DOWN);
+ statsp->mac_stats.link_up = 0;
+ }
+
+ HXGE_DEBUG_MSG((hxgep, MAC_INT_CTL, "<== hxge_nmac_intr"));
+ return (DDI_INTR_CLAIMED);
+}
diff --git a/usr/src/uts/common/io/hxge/hxge_virtual.h b/usr/src/uts/common/io/hxge/hxge_virtual.h
new file mode 100644
index 0000000000..2480a0dc69
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_virtual.h
@@ -0,0 +1,55 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_VIRTUAL_H
+#define _SYS_HXGE_HXGE_VIRTUAL_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* 12 bits are available */
+#define COMMON_CFG_VALID 0x01
+#define COMMON_CFG_BUSY 0x02
+#define COMMON_INIT_START 0x04
+#define COMMON_INIT_DONE 0x08
+#define COMMON_TCAM_BUSY 0x10
+#define COMMON_VLAN_BUSY 0x20
+
+#define COMMON_TXDMA_CFG 1
+#define COMMON_RXDMA_CFG 2
+#define COMMON_RXDMA_GRP_CFG 4
+#define COMMON_CLASS_CFG 8
+#define COMMON_QUICK_CFG 0x10
+
+hxge_status_t hxge_intr_mask_mgmt(p_hxge_t hxgep);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_VIRTUAL_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_vmac.c b/usr/src/uts/common/io/hxge/hxge_vmac.c
new file mode 100644
index 0000000000..266f96cc5e
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_vmac.c
@@ -0,0 +1,399 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <hxge_impl.h>
+#include <hxge_vmac.h>
+
+hxge_status_t hxge_vmac_init(p_hxge_t hxgep);
+hxge_status_t hxge_tx_vmac_init(p_hxge_t hxgep);
+hxge_status_t hxge_rx_vmac_init(p_hxge_t hxgep);
+hxge_status_t hxge_tx_vmac_enable(p_hxge_t hxgep);
+hxge_status_t hxge_tx_vmac_disable(p_hxge_t hxgep);
+hxge_status_t hxge_rx_vmac_enable(p_hxge_t hxgep);
+hxge_status_t hxge_rx_vmac_disable(p_hxge_t hxgep);
+hxge_status_t hxge_tx_vmac_reset(p_hxge_t hxgep);
+hxge_status_t hxge_rx_vmac_reset(p_hxge_t hxgep);
+uint_t hxge_vmac_intr(caddr_t arg1, caddr_t arg2);
+hxge_status_t hxge_set_promisc(p_hxge_t hxgep, boolean_t on);
+
+extern boolean_t hxge_jumbo_enable;
+
+hxge_status_t
+hxge_link_init(p_hxge_t hxgep)
+{
+ p_hxge_stats_t statsp;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_link_init>"));
+
+ statsp = hxgep->statsp;
+
+ statsp->mac_stats.cap_10gfdx = 1;
+ statsp->mac_stats.lp_cap_10gfdx = 1;
+
+ /*
+ * The driver doesn't control the link.
+ * It is always 10Gb full duplex.
+ */
+ statsp->mac_stats.link_duplex = 2;
+ statsp->mac_stats.link_speed = 10000;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_link_init"));
+ return (HXGE_OK);
+}
+
+hxge_status_t
+hxge_vmac_init(p_hxge_t hxgep)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_vmac_init:"));
+
+ if ((status = hxge_tx_vmac_reset(hxgep)) != HXGE_OK)
+ goto fail;
+
+ if ((status = hxge_rx_vmac_reset(hxgep)) != HXGE_OK)
+ goto fail;
+
+ if ((status = hxge_tx_vmac_enable(hxgep)) != HXGE_OK)
+ goto fail;
+
+ if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK)
+ goto fail;
+
+ /* Clear the interrupt status registers */
+ (void) hpi_vmac_clear_rx_int_stat(hxgep->hpi_handle);
+ (void) hpi_vmac_clear_tx_int_stat(hxgep->hpi_handle);
+
+ /*
+ * Take the masks off the overflow counters. Interrupt the system when
+	 * any count overflows. Don't interrupt the system for each frame.
+ * The current counts are retrieved when the "kstat" command is used.
+ */
+ (void) hpi_pfc_set_rx_int_stat_mask(hxgep->hpi_handle, 0, 1);
+ (void) hpi_pfc_set_tx_int_stat_mask(hxgep->hpi_handle, 0, 1);
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_vmac_init:"));
+
+ return (HXGE_OK);
+fail:
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL,
+ "hxge_vmac_init: failed to initialize VMAC>"));
+
+ return (status);
+}
+
+
+/* Initialize the TxVMAC sub-block */
+
+hxge_status_t
+hxge_tx_vmac_init(p_hxge_t hxgep)
+{
+ uint64_t config;
+ hpi_handle_t handle = hxgep->hpi_handle;
+
+ /* Set Max and Min Frame Size */
+
+ hxgep->vmac.is_jumbo = B_FALSE;
+ if (hxge_jumbo_enable)
+ hxgep->vmac.is_jumbo = B_TRUE;
+
+ if (hxgep->param_arr[param_accept_jumbo].value ||
+ hxgep->vmac.is_jumbo == B_TRUE)
+ hxgep->vmac.maxframesize = TX_JUMBO_MTU;
+ else
+ hxgep->vmac.maxframesize = STD_FRAME_SIZE + TX_PKT_HEADER_SIZE;
+ hxgep->vmac.minframesize = 64;
+
+ /* CFG_VMAC_TX_EN is done separately */
+ config = CFG_VMAC_TX_CRC_INSERT | CFG_VMAC_TX_PAD;
+
+ if (hpi_vmac_tx_config(handle, INIT, config,
+ hxgep->vmac.maxframesize) != HPI_SUCCESS)
+ return (HXGE_ERROR);
+
+ hxgep->vmac.tx_config = config;
+
+ return (HXGE_OK);
+}
+
+/* Initialize the RxVMAC sub-block */
+
+hxge_status_t
+hxge_rx_vmac_init(p_hxge_t hxgep)
+{
+ uint64_t xconfig;
+ hpi_handle_t handle = hxgep->hpi_handle;
+ uint16_t max_frame_length = hxgep->vmac.maxframesize;
+
+ /*
+ * NOTE: CFG_VMAC_RX_ENABLE is done separately. Do not enable
+	 * CRC stripping: per Bug ID 11451, enabling it causes minimum
+	 * sized packets to be rejected.
+ */
+ xconfig = CFG_VMAC_RX_CRC_CHECK_DISABLE |
+ CFG_VMAC_RX_PASS_FLOW_CTRL_FR;
+
+ if (hxgep->filter.all_phys_cnt != 0)
+ xconfig |= CFG_VMAC_RX_PROMISCUOUS_MODE;
+
+ if (hxgep->filter.all_multicast_cnt != 0)
+ xconfig |= CFG_VMAC_RX_PROMIXCUOUS_GROUP;
+
+ if (hxgep->statsp->port_stats.lb_mode != hxge_lb_normal)
+ xconfig |= CFG_VMAC_RX_LOOP_BACK;
+
+ if (hpi_vmac_rx_config(handle, INIT, xconfig, max_frame_length)
+ != HPI_SUCCESS)
+ return (HXGE_ERROR);
+
+ hxgep->vmac.rx_config = xconfig;
+
+ return (HXGE_OK);
+}
+
+/* Enable TxVMAC */
+
+hxge_status_t
+hxge_tx_vmac_enable(p_hxge_t hxgep)
+{
+ hpi_status_t rv;
+ hxge_status_t status = HXGE_OK;
+ hpi_handle_t handle = hxgep->hpi_handle;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_tx_vmac_enable"));
+
+ rv = hxge_tx_vmac_init(hxgep);
+ if (rv != HXGE_OK)
+ return (rv);
+
+ /* Based on speed */
+ hxgep->msg_min = ETHERMIN;
+
+ rv = hpi_vmac_tx_config(handle, ENABLE, CFG_VMAC_TX_EN, 0);
+
+ status = (rv == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_tx_vmac_enable"));
+
+ return (status);
+}
+
+/* Disable TxVMAC */
+
+hxge_status_t
+hxge_tx_vmac_disable(p_hxge_t hxgep)
+{
+ hpi_status_t rv;
+ hxge_status_t status = HXGE_OK;
+ hpi_handle_t handle = hxgep->hpi_handle;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_tx_vmac_disable"));
+
+ rv = hpi_vmac_tx_config(handle, DISABLE, CFG_VMAC_TX_EN, 0);
+
+ status = (rv == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_tx_vmac_disable"));
+
+ return (status);
+}
+
+/* Enable RxVMAC */
+
+hxge_status_t
+hxge_rx_vmac_enable(p_hxge_t hxgep)
+{
+ hpi_status_t rv;
+ hxge_status_t status = HXGE_OK;
+ hpi_handle_t handle = hxgep->hpi_handle;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_rx_vmac_enable"));
+
+ rv = hxge_rx_vmac_init(hxgep);
+ if (rv != HXGE_OK)
+ return (rv);
+
+ rv = hpi_vmac_rx_config(handle, ENABLE, CFG_VMAC_RX_EN, 0);
+
+ status = (rv == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_rx_vmac_enable"));
+
+ return (status);
+}
+
+/* Disable RxVMAC */
+
+hxge_status_t
+hxge_rx_vmac_disable(p_hxge_t hxgep)
+{
+ hpi_status_t rv;
+ hxge_status_t status = HXGE_OK;
+ hpi_handle_t handle = hxgep->hpi_handle;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_rx_vmac_disable"));
+
+ rv = hpi_vmac_rx_config(handle, DISABLE, CFG_VMAC_RX_EN, 0);
+
+ status = (rv == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_rx_vmac_disable"));
+
+ return (status);
+}
+
+/* Reset TxVMAC */
+
+hxge_status_t
+hxge_tx_vmac_reset(p_hxge_t hxgep)
+{
+ hpi_handle_t handle = hxgep->hpi_handle;
+
+ (void) hpi_tx_vmac_reset(handle);
+
+ return (HXGE_OK);
+}
+
+/* Reset RxVMAC */
+
+hxge_status_t
+hxge_rx_vmac_reset(p_hxge_t hxgep)
+{
+ hpi_handle_t handle = hxgep->hpi_handle;
+
+ (void) hpi_rx_vmac_reset(handle);
+
+ return (HXGE_OK);
+}
+
+/*ARGSUSED*/
+uint_t
+hxge_vmac_intr(caddr_t arg1, caddr_t arg2)
+{
+ p_hxge_t hxgep = (p_hxge_t)arg2;
+ hpi_handle_t handle;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_vmac_intr"));
+
+ handle = HXGE_DEV_HPI_HANDLE(hxgep);
+
+ hxge_save_cntrs(hxgep);
+
+ /* Clear the interrupt status registers */
+ (void) hpi_vmac_clear_rx_int_stat(handle);
+ (void) hpi_vmac_clear_tx_int_stat(handle);
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_vmac_intr"));
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * Set promiscuous mode
+ */
+hxge_status_t
+hxge_set_promisc(p_hxge_t hxgep, boolean_t on)
+{
+ hxge_status_t status = HXGE_OK;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_set_promisc: on %d", on));
+
+ hxgep->filter.all_phys_cnt = ((on) ? 1 : 0);
+
+ RW_ENTER_WRITER(&hxgep->filter_lock);
+ if ((status = hxge_rx_vmac_disable(hxgep)) != HXGE_OK)
+ goto fail;
+ if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK)
+ goto fail;
+ RW_EXIT(&hxgep->filter_lock);
+
+ if (on)
+ hxgep->statsp->mac_stats.promisc = B_TRUE;
+ else
+ hxgep->statsp->mac_stats.promisc = B_FALSE;
+
+ HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_set_promisc"));
+ return (HXGE_OK);
+
+fail:
+ RW_EXIT(&hxgep->filter_lock);
+
+ HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_set_promisc: "
+ "Unable to set promisc (%d)", on));
+ return (status);
+}
+
+void
+hxge_save_cntrs(p_hxge_t hxgep)
+{
+ p_hxge_stats_t statsp;
+ hpi_handle_t handle;
+
+ vmac_tx_frame_cnt_t tx_frame_cnt;
+ vmac_tx_byte_cnt_t tx_byte_cnt;
+ vmac_rx_frame_cnt_t rx_frame_cnt;
+ vmac_rx_byte_cnt_t rx_byte_cnt;
+ vmac_rx_drop_fr_cnt_t rx_drop_fr_cnt;
+ vmac_rx_drop_byte_cnt_t rx_drop_byte_cnt;
+ vmac_rx_crc_cnt_t rx_crc_cnt;
+ vmac_rx_pause_cnt_t rx_pause_cnt;
+ vmac_rx_bcast_fr_cnt_t rx_bcast_fr_cnt;
+ vmac_rx_mcast_fr_cnt_t rx_mcast_fr_cnt;
+
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_save_cntrs"));
+
+ statsp = (p_hxge_stats_t)hxgep->statsp;
+ handle = hxgep->hpi_handle;
+
+ HXGE_REG_RD64(handle, VMAC_TX_FRAME_CNT, &tx_frame_cnt.value);
+ HXGE_REG_RD64(handle, VMAC_TX_BYTE_CNT, &tx_byte_cnt.value);
+ HXGE_REG_RD64(handle, VMAC_RX_FRAME_CNT, &rx_frame_cnt.value);
+ HXGE_REG_RD64(handle, VMAC_RX_BYTE_CNT, &rx_byte_cnt.value);
+ HXGE_REG_RD64(handle, VMAC_RX_DROP_FR_CNT, &rx_drop_fr_cnt.value);
+ HXGE_REG_RD64(handle, VMAC_RX_DROP_BYTE_CNT, &rx_drop_byte_cnt.value);
+ HXGE_REG_RD64(handle, VMAC_RX_CRC_CNT, &rx_crc_cnt.value);
+ HXGE_REG_RD64(handle, VMAC_RX_PAUSE_CNT, &rx_pause_cnt.value);
+ HXGE_REG_RD64(handle, VMAC_RX_BCAST_FR_CNT, &rx_bcast_fr_cnt.value);
+ HXGE_REG_RD64(handle, VMAC_RX_MCAST_FR_CNT, &rx_mcast_fr_cnt.value);
+
+ statsp->vmac_stats.tx_frame_cnt += tx_frame_cnt.bits.tx_frame_cnt;
+ statsp->vmac_stats.tx_byte_cnt += tx_byte_cnt.bits.tx_byte_cnt;
+ statsp->vmac_stats.rx_frame_cnt += rx_frame_cnt.bits.rx_frame_cnt;
+ statsp->vmac_stats.rx_byte_cnt += rx_byte_cnt.bits.rx_byte_cnt;
+ statsp->vmac_stats.rx_drop_frame_cnt +=
+ rx_drop_fr_cnt.bits.rx_drop_frame_cnt;
+ statsp->vmac_stats.rx_drop_byte_cnt +=
+ rx_drop_byte_cnt.bits.rx_drop_byte_cnt;
+ statsp->vmac_stats.rx_crc_cnt += rx_crc_cnt.bits.rx_crc_cnt;
+ statsp->vmac_stats.rx_pause_cnt += rx_pause_cnt.bits.rx_pause_cnt;
+ statsp->vmac_stats.rx_bcast_fr_cnt +=
+ rx_bcast_fr_cnt.bits.rx_bcast_fr_cnt;
+ statsp->vmac_stats.rx_mcast_fr_cnt +=
+ rx_mcast_fr_cnt.bits.rx_mcast_fr_cnt;
+
+hxge_save_cntrs_exit:
+ HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_save_cntrs"));
+}
diff --git a/usr/src/uts/common/io/hxge/hxge_vmac.h b/usr/src/uts/common/io/hxge/hxge_vmac.h
new file mode 100644
index 0000000000..d028fa0207
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_vmac.h
@@ -0,0 +1,89 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_HXGE_HXGE_VMAC_H
+#define _SYS_HXGE_HXGE_VMAC_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <hxge_vmac_hw.h>
+#include <hpi_vmac.h>
+
+/* Common MAC statistics */
+typedef struct _hxge_mac_stats {
+	/* Transceiver state information. */
+ uint32_t cap_10gfdx;
+
+ /* Advertised capabilities. */
+ uint32_t adv_cap_10gfdx;
+
+ /* Link partner capabilities. */
+ uint32_t lp_cap_10gfdx;
+
+ /* Physical link statistics. */
+ uint32_t link_speed;
+ uint32_t link_duplex;
+ uint32_t link_up;
+
+	/* Promiscuous mode */
+ boolean_t promisc;
+} hxge_mac_stats_t;
+
+/* VMAC statistics */
+
+typedef struct _hxge_vmac_stats {
+ uint64_t tx_frame_cnt; /* vmac_tx_frame_cnt_t */
+ uint64_t tx_byte_cnt; /* vmac_tx_byte_cnt_t */
+
+ uint64_t rx_frame_cnt; /* vmac_rx_frame_cnt_t */
+ uint64_t rx_byte_cnt; /* vmac_rx_byte_cnt_t */
+ uint64_t rx_drop_frame_cnt; /* vmac_rx_drop_fr_cnt_t */
+ uint64_t rx_drop_byte_cnt; /* vmac_rx_drop_byte_cnt_t */
+ uint64_t rx_crc_cnt; /* vmac_rx_crc_cnt_t */
+ uint64_t rx_pause_cnt; /* vmac_rx_pause_cnt_t */
+ uint64_t rx_bcast_fr_cnt; /* vmac_rx_bcast_fr_cnt_t */
+ uint64_t rx_mcast_fr_cnt; /* vmac_rx_mcast_fr_cnt_t */
+} hxge_vmac_stats_t, *p_hxge_vmac_stats_t;
+
+
+typedef struct _hxge_vmac {
+ boolean_t is_jumbo;
+ uint64_t tx_config;
+ uint64_t rx_config;
+ uint16_t minframesize;
+ uint16_t maxframesize;
+ uint16_t maxburstsize;
+} hxge_vmac_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HXGE_HXGE_VMAC_H */
diff --git a/usr/src/uts/common/io/hxge/hxge_vmac_hw.h b/usr/src/uts/common/io/hxge/hxge_vmac_hw.h
new file mode 100644
index 0000000000..c5aad7e655
--- /dev/null
+++ b/usr/src/uts/common/io/hxge/hxge_vmac_hw.h
@@ -0,0 +1,693 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _HXGE_VMAC_HW_H
+#define _HXGE_VMAC_HW_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define VMAC_BASE_ADDR 0X00100000
+
+#define VMAC_RST (VMAC_BASE_ADDR + 0x0)
+#define VMAC_TX_CFG (VMAC_BASE_ADDR + 0x8)
+#define VMAC_RX_CFG (VMAC_BASE_ADDR + 0x10)
+#define VMAC_TX_STAT (VMAC_BASE_ADDR + 0x20)
+#define VMAC_TX_MSK (VMAC_BASE_ADDR + 0x28)
+#define VMAC_RX_STAT (VMAC_BASE_ADDR + 0x30)
+#define VMAC_RX_MSK (VMAC_BASE_ADDR + 0x38)
+#define VMAC_TX_STAT_MIRROR (VMAC_BASE_ADDR + 0x40)
+#define VMAC_RX_STAT_MIRROR (VMAC_BASE_ADDR + 0x48)
+#define VMAC_TX_FRAME_CNT (VMAC_BASE_ADDR + 0x100)
+#define VMAC_TX_BYTE_CNT (VMAC_BASE_ADDR + 0x108)
+#define VMAC_RX_FRAME_CNT (VMAC_BASE_ADDR + 0x120)
+#define VMAC_RX_BYTE_CNT (VMAC_BASE_ADDR + 0x128)
+#define VMAC_RX_DROP_FR_CNT (VMAC_BASE_ADDR + 0x130)
+#define VMAC_RX_DROP_BYTE_CNT (VMAC_BASE_ADDR + 0x138)
+#define VMAC_RX_CRC_CNT (VMAC_BASE_ADDR + 0x140)
+#define VMAC_RX_PAUSE_CNT (VMAC_BASE_ADDR + 0x148)
+#define VMAC_RX_BCAST_FR_CNT (VMAC_BASE_ADDR + 0x150)
+#define VMAC_RX_MCAST_FR_CNT (VMAC_BASE_ADDR + 0x158)
+
+
+/*
+ * Register: VmacRst
+ * VMAC Software Reset Command
+ * Description:
+ * Fields:
+ * Write a '1' to reset Rx VMAC; auto clears. This brings rx vmac
+ * to power on reset state.
+ * Write a '1' to reset Tx VMAC; auto clears. This brings tx vmac
+ * to power on reset state.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:55;
+ uint64_t rx_reset:1;
+ uint64_t rsrvd1:7;
+ uint64_t tx_reset:1;
+#else
+ uint64_t tx_reset:1;
+ uint64_t rsrvd1:7;
+ uint64_t rx_reset:1;
+ uint64_t rsrvd:55;
+#endif
+ } bits;
+} vmac_rst_t;
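+/*
+ * Sketch (hypothetical, guarded out of the build): resetting both VMAC
+ * directions through the union above, using the register-access
+ * convention seen elsewhere in this driver.
+ */
+#ifdef HXGE_VMAC_HW_EXAMPLE
+static inline void
+hxge_example_vmac_reset(hpi_handle_t handle)
+{
+	vmac_rst_t rst;
+
+	rst.value = 0;
+	rst.bits.tx_reset = 1;	/* auto-clears when reset completes */
+	rst.bits.rx_reset = 1;	/* auto-clears when reset completes */
+	HXGE_REG_WR64(handle, VMAC_RST, rst.value);
+}
+#endif	/* HXGE_VMAC_HW_EXAMPLE */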
+
+
+/*
+ * Register: VmacTxCfg
+ * Tx VMAC Configuration
+ * Description:
+ * Fields:
+ * Maximum length of any total transfer gathered by Tx VMAC,
+ * including packet data, header, crc, transmit header and any
+ * pad bytes. Default value of 0x2422 represents 9220 bytes of
+ * packet data, ethernet header, and crc, 14 bytes maximum pad,
+ * and 16 bytes transmit header = 9250 (0x2422).
+ * Enable padding of short packet to meet minimum frame length of
+ * 64 bytes. Software should note that if txPad functionality is
+ * used to pad runt packets to minimum length, that crcInsert
+ * functionality (below) must also be used to provide the packet
+ * with correct L2 crc.
+ * 1: Enable generation and appending of FCS to the packets. 0:
+ * Disable generation and appending of FCS to the packets.
+ * Enable Tx VMAC. Write a '1' to enable Tx VMAC; write a '0' to
+ * disable it. This bit also propagates as vmacTdcEn to the TDC
+ * block. In TDC, the vmacTdcEn bit disables the RTab state
+ * machine. Hence, the transmission from that blade would be
+ * stopped and be queued, but no packets would be dropped. Thus,
+ * the VMAC can only be enabled/disabled at packet boundary. The
+ * VMAC will not send out portion of a packet. The currently
+ * processed packet will continue to be sent out when Tx VMAC is
+ * disabled.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t tx_max_frame_length:14;
+ uint64_t rsrvd1:15;
+ uint64_t tx_pad:1;
+ uint64_t crc_insert:1;
+ uint64_t tx_en:1;
+#else
+ uint64_t tx_en:1;
+ uint64_t crc_insert:1;
+ uint64_t tx_pad:1;
+ uint64_t rsrvd1:15;
+ uint64_t tx_max_frame_length:14;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} vmac_tx_cfg_t;
+
+
+/*
+ * Register: VmacRxCfg
+ * Rx VMAC Configuration
+ * Description: MAC address and length in Type/Length field are
+ * checked in PFC.
+ * Fields:
+ * Maximum length of a frame accepted by Rx/Tx VMAC. Only packets
+ * with length between 64 bytes and maxFrameLength will be
+ * accepted by Rx/Tx VMAC. This length indicates just the packet
+ * length excluding the packet header, crc, and any pad bytes.
+ * Maximum value is 9K (9*1024)
+ * enable packets from the same blade to loopback
+ * Enable acceptance of all Unicast packets for L2 destination
+ * address, ie, allow all Unicast packets to pass the L2
+ * filtering.
+ * Enable acceptance of all multi-cast packets, ie, allow all
+ * multi-cast packets to pass the L2 filtering.
+ * Enable the passing through of flow control frames.
+ * Enable the stripping of FCS field in the packets.
+ * Disable FCS checking. When checking is enabled, packets with an
+ * incorrect FCS value are dropped by Rx VMAC.
+ * Enable rx VMAC. Write a '1' to enable rx VMAC; write a '0' to
+ * disable it. The VMAC will begin to accept a packet at the
+ * detection of the SOP (start of packet). When disabled, the
+ * currently processed packet will continue to be accepted.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rx_max_frame_length:14;
+ uint64_t reserved:11;
+ uint64_t loopback:1;
+ uint64_t promiscuous_mode:1;
+ uint64_t promiscuous_group:1;
+ uint64_t pass_flow_ctrl_fr:1;
+ uint64_t strip_crc:1;
+ uint64_t crc_check_disable:1;
+ uint64_t rx_en:1;
+#else
+ uint64_t rx_en:1;
+ uint64_t crc_check_disable:1;
+ uint64_t strip_crc:1;
+ uint64_t pass_flow_ctrl_fr:1;
+ uint64_t promiscuous_group:1;
+ uint64_t promiscuous_mode:1;
+ uint64_t loopback:1;
+ uint64_t reserved:11;
+ uint64_t rx_max_frame_length:14;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} vmac_rx_cfg_t;
+
+
+/*
+ * Register: VmacTxStat
+ * Tx VMAC Status Register
+ * Description: A new interrupt will be generated only if Tx VMAC is
+ * enabled by vmacTxCfg::txEn=1. Disabling Tx VMAC does not affect
+ * currently-existing Ldf state. Writing this register affects
+ * vmacTxStatMirror register bits also the same way.
+ * Fields:
+ * Indicates that counter of byte transmitted has exceeded the
+ * max value.
+ * Indicates that counter of frame transmitted has exceeded the
+ * max value.
+ * A frame has been successfully transmitted.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:61;
+ uint64_t tx_byte_cnt_overflow:1;
+ uint64_t tx_frame_cnt_overflow:1;
+ uint64_t frame_tx:1;
+#else
+ uint64_t frame_tx:1;
+ uint64_t tx_frame_cnt_overflow:1;
+ uint64_t tx_byte_cnt_overflow:1;
+ uint64_t rsrvd:61;
+#endif
+ } bits;
+} vmac_tx_stat_t;
+
+
+/*
+ * Register: VmacTxMsk
+ * Tx VMAC Status Mask
+ * Description: Masks vmacTxStat bits from generating interrupts.
+ * Fields:
+ * 1: mask interrupt due to overflow of the counter of bytes
+ * transmitted
+ * 1: mask interrupt due to overflow of the counter of frames
+ * transmitted
+ * 1: mask interrupt due to successful transmission of a frame.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:61;
+ uint64_t tx_byte_cnt_overflow_msk:1;
+ uint64_t tx_frame_cnt_overflow_msk:1;
+ uint64_t frame_tx_msk:1;
+#else
+ uint64_t frame_tx_msk:1;
+ uint64_t tx_frame_cnt_overflow_msk:1;
+ uint64_t tx_byte_cnt_overflow_msk:1;
+ uint64_t rsrvd:61;
+#endif
+ } bits;
+} vmac_tx_msk_t;
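+
+/*
+ * Usage sketch (editor's illustration, not part of the original
+ * header): programming the Tx status mask so that the two
+ * counter-overflow conditions still raise interrupts while the very
+ * frequent per-frame event is masked off. The volatile pointer
+ * stands in for the driver's real register access path; the guard
+ * macro and function name are hypothetical.
+ */
+#ifdef HXGE_VMAC_HW_EXAMPLES
+static void
+example_vmac_tx_intr_setup(volatile uint64_t *tx_msk_reg)
+{
+	vmac_tx_msk_t msk;
+
+	msk.value = 0;			/* unmask everything... */
+	msk.bits.frame_tx_msk = 1;	/* ...except per-frame events */
+	*tx_msk_reg = msk.value;
+}
+#endif	/* HXGE_VMAC_HW_EXAMPLES */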
+
+
+/*
+ * Register: VmacRxStat
+ * Rx VMAC Status Register
+ * Description: The overflow indicators are read-only bits; read the
+ * corresponding counters to clear them. A new interrupt will be
+ * generated only if Rx VMAC is enabled by vmacRxCfg::rxEn=1.
+ * Disabling Rx VMAC does not affect the currently-existing Ldf
+ * state. Writing this register also affects the vmacRxStatMirror
+ * register bits in the same way.
+ * Fields:
+ * Indicates that the counter for broadcast packets has exceeded
+ * its max value.
+ * Indicates that the counter for multicast packets has exceeded
+ * its max value.
+ * Indicates that the counter for pause packets has exceeded its
+ * max value.
+ * Indicates that the counter for packets with a mismatched FCS
+ * has exceeded its max value.
+ * Indicates that the dropped-byte counter has exceeded its max
+ * value.
+ * Indicates that the dropped-frame counter has exceeded its max
+ * value.
+ * Indicates that the received-byte counter has exceeded its max
+ * value.
+ * Indicates that the received-frame counter has exceeded its max
+ * value.
+ * A valid frame has been successfully received.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:55;
+ uint64_t bcast_cnt_overflow:1;
+ uint64_t mcast_cnt_overflow:1;
+ uint64_t pause_cnt_overflow:1;
+ uint64_t crc_err_cnt_overflow:1;
+ uint64_t rx_drop_byte_cnt_overflow:1;
+ uint64_t rx_drop_frame_cnt_overflow:1;
+ uint64_t rx_byte_cnt_overflow:1;
+ uint64_t rx_frame_cnt_overflow:1;
+ uint64_t frame_rx:1;
+#else
+ uint64_t frame_rx:1;
+ uint64_t rx_frame_cnt_overflow:1;
+ uint64_t rx_byte_cnt_overflow:1;
+ uint64_t rx_drop_frame_cnt_overflow:1;
+ uint64_t rx_drop_byte_cnt_overflow:1;
+ uint64_t crc_err_cnt_overflow:1;
+ uint64_t pause_cnt_overflow:1;
+ uint64_t mcast_cnt_overflow:1;
+ uint64_t bcast_cnt_overflow:1;
+ uint64_t rsrvd:55;
+#endif
+ } bits;
+} vmac_rx_stat_t;
+
+
+/*
+ * Register: VmacRxMsk
+ * Rx VMAC Status Mask
+ * Description:
+ * Fields:
+ * 1: mask interrupt due to overflow of the counter for broadcast
+ * packets
+ * 1: mask interrupt due to overflow of the counter for multicast
+ * packets
+ * 1: mask interrupt due to overflow of the counter for pause
+ * packets
+ * 1: mask interrupt due to overflow of the counter for packets
+ * with a mismatched FCS
+ * 1: mask interrupt due to overflow of the dropped-byte counter
+ * 1: mask interrupt due to overflow of the dropped-frame counter
+ * 1: mask interrupt due to overflow of the received-byte counter
+ * 1: mask interrupt due to overflow of the received-frame counter
+ * 1: mask interrupt due to successful reception of a valid
+ * frame.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:55;
+ uint64_t bcast_cnt_overflow_msk:1;
+ uint64_t mcast_cnt_overflow_msk:1;
+ uint64_t pause_cnt_overflow_msk:1;
+ uint64_t crc_err_cnt_overflow_msk:1;
+ uint64_t rx_drop_byte_cnt_overflow_msk:1;
+ uint64_t rx_drop_frame_cnt_overflow_msk:1;
+ uint64_t rx_byte_cnt_overflow_msk:1;
+ uint64_t rx_frame_cnt_overflow_msk:1;
+ uint64_t frame_rx_msk:1;
+#else
+ uint64_t frame_rx_msk:1;
+ uint64_t rx_frame_cnt_overflow_msk:1;
+ uint64_t rx_byte_cnt_overflow_msk:1;
+ uint64_t rx_drop_frame_cnt_overflow_msk:1;
+ uint64_t rx_drop_byte_cnt_overflow_msk:1;
+ uint64_t crc_err_cnt_overflow_msk:1;
+ uint64_t pause_cnt_overflow_msk:1;
+ uint64_t mcast_cnt_overflow_msk:1;
+ uint64_t bcast_cnt_overflow_msk:1;
+ uint64_t rsrvd:55;
+#endif
+ } bits;
+} vmac_rx_msk_t;
+
+
+/*
+ * Register: VmacTxStatMirror
+ * Tx VMAC Status Mirror Register
+ * Description: Write a 1 to this register to force the corresponding
+ * interrupt. Reading this register returns the current Tx interrupt
+ * status which would be the same as reading the vmacTxStat register.
+ * The bits are cleared by writing 1 to the corresponding register
+ * bit in the vmacTxStat register, i.e., bit 0 of this register is
+ * cleared by writing 1 to bit 0 in the vmacTxStat register.
+ *
+ * Fields:
+ * 1 : Force tx byte counter overflow interrupt generation
+ * 1 : Force tx frame counter overflow interrupt generation
+ * 1 : Force frame transmitted interrupt generation
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:61;
+ uint64_t force_tx_byte_cnt_overflow:1;
+ uint64_t force_tx_frame_cnt_overflow:1;
+ uint64_t force_frame_tx:1;
+#else
+ uint64_t force_frame_tx:1;
+ uint64_t force_tx_frame_cnt_overflow:1;
+ uint64_t force_tx_byte_cnt_overflow:1;
+ uint64_t rsrvd:61;
+#endif
+ } bits;
+} vmac_tx_stat_mirror_t;
+
+
+/*
+ * Register: VmacRxStatMirror
+ * Rx VMAC Status Mirror Register
+ * Description: Write a 1 to this register to force the corresponding
+ * interrupt. Reading this register returns the current Rx interrupt
+ * status which would be the same as reading the vmacRxStat register.
+ * The bits are cleared by writing 1 to the corresponding register
+ * bit in the vmacRxStat register, i.e., bit 0 of this register is
+ * cleared by writing 1 to bit 0 in the vmacRxStat register.
+ * Fields:
+ * 1 : Force broadcast frame counter overflow interrupt
+ * generation
+ * 1 : Force multicast frame counter overflow interrupt
+ * generation
+ * 1 : Force pause frame counter overflow interrupt generation
+ * 1 : Force crc error counter overflow interrupt generation
+ * 1 : Force dropped frames byte counter overflow interrupt
+ * generation
+ * 1 : Force dropped frame counter overflow interrupt generation
+ * 1 : Force rx byte counter overflow interrupt generation
+ * 1 : Force rx frame counter overflow interrupt generation
+ * 1 : Force frame received interrupt generation
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:55;
+ uint64_t force_bcast_cnt_overflow:1;
+ uint64_t force_mcast_cnt_overflow:1;
+ uint64_t force_pause_cnt_overflow:1;
+ uint64_t force_crc_err_cnt_overflow:1;
+ uint64_t force_rx_drop_byte_cnt_overflow:1;
+ uint64_t force_rx_drop_frame_cnt_overflow:1;
+ uint64_t force_rx_byte_cnt_overflow:1;
+ uint64_t force_rx_frame_cnt_overflow:1;
+ uint64_t force_frame_rx:1;
+#else
+ uint64_t force_frame_rx:1;
+ uint64_t force_rx_frame_cnt_overflow:1;
+ uint64_t force_rx_byte_cnt_overflow:1;
+ uint64_t force_rx_drop_frame_cnt_overflow:1;
+ uint64_t force_rx_drop_byte_cnt_overflow:1;
+ uint64_t force_crc_err_cnt_overflow:1;
+ uint64_t force_pause_cnt_overflow:1;
+ uint64_t force_mcast_cnt_overflow:1;
+ uint64_t force_bcast_cnt_overflow:1;
+ uint64_t rsrvd:55;
+#endif
+ } bits;
+} vmac_rx_stat_mirror_t;
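+
+/*
+ * Usage sketch (editor's illustration, not part of the original
+ * header): forcing and then clearing a "frame received" interrupt
+ * through the mirror/status register pair described above, e.g. to
+ * exercise an interrupt handler. The volatile pointers stand in for
+ * the driver's real register access path; the guard macro and
+ * function name are hypothetical.
+ */
+#ifdef HXGE_VMAC_HW_EXAMPLES
+static void
+example_vmac_rx_force_intr(volatile uint64_t *rx_stat_mirror_reg,
+    volatile uint64_t *rx_stat_reg)
+{
+	vmac_rx_stat_mirror_t mirror;
+	vmac_rx_stat_t stat;
+
+	mirror.value = 0;
+	mirror.bits.force_frame_rx = 1;		/* inject the event */
+	*rx_stat_mirror_reg = mirror.value;
+
+	/* Clear it by writing 1 to the same bit in vmacRxStat. */
+	stat.value = 0;
+	stat.bits.frame_rx = 1;
+	*rx_stat_reg = stat.value;
+}
+#endif	/* HXGE_VMAC_HW_EXAMPLES */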
+
+
+/*
+ * Register: VmacTxFrameCnt
+ * VMAC transmitted frame counter
+ * Description:
+ * Fields:
+ * Indicates the number of frames transmitted by Tx VMAC. The
+ * counter will saturate at its max value. The counter is stalled
+ * when Tx VMAC is disabled by vmacTxCfg::txEn=0.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t tx_frame_cnt:32;
+#else
+ uint64_t tx_frame_cnt:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} vmac_tx_frame_cnt_t;
+
+
+/*
+ * Register: VmacTxByteCnt
+ * VMAC transmitted byte counter
+ * Description:
+ * Fields:
+ * Indicates the number of bytes (octets) of data transmitted by
+ * Tx VMAC. This counter counts all the bytes of the incoming
+ * data, including the packet header, packet data, CRC, and pad
+ * bytes. The counter will saturate at its max value. The counter
+ * is stalled when Tx VMAC is disabled by vmacTxCfg::txEn=0.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t tx_byte_cnt:32;
+#else
+ uint64_t tx_byte_cnt:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} vmac_tx_byte_cnt_t;
+
+
+/*
+ * Register: VmacRxFrameCnt
+ * VMAC received frame counter
+ * Description:
+ * Fields:
+ * Indicates the number of frames received by Rx VMAC. The
+ * counter will saturate at its max value. The counter is stalled
+ * when Rx VMAC is disabled by vmacRxCfg::rxEn=0.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rx_frame_cnt:32;
+#else
+ uint64_t rx_frame_cnt:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} vmac_rx_frame_cnt_t;
+
+
+/*
+ * Register: VmacRxByteCnt
+ * VMAC received byte counter
+ * Description:
+ * Fields:
+ * Indicates the number of bytes (octets) of data received by Rx
+ * VMAC, including any error frames. The counter will saturate at
+ * its max value. The counter is stalled when Rx VMAC is disabled
+ * by vmacRxCfg::rxEn=0.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rx_byte_cnt:32;
+#else
+ uint64_t rx_byte_cnt:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} vmac_rx_byte_cnt_t;
+
+
+/*
+ * Register: VmacRxDropFrCnt
+ * VMAC dropped frame counter
+ * Description:
+ * Fields:
+ * Indicates the number of frames dropped by Rx VMAC. This
+ * counter increments for every frame dropped for one of the
+ * following reasons: CRC mismatch while CRC checking is enabled,
+ * failed L2 address match while the VMAC is not in promiscuous
+ * mode, or a pause packet while the VMAC is not programmed to
+ * pass these frames. The counter will saturate at its max value.
+ * The counter is stalled when Rx VMAC is disabled by
+ * vmacRxCfg::rxEn=0.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rx_drop_frame_cnt:32;
+#else
+ uint64_t rx_drop_frame_cnt:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} vmac_rx_drop_fr_cnt_t;
+
+
+/*
+ * Register: VmacRxDropByteCnt
+ * VMAC dropped byte counter
+ * Description:
+ * Fields:
+ * Indicates the number of bytes of data dropped by Rx VMAC.
+ * Frames are dropped for one of the following conditions: CRC
+ * mismatch while CRC checking is enabled, failed L2 address
+ * match while the VMAC is not in promiscuous mode, or a pause
+ * packet while the VMAC is not programmed to pass these frames.
+ * The counter will saturate at its max value. The counter is
+ * stalled when Rx VMAC is disabled by vmacRxCfg::rxEn=0.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rx_drop_byte_cnt:32;
+#else
+ uint64_t rx_drop_byte_cnt:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} vmac_rx_drop_byte_cnt_t;
+
+
+/*
+ * Register: VmacRxCrcCnt
+ * VMAC received CRC error frame counter
+ * Description:
+ * Fields:
+ * Indicates the number of frames with an invalid CRC. When the
+ * NMAC truncates a packet, it asserts a crcError indication to
+ * the VMAC, which then counts it as a CRC error. Thus the VMAC
+ * CRC error counter reflects the CRC mismatches on all the
+ * packets going out of the RxMAC, while the NMAC CRC error
+ * counter reflects the CRC mismatches on all the packets coming
+ * into the RxMAC. The counter will saturate at its max value.
+ * The counter is stalled when Rx VMAC is disabled by
+ * vmacRxCfg::rxEn=0.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rx_crc_cnt:32;
+#else
+ uint64_t rx_crc_cnt:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} vmac_rx_crc_cnt_t;
+
+
+/*
+ * Register: VmacRxPauseCnt
+ * VMAC received pause frame counter
+ * Description:
+ * Fields:
+ * Counts the number of pause frames received by Rx VMAC. The
+ * counter is stalled when Rx VMAC is disabled by
+ * vmacRxCfg::rxEn=0.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rx_pause_cnt:32;
+#else
+ uint64_t rx_pause_cnt:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} vmac_rx_pause_cnt_t;
+
+
+/*
+ * Register: VmacRxBcastFrCnt
+ * VMAC received broadcast frame counter
+ * Description:
+ * Fields:
+ * Indicates the number of broadcast frames received. The counter
+ * is stalled when Rx VMAC is disabled by vmacRxCfg::rxEn=0.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rx_bcast_fr_cnt:32;
+#else
+ uint64_t rx_bcast_fr_cnt:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} vmac_rx_bcast_fr_cnt_t;
+
+
+/*
+ * Register: VmacRxMcastFrCnt
+ * VMAC received multicast frame counter
+ * Description:
+ * Fields:
+ * Indicates the number of multicast frames received. The counter
+ * is stalled when Rx VMAC is disabled by vmacRxCfg::rxEn=0.
+ */
+typedef union {
+ uint64_t value;
+ struct {
+#if defined(_BIG_ENDIAN)
+ uint64_t rsrvd:32;
+ uint64_t rx_mcast_fr_cnt:32;
+#else
+ uint64_t rx_mcast_fr_cnt:32;
+ uint64_t rsrvd:32;
+#endif
+ } bits;
+} vmac_rx_mcast_fr_cnt_t;
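+
+/*
+ * Usage sketch (editor's illustration, not part of the original
+ * header): snapshotting one of the saturating 32-bit counters. The
+ * same pattern applies to every counter register above; note that a
+ * counter stalls while its VMAC is disabled and saturates at its max
+ * value rather than wrapping. The guard macro and function name are
+ * hypothetical.
+ */
+#ifdef HXGE_VMAC_HW_EXAMPLES
+static uint32_t
+example_vmac_rx_mcast_snapshot(volatile uint64_t *rx_mcast_cnt_reg)
+{
+	vmac_rx_mcast_fr_cnt_t cnt;
+
+	cnt.value = *rx_mcast_cnt_reg;
+	return (cnt.bits.rx_mcast_fr_cnt);	/* saturates at max */
+}
+#endif	/* HXGE_VMAC_HW_EXAMPLES */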
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HXGE_VMAC_HW_H */
diff --git a/usr/src/uts/intel/Makefile.intel.shared b/usr/src/uts/intel/Makefile.intel.shared
index f4679348a4..6b326bea0c 100644
--- a/usr/src/uts/intel/Makefile.intel.shared
+++ b/usr/src/uts/intel/Makefile.intel.shared
@@ -325,6 +325,7 @@ DRV_KMODS += xge
DRV_KMODS += zcons
DRV_KMODS += chxge
DRV_KMODS += nsmb
+DRV_KMODS += hxge
#
# Don't build some of these for OpenSolaris, since they will be
diff --git a/usr/src/uts/intel/hxge/Makefile b/usr/src/uts/intel/hxge/Makefile
new file mode 100644
index 0000000000..8b7af2e4c1
--- /dev/null
+++ b/usr/src/uts/intel/hxge/Makefile
@@ -0,0 +1,129 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# uts/intel/hxge/Makefile
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the Sun
+# 10G hxge Ethernet leaf driver kernel module.
+#
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = hxge
+HXGE_OBJECTS = $(HXGE_OBJS) $(HXGE_HPI_OBJS)
+OBJECTS = $(HXGE_OBJECTS:%=$(OBJS_DIR)/%)
+LINTS = $(HXGE_OBJECTS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/common/io/hxge
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/intel/Makefile.intel
+
+#
+# Override defaults to build a unique, local modstubs.o.
+#
+MODSTUBS_DIR = $(OBJS_DIR)
+
+CLEANFILES += $(MODSTUBS_O)
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+#
+# Turn on doubleword alignment for 64 bit registers
+#
+CFLAGS += -dalign
+#
+# Include hxge specific header files
+#
+INC_PATH += -I$(UTSBASE)/common
+INC_PATH += -I$(UTSBASE)/common/io/hxge
+#
+#
+# lint pass one enforcement
+#
+CFLAGS += -DSOLARIS
+#
+# Debug flags
+#
+# CFLAGS += -DHXGE_DEBUG -DHPI_DEBUG
+#
+# 64 bit only
+#
+ALL_BUILDS = $(ALL_BUILDS64)
+DEF_BUILDS = $(DEF_BUILDS64)
+CLEANLINTFILES += $(LINT64_FILES)
+#
+LINTFLAGS += -DSOLARIS
+#
+# STREAMS, DDI API limitations and other ON header file definitions such as ethernet.h
+# force us to turn off these lint checks.
+#
+LINTTAGS += -erroff=E_BAD_PTR_CAST_ALIGN
+LINTTAGS += -erroff=E_PTRDIFF_OVERFLOW
+LINTTAGS += -erroff=E_FALSE_LOGICAL_EXPR
+#
+# Driver depends on mac & IP
+#
+LDFLAGS += -dy -N misc/mac -N drv/ip
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ